perfmon.c
			 * clear hardware registers to make sure we don't leak
			 * information and pick up stale state
			 */
			for (i=0; i < pmu_conf.num_ibrs; i++) {
				ia64_set_ibr(i, 0UL);
			}
			for (i=0; i < pmu_conf.num_dbrs; i++) {
				ia64_set_dbr(i, 0UL);
			}
		}
	}

	ret = -EFAULT;

	/*
	 * Now install the values into the registers
	 */
	for (i = 0; i < count; i++, req++) {

		if (copy_from_user(&tmp, req, sizeof(tmp))) goto abort_mission;

		rnum      = tmp.dbreg_num;
		dbreg.val = tmp.dbreg_value;

		ret = -EINVAL;

		if ((mode == 0 && !IBR_IS_IMPL(rnum)) || ((mode == 1) && !DBR_IS_IMPL(rnum))) {
			DBprintk(("invalid register %u val=0x%lx mode=%d i=%d count=%d\n",
				  rnum, dbreg.val, mode, i, count));
			goto abort_mission;
		}

		/*
		 * make sure we do not install an enabled breakpoint
		 */
		if (rnum & 0x1) {
			if (mode == 0)
				dbreg.ibr.ibr_x = 0;
			else
				dbreg.dbr.dbr_r = dbreg.dbr.dbr_w = 0;
		}

		/*
		 * clear return flags and copy back to user
		 *
		 * XXX: fix once EAGAIN is implemented
		 */
		ret = -EFAULT;

		PFM_REG_RETFLAG_SET(tmp.dbreg_flags, 0);

		if (copy_to_user(req, &tmp, sizeof(tmp))) goto abort_mission;

		/*
		 * Debug registers, just like PMC, can only be modified
		 * by a kernel call. Moreover, perfmon() access to those
		 * registers is centralized in this routine. The hardware
		 * does not modify the value of these registers, therefore,
		 * if we save them as they are written, we can avoid having
		 * to save them on context switch out. This is made possible
		 * by the fact that when perfmon uses debug registers, ptrace()
		 * won't be able to modify them concurrently.
		 */
		if (mode == 0) {
			CTX_USED_IBR(ctx, rnum);

			ia64_set_ibr(rnum, dbreg.val);

			thread->ibr[rnum] = dbreg.val;

			DBprintk(("write ibr%u=0x%lx used_ibrs=0x%lx\n",
				  rnum, dbreg.val, ctx->ctx_used_ibrs[0]));
		} else {
			CTX_USED_DBR(ctx, rnum);

			ia64_set_dbr(rnum, dbreg.val);

			thread->dbr[rnum] = dbreg.val;

			DBprintk(("write dbr%u=0x%lx used_dbrs=0x%lx\n",
				  rnum, dbreg.val, ctx->ctx_used_dbrs[0]));
		}
	}

	return 0;

abort_mission:
	/*
	 * in case it was our first attempt, we undo the global modifications
	 */
	if (first_time) {
		LOCK_PFS();
		if (ctx->ctx_fl_system) {
			pfm_sessions.pfs_sys_use_dbregs--;
		}
		UNLOCK_PFS();
		ctx->ctx_fl_using_dbreg = 0;
	}
	/*
	 * install error return flag
	 */
	if (ret != -EFAULT) {
		/*
		 * XXX: for now we can only come here on EINVAL
		 */
		PFM_REG_RETFLAG_SET(tmp.dbreg_flags, PFM_REG_RETFL_EINVAL);
		copy_to_user(req, &tmp, sizeof(tmp));
	}
	return ret;
}

static int
pfm_write_ibrs(struct task_struct *task, pfm_context_t *ctx, void *arg, int count,
	       struct pt_regs *regs)
{
	/* we don't quite support this right now */
	if (task != current) return -EINVAL;

	if (!CTX_IS_ENABLED(ctx)) return -EINVAL;

	return pfm_write_ibr_dbr(0, task, arg, count, regs);
}

static int
pfm_write_dbrs(struct task_struct *task, pfm_context_t *ctx, void *arg, int count,
	       struct pt_regs *regs)
{
	/* we don't quite support this right now */
	if (task != current) return -EINVAL;

	if (!CTX_IS_ENABLED(ctx)) return -EINVAL;

	return pfm_write_ibr_dbr(1, task, arg, count, regs);
}

#endif /* PFM_PMU_USES_DBR */

static int
pfm_get_features(struct task_struct *task, pfm_context_t *ctx, void *arg, int count,
		 struct pt_regs *regs)
{
	pfarg_features_t tmp;

	memset(&tmp, 0, sizeof(tmp));

	tmp.ft_version      = PFM_VERSION;
	tmp.ft_smpl_version = PFM_SMPL_VERSION;

	if (copy_to_user(arg, &tmp, sizeof(tmp))) return -EFAULT;

	return 0;
}
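/*
 * pfm_start() below backs the PFM_START command (slot 5 in pfm_cmd_tab
 * further down): it resumes counting on a context that has already been
 * enabled, either system-wide via psr.pp/dcr.pp or per-task via psr.up.
 * A minimal user-level sketch, assuming the usual perfmonctl() stub and
 * the PFM_* command names from perfmon.h (values matching their
 * pfm_cmd_tab slots):
 *
 *	perfmonctl(getpid(), PFM_ENABLE, NULL, 0);	// slot 6: take PMU ownership
 *	perfmonctl(getpid(), PFM_START,  NULL, 0);	// slot 5: start counting
 *	// ... workload being measured ...
 *	perfmonctl(getpid(), PFM_STOP,   NULL, 0);	// slot 4: stop counting
 */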
static int
pfm_start(struct task_struct *task, pfm_context_t *ctx, void *arg, int count,
	  struct pt_regs *regs)
{
	/* we don't quite support this right now */
	if (task != current) return -EINVAL;

	/*
	 * Cannot do anything before PMU is enabled
	 */
	if (!CTX_IS_ENABLED(ctx)) return -EINVAL;

	DBprintk(("[%d] fl_system=%d owner=%p current=%p\n",
		  current->pid, ctx->ctx_fl_system, PMU_OWNER(), current));

	if (PMU_OWNER() != task) {
		printk("perfmon: pfm_start task [%d] not pmu owner\n", task->pid);
		return -EINVAL;
	}

	if (ctx->ctx_fl_system) {

		/* enable dcr pp */
		ia64_set_dcr(ia64_get_dcr()|IA64_DCR_PP);

#ifdef CONFIG_SMP
		local_cpu_data->pfm_dcr_pp = 1;
#else
		pfm_tasklist_toggle_pp(1);
#endif
		ia64_psr(regs)->pp = 1;

		__asm__ __volatile__ ("ssm psr.pp;;"::: "memory");

	} else {
		if ((task->thread.flags & IA64_THREAD_PM_VALID) == 0) {
			printk("perfmon: pfm_start task flag not set for [%d]\n", task->pid);
			return -EINVAL;
		}

		ia64_psr(regs)->up = 1;

		__asm__ __volatile__ ("sum psr.up;;"::: "memory");
	}
	ia64_srlz_d();

	return 0;
}

static int
pfm_enable(struct task_struct *task, pfm_context_t *ctx, void *arg, int count,
	   struct pt_regs *regs)
{
	/* we don't quite support this right now */
	if (task != current) return -EINVAL;

	if (ctx->ctx_fl_system == 0 && PMU_OWNER() && PMU_OWNER() != current)
		pfm_lazy_save_regs(PMU_OWNER());

	/* reset all registers to stable quiet state */
	ia64_reset_pmu(task);

	/* make sure nothing starts */
	if (ctx->ctx_fl_system) {
		ia64_psr(regs)->pp = 0;
		ia64_psr(regs)->up = 0; /* just to make sure! */

		__asm__ __volatile__ ("rsm psr.pp;;"::: "memory");

#ifdef CONFIG_SMP
		local_cpu_data->pfm_syst_wide = 1;
		local_cpu_data->pfm_dcr_pp    = 0;
#endif
	} else {
		/*
		 * needed in case the task was a passive task during
		 * a system wide session and now wants to have its own
		 * session
		 */
		ia64_psr(regs)->pp = 0; /* just to make sure! */
		ia64_psr(regs)->up = 0;

		__asm__ __volatile__ ("rum psr.up;;"::: "memory");

		/*
		 * allow user control (user monitors only)
		 * if (task == ctx->ctx_owner) {
		 */
		{
			DBprintk(("clearing psr.sp for [%d]\n", current->pid));
			ia64_psr(regs)->sp = 0;
		}

		task->thread.flags |= IA64_THREAD_PM_VALID;
	}

	SET_PMU_OWNER(task);

	ctx->ctx_flags.state = PFM_CTX_ENABLED;
	atomic_set(&ctx->ctx_last_cpu, smp_processor_id());

	/* simply unfreeze */
	ia64_set_pmc(0, 0);
	ia64_srlz_d();

	return 0;
}
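/*
 * Dispatch table for sys_perfmonctl(). Each pfm_cmd_desc_t entry holds, in
 * order: the handler function, the PFM_CMD_* flags (whether the command
 * takes a pid, needs a context, reads and/or writes its user argument, or
 * skips the task-state check), the expected argument count (PFM_CMD_ARG_MANY
 * for a variable number), and the size of one argument element. The command
 * number is the table index, which is why the unused slots below are NULL
 * placeholders and why the ordering matters.
 */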
/*
 * functions MUST be listed in the increasing order of their index (see perfmon.h)
 */
static pfm_cmd_desc_t pfm_cmd_tab[]={
/* 0  */{ NULL, 0, 0, 0}, /* not used */
/* 1  */{ pfm_write_pmcs, PFM_CMD_PID|PFM_CMD_CTX|PFM_CMD_ARG_READ|PFM_CMD_ARG_WRITE, PFM_CMD_ARG_MANY, sizeof(pfarg_reg_t)},
/* 2  */{ pfm_write_pmds, PFM_CMD_PID|PFM_CMD_CTX|PFM_CMD_ARG_READ, PFM_CMD_ARG_MANY, sizeof(pfarg_reg_t)},
/* 3  */{ pfm_read_pmds, PFM_CMD_PID|PFM_CMD_CTX|PFM_CMD_ARG_READ|PFM_CMD_ARG_WRITE, PFM_CMD_ARG_MANY, sizeof(pfarg_reg_t)},
/* 4  */{ pfm_stop, PFM_CMD_PID|PFM_CMD_CTX, 0, 0},
/* 5  */{ pfm_start, PFM_CMD_PID|PFM_CMD_CTX, 0, 0},
/* 6  */{ pfm_enable, PFM_CMD_PID|PFM_CMD_CTX, 0, 0},
/* 7  */{ pfm_disable, PFM_CMD_PID|PFM_CMD_CTX, 0, 0},
/* 8  */{ pfm_create_context, PFM_CMD_ARG_READ, 1, sizeof(pfarg_context_t)},
/* 9  */{ pfm_destroy_context, PFM_CMD_PID|PFM_CMD_CTX, 0, 0},
/* 10 */{ pfm_restart, PFM_CMD_PID|PFM_CMD_CTX|PFM_CMD_NOCHK, 0, 0},
/* 11 */{ pfm_protect_context, PFM_CMD_PID|PFM_CMD_CTX, 0, 0},
/* 12 */{ pfm_get_features, PFM_CMD_ARG_WRITE, 0, 0},
/* 13 */{ pfm_debug, 0, 1, sizeof(unsigned int)},
/* 14 */{ pfm_unprotect_context, PFM_CMD_PID|PFM_CMD_CTX, 0, 0},
/* 15 */{ NULL, 0, 0, 0}, /* not used */
/* 16 */{ NULL, 0, 0, 0}, /* not used */
/* 17 */{ NULL, 0, 0, 0}, /* not used */
/* 18 */{ NULL, 0, 0, 0}, /* not used */
/* 19 */{ NULL, 0, 0, 0}, /* not used */
/* 20 */{ NULL, 0, 0, 0}, /* not used */
/* 21 */{ NULL, 0, 0, 0}, /* not used */
/* 22 */{ NULL, 0, 0, 0}, /* not used */
/* 23 */{ NULL, 0, 0, 0}, /* not used */
/* 24 */{ NULL, 0, 0, 0}, /* not used */
/* 25 */{ NULL, 0, 0, 0}, /* not used */
/* 26 */{ NULL, 0, 0, 0}, /* not used */
/* 27 */{ NULL, 0, 0, 0}, /* not used */
/* 28 */{ NULL, 0, 0, 0}, /* not used */
/* 29 */{ NULL, 0, 0, 0}, /* not used */
/* 30 */{ NULL, 0, 0, 0}, /* not used */
/* 31 */{ NULL, 0, 0, 0}, /* not used */
#ifdef PFM_PMU_USES_DBR
/* 32 */{ pfm_write_ibrs, PFM_CMD_PID|PFM_CMD_CTX|PFM_CMD_ARG_READ|PFM_CMD_ARG_WRITE, PFM_CMD_ARG_MANY, sizeof(pfarg_dbreg_t)},
/* 33 */{ pfm_write_dbrs, PFM_CMD_PID|PFM_CMD_CTX|PFM_CMD_ARG_READ|PFM_CMD_ARG_WRITE, PFM_CMD_ARG_MANY, sizeof(pfarg_dbreg_t)}
#endif
};
#define PFM_CMD_COUNT	(sizeof(pfm_cmd_tab)/sizeof(pfm_cmd_desc_t))

static int
check_task_state(struct task_struct *task)
{
	int ret = 0;
#ifdef CONFIG_SMP
	/* We must wait until the state has been completely
	 * saved. There can be situations where the reader arrives after
	 * the task is marked as STOPPED but before pfm_save_regs()
	 * is completed.
	 */
	for (;;) {

		task_lock(task);
		if (!task_has_cpu(task)) break;
		task_unlock(task);

		do {
			if (task->state != TASK_ZOMBIE && task->state != TASK_STOPPED)
				return -EBUSY;

			barrier();
			cpu_relax();
		} while (task_has_cpu(task));
	}
	task_unlock(task);
#else
	if (task->state != TASK_ZOMBIE && task->state != TASK_STOPPED) {
		DBprintk(("warning [%d] not in stable state %ld\n", task->pid, task->state));
		ret = -EBUSY;
	}
#endif
	return ret;
}

asmlinkage int
sys_perfmonctl (pid_t pid, int cmd, void *arg, int count, long arg5, long arg6,
		long arg7, long arg8, long stack)
{
	struct pt_regs *regs = (struct pt_regs *)&stack;
	struct task_struct *task = current;
	pfm_context_t *ctx = task->thread.pfm_context;
	size_t sz;
	int ret = -ESRCH, narg;

	/*
	 * reject any call if perfmon was disabled at initialization time
	 */
	if (PFM_IS_DISABLED()) return -ENOSYS;

	DBprintk(("cmd=%d idx=%d valid=%d narg=0x%x\n", cmd, PFM_CMD_IDX(cmd),
		  PFM_CMD_IS_VALID(cmd), PFM_CMD_NARG(cmd)));

	if (PFM_CMD_IS_VALID(cmd) == 0) return -EINVAL;

	/* ignore arguments when command has none */
	narg = PFM_CMD_NARG(cmd);
	if ((narg == PFM_CMD_ARG_MANY && count == 0) || (narg > 0 && narg != count)) return -EINVAL;

	sz = PFM_CMD_ARG_SIZE(cmd);

	if (PFM_CMD_READ_ARG(cmd) && !access_ok(VERIFY_READ, arg, sz*count)) return -EFAULT;

	if (PFM_CMD_WRITE_ARG(cmd) && !access_ok(VERIFY_WRITE, arg, sz*count)) return -EFAULT;

	if (PFM_CMD_USE_PID(cmd)) {
		/*
		 * XXX: may need to fine tune this one
		 */
		if (pid < 2) return -EPERM;

		if (pid != current->pid) {

			read_lock(&tasklist_lock);

			task = find_task_by_pid(pid);

			if (!task) goto abort_call;

			ret = -EPERM;

			if (pfm_bad_permissions(task)) goto abort_call;

			if (PFM_CMD_CHK(cmd)) {
				ret = check_task_state(task);
				if (ret != 0) goto abort_call;
			}
			ctx = task->thread.pfm_context;
		}
	}

	if (PFM_CMD_USE_CTX(cmd)) {
		ret = -EINVAL;
		if (ctx == NULL) {
			DBprintk(("no context for task %d\n", task->pid));
			goto abort_call;
		}
		ret = -EPERM;
		/*
		 * we only grant access to the context if:
		 * - the caller is the creator of the context (ctx_owner)
		 *   OR
		 * - the context is attached to the caller AND the context is NOT
		 *   in protected mode
		 */
		if (ctx->ctx_owner != current && (ctx->ctx_fl_protected || task != current)) {
			DBprintk(("context protected, no access for [%d]\n", task->pid));
			goto abort_call;
		}
	}

	ret = (*pfm_cmd_tab[PFM_CMD_IDX(cmd)].cmd_func)(task, ctx, arg, count, regs);

abort_call:
	if (task != current) read_unlock(&tasklist_lock);

	return ret;
}
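/*
 * pfm_ovfl_block_reset() is reached via th->pfm_ovfl_block_reset, set after
 * a counter-overflow notification: for a blocking context the monitored
 * task sleeps on ctx_restart_sem until the monitoring process issues
 * PFM_RESTART (slot 10 above); a non-blocking context skips straight to
 * resetting the overflowed registers (non_blocking label).
 */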
#if __GNUC__ >= 3
void asmlinkage
pfm_ovfl_block_reset(u64 arg0, u64 arg1, u64 arg2, u64 arg3, u64 arg4,
		     u64 arg5, u64 arg6, u64 arg7, long info)
#else
void asmlinkage
pfm_ovfl_block_reset(u64 arg0, u64 arg1, u64 arg2, u64 arg3, u64 arg4,
		     u64 arg5, u64 arg6, u64 arg7, long info)
#endif
{
	struct thread_struct *th = &current->thread;
	pfm_context_t *ctx = current->thread.pfm_context;
	int ret;

	/*
	 * clear the flag, to make sure we won't get here
	 * again
	 */
	th->pfm_ovfl_block_reset = 0;

	/*
	 * do some sanity checks first
	 */
	if (!ctx) {
		printk("perfmon: [%d] has no PFM context\n", current->pid);
		return;
	}

	if (CTX_OVFL_NOBLOCK(ctx)) goto non_blocking;

	DBprintk(("[%d] before sleeping\n", current->pid));

	/*
	 * may go through without blocking on SMP systems
	 * if restart has been received already by the time we call down()
	 */
	ret = down_interruptible(&ctx->ctx_restart_sem);

	DBprintk(("[%d] after sleeping ret=%d\n", current->pid, ret));

	/*
	 * in case of interruption of down() we don't restart anything
	 */
	if (ret >= 0) {

non_blocking:
		/* we reactivate on context switch */
		ctx->ctx_fl_frozen = 0;
		/*
		 * the ovfl_sem is cleared by the restart task and this is safe because we always
		 * use the local reference
		 */
		pfm_reset_regs(ctx, ctx->ctx_ovfl_regs, PF