perfmon.c
		} else if (reg_flags & (PFM_REGFL_OVFL_NOTIFY|PFM_REGFL_RANDOM)) {
			DBprintk(("cannot set ovfl_notify or random on pmc%u\n", cnum));
			goto error;
		}

		/*
		 * execute write checker, if any
		 */
		if (PMC_WR_FUNC(cnum)) {
			ret = PMC_WR_FUNC(cnum)(task, cnum, &value, regs);
			if (ret) goto error;
			ret = -EINVAL;
		}

		/*
		 * no error on this register
		 */
		PFM_REG_RETFLAG_SET(tmp.reg_flags, 0);

		/*
		 * update register return value, abort all if problem during copy.
		 * we only modify the reg_flags field. no check mode is fine because
		 * access has been verified upfront in sys_perfmonctl().
		 *
		 * If this fails, then the software state is not modified
		 */
		if (__put_user(tmp.reg_flags, &req->reg_flags)) return -EFAULT;

		/*
		 * Now we commit the changes to the software state
		 */

		/*
		 * full flag update each time a register is programmed
		 */
		ctx->ctx_soft_pmds[cnum].flags = flags;

		if (PMC_IS_COUNTING(cnum)) {
			ctx->ctx_soft_pmds[cnum].reset_pmds[0] = reset_pmds;

			/* mark all PMDS to be accessed as used */
			CTX_USED_PMD(ctx, reset_pmds);
		}

		/*
		 * Needed in case the user does not initialize the equivalent
		 * PMD. Clearing is done in reset_pmu() so there is no possible
		 * leak here.
		 */
		CTX_USED_PMD(ctx, pmu_conf.pmc_desc[cnum].dep_pmd[0]);

		/*
		 * keep a copy of the pmc, used for register reload
		 */
		th->pmc[cnum] = value;

		ia64_set_pmc(cnum, value);

		DBprintk(("[%d] pmc[%u]=0x%lx flags=0x%x used_pmds=0x%lx\n",
			  task->pid, cnum, value,
			  ctx->ctx_soft_pmds[cnum].flags,
			  ctx->ctx_used_pmds[0]));
	}
	return 0;

error:
	PFM_REG_RETFLAG_SET(tmp.reg_flags, PFM_REG_RETFL_EINVAL);

	if (__put_user(tmp.reg_flags, &req->reg_flags)) ret = -EFAULT;

	DBprintk(("[%d] pmc[%u]=0x%lx error %d\n", task->pid, cnum, value, ret));

	return ret;
}

static int
pfm_write_pmds(struct task_struct *task, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	pfarg_reg_t tmp, *req = (pfarg_reg_t *)arg;
	unsigned long value, hw_value;
	unsigned int cnum;
	int i;
	int ret = 0;

	/*
	 * Cannot do anything before PMU is enabled
	 */
	if (!CTX_IS_ENABLED(ctx)) return -EINVAL;

	/* XXX: ctx locking may be required here */

	ret = -EINVAL;

	for (i = 0; i < count; i++, req++) {

		if (__copy_from_user(&tmp, req, sizeof(tmp))) return -EFAULT;

		cnum  = tmp.reg_num;
		value = tmp.reg_value;

		if (!PMD_IS_IMPL(cnum)) {
			DBprintk(("pmd[%u] is unimplemented or invalid\n", cnum));
			goto abort_mission;
		}

		/*
		 * execute write checker, if any
		 */
		if (PMD_WR_FUNC(cnum)) {
			unsigned long v = value;
			ret = PMD_WR_FUNC(cnum)(task, cnum, &v, regs);
			if (ret) goto abort_mission;
			value = v;
			ret = -EINVAL;
		}
		hw_value = value;

		/*
		 * no error on this register
		 */
		PFM_REG_RETFLAG_SET(tmp.reg_flags, 0);

		if (__put_user(tmp.reg_flags, &req->reg_flags)) return -EFAULT;

		/*
		 * now commit changes to software state
		 */

		/* update virtualized (64bits) counter */
		if (PMD_IS_COUNTING(cnum)) {
			ctx->ctx_soft_pmds[cnum].lval = value;
			ctx->ctx_soft_pmds[cnum].val  = value & ~pmu_conf.ovfl_val;

			hw_value = value & pmu_conf.ovfl_val;

			ctx->ctx_soft_pmds[cnum].long_reset  = tmp.reg_long_reset;
			ctx->ctx_soft_pmds[cnum].short_reset = tmp.reg_short_reset;

			ctx->ctx_soft_pmds[cnum].seed = tmp.reg_random_seed;
			ctx->ctx_soft_pmds[cnum].mask = tmp.reg_random_mask;
		}

		/* keep track of what we use */
		CTX_USED_PMD(ctx, pmu_conf.pmd_desc[(cnum)].dep_pmd[0]);

		/* mark this register as used as well */
		CTX_USED_PMD(ctx, RDEP(cnum));

		/* writes to the unimplemented part are ignored, so this is safe */
		ia64_set_pmd(cnum, hw_value);

		/* to go away */
		ia64_srlz_d();

		DBprintk(("[%d] pmd[%u]: value=0x%lx hw_value=0x%lx soft_pmd=0x%lx short_reset=0x%lx "
			  "long_reset=0x%lx hw_pmd=%lx notify=%c used_pmds=0x%lx reset_pmds=0x%lx\n",
			  task->pid, cnum, value, hw_value,
			  ctx->ctx_soft_pmds[cnum].val,
			  ctx->ctx_soft_pmds[cnum].short_reset,
			  ctx->ctx_soft_pmds[cnum].long_reset,
			  ia64_get_pmd(cnum) & pmu_conf.ovfl_val,
			  PMC_OVFL_NOTIFY(ctx, cnum) ? 'Y':'N',
			  ctx->ctx_used_pmds[0],
			  ctx->ctx_soft_pmds[cnum].reset_pmds[0]));
	}
	return 0;

abort_mission:
	/*
	 * for now, we have only one possibility for error
	 */
	PFM_REG_RETFLAG_SET(tmp.reg_flags, PFM_REG_RETFL_EINVAL);

	/*
	 * we change the return value to EFAULT in case we cannot write the register
	 * return code. The caller must first correct this error, then a resubmission
	 * of the request will eventually yield the EINVAL.
	 */
	if (__put_user(tmp.reg_flags, &req->reg_flags)) ret = -EFAULT;

	DBprintk(("[%d] pmd[%u]=0x%lx ret %d\n", task->pid, cnum, value, ret));

	return ret;
}
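/*
 * Editor's sketch (not in the original file): the write path above splits a
 * 64-bit user-visible count into a software part (value & ~ovfl_val, kept in
 * ctx_soft_pmds[].val) and a hardware part (value & ovfl_val, programmed into
 * the physical pmd), and pfm_read_pmds() below recombines them. A minimal
 * illustration, assuming a PMU with 47 implemented counter bits (as on
 * Itanium, so pmu_conf.ovfl_val == (1UL << 47) - 1); the helper name and the
 * hard-coded mask are hypothetical.
 */
static unsigned long
pfm_sketch_virtualized_read(unsigned long hw_pmd, unsigned long soft_val)
{
	unsigned long ovfl_val = (1UL << 47) - 1;	/* assumed implemented-bit mask */

	/* the full count is simply the sum of the two halves */
	return (hw_pmd & ovfl_val) + soft_val;
}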
static int
pfm_read_pmds(struct task_struct *task, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	struct thread_struct *th = &task->thread;
	unsigned long val, lval;
	pfarg_reg_t *req = (pfarg_reg_t *)arg;
	unsigned int cnum, reg_flags = 0;
	int i, ret = 0;
#if __GNUC__ < 3
	int foo;
#endif

	if (!CTX_IS_ENABLED(ctx)) return -EINVAL;

	/*
	 * XXX: MUST MAKE SURE WE DON'T HAVE ANY PENDING OVERFLOW BEFORE READING
	 * This is required when the monitoring has been stopped by user or kernel.
	 * If it is still going on, then that's fine because we are not guaranteed
	 * to return an accurate value in this case.
	 */

	/* XXX: ctx locking may be required here */

	DBprintk(("ctx_last_cpu=%d for [%d]\n", atomic_read(&ctx->ctx_last_cpu), task->pid));

	for (i = 0; i < count; i++, req++) {
#if __GNUC__ < 3
		foo = __get_user(cnum, &req->reg_num);
		if (foo) return -EFAULT;
		foo = __get_user(reg_flags, &req->reg_flags);
		if (foo) return -EFAULT;
#else
		if (__get_user(cnum, &req->reg_num)) return -EFAULT;
		if (__get_user(reg_flags, &req->reg_flags)) return -EFAULT;
#endif
		lval = 0UL;

		if (!PMD_IS_IMPL(cnum)) goto abort_mission;

		/*
		 * we can only read the registers that we use. That includes
		 * the ones we explicitly initialize AND the ones we want included
		 * in the sampling buffer (smpl_regs).
		 *
		 * Having this restriction allows optimization in the ctxsw routine
		 * without compromising security (leaks)
		 */
		if (!CTX_IS_USED_PMD(ctx, cnum)) goto abort_mission;

		/*
		 * If the task is not the current one, then we check if the
		 * PMU state is still in the local live registers due to lazy ctxsw.
		 * If true, then we read directly from the registers.
		 */
		if (atomic_read(&ctx->ctx_last_cpu) == smp_processor_id()) {
			ia64_srlz_d();
			val = ia64_get_pmd(cnum);
			DBprintk(("reading pmd[%u]=0x%lx from hw\n", cnum, val));
		} else {
#ifdef CONFIG_SMP
			int cpu;
			/*
			 * for SMP systems, the context may still be live on another
			 * CPU so we need to fetch it before proceeding with the read.
			 * This call will only be made once for the whole loop because
			 * of ctx_last_cpu becoming == -1.
			 *
			 * We cannot reuse ctx_last_cpu as it may change before we get to the
			 * actual IPI call. In this case, we will do the call for nothing but
			 * there is no way around it. The receiving side will simply do nothing.
			 */
			cpu = atomic_read(&ctx->ctx_last_cpu);
			if (cpu != -1) {
				DBprintk(("must fetch on CPU%d for [%d]\n", cpu, task->pid));
				pfm_fetch_regs(cpu, task, ctx);
			}
#endif
			/* context has been saved */
			val = th->pmd[cnum];
		}
		if (PMD_IS_COUNTING(cnum)) {
			/*
			 * XXX: need to check for overflow
			 */
			val &= pmu_conf.ovfl_val;
			val += ctx->ctx_soft_pmds[cnum].val;

			lval = ctx->ctx_soft_pmds[cnum].lval;
		}

		/*
		 * execute read checker, if any
		 */
		if (PMD_RD_FUNC(cnum)) {
			unsigned long v = val;
			ret = PMD_RD_FUNC(cnum)(task, cnum, &v, regs);
			val = v;
		}

		PFM_REG_RETFLAG_SET(reg_flags, ret);

		DBprintk(("read pmd[%u] ret=%d value=0x%lx pmc=0x%lx\n",
			  cnum, ret, val, ia64_get_pmc(cnum)));

		/*
		 * update register return value, abort all if problem during copy.
		 * we only modify the reg_flags field. no check mode is fine because
		 * access has been verified upfront in sys_perfmonctl().
		 */
		if (__put_user(cnum, &req->reg_num)) return -EFAULT;
		if (__put_user(val, &req->reg_value)) return -EFAULT;
		if (__put_user(reg_flags, &req->reg_flags)) return -EFAULT;
		if (__put_user(lval, &req->reg_last_reset_value)) return -EFAULT;
	}
	return 0;

abort_mission:
	PFM_REG_RETFLAG_SET(reg_flags, PFM_REG_RETFL_EINVAL);
	/*
	 * XXX: if this fails, we stick with the original failure, flag not updated!
	 */
	__put_user(reg_flags, &req->reg_flags);

	return -EINVAL;
}
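/*
 * Editor's sketch (not in the original file): how a user-level tool of this
 * era would consume pfm_read_pmds() above, through the ia64 perfmonctl()
 * system call. This is a separate user program, not kernel code; context
 * creation and PMC/PMD programming (PFM_CREATE_CONTEXT, PFM_WRITE_PMCS,
 * PFM_WRITE_PMDS) are omitted, and the register number, the pid-based calling
 * convention, and the <perfmon.h> header are assumptions based on the
 * interface used in this file (pfarg_reg_t, PFM_READ_PMDS).
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <perfmon.h>	/* assumed user header exposing perfmonctl() and pfarg_reg_t */

static void
sketch_read_one_pmd(void)
{
	pfarg_reg_t pd;

	memset(&pd, 0, sizeof(pd));
	pd.reg_num = 4;		/* a counting pmd, assumed already programmed */

	if (perfmonctl(getpid(), PFM_READ_PMDS, &pd, 1) == -1) {
		perror("PFM_READ_PMDS");
		return;
	}
	/* reg_value is the recombined 64-bit count, reg_last_reset_value the lval */
	printf("pmd4=0x%lx last_reset=0x%lx\n", pd.reg_value, pd.reg_last_reset_value);
}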
#ifdef PFM_PMU_USES_DBR
/*
 * Only call this function when a process is trying to
 * write the debug registers (reading is always allowed)
 */
int
pfm_use_debug_registers(struct task_struct *task)
{
	pfm_context_t *ctx = task->thread.pfm_context;
	int ret = 0;

	DBprintk(("called for [%d]\n", task->pid));

	/*
	 * do it only once
	 */
	if (task->thread.flags & IA64_THREAD_DBG_VALID) return 0;

	/*
	 * Even on SMP, we do not need to use an atomic here because
	 * the only way in is via ptrace() and this is possible only when the
	 * process is stopped. Even in the case where the ctxsw out is not totally
	 * completed by the time we come here, there is no way the 'stopped' process
	 * could be in the middle of fiddling with the pfm_write_ibr_dbr() routine.
	 * So this is always safe.
	 */
	if (ctx && ctx->ctx_fl_using_dbreg == 1) return -1;

	LOCK_PFS();

	/*
	 * We cannot allow setting breakpoints when system wide monitoring
	 * sessions are using the debug registers.
	 */
	if (pfm_sessions.pfs_sys_use_dbregs > 0)
		ret = -1;
	else
		pfm_sessions.pfs_ptrace_use_dbregs++;

	DBprintk(("ptrace_use_dbregs=%u sys_use_dbregs=%u by [%d] ret = %d\n",
		  pfm_sessions.pfs_ptrace_use_dbregs,
		  pfm_sessions.pfs_sys_use_dbregs,
		  task->pid, ret));

	UNLOCK_PFS();

	return ret;
}

/*
 * This function is called for every task that exits with the
 * IA64_THREAD_DBG_VALID flag set. This indicates a task which was
 * able to use the debug registers for debugging purposes via
 * ptrace(). Therefore we know it was not using them for
 * performance monitoring, so we only decrement the number
 * of "ptraced" debug register users to keep the count up to date.
 */
int
pfm_release_debug_registers(struct task_struct *task)
{
	int ret;

	LOCK_PFS();
	if (pfm_sessions.pfs_ptrace_use_dbregs == 0) {
		printk("perfmon: invalid release for [%d] ptrace_use_dbregs=0\n", task->pid);
		ret = -1;
	} else {
		pfm_sessions.pfs_ptrace_use_dbregs--;
		ret = 0;
	}
	UNLOCK_PFS();

	return ret;
}

#else /* !PFM_PMU_USES_DBR */
/*
 * in case the PMU does not use the debug registers, these two functions are nops.
 * The first function is called from arch/ia64/kernel/ptrace.c.
 * The second function is called from arch/ia64/kernel/process.c.
 */
int
pfm_use_debug_registers(struct task_struct *task)
{
	return 0;
}

int
pfm_release_debug_registers(struct task_struct *task)
{
	return 0;
}
#endif /* PFM_PMU_USES_DBR */
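/*
 * Editor's sketch (not in the original file): the reservation contract the
 * two routines above give their callers. A debugger path (such as the ptrace
 * code that installs ibr/dbr values) would reserve the registers first and
 * back out if a monitoring session owns them; the function name and the
 * -EBUSY choice here are hypothetical.
 */
static int
sketch_debugger_claims_dbregs(struct task_struct *child)
{
	if (pfm_use_debug_registers(child) != 0)
		return -EBUSY;	/* in use by a perfmon session */

	/* ... safe to write the child's debug registers here ... */
	/* the matching pfm_release_debug_registers() runs at task exit */

	return 0;
}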
static int
pfm_restart(struct task_struct *task, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	void *sem = &ctx->ctx_restart_sem;

	/*
	 * Cannot do anything before PMU is enabled
	 */
	if (!CTX_IS_ENABLED(ctx)) return -EINVAL;

	if (task == current) {
		DBprintk(("restarting self %d frozen=%d ovfl_regs=0x%lx\n",
			  task->pid,
			  ctx->ctx_fl_frozen,
			  ctx->ctx_ovfl_regs[0]));

		pfm_reset_regs(ctx, ctx->ctx_ovfl_regs, PFM_PMD_LONG_RESET);

		ctx->ctx_ovfl_regs[0] = 0UL;

		/*
		 * We ignore block/don't block because we never block
		 * for a self-monitoring process.
		 */
		ctx->ctx_fl_frozen = 0;

		if (CTX_HAS_SMPL(ctx)) {
			ctx->ctx_psb->psb_hdr->hdr_count = 0;
			ctx->ctx_psb->psb_index = 0;
		}

		/* simply unfreeze */
		ia64_set_pmc(0, 0);
		ia64_srlz_d();

		return 0;
	}
	/* restart on another task */

	/*
	 * if blocking, then post the semaphore.
	 * if non-blocking, then we ensure that the task will go into
	 * pfm_overflow_must_block() before returning to user mode.
	 * We cannot explicitly reset another task, it MUST always
	 * be done by the task itself. This works for system wide because
	 * the tool that is controlling the session is doing "self-monitoring".
	 *
	 * XXX: what if the task never goes back to user?
	 */
	if (CTX_OVFL_NOBLOCK(ctx) == 0) {
		DBprintk(("unblocking %d\n", task->pid));
		up(sem);
	} else {
		task->thread.pfm_ovfl_block_reset = 1;
	}