/* perfmon.c */
pfm_restore_monitoring(struct task_struct *task)
{
        pfm_context_t *ctx = PFM_GET_CTX(task);
        struct thread_struct *th = &task->thread;
        unsigned long mask, ovfl_mask;
        unsigned long psr, val;
        int i, is_system;

        is_system = ctx->ctx_fl_system;
        ovfl_mask = pmu_conf->ovfl_val;

        if (task != current) {
                printk(KERN_ERR "perfmon.%d: invalid task[%d] current[%d]\n",
                       __LINE__, task->pid, current->pid);
                return;
        }
        if (ctx->ctx_state != PFM_CTX_MASKED) {
                printk(KERN_ERR "perfmon.%d: task[%d] current[%d] invalid state=%d\n",
                       __LINE__, task->pid, current->pid, ctx->ctx_state);
                return;
        }
        psr = pfm_get_psr();
        /*
         * monitoring is masked via the PMCs.
         * As we restore their values, we do not want each counter to
         * restart right away. We stop monitoring using the PSR,
         * restore the PMCs (and PMDs) and then re-establish the psr
         * as it was. Note that there can be no pending overflow at
         * this point, because monitoring was MASKED.
         *
         * system-wide sessions are pinned and self-monitoring
         */
        if (is_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
                /* disable dcr pp */
                ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
                pfm_clear_psr_pp();
        } else {
                pfm_clear_psr_up();
        }
        /*
         * first, we restore the PMDs
         */
        mask = ctx->ctx_used_pmds[0];
        for (i = 0; mask; i++, mask >>= 1) {
                /* skip unused pmds */
                if ((mask & 0x1) == 0)
                        continue;

                if (PMD_IS_COUNTING(i)) {
                        /*
                         * we split the 64-bit value according to
                         * counter width
                         */
                        val = ctx->ctx_pmds[i].val & ovfl_mask;
                        ctx->ctx_pmds[i].val &= ~ovfl_mask;
                } else {
                        val = ctx->ctx_pmds[i].val;
                }
                ia64_set_pmd(i, val);

                DPRINT(("pmd[%d]=0x%lx hw_pmd=0x%lx\n",
                        i, ctx->ctx_pmds[i].val, val));
        }
        /*
         * restore the PMCs
         */
        mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
        for (i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {
                if ((mask & 0x1) == 0UL)
                        continue;
                th->pmcs[i] = ctx->ctx_pmcs[i];
                ia64_set_pmc(i, th->pmcs[i]);
                DPRINT(("[%d] pmc[%d]=0x%lx\n", task->pid, i, th->pmcs[i]));
        }
        ia64_srlz_d();

        /*
         * must restore DBRs/IBRs because they could have been
         * modified while masked.
         * XXX: need to optimize
         */
        if (ctx->ctx_fl_using_dbreg) {
                pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
                pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
        }

        /*
         * now restore PSR
         */
        if (is_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
                /* enable dcr pp */
                ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
                ia64_srlz_i();
        }
        pfm_set_psr_l(psr);
}
#endif

static inline void
pfm_save_pmds(unsigned long *pmds, unsigned long mask)
{
        int i;

        ia64_srlz_d();

        for (i = 0; mask; i++, mask >>= 1) {
                if (mask & 0x1)
                        pmds[i] = ia64_get_pmd(i);
        }
}

#ifndef XEN
/*
 * reload from thread state (used for ctxsw only)
 */
static inline void
pfm_restore_pmds(unsigned long *pmds, unsigned long mask)
{
        int i;
        unsigned long val, ovfl_val = pmu_conf->ovfl_val;

        for (i = 0; mask; i++, mask >>= 1) {
                if ((mask & 0x1) == 0)
                        continue;
                val = PMD_IS_COUNTING(i) ? pmds[i] & ovfl_val : pmds[i];
                ia64_set_pmd(i, val);
        }
        ia64_srlz_d();
}
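/*
 * Illustration of the counter-width split used above (a sketch for the
 * reader; the 47-bit width is an assumption for the example, not
 * something this file defines).  pmu_conf->ovfl_val is the mask of
 * bits the hardware counter actually implements; for a 47-bit PMD it
 * would be ((1UL << 47) - 1).  A 64-bit virtual count is then carried
 * in two pieces:
 *
 *      hw_val   = ctx->ctx_pmds[i].val &  ovfl_val;    - goes into the PMD
 *      soft_val = ctx->ctx_pmds[i].val & ~ovfl_val;    - stays in memory
 *
 * and the full 64-bit value can later be rebuilt as
 *
 *      full = soft_val | (ia64_get_pmd(i) & ovfl_val);
 *
 * The hardware part wraps after 2^47 events; the overflow handler folds
 * that carry back into the soft part, so no bits are lost.
 */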
/*
 * propagate PMD from context to thread-state
 */
static inline void
pfm_copy_pmds(struct task_struct *task, pfm_context_t *ctx)
{
        struct thread_struct *thread = &task->thread;
        unsigned long ovfl_val = pmu_conf->ovfl_val;
        unsigned long mask = ctx->ctx_all_pmds[0];
        unsigned long val;
        int i;

        DPRINT(("mask=0x%lx\n", mask));

        for (i = 0; mask; i++, mask >>= 1) {

                val = ctx->ctx_pmds[i].val;

                /*
                 * We break up the 64-bit value into 2 pieces:
                 * the lower bits go to the machine state in the
                 * thread (will be reloaded on ctxsw in).
                 * The upper part stays in the soft-counter.
                 */
                if (PMD_IS_COUNTING(i)) {
                        ctx->ctx_pmds[i].val = val & ~ovfl_val;
                        val &= ovfl_val;
                }
                thread->pmds[i] = val;

                DPRINT(("pmd[%d]=0x%lx soft_val=0x%lx\n",
                        i, thread->pmds[i], ctx->ctx_pmds[i].val));
        }
}
#else
static inline void
xenpfm_restore_pmds(pfm_context_t *ctx)
{
        int i;
        unsigned long ovfl_val = pmu_conf->ovfl_val;
        unsigned long mask = ctx->ctx_all_pmds[0];
        unsigned long val;

        for (i = 0; mask; i++, mask >>= 1) {
                if ((mask & 0x1) == 0)
                        continue;

                val = ctx->ctx_pmds[i].val;
                /*
                 * We break up the 64-bit value into 2 pieces:
                 * the lower bits go to the machine state in the
                 * thread (will be reloaded on ctxsw in).
                 * The upper part stays in the soft-counter.
                 */
                if (PMD_IS_COUNTING(i)) {
                        ctx->ctx_pmds[i].val = val & ~ovfl_val;
                        val &= ovfl_val;
                }
                ia64_set_pmd(i, val);
        }
        ia64_srlz_d();
}
#endif

#ifndef XEN
/*
 * propagate PMC from context to thread-state
 */
static inline void
pfm_copy_pmcs(struct task_struct *task, pfm_context_t *ctx)
{
        struct thread_struct *thread = &task->thread;
        unsigned long mask = ctx->ctx_all_pmcs[0];
        int i;

        DPRINT(("mask=0x%lx\n", mask));

        for (i = 0; mask; i++, mask >>= 1) {
                /* masking 0 with ovfl_val yields 0 */
                thread->pmcs[i] = ctx->ctx_pmcs[i];
                DPRINT(("pmc[%d]=0x%lx\n", i, thread->pmcs[i]));
        }
}

static inline void
pfm_restore_pmcs(unsigned long *pmcs, unsigned long mask)
{
        int i;

        for (i = 0; mask; i++, mask >>= 1) {
                if ((mask & 0x1) == 0)
                        continue;
                ia64_set_pmc(i, pmcs[i]);
        }
        ia64_srlz_d();
}
#else
static inline void
xenpfm_restore_pmcs(pfm_context_t *ctx)
{
        int i;
        unsigned long mask = ctx->ctx_all_pmcs[0];

        for (i = 0; mask; i++, mask >>= 1) {
                if ((mask & 0x1) == 0)
                        continue;
                ia64_set_pmc(i, ctx->ctx_pmcs[i]);
                DPRINT(("pmc[%d]=0x%lx\n", i, ctx->ctx_pmcs[i]));
        }
        ia64_srlz_d();
}
#endif

static inline int
pfm_uuid_cmp(pfm_uuid_t a, pfm_uuid_t b)
{
        return memcmp(a, b, sizeof(pfm_uuid_t));
}

static inline int
pfm_buf_fmt_exit(pfm_buffer_fmt_t *fmt, struct task_struct *task,
                 void *buf, struct pt_regs *regs)
{
        int ret = 0;

        if (fmt->fmt_exit)
                ret = (*fmt->fmt_exit)(task, buf, regs);
        return ret;
}

static inline int
pfm_buf_fmt_getsize(pfm_buffer_fmt_t *fmt, struct task_struct *task,
                    unsigned int flags, int cpu, void *arg, unsigned long *size)
{
        int ret = 0;

        if (fmt->fmt_getsize)
                ret = (*fmt->fmt_getsize)(task, flags, cpu, arg, size);
        return ret;
}

static inline int
pfm_buf_fmt_validate(pfm_buffer_fmt_t *fmt, struct task_struct *task,
                     unsigned int flags, int cpu, void *arg)
{
        int ret = 0;

        if (fmt->fmt_validate)
                ret = (*fmt->fmt_validate)(task, flags, cpu, arg);
        return ret;
}

static inline int
pfm_buf_fmt_init(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf,
                 unsigned int flags, int cpu, void *arg)
{
        int ret = 0;

        if (fmt->fmt_init)
                ret = (*fmt->fmt_init)(task, buf, flags, cpu, arg);
        return ret;
}

static inline int
pfm_buf_fmt_restart(pfm_buffer_fmt_t *fmt, struct task_struct *task,
                    pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs)
{
        int ret = 0;

        if (fmt->fmt_restart)
                ret = (*fmt->fmt_restart)(task, ctrl, buf, regs);
        return ret;
}

static inline int
pfm_buf_fmt_restart_active(pfm_buffer_fmt_t *fmt, struct task_struct *task,
                           pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs)
{
        int ret = 0;

        if (fmt->fmt_restart_active)
                ret = (*fmt->fmt_restart_active)(task, ctrl, buf, regs);
        return ret;
}

static pfm_buffer_fmt_t *
__pfm_find_buffer_fmt(pfm_uuid_t uuid)
{
        struct list_head *pos;
        pfm_buffer_fmt_t *entry;

        list_for_each(pos, &pfm_buffer_fmt_list) {
                entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list);
                if (pfm_uuid_cmp(uuid, entry->fmt_uuid) == 0)
                        return entry;
        }
        return NULL;
}
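/*
 * Usage sketch (hypothetical names, not defined in this file): the
 * pfm_buf_fmt_*() wrappers above implement an optional-callback
 * pattern.  Only fmt_name and fmt_handler are mandatory; any other
 * callback may be left NULL, in which case the wrapper simply returns
 * 0.  A minimal sampling format would therefore look roughly like:
 *
 *      static pfm_buffer_fmt_t example_fmt = {
 *              .fmt_name    = "example-format",        - hypothetical
 *              .fmt_uuid    = { 0x1, ... },            - must be unique
 *              .fmt_handler = example_ovfl_handler,    - hypothetical
 *      };
 *
 * registered once via pfm_register_buffer_fmt(&example_fmt) (see
 * below), which fails with -EBUSY if a format with the same uuid is
 * already on pfm_buffer_fmt_list.
 */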
/*
 * find a buffer format based on its uuid
 */
static pfm_buffer_fmt_t *
pfm_find_buffer_fmt(pfm_uuid_t uuid)
{
        pfm_buffer_fmt_t *fmt;

        spin_lock(&pfm_buffer_fmt_lock);
        fmt = __pfm_find_buffer_fmt(uuid);
        spin_unlock(&pfm_buffer_fmt_lock);
        return fmt;
}

int
pfm_register_buffer_fmt(pfm_buffer_fmt_t *fmt)
{
        int ret = 0;

        /* some sanity checks */
        if (fmt == NULL || fmt->fmt_name == NULL)
                return -EINVAL;

        /* we need at least a handler */
        if (fmt->fmt_handler == NULL)
                return -EINVAL;

        /*
         * XXX: need to check validity of fmt_arg_size
         */

        spin_lock(&pfm_buffer_fmt_lock);

        if (__pfm_find_buffer_fmt(fmt->fmt_uuid)) {
                printk(KERN_ERR "perfmon: duplicate sampling format: %s\n", fmt->fmt_name);
                ret = -EBUSY;
                goto out;
        }
        list_add(&fmt->fmt_list, &pfm_buffer_fmt_list);
        printk(KERN_INFO "perfmon: added sampling format %s\n", fmt->fmt_name);

out:
        spin_unlock(&pfm_buffer_fmt_lock);
        return ret;
}
EXPORT_SYMBOL(pfm_register_buffer_fmt);

int
pfm_unregister_buffer_fmt(pfm_uuid_t uuid)
{
        pfm_buffer_fmt_t *fmt;
        int ret = 0;

        spin_lock(&pfm_buffer_fmt_lock);

        fmt = __pfm_find_buffer_fmt(uuid);
        if (!fmt) {
                printk(KERN_ERR "perfmon: cannot unregister format, not found\n");
                ret = -EINVAL;
                goto out;
        }
        list_del_init(&fmt->fmt_list);
        printk(KERN_INFO "perfmon: removed sampling format: %s\n", fmt->fmt_name);

out:
        spin_unlock(&pfm_buffer_fmt_lock);
        return ret;
}
EXPORT_SYMBOL(pfm_unregister_buffer_fmt);

extern void update_pal_halt_status(int);

static int
pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
{
        unsigned long flags;
        /*
         * validity checks on cpu_mask have been done upstream
         */
        LOCK_PFS(flags);

        DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
                pfm_sessions.pfs_sys_sessions,
                pfm_sessions.pfs_task_sessions,
                pfm_sessions.pfs_sys_use_dbregs,
                is_syswide,
                cpu));

        if (is_syswide) {
                /*
                 * cannot mix system wide and per-task sessions
                 */
                if (pfm_sessions.pfs_task_sessions > 0UL) {
                        DPRINT(("system wide not possible, %u conflicting task_sessions\n",
                                pfm_sessions.pfs_task_sessions));
                        goto abort;
                }

                if (pfm_sessions.pfs_sys_session[cpu])
                        goto error_conflict;

                DPRINT(("reserving system wide session on CPU%u currently on CPU%u\n",
                        cpu, smp_processor_id()));

#ifndef XEN
                pfm_sessions.pfs_sys_session[cpu] = task;
#else
                pfm_sessions.pfs_sys_session[cpu] = XENOPROF_TASK;
#endif

                pfm_sessions.pfs_sys_sessions++;

        } else {
                if (pfm_sessions.pfs_sys_sessions)
                        goto abort;
                pfm_sessions.pfs_task_sessions++;
        }

        DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
                pfm_sessions.pfs_sys_sessions,
                pfm_sessions.pfs_task_sessions,
                pfm_sessions.pfs_sys_use_dbregs,
                is_syswide,
                cpu));

        /*
         * disable default_idle() to go to PAL_HALT
         */
        update_pal_halt_status(0);

        UNLOCK_PFS(flags);

        return 0;

error_conflict:
        DPRINT(("system wide not possible, conflicting session [%d] on CPU%d\n",
#ifndef XEN
                pfm_sessions.pfs_sys_session[cpu]->pid,
#else
                -1,
#endif
                cpu));
abort:
        UNLOCK_PFS(flags);

        return -EBUSY;
}
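/*
 * Summary of the reservation rules enforced above (restating the code):
 *
 *      pfs_task_sessions > 0   => no system-wide session may start
 *      pfs_sys_sessions  > 0   => no per-task session may start
 *      pfs_sys_session[cpu]    => that CPU is already claimed (-EBUSY)
 *
 * Every successful reservation calls update_pal_halt_status(0) so the
 * idle loop stays out of PAL_HALT while monitoring is active (the PMU
 * may not count in that low-power state).  pfm_unreserve_session()
 * below re-enables PAL_HALT only once both session counts are back to
 * zero.
 */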
static int
pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu)
{
        unsigned long flags;
        /*
         * validity checks on cpu_mask have been done upstream
         */
        LOCK_PFS(flags);

        DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
                pfm_sessions.pfs_sys_sessions,
                pfm_sessions.pfs_task_sessions,
                pfm_sessions.pfs_sys_use_dbregs,
                is_syswide,
                cpu));

        if (is_syswide) {
                pfm_sessions.pfs_sys_session[cpu] = NULL;
                /*
                 * would not work with perfmon+more than one bit in cpu_mask
                 */
                if (ctx && ctx->ctx_fl_using_dbreg) {
                        if (pfm_sessions.pfs_sys_use_dbregs == 0) {
                                printk(KERN_ERR "perfmon: invalid release for ctx %p sys_use_dbregs=0\n", ctx);
                        } else {
                                pfm_sessions.pfs_sys_use_dbregs--;
                        }
                }
                pfm_sessions.pfs_sys_sessions--;
        } else {
                pfm_sessions.pfs_task_sessions--;
        }

        DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
                pfm_sessions.pfs_sys_sessions,
                pfm_sessions.pfs_task_sessions,
                pfm_sessions.pfs_sys_use_dbregs,
                is_syswide,
                cpu));

        /*
         * if possible, enable default_idle() to go into PAL_HALT
         */
        if (pfm_sessions.pfs_task_sessions == 0 && pfm_sessions.pfs_sys_sessions == 0)
                update_pal_halt_status(1);

        UNLOCK_PFS(flags);

        return 0;