📄 perfmon.c
Font size:
ibr_mask_reg_t ibr; dbr_mask_reg_t dbr;} dbreg_t;/* * perfmon command descriptions */typedef struct { int (*cmd_func)(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs); char *cmd_name; int cmd_flags; unsigned int cmd_narg; size_t cmd_argsize; int (*cmd_getsize)(void *arg, size_t *sz);} pfm_cmd_desc_t;#define PFM_CMD_FD 0x01 /* command requires a file descriptor */#define PFM_CMD_ARG_READ 0x02 /* command must read argument(s) */#define PFM_CMD_ARG_RW 0x04 /* command must read/write argument(s) */#define PFM_CMD_STOP 0x08 /* command does not work on zombie context */#define PFM_CMD_NAME(cmd) pfm_cmd_tab[(cmd)].cmd_name#define PFM_CMD_READ_ARG(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_ARG_READ)#define PFM_CMD_RW_ARG(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_ARG_RW)#define PFM_CMD_USE_FD(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_FD)#define PFM_CMD_STOPPED(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_STOP)#define PFM_CMD_ARG_MANY -1 /* cannot be zero */typedef struct { unsigned long pfm_spurious_ovfl_intr_count; /* keep track of spurious ovfl interrupts */ unsigned long pfm_replay_ovfl_intr_count; /* keep track of replayed ovfl interrupts */ unsigned long pfm_ovfl_intr_count; /* keep track of ovfl interrupts */ unsigned long pfm_ovfl_intr_cycles; /* cycles spent processing ovfl interrupts */ unsigned long pfm_ovfl_intr_cycles_min; /* min cycles spent processing ovfl interrupts */ unsigned long pfm_ovfl_intr_cycles_max; /* max cycles spent processing ovfl interrupts */ unsigned long pfm_smpl_handler_calls; unsigned long pfm_smpl_handler_cycles; char pad[SMP_CACHE_BYTES] ____cacheline_aligned;} pfm_stats_t;/* * perfmon internal variables */static pfm_stats_t pfm_stats[NR_CPUS];static pfm_session_t pfm_sessions; /* global sessions information */#ifndef XENstatic DEFINE_SPINLOCK(pfm_alt_install_check);#endifstatic pfm_intr_handler_desc_t *pfm_alt_intr_handler;#ifndef XENstatic struct proc_dir_entry *perfmon_dir;#endifstatic pfm_uuid_t pfm_null_uuid = 
{0,};static spinlock_t pfm_buffer_fmt_lock;static LIST_HEAD(pfm_buffer_fmt_list);static pmu_config_t *pmu_conf;/* sysctl() controls */pfm_sysctl_t pfm_sysctl;EXPORT_SYMBOL(pfm_sysctl);#ifndef XENstatic ctl_table pfm_ctl_table[]={ {1, "debug", &pfm_sysctl.debug, sizeof(int), 0666, NULL, &proc_dointvec, NULL,}, {2, "debug_ovfl", &pfm_sysctl.debug_ovfl, sizeof(int), 0666, NULL, &proc_dointvec, NULL,}, {3, "fastctxsw", &pfm_sysctl.fastctxsw, sizeof(int), 0600, NULL, &proc_dointvec, NULL,}, {4, "expert_mode", &pfm_sysctl.expert_mode, sizeof(int), 0600, NULL, &proc_dointvec, NULL,}, { 0, },};static ctl_table pfm_sysctl_dir[] = { {1, "perfmon", NULL, 0, 0755, pfm_ctl_table, }, {0,},};static ctl_table pfm_sysctl_root[] = { {1, "kernel", NULL, 0, 0755, pfm_sysctl_dir, }, {0,},};static struct ctl_table_header *pfm_sysctl_header;static int pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);static int pfm_flush(struct file *filp);#endif#define pfm_get_cpu_var(v) __ia64_per_cpu_var(v)#define pfm_get_cpu_data(a,b) per_cpu(a, b)#ifndef XENstatic inline voidpfm_put_task(struct task_struct *task){ if (task != current) put_task_struct(task);}static inline voidpfm_set_task_notify(struct task_struct *task){ struct thread_info *info; info = (struct thread_info *) ((char *) task + IA64_TASK_SIZE); set_bit(TIF_NOTIFY_RESUME, &info->flags);}static inline voidpfm_clear_task_notify(void){ clear_thread_flag(TIF_NOTIFY_RESUME);}static inline voidpfm_reserve_page(unsigned long a){ SetPageReserved(vmalloc_to_page((void *)a));}static inline voidpfm_unreserve_page(unsigned long a){ ClearPageReserved(vmalloc_to_page((void*)a));}#endifstatic inline unsigned longpfm_protect_ctx_ctxsw(pfm_context_t *x){ spin_lock(&(x)->ctx_lock); return 0UL;}static inline voidpfm_unprotect_ctx_ctxsw(pfm_context_t *x, unsigned long f){ spin_unlock(&(x)->ctx_lock);}#ifndef XENstatic inline unsigned intpfm_do_munmap(struct mm_struct *mm, unsigned long addr, size_t len, int acct){ return 
do_munmap(mm, addr, len);}static inline unsigned long pfm_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags, unsigned long exec){ return get_unmapped_area(file, addr, len, pgoff, flags);}static struct super_block *pfmfs_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data){ return get_sb_pseudo(fs_type, "pfm:", NULL, PFMFS_MAGIC);}static struct file_system_type pfm_fs_type = { .name = "pfmfs", .get_sb = pfmfs_get_sb, .kill_sb = kill_anon_super,};#endifDEFINE_PER_CPU(unsigned long, pfm_syst_info);DEFINE_PER_CPU(struct task_struct *, pmu_owner);DEFINE_PER_CPU(pfm_context_t *, pmu_ctx);DEFINE_PER_CPU(unsigned long, pmu_activation_number);#ifndef XENEXPORT_PER_CPU_SYMBOL_GPL(pfm_syst_info);/* forward declaration */static struct file_operations pfm_file_ops;#endif/* * forward declarations */#ifndef CONFIG_SMPstatic void pfm_lazy_save_regs (struct task_struct *ta);#endifvoid dump_pmu_state(const char *);static int pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);#include "perfmon_itanium.h"#include "perfmon_mckinley.h"#include "perfmon_montecito.h"#include "perfmon_generic.h"static pmu_config_t *pmu_confs[]={ &pmu_conf_mont, &pmu_conf_mck, &pmu_conf_ita, &pmu_conf_gen, /* must be last */ NULL};#ifndef XENstatic int pfm_end_notify_user(pfm_context_t *ctx);#endifstatic inline voidpfm_clear_psr_pp(void){ ia64_rsm(IA64_PSR_PP); ia64_srlz_i();}static inline voidpfm_set_psr_pp(void){ ia64_ssm(IA64_PSR_PP); ia64_srlz_i();}static inline voidpfm_clear_psr_up(void){ ia64_rsm(IA64_PSR_UP); ia64_srlz_i();}static inline voidpfm_set_psr_up(void){ ia64_ssm(IA64_PSR_UP); ia64_srlz_i();}static inline unsigned longpfm_get_psr(void){ unsigned long tmp; tmp = ia64_getreg(_IA64_REG_PSR); ia64_srlz_i(); return tmp;}static inline voidpfm_set_psr_l(unsigned long val){ ia64_setreg(_IA64_REG_PSR_L, val); ia64_srlz_i();}static inline 
voidpfm_freeze_pmu(void){ ia64_set_pmc(0,1UL); ia64_srlz_d();}static inline voidpfm_unfreeze_pmu(void){ ia64_set_pmc(0,0UL); ia64_srlz_d();}static inline voidpfm_restore_ibrs(unsigned long *ibrs, unsigned int nibrs){ int i; for (i=0; i < nibrs; i++) { ia64_set_ibr(i, ibrs[i]); ia64_dv_serialize_instruction(); } ia64_srlz_i();}static inline voidpfm_restore_dbrs(unsigned long *dbrs, unsigned int ndbrs){ int i; for (i=0; i < ndbrs; i++) { ia64_set_dbr(i, dbrs[i]); ia64_dv_serialize_data(); } ia64_srlz_d();}/* * PMD[i] must be a counter. no check is made */static inline unsigned longpfm_read_soft_counter(pfm_context_t *ctx, int i){ return ctx->ctx_pmds[i].val + (ia64_get_pmd(i) & pmu_conf->ovfl_val);}/* * PMD[i] must be a counter. no check is made */static inline voidpfm_write_soft_counter(pfm_context_t *ctx, int i, unsigned long val){ unsigned long ovfl_val = pmu_conf->ovfl_val; ctx->ctx_pmds[i].val = val & ~ovfl_val; /* * writing to unimplemented part is ignore, so we do not need to * mask off top part */ ia64_set_pmd(i, val & ovfl_val);}#ifndef XENstatic pfm_msg_t *pfm_get_new_msg(pfm_context_t *ctx){ int idx, next; next = (ctx->ctx_msgq_tail+1) % PFM_MAX_MSGS; DPRINT(("ctx_fd=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail)); if (next == ctx->ctx_msgq_head) return NULL; idx = ctx->ctx_msgq_tail; ctx->ctx_msgq_tail = next; DPRINT(("ctx=%p head=%d tail=%d msg=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, idx)); return ctx->ctx_msgq+idx;}static pfm_msg_t *pfm_get_next_msg(pfm_context_t *ctx){ pfm_msg_t *msg; DPRINT(("ctx=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail)); if (PFM_CTXQ_EMPTY(ctx)) return NULL; /* * get oldest message */ msg = ctx->ctx_msgq+ctx->ctx_msgq_head; /* * and move forward */ ctx->ctx_msgq_head = (ctx->ctx_msgq_head+1) % PFM_MAX_MSGS; DPRINT(("ctx=%p head=%d tail=%d type=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, msg->pfm_gen_msg.msg_type)); return msg;}static voidpfm_reset_msgq(pfm_context_t 
*ctx){ ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0; DPRINT(("ctx=%p msgq reset\n", ctx));}static void *pfm_rvmalloc(unsigned long size){ void *mem; unsigned long addr; size = PAGE_ALIGN(size); mem = vmalloc(size); if (mem) { //printk("perfmon: CPU%d pfm_rvmalloc(%ld)=%p\n", smp_processor_id(), size, mem); memset(mem, 0, size); addr = (unsigned long)mem; while (size > 0) { pfm_reserve_page(addr); addr+=PAGE_SIZE; size-=PAGE_SIZE; } } return mem;}static voidpfm_rvfree(void *mem, unsigned long size){ unsigned long addr; if (mem) { DPRINT(("freeing physical buffer @%p size=%lu\n", mem, size)); addr = (unsigned long) mem; while ((long) size > 0) { pfm_unreserve_page(addr); addr+=PAGE_SIZE; size-=PAGE_SIZE; } vfree(mem); } return;}#endifstatic pfm_context_t *pfm_context_alloc(void){ pfm_context_t *ctx; /* * allocate context descriptor * must be able to free with interrupts disabled */ ctx = kmalloc(sizeof(pfm_context_t), GFP_KERNEL); if (ctx) { memset(ctx, 0, sizeof(pfm_context_t)); DPRINT(("alloc ctx @%p\n", ctx)); } return ctx;}static voidpfm_context_free(pfm_context_t *ctx){ if (ctx) { DPRINT(("free ctx @%p\n", ctx)); kfree(ctx); }}#ifndef XENstatic voidpfm_mask_monitoring(struct task_struct *task){ pfm_context_t *ctx = PFM_GET_CTX(task); struct thread_struct *th = &task->thread; unsigned long mask, val, ovfl_mask; int i; DPRINT_ovfl(("masking monitoring for [%d]\n", task->pid)); ovfl_mask = pmu_conf->ovfl_val; /* * monitoring can only be masked as a result of a valid * counter overflow. In UP, it means that the PMU still * has an owner. Note that the owner can be different * from the current task. However the PMU state belongs * to the owner. * In SMP, a valid overflow only happens when task is * current. Therefore if we come here, we know that * the PMU state belongs to the current task, therefore * we can access the live registers. * * So in both cases, the live register contains the owner's * state. We can ONLY touch the PMU registers and NOT the PSR. 
* * As a consequence to this call, the thread->pmds[] array * contains stale information which must be ignored * when context is reloaded AND monitoring is active (see * pfm_restart). */ mask = ctx->ctx_used_pmds[0]; for (i = 0; mask; i++, mask>>=1) { /* skip non used pmds */ if ((mask & 0x1) == 0) continue; val = ia64_get_pmd(i); if (PMD_IS_COUNTING(i)) { /* * we rebuild the full 64 bit value of the counter */ ctx->ctx_pmds[i].val += (val & ovfl_mask); } else { ctx->ctx_pmds[i].val = val; } DPRINT_ovfl(("pmd[%d]=0x%lx hw_pmd=0x%lx\n", i, ctx->ctx_pmds[i].val, val & ovfl_mask)); } /* * mask monitoring by setting the privilege level to 0 * we cannot use psr.pp/psr.up for this, it is controlled by * the user * * if task is current, modify actual registers, otherwise modify * thread save state, i.e., what will be restored in pfm_load_regs() */ mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER; for(i= PMU_FIRST_COUNTER; mask; i++, mask>>=1) { if ((mask & 0x1) == 0UL) continue; ia64_set_pmc(i, th->pmcs[i] & ~0xfUL); th->pmcs[i] &= ~0xfUL; DPRINT_ovfl(("pmc[%d]=0x%lx\n", i, th->pmcs[i])); } /* * make all of this visible */ ia64_srlz_d();}/* * must always be done with task == current * * context must be in MASKED state when calling */static void
⌨️ Keyboard shortcuts
Copy code
Ctrl + C
Search code
Ctrl + F
Full-screen mode
F11
Toggle theme
Ctrl + Shift + D
Show shortcuts
?
Increase font size
Ctrl + =
Decrease font size
Ctrl + -