
📄 perfmon.c

📁 优龙2410 Linux 2.6.8 kernel source code
💻 C
📖 Page 1 of 5
/*
 * perfmon internal variables
 */
static pfm_stats_t		pfm_stats[NR_CPUS];
static pfm_session_t		pfm_sessions;	/* global sessions information */

static struct proc_dir_entry 	*perfmon_dir;
static pfm_uuid_t		pfm_null_uuid = {0,};

static spinlock_t		pfm_buffer_fmt_lock;
static LIST_HEAD(pfm_buffer_fmt_list);

static pmu_config_t		*pmu_conf;

/* sysctl() controls */
static pfm_sysctl_t pfm_sysctl;
int pfm_debug_var;

static ctl_table pfm_ctl_table[]={
	{1, "debug", &pfm_sysctl.debug, sizeof(int), 0666, NULL, &proc_dointvec, NULL,},
	{2, "debug_ovfl", &pfm_sysctl.debug_ovfl, sizeof(int), 0666, NULL, &proc_dointvec, NULL,},
	{3, "fastctxsw", &pfm_sysctl.fastctxsw, sizeof(int), 0600, NULL, &proc_dointvec, NULL,},
	{4, "expert_mode", &pfm_sysctl.expert_mode, sizeof(int), 0600, NULL, &proc_dointvec, NULL,},
	{0,},
};
static ctl_table pfm_sysctl_dir[] = {
	{1, "perfmon", NULL, 0, 0755, pfm_ctl_table, },
	{0,},
};
static ctl_table pfm_sysctl_root[] = {
	{1, "kernel", NULL, 0, 0755, pfm_sysctl_dir, },
	{0,},
};
static struct ctl_table_header *pfm_sysctl_header;

static int pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
static int pfm_flush(struct file *filp);

#define pfm_get_cpu_var(v)		__ia64_per_cpu_var(v)
#define pfm_get_cpu_data(a,b)		per_cpu(a, b)

static inline void
pfm_put_task(struct task_struct *task)
{
	if (task != current) put_task_struct(task);
}

static inline void
pfm_set_task_notify(struct task_struct *task)
{
	struct thread_info *info;

	info = (struct thread_info *) ((char *) task + IA64_TASK_SIZE);
	set_bit(TIF_NOTIFY_RESUME, &info->flags);
}

static inline void
pfm_clear_task_notify(void)
{
	clear_thread_flag(TIF_NOTIFY_RESUME);
}

static inline void
pfm_reserve_page(unsigned long a)
{
	SetPageReserved(vmalloc_to_page((void *)a));
}

static inline void
pfm_unreserve_page(unsigned long a)
{
	ClearPageReserved(vmalloc_to_page((void *)a));
}

static inline int
pfm_remap_page_range(struct vm_area_struct *vma, unsigned long from, unsigned long phys_addr, unsigned long size, pgprot_t prot)
{
	return remap_page_range(vma, from, phys_addr, size, prot);
}

static inline unsigned long
pfm_protect_ctx_ctxsw(pfm_context_t *x)
{
	spin_lock(&(x)->ctx_lock);
	return 0UL;
}

/* returned nothing despite a non-void declaration in the posted copy; void is correct */
static inline void
pfm_unprotect_ctx_ctxsw(pfm_context_t *x, unsigned long f)
{
	spin_unlock(&(x)->ctx_lock);
}

static inline unsigned int
pfm_do_munmap(struct mm_struct *mm, unsigned long addr, size_t len, int acct)
{
	return do_munmap(mm, addr, len);
}

static inline unsigned long
pfm_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags, unsigned long exec)
{
	return get_unmapped_area(file, addr, len, pgoff, flags);
}

static struct super_block *
pfmfs_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data)
{
	return get_sb_pseudo(fs_type, "pfm:", NULL, PFMFS_MAGIC);
}

static struct file_system_type pfm_fs_type = {
	.name     = "pfmfs",
	.get_sb   = pfmfs_get_sb,
	.kill_sb  = kill_anon_super,
};

DEFINE_PER_CPU(unsigned long, pfm_syst_info);
DEFINE_PER_CPU(struct task_struct *, pmu_owner);
DEFINE_PER_CPU(pfm_context_t *, pmu_ctx);
DEFINE_PER_CPU(unsigned long, pmu_activation_number);

/* forward declaration */
static struct file_operations pfm_file_ops;

/*
 * forward declarations
 */
#ifndef CONFIG_SMP
static void pfm_lazy_save_regs (struct task_struct *ta);
#endif

void dump_pmu_state(const char *);
static int pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);

#include "perfmon_itanium.h"
#include "perfmon_mckinley.h"
#include "perfmon_generic.h"
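/*
 * [Annotation added for this listing; not part of the kernel source.]
 * The nested ctl_table arrays above surface the perfmon knobs as
 * /proc/sys/kernel/perfmon/{debug,debug_ovfl,fastctxsw,expert_mode}.
 * A minimal sketch of how such a hierarchy is registered with the
 * 2.6.8-era sysctl API, assuming the two-argument form of
 * register_sysctl_table() (table, insert_at_head):
 *
 *	pfm_sysctl_header = register_sysctl_table(pfm_sysctl_root, 0);
 *
 * after which "echo 1 > /proc/sys/kernel/perfmon/debug" updates
 * pfm_sysctl.debug through proc_dointvec(), and the table is torn
 * down again with unregister_sysctl_table(pfm_sysctl_header).
 */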
static pmu_config_t *pmu_confs[]={
	&pmu_conf_mck,
	&pmu_conf_ita,
	&pmu_conf_gen, /* must be last */
	NULL
};

static int pfm_end_notify_user(pfm_context_t *ctx);

static inline void
pfm_clear_psr_pp(void)
{
	ia64_rsm(IA64_PSR_PP);
	ia64_srlz_i();
}

static inline void
pfm_set_psr_pp(void)
{
	ia64_ssm(IA64_PSR_PP);
	ia64_srlz_i();
}

static inline void
pfm_clear_psr_up(void)
{
	ia64_rsm(IA64_PSR_UP);
	ia64_srlz_i();
}

static inline void
pfm_set_psr_up(void)
{
	ia64_ssm(IA64_PSR_UP);
	ia64_srlz_i();
}

static inline unsigned long
pfm_get_psr(void)
{
	unsigned long tmp;
	tmp = ia64_getreg(_IA64_REG_PSR);
	ia64_srlz_i();
	return tmp;
}

static inline void
pfm_set_psr_l(unsigned long val)
{
	ia64_setreg(_IA64_REG_PSR_L, val);
	ia64_srlz_i();
}

static inline void
pfm_freeze_pmu(void)
{
	ia64_set_pmc(0,1UL);
	ia64_srlz_d();
}

static inline void
pfm_unfreeze_pmu(void)
{
	ia64_set_pmc(0,0UL);
	ia64_srlz_d();
}

static inline void
pfm_restore_ibrs(unsigned long *ibrs, unsigned int nibrs)
{
	int i;

	for (i=0; i < nibrs; i++) {
		ia64_set_ibr(i, ibrs[i]);
		ia64_dv_serialize_instruction();
	}
	ia64_srlz_i();
}

static inline void
pfm_restore_dbrs(unsigned long *dbrs, unsigned int ndbrs)
{
	int i;

	for (i=0; i < ndbrs; i++) {
		ia64_set_dbr(i, dbrs[i]);
		ia64_dv_serialize_data();
	}
	ia64_srlz_d();
}

/*
 * PMD[i] must be a counter. no check is made
 */
static inline unsigned long
pfm_read_soft_counter(pfm_context_t *ctx, int i)
{
	return ctx->ctx_pmds[i].val + (ia64_get_pmd(i) & pmu_conf->ovfl_val);
}

/*
 * PMD[i] must be a counter. no check is made
 */
static inline void
pfm_write_soft_counter(pfm_context_t *ctx, int i, unsigned long val)
{
	unsigned long ovfl_val = pmu_conf->ovfl_val;

	ctx->ctx_pmds[i].val = val & ~ovfl_val;
	/*
	 * writing to the unimplemented part is ignored, so we do not
	 * need to mask off the top part
	 */
	ia64_set_pmd(i, val & ovfl_val);
}

static pfm_msg_t *
pfm_get_new_msg(pfm_context_t *ctx)
{
	int idx, next;

	next = (ctx->ctx_msgq_tail+1) % PFM_MAX_MSGS;

	DPRINT(("ctx_fd=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail));
	if (next == ctx->ctx_msgq_head) return NULL;

	idx = ctx->ctx_msgq_tail;
	ctx->ctx_msgq_tail = next;

	DPRINT(("ctx=%p head=%d tail=%d msg=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, idx));

	return ctx->ctx_msgq+idx;
}

static pfm_msg_t *
pfm_get_next_msg(pfm_context_t *ctx)
{
	pfm_msg_t *msg;

	DPRINT(("ctx=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail));

	if (PFM_CTXQ_EMPTY(ctx)) return NULL;

	/*
	 * get oldest message
	 */
	msg = ctx->ctx_msgq+ctx->ctx_msgq_head;

	/*
	 * and move forward
	 */
	ctx->ctx_msgq_head = (ctx->ctx_msgq_head+1) % PFM_MAX_MSGS;

	DPRINT(("ctx=%p head=%d tail=%d type=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, msg->pfm_gen_msg.msg_type));

	return msg;
}

static void
pfm_reset_msgq(pfm_context_t *ctx)
{
	ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0;
	DPRINT(("ctx=%p msgq reset\n", ctx));
}
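/*
 * [Annotation added for this listing; not part of the kernel source.]
 * pfm_get_new_msg()/pfm_get_next_msg() above implement a fixed-size
 * ring buffer that deliberately keeps one slot empty: head == tail
 * means "empty", while (tail+1) % N == head means "full". A
 * standalone model of the same index arithmetic, with hypothetical
 * names (RING_SLOTS stands in for PFM_MAX_MSGS):
 */
#if 0	/* illustration only, never compiled */
#define RING_SLOTS 8
struct ring { int head, tail; };	/* both start at 0: empty */

static int ring_put(struct ring *r)	/* slot to write, or -1 if full */
{
	int next = (r->tail + 1) % RING_SLOTS;
	int idx;
	if (next == r->head) return -1;	/* would collide with head: full */
	idx = r->tail;
	r->tail = next;
	return idx;
}

static int ring_get(struct ring *r)	/* slot to read, or -1 if empty */
{
	int idx;
	if (r->head == r->tail) return -1;
	idx = r->head;
	r->head = (r->head + 1) % RING_SLOTS;
	return idx;
}
#endif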
/*
 * Here we want the physical address of the memory.
 * This is used when initializing the contents of the
 * area and marking the pages as reserved.
 */
static inline unsigned long
pfm_kvirt_to_pa(unsigned long adr)
{
	__u64 pa = ia64_tpa(adr);
	return pa;
}

static void *
pfm_rvmalloc(unsigned long size)
{
	void *mem;
	unsigned long addr;

	size = PAGE_ALIGN(size);
	mem  = vmalloc(size);
	if (mem) {
		//printk("perfmon: CPU%d pfm_rvmalloc(%ld)=%p\n", smp_processor_id(), size, mem);
		memset(mem, 0, size);
		addr = (unsigned long)mem;
		while (size > 0) {
			pfm_reserve_page(addr);
			addr+=PAGE_SIZE;
			size-=PAGE_SIZE;
		}
	}
	return mem;
}

static void
pfm_rvfree(void *mem, unsigned long size)
{
	unsigned long addr;

	if (mem) {
		DPRINT(("freeing physical buffer @%p size=%lu\n", mem, size));
		addr = (unsigned long) mem;
		while ((long) size > 0) {
			pfm_unreserve_page(addr);
			addr+=PAGE_SIZE;
			size-=PAGE_SIZE;
		}
		vfree(mem);
	}
	return;
}

static pfm_context_t *
pfm_context_alloc(void)
{
	pfm_context_t *ctx;

	/*
	 * allocate context descriptor
	 * must be able to free with interrupts disabled
	 */
	ctx = kmalloc(sizeof(pfm_context_t), GFP_KERNEL);
	if (ctx) {
		memset(ctx, 0, sizeof(pfm_context_t));
		DPRINT(("alloc ctx @%p\n", ctx));
	}
	return ctx;
}

static void
pfm_context_free(pfm_context_t *ctx)
{
	if (ctx) {
		DPRINT(("free ctx @%p\n", ctx));
		kfree(ctx);
	}
}
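/*
 * [Annotation added for this listing; not part of the kernel source.]
 * pfm_rvmalloc()/pfm_rvfree() exist because the sampling buffer is
 * vmalloc'd kernel memory that is later mapped into user space via
 * pfm_remap_page_range(); on 2.6.8, each page must be flagged with
 * SetPageReserved for that remapping to work. A sketch of the
 * intended pairing, assuming a buffer of 'bytes':
 *
 *	void *buf = pfm_rvmalloc(bytes);   // NULL on failure, zeroed otherwise
 *	if (buf == NULL) return -ENOMEM;
 *	... remap buf into the monitoring process ...
 *	pfm_rvfree(buf, bytes);            // unreserve pages, then vfree
 */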
static void
pfm_mask_monitoring(struct task_struct *task)
{
	pfm_context_t *ctx = PFM_GET_CTX(task);
	struct thread_struct *th = &task->thread;
	unsigned long mask, val, ovfl_mask;
	int i;

	DPRINT_ovfl(("masking monitoring for [%d]\n", task->pid));

	ovfl_mask = pmu_conf->ovfl_val;
	/*
	 * monitoring can only be masked as a result of a valid
	 * counter overflow. In UP, it means that the PMU still
	 * has an owner. Note that the owner can be different
	 * from the current task. However the PMU state belongs
	 * to the owner.
	 * In SMP, a valid overflow only happens when task is
	 * current. Therefore if we come here, we know that
	 * the PMU state belongs to the current task, therefore
	 * we can access the live registers.
	 *
	 * So in both cases, the live register contains the owner's
	 * state. We can ONLY touch the PMU registers and NOT the PSR.
	 *
	 * As a consequence of this call, the thread->pmds[] array
	 * contains stale information which must be ignored
	 * when context is reloaded AND monitoring is active (see
	 * pfm_restart).
	 */
	mask = ctx->ctx_used_pmds[0];
	for (i = 0; mask; i++, mask>>=1) {
		/* skip non used pmds */
		if ((mask & 0x1) == 0) continue;

		val = ia64_get_pmd(i);

		if (PMD_IS_COUNTING(i)) {
			/*
			 * we rebuild the full 64 bit value of the counter
			 */
			ctx->ctx_pmds[i].val += (val & ovfl_mask);
		} else {
			ctx->ctx_pmds[i].val = val;
		}
		DPRINT_ovfl(("pmd[%d]=0x%lx hw_pmd=0x%lx\n",
			i,
			ctx->ctx_pmds[i].val,
			val & ovfl_mask));
	}
	/*
	 * mask monitoring by setting the privilege level to 0
	 * we cannot use psr.pp/psr.up for this, it is controlled by
	 * the user
	 *
	 * if task is current, modify actual registers, otherwise modify
	 * thread save state, i.e., what will be restored in pfm_load_regs()
	 */
	mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
	for(i= PMU_FIRST_COUNTER; mask; i++, mask>>=1) {
		if ((mask & 0x1) == 0UL) continue;
		ia64_set_pmc(i, th->pmcs[i] & ~0xfUL);
		th->pmcs[i] &= ~0xfUL;
		DPRINT_ovfl(("pmc[%d]=0x%lx\n", i, th->pmcs[i]));
	}
	/*
	 * make all of this visible
	 */
	ia64_srlz_d();
}

/*
 * must always be done with task == current
 *
 * context must be in MASKED state when calling
 */
static void
pfm_restore_monitoring(struct task_struct *task)
{
	pfm_context_t *ctx = PFM_GET_CTX(task);
	struct thread_struct *th = &task->thread;
	unsigned long mask, ovfl_mask;
	unsigned long psr, val;
	int i, is_system;

	is_system = ctx->ctx_fl_system;
	ovfl_mask = pmu_conf->ovfl_val;

	if (task != current) {
		printk(KERN_ERR "perfmon.%d: invalid task[%d] current[%d]\n", __LINE__, task->pid, current->pid);
		return;
	}
	if (ctx->ctx_state != PFM_CTX_MASKED) {
		printk(KERN_ERR "perfmon.%d: task[%d] current[%d] invalid state=%d\n", __LINE__,
			task->pid, current->pid, ctx->ctx_state);
		return;
	}
	psr = pfm_get_psr();
	/*
	 * monitoring is masked via the PMC.
	 * As we restore their value, we do not want each counter to
	 * restart right away. We stop monitoring using the PSR,
	 * restore the PMC (and PMD) and then re-establish the psr
	 * as it was. Note that there can be no pending overflow at
	 * this point, because monitoring was MASKED.
	 *
	 * system-wide sessions are pinned and self-monitoring
	 */
	if (is_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
		/* disable dcr pp */
		ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
		pfm_clear_psr_pp();
	} else {
		pfm_clear_psr_up();
	}
	/*
	 * first, we restore the PMD
	 */
	mask = ctx->ctx_used_pmds[0];
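/*
 * [Annotation added for this listing; not part of the kernel source.]
 * The "+= (val & ovfl_mask)" in pfm_mask_monitoring() is the software
 * widening of the hardware counters: pmu_conf->ovfl_val masks the bits
 * the PMD actually implements, and everything above that is carried in
 * ctx_pmds[i].val. With a hypothetical 47-bit counter width, the split
 * looks like this (cf. pfm_read_soft_counter() earlier in the file):
 */
#if 0	/* illustration only, never compiled */
unsigned long ovfl_val = (1UL << 47) - 1;		/* implemented hw bits  */
unsigned long soft     = ctx->ctx_pmds[i].val;		/* upper bits, software */
unsigned long hw       = ia64_get_pmd(i) & ovfl_val;	/* lower bits, hardware */
unsigned long full     = soft + hw;			/* reconstructed 64-bit value */
#endif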
