
📄 perfmon.c

📁 Linux kernel source code, from the compressed archive of source code accompanying the book <<Linux内核>> (The Linux Kernel)
💻 C
          default:
		DBprintk((__FUNCTION__" unknown command 0x%x\n", cmd));
		return -EINVAL;
		break;
    }
    return 0;
}

asmlinkage int
sys_perfmonctl (int pid, int cmd, int flags, perfmon_req_t *req, int count, long arg6, long arg7, long arg8, long stack)
{
    struct pt_regs *regs = (struct pt_regs *) &stack;
    struct task_struct *child = current;
    int ret;

    if (pid != current->pid) {
        read_lock(&tasklist_lock);
        {
            child = find_task_by_pid(pid);
            if (child)
                get_task_struct(child);
        }
        if (!child) {
            read_unlock(&tasklist_lock);
            return -ESRCH;
        }
        /*
         * XXX: need to do more checking here
         */
        if (child->state != TASK_ZOMBIE) {
            DBprintk((__FUNCTION__" warning: process %d not in stable state %ld\n", pid, child->state));
        }
    }
    ret = do_perfmonctl(child, cmd, flags, req, count, regs);

    if (child != current) read_unlock(&tasklist_lock);

    return ret;
}

static inline int
update_counters (u64 pmc0)
{
    unsigned long mask, i, cnum;
    struct thread_struct *th;
    struct task_struct *ta;

    if (pmu_owners[smp_processor_id()] == NULL) {
        DBprintk((__FUNCTION__" Spurious overflow interrupt: PMU not owned\n"));
        return 0;
    }

    /*
     * It is never safe to access the task for which the overflow interrupt is destined
     * using the current variable, as the interrupt may occur in the middle of a context switch
     * where current does not yet hold the task that is running.
     *
     * For monitoring, however, we do need access to the task which caused the overflow,
     * to account for overflow on the counters.
     *
     * We accomplish this by maintaining a current owner of the PMU per CPU. During context
     * switch the ownership is changed in a way such that the reflected owner is always the
     * valid one, i.e. the one that caused the interrupt.
     */
    ta = pmu_owners[smp_processor_id()];
    th = &pmu_owners[smp_processor_id()]->thread;

    /*
     * Don't think this could happen given the first test. Keep as a sanity check.
     */
    if ((th->flags & IA64_THREAD_PM_VALID) == 0) {
        DBprintk((__FUNCTION__" Spurious overflow interrupt: process %d not using perfmon\n", ta->pid));
        return 0;
    }

    /*
     * if PMU not frozen : spurious from previous context
     * if PMC[0] = 0x1   : frozen but no overflow reported: leftover from previous context
     *
     * in either case we don't touch the state upon return from the handler
     */
    if ((pmc0 & 0x1) == 0 || pmc0 == 0x1) {
        DBprintk((__FUNCTION__" Spurious overflow interrupt: process %d freeze=0\n", ta->pid));
        return 0;
    }

    mask = pmc0 >> 4;

    for (i = 0, cnum = PMU_FIRST_COUNTER; i < pmu_conf.max_counters; cnum++, i++, mask >>= 1) {
        if (mask & 0x1) {
            DBprintk((__FUNCTION__ " PMD[%ld] overflowed pmd=0x%lx pmod.val=0x%lx\n", cnum, ia64_get_pmd(cnum), th->pmu_counters[i].val));

            /*
             * Because we sometimes (EARS/BTB) reset to a specific value, we cannot simply use
             * val to count the number of times we overflowed. Otherwise we would lose the value
             * currently in the PMD (which can be >0). So to make sure we don't lose
             * the residual counts, we set val to contain the full 64-bit value of the counter.
             */
            th->pmu_counters[i].val += 1 + pmu_conf.perf_ovfl_val + (ia64_get_pmd(cnum) & pmu_conf.perf_ovfl_val);

            /* writes to the upper part are ignored, so this is safe */
            ia64_set_pmd(cnum, th->pmu_counters[i].rval);

            DBprintk((__FUNCTION__ " pmod[%ld].val=0x%lx pmd=0x%lx\n", i, th->pmu_counters[i].val, ia64_get_pmd(cnum) & pmu_conf.perf_ovfl_val));

            if (th->pmu_counters[i].pid != 0 && th->pmu_counters[i].sig > 0) {
                DBprintk((__FUNCTION__ " should notify process %d with signal %d\n", th->pmu_counters[i].pid, th->pmu_counters[i].sig));
            }
        }
    }
    return 1;
}

static void
perfmon_interrupt (int irq, void *arg, struct pt_regs *regs)
{
    /* unfreeze if not spurious */
    if (update_counters(ia64_get_pmc(0))) {
        ia64_set_pmc(0, 0);
        ia64_srlz_d();
    }
}

static struct irqaction perfmon_irqaction = {
    handler:	perfmon_interrupt,
    flags:	SA_INTERRUPT,
    name:	"perfmon"
};

static int
perfmon_proc_info(char *page)
{
    char *p = page;
    u64 pmc0 = ia64_get_pmc(0);
    int i;

    p += sprintf(p, "PMC[0]=%lx\nPerfmon debug: %s\n", pmc0, pfm_debug ? "On" : "Off");

    for (i = 0; i < NR_CPUS; i++) {
        if (cpu_is_online(i))
            p += sprintf(p, "CPU%d.PMU %d\n", i, pmu_owners[i] ? pmu_owners[i]->pid : -1);
    }
    return p - page;
}

static int
perfmon_read_entry(char *page, char **start, off_t off, int count, int *eof, void *data)
{
    int len = perfmon_proc_info(page);

    if (len <= off + count) *eof = 1;

    *start = page + off;
    len   -= off;

    if (len > count) len = count;
    if (len < 0) len = 0;

    return len;
}

static struct proc_dir_entry *perfmon_dir;

void __init
perfmon_init (void)
{
    pal_perf_mon_info_u_t pm_info;
    s64 status;

    irq_desc[PERFMON_IRQ].status |= IRQ_PER_CPU;
    irq_desc[PERFMON_IRQ].handler = &irq_type_ia64_sapic;
    setup_irq(PERFMON_IRQ, &perfmon_irqaction);

    ia64_set_pmv(PERFMON_IRQ);
    ia64_srlz_d();

    printk("perfmon: Initialized vector to %u\n", PERFMON_IRQ);

    if ((status = ia64_pal_perf_mon_info(pmu_conf.impl_regs, &pm_info)) != 0) {
        printk(__FUNCTION__ " pal call failed (%ld)\n", status);
        return;
    }
    pmu_conf.perf_ovfl_val = (1L << pm_info.pal_perf_mon_info_s.width) - 1;

    /* XXX need to use PAL instead */
    pmu_conf.max_counters  = pm_info.pal_perf_mon_info_s.generic;

    printk("perfmon: Counters are %d bits\n", pm_info.pal_perf_mon_info_s.width);
    printk("perfmon: Maximum counter value 0x%lx\n", pmu_conf.perf_ovfl_val);

    /*
     * for now here for debug purposes
     */
    perfmon_dir = create_proc_read_entry("perfmon", 0, 0, perfmon_read_entry, NULL);
}

void
perfmon_init_percpu (void)
{
    ia64_set_pmv(PERFMON_IRQ);
    ia64_srlz_d();
}

/*
 * XXX: for system wide, this function MUST never be called
 */
void
ia64_save_pm_regs (struct task_struct *ta)
{
    struct thread_struct *t = &ta->thread;
    u64 pmc0, psr;
    int i, j;

    /*
     * We must make sure that we don't lose any potential overflow
     * interrupt while saving the PMU context. In this code, external
     * interrupts are always enabled.
     */

    /*
     * save current PSR: needed because we modify it
     */
    __asm__ __volatile__ ("mov %0=psr;;" : "=r"(psr) :: "memory");

    /*
     * stop monitoring:
     * This is the only way to stop monitoring without destroying overflow
     * information in PMC[0..3].
     * This is the last instruction which can cause overflow when monitoring
     * in the kernel.
     * By now, we could still have an overflow interrupt in flight.
     */
    __asm__ __volatile__ ("rsm psr.up;;" ::: "memory");

    /*
     * read current overflow status:
     *
     * We may be reading stale information at this point if we got the interrupt
     * just before the read(pmc0), but that's all right. However, if we did
     * not get the interrupt before, this read reflects the LAST state.
     */
    pmc0 = ia64_get_pmc(0);

    /*
     * freeze PMU:
     *
     * This destroys the overflow information. This is required to make sure the
     * next process does not start with monitoring on if not requested
     * (PSR.up may not be enough).
     *
     * We could still get an overflow interrupt by now. However, the handler
     * will not do anything if it sees PMC[0].fr=1 but no overflow bits
     * are set. So the PMU will stay in the frozen state. This implies that pmc0
     * will still be holding the correct unprocessed information.
     */
    ia64_set_pmc(0, 1);
    ia64_srlz_d();

    /*
     * check for overflow bits set:
     *
     * If pmc0 reports the PMU frozen, this means we have a pending overflow,
     * therefore we invoke the handler. The handler is reentrant with regard
     * to PMC[0], so it is safe to call it twice.
     *
     * If pmc0 reports overflow, we need to reread the current PMC[0] value
     * in case the handler was invoked right after the first pmc0 read.
     * If it was not invoked, then pmc0 == PMC[0]; otherwise it has been invoked
     * and the overflow information has been processed, so we don't need to call.
     *
     * Test breakdown:
     *	- pmc0 & ~0x1: test if overflow happened
     *	- second part: check if the current register reflects this as well.
     *
     * NOTE: testing for pmc0 & 0x1 is not enough, as it would trigger the call
     * when PM_VALID and PMU.fr are set, which is common when setting up registers
     * just before actually starting the monitors.
     */
    if ((pmc0 & ~0x1) && ((pmc0 = ia64_get_pmc(0)) & ~0x1)) {
        printk(__FUNCTION__" Warning: pmc[0]=0x%lx\n", pmc0);
        update_counters(pmc0);
        /*
         * XXX: not sure that's enough. the next task may still get the
         * interrupt.
         */
    }

    /*
     * restore PSR for context switch to save
     */
    __asm__ __volatile__ ("mov psr.l=%0;;" :: "r"(psr) : "memory");

    /*
     * XXX: this will need to be extended beyond just counters
     */
    for (i = 0, j = 4; i < IA64_NUM_PMD_COUNTERS; i++, j++) {
        t->pmd[i] = ia64_get_pmd(j);
        t->pmc[i] = ia64_get_pmc(j);
    }

    /*
     * The PMU is frozen and the PMU context is saved: nobody owns the PMU on this CPU.
     * At this point, we should not receive any pending interrupt from the
     * 'switched out' task.
     */
    pmu_owners[smp_processor_id()] = NULL;
}

void
ia64_load_pm_regs (struct task_struct *ta)
{
    struct thread_struct *t = &ta->thread;
    int i, j;

    /*
     * We first restore ownership of the PMU to the 'soon to be current'
     * context. This way, if we get an interrupt as soon as we unfreeze
     * the PMU at the end of this function, we attribute it to the correct
     * task.
     */
    pmu_owners[smp_processor_id()] = ta;

    /*
     * XXX: this will need to be extended beyond just counters
     */
    for (i = 0, j = 4; i < IA64_NUM_PMD_COUNTERS; i++, j++) {
        ia64_set_pmd(j, t->pmd[i]);
        ia64_set_pmc(j, t->pmc[i]);
    }

    /*
     * unfreeze PMU
     */
    ia64_set_pmc(0, 0);
    ia64_srlz_d();
}

#else /* !CONFIG_PERFMON */

asmlinkage unsigned long
sys_perfmonctl (int cmd, int count, void *ptr)
{
    return -ENOSYS;
}

#endif /* !CONFIG_PERFMON */
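
The overflow accounting in update_counters() deserves a closer look. Rather than counting overflow events, the handler folds each overflow into a full 64-bit software value: one overflow contributes the counter's whole range (perf_ovfl_val + 1) plus the residual count left in the low bits of the hardware PMD. Below is a minimal user-space sketch of that arithmetic, not part of the kernel source; the 8-bit counter width is an assumption made only to keep the numbers readable (the real width comes from PAL in perfmon_init()).

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    /* perf_ovfl_val = (1 << width) - 1, as computed in perfmon_init();
     * a hypothetical 8-bit counter gives 0xff. */
    uint64_t perf_ovfl_val = (1ULL << 8) - 1;

    uint64_t val = 0;     /* software 64-bit accumulator (th->pmu_counters[i].val) */
    uint64_t pmd  = 0x03; /* hardware PMD contents after wrapping past 0xff */

    /* One overflow contributes the full counter range (perf_ovfl_val + 1)
     * plus the residual count still sitting in the PMD. */
    val += 1 + perf_ovfl_val + (pmd & perf_ovfl_val);

    printf("val = %llu\n", (unsigned long long)val);  /* 256 + 3 = 259 */
    return 0;
}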
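Similarly, the spurious-interrupt tests hinge on the layout of PMC[0]: bit 0 is the freeze bit, and the per-counter overflow bits start at bit 4 (hence mask = pmc0 >> 4). The standalone sketch below mimics that decoding outside the kernel; decode_pmc0() and the sample values are hypothetical, and only the bit interpretation is taken from the listing.

#include <stdio.h>
#include <stdint.h>

#define PMU_FIRST_COUNTER 4  /* first generic counter, as in the listing */

static void decode_pmc0(uint64_t pmc0, unsigned max_counters)
{
    /* PMU not frozen, or frozen with no overflow bits: spurious. */
    if ((pmc0 & 0x1) == 0 || pmc0 == 0x1) {
        printf("pmc0=0x%llx: spurious\n", (unsigned long long)pmc0);
        return;
    }

    uint64_t mask = pmc0 >> 4;  /* overflow bits start at bit 4 */

    for (unsigned i = 0, cnum = PMU_FIRST_COUNTER; i < max_counters; i++, cnum++, mask >>= 1) {
        if (mask & 0x1)
            printf("PMD[%u] overflowed\n", cnum);
    }
}

int main(void)
{
    decode_pmc0(0x0, 4);   /* not frozen: spurious                 */
    decode_pmc0(0x1, 4);   /* frozen, no overflow: spurious        */
    decode_pmc0(0x31, 4);  /* frozen, PMD[4] and PMD[5] overflowed */
    return 0;
}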
