📄 perfmon.c

📁 From the linux-jx2410 source code upload
💻 C
📖 Page 1 of 5
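This page covers the context-switch and teardown path of the IA-64 perfmon code: pfm_save_regs(), pfm_lazy_save_regs(), pfm_load_regs(), pfm_flush_regs(), ia64_reset_pmu(), pfm_inherit() and the beginning of pfm_context_exit(). A recurring idiom in the save/restore routines below is walking a bitmask of the PMD/PMC registers actually in use and touching only those. The user-space sketch that follows shows just that loop shape; the hw_reg array, the mask value and the save_used_regs() helper are invented for illustration and are not part of perfmon.c, which reads the real registers with ia64_get_pmd()/ia64_get_pmc().

#include <stdio.h>

#define NUM_REGS 8

static unsigned long hw_reg[NUM_REGS] = { 10, 20, 30, 40, 50, 60, 70, 80 };
static unsigned long saved[NUM_REGS];

static void
save_used_regs(unsigned long mask)
{
        int i;

        /* same shape as the kernel loops: shift the mask right once per
         * register index and copy only the registers whose bit is set;
         * the caller must pass a mask that stays within NUM_REGS bits */
        for (i = 0; mask; i++, mask >>= 1) {
                if (mask & 0x1) saved[i] = hw_reg[i];
        }
}

int
main(void)
{
        int i;

        save_used_regs(0x15UL); /* bits 0, 2 and 4 set -> copy regs 0, 2 and 4 */

        for (i = 0; i < NUM_REGS; i++)
                printf("reg %d: saved=%lu\n", i, saved[i]);
        return 0;
}

The loop stops as soon as the shifted mask reaches zero, so sparse masks cost only as many iterations as the highest used register, which is why the kernel code keeps per-context used_pmds/used_pmcs masks instead of scanning every register.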
         * It is still disabled at this point, so it won't run
        printk(__FUNCTION__" tasklet is %p state=%d, count=%d\n", &perfmon_tasklet, perfmon_tasklet.state, perfmon_tasklet.count);
         */

        /*
         * for now here for debug purposes
         */
        perfmon_dir = create_proc_read_entry ("perfmon", 0, 0, perfmon_read_entry, NULL);
}

void
perfmon_init_percpu (void)
{
        ia64_set_pmv(IA64_PERFMON_VECTOR);
        ia64_srlz_d();
}

void
pfm_save_regs (struct task_struct *ta)
{
        struct task_struct *owner;
        pfm_context_t *ctx;
        struct thread_struct *t;
        u64 pmc0, psr;
        unsigned long mask;
        int i;

        t   = &ta->thread;
        ctx = ta->thread.pfm_context;

        /*
         * We must make sure that we don't lose any potential overflow
         * interrupt while saving PMU context. In this code, external
         * interrupts are always enabled.
         */

        /*
         * save current PSR: needed because we modify it
         */
        __asm__ __volatile__ ("mov %0=psr;;": "=r"(psr) :: "memory");

        /*
         * stop monitoring:
         * This is the only way to stop monitoring without destroying overflow
         * information in PMC[0].
         * This is the last instruction which can cause overflow when monitoring
         * in kernel.
         * By now, we could still have an overflow interrupt in-flight.
         */
        __asm__ __volatile__ ("rsm psr.up|psr.pp;;"::: "memory");

        /*
         * Mark the PMU as not owned
         * This will cause the interrupt handler to do nothing in case an overflow
         * interrupt was in-flight
         * This also guarantees that pmc0 will contain the final state
         * It virtually gives us full control over overflow processing from that point
         * on.
         * It must be an atomic operation.
         */
        owner = PMU_OWNER();
        SET_PMU_OWNER(NULL);

        /*
         * read current overflow status:
         *
         * we are guaranteed to read the final stable state
         */
        ia64_srlz_d();
        pmc0 = ia64_get_pmc(0); /* slow */

        /*
         * freeze PMU:
         *
         * This destroys the overflow information. This is required to make sure
         * next process does not start with monitoring on if not requested
         */
        ia64_set_pmc(0, 1);

        /*
         * Check for overflow bits and proceed manually if needed
         *
         * It is safe to call the interrupt handler now because it does
         * not try to block the task right away. Instead it will set a
         * flag and let the task proceed. The blocking will only occur
         * next time the task exits from the kernel.
         */
        if (pmc0 & ~0x1) {
                update_counters(owner, pmc0, NULL);
                /* we will save the updated version of pmc0 */
        }

        /*
         * restore PSR for context switch to save
         */
        __asm__ __volatile__ ("mov psr.l=%0;; srlz.i;;"::"r"(psr): "memory");

        /*
         * we do not save registers if we can do lazy
         */
        if (PFM_CAN_DO_LAZY()) {
                SET_PMU_OWNER(owner);
                return;
        }

        /*
         * XXX needs further optimization.
         * Also must take holes into account
         */
        mask = ctx->ctx_used_pmds[0];
        for (i=0; mask; i++, mask>>=1) {
                if (mask & 0x1) t->pmd[i] = ia64_get_pmd(i);
        }

        /* skip PMC[0], we handle it separately */
        mask = ctx->ctx_used_pmcs[0]>>1;
        for (i=1; mask; i++, mask>>=1) {
                if (mask & 0x1) t->pmc[i] = ia64_get_pmc(i);
        }

        /*
         * Throughout this code we could have gotten an overflow interrupt. It is transformed
         * into a spurious interrupt as soon as we give up pmu ownership.
         */
}

static void
pfm_lazy_save_regs (struct task_struct *ta)
{
        pfm_context_t *ctx;
        struct thread_struct *t;
        unsigned long mask;
        int i;

        DBprintk(("  on [%d] by [%d]\n", ta->pid, current->pid));

        t   = &ta->thread;
        ctx = ta->thread.pfm_context;

        /*
         * XXX needs further optimization.
         * Also must take holes into account
         */
        mask = ctx->ctx_used_pmds[0];
        for (i=0; mask; i++, mask>>=1) {
                if (mask & 0x1) t->pmd[i] = ia64_get_pmd(i);
        }

        /* skip PMC[0], we handle it separately */
        mask = ctx->ctx_used_pmcs[0]>>1;
        for (i=1; mask; i++, mask>>=1) {
                if (mask & 0x1) t->pmc[i] = ia64_get_pmc(i);
        }

        SET_PMU_OWNER(NULL);
}

void
pfm_load_regs (struct task_struct *ta)
{
        struct thread_struct *t = &ta->thread;
        pfm_context_t *ctx = ta->thread.pfm_context;
        struct task_struct *owner;
        unsigned long mask;
        int i;

        owner = PMU_OWNER();
        if (owner == ta) goto skip_restore;
        if (owner) pfm_lazy_save_regs(owner);

        SET_PMU_OWNER(ta);

        mask = ctx->ctx_used_pmds[0];
        for (i=0; mask; i++, mask>>=1) {
                if (mask & 0x1) ia64_set_pmd(i, t->pmd[i]);
        }

        /* skip PMC[0] to avoid side effects */
        mask = ctx->ctx_used_pmcs[0]>>1;
        for (i=1; mask; i++, mask>>=1) {
                if (mask & 0x1) ia64_set_pmc(i, t->pmc[i]);
        }

skip_restore:
        /*
         * unfreeze only when possible
         */
        if (ctx->ctx_fl_frozen == 0) {
                ia64_set_pmc(0, 0);
                ia64_srlz_d();
                /* place where we potentially (kernel level) start monitoring again */
        }
}

/*
 * This function is called when a thread exits (from exit_thread()).
 * This is a simplified pfm_save_regs() that simply flushes the current
 * register state into the save area taking into account any pending
 * overflow. This time no notification is sent because the task is dying
 * anyway. The inline processing of overflows avoids losing some counts.
 * The PMU is frozen on exit from this call and is never to be reenabled
 * again for this task.
 */
void
pfm_flush_regs (struct task_struct *ta)
{
        pfm_context_t *ctx;
        u64 pmc0, psr, mask;
        int i,j;

        if (ta == NULL) {
                panic(__FUNCTION__" task is NULL\n");
        }
        ctx = ta->thread.pfm_context;
        if (ctx == NULL) {
                panic(__FUNCTION__" PFM ctx is NULL\n");
        }

        /*
         * We must make sure that we don't lose any potential overflow
         * interrupt while saving PMU context. In this code, external
         * interrupts are always enabled.
         */

        /*
         * save current PSR: needed because we modify it
         */
        __asm__ __volatile__ ("mov %0=psr;;": "=r"(psr) :: "memory");

        /*
         * stop monitoring:
         * This is the only way to stop monitoring without destroying overflow
         * information in PMC[0].
         * This is the last instruction which can cause overflow when monitoring
         * in kernel.
         * By now, we could still have an overflow interrupt in-flight.
         */
        __asm__ __volatile__ ("rsm psr.up;;"::: "memory");

        /*
         * Mark the PMU as not owned
         * This will cause the interrupt handler to do nothing in case an overflow
         * interrupt was in-flight
         * This also guarantees that pmc0 will contain the final state
         * It virtually gives us full control on overflow processing from that point
         * on.
         * It must be an atomic operation.
         */
        SET_PMU_OWNER(NULL);

        /*
         * read current overflow status:
         *
         * we are guaranteed to read the final stable state
         */
        ia64_srlz_d();
        pmc0 = ia64_get_pmc(0); /* slow */

        /*
         * freeze PMU:
         *
         * This destroys the overflow information. This is required to make sure
         * next process does not start with monitoring on if not requested
         */
        ia64_set_pmc(0, 1);
        ia64_srlz_d();

        /*
         * restore PSR for context switch to save
         */
        __asm__ __volatile__ ("mov psr.l=%0;;srlz.i;"::"r"(psr): "memory");

        /*
         * This loop flushes the PMD into the PFM context.
         * It also processes overflow inline.
         *
         * IMPORTANT: No notification is sent at this point as the process is dying.
         * The implicit notification will come from a SIGCHLD or a return from a
         * waitpid().
         *
         * XXX: must take holes into account
         */
        mask = pmc0 >> PMU_FIRST_COUNTER;
        for (i=0, j=PMU_FIRST_COUNTER; i < pmu_conf.max_counters; i++, j++) {
                /* collect latest results */
                ctx->ctx_pmds[i].val += ia64_get_pmd(j) & pmu_conf.perf_ovfl_val;

                /*
                 * now everything is in ctx_pmds[] and we need
                 * to clear the saved context from save_regs() such that
                 * pfm_read_pmds() gets the correct value
                 */
                ta->thread.pmd[j] = 0;

                /* take care of overflow inline */
                if (mask & 0x1) {
                        ctx->ctx_pmds[i].val += 1 + pmu_conf.perf_ovfl_val;
                        DBprintk((" PMD[%d] overflowed pmd=0x%lx pmds.val=0x%lx\n",
                                j, ia64_get_pmd(j), ctx->ctx_pmds[i].val));
                }
                mask >>= 1;
        }
}

/*
 * XXX: this routine is not very portable for PMCs
 * XXX: make this routine able to work with non current context
 */
static void
ia64_reset_pmu(void)
{
        int i;

        /* PMU is frozen, no pending overflow bits */
        ia64_set_pmc(0,1);

        /* extra overflow bits + counter configs cleared */
        for (i=1; i < PMU_FIRST_COUNTER + pmu_conf.max_counters; i++) {
                ia64_set_pmc(i,0);
        }

        /* opcode matcher set to all 1s */
        ia64_set_pmc(8,~0);
        ia64_set_pmc(9,~0);

        /* I-EAR config cleared, plm=0 */
        ia64_set_pmc(10,0);

        /* D-EAR config cleared, PMC[11].pt must be 1 */
        ia64_set_pmc(11,1 << 28);

        /* BTB config. plm=0 */
        ia64_set_pmc(12,0);

        /* Instruction address range, PMC[13].ta must be 1 */
        ia64_set_pmc(13,1);

        /* clears all PMD registers */
        for (i=0; i < pmu_conf.num_pmds; i++) {
                if (PMD_IS_IMPL(i)) ia64_set_pmd(i,0);
        }
        ia64_srlz_d();
}

/*
 * task is the newly created task
 */
int
pfm_inherit(struct task_struct *task, struct pt_regs *regs)
{
        pfm_context_t *ctx = current->thread.pfm_context;
        pfm_context_t *nctx;
        struct thread_struct *th = &task->thread;
        int i, cnum;

        /*
         * bypass completely for system wide
         */
        if (pfs_info.pfs_sys_session) {
                DBprintk((" enabling psr.pp for %d\n", task->pid));
                ia64_psr(regs)->pp = pfs_info.pfs_pp;
                return 0;
        }

        /*
         * takes care of easiest case first
         */
        if (CTX_INHERIT_MODE(ctx) == PFM_FL_INHERIT_NONE) {
                DBprintk((" removing PFM context for %d\n", task->pid));
                task->thread.pfm_context     = NULL;
                task->thread.pfm_must_block  = 0;
                atomic_set(&task->thread.pfm_notifiers_check, 0);
                /* copy_thread() clears IA64_THREAD_PM_VALID */
                return 0;
        }

        nctx = pfm_context_alloc();
        if (nctx == NULL) return -ENOMEM;

        /* copy content */
        *nctx = *ctx;

        if (CTX_INHERIT_MODE(ctx) == PFM_FL_INHERIT_ONCE) {
                nctx->ctx_fl_inherit = PFM_FL_INHERIT_NONE;
                atomic_set(&task->thread.pfm_notifiers_check, 0);
                DBprintk((" downgrading to INHERIT_NONE for %d\n", task->pid));
                pfs_info.pfs_proc_sessions++;
        }

        /* initialize counters in new context */
        for (i=0, cnum=PMU_FIRST_COUNTER; i < pmu_conf.max_counters; cnum++, i++) {
                nctx->ctx_pmds[i].val = nctx->ctx_pmds[i].ival & ~pmu_conf.perf_ovfl_val;
                th->pmd[cnum]         = nctx->ctx_pmds[i].ival & pmu_conf.perf_ovfl_val;
        }

        /* clear BTB index register */
        th->pmd[16] = 0;

        /* if sampling then increment number of users of buffer */
        if (nctx->ctx_smpl_buf) {
                atomic_inc(&nctx->ctx_smpl_buf->psb_refcnt);
        }

        nctx->ctx_fl_frozen = 0;
        nctx->ctx_ovfl_regs = 0;
        sema_init(&nctx->ctx_restart_sem, 0); /* reset this semaphore to locked */

        /* clear pending notification */
        th->pfm_must_block = 0;

        /* link with new task */
        th->pfm_context     = nctx;

        DBprintk((" nctx=%p for process %d\n", (void *)nctx, task->pid));

        /*
         * the copy_thread routine automatically clears
         * IA64_THREAD_PM_VALID, so we need to reenable it, if it was used by the caller
         */
        if (current->thread.flags & IA64_THREAD_PM_VALID) {
                DBprintk(("  setting PM_VALID for %d\n", task->pid));
                th->flags |= IA64_THREAD_PM_VALID;
        }
        return 0;
}

/*
 * called from release_thread(), at this point this task is not in the
 * tasklist anymore
 */
void
pfm_context_exit(struct task_struct *task)
{
        pfm_context_t *ctx = task->thread.pfm_context;

        if (!ctx) {
                DBprintk((" invalid context for %d\n", task->pid));
                return;
        }

        /* check if we have a sampling buffer attached */
        if (ctx->ctx_smpl_buf) {
                pfm_smpl_buffer_desc_t *psb = ctx->ctx_smpl_buf;

                /* if only user left, then remove */
                DBprintk((" [%d] [%d] psb->refcnt=%d\n", current->pid, task->pid, psb->psb_refcnt.counter));

                if (atomic_dec_and_test(&psb->psb_refcnt) ) {
                        rvfree(psb->psb_hdr, psb->psb_size);
                        vfree(psb);
                        DBprintk((" [%d] cleaning [%d] sampling buffer\n", current->pid, task->pid ));
                }
        }

        DBprintk((" [%d] cleaning [%d] pfm_context @%p\n", current->pid, task->pid, (void *)ctx));

        /*
         * To avoid having the notified task scan the entire process list
         * when it exits because it would have pfm_notifiers_check set, we
         * decrease it by 1 to inform the task that one less task is going
         * to send it a notification. Each new notifier increases this field by
         * 1 in pfm_context_create(). Of course, there is a race condition between
         * decreasing the value and the notified task exiting. The danger comes
         * from the fact that we have a direct pointer to its task structure,
         * thereby bypassing the tasklist. We must make sure that if we have
         * notify_task != NULL, the target task is still somewhat present. It may
         * already be detached from the tasklist but that's okay. Note that it is
         * okay if we 'miss the deadline' and the task scans the list for nothing,
         * it will affect performance

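pfm_flush_regs() above rebuilds the full 64-bit count of each counter from a hardware PMD that implements only the low bits: it adds the implemented bits (masked with pmu_conf.perf_ovfl_val) into ctx_pmds[].val and, when the counter's overflow bit in pmc0 is set, adds one extra wrap of 1 + perf_ovfl_val. The standalone sketch below reproduces only that arithmetic; the 32-bit counter width, the accumulate() helper and the sample values are assumptions made for illustration and do not appear in perfmon.c.

#include <stdio.h>

/* pretend the hardware counters implement only their low 32 bits */
static const unsigned long long perf_ovfl_val = 0xffffffffULL;

static unsigned long long
accumulate(unsigned long long soft_val, unsigned long long hw_pmd, int overflowed)
{
        /* fold in the implemented (low) bits of the hardware counter */
        soft_val += hw_pmd & perf_ovfl_val;

        /* an overflow means the counter wrapped once: account for the
         * wrap by adding 2^width, i.e. 1 + perf_ovfl_val */
        if (overflowed) soft_val += 1 + perf_ovfl_val;

        return soft_val;
}

int
main(void)
{
        /* the counter wrapped and now reads 0x10 */
        unsigned long long v = accumulate(0, 0x10ULL, 1);

        printf("accumulated value = 0x%llx\n", v);
        return 0;
}

Compiled as an ordinary program, this prints 0x100000010: one full wrap of a 32-bit counter plus the 0x10 the counter currently reads, which is the same bookkeeping the flush loop performs per counter.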