
📄 perfmon.c

📁 This file is part of rt_linux
💻 C
📖 Page 1 of 5
		/*
		 * execute write checker, if any
		 */
		if (PMD_WR_FUNC(cnum)) ret = PMD_WR_FUNC(cnum)(task, cnum, &tmp.reg_value, regs);

abort_mission:
		if (ret == -EINVAL) reg_retval = PFM_REG_RETFL_EINVAL;

		PFM_REG_RETFLAG_SET(tmp.reg_flags, reg_retval);

		if (copy_to_user(req, &tmp, sizeof(tmp))) return -EFAULT;

		/*
		 * if there was something wrong on this register, don't touch
		 * the hardware at all and abort the write request for the others.
		 *
		 * On error, the user must sequentially scan the table: the first
		 * entry which has a return flag set is the one that caused the error.
		 */
		if (ret != 0) {
			DBprintk(("[%d] pmc[%u]=0x%lx error %d\n",
				  task->pid, cnum, tmp.reg_value, reg_retval));
			break;
		}

		/* keep track of what we use */
		CTX_USED_PMD(ctx, pmu_conf.pmd_desc[(cnum)].dep_pmd[0]);

		/* mark this register as used as well */
		CTX_USED_PMD(ctx, RDEP(cnum));

		/* writes to the unimplemented part are ignored, so this is safe */
		ia64_set_pmd(cnum, tmp.reg_value & pmu_conf.perf_ovfl_val);

		/* to go away */
		ia64_srlz_d();

		DBprintk(("[%d] pmd[%u]: soft_pmd=0x%lx  short_reset=0x%lx "
			  "long_reset=0x%lx hw_pmd=%lx notify=%c used_pmds=0x%lx reset_pmds=0x%lx\n",
				task->pid, cnum,
				ctx->ctx_soft_pmds[cnum].val,
				ctx->ctx_soft_pmds[cnum].short_reset,
				ctx->ctx_soft_pmds[cnum].long_reset,
				ia64_get_pmd(cnum) & pmu_conf.perf_ovfl_val,
				PMC_OVFL_NOTIFY(ctx, cnum) ? 'Y':'N',
				ctx->ctx_used_pmds[0],
				ctx->ctx_soft_pmds[cnum].reset_pmds[0]));
	}
	return ret;
}

static int
pfm_read_pmds(struct task_struct *task, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	struct thread_struct *th = &task->thread;
	unsigned long val = 0;
	pfarg_reg_t tmp, *req = (pfarg_reg_t *)arg;
	unsigned int cnum;
	int i, ret = 0;

	if (!CTX_IS_ENABLED(ctx)) return -EINVAL;

	/*
	 * XXX: MUST MAKE SURE WE DON'T HAVE ANY PENDING OVERFLOW BEFORE READING.
	 * This is required when the monitoring has been stopped by user or kernel.
	 * If it is still going on, then that's fine because we are not guaranteed
	 * to return an accurate value in this case.
	 */

	/* XXX: ctx locking may be required here */

	DBprintk(("ctx_last_cpu=%d for [%d]\n", atomic_read(&ctx->ctx_last_cpu), task->pid));

	for (i = 0; i < count; i++, req++) {
		unsigned long ctx_val = ~0UL;

		if (copy_from_user(&tmp, req, sizeof(tmp))) return -EFAULT;

		cnum = tmp.reg_num;

		if (!PMD_IS_IMPL(cnum)) goto abort_mission;
		/*
		 * we can only read a register that we use. That includes
		 * the ones we explicitly initialize AND the ones we want included
		 * in the sampling buffer (smpl_regs).
		 *
		 * Having this restriction allows optimization in the ctxsw routine
		 * without compromising security (leaks).
		 */
		if (!CTX_IS_USED_PMD(ctx, cnum)) goto abort_mission;

		/*
		 * If the task is not the current one, then we check if the
		 * PMU state is still in the local live registers due to lazy ctxsw.
		 * If true, then we read directly from the registers.
		 */
		if (atomic_read(&ctx->ctx_last_cpu) == smp_processor_id()) {
			ia64_srlz_d();
			val = ia64_get_pmd(cnum);
			DBprintk(("reading pmd[%u]=0x%lx from hw\n", cnum, val));
		} else {
#ifdef CONFIG_SMP
			int cpu;
			/*
			 * on an SMP system, the context may still be live on another
			 * CPU, so we need to fetch it before proceeding with the read.
			 * This call will only be made once for the whole loop because
			 * ctx_last_cpu then becomes -1.
			 *
			 * We cannot reuse ctx_last_cpu as it may change before we get to the
			 * actual IPI call. In this case, we will do the call for nothing but
			 * there is no way around it.
			 * The receiving side will simply do nothing.
			 */
			cpu = atomic_read(&ctx->ctx_last_cpu);
			if (cpu != -1) {
				DBprintk(("must fetch on CPU%d for [%d]\n", cpu, task->pid));
				pfm_fetch_regs(cpu, task, ctx);
			}
#endif
			/* context has been saved */
			val = th->pmd[cnum];
		}
		if (PMD_IS_COUNTING(cnum)) {
			/*
			 * XXX: need to check for overflow
			 */
			val &= pmu_conf.perf_ovfl_val;
			val += ctx_val = ctx->ctx_soft_pmds[cnum].val;
		}

		tmp.reg_value = val;

		/*
		 * execute read checker, if any
		 */
		if (PMD_RD_FUNC(cnum)) {
			ret = PMD_RD_FUNC(cnum)(task, cnum, &tmp.reg_value, regs);
		}

		PFM_REG_RETFLAG_SET(tmp.reg_flags, ret);

		DBprintk(("read pmd[%u] ret=%d value=0x%lx pmc=0x%lx\n",
					cnum, ret, val, ia64_get_pmc(cnum)));

		if (copy_to_user(req, &tmp, sizeof(tmp))) return -EFAULT;
	}
	return 0;

abort_mission:
	PFM_REG_RETFLAG_SET(tmp.reg_flags, PFM_REG_RETFL_EINVAL);
	/*
	 * XXX: if this fails, we stick with the original failure, flag not updated!
	 */
	copy_to_user(req, &tmp, sizeof(tmp));
	return -EINVAL;
}

#ifdef PFM_PMU_USES_DBR
/*
 * Only call this function when a process is trying to
 * write the debug registers (reading is always allowed)
 */
int
pfm_use_debug_registers(struct task_struct *task)
{
	pfm_context_t *ctx = task->thread.pfm_context;
	int ret = 0;

	DBprintk(("called for [%d]\n", task->pid));

	/*
	 * do it only once
	 */
	if (task->thread.flags & IA64_THREAD_DBG_VALID) return 0;

	/*
	 * Even on SMP, we do not need to use an atomic here because
	 * the only way in is via ptrace() and this is possible only when the
	 * process is stopped. Even in the case where the ctxsw out is not totally
	 * completed by the time we come here, there is no way the 'stopped' process
	 * could be in the middle of fiddling with the pfm_write_ibr_dbr() routine.
	 * So this is always safe.
	 */
	if (ctx && ctx->ctx_fl_using_dbreg == 1) return -1;

	LOCK_PFS();

	/*
	 * We cannot allow setting breakpoints when system wide monitoring
	 * sessions are using the debug registers.
	 */
	if (pfm_sessions.pfs_sys_use_dbregs > 0)
		ret = -1;
	else
		pfm_sessions.pfs_ptrace_use_dbregs++;

	DBprintk(("ptrace_use_dbregs=%lu  sys_use_dbregs=%lu by [%d] ret = %d\n",
		  pfm_sessions.pfs_ptrace_use_dbregs,
		  pfm_sessions.pfs_sys_use_dbregs,
		  task->pid, ret));

	UNLOCK_PFS();

	return ret;
}

/*
 * This function is called for every task that exits with the
 * IA64_THREAD_DBG_VALID flag set. This indicates a task which was
 * able to use the debug registers for debugging purposes via
 * ptrace(). Therefore we know it was not using them for
 * performance monitoring, so we only decrement the number
 * of "ptraced" debug register users to keep the count up to date.
 */
int
pfm_release_debug_registers(struct task_struct *task)
{
	int ret;

	LOCK_PFS();
	if (pfm_sessions.pfs_ptrace_use_dbregs == 0) {
		printk("perfmon: invalid release for [%d] ptrace_use_dbregs=0\n", task->pid);
		ret = -1;
	} else {
		pfm_sessions.pfs_ptrace_use_dbregs--;
		ret = 0;
	}
	UNLOCK_PFS();

	return ret;
}

#else /* !PFM_PMU_USES_DBR */
/*
 * In case the PMU does not use the debug registers, these two functions are nops.
 * The first function is called from arch/ia64/kernel/ptrace.c.
 * The second function is called from arch/ia64/kernel/process.c.
 */
int
pfm_use_debug_registers(struct task_struct *task)
{
	return 0;
}

int
pfm_release_debug_registers(struct task_struct *task)
{
	return 0;
}
#endif /* PFM_PMU_USES_DBR */

static int
pfm_restart(struct task_struct *task, pfm_context_t *ctx, void *arg, int count,
	    struct pt_regs *regs)
{
	void *sem = &ctx->ctx_restart_sem;

	/*
	 * Cannot do anything before PMU is enabled
	 */
	if (!CTX_IS_ENABLED(ctx)) return -EINVAL;

	if (task == current) {
		DBprintk(("restarting self %d frozen=%d \n", current->pid, ctx->ctx_fl_frozen));

		pfm_reset_regs(ctx, ctx->ctx_ovfl_regs, PFM_RELOAD_LONG_RESET);

		ctx->ctx_ovfl_regs[0] = 0UL;

		/*
		 * We ignore block/don't block because we never block
		 * for a self-monitoring process.
		 */
		ctx->ctx_fl_frozen = 0;

		if (CTX_HAS_SMPL(ctx)) {
			ctx->ctx_psb->psb_hdr->hdr_count = 0;
			ctx->ctx_psb->psb_index = 0;
		}

		/* simply unfreeze */
		ia64_set_pmc(0, 0);
		ia64_srlz_d();

		return 0;
	}
	/* restart on another task */

	/*
	 * if blocking, then post the semaphore.
	 * if non-blocking, then we ensure that the task will go into
	 * pfm_overflow_must_block() before returning to user mode.
	 * We cannot explicitly reset another task, it MUST always
	 * be done by the task itself. This works for system wide because
	 * the tool that is controlling the session is doing "self-monitoring".
	 *
	 * XXX: what if the task never goes back to user?
	 */
	if (CTX_OVFL_NOBLOCK(ctx) == 0) {
		DBprintk(("unblocking %d \n", task->pid));
		up(sem);
	} else {
		task->thread.pfm_ovfl_block_reset = 1;
	}
#if 0
	/*
	 * in case of non-blocking mode, then it's just a matter of
	 * resetting the sampling buffer (if any) index. The PMU
	 * is already active.
	 */

	/*
	 * must reset the header count first
	 */
	if (CTX_HAS_SMPL(ctx)) {
		DBprintk(("resetting sampling indexes for %d \n", task->pid));
		ctx->ctx_psb->psb_hdr->hdr_count = 0;
		ctx->ctx_psb->psb_index = 0;
	}
#endif
	return 0;
}

#ifndef CONFIG_SMP
/*
 * On UP kernels, we do not need to constantly set the psr.pp bit
 * when a task is scheduled. The psr.pp bit can only be changed in
 * the kernel because of a user request. Given we are on a UP non-preemptive
 * kernel, we know that no other task is running, so we can simply update their
 * psr.pp from their saved state. There is thus no impact on the context switch
 * code compared to the SMP case.
 */
static void
pfm_tasklist_toggle_pp(unsigned int val)
{
	struct task_struct *p;
	struct pt_regs *regs;

	DBprintk(("invoked by [%d] pp=%u\n", current->pid, val));

	read_lock(&tasklist_lock);

	for_each_task(p) {
		regs = (struct pt_regs *)((unsigned long) p + IA64_STK_OFFSET);

		/*
		 * position on pt_regs saved on stack on 1st entry into the kernel
		 */
		regs--;

		/*
		 * update psr.pp
		 */
		ia64_psr(regs)->pp = val;
	}

	read_unlock(&tasklist_lock);
}
#endif

static int
pfm_stop(struct task_struct *task, pfm_context_t *ctx, void *arg, int count,
	 struct pt_regs *regs)
{
	/* we don't quite support this right now */
	if (task != current) return -EINVAL;

	/*
	 * Cannot do anything before PMU is enabled
	 */
	if (!CTX_IS_ENABLED(ctx)) return -EINVAL;

	DBprintk(("[%d] fl_system=%d owner=%p current=%p\n",
				current->pid,
				ctx->ctx_fl_system, PMU_OWNER(),
				current));

	/* simply stop monitoring but not the PMU */
	if (ctx->ctx_fl_system) {

		/* disable dcr pp */
		ia64_set_dcr(ia64_get_dcr() & ~IA64_DCR_PP);

		/* stop monitoring */
		__asm__ __volatile__ ("rsm psr.pp;;"::: "memory");

		ia64_srlz_i();

#ifdef CONFIG_SMP
		local_cpu_data->pfm_dcr_pp = 0;
#else
		pfm_tasklist_toggle_pp(0);
#endif
		ia64_psr(regs)->pp = 0;

	} else {

		/* stop monitoring */
		__asm__ __volatile__ ("rum psr.up;;"::: "memory");

		ia64_srlz_i();

		/*
		 * clear user level psr.up
		 */
		ia64_psr(regs)->up = 0;
	}
	return 0;
}

static int
pfm_disable(struct task_struct *task, pfm_context_t *ctx, void *arg, int count,
	    struct pt_regs *regs)
{
	/* we don't quite support this right now */
	if (task != current) return -EINVAL;

	if (!CTX_IS_ENABLED(ctx)) return -EINVAL;

	/*
	 * stop monitoring, freeze PMU, and save state in context.
	 * this call will clear IA64_THREAD_PM_VALID for per-task sessions.
	 */
	pfm_flush_regs(task);

	if (ctx->ctx_fl_system) {
		ia64_psr(regs)->pp = 0;
	} else {
		ia64_psr(regs)->up = 0;
	}
	/*
	 * goes back to default behavior: no user level control.
	 * no need to change the live psr.sp because it is useless at the kernel level
	 */
	ia64_psr(regs)->sp = 1;

	DBprintk(("enabling psr.sp for [%d]\n", current->pid));

	ctx->ctx_flags.state = PFM_CTX_DISABLED;

	return 0;
}

static int
pfm_context_destroy(struct task_struct *task, pfm_context_t *ctx, void *arg, int count,
	 struct pt_regs *regs)
{
	/* we don't quite support this right now */
	if (task != current) return -EINVAL;

	/*
	 * if the context was never enabled, then there is not much
	 * to do
	 */
	if (!CTX_IS_ENABLED(ctx)) goto skipped_stop;

	/*
	 * Disable context: stop monitoring, flush regs to software state (useless here),
	 * and freeze PMU.
	 *
	 * IA64_THREAD_PM_VALID is cleared by pfm_flush_regs() called from pfm_disable()
	 */
	pfm_disable(task, ctx, arg, count, regs);

	if (ctx->ctx_fl_system) {
		ia64_psr(regs)->pp = 0;
	} else {
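
A note on the counting-PMD read path in pfm_read_pmds() above: the full 64-bit count is assembled from two pieces. The live hardware register only implements the bits covered by pmu_conf.perf_ovfl_val; everything above that is carried in the software counter ctx->ctx_soft_pmds[cnum].val. Below is a minimal standalone sketch of that arithmetic; the names and the 47-bit implemented width are assumptions for illustration, not driver constants:

#include <stdio.h>

/*
 * Toy model of the counting-PMD reconstruction done in pfm_read_pmds().
 * ovfl_mask plays the role of pmu_conf.perf_ovfl_val and soft_val the
 * role of ctx->ctx_soft_pmds[cnum].val; both names are hypothetical.
 */
static unsigned long
pmd_full_value(unsigned long hw_pmd, unsigned long soft_val,
	       unsigned long ovfl_mask)
{
	hw_pmd &= ovfl_mask;		/* keep only the implemented hardware bits */
	return soft_val + hw_pmd;	/* add the software-maintained upper part */
}

int main(void)
{
	unsigned long ovfl_mask = (1UL << 47) - 1;	/* assumed counter width */
	unsigned long soft = (ovfl_mask + 1) * 2;	/* two overflows accumulated */
	unsigned long hw   = 12345;			/* current live count */

	printf("full count = %lu\n", pmd_full_value(hw, soft, ovfl_mask));
	return 0;
}

Splitting the count this way keeps the common read cheap (one register read plus an add) while letting the virtual counter survive any number of hardware overflows.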
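
Similarly, pfm_use_debug_registers() and pfm_release_debug_registers() enforce mutual exclusion between ptrace() writers of the IA-64 debug registers and system-wide monitoring sessions that use them, via two counters updated under LOCK_PFS()/UNLOCK_PFS(). A toy user-space model of that accounting (hypothetical names, locking omitted):

#include <stdio.h>

/* models pfm_sessions.pfs_ptrace_use_dbregs / pfs_sys_use_dbregs */
struct sessions {
	unsigned long ptrace_use_dbregs;
	unsigned long sys_use_dbregs;
};

/* claim the debug registers for ptrace(), as pfm_use_debug_registers() does */
static int use_debug_registers(struct sessions *s)
{
	if (s->sys_use_dbregs > 0)
		return -1;	/* held by a system-wide monitoring session */
	s->ptrace_use_dbregs++;
	return 0;
}

/* drop one ptrace() user, as pfm_release_debug_registers() does */
static int release_debug_registers(struct sessions *s)
{
	if (s->ptrace_use_dbregs == 0)
		return -1;	/* unbalanced release */
	s->ptrace_use_dbregs--;
	return 0;
}

int main(void)
{
	struct sessions s = { 0, 1 };	/* one system-wide session holds the dbregs */

	printf("claim: %d\n", use_debug_registers(&s));		/* -1: refused */
	s.sys_use_dbregs = 0;
	printf("claim: %d\n", use_debug_registers(&s));		/*  0: granted */
	printf("release: %d\n", release_debug_registers(&s));	/*  0 */
	return 0;
}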
