
perfmon.c

This file is part of the rt_linux package (C source; page 1 of 5).
		if (cpu_is_online(cpu) == 0) {
			DBprintk(("CPU%d is not online\n", cpu));
			return -EINVAL;
		}

		/*
		 * check for pre-existing pinning, if conflicting reject
		 */
		if (task->cpus_allowed != ~0UL && (task->cpus_allowed & (1UL<<cpu)) == 0) {
			DBprintk(("[%d] pinned on 0x%lx, mask for CPU%d\n", task->pid,
				task->cpus_allowed, cpu));
			return -EINVAL;
		}
	} else {
		/*
		 * must provide a target for the signal in blocking mode even when
		 * no counter is configured with PFM_FL_REG_OVFL_NOTIFY
		 */
		if ((ctx_flags & PFM_FL_NOTIFY_BLOCK) && pfx->ctx_notify_pid == 0) {
			DBprintk(("must have notify_pid when blocking for [%d]\n", task->pid));
			return -EINVAL;
		}
#if 0
		if ((ctx_flags & PFM_FL_NOTIFY_BLOCK) && pfx->ctx_notify_pid == task->pid) {
			DBprintk(("cannot notify self when blocking for [%d]\n", task->pid));
			return -EINVAL;
		}
#endif
	}
	/* probably more to add here */
	return 0;
}

static int
pfm_context_create(struct task_struct *task, pfm_context_t *ctx, void *req, int count,
		   struct pt_regs *regs)
{
	pfarg_context_t tmp;
	void *uaddr = NULL;
	int ret, cpu = 0;
	int ctx_flags;
	pid_t notify_pid;

	/* a context has already been defined */
	if (ctx) return -EBUSY;

	/*
	 * not yet supported
	 */
	if (task != current) return -EINVAL;

	if (copy_from_user(&tmp, req, sizeof(tmp))) return -EFAULT;

	ret = pfx_is_sane(task, &tmp);
	if (ret < 0) return ret;

	ctx_flags = tmp.ctx_flags;

	ret = -EBUSY;

	LOCK_PFS();

	if (ctx_flags & PFM_FL_SYSTEM_WIDE) {

		/* at this point, we know there is at least one bit set */
		cpu = ffz(~tmp.ctx_cpu_mask);

		DBprintk(("requesting CPU%d currently on CPU%d\n", cpu, smp_processor_id()));

		if (pfm_sessions.pfs_task_sessions > 0) {
			DBprintk(("system wide not possible, task_sessions=%ld\n",
				pfm_sessions.pfs_task_sessions));
			goto abort;
		}

		if (pfm_sessions.pfs_sys_session[cpu]) {
			DBprintk(("system wide not possible, conflicting session [%d] on CPU%d\n",
				pfm_sessions.pfs_sys_session[cpu]->pid, cpu));
			goto abort;
		}
		pfm_sessions.pfs_sys_session[cpu] = task;

		/*
		 * count the number of system wide sessions
		 */
		pfm_sessions.pfs_sys_sessions++;

	} else if (pfm_sessions.pfs_sys_sessions == 0) {
		pfm_sessions.pfs_task_sessions++;
	} else {
		/* no per-process monitoring while there is a system wide session */
		goto abort;
	}

	UNLOCK_PFS();

	ret = -ENOMEM;

	ctx = pfm_context_alloc();
	if (!ctx) goto error;

	/* record the creator (important for inheritance) */
	ctx->ctx_owner = current;

	notify_pid = tmp.ctx_notify_pid;

	spin_lock_init(&ctx->ctx_lock);

	if (notify_pid == current->pid) {

		ctx->ctx_notify_task = task = current;
		current->thread.pfm_context = ctx;

	} else if (notify_pid != 0) {
		struct task_struct *notify_task;

		read_lock(&tasklist_lock);

		notify_task = find_task_by_pid(notify_pid);

		if (notify_task) {

			ret = -EPERM;

			/*
			 * check if we can send this task a signal
			 */
			if (pfm_bad_permissions(notify_task)) goto buffer_error;

			/*
			 * make visible
			 * must be done inside critical section
			 *
			 * if the initialization does not go through it is still
			 * okay because the child will do the scan for nothing, which
			 * won't hurt.
			 */
			current->thread.pfm_context = ctx;

			/*
			 * will cause the task to check on exit for monitored
			 * processes that would notify it. see release_thread().
			 * Note: the scan MUST be done in release_thread(), once the
			 * task has been detached from the tasklist, otherwise you are
			 * exposed to race conditions.
			 */
			ctx->ctx_notify_task = notify_task;
			atomic_add(1, &ctx->ctx_notify_task->thread.pfm_notifiers_check);
		}
		read_unlock(&tasklist_lock);
	}

	/*
	 * notification process does not exist
	 */
	if (notify_pid != 0 && ctx->ctx_notify_task == NULL) {
		ret = -EINVAL;
		goto buffer_error;
	}

	if (tmp.ctx_smpl_entries) {
		DBprintk(("sampling entries=%lu\n", tmp.ctx_smpl_entries));

		ret = pfm_smpl_buffer_alloc(ctx, tmp.ctx_smpl_regs,
					    tmp.ctx_smpl_entries, &uaddr);
		if (ret < 0) goto buffer_error;

		tmp.ctx_smpl_vaddr = uaddr;
	}

	/* initialization of context's flags */
	ctx->ctx_fl_inherit   = ctx_flags & PFM_FL_INHERIT_MASK;
	ctx->ctx_fl_block     = (ctx_flags & PFM_FL_NOTIFY_BLOCK) ? 1 : 0;
	ctx->ctx_fl_system    = (ctx_flags & PFM_FL_SYSTEM_WIDE) ? 1 : 0;
	ctx->ctx_fl_frozen    = 0;
	/*
	 * setting this flag to 0 here means that the creator or the task to which
	 * the context is being attached is granted access. Given that a context
	 * can only be created for the calling process, this in effect only allows
	 * the creator to access the context. See pfm_protect() for more.
	 */
	ctx->ctx_fl_protected = 0;

	/* for system wide mode only (only 1 bit set) */
	ctx->ctx_cpu = cpu;

	atomic_set(&ctx->ctx_last_cpu, -1); /* SMP only, means no CPU */

	/* may be redundant with memset() but at least it's easier to remember */
	atomic_set(&ctx->ctx_saving_in_progress, 0);
	atomic_set(&ctx->ctx_is_busy, 0);

	sema_init(&ctx->ctx_restart_sem, 0); /* init this semaphore to locked */

	if (copy_to_user(req, &tmp, sizeof(tmp))) {
		ret = -EFAULT;
		goto buffer_error;
	}

	DBprintk(("context=%p, pid=%d notify_task=%p\n",
			(void *)ctx, task->pid, ctx->ctx_notify_task));

	DBprintk(("context=%p, pid=%d flags=0x%x inherit=%d block=%d system=%d\n",
			(void *)ctx, task->pid, ctx_flags, ctx->ctx_fl_inherit,
			ctx->ctx_fl_block, ctx->ctx_fl_system));

	/*
	 * when no notification is required, we can make this visible at the last moment
	 */
	if (notify_pid == 0) task->thread.pfm_context = ctx;

	/*
	 * pin task to CPU and force reschedule on exit to ensure
	 * that when back at user level the task runs on the designated
	 * CPU.
	 */
	if (ctx->ctx_fl_system) {
		ctx->ctx_saved_cpus_allowed = task->cpus_allowed;
		task->cpus_allowed = 1UL << cpu;
		task->need_resched = 1;
		DBprintk(("[%d] rescheduled allowed=0x%lx\n", task->pid, task->cpus_allowed));
	}

	return 0;

buffer_error:
	pfm_context_free(ctx);
error:
	/*
	 * undo session reservation
	 */
	LOCK_PFS();

	if (ctx_flags & PFM_FL_SYSTEM_WIDE) {
		pfm_sessions.pfs_sys_session[cpu] = NULL;
		pfm_sessions.pfs_sys_sessions--;
	} else {
		pfm_sessions.pfs_task_sessions--;
	}
abort:
	UNLOCK_PFS();

	return ret;
}

static void
pfm_reset_regs(pfm_context_t *ctx, unsigned long *ovfl_regs, int flag)
{
	unsigned long mask = ovfl_regs[0];
	unsigned long reset_others = 0UL;
	unsigned long val;
	int i;

	DBprintk(("masks=0x%lx\n", mask));

	/*
	 * now restore reset value on sampling overflowed counters
	 */
	mask >>= PMU_FIRST_COUNTER;
	for (i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {
		if (mask & 0x1) {
			val = flag == PFM_RELOAD_LONG_RESET ?
					ctx->ctx_soft_pmds[i].long_reset :
					ctx->ctx_soft_pmds[i].short_reset;

			reset_others |= ctx->ctx_soft_pmds[i].reset_pmds[0];

			DBprintk(("[%d] %s reset soft_pmd[%d]=%lx\n",
				current->pid,
				flag == PFM_RELOAD_LONG_RESET ? "long" : "short", i, val));

			/* upper part is ignored on rval */
			pfm_write_soft_counter(ctx, i, val);
		}
	}

	/*
	 * Now take care of resetting the other registers
	 */
	for (i = 0; reset_others; i++, reset_others >>= 1) {

		if ((reset_others & 0x1) == 0) continue;

		val = flag == PFM_RELOAD_LONG_RESET ?
				ctx->ctx_soft_pmds[i].long_reset :
				ctx->ctx_soft_pmds[i].short_reset;

		if (PMD_IS_COUNTING(i)) {
			pfm_write_soft_counter(ctx, i, val);
		} else {
			ia64_set_pmd(i, val);
		}
		DBprintk(("[%d] %s reset_others pmd[%d]=%lx\n",
			current->pid,
			flag == PFM_RELOAD_LONG_RESET ? "long" : "short", i, val));
	}
	ia64_srlz_d();
	/* just in case ! */
	ctx->ctx_ovfl_regs[0] = 0UL;
}

static int
pfm_write_pmcs(struct task_struct *task, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	struct thread_struct *th = &task->thread;
	pfarg_reg_t tmp, *req = (pfarg_reg_t *)arg;
	unsigned int cnum;
	int i;
	int ret = 0, reg_retval = 0;

	/* we don't quite support this right now */
	if (task != current) return -EINVAL;

	if (!CTX_IS_ENABLED(ctx)) return -EINVAL;

	/* XXX: ctx locking may be required here */

	for (i = 0; i < count; i++, req++) {
		if (copy_from_user(&tmp, req, sizeof(tmp))) return -EFAULT;

		cnum = tmp.reg_num;

		/*
		 * we reject all unimplemented PMCs as well
		 * as attempts to modify PMC[0-3], which are used
		 * as status registers by the PMU
		 */
		if (!PMC_IS_IMPL(cnum) || cnum < 4) {
			DBprintk(("pmc[%u] is unimplemented or invalid\n", cnum));
			ret = -EINVAL;
			goto abort_mission;
		}
		/*
		 * A PMC used to configure monitors must be:
		 *	- system-wide session: privileged monitor
		 *	- per-task: user monitor
		 * any other configuration is rejected.
		 */
		if (PMC_IS_MONITOR(cnum) || PMC_IS_COUNTING(cnum)) {
			DBprintk(("pmc[%u].pm=%ld\n", cnum, PMC_PM(cnum, tmp.reg_value)));

			if (ctx->ctx_fl_system ^ PMC_PM(cnum, tmp.reg_value)) {
				DBprintk(("pmc_pm=%ld fl_system=%d\n",
					PMC_PM(cnum, tmp.reg_value), ctx->ctx_fl_system));
				ret = -EINVAL;
				goto abort_mission;
			}
		}

		if (PMC_IS_COUNTING(cnum)) {
			pfm_monitor_t *p = (pfm_monitor_t *)&tmp.reg_value;
			/*
			 * enforce generation of overflow interrupt. Necessary on all
			 * CPUs.
			 */
			p->pmc_oi = 1;

			if (tmp.reg_flags & PFM_REGFL_OVFL_NOTIFY) {
				/*
				 * must have a target for the signal
				 */
				if (ctx->ctx_notify_task == NULL) {
					DBprintk(("no notify_task && PFM_REGFL_OVFL_NOTIFY\n"));
					ret = -EINVAL;
					goto abort_mission;
				}
				ctx->ctx_soft_pmds[cnum].flags |= PFM_REGFL_OVFL_NOTIFY;
			}
			/*
			 * copy reset vector
			 */
			ctx->ctx_soft_pmds[cnum].reset_pmds[0] = tmp.reg_reset_pmds[0];
			ctx->ctx_soft_pmds[cnum].reset_pmds[1] = tmp.reg_reset_pmds[1];
			ctx->ctx_soft_pmds[cnum].reset_pmds[2] = tmp.reg_reset_pmds[2];
			ctx->ctx_soft_pmds[cnum].reset_pmds[3] = tmp.reg_reset_pmds[3];
		}
		/*
		 * execute write checker, if any
		 */
		if (PMC_WR_FUNC(cnum)) ret = PMC_WR_FUNC(cnum)(task, cnum, &tmp.reg_value, regs);
abort_mission:
		if (ret == -EINVAL) reg_retval = PFM_REG_RETFL_EINVAL;

		PFM_REG_RETFLAG_SET(tmp.reg_flags, reg_retval);

		/*
		 * update register return value, abort all if problem during copy.
		 */
		if (copy_to_user(req, &tmp, sizeof(tmp))) return -EFAULT;

		/*
		 * if there was something wrong with this register, don't touch
		 * the hardware at all and abort the write request for the others.
		 *
		 * On error, the user must sequentially scan the table; the first
		 * entry which has a return flag set is the one that caused the error.
		 */
		if (ret != 0) {
			DBprintk(("[%d] pmc[%u]=0x%lx error %d\n",
				  task->pid, cnum, tmp.reg_value, reg_retval));
			break;
		}

		/*
		 * We can proceed with this register!
		 */

		/*
		 * Needed in case the user does not initialize the equivalent
		 * PMD. Clearing is done in reset_pmu() so there is no possible
		 * leak here.
		 */
		CTX_USED_PMD(ctx, pmu_conf.pmc_desc[cnum].dep_pmd[0]);

		/*
		 * keep a copy of the pmc, used for register reload
		 */
		th->pmc[cnum] = tmp.reg_value;

		ia64_set_pmc(cnum, tmp.reg_value);

		DBprintk(("[%d] pmc[%u]=0x%lx flags=0x%x used_pmds=0x%lx\n",
			  task->pid, cnum, tmp.reg_value,
			  ctx->ctx_soft_pmds[cnum].flags,
			  ctx->ctx_used_pmds[0]));
	}

	return ret;
}

static int
pfm_write_pmds(struct task_struct *task, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	pfarg_reg_t tmp, *req = (pfarg_reg_t *)arg;
	unsigned int cnum;
	int i;
	int ret = 0, reg_retval = 0;

	/* we don't quite support this right now */
	if (task != current) return -EINVAL;

	/*
	 * Cannot do anything before PMU is enabled
	 */
	if (!CTX_IS_ENABLED(ctx)) return -EINVAL;

	/* XXX: ctx locking may be required here */

	for (i = 0; i < count; i++, req++) {
		if (copy_from_user(&tmp, req, sizeof(tmp))) return -EFAULT;

		cnum = tmp.reg_num;

		if (!PMD_IS_IMPL(cnum)) {
			ret = -EINVAL;
			goto abort_mission;
		}

		/* update virtualized (64-bit) counter */
		if (PMD_IS_COUNTING(cnum)) {
			ctx->ctx_soft_pmds[cnum].ival = tmp.reg_value;
			ctx->ctx_soft_pmds[cnum].val  = tmp.reg_value & ~pmu_conf.perf_ovfl_val;
			ctx->ctx_soft_pmds[cnum].long_reset  = tmp.reg_long_reset;
			ctx->ctx_soft_pmds[cnum].short_reset = tmp.reg_short_reset;
		}
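
The listing breaks off here, partway through pfm_write_pmds(); the rest of the file is on pages 2-5. For orientation, below is a minimal user-space sketch of how the three entry points above are typically driven. It is illustrative, not part of perfmon.c: it assumes the 2.4-era perfmon calling convention in which the system call takes a pid first (matching the task != current checks above), the command names PFM_CREATE_CONTEXT / PFM_WRITE_PMCS / PFM_WRITE_PMDS, and the pfarg_context_t / pfarg_reg_t layouts from <asm/perfmon.h>; the pfm_ctl() helper and the zero event selector are hypothetical placeholders, and the real event encoding in reg_value is CPU-model specific.

/* Hypothetical caller sketch -- not part of perfmon.c. Assumes a 2.4-era
 * IA-64 kernel with perfmon support compiled in. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/perfmon.h>	/* pfarg_context_t, pfarg_reg_t, PFM_* commands */

/* glibc may not ship a wrapper; assume the raw syscall, whose first
 * argument in this convention is a pid (here the calling process). */
static long pfm_ctl(pid_t pid, int cmd, void *arg, int narg)
{
	return syscall(__NR_perfmonctl, pid, cmd, arg, narg);
}

int main(void)
{
	pfarg_context_t ctx;
	pfarg_reg_t pmc, pmd;

	/* PFM_CREATE_CONTEXT lands in pfm_context_create() above:
	 * pfx_is_sane() vets the arguments, a pfm_context_t is allocated,
	 * and with ctx_notify_pid == 0 (from memset) the context is only
	 * made visible at the last moment, on the "no notification" path. */
	memset(&ctx, 0, sizeof(ctx));
	if (pfm_ctl(getpid(), PFM_CREATE_CONTEXT, &ctx, 1) == -1) {
		perror("PFM_CREATE_CONTEXT");
		return 1;
	}

	/* PFM_WRITE_PMCS lands in pfm_write_pmcs(). PMC[0-3] are rejected
	 * as PMU status registers, so program a monitor such as PMC4. */
	memset(&pmc, 0, sizeof(pmc));
	pmc.reg_num   = 4;
	pmc.reg_value = 0;	/* model-specific event selector goes here */
	if (pfm_ctl(getpid(), PFM_WRITE_PMCS, &pmc, 1) == -1) {
		perror("PFM_WRITE_PMCS");
		return 1;
	}

	/* PFM_WRITE_PMDS lands in pfm_write_pmds(), seeding the 64-bit
	 * software counter (ctx_soft_pmds[].val) behind hardware PMD4. */
	memset(&pmd, 0, sizeof(pmd));
	pmd.reg_num = 4;
	if (pfm_ctl(getpid(), PFM_WRITE_PMDS, &pmd, 1) == -1) {
		perror("PFM_WRITE_PMDS");
		return 1;
	}
	return 0;
}

Note how the zeroed ctx_notify_pid keeps pfm_context_create() off the notification paths entirely, and how reg_num = 4 stays clear of PMC[0-3], which pfm_write_pmcs() rejects because the PMU uses them as status registers. On error, per the comment in pfm_write_pmcs(), the caller scans the returned table sequentially: the first entry with a return flag set is the one that failed.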
