
📄 perfmon.c

📁 xen virtual machine source code package
💻 C
📖 Page 1 of 5
}
#ifndef XEN
/*
 * removes virtual mapping of the sampling buffer.
 * IMPORTANT: cannot be called with interrupts disabled, e.g. inside
 * a PROTECT_CTX() section.
 */
static int
pfm_remove_smpl_mapping(struct task_struct *task, void *vaddr, unsigned long size)
{
	int r;

	/* sanity checks */
	if (task->mm == NULL || size == 0UL || vaddr == NULL) {
		printk(KERN_ERR "perfmon: pfm_remove_smpl_mapping [%d] invalid context mm=%p\n", task->pid, task->mm);
		return -EINVAL;
	}

	DPRINT(("smpl_vaddr=%p size=%lu\n", vaddr, size));

	/*
	 * does the actual unmapping
	 */
	down_write(&task->mm->mmap_sem);

	DPRINT(("down_write done smpl_vaddr=%p size=%lu\n", vaddr, size));

	r = pfm_do_munmap(task->mm, (unsigned long)vaddr, size, 0);

	up_write(&task->mm->mmap_sem);
	if (r != 0) {
		printk(KERN_ERR "perfmon: [%d] unable to unmap sampling buffer @%p size=%lu\n", task->pid, vaddr, size);
	}

	DPRINT(("do_unmap(%p, %lu)=%d\n", vaddr, size, r));

	return 0;
}
#endif

/*
 * free actual physical storage used by sampling buffer
 */
#if 0
static int
pfm_free_smpl_buffer(pfm_context_t *ctx)
{
	pfm_buffer_fmt_t *fmt;

	if (ctx->ctx_smpl_hdr == NULL) goto invalid_free;

	/*
	 * we won't use the buffer format anymore
	 */
	fmt = ctx->ctx_buf_fmt;

	DPRINT(("sampling buffer @%p size %lu vaddr=%p\n",
		ctx->ctx_smpl_hdr,
		ctx->ctx_smpl_size,
		ctx->ctx_smpl_vaddr));

	pfm_buf_fmt_exit(fmt, current, NULL, NULL);

	/*
	 * free the buffer
	 */
	pfm_rvfree(ctx->ctx_smpl_hdr, ctx->ctx_smpl_size);

	ctx->ctx_smpl_hdr  = NULL;
	ctx->ctx_smpl_size = 0UL;

	return 0;

invalid_free:
	printk(KERN_ERR "perfmon: pfm_free_smpl_buffer [%d] no buffer\n", current->pid);
	return -EINVAL;
}
#endif

static inline void
pfm_exit_smpl_buffer(pfm_buffer_fmt_t *fmt)
{
	if (fmt == NULL) return;

	pfm_buf_fmt_exit(fmt, current, NULL, NULL);
}

#ifndef XEN
/*
 * pfmfs should _never_ be mounted by userland - too much of security hassle,
 * no real gain from having the whole whorehouse mounted. So we don't need
 * any operations on the root directory. However, we need a non-trivial
 * d_name - pfm: will go nicely and kill the special-casing in procfs.
 */
static struct vfsmount *pfmfs_mnt;

static int __init
init_pfm_fs(void)
{
	int err = register_filesystem(&pfm_fs_type);
	if (!err) {
		pfmfs_mnt = kern_mount(&pfm_fs_type);
		err = PTR_ERR(pfmfs_mnt);
		if (IS_ERR(pfmfs_mnt))
			unregister_filesystem(&pfm_fs_type);
		else
			err = 0;
	}
	return err;
}

static void __exit
exit_pfm_fs(void)
{
	unregister_filesystem(&pfm_fs_type);
	mntput(pfmfs_mnt);
}

static ssize_t
pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
{
	pfm_context_t *ctx;
	pfm_msg_t *msg;
	ssize_t ret;
	unsigned long flags;
	DECLARE_WAITQUEUE(wait, current);

	if (PFM_IS_FILE(filp) == 0) {
		printk(KERN_ERR "perfmon: pfm_read: bad magic [%d]\n", current->pid);
		return -EINVAL;
	}

	ctx = (pfm_context_t *)filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_read: NULL ctx [%d]\n", current->pid);
		return -EINVAL;
	}

	/*
	 * check even when there is no message
	 */
	if (size < sizeof(pfm_msg_t)) {
		DPRINT(("message is too small ctx=%p (>=%ld)\n", ctx, sizeof(pfm_msg_t)));
		return -EINVAL;
	}

	PROTECT_CTX(ctx, flags);

	/*
	 * put ourselves on the wait queue
	 */
	add_wait_queue(&ctx->ctx_msgq_wait, &wait);

	for (;;) {
		/*
		 * check wait queue
		 */
		set_current_state(TASK_INTERRUPTIBLE);

		DPRINT(("head=%d tail=%d\n", ctx->ctx_msgq_head, ctx->ctx_msgq_tail));

		ret = 0;
		if (PFM_CTXQ_EMPTY(ctx) == 0) break;

		UNPROTECT_CTX(ctx, flags);

		/*
		 * check non-blocking read
		 */
		ret = -EAGAIN;
		if (filp->f_flags & O_NONBLOCK) break;

		/*
		 * check pending signals
		 */
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		/*
		 * no message, so wait
		 */
		schedule();

		PROTECT_CTX(ctx, flags);
	}
	DPRINT(("[%d] back to running ret=%ld\n", current->pid, ret));
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&ctx->ctx_msgq_wait, &wait);

	if (ret < 0) goto abort;

	ret = -EINVAL;
	msg = pfm_get_next_msg(ctx);
	if (msg == NULL) {
		printk(KERN_ERR "perfmon: pfm_read no msg for ctx=%p [%d]\n", ctx, current->pid);
		goto abort_locked;
	}

	DPRINT(("fd=%d type=%d\n", msg->pfm_gen_msg.msg_ctx_fd, msg->pfm_gen_msg.msg_type));

	ret = -EFAULT;
	if (copy_to_user(buf, msg, sizeof(pfm_msg_t)) == 0) ret = sizeof(pfm_msg_t);

abort_locked:
	UNPROTECT_CTX(ctx, flags);
abort:
	return ret;
}

static ssize_t
pfm_write(struct file *file, const char __user *ubuf,
	  size_t size, loff_t *ppos)
{
	DPRINT(("pfm_write called\n"));
	return -EINVAL;
}

static unsigned int
pfm_poll(struct file *filp, poll_table *wait)
{
	pfm_context_t *ctx;
	unsigned long flags;
	unsigned int mask = 0;

	if (PFM_IS_FILE(filp) == 0) {
		printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", current->pid);
		return 0;
	}

	ctx = (pfm_context_t *)filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_poll: NULL ctx [%d]\n", current->pid);
		return 0;
	}

	DPRINT(("pfm_poll ctx_fd=%d before poll_wait\n", ctx->ctx_fd));

	poll_wait(filp, &ctx->ctx_msgq_wait, wait);

	PROTECT_CTX(ctx, flags);

	if (PFM_CTXQ_EMPTY(ctx) == 0)
		mask = POLLIN | POLLRDNORM;

	UNPROTECT_CTX(ctx, flags);

	DPRINT(("pfm_poll ctx_fd=%d mask=0x%x\n", ctx->ctx_fd, mask));

	return mask;
}

static int
pfm_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
{
	DPRINT(("pfm_ioctl called\n"));
	return -EINVAL;
}

/*
 * interrupt cannot be masked when coming here
 */
static inline int
pfm_do_fasync(int fd, struct file *filp, pfm_context_t *ctx, int on)
{
	int ret;

	ret = fasync_helper(fd, filp, on, &ctx->ctx_async_queue);

	DPRINT(("pfm_fasync called by [%d] on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
		current->pid,
		fd,
		on,
		ctx->ctx_async_queue, ret));

	return ret;
}

static int
pfm_fasync(int fd, struct file *filp, int on)
{
	pfm_context_t *ctx;
	int ret;

	if (PFM_IS_FILE(filp) == 0) {
		printk(KERN_ERR "perfmon: pfm_fasync bad magic [%d]\n", current->pid);
		return -EBADF;
	}

	ctx = (pfm_context_t *)filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_fasync NULL ctx [%d]\n", current->pid);
		return -EBADF;
	}
	/*
	 * we cannot mask interrupts during this call because this may
	 * go to sleep if memory is not readily available.
	 *
	 * We are protected from the context disappearing by the get_fd()/put_fd()
	 * done in caller. Serialization of this function is ensured by caller.
	 */
	ret = pfm_do_fasync(fd, filp, ctx, on);

	DPRINT(("pfm_fasync called on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
		fd,
		on,
		ctx->ctx_async_queue, ret));

	return ret;
}

#ifdef CONFIG_SMP
/*
 * this function is exclusively called from pfm_close().
 * The context is not protected at that time, nor are interrupts
 * on the remote CPU. That's necessary to avoid deadlocks.
 */
static void
pfm_syswide_force_stop(void *info)
{
	pfm_context_t   *ctx = (pfm_context_t *)info;
	struct pt_regs *regs = task_pt_regs(current);
	struct task_struct *owner;
	unsigned long flags;
	int ret;

	if (ctx->ctx_cpu != smp_processor_id()) {
		printk(KERN_ERR "perfmon: pfm_syswide_force_stop for CPU%d but on CPU%d\n",
			ctx->ctx_cpu,
			smp_processor_id());
		return;
	}
	owner = GET_PMU_OWNER();
	if (owner != ctx->ctx_task) {
		printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected owner [%d] instead of [%d]\n",
			smp_processor_id(),
			owner->pid, ctx->ctx_task->pid);
		return;
	}

	if (GET_PMU_CTX() != ctx) {
		printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected ctx %p instead of %p\n",
			smp_processor_id(),
			GET_PMU_CTX(), ctx);
		return;
	}

	DPRINT(("on CPU%d forcing system wide stop for [%d]\n", smp_processor_id(), ctx->ctx_task->pid));

	/*
	 * the context is already protected in pfm_close(), we simply
	 * need to mask interrupts to avoid a PMU interrupt race on
	 * this CPU
	 */
	local_irq_save(flags);

	ret = pfm_context_unload(ctx, NULL, 0, regs);
	if (ret) {
		DPRINT(("context_unload returned %d\n", ret));
	}

	/*
	 * unmask interrupts, PMU interrupts are now spurious here
	 */
	local_irq_restore(flags);
}

static void
pfm_syswide_cleanup_other_cpu(pfm_context_t *ctx)
{
	int ret;

	DPRINT(("calling CPU%d for cleanup\n", ctx->ctx_cpu));
	ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 0, 1);
	DPRINT(("called CPU%d for cleanup ret=%d\n", ctx->ctx_cpu, ret));
}
#endif /* CONFIG_SMP */

/*
 * called for each close(). Partially free resources.
 * When caller is self-monitoring, the context is unloaded.
 */
static int
pfm_flush(struct file *filp)
{
	pfm_context_t *ctx;
	struct task_struct *task;
	struct pt_regs *regs;
	unsigned long flags;
	unsigned long smpl_buf_size = 0UL;
	void *smpl_buf_vaddr = NULL;
	int state, is_system;

	if (PFM_IS_FILE(filp) == 0) {
		DPRINT(("bad magic for\n"));
		return -EBADF;
	}

	ctx = (pfm_context_t *)filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_flush: NULL ctx [%d]\n", current->pid);
		return -EBADF;
	}

	/*
	 * remove our file from the async queue, if we use this mode.
	 * This can be done without the context being protected. We come
	 * here when the context has become unreachable by other tasks.
	 *
	 * We may still have active monitoring at this point and we may
	 * end up in pfm_overflow_handler(). However, fasync_helper()
	 * operates with interrupts disabled and it cleans up the
	 * queue. If the PMU handler is called prior to entering
	 * fasync_helper() then it will send a signal. If it is
	 * invoked after, it will find an empty queue and no
	 * signal will be sent. In both cases, we are safe
	 */
	if (filp->f_flags & FASYNC) {
		DPRINT(("cleaning up async_queue=%p\n", ctx->ctx_async_queue));
		pfm_do_fasync(-1, filp, ctx, 0);
	}

	PROTECT_CTX(ctx, flags);

	state     = ctx->ctx_state;
	is_system = ctx->ctx_fl_system;

	task = PFM_CTX_TASK(ctx);
	regs = task_pt_regs(task);

	DPRINT(("ctx_state=%d is_current=%d\n",
		state,
		task == current ? 1 : 0));

	/*
	 * if state == UNLOADED, then task is NULL
	 */

	/*
	 * we must stop and unload because we are losing access to the context.
	 */
	if (task == current) {
#ifdef CONFIG_SMP
		/*
		 * the task IS the owner but it migrated to another CPU: that's bad
		 * but we must handle this cleanly. Unfortunately, the kernel does
		 * not provide a mechanism to block migration (while the context is loaded).
		 *
		 * We need to release the resource on the ORIGINAL cpu.
		 */
		if (is_system && ctx->ctx_cpu != smp_processor_id()) {

			DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
			/*
			 * keep context protected but unmask interrupt for IPI
			 */
			local_irq_restore(flags);

			pfm_syswide_cleanup_other_cpu(ctx);

			/*
			 * restore interrupt masking
			 */
			local_irq_save(flags);

			/*
			 * context is unloaded at this point
			 */
		} else
#endif /* CONFIG_SMP */
		{
			DPRINT(("forcing unload\n"));
			/*
			 * stop and unload, returning with state UNLOADED
			 * and session unreserved.
			 */
			pfm_context_unload(ctx, NULL, 0, regs);

			DPRINT(("ctx_state=%d\n", ctx->ctx_state));
		}
	}

	/*
	 * remove virtual mapping, if any, for the calling task.
	 * cannot reset ctx field until last user is calling close().
	 *
	 * ctx_smpl_vaddr must never be cleared because it is needed
	 * by every task with access to the context
	 *
	 * When called from do_exit(), the mm context is gone already, therefore
	 * mm is NULL, i.e., the VMA is already gone and we do not have to
	 * do anything here
	 */
	if (ctx->ctx_smpl_vaddr && current->mm) {
		smpl_buf_vaddr = ctx->ctx_smpl_vaddr;
		smpl_buf_size  = ctx->ctx_smpl_size;
	}

	UNPROTECT_CTX(ctx, flags);

	/*
	 * if there was a mapping, then we systematically remove it
	 * at this point. Cannot be done inside critical section
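(The excerpt above stops mid-comment; the rest of pfm_flush() continues on the following pages.)

For readers of this excerpt: pfm_read() above implements the read() file operation on a perfmon context descriptor. It blocks (unless O_NONBLOCK is set) until a pfm_msg_t is queued, then copies exactly one message to userspace, and it rejects reads shorter than sizeof(pfm_msg_t). The sketch below shows what a matching userland consumer might look like. It is illustrative only and not part of this file: it assumes a context fd already created through the ia64 perfmon interface (e.g. perfmonctl() with PFM_CREATE_CONTEXT), and it assumes the pfm_msg_t layout and the PFM_MSG_OVFL/PFM_MSG_END constants from <asm/perfmon.h>.

/*
 * Illustrative sketch only (not part of perfmon.c): drain the message
 * queue that pfm_read() services. ctx_fd is assumed to be a perfmon
 * context file descriptor obtained elsewhere; pfm_msg_t and the
 * PFM_MSG_* constants are assumed to come from <asm/perfmon.h>.
 */
#include <stdio.h>
#include <unistd.h>
#include <asm/perfmon.h>

static int drain_pfm_messages(int ctx_fd)
{
	pfm_msg_t msg;
	ssize_t n;

	for (;;) {
		/* pfm_read() returns -EINVAL for buffers smaller than pfm_msg_t */
		n = read(ctx_fd, &msg, sizeof(msg));
		if (n != sizeof(msg))
			return -1;	/* EAGAIN, EINTR, or other error */

		switch (msg.pfm_gen_msg.msg_type) {
		case PFM_MSG_OVFL:	/* sampling buffer overflow notification */
			printf("overflow message for ctx fd %d\n",
			       msg.pfm_gen_msg.msg_ctx_fd);
			break;
		case PFM_MSG_END:	/* monitored task ended: stop reading */
			return 0;
		default:
			printf("unexpected message type %d\n",
			       msg.pfm_gen_msg.msg_type);
			break;
		}
	}
}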
