📄 perfmon.c

📁 xen virtual machine source code package
💻 C
📖 Page 1 of 5
	 * because some VM function reenables interrupts.
	 *
	 */
	if (smpl_buf_vaddr) pfm_remove_smpl_mapping(current, smpl_buf_vaddr, smpl_buf_size);

	return 0;
}
#endif

/*
 * called either on explicit close() or from exit_files().
 * Only the LAST user of the file gets to this point, i.e., it is
 * called only ONCE.
 *
 * IMPORTANT: we get called ONLY when the refcnt on the file gets to zero
 * (fput()), i.e., last task to access the file. Nobody else can access the
 * file at this point.
 *
 * When called from exit_files(), the VMA has been freed because exit_mm()
 * is executed before exit_files().
 *
 * When called from exit_files(), the current task is not yet ZOMBIE but we
 * flush the PMU state to the context.
 */
#ifndef XEN
static int
pfm_close(struct inode *inode, struct file *filp)
#else
static int
pfm_close(pfm_context_t *ctx)
#endif
{
#ifndef XEN
	pfm_context_t *ctx;
	struct task_struct *task;
	struct pt_regs *regs;
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
#endif
	unsigned long smpl_buf_size = 0UL;
	void *smpl_buf_addr = NULL;
	int free_possible = 1;
	int state, is_system;

#ifndef XEN
	DPRINT(("pfm_close called private=%p\n", filp->private_data));

	if (PFM_IS_FILE(filp) == 0) {
		DPRINT(("bad magic\n"));
		return -EBADF;
	}

	ctx = (pfm_context_t *)filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", current->pid);
		return -EBADF;
	}

	PROTECT_CTX(ctx, flags);
#else
	BUG_ON(!spin_is_locked(&ctx->ctx_lock));
#endif

	state     = ctx->ctx_state;
	is_system = ctx->ctx_fl_system;

#ifndef XEN
	task = PFM_CTX_TASK(ctx);
	regs = task_pt_regs(task);

	DPRINT(("ctx_state=%d is_current=%d\n",
		state,
		task == current ? 1 : 0));

	/*
	 * if task == current, then pfm_flush() unloaded the context
	 */
	if (state == PFM_CTX_UNLOADED) goto doit;

	/*
	 * context is loaded/masked and task != current, we need to
	 * either force an unload or go zombie
	 */

	/*
	 * The task is currently blocked or will block after an overflow.
	 * We must force it to wake up to get out of the
	 * MASKED state and transition to the unloaded state by itself.
	 *
	 * This situation is only possible for per-task mode
	 */
	if (state == PFM_CTX_MASKED && CTX_OVFL_NOBLOCK(ctx) == 0) {

		/*
		 * set a "partial" zombie state to be checked
		 * upon return from down() in pfm_handle_work().
		 *
		 * We cannot use the ZOMBIE state, because it is checked
		 * by pfm_load_regs() which is called upon wakeup from down().
		 * In such case, it would free the context and then we would
		 * return to pfm_handle_work() which would access the
		 * stale context. Instead, we set a flag invisible to pfm_load_regs()
		 * but visible to pfm_handle_work().
		 *
		 * For some window of time, we have a zombie context with
		 * ctx_state = MASKED and not ZOMBIE
		 */
		ctx->ctx_fl_going_zombie = 1;

		/*
		 * force task to wake up from MASKED state
		 */
		complete(&ctx->ctx_restart_done);

		DPRINT(("waking up ctx_state=%d\n", state));

		/*
		 * put ourself to sleep waiting for the other
		 * task to report completion
		 *
		 * the context is protected by mutex, therefore there
		 * is no risk of being notified of completion before
		 * being actually on the waitq.
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&ctx->ctx_zombieq, &wait);

		UNPROTECT_CTX(ctx, flags);

		/*
		 * XXX: check for signals:
		 *	- ok for explicit close
		 *	- not ok when coming from exit_files()
		 */
		schedule();

		PROTECT_CTX(ctx, flags);

		remove_wait_queue(&ctx->ctx_zombieq, &wait);
		set_current_state(TASK_RUNNING);

		/*
		 * context is unloaded at this point
		 */
		DPRINT(("after zombie wakeup ctx_state=%d for\n", state));
	}
	else if (task != current) {
#ifdef CONFIG_SMP
		/*
		 * switch context to zombie state
		 */
		ctx->ctx_state = PFM_CTX_ZOMBIE;

		DPRINT(("zombie ctx for [%d]\n", task->pid));
		/*
		 * cannot free the context on the spot. deferred until
		 * the task notices the ZOMBIE state
		 */
		free_possible = 0;
#else
		pfm_context_unload(ctx, NULL, 0, regs);
#endif
	}
#else
	/* XXX XEN */
	/* unload context */
	BUG_ON(state != PFM_CTX_UNLOADED);
#endif

#ifndef XEN
doit:
#endif
	/* reload state, may have changed during opening of critical section */
	state = ctx->ctx_state;

	/*
	 * the context is still attached to a task (possibly current)
	 * we cannot destroy it right now
	 */

	/*
	 * we must free the sampling buffer right here because
	 * we cannot rely on it being cleaned up later by the
	 * monitored task. It is not possible to free vmalloc'ed
	 * memory in pfm_load_regs(). Instead, we remove the buffer
	 * now. Should there be subsequent PMU overflows originally
	 * meant for sampling, they will be converted to spurious
	 * and that's fine because the monitoring tool is gone anyway.
	 */
	if (ctx->ctx_smpl_hdr) {
		smpl_buf_addr = ctx->ctx_smpl_hdr;
		smpl_buf_size = ctx->ctx_smpl_size;
		/* no more sampling */
		ctx->ctx_smpl_hdr = NULL;
		ctx->ctx_fl_is_sampling = 0;
	}

	DPRINT(("ctx_state=%d free_possible=%d addr=%p size=%lu\n",
		state,
		free_possible,
		smpl_buf_addr,
		smpl_buf_size));

	if (smpl_buf_addr) pfm_exit_smpl_buffer(ctx->ctx_buf_fmt);

	/*
	 * UNLOADED means the session has already been unreserved.
	 */
	if (state == PFM_CTX_ZOMBIE) {
		pfm_unreserve_session(ctx, ctx->ctx_fl_system, ctx->ctx_cpu);
	}

#ifndef XEN
	/*
	 * disconnecting the file descriptor from the context must be done
	 * before we unlock.
	 */
	filp->private_data = NULL;

	/*
	 * if we free on the spot, the context is now completely unreachable
	 * from the caller's side. The monitored task side is also cut, so we
	 * can freely cut.
	 *
	 * If we have a deferred free, only the caller side is disconnected.
	 */
	UNPROTECT_CTX(ctx, flags);

	/*
	 * All memory free operations (especially for vmalloc'ed memory)
	 * MUST be done with interrupts ENABLED.
	 */
	if (smpl_buf_addr) pfm_rvfree(smpl_buf_addr, smpl_buf_size);
#else
	UNPROTECT_CTX_NOIRQ(ctx);
#endif

	/*
	 * return the memory used by the context
	 */
	if (free_possible) pfm_context_free(ctx);

	return 0;
}

#ifndef XEN
static int
pfm_no_open(struct inode *irrelevant, struct file *dontcare)
{
	DPRINT(("pfm_no_open called\n"));
	return -ENXIO;
}

static struct file_operations pfm_file_ops = {
	.llseek   = no_llseek,
	.read     = pfm_read,
	.write    = pfm_write,
	.poll     = pfm_poll,
	.ioctl    = pfm_ioctl,
	.open     = pfm_no_open,	/* special open code to disallow open via /proc */
	.fasync   = pfm_fasync,
	.release  = pfm_close,
	.flush	  = pfm_flush
};

static int
pfmfs_delete_dentry(struct dentry *dentry)
{
	return 1;
}

static struct dentry_operations pfmfs_dentry_operations = {
	.d_delete = pfmfs_delete_dentry,
};

static int
pfm_alloc_fd(struct file **cfile)
{
	int fd, ret = 0;
	struct file *file = NULL;
	struct inode *inode;
	char name[32];
	struct qstr this;

	fd = get_unused_fd();
	if (fd < 0) return -ENFILE;

	ret = -ENFILE;

	file = get_empty_filp();
	if (!file) goto out;

	/*
	 * allocate a new inode
	 */
	inode = new_inode(pfmfs_mnt->mnt_sb);
	if (!inode) goto out;

	DPRINT(("new inode ino=%ld @%p\n", inode->i_ino, inode));

	inode->i_mode = S_IFCHR|S_IRUGO;
	inode->i_uid  = current->fsuid;
	inode->i_gid  = current->fsgid;

	snprintf(name, sizeof(name), "[%lu]", inode->i_ino);
	this.name = name;
	this.len  = strlen(name);
	this.hash = inode->i_ino;

	ret = -ENOMEM;

	/*
	 * allocate a new dcache entry
	 */
	file->f_dentry = d_alloc(pfmfs_mnt->mnt_sb->s_root, &this);
	if (!file->f_dentry) goto out;

	file->f_dentry->d_op = &pfmfs_dentry_operations;

	d_add(file->f_dentry, inode);
	file->f_vfsmnt = mntget(pfmfs_mnt);
	file->f_mapping = inode->i_mapping;

	file->f_op    = &pfm_file_ops;
	file->f_mode  = FMODE_READ;
	file->f_flags = O_RDONLY;
	file->f_pos   = 0;

	/*
	 * may have to delay until context is attached?
	 */
	fd_install(fd, file);

	/*
	 * the file structure we will use
	 */
	*cfile = file;

	return fd;
out:
	if (file) put_filp(file);
	put_unused_fd(fd);
	return ret;
}

static void
pfm_free_fd(int fd, struct file *file)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;

	/*
	 * there is no fd_uninstall(), so we do it here
	 */
	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	rcu_assign_pointer(fdt->fd[fd], NULL);
	spin_unlock(&files->file_lock);

	if (file)
		put_filp(file);
	put_unused_fd(fd);
}

static int
pfm_remap_buffer(struct vm_area_struct *vma, unsigned long buf, unsigned long addr, unsigned long size)
{
	DPRINT(("CPU%d buf=0x%lx addr=0x%lx size=%ld\n", smp_processor_id(), buf, addr, size));

	while (size > 0) {
		unsigned long pfn = ia64_tpa(buf) >> PAGE_SHIFT;

		if (remap_pfn_range(vma, addr, pfn, PAGE_SIZE, PAGE_READONLY))
			return -ENOMEM;

		addr  += PAGE_SIZE;
		buf   += PAGE_SIZE;
		size  -= PAGE_SIZE;
	}
	return 0;
}
#endif

/*
 * allocate a sampling buffer and remap it into the user address space of the task
 */
static int
pfm_smpl_buffer_alloc(struct task_struct *task, pfm_context_t *ctx, unsigned long rsize, void **user_vaddr)
{
#ifndef XEN
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma = NULL;
	unsigned long size;
	void *smpl_buf;

	/*
	 * the fixed header + requested size, aligned to a page boundary
	 */
	size = PAGE_ALIGN(rsize);

	DPRINT(("sampling buffer rsize=%lu size=%lu bytes\n", rsize, size));

	/*
	 * check requested size to avoid Denial-of-service attacks
	 * XXX: may have to refine this test
	 * Check against address space limit.
	 *
	 * if ((mm->total_vm << PAGE_SHIFT) + len > task->rlim[RLIMIT_AS].rlim_cur)
	 *	return -ENOMEM;
	 */
	if (size > task->signal->rlim[RLIMIT_MEMLOCK].rlim_cur)
		return -ENOMEM;

	/*
	 * We do the easy-to-undo allocations first.
	 *
	 * pfm_rvmalloc() clears the buffer, so there is no leak
	 */
	smpl_buf = pfm_rvmalloc(size);
	if (smpl_buf == NULL) {
		DPRINT(("Can't allocate sampling buffer\n"));
		return -ENOMEM;
	}

	DPRINT(("smpl_buf @%p\n", smpl_buf));

	/* allocate vma */
	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!vma) {
		DPRINT(("Cannot allocate vma\n"));
		goto error_kmem;
	}
	memset(vma, 0, sizeof(*vma));

	/*
	 * partially initialize the vma for the sampling buffer
	 */
	vma->vm_mm	  = mm;
	vma->vm_flags	  = VM_READ | VM_MAYREAD | VM_RESERVED;
	vma->vm_page_prot = PAGE_READONLY; /* XXX may need to change */

	/*
	 * Now we have everything we need and we can initialize
	 * and connect all the data structures
	 */
	ctx->ctx_smpl_hdr  = smpl_buf;
	ctx->ctx_smpl_size = size; /* aligned size */

	/*
	 * Let's do the difficult operations next.
	 *
	 * now we atomically find some area in the address space and
	 * remap the buffer into it.
	 */
	down_write(&task->mm->mmap_sem);

	/* find some free area in address space, must have mmap sem held */
	vma->vm_start = pfm_get_unmapped_area(NULL, 0, size, 0, MAP_PRIVATE|MAP_ANONYMOUS, 0);
	if (vma->vm_start == 0UL) {
		DPRINT(("Cannot find unmapped area for size %ld\n", size));
		up_write(&task->mm->mmap_sem);
		goto error;
	}
	vma->vm_end = vma->vm_start + size;
	vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;

	DPRINT(("aligned size=%ld, hdr=%p mapped @0x%lx\n", size, ctx->ctx_smpl_hdr, vma->vm_start));

	/* can only be applied to current task, need to have the mm semaphore held when called */
	if (pfm_remap_buffer(vma, (unsigned long)smpl_buf, vma->vm_start, size)) {
		DPRINT(("Can't remap buffer\n"));
		up_write(&task->mm->mmap_sem);
		goto error;
	}

	/*
	 * now insert the vma in the vm list for the process, must be
	 * done with mmap lock held
	 */
	insert_vm_struct(mm, vma);

	mm->total_vm += size >> PAGE_SHIFT;
	vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file,
							vma_pages(vma));
	up_write(&task->mm->mmap_sem);

	/*
	 * keep track of user level virtual address
	 */
	ctx->ctx_smpl_vaddr = (void *)vma->vm_start;
	*(unsigned long *)user_vaddr = vma->vm_start;

	return 0;

error:
	kmem_cache_free(vm_area_cachep, vma);
