perfmon.c
	}
	return err;
}

static void __exit
exit_pfm_fs(void)
{
	unregister_filesystem(&pfm_fs_type);
	mntput(pfmfs_mnt);
}

static ssize_t
pfm_read(struct file *filp, char *buf, size_t size, loff_t *ppos)
{
	pfm_context_t *ctx;
	pfm_msg_t *msg;
	ssize_t ret;
	unsigned long flags;
	DECLARE_WAITQUEUE(wait, current);

	if (PFM_IS_FILE(filp) == 0) {
		printk(KERN_ERR "perfmon: pfm_read: bad magic [%d]\n", current->pid);
		return -EINVAL;
	}

	ctx = (pfm_context_t *)filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_read: NULL ctx [%d]\n", current->pid);
		return -EINVAL;
	}

	/*
	 * check even when there is no message
	 */
	if (size < sizeof(pfm_msg_t)) {
		DPRINT(("message is too small ctx=%p (>=%ld)\n", ctx, sizeof(pfm_msg_t)));
		return -EINVAL;
	}

	PROTECT_CTX(ctx, flags);

	/*
	 * put ourselves on the wait queue
	 */
	add_wait_queue(&ctx->ctx_msgq_wait, &wait);

	for (;;) {
		/*
		 * check wait queue
		 */
		set_current_state(TASK_INTERRUPTIBLE);

		DPRINT(("head=%d tail=%d\n", ctx->ctx_msgq_head, ctx->ctx_msgq_tail));

		ret = 0;
		if (PFM_CTXQ_EMPTY(ctx) == 0) break;

		UNPROTECT_CTX(ctx, flags);

		/*
		 * check non-blocking read
		 */
		ret = -EAGAIN;
		if (filp->f_flags & O_NONBLOCK) break;

		/*
		 * check pending signals
		 */
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		/*
		 * no message, so wait
		 */
		schedule();

		PROTECT_CTX(ctx, flags);
	}
	DPRINT(("[%d] back to running ret=%ld\n", current->pid, ret));
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&ctx->ctx_msgq_wait, &wait);

	if (ret < 0) goto abort;

	ret = -EINVAL;
	msg = pfm_get_next_msg(ctx);
	if (msg == NULL) {
		printk(KERN_ERR "perfmon: pfm_read no msg for ctx=%p [%d]\n", ctx, current->pid);
		goto abort_locked;
	}

	DPRINT(("[%d] fd=%d type=%d\n",
		current->pid,
		msg->pfm_gen_msg.msg_ctx_fd,
		msg->pfm_gen_msg.msg_type));

	ret = -EFAULT;
	if (copy_to_user(buf, msg, sizeof(pfm_msg_t)) == 0) ret = sizeof(pfm_msg_t);

abort_locked:
	UNPROTECT_CTX(ctx, flags);
abort:
	return ret;
}

static ssize_t
pfm_write(struct file *file, const char *ubuf, size_t size, loff_t *ppos)
{
	DPRINT(("pfm_write called\n"));
	return -EINVAL;
}

static unsigned int
pfm_poll(struct file *filp, poll_table *wait)
{
	pfm_context_t *ctx;
	unsigned long flags;
	unsigned int mask = 0;

	if (PFM_IS_FILE(filp) == 0) {
		printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", current->pid);
		return 0;
	}

	ctx = (pfm_context_t *)filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_poll: NULL ctx [%d]\n", current->pid);
		return 0;
	}

	DPRINT(("pfm_poll ctx_fd=%d before poll_wait\n", ctx->ctx_fd));

	poll_wait(filp, &ctx->ctx_msgq_wait, wait);

	PROTECT_CTX(ctx, flags);

	if (PFM_CTXQ_EMPTY(ctx) == 0)
		mask = POLLIN | POLLRDNORM;

	UNPROTECT_CTX(ctx, flags);

	DPRINT(("pfm_poll ctx_fd=%d mask=0x%x\n", ctx->ctx_fd, mask));

	return mask;
}

static int
pfm_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
{
	DPRINT(("pfm_ioctl called\n"));
	return -EINVAL;
}

/*
 * context is locked when coming here and interrupts are disabled
 */
static inline int
pfm_do_fasync(int fd, struct file *filp, pfm_context_t *ctx, int on)
{
	int ret;

	ret = fasync_helper(fd, filp, on, &ctx->ctx_async_queue);

	DPRINT(("pfm_fasync called by [%d] on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
		current->pid,
		fd,
		on,
		ctx->ctx_async_queue, ret));

	return ret;
}

static int
pfm_fasync(int fd, struct file *filp, int on)
{
	pfm_context_t *ctx;
	unsigned long flags;
	int ret;

	if (PFM_IS_FILE(filp) == 0) {
		printk(KERN_ERR "perfmon: pfm_fasync bad magic [%d]\n", current->pid);
		return -EBADF;
	}

	ctx = (pfm_context_t *)filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_fasync NULL ctx [%d]\n", current->pid);
		return -EBADF;
	}

	PROTECT_CTX(ctx, flags);

	ret = pfm_do_fasync(fd, filp, ctx, on);

	DPRINT(("pfm_fasync called on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
		fd,
		on,
		ctx->ctx_async_queue, ret));

	UNPROTECT_CTX(ctx, flags);

	return ret;
}

#ifdef CONFIG_SMP
/*
 * this function is exclusively called from pfm_close().
 * The context is not protected at that time, nor are interrupts
 * on the remote CPU. That's necessary to avoid deadlocks.
 */
static void
pfm_syswide_force_stop(void *info)
{
	pfm_context_t *ctx = (pfm_context_t *)info;
	struct pt_regs *regs = ia64_task_regs(current);
	struct task_struct *owner;
	unsigned long flags;
	int ret;

	if (ctx->ctx_cpu != smp_processor_id()) {
		printk(KERN_ERR "perfmon: pfm_syswide_force_stop for CPU%d but on CPU%d\n",
			ctx->ctx_cpu,
			smp_processor_id());
		return;
	}
	owner = GET_PMU_OWNER();
	if (owner != ctx->ctx_task) {
		printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected owner [%d] instead of [%d]\n",
			smp_processor_id(),
			owner->pid, ctx->ctx_task->pid);
		return;
	}
	if (GET_PMU_CTX() != ctx) {
		printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected ctx %p instead of %p\n",
			smp_processor_id(),
			GET_PMU_CTX(), ctx);
		return;
	}

	DPRINT(("on CPU%d forcing system wide stop for [%d]\n", smp_processor_id(), ctx->ctx_task->pid));

	/*
	 * the context is already protected in pfm_close(), we simply
	 * need to mask interrupts to avoid a PMU interrupt race on
	 * this CPU
	 */
	local_irq_save(flags);

	ret = pfm_context_unload(ctx, NULL, 0, regs);
	if (ret) {
		DPRINT(("context_unload returned %d\n", ret));
	}

	/*
	 * unmask interrupts, PMU interrupts are now spurious here
	 */
	local_irq_restore(flags);
}

static void
pfm_syswide_cleanup_other_cpu(pfm_context_t *ctx)
{
	int ret;

	DPRINT(("calling CPU%d for cleanup\n", ctx->ctx_cpu));
	ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 0, 1);
	DPRINT(("called CPU%d for cleanup ret=%d\n", ctx->ctx_cpu, ret));
}
#endif /* CONFIG_SMP */

/*
 * called for each close(). Partially free resources.
 * When caller is self-monitoring, the context is unloaded.
 */
static int
pfm_flush(struct file *filp)
{
	pfm_context_t *ctx;
	struct task_struct *task;
	struct pt_regs *regs;
	unsigned long flags;
	unsigned long smpl_buf_size = 0UL;
	void *smpl_buf_vaddr = NULL;
	int state, is_system;

	if (PFM_IS_FILE(filp) == 0) {
		DPRINT(("bad magic\n"));
		return -EBADF;
	}

	ctx = (pfm_context_t *)filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_flush: NULL ctx [%d]\n", current->pid);
		return -EBADF;
	}

	/*
	 * remove our file from the async queue, if we use this mode.
	 * This can be done without the context being protected. We come
	 * here when the context has become unreachable by other tasks.
	 *
	 * We may still have active monitoring at this point and we may
	 * end up in pfm_overflow_handler(). However, fasync_helper()
	 * operates with interrupts disabled and it cleans up the
	 * queue. If the PMU handler is called prior to entering
	 * fasync_helper() then it will send a signal. If it is
	 * invoked after, it will find an empty queue and no
	 * signal will be sent. In both cases, we are safe
	 */
	if (filp->f_flags & FASYNC) {
		DPRINT(("cleaning up async_queue=%p\n", ctx->ctx_async_queue));
		pfm_do_fasync(-1, filp, ctx, 0);
	}

	PROTECT_CTX(ctx, flags);

	state     = ctx->ctx_state;
	is_system = ctx->ctx_fl_system;

	task = PFM_CTX_TASK(ctx);
	regs = ia64_task_regs(task);

	DPRINT(("ctx_state=%d is_current=%d\n",
		state,
		task == current ? 1 : 0));

	/*
	 * if state == UNLOADED, then task is NULL
	 */

	/*
	 * we must stop and unload because we are losing access to the context.
	 */
	if (task == current) {
#ifdef CONFIG_SMP
		/*
		 * the task IS the owner but it migrated to another CPU: that's bad
		 * but we must handle this cleanly. Unfortunately, the kernel does
		 * not provide a mechanism to block migration (while the context is loaded).
		 *
		 * We need to release the resource on the ORIGINAL cpu.
		 */
		if (is_system && ctx->ctx_cpu != smp_processor_id()) {

			DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
			/*
			 * keep context protected but unmask interrupt for IPI
			 */
			local_irq_restore(flags);

			pfm_syswide_cleanup_other_cpu(ctx);

			/*
			 * restore interrupt masking
			 */
			local_irq_save(flags);

			/*
			 * context is unloaded at this point
			 */
		} else
#endif /* CONFIG_SMP */
		{
			DPRINT(("forcing unload\n"));
			/*
			 * stop and unload, returning with state UNLOADED
			 * and session unreserved.
			 */
			pfm_context_unload(ctx, NULL, 0, regs);

			DPRINT(("ctx_state=%d\n", ctx->ctx_state));
		}
	}

	/*
	 * remove virtual mapping, if any, for the calling task.
	 * cannot reset ctx field until last user is calling close().
	 *
	 * ctx_smpl_vaddr must never be cleared because it is needed
	 * by every task with access to the context
	 *
	 * When called from do_exit(), the mm context is gone already, therefore
	 * mm is NULL, i.e., the VMA is already gone and we do not have to
	 * do anything here
	 */
	if (ctx->ctx_smpl_vaddr && current->mm) {
		smpl_buf_vaddr = ctx->ctx_smpl_vaddr;
		smpl_buf_size  = ctx->ctx_smpl_size;
	}

	UNPROTECT_CTX(ctx, flags);

	/*
	 * if there was a mapping, then we systematically remove it
	 * at this point. Cannot be done inside critical section
	 * because some VM function reenables interrupts.
	 */
	if (smpl_buf_vaddr) pfm_remove_smpl_mapping(current, smpl_buf_vaddr, smpl_buf_size);

	return 0;
}

/*
 * called either on explicit close() or from exit_files().
 * Only the LAST user of the file gets to this point, i.e., it is
 * called only ONCE.
 *
 * IMPORTANT: we get called ONLY when the refcnt on the file gets to zero
 * (fput()), i.e., last task to access the file. Nobody else can access the
 * file at this point.
 *
 * When called from exit_files(), the VMA has been freed because exit_mm()
 * is executed before exit_files().
 *
 * When called from exit_files(), the current task is not yet ZOMBIE but we
 * flush the PMU state to the context.
 */
static int
pfm_close(struct inode *inode, struct file *filp)
{
	pfm_context_t *ctx;
	struct task_struct *task;
	struct pt_regs *regs;
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	unsigned long smpl_buf_size = 0UL;
	void *smpl_buf_addr = NULL;
	int free_possible = 1;
	int state, is_system;

	DPRINT(("pfm_close called private=%p\n", filp->private_data));

	if (PFM_IS_FILE(filp) == 0) {
		DPRINT(("bad magic\n"));
		return -EBADF;
	}

	ctx = (pfm_context_t *)filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", current->pid);
		return -EBADF;
	}

	PROTECT_CTX(ctx, flags);

	state     = ctx->ctx_state;
	is_system = ctx->ctx_fl_system;

	task = PFM_CTX_TASK(ctx);
	regs = ia64_task_regs(task);

	DPRINT(("ctx_state=%d is_current=%d\n",
		state,
		task == current ? 1 : 0));

	/*
	 * if task == current, then pfm_flush() unloaded the context
	 */
	if (state == PFM_CTX_UNLOADED) goto doit;

	/*
	 * context is loaded/masked and task != current, we need to
	 * either force an unload or go zombie
	 */

	/*
	 * The task is currently blocked or will block after an overflow.
	 * we must force it to wakeup to get out of the
	 * MASKED state and transition to the unloaded state by itself.
	 *
	 * This situation is only possible for per-task mode
	 */
	if (state == PFM_CTX_MASKED && CTX_OVFL_NOBLOCK(ctx) == 0) {

		/*
		 * set a "partial" zombie state to be checked
		 * upon return from down() in pfm_handle_work().
		 *
		 * We cannot use the ZOMBIE state, because it is checked
		 * by pfm_load_regs() which is called upon wakeup from down().
		 * In such case, it would free the context and then we would
		 * return to pfm_handle_work() which would access the
		 * stale context. Instead, we set a flag invisible to pfm_load_regs()
		 * but visible to pfm_handle_work().
		 *
		 * For some window of time, we have a zombie context with
		 * ctx_state = MASKED and not ZOMBIE
		 */
		ctx->ctx_fl_going_zombie = 1;

		/*
		 * force task to wake up from MASKED state
		 */
		up(&ctx->ctx_restart_sem);

		DPRINT(("waking up ctx_state=%d\n", state));

		/*
		 * put ourselves to sleep waiting for the other task to report completion
		 */
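/*
 * A minimal user-space sketch of how the pfm_read()/pfm_poll() interface
 * above can be consumed. It assumes ctx_fd is an already-created perfmon
 * context file descriptor and that pfm_msg_t is available from
 * <asm/perfmon.h>; both the header location and the setup path are
 * assumptions, not something this excerpt shows. Kept under #if 0 so it
 * never enters a kernel build.
 */
#if 0
#include <poll.h>
#include <unistd.h>
#include <errno.h>
#include <stdio.h>
#include <asm/perfmon.h>	/* assumed location of the pfm_msg_t definition */

static int
wait_for_pfm_msg(int ctx_fd)
{
	struct pollfd pfd = { .fd = ctx_fd, .events = POLLIN };
	pfm_msg_t msg;
	ssize_t n;

	/* pfm_poll() reports POLLIN|POLLRDNORM once the message queue is non-empty */
	if (poll(&pfd, 1, -1) < 0) {
		perror("poll");
		return -1;
	}

	/* pfm_read() rejects buffers smaller than sizeof(pfm_msg_t) with -EINVAL */
	n = read(ctx_fd, &msg, sizeof(msg));
	if (n < 0) {
		if (errno == EAGAIN) return 0;	/* O_NONBLOCK set and queue already drained */
		perror("read");
		return -1;
	}

	printf("msg_type=%d ctx_fd=%d\n",
	       (int) msg.pfm_gen_msg.msg_type,
	       (int) msg.pfm_gen_msg.msg_ctx_fd);
	return 1;
}
#endif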