perfmon.c
	psb->psb_hdr->hdr_pmds[0] = which_pmds[0];

	/*
	 * Let's do the difficult operations next.
	 *
	 * now we atomically find some area in the address space and
	 * remap the buffer into it.
	 */
	down_write(&current->mm->mmap_sem);

	/* find some free area in address space, must have mmap sem held */
	vma->vm_start = get_unmapped_area(NULL, 0, size, 0, MAP_PRIVATE|MAP_ANONYMOUS);
	if (vma->vm_start == 0UL) {
		DBprintk(("Cannot find unmapped area for size %ld\n", size));
		up_write(&current->mm->mmap_sem);
		goto error;
	}
	vma->vm_end = vma->vm_start + size;

	DBprintk(("entries=%ld aligned size=%ld, unmapped @0x%lx\n", entries, size, vma->vm_start));

	/* can only be applied to current, need to have the mm semaphore held when called */
	if (pfm_remap_buffer((unsigned long)smpl_buf, vma->vm_start, size)) {
		DBprintk(("Can't remap buffer\n"));
		up_write(&current->mm->mmap_sem);
		goto error;
	}

	/*
	 * now insert the vma in the vm list for the process, must be
	 * done with mmap lock held
	 */
	insert_vm_struct(mm, vma);

	mm->total_vm += size >> PAGE_SHIFT;

	up_write(&current->mm->mmap_sem);

	/* store which PMDs to record */
	ctx->ctx_smpl_regs[0] = which_pmds[0];

	/* link to perfmon context */
	ctx->ctx_psb = psb;

	/*
	 * keep track of user level virtual address
	 */
	ctx->ctx_smpl_vaddr = *(unsigned long *)user_vaddr = vma->vm_start;

	return 0;

error:
	kmem_cache_free(vm_area_cachep, vma);
error_kmem:
	kfree(psb);
error_kmalloc:
	pfm_rvfree(smpl_buf, size);
	return -ENOMEM;
}
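/*
 * Illustrative sketch (not part of the original file): this is roughly how
 * a user-level tool would drive the code above.  PFM_CREATE_CONTEXT ends up
 * in pfm_context_create() below, which calls pfm_smpl_buffer_alloc() and
 * returns the mapped buffer address in ctx_smpl_vaddr.  The perfmonctl()
 * wrapper, its signature, the <asm/perfmon.h> include path, and the sample
 * PMD selection are assumptions about the perfmon-1.x user interface; the
 * pfarg_context_t field names are the ones used in this file.
 */
#if 0	/* user-level example, excluded from the kernel build */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <asm/perfmon.h>

int
create_sampling_context(void)
{
	pfarg_context_t ctx;

	memset(&ctx, 0, sizeof(ctx));
	ctx.ctx_flags        = 0;		/* per-task, non-blocking */
	ctx.ctx_notify_pid   = getpid();	/* SIGPROF back to ourselves */
	ctx.ctx_smpl_entries = 4096;		/* sampling buffer entries */
	ctx.ctx_smpl_regs[0] = 0x30;		/* e.g. record PMD4 and PMD5 */

	if (perfmonctl(getpid(), PFM_CREATE_CONTEXT, &ctx, 1) == -1) {
		perror("PFM_CREATE_CONTEXT");
		return -1;
	}
	/* the kernel filled in ctx_smpl_vaddr via pfm_smpl_buffer_alloc() */
	printf("sampling buffer mapped at %p\n", (void *)ctx.ctx_smpl_vaddr);
	return 0;
}
#endif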
static int
pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned long cpu_mask)
{
	unsigned long m, undo_mask;
	unsigned int n, i;

	/*
	 * validity checks on cpu_mask have been done upstream
	 */
	LOCK_PFS();

	if (is_syswide) {
		/*
		 * cannot mix system wide and per-task sessions
		 */
		if (pfm_sessions.pfs_task_sessions > 0UL) {
			DBprintk(("system wide not possible, %u conflicting task_sessions\n",
				pfm_sessions.pfs_task_sessions));
			goto abort;
		}

		m = cpu_mask; undo_mask = 0UL; n = 0;
		DBprintk(("cpu_mask=0x%lx\n", cpu_mask));

		for(i=0; m; i++, m>>=1) {

			if ((m & 0x1) == 0UL) continue;

			if (pfm_sessions.pfs_sys_session[i]) goto undo;

			DBprintk(("reserving CPU%d currently on CPU%d\n", i, smp_processor_id()));

			pfm_sessions.pfs_sys_session[i] = task;
			undo_mask |= 1UL << i;
			n++;
		}
		pfm_sessions.pfs_sys_sessions += n;
	} else {
		if (pfm_sessions.pfs_sys_sessions) goto abort;
		pfm_sessions.pfs_task_sessions++;
	}
	UNLOCK_PFS();
	return 0;
undo:
	DBprintk(("system wide not possible, conflicting session [%d] on CPU%d\n",
		pfm_sessions.pfs_sys_session[i]->pid, i));

	/* release only the CPUs we reserved above, not slots owned by others */
	for(i=0; undo_mask; i++, undo_mask >>=1) {
		if (undo_mask & 0x1) pfm_sessions.pfs_sys_session[i] = NULL;
	}
abort:
	UNLOCK_PFS();

	return -EBUSY;
}

static int
pfm_unreserve_session(struct task_struct *task, int is_syswide, unsigned long cpu_mask)
{
	pfm_context_t *ctx;
	unsigned long m;
	unsigned int n, i;

	ctx = task ? task->thread.pfm_context : NULL;

	/*
	 * validity checks on cpu_mask have been done upstream
	 */
	LOCK_PFS();

	DBprintk(("[%d] sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu_mask=0x%lx\n",
		task->pid,
		pfm_sessions.pfs_sys_sessions,
		pfm_sessions.pfs_task_sessions,
		pfm_sessions.pfs_sys_use_dbregs,
		is_syswide,
		cpu_mask));

	if (is_syswide) {
		m = cpu_mask; n = 0;
		for(i=0; m; i++, m>>=1) {
			if ((m & 0x1) == 0UL) continue;
			pfm_sessions.pfs_sys_session[i] = NULL;
			n++;
		}
		/*
		 * would not work with perfmon+more than one bit in cpu_mask
		 */
		if (ctx && ctx->ctx_fl_using_dbreg) {
			if (pfm_sessions.pfs_sys_use_dbregs == 0) {
				printk(KERN_DEBUG "perfmon: invalid release for [%d] "
				       "sys_use_dbregs=0\n", task->pid);
			} else {
				pfm_sessions.pfs_sys_use_dbregs--;
			}
		}
		pfm_sessions.pfs_sys_sessions -= n;

		DBprintk(("CPU%d sys_sessions=%u\n",
			smp_processor_id(), pfm_sessions.pfs_sys_sessions));
	} else {
		pfm_sessions.pfs_task_sessions--;
		DBprintk(("[%d] task_sessions=%u\n",
			task->pid, pfm_sessions.pfs_task_sessions));
	}
	UNLOCK_PFS();

	return 0;
}

static void
pfm_send_notification_signal(unsigned long data)
{
	pfm_context_t *ctx = (pfm_context_t *)data;
	struct siginfo si;
	int ret;

	DBprintk(("[%d] tasklet called\n", current->pid));

	LOCK_CTX(ctx);

	if (ctx->ctx_notify_task == NULL) {
		printk(KERN_INFO "perfmon: tasklet lost notify_task\n");
		goto nothing_to_do;
	}

	/* no leak */
	memset(&si, 0, sizeof(si));

	si.si_addr        = NULL;
	si.si_pid         = current->pid; /* irrelevant */
	si.si_signo       = SIGPROF;
	si.si_code        = PROF_OVFL; /* indicates a perfmon SIGPROF signal */
	si.si_pfm_ovfl[0] = ctx->ctx_ovfl_regs[0];

	if (ctx->ctx_notify_task != current) read_lock(&tasklist_lock);

	DBprintk_ovfl(("[%d] tasklet sending notification to [%d]\n",
		current->pid, ctx->ctx_notify_task->pid));

	ret = send_sig_info(SIGPROF, &si, ctx->ctx_notify_task);
	if (ret != 0)
		printk(KERN_ERR "send_sig_info(process %d, SIGPROF)=%d\n",
			ctx->ctx_notify_task->pid, ret);

	/*
	 * now undo the protections in order
	 */
	if (ctx->ctx_notify_task != current) read_unlock(&tasklist_lock);

nothing_to_do:
	UNLOCK_CTX(ctx);
}

/*
 * XXX: do something better here
 */
static int
pfm_bad_permissions(struct task_struct *task)
{
	/* stolen from bad_signal() */
	return (current->session != task->session)
	    && (current->euid ^ task->suid)
	    && (current->euid ^ task->uid)
	    && (current->uid ^ task->suid)
	    && (current->uid ^ task->uid);
}
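/*
 * Illustrative sketch (not part of the original file): the XOR chain in
 * pfm_bad_permissions() relies on (a ^ b) being non-zero exactly when
 * a != b, so the function denies access only when the sessions differ
 * AND no effective/real uid pair matches.  The same predicate spelled
 * out with ordinary comparisons (the variable names are hypothetical):
 */
#if 0	/* illustration only */
static int
bad_permissions_spelled_out(int same_session,
			    unsigned int my_euid, unsigned int my_uid,
			    unsigned int their_suid, unsigned int their_uid)
{
	/* deny (non-zero) unless some identity matches */
	return !same_session
	    && my_euid != their_suid
	    && my_euid != their_uid
	    && my_uid  != their_suid
	    && my_uid  != their_uid;
}
#endif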
static int
pfx_is_sane(struct task_struct *task, pfarg_context_t *pfx)
{
	unsigned long smpl_pmds = pfx->ctx_smpl_regs[0];
	int ctx_flags;
	int cpu;

	/* valid signal */

	/* cannot send to process 1, 0 means do not notify */
	if (pfx->ctx_notify_pid == 1) {
		DBprintk(("invalid notify_pid %d\n", pfx->ctx_notify_pid));
		return -EINVAL;
	}

	ctx_flags = pfx->ctx_flags;

	if ((ctx_flags & PFM_FL_INHERIT_MASK) == (PFM_FL_INHERIT_ONCE|PFM_FL_INHERIT_ALL)) {
		DBprintk(("invalid inherit mask 0x%x\n", ctx_flags & PFM_FL_INHERIT_MASK));
		return -EINVAL;
	}

	if (ctx_flags & PFM_FL_SYSTEM_WIDE) {
		DBprintk(("cpu_mask=0x%lx\n", pfx->ctx_cpu_mask));

		/*
		 * cannot block in this mode
		 */
		if (ctx_flags & PFM_FL_NOTIFY_BLOCK) {
			DBprintk(("cannot use blocking mode when in system wide monitoring\n"));
			return -EINVAL;
		}
		/*
		 * must only have one bit set in the CPU mask
		 */
		if (hweight64(pfx->ctx_cpu_mask) != 1UL) {
			DBprintk(("invalid CPU mask specified\n"));
			return -EINVAL;
		}
		/*
		 * and it must be a valid CPU
		 */
		cpu = ffz(~pfx->ctx_cpu_mask);
		if (cpu_online(cpu) == 0) {
			DBprintk(("CPU%d is not online\n", cpu));
			return -EINVAL;
		}
		/*
		 * check for pre-existing pinning, if conflicting reject
		 */
		if (task->cpus_allowed != ~0UL && (task->cpus_allowed & (1UL<<cpu)) == 0) {
			DBprintk(("[%d] pinned on 0x%lx, mask for CPU%d\n",
				task->pid, task->cpus_allowed, cpu));
			return -EINVAL;
		}
	} else {
		/*
		 * must provide a target for the signal in blocking mode even when
		 * no counter is configured with PFM_FL_REG_OVFL_NOTIFY
		 */
		if ((ctx_flags & PFM_FL_NOTIFY_BLOCK) && pfx->ctx_notify_pid == 0) {
			DBprintk(("must have notify_pid when blocking for [%d]\n", task->pid));
			return -EINVAL;
		}
#if 0
		if ((ctx_flags & PFM_FL_NOTIFY_BLOCK) && pfx->ctx_notify_pid == task->pid) {
			DBprintk(("cannot notify self when blocking for [%d]\n", task->pid));
			return -EINVAL;
		}
#endif
	}
	/* verify validity of smpl_regs */
	if ((smpl_pmds & pmu_conf.impl_pmds[0]) != smpl_pmds) {
		DBprintk(("invalid smpl_regs 0x%lx\n", smpl_pmds));
		return -EINVAL;
	}
	/* probably more to add here */
	return 0;
}
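/*
 * Illustrative sketch (not part of the original file): the system-wide
 * branch of pfx_is_sane() accepts a cpu_mask with exactly one bit set,
 * then recovers its index with ffz(~mask) (the first zero bit of ~mask
 * is the first set bit of mask).  A user-level analog of the same mask
 * arithmetic using GCC builtins; the function name is ours:
 */
#if 0	/* illustration only */
static int
single_cpu_from_mask(unsigned long mask)
{
	/* exactly one CPU selected, as hweight64(mask) == 1 requires */
	if (__builtin_popcountl(mask) != 1)
		return -1;

	/* index of the lone set bit, equivalent to ffz(~mask) */
	return __builtin_ctzl(mask);
}
#endif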
static int
pfm_context_create(struct task_struct *task, pfm_context_t *ctx, void *req, int count,
		   struct pt_regs *regs)
{
	pfarg_context_t tmp;
	void *uaddr = NULL;
	int ret;
	int ctx_flags;
	pid_t notify_pid;

	/* a context has already been defined */
	if (ctx) return -EBUSY;

	/*
	 * not yet supported
	 */
	if (task != current) return -EINVAL;

	if (__copy_from_user(&tmp, req, sizeof(tmp))) return -EFAULT;

	ret = pfx_is_sane(task, &tmp);
	if (ret < 0) return ret;

	ctx_flags = tmp.ctx_flags;

	ret = pfm_reserve_session(task, ctx_flags & PFM_FL_SYSTEM_WIDE, tmp.ctx_cpu_mask);
	if (ret) goto abort;

	ret = -ENOMEM;

	ctx = pfm_context_alloc();
	if (!ctx) goto error;

	/* record the creator (important for inheritance) */
	ctx->ctx_owner = current;

	notify_pid = tmp.ctx_notify_pid;

	spin_lock_init(&ctx->ctx_lock);

	if (notify_pid == current->pid) {

		ctx->ctx_notify_task = current;
		task->thread.pfm_context = ctx;

	} else if (notify_pid != 0) {
		struct task_struct *notify_task;

		read_lock(&tasklist_lock);

		notify_task = find_task_by_pid(notify_pid);

		if (notify_task) {

			ret = -EPERM;

			/*
			 * check if we can send this task a signal
			 */
			if (pfm_bad_permissions(notify_task)) {
				read_unlock(&tasklist_lock);
				goto buffer_error;
			}

			/*
			 * make visible
			 * must be done inside critical section
			 *
			 * if the initialization does not go through it is still
			 * okay because the child will do the scan for nothing, which
			 * won't hurt.
			 */
			task->thread.pfm_context = ctx;

			/*
			 * will cause task to check on exit for monitored
			 * processes that would notify it. see release_thread()
			 * Note: the scan MUST be done in release_thread(), once the
			 * task has been detached from the tasklist, otherwise you are
			 * exposed to race conditions.
			 *
			 * use notify_task directly here: ctx->ctx_notify_task is
			 * not set until the next statement.
			 */
			atomic_add(1, &notify_task->thread.pfm_notifiers_check);

			ctx->ctx_notify_task = notify_task;
		}
		read_unlock(&tasklist_lock);
	}

	/*
	 * notification process does not exist
	 */
	if (notify_pid != 0 && ctx->ctx_notify_task == NULL) {
		ret = -EINVAL;
		goto buffer_error;
	}

	if (tmp.ctx_smpl_entries) {
		DBprintk(("sampling entries=%lu\n", tmp.ctx_smpl_entries));

		ret = pfm_smpl_buffer_alloc(ctx, tmp.ctx_smpl_regs,
					    tmp.ctx_smpl_entries, &uaddr);
		if (ret < 0) goto buffer_error;

		tmp.ctx_smpl_vaddr = uaddr;
	}

	/* initialization of context's flags */
	ctx->ctx_fl_inherit   = ctx_flags & PFM_FL_INHERIT_MASK;
	ctx->ctx_fl_block     = (ctx_flags & PFM_FL_NOTIFY_BLOCK) ? 1 : 0;
	ctx->ctx_fl_system    = (ctx_flags & PFM_FL_SYSTEM_WIDE) ? 1 : 0;
	ctx->ctx_fl_excl_idle = (ctx_flags & PFM_FL_EXCL_IDLE) ? 1 : 0;
	ctx->ctx_fl_unsecure  = (ctx_flags & PFM_FL_UNSECURE) ? 1 : 0;
	ctx->ctx_fl_frozen    = 0;
	/*
	 * setting this flag to 0 here means that the creator, or the task the
	 * context is being attached to, is granted access. Given that a context
	 * can only be created for the calling process, this in effect only
	 * allows the creator to access the context. See pfm_protect() for more.
	 */
	ctx->ctx_fl_protected = 0;

	/* for system wide mode only (only 1 bit set) */
	ctx->ctx_cpu = ffz(~tmp.ctx_cpu_mask);

	/* SMP only, means no CPU */
	ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
	SET_LAST_CPU(ctx, -1);

	sema_init(&ctx->ctx_restart_sem, 0); /* init this semaphore to locked */

	/*
	 * initialize tasklet for signal notifications
	 *
	 * ALL signal-based (or any notification using data structures
	 * external to perfmon) MUST use tasklets to avoid lock contention
	 * when a signal has to be sent from the overflow interrupt handler.
	 */
	tasklet_init(&ctx->ctx_tasklet, pfm_send_notification_signal, (unsigned long)ctx);

	if (__copy_to_user(req, &tmp, sizeof(tmp))) {
		ret = -EFAULT;
		goto buffer_error;
	}

	DBprintk(("context=%p, pid=%d notify_task=%p\n",
			(void *)ctx, task->pid, ctx->ctx_notify_task));

	DBprintk(("context=%p, pid=%d flags=0x%x inherit=%d block=%d system=%d excl_idle=%d unsecure=%d\n",
			(void *)ctx, task->pid, ctx_flags, ctx->ctx_fl_inherit,
			ctx->ctx_fl_block, ctx->ctx_fl_system,
			ctx->ctx_fl_excl_idle,
			ctx->ctx_fl_unsecure));

	/*
	 * when no notification is required, we can make this visible at the last moment
	 */
	if (notify_pid == 0) task->thread.pfm_context = ctx;

	/*
	 * pin task to CPU and force reschedule on exit to ensure
	 * that when back at the user level the task runs on the
	 * designated CPU.
	 */
	if (ctx->ctx_fl_system) {
		ctx->ctx_saved_cpus_allowed = task->cpus_allowed;