📄 perfmon.c
	UNLOCK_PFS();

	return -EBUSY;
}

static int
pfm_unreserve_session(struct task_struct *task, int is_syswide, unsigned long cpu_mask)
{
	pfm_context_t *ctx;
	unsigned long m;
	unsigned int n, i;

	ctx = task ? task->thread.pfm_context : NULL;

	/*
	 * validity checks on cpu_mask have been done upstream
	 */
	LOCK_PFS();

	DBprintk(("[%d] sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu_mask=0x%lx\n",
		task->pid,
		pfm_sessions.pfs_sys_sessions,
		pfm_sessions.pfs_task_sessions,
		pfm_sessions.pfs_sys_use_dbregs,
		is_syswide,
		cpu_mask));

	if (is_syswide) {
		m = cpu_mask; n = 0;
		for (i = 0; m; i++, m >>= 1) {
			if ((m & 0x1) == 0UL) continue;
			pfm_sessions.pfs_sys_session[i] = NULL;
			n++;
		}
		/*
		 * would not work with perfmon+more than one bit in cpu_mask
		 */
		if (ctx && ctx->ctx_fl_using_dbreg) {
			if (pfm_sessions.pfs_sys_use_dbregs == 0) {
				printk("perfmon: invalid release for [%d] sys_use_dbregs=0\n", task->pid);
			} else {
				pfm_sessions.pfs_sys_use_dbregs--;
			}
		}
		pfm_sessions.pfs_sys_sessions -= n;

		DBprintk(("CPU%d sys_sessions=%u\n", smp_processor_id(), pfm_sessions.pfs_sys_sessions));
	} else {
		pfm_sessions.pfs_task_sessions--;
		DBprintk(("[%d] task_sessions=%u\n", task->pid, pfm_sessions.pfs_task_sessions));
	}

	UNLOCK_PFS();

	return 0;
}

/*
 * XXX: do something better here
 */
static int
pfm_bad_permissions(struct task_struct *task)
{
	/* stolen from bad_signal() */
	return (current->session != task->session)
	    && (current->euid ^ task->suid) && (current->euid ^ task->uid)
	    && (current->uid ^ task->suid) && (current->uid ^ task->uid);
}

static int
pfx_is_sane(struct task_struct *task, pfarg_context_t *pfx)
{
	unsigned long smpl_pmds = pfx->ctx_smpl_regs[0];
	int ctx_flags;
	int cpu;

	/* valid signal */

	/* cannot send to process 1, 0 means do not notify */
	if (pfx->ctx_notify_pid == 1) {
		DBprintk(("invalid notify_pid %d\n", pfx->ctx_notify_pid));
		return -EINVAL;
	}
	ctx_flags = pfx->ctx_flags;

	if ((ctx_flags & PFM_FL_INHERIT_MASK) == (PFM_FL_INHERIT_ONCE|PFM_FL_INHERIT_ALL)) {
		DBprintk(("invalid inherit mask 0x%x\n", ctx_flags & PFM_FL_INHERIT_MASK));
		return -EINVAL;
	}

	if (ctx_flags & PFM_FL_SYSTEM_WIDE) {
		DBprintk(("cpu_mask=0x%lx\n", pfx->ctx_cpu_mask));

		/*
		 * cannot block in this mode
		 */
		if (ctx_flags & PFM_FL_NOTIFY_BLOCK) {
			DBprintk(("cannot use blocking mode when in system wide monitoring\n"));
			return -EINVAL;
		}
		/*
		 * must only have one bit set in the CPU mask
		 */
		if (hweight64(pfx->ctx_cpu_mask) != 1UL) {
			DBprintk(("invalid CPU mask specified\n"));
			return -EINVAL;
		}
		/*
		 * and it must be a valid CPU
		 */
		cpu = ffz(~pfx->ctx_cpu_mask);
		if (cpu_is_online(cpu) == 0) {
			DBprintk(("CPU%d is not online\n", cpu));
			return -EINVAL;
		}
		/*
		 * check for pre-existing pinning, if conflicting reject
		 */
		if (task->cpus_allowed != ~0UL && (task->cpus_allowed & (1UL<<cpu)) == 0) {
			DBprintk(("[%d] pinned on 0x%lx, mask for CPU%d\n", task->pid,
				task->cpus_allowed, cpu));
			return -EINVAL;
		}
	} else {
		/*
		 * must provide a target for the signal in blocking mode even when
		 * no counter is configured with PFM_FL_REG_OVFL_NOTIFY
		 */
		if ((ctx_flags & PFM_FL_NOTIFY_BLOCK) && pfx->ctx_notify_pid == 0) {
			DBprintk(("must have notify_pid when blocking for [%d]\n", task->pid));
			return -EINVAL;
		}
#if 0
		if ((ctx_flags & PFM_FL_NOTIFY_BLOCK) && pfx->ctx_notify_pid == task->pid) {
			DBprintk(("cannot notify self when blocking for [%d]\n", task->pid));
			return -EINVAL;
		}
#endif
	}
	/* verify validity of smpl_regs */
	if ((smpl_pmds & pmu_conf.impl_pmds[0]) != smpl_pmds) {
		DBprintk(("invalid smpl_regs 0x%lx\n", smpl_pmds));
		return -EINVAL;
	}
	/* probably more to add here */
	return 0;
}
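/*
 * Aside (not kernel code): pfx_is_sane() above accepts a system-wide context
 * only when ctx_cpu_mask has exactly one bit set, and then recovers the CPU
 * number with ffz(~mask): the first zero bit of the complement is the first
 * set bit of the mask. A minimal user-space sketch of the same two steps,
 * kept under #if 0; the GCC popcount/ctz builtins stand in for the kernel's
 * hweight64()/ffz() helpers (an assumption for illustration only).
 */
#if 0
#include <stdio.h>

/* return the CPU index encoded in a one-bit mask, or -1 if the mask is invalid */
static int cpu_from_single_bit_mask(unsigned long long mask)
{
	/* reject empty masks and masks with more than one bit set */
	if (__builtin_popcountll(mask) != 1) return -1;

	/* index of the only set bit == number of trailing zero bits */
	return __builtin_ctzll(mask);
}

int main(void)
{
	printf("%d\n", cpu_from_single_bit_mask(1ULL << 5));	/* prints 5  */
	printf("%d\n", cpu_from_single_bit_mask(0x30));		/* prints -1 */
	return 0;
}
#endif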
static int
pfm_context_create(struct task_struct *task, pfm_context_t *ctx, void *req, int count,
		   struct pt_regs *regs)
{
	pfarg_context_t tmp;
	void *uaddr = NULL;
	int ret, cpu = 0;
	int ctx_flags;
	pid_t notify_pid;

	/* a context has already been defined */
	if (ctx) return -EBUSY;

	/*
	 * not yet supported
	 */
	if (task != current) return -EINVAL;

	if (__copy_from_user(&tmp, req, sizeof(tmp))) return -EFAULT;

	ret = pfx_is_sane(task, &tmp);
	if (ret < 0) return ret;

	ctx_flags = tmp.ctx_flags;

	ret = -EBUSY;

	ret = pfm_reserve_session(task, ctx_flags & PFM_FL_SYSTEM_WIDE, tmp.ctx_cpu_mask);
	if (ret) goto abort;

	ret = -ENOMEM;

	ctx = pfm_context_alloc();
	if (!ctx) goto error;

	/* record the creator (important for inheritance) */
	ctx->ctx_owner = current;

	notify_pid = tmp.ctx_notify_pid;

	spin_lock_init(&ctx->ctx_lock);

	if (notify_pid == current->pid) {

		ctx->ctx_notify_task = current;
		task->thread.pfm_context = ctx;

	} else if (notify_pid != 0) {
		struct task_struct *notify_task;

		read_lock(&tasklist_lock);

		notify_task = find_task_by_pid(notify_pid);

		if (notify_task) {

			ret = -EPERM;

			/*
			 * check if we can send this task a signal
			 */
			if (pfm_bad_permissions(notify_task)) {
				read_unlock(&tasklist_lock);
				goto buffer_error;
			}

			/*
			 * make visible
			 * must be done inside critical section
			 *
			 * if the initialization does not go through it is still
			 * okay because child will do the scan for nothing which
			 * won't hurt.
			 */
			task->thread.pfm_context = ctx;

			/*
			 * will cause task to check on exit for monitored
			 * processes that would notify it. see release_thread()
			 * Note: the scan MUST be done in release_thread(), once the
			 * task has been detached from the tasklist, otherwise you are
			 * exposed to race conditions.
			 */
			atomic_add(1, &notify_task->thread.pfm_notifiers_check);

			ctx->ctx_notify_task = notify_task;
		}
		read_unlock(&tasklist_lock);
	}

	/*
	 * notification process does not exist
	 */
	if (notify_pid != 0 && ctx->ctx_notify_task == NULL) {
		ret = -EINVAL;
		goto buffer_error;
	}

	if (tmp.ctx_smpl_entries) {
		DBprintk(("sampling entries=%lu\n", tmp.ctx_smpl_entries));

		ret = pfm_smpl_buffer_alloc(ctx, tmp.ctx_smpl_regs,
					    tmp.ctx_smpl_entries, &uaddr);
		if (ret < 0) goto buffer_error;

		tmp.ctx_smpl_vaddr = uaddr;
	}
	/* initialization of context's flags */
	ctx->ctx_fl_inherit = ctx_flags & PFM_FL_INHERIT_MASK;
	ctx->ctx_fl_block   = (ctx_flags & PFM_FL_NOTIFY_BLOCK) ? 1 : 0;
	ctx->ctx_fl_system  = (ctx_flags & PFM_FL_SYSTEM_WIDE) ? 1 : 0;
	ctx->ctx_fl_frozen  = 0;
	/*
	 * setting this flag to 0 here means that the creator or the task the
	 * context is being attached to are granted access. Given that a
	 * context can only be created for the calling process, this in effect
	 * only allows the creator to access the context. See pfm_protect()
	 * for more.
	 */
	ctx->ctx_fl_protected = 0;

	/* for system wide mode only (only 1 bit set) */
	ctx->ctx_cpu = cpu;

	atomic_set(&ctx->ctx_last_cpu, -1); /* SMP only, means no CPU */

	/* may be redundant with memset() but at least it's easier to remember */
	atomic_set(&ctx->ctx_saving_in_progress, 0);
	atomic_set(&ctx->ctx_is_busy, 0);

	sema_init(&ctx->ctx_restart_sem, 0); /* init this semaphore to locked */

	if (__copy_to_user(req, &tmp, sizeof(tmp))) {
		ret = -EFAULT;
		goto buffer_error;
	}

	DBprintk(("context=%p, pid=%d notify_task=%p\n",
			(void *)ctx, task->pid, ctx->ctx_notify_task));

	DBprintk(("context=%p, pid=%d flags=0x%x inherit=%d block=%d system=%d\n",
			(void *)ctx, task->pid, ctx_flags, ctx->ctx_fl_inherit,
			ctx->ctx_fl_block, ctx->ctx_fl_system));

	/*
	 * when no notification is required, we can make this visible at the last moment
	 */
	if (notify_pid == 0) task->thread.pfm_context = ctx;
	/*
	 * pin task to CPU and force reschedule on exit to ensure
	 * that when back to user level the task runs on the designated
	 * CPU.
	 */
	if (ctx->ctx_fl_system) {
		ctx->ctx_saved_cpus_allowed = task->cpus_allowed;
		task->cpus_allowed = 1UL << cpu;
		task->need_resched = 1;
		DBprintk(("[%d] rescheduled allowed=0x%lx\n", task->pid, task->cpus_allowed));
	}

	return 0;

buffer_error:
	pfm_context_free(ctx);
error:
	pfm_unreserve_session(task, ctx_flags & PFM_FL_SYSTEM_WIDE, tmp.ctx_cpu_mask);
abort:
	/* make sure we don't leave anything behind */
	task->thread.pfm_context = NULL;

	return ret;
}
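/*
 * Aside (not kernel code): pfm_context_create() above uses the classic
 * kernel error-unwind idiom: resources are acquired in order (session,
 * then context, then sampling buffer) and the error labels release them
 * in reverse order, each label falling through to the next. A minimal
 * standalone sketch of the same pattern, with hypothetical malloc()'d
 * resources standing in for pfm_reserve_session()/pfm_context_alloc():
 */
#if 0
#include <stdlib.h>

int create_thing(void)
{
	void *a = NULL, *b = NULL;
	int ret = -1;

	a = malloc(64);			/* first resource  */
	if (!a) goto abort;

	b = malloc(64);			/* second resource */
	if (!b) goto free_a;

	/* ... further initialization that may fail ... */
	if (0 /* failure */) goto free_b;

	return 0;			/* success: keep both resources */

free_b:	free(b);			/* release in reverse order of acquisition */
free_a:	free(a);
abort:
	return ret;
}
#endif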
static inline unsigned long
pfm_new_counter_value(pfm_counter_t *reg, int is_long_reset)
{
	unsigned long val = is_long_reset ? reg->long_reset : reg->short_reset;
	unsigned long new_seed, old_seed = reg->seed, mask = reg->mask;
	extern unsigned long carta_random32 (unsigned long seed);

	if (reg->flags & PFM_REGFL_RANDOM) {
		new_seed = carta_random32(old_seed);
		val -= (old_seed & mask);	/* counter values are negative numbers! */
		if ((mask >> 32) != 0)
			/* construct a full 64-bit random value: */
			new_seed |= carta_random32(old_seed >> 32) << 32;
		reg->seed = new_seed;
	}
	reg->lval = val;
	return val;
}

static void
pfm_reset_regs(pfm_context_t *ctx, unsigned long *ovfl_regs, int flag)
{
	unsigned long mask = ovfl_regs[0];
	unsigned long reset_others = 0UL;
	unsigned long val;
	int i, is_long_reset = (flag == PFM_PMD_LONG_RESET);

	/*
	 * now restore reset value on sampling overflowed counters
	 */
	mask >>= PMU_FIRST_COUNTER;
	for (i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {
		if (mask & 0x1) {
			val = pfm_new_counter_value(ctx->ctx_soft_pmds + i, is_long_reset);
			reset_others |= ctx->ctx_soft_pmds[i].reset_pmds[0];

			DBprintk_ovfl(("[%d] %s reset soft_pmd[%d]=%lx\n", current->pid,
				is_long_reset ? "long" : "short", i, val));

			/* upper part is ignored on rval */
			pfm_write_soft_counter(ctx, i, val);
		}
	}

	/*
	 * Now take care of resetting the other registers
	 */
	for (i = 0; reset_others; i++, reset_others >>= 1) {

		if ((reset_others & 0x1) == 0) continue;

		val = pfm_new_counter_value(ctx->ctx_soft_pmds + i, is_long_reset);

		if (PMD_IS_COUNTING(i)) {
			pfm_write_soft_counter(ctx, i, val);
		} else {
			ia64_set_pmd(i, val);
		}
		DBprintk_ovfl(("[%d] %s reset_others pmd[%d]=%lx\n", current->pid,
			is_long_reset ? "long" : "short", i, val));
	}
	ia64_srlz_d();
}
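/*
 * Aside (not kernel code): both loops in pfm_reset_regs() walk a bitmask
 * with the shift-and-test idiom, so the loop terminates as soon as no set
 * bits remain rather than scanning all 64 positions. A standalone sketch
 * of that idiom, kept under #if 0:
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long mask = 0x29;	/* bits 0, 3 and 5 set */
	int i;

	/* visit the index of every set bit, stopping once mask is empty */
	for (i = 0; mask; i++, mask >>= 1) {
		if ((mask & 0x1) == 0) continue;
		printf("bit %d is set\n", i);	/* prints 0, 3, 5 */
	}
	return 0;
}
#endif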
"long" : "short", i, val)); } ia64_srlz_d();}static intpfm_write_pmcs(struct task_struct *task, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs){ struct thread_struct *th = &task->thread; pfarg_reg_t tmp, *req = (pfarg_reg_t *)arg; unsigned long value, reset_pmds; unsigned int cnum, reg_flags, flags; int i; int ret = -EINVAL; if (!CTX_IS_ENABLED(ctx)) return -EINVAL; /* XXX: ctx locking may be required here */ for (i = 0; i < count; i++, req++) { if (__copy_from_user(&tmp, req, sizeof(tmp))) return -EFAULT; cnum = tmp.reg_num; reg_flags = tmp.reg_flags; value = tmp.reg_value; reset_pmds = tmp.reg_reset_pmds[0]; flags = 0; /* * we reject all non implemented PMC as well * as attempts to modify PMC[0-3] which are used * as status registers by the PMU */ if (!PMC_IS_IMPL(cnum) || cnum < 4) { DBprintk(("pmc[%u] is unimplemented or invalid\n", cnum)); goto error; } /* * A PMC used to configure monitors must be: * - system-wide session: privileged monitor * - per-task : user monitor * any other configuration is rejected. */ if (PMC_IS_MONITOR(cnum) || PMC_IS_COUNTING(cnum)) { DBprintk(("pmc[%u].pm=%ld\n", cnum, PMC_PM(cnum, value))); if (ctx->ctx_fl_system ^ PMC_PM(cnum, value)) { DBprintk(("pmc_pm=%ld fl_system=%d\n", PMC_PM(cnum, value), ctx->ctx_fl_system)); goto error; } } if (PMC_IS_COUNTING(cnum)) { pfm_monitor_t *p = (pfm_monitor_t *)&value; /* * enforce generation of overflow interrupt. Necessary on all * CPUs. */ p->pmc_oi = 1; if (reg_flags & PFM_REGFL_OVFL_NOTIFY) { /* * must have a target for the signal */ if (ctx->ctx_notify_task == NULL) { DBprintk(("cannot set ovfl_notify: no notify_task\n")); goto error; } flags |= PFM_REGFL_OVFL_NOTIFY; } if (reg_flags & PFM_REGFL_RANDOM) flags |= PFM_REGFL_RANDOM; /* verify validity of reset_pmds */ if ((reset_pmds & pmu_conf.impl_pmds[0]) != reset_pmds) { DBprintk(("invalid reset_pmds 0x%lx for pmc%u\n", reset_pmds, cnum)); goto error; }