📄 fasttrap.c
字号:
/*
 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only.
 * See the file usr/src/LICENSING.NOTICE in this distribution or
 * http://www.opensolaris.org/license/ for details.
 */
/* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
/* All Rights Reserved */

#pragma ident "@(#)fasttrap.c 1.10 04/11/07 SMI"

#include <sys/atomic.h>
#include <sys/errno.h>
#include <sys/stat.h>
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/systm.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/cpuvar.h>
#include <sys/kmem.h>
#include <sys/strsubr.h>
#include <sys/fasttrap.h>
#include <sys/fasttrap_impl.h>
#include <sys/fasttrap_isa.h>
#include <sys/dtrace.h>
#include <sys/dtrace_impl.h>
#include <sys/sysmacros.h>
#include <sys/frame.h>
#include <sys/stack.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/policy.h>
#include <sys/ontrap.h>
#include <sys/vmsystm.h>
#include <sys/prsystm.h>
#include <vm/as.h>
#include <vm/seg.h>
#include <vm/seg_dev.h>
#include <vm/seg_vn.h>
#include <vm/seg_spt.h>
#include <vm/seg_kmem.h>

/*
 * User-Land Trap-Based Tracing
 * ----------------------------
 *
 * The fasttrap provider allows DTrace consumers to instrument any user-level
 * instruction to gather data; this includes probes with semantic
 * significance like entry and return as well as simple offsets into the
 * function. While the specific techniques used are very ISA specific, the
 * methodology is generalizable to any architecture.
 *
 *
 * The General Methodology
 * -----------------------
 *
 * With the primary goal of tracing every user-land instruction and the
 * limitation that we can't trust user space so don't want to rely on much
 * information there, we begin by replacing the instructions we want to trace
 * with trap instructions. Each instruction we overwrite is saved into a hash
 * table keyed by process ID and pc address.
 * When we enter the kernel due to
 * this trap instruction, we need the effects of the replaced instruction to
 * appear to have occurred before we proceed with the user thread's
 * execution.
 *
 * Each user level thread is represented by a ulwp_t structure which is
 * always easily accessible through a register. The most basic way to produce
 * the effects of the instruction we replaced is to copy that instruction out
 * to a bit of scratch space reserved in the user thread's ulwp_t structure
 * (a sort of kernel-private thread local storage), set the PC to that
 * scratch space and single step. When we reenter the kernel after single
 * stepping the instruction we must then adjust the PC to point to what would
 * normally be the next instruction. Of course, special care must be taken
 * for branches and jumps, but these represent such a small fraction of any
 * instruction set that writing the code to emulate these in the kernel is
 * not too difficult.
 *
 * Return probes may require several tracepoints to trace every return site,
 * and, conversely, each tracepoint may activate several probes (the entry
 * and offset 0 probes, for example). To solve this multiplexing problem,
 * tracepoints contain lists of probes to activate and probes contain lists
 * of tracepoints to enable. If a probe is activated, it adds its ID to
 * existing tracepoints or creates new ones as necessary.
 *
 * Most probes are activated _before_ the instruction is executed, but return
 * probes are activated _after_ the effects of the last instruction of the
 * function are visible. Return probes must be fired _after_ we have
 * single-stepped the instruction whereas all other probes are fired
 * beforehand.
 */

/* Devinfo node for the fasttrap pseudo-device. */
static dev_info_t *fasttrap_devi;
/* Provider/meta-provider handles registered with the DTrace framework. */
static dtrace_provider_id_t fasttrap_id;
static dtrace_meta_provider_id_t fasttrap_meta_id;

/*
 * Asynchronous provider-cleanup state: a pending timeout(9F) id (0 = none;
 * the value 1 is used as a "disabled" sentinel during detach -- see
 * fasttrap_pid_cleanup_cb), a mutex guarding it, and a work-pending flag.
 */
static timeout_id_t fasttrap_timeout;
static kmutex_t fasttrap_cleanup_mtx;
static uint_t fasttrap_cleanup_work;

/*
 * Generation count on modifications to the global tracepoint lookup table.
 */
static volatile uint64_t fasttrap_mod_gen;

/*
 * When the fasttrap provider is loaded, fasttrap_max is set to either
 * FASTTRAP_MAX_DEFAULT or the value for fasttrap-max-probes in the
 * fasttrap.conf file. Each time a probe is created, fasttrap_total is
 * incremented by the number of tracepoints that may be associated with that
 * probe; fasttrap_total is capped at fasttrap_max.
 */
#define FASTTRAP_MAX_DEFAULT 250000
static uint32_t fasttrap_max;
static uint32_t fasttrap_total;

/* Default hash-table sizes (powers of two, masked by fth_mask). */
#define FASTTRAP_TPOINTS_DEFAULT_SIZE 0x4000
#define FASTTRAP_PROVIDERS_DEFAULT_SIZE 0x100

/* Name under which the per-process pid provider is registered. */
#define FASTTRAP_PID_NAME "pid"

/* Global tracepoint and provider hash tables. */
fasttrap_hash_t fasttrap_tpoints;
static fasttrap_hash_t fasttrap_provs;

dtrace_id_t fasttrap_probe_id;
static int fasttrap_count; /* ref count */
static int fasttrap_pid_count; /* pid ref count */
static kmutex_t fasttrap_count_mtx; /* lock on ref count */

static int fasttrap_tracepoint_enable(proc_t *, fasttrap_probe_t *, uint_t);
static void fasttrap_tracepoint_disable(proc_t *, fasttrap_probe_t *, uint_t);

static fasttrap_provider_t *fasttrap_provider_lookup(pid_t, const char *,
    const dtrace_pattr_t *);
static void fasttrap_provider_free(fasttrap_provider_t *);
static void fasttrap_provider_retire(fasttrap_provider_t *);

/* Bucket index for a (pid, provider-name) pair in fasttrap_provs. */
#define FASTTRAP_PROVS_INDEX(pid, name) \
	((fasttrap_hash_str(name) + (pid)) & fasttrap_provs.fth_mask)

/*
 * Return the 1-based position of the highest set bit in i, or 0 if i is 0
 * (i.e. the number of bits needed to represent i).
 */
static int
fasttrap_highbit(ulong_t i)
{
	int h = 1;

	if (i == 0)
		return (0);
#ifdef _LP64
	if (i & 0xffffffff00000000ul) {
		h += 32; i >>= 32;
	}
#endif
	if (i & 0xffff0000) {
		h += 16; i >>= 16;
	}
	if (i & 0xff00) {
		h += 8; i >>= 8;
	}
	if (i & 0xf0) {
		h += 4; i >>= 4;
	}
	if (i & 0xc) {
		h += 2; i >>= 2;
	}
	if (i & 0x2) {
		h += 1;
	}
	return (h);
}

/*
 * ELF-style string hash, used to index the provider hash table
 * (see FASTTRAP_PROVS_INDEX above).
 */
static uint_t
fasttrap_hash_str(const char *p)
{
	unsigned int g;
	uint_t hval = 0;

	while (*p) {
		hval = (hval << 4) + *p++;
		if ((g = (hval & 0xf0000000)) != 0)
			hval ^= g >> 24;
		hval &= ~g;
	}
	return (hval);
}

/*
 * Post a SIGTRAP (si_code TRAP_DTRACE, faulting address pc) to thread t in
 * process p.
 */
void
fasttrap_sigtrap(proc_t *p, kthread_t *t, uintptr_t pc)
{
	sigqueue_t *sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
	sqp->sq_info.si_signo = SIGTRAP;
	sqp->sq_info.si_code = TRAP_DTRACE;
	sqp->sq_info.si_addr = (caddr_t)pc;

	/* Queue the preallocated sigqueue entry under p_lock. */
	mutex_enter(&p->p_lock);
	sigaddqa(p, t, sqp);
	mutex_exit(&p->p_lock);

	if (t != NULL)
		aston(t);
}

/*
 * This function ensures that no threads are actively using the memory
 * associated with probes that were formerly live.
 */
static void
fasttrap_mod_barrier(uint64_t gen)
{
	int i;

	if (gen < fasttrap_mod_gen)
		return;

	fasttrap_mod_gen++;

	/*
	 * Acquire and release every CPU's pid lock in turn; once this
	 * completes, no CPU can still be running under the old generation.
	 */
	for (i = 0; i < NCPU; i++) {
		mutex_enter(&cpu_core[i].cpuc_pid_lock);
		mutex_exit(&cpu_core[i].cpuc_pid_lock);
	}
}

/*
 * This is the timeout's callback for cleaning up the providers and their
 * probes.
 */
/*ARGSUSED*/
static void
fasttrap_pid_cleanup_cb(void *data)
{
	fasttrap_provider_t **fpp, *fp;
	fasttrap_bucket_t *bucket;
	dtrace_provider_id_t provid;
	int i, later;

	/* Reentrancy guard: this callback is never scheduled concurrently. */
	static volatile int in = 0;
	ASSERT(in == 0);
	in = 1;

	mutex_enter(&fasttrap_cleanup_mtx);
	while (fasttrap_cleanup_work) {
		fasttrap_cleanup_work = 0;
		mutex_exit(&fasttrap_cleanup_mtx);

		later = 0;

		/*
		 * Iterate over all the providers trying to remove the marked
		 * ones. If a provider is marked, but not defunct, we just
		 * have to take a crack at removing it -- it's no big deal if
		 * we can't.
		 */
		for (i = 0; i < fasttrap_provs.fth_nent; i++) {
			bucket = &fasttrap_provs.fth_table[i];
			mutex_enter(&bucket->ftb_mtx);
			fpp = (fasttrap_provider_t **)&bucket->ftb_data;

			while ((fp = *fpp) != NULL) {
				if (!fp->ftp_marked) {
					fpp = &fp->ftp_next;
					continue;
				}

				mutex_enter(&fp->ftp_mtx);

				/*
				 * If this provider is referenced either
				 * because it is a USDT provider or is being
				 * modified, we can't unregister or even
				 * condense.
				 */
				if (fp->ftp_ccount != 0) {
					mutex_exit(&fp->ftp_mtx);
					fp->ftp_marked = 0;
					continue;
				}

				if (!fp->ftp_defunct || fp->ftp_rcount != 0)
					fp->ftp_marked = 0;

				mutex_exit(&fp->ftp_mtx);

				/*
				 * If we successfully unregister this
				 * provider we can remove it from the hash
				 * chain and free the memory.
				 * If our attempt
				 * to unregister fails and this is a defunct
				 * provider, increment our flag to try again
				 * pretty soon. If we've consumed more than
				 * half of our total permitted number of
				 * probes call dtrace_condense() to try to
				 * clean out the unenabled probes.
				 */
				provid = fp->ftp_provid;
				if (dtrace_unregister(provid) != 0) {
					if (fasttrap_total > fasttrap_max / 2)
						(void) dtrace_condense(provid);
					later += fp->ftp_marked;
					fpp = &fp->ftp_next;
				} else {
					/* Unlink from the chain and free. */
					*fpp = fp->ftp_next;
					fasttrap_provider_free(fp);
				}
			}
			mutex_exit(&bucket->ftb_mtx);
		}

		mutex_enter(&fasttrap_cleanup_mtx);
	}

	ASSERT(fasttrap_timeout != 0);

	/*
	 * If we were unable to remove a defunct provider, try again after
	 * a second. This situation can occur in certain circumstances where
	 * providers cannot be unregistered even though they have no probes
	 * enabled because of an execution of dtrace -l or something similar.
	 * If the timeout has been disabled (set to 1 because we're trying
	 * to detach), we set fasttrap_cleanup_work to ensure that we'll
	 * get a chance to do that work if and when the timeout is reenabled
	 * (if detach fails).
	 */
	if (later > 0 && fasttrap_timeout != (timeout_id_t)1)
		fasttrap_timeout = timeout(&fasttrap_pid_cleanup_cb, NULL, hz);
	else if (later > 0)
		fasttrap_cleanup_work = 1;
	else
		fasttrap_timeout = 0;

	mutex_exit(&fasttrap_cleanup_mtx);
	in = 0;
}

/*
 * Activates the asynchronous cleanup mechanism.
 */
static void
fasttrap_pid_cleanup(void)
{
	mutex_enter(&fasttrap_cleanup_mtx);
	fasttrap_cleanup_work = 1;
	/* Arm the callback (one tick from now) only if it isn't pending. */
	if (fasttrap_timeout == 0)
		fasttrap_timeout = timeout(&fasttrap_pid_cleanup_cb, NULL, 1);
	mutex_exit(&fasttrap_cleanup_mtx);
}

/*
 * This is called from cfork() via dtrace_fasttrap_fork(). The child
 * process's address space is (roughly) a copy of the parent process's so
 * we have to remove all the instrumentation we had previously enabled in the
 * parent.
 */
static void
fasttrap_fork(proc_t *p, proc_t *cp)
{
	pid_t ppid = p->p_pid;
	int i;

	ASSERT(curproc == p);
	ASSERT(p->p_proc_flag & P_PR_LOCK);
	ASSERT(p->p_dtrace_count > 0);
	ASSERT(cp->p_dtrace_count == 0);

	/*
	 * This would be simpler and faster if we maintained per-process
	 * hash tables of enabled tracepoints. It could, however, potentially
	 * slow down execution of a tracepoint since we'd need to go
	 * through two levels of indirection. In the future, we should
	 * consider either maintaining per-process ancillary lists of
	 * enabled tracepoints or hanging a pointer to a per-process hash
	 * table of enabled tracepoints off the proc structure.
	 */

	/*
	 * We don't have to worry about the child process disappearing
	 * because we're in fork().
	 */
	mutex_enter(&cp->p_lock);
	sprlock_proc(cp);
	mutex_exit(&cp->p_lock);

	/*
	 * Iterate over every tracepoint looking for ones that belong to the
	 * parent process, and remove each from the child process.
	 */
	for (i = 0; i < fasttrap_tpoints.fth_nent; i++) {
		fasttrap_tracepoint_t *tp;
		fasttrap_bucket_t *bucket = &fasttrap_tpoints.fth_table[i];

		mutex_enter(&bucket->ftb_mtx);
		for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
			if (tp->ftt_pid == ppid &&
			    !tp->ftt_prov->ftp_defunct) {
				int ret = fasttrap_tracepoint_remove(cp, tp);
				ASSERT(ret == 0);
			}
		}
		mutex_exit(&bucket->ftb_mtx);
	}

	mutex_enter(&cp->p_lock);
	sprunlock(cp);
}

/*
 * This is called from proc_exit() or from exec_common() if p_dtrace_probes
 * is set on the proc structure to indicate that there is a pid provider
 * associated with this process.
 */
static void
fasttrap_exec_exit(proc_t *p)
{
	fasttrap_provider_t *provider;

	ASSERT(p == curproc);
	ASSERT(MUTEX_HELD(&p->p_lock));

	/*
	 * Drop p_lock across the provider lookup/retire; it is reacquired
	 * before returning to satisfy the caller's locking contract.
	 */
	mutex_exit(&p->p_lock);

	/*
	 * We clean up the pid provider for this process here; user-land
	 * static probes are handled by the meta-provider remove entry point.
	 */
	if ((provider = fasttrap_provider_lookup(p->p_pid,
	    FASTTRAP_PID_NAME, NULL)) != NULL)
		fasttrap_provider_retire(provider);

	mutex_enter(&p->p_lock);
}

/*
 * DTrace dtps_provide entry point for the pid provider.
 */
/*ARGSUSED*/
static void
fasttrap_pid_provide(void *arg, const dtrace_probedesc_t *desc)
{
	/*
	 * There are no "default" pid probes.
	 */
}

/*
 * DTrace dtps_provide entry point for the fasttrap provider: create the
 * single fasttrap:::fasttrap probe if it doesn't already exist.
 */
/*ARGSUSED*/
static void
fasttrap_provide(void *arg, const dtrace_probedesc_t *desc)
{
	if (dtrace_probe_lookup(fasttrap_id, NULL,
	    "fasttrap", "fasttrap") == 0)
		fasttrap_probe_id = dtrace_probe_create(fasttrap_id, NULL,
		    "fasttrap", "fasttrap", FASTTRAP_AFRAMES, NULL);
}

/*
 * Enable tracepoint number index of the given probe in process p.
 * (NOTE(review): definition is truncated at the end of this source view.)
 */
static int
fasttrap_tracepoint_enable(proc_t *p, fasttrap_probe_t *probe, uint_t index)
{
	fasttrap_tracepoint_t *tp, *new_tp = NULL;
	fasttrap_bucket_t *bucket;
	fasttrap_id_t *id;
	pid_t pid;
	uintptr_t pc;

	ASSERT(index < probe->ftp_ntps);

	pid = probe->ftp_pid;
	pc = probe->ftp_tps[index].fit_tp->ftt_pc;
	id = &probe->ftp_tps[index].fit_id;

	ASSERT(probe->ftp_tps[index].fit_tp->ftt_pid == pid);

	ASSERT(!(p->p_flag & SVFORK));

	/*
	 * Before we make any modifications, make sure we've imposed a barrier
	 * on the generation in which this probe was last modified.
	 */
	fasttrap_mod_barrier(probe->ftp_gen);

	bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];

	/*
	 * If the tracepoint has already been enabled, just add our id to the
	 * list of interested probes. This may be our second time through
	 * this path in which case we'll have constructed the tracepoint we'd
	 * like to install. If we can't find a match, and have an allocated
	 * tracepoint ready to go, enable that one now.
	 *
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -