/* 📄 lockstat.c */
/* 字号: (page chrome from the code viewer: "font size") */
/*
 * (Continuation of dprog_add(): its signature and the initial size/offset
 * computation lie before this view.  This tail grows the D program buffer
 * g_prog and appends the vsnprintf()-formatted text.)
 */
		offs = 0;
	} else {
		/* Append on top of the previous string's terminating NUL. */
		offs = g_proglen - 1;
	}

	g_proglen = offs + size;

	/*
	 * NOTE(review): g_prog is overwritten by the realloc() result, but
	 * fail() terminates the process on the error path, so the old
	 * pointer cannot leak here.
	 */
	if ((g_prog = realloc(g_prog, g_proglen)) == NULL)
		fail(1, "failed to reallocate program text");

	(void) vsnprintf(&g_prog[offs], size, fmt, args);
}

/*
 * This function may read like an open sewer, but keep in mind that programs
 * that generate other programs are rarely pretty.  If one has the unenviable
 * task of maintaining or -- worse -- extending this code, use the -V option
 * to examine the D program as generated by this function.
 *
 * event is an index into g_event_info[]; an event whose name is empty is
 * disabled and generates no D clauses at all.
 */
static void
dprog_addevent(int event)
{
	ls_event_info_t *info = &g_event_info[event];
	char *pred = NULL;
	char stack[20];		/* formatted stack() action or aggregation key */
	const char *arg0, *caller;
	char *arg1 = "arg1";	/* D expression for the value being aggregated */
	char buf[80];
	hrtime_t dur;
	int depth;

	if (info->ev_name[0] == '\0')
		return;

	if (info->ev_type == 'I') {
		/*
		 * For interrupt events, arg0 (normally the lock pointer) is
		 * the CPU address plus the current pil, and arg1 (normally
		 * the number of nanoseconds) is the number of nanoseconds
		 * late -- and it's stored in arg2.
		 */
		arg0 = "(uintptr_t)curthread->t_cpu + \n"
		    "\t curthread->t_cpu->cpu_profile_pil";
		caller = "(uintptr_t)arg0";
		arg1 = "arg2";
	} else {
		arg0 = "(uintptr_t)arg0";
		caller = "caller";
	}

	if (g_recsize > LS_HIST) {
		/* Derive the requested stack depth back from the record size. */
		for (depth = 0; g_recsize > LS_STACK(depth); depth++)
			continue;

		if (g_tracing) {
			(void) sprintf(stack, "\tstack(%d);\n", depth);
		} else {
			(void) sprintf(stack, ", stack(%d)", depth);
		}
	} else {
		(void) sprintf(stack, "");
	}

	if (info->ev_acquire != NULL) {
		/*
		 * If this is a hold event, we need to generate an additional
		 * clause for the acquire; the clause for the release will be
		 * generated with the aggregating statement, below.
		 */
		dprog_add("%s\n", info->ev_acquire);
		predicate_add(&pred, info->ev_predicate, NULL, 0);
		predicate_add(&pred, g_predicate, NULL, 0);
		if (pred != NULL)
			dprog_add("/%s/\n", pred);

		dprog_add("{\n");
		(void) sprintf(buf, "self->ev%d[(uintptr_t)arg0]", event);

		if (info->ev_type == 'H') {
			/* Hold event: remember the acquire timestamp. */
			dprog_add("\t%s = timestamp;\n", buf);
		} else {
			/*
			 * If this isn't a hold event, it's the recursive
			 * error event.  For this, we simply bump the
			 * thread-local, per-lock count.
			 */
			dprog_add("\t%s++;\n", buf);
		}

		dprog_add("}\n\n");
		predicate_destroy(&pred);
		pred = NULL;

		if (info->ev_type == 'E') {
			/*
			 * If this is the recursive lock error event, we need
			 * to generate an additional clause to decrement the
			 * thread-local, per-lock count.  This assures that we
			 * only execute the aggregating clause if we have
			 * recursive entry.
			 */
			dprog_add("%s\n", info->ev_name);
			dprog_add("/%s/\n{\n\t%s--;\n}\n\n", buf, buf);
		}

		predicate_add(&pred, buf, NULL, 0);

		if (info->ev_type == 'H') {
			/* The aggregated value is the hold duration. */
			(void) sprintf(buf, "timestamp -\n\t "
			    "self->ev%d[(uintptr_t)arg0]", event);
		}

		arg1 = buf;
	} else {
		predicate_add(&pred, info->ev_predicate, NULL, 0);
		if (info->ev_type != 'I')
			predicate_add(&pred, g_predicate, NULL, 0);
		else
			predicate_add(&pred, g_ipredicate, NULL, 0);
	}

	if ((dur = g_min_duration[event]) != 0)
		predicate_add(&pred, arg1, ">=", dur);

	dprog_add("%s\n", info->ev_name);

	if (pred != NULL)
		dprog_add("/%s/\n", pred);
	predicate_destroy(&pred);

	dprog_add("{\n");

	if (g_tracing) {
		dprog_add("\ttrace(%dULL);\n", event);
		dprog_add("\ttrace(%s);\n", arg0);
		dprog_add("\ttrace(%s);\n", caller);
		/*
		 * stack was fully formatted above and contains no remaining
		 * conversion specifiers, so passing it as the format string
		 * is safe here.
		 */
		dprog_add(stack);
	} else {
		dprog_add("\t@avg[%dULL, %s, %s%s] = avg(%s);\n",
		    event, arg0, caller, stack, arg1);

		if (g_recsize >= LS_HIST) {
			dprog_add("\t@hist[%dULL, %s, %s%s] = quantize"
			    "(%s);\n", event, arg0, caller, stack, arg1);
		}
	}

	if (info->ev_acquire != NULL)
		dprog_add("\tself->ev%d[arg0] = 0;\n", event);

	dprog_add("}\n\n");
}

/*
 * Compile the accumulated D program in g_prog, enable its probes, and start
 * tracing.  (Body continues on the next source line.)
 */
static void
dprog_compile()
{
	dtrace_prog_t *prog;
	dtrace_proginfo_t info;

	if (g_Vflag) {
		(void)
/* (continuation of dprog_compile(); the "(void)" cast ends the previous line) */
		    fprintf(stderr, "lockstat: vvvv D program vvvv\n");
		(void) fputs(g_prog, stderr);
		(void) fprintf(stderr, "lockstat: ^^^^ D program ^^^^\n");
	}

	if ((prog = dtrace_program_strcompile(g_dtp, g_prog,
	    DTRACE_PROBESPEC_NAME, 0, 0, NULL)) == NULL)
		dfail("failed to compile program");

	if (dtrace_program_exec(g_dtp, prog, &info) == -1)
		dfail("failed to enable probes");

	if (dtrace_go(g_dtp) != 0)
		dfail("couldn't start tracing");
}

/*
 * SIGUSR1 handler armed by status_init().  It does no work itself; the
 * periodic signal exists only to wake the process so status can be polled.
 * NOTE(review): assigned to act.sa_handler below, which conventionally takes
 * an int argument while this takes none -- confirm against the original
 * build's expectations.
 */
static void
status_fire(void)
{}

/*
 * Arrange for a periodic SIGUSR1 at the DTrace 'statusrate' interval, using
 * a CLOCK_REALTIME POSIX timer.  Exits via dfail() on any failure.
 */
static void
status_init(void)
{
	dtrace_optval_t val;
	struct sigaction act;
	struct itimerspec ts;
	struct sigevent ev;
	timer_t tid;

	if (dtrace_getopt(g_dtp, "statusrate", &val) == -1)
		dfail("failed to get 'statusrate'");

	(void) sigemptyset(&act.sa_mask);
	act.sa_flags = 0;
	act.sa_handler = status_fire;
	(void) sigaction(SIGUSR1, &act, NULL);

	ev.sigev_notify = SIGEV_SIGNAL;
	ev.sigev_signo = SIGUSR1;

	if (timer_create(CLOCK_REALTIME, &ev, &tid) == -1)
		dfail("cannot create CLOCK_REALTIME timer");

	/* 'statusrate' is in nanoseconds; split into sec/nsec for the timer. */
	ts.it_value.tv_sec = val / NANOSEC;
	ts.it_value.tv_nsec = val % NANOSEC;
	ts.it_interval = ts.it_value;

	if (timer_settime(tid, TIMER_RELTIME, &ts, NULL) == -1)
		dfail("cannot set time on CLOCK_REALTIME timer");
}

/*
 * Poll the DTrace status; exits via dfail() if the consumer has aborted.
 */
static void
status_check(void)
{
	if (dtrace_status(g_dtp) == -1)
		dfail("dtrace_status()");
}

/*
 * Decode one DTrace record group into an lsrec_t.  The expected layout is:
 * record 0 = event id (uint64_t), record 1 = lock address (uintptr_t),
 * record 2 = caller (uintptr_t), and -- when stacks were recorded -- record
 * 3 = the stack (pc_t array; frame 0 is skipped as it is the probe site).
 * Exits via fail() on any layout mismatch.
 */
static void
lsrec_fill(lsrec_t *lsrec, dtrace_recdesc_t *rec, int nrecs, caddr_t data)
{
	bzero(lsrec, g_recsize);
	lsrec->ls_count = 1;

	if ((g_recsize > LS_HIST && nrecs < 4) || (nrecs < 3))
		fail(0, "truncated DTrace record");

	if (rec->dtrd_size != sizeof (uint64_t))
		fail(0, "bad event size in first record");

	/* LINTED - alignment */
	lsrec->ls_event = (uint32_t)*((uint64_t *)(data + rec->dtrd_offset));
	rec++;

	if (rec->dtrd_size != sizeof (uintptr_t))
		fail(0, "bad lock address size in second record");

	/* LINTED - alignment */
	lsrec->ls_lock = *((uintptr_t *)(data + rec->dtrd_offset));
	rec++;

	if (rec->dtrd_size != sizeof (uintptr_t))
		fail(0, "bad caller size in third record");

	/* LINTED - alignment */
	lsrec->ls_caller = *((uintptr_t *)(data + rec->dtrd_offset));
	rec++;

	if (g_recsize > LS_HIST) {
		int frames, i;
		pc_t *stack;

		frames = rec->dtrd_size / sizeof (pc_t);
		/* LINTED - alignment */
		stack = (pc_t *)(data + rec->dtrd_offset);

		/* Skip frame 0 (the probe site itself is ls_caller). */
		for (i = 1; i < frames; i++)
			lsrec->ls_stack[i - 1] = stack[i];
	}
}

/*
 * dtrace_aggregate_walk_keyvarsorted() callback: convert one aggregation
 * entry into the lsrec_t array tracked by the lsdata_t cursor in arg.
 * @avg and @hist entries for the same key arrive as separate records; the
 * @hist entry is folded into the immediately preceding @avg lsrec.
 */
static int
process_aggregate(dtrace_aggdata_t *agg, void *arg)
{
	dtrace_aggdesc_t *aggdesc = agg->dtada_desc;
	caddr_t data = agg->dtada_data;
	lsdata_t *lsdata = arg;
	lsrec_t *lsrec = lsdata->lsd_next;
	dtrace_recdesc_t *rec;
	uint64_t *avg, *quantized;
	int i, j;

	if (lsdata->lsd_count >= g_nrecs)
		return (DTRACE_AGGWALK_NEXT);

	rec = &aggdesc->dtagd_rec[0];

	if (rec->dtrd_size != sizeof (uint64_t))
		fail(0, "bad variable size in zeroth record");

	/* LINTED - alignment */
	if (*((uint64_t *)(data + rec->dtrd_offset))) {
		/*
		 * If the variable is non-zero, this is the histogram entry.
		 * We'll copy the quantized data into ls_hist, and jump over
		 * the rest.
		 */
		rec = &aggdesc->dtagd_rec[aggdesc->dtagd_nrecs - 1];

		if (rec->dtrd_size !=
		    DTRACE_QUANTIZE_NBUCKETS * sizeof (uint64_t))
			fail(0, "bad quantize size in aggregation record");

		/* LINTED - alignment */
		quantized = (uint64_t *)(data + rec->dtrd_offset);

		/* Only buckets from the zero bucket upward are kept. */
		for (i = DTRACE_QUANTIZE_ZEROBUCKET, j = 0;
		    i < DTRACE_QUANTIZE_NBUCKETS; i++, j++)
			lsrec->ls_hist[j] = quantized[i];

		goto out;
	}

	lsrec_fill(lsrec, &aggdesc->dtagd_rec[1],
	    aggdesc->dtagd_nrecs - 1, data);

	rec = &aggdesc->dtagd_rec[aggdesc->dtagd_nrecs - 1];

	if (rec->dtrd_size != 2 * sizeof (uint64_t))
		fail(0, "bad avg size in aggregation record");

	/* avg() stores {count, total}; recover both. */
	/* LINTED - alignment */
	avg = (uint64_t *)(data + rec->dtrd_offset);
	lsrec->ls_count = (uint32_t)avg[0];
	lsrec->ls_time = (uintptr_t)avg[1];

	/*
	 * When histograms were requested, hold the cursor here: the matching
	 * @hist entry for this key will complete this lsrec on the next call.
	 */
	if (g_recsize >= LS_HIST)
		return (DTRACE_AGGWALK_NEXT);

out:
	lsdata->lsd_next = (lsrec_t *)((uintptr_t)lsrec + g_recsize);
	lsdata->lsd_count++;

	return (DTRACE_AGGWALK_NEXT);
}

/*
 * dtrace_consume() probe callback for -T (tracing) mode: decode one probe
 * firing into the next lsrec_t slot.  (Body continues on the next line.)
 */
static int
process_trace(const dtrace_probedata_t *pdata, void *arg)
{
	lsdata_t *lsdata = arg;
	lsrec_t *lsrec = lsdata->lsd_next;
	dtrace_eprobedesc_t *edesc = pdata->dtpda_edesc;
/* (continuation of process_trace(), begun on the previous source line) */
	caddr_t data = pdata->dtpda_data;

	/* Drop firings beyond the record buffer's capacity. */
	if (lsdata->lsd_count >= g_nrecs)
		return (DTRACE_CONSUME_NEXT);

	lsrec_fill(lsrec, edesc->dtepd_rec, edesc->dtepd_nrecs, data);

	lsdata->lsd_next = (lsrec_t *)((uintptr_t)lsrec + g_recsize);
	lsdata->lsd_count++;

	return (DTRACE_CONSUME_NEXT);
}

/*
 * Drain DTrace data into the caller-supplied record buffer 'data' and
 * return the number of lsrec_t records filled.  In tracing mode (-T) the
 * principal buffer is consumed; otherwise the aggregations are snapped and
 * walked in key/variable-sorted order.  Exits via dfail() on any failure.
 */
static int
process_data(FILE *out, char *data)
{
	lsdata_t lsdata;

	/* LINTED - alignment */
	lsdata.lsd_next = (lsrec_t *)data;
	lsdata.lsd_count = 0;

	if (g_tracing) {
		if (dtrace_consume(g_dtp, out,
		    process_trace, NULL, &lsdata) != 0)
			dfail("failed to consume buffer");

		return (lsdata.lsd_count);
	}

	if (dtrace_aggregate_snap(g_dtp) != 0)
		dfail("failed to snap aggregate");

	if (dtrace_aggregate_walk_keyvarsorted(g_dtp,
	    process_aggregate, &lsdata) != 0)
		dfail("failed to walk aggregate");

	return (lsdata.lsd_count);
}

/*
 * DTrace drop handler: count dropped records in g_dropped so they can be
 * reported later, and keep consuming.
 */
/*ARGSUSED*/
static int
drophandler(dtrace_dropdata_t *data, void *arg)
{
	g_dropped++;
	return (DTRACE_HANDLE_OK);
}

/*
 * lockstat entry point: open libdtrace, install the drop handler, load
 * kernel symbols, then parse options.  (This function is truncated in this
 * view; it continues past the visible text, mid option-switch.)
 */
int
main(int argc, char **argv)
{
	char *data_buf;
	lsrec_t *lsp, **current, **first, **sort_buf, **merge_buf;
	FILE *out = stdout;
	/*
	 * NOTE(review): getopt() returns int; storing it in a plain char and
	 * comparing against EOF is unreliable where char is unsigned --
	 * confirm the target platforms before changing.
	 */
	char c;
	pid_t child;
	int status;
	int i, j;
	hrtime_t duration;
	char *addrp, *offp, *sizep, *evp, *lastp;
	uintptr_t addr;
	size_t size, off;
	int events_specified = 0;
	int exec_errno = 0;
	uint32_t event;
	char *filt = NULL, *ifilt = NULL;	/* -l/-f D predicates */
	uint_t data_buf_size;
	char data_buf_str[10];
	static uint64_t ev_count[LS_MAX_EVENTS + 1];
	static uint64_t ev_time[LS_MAX_EVENTS + 1];
	int err;

	if ((g_dtp = dtrace_open(DTRACE_VERSION, 0, &err)) == NULL) {
		fail(0, "cannot open dtrace library: %s",
		    dtrace_errmsg(NULL, err));
	}

	if (dtrace_handle_drop(g_dtp, &drophandler, NULL) == -1)
		dfail("couldn't establish drop handler");

	if (symtab_init() == -1)
		fail(1, "can't load kernel symbols");

	g_nrecs = DEFAULT_NRECS;

	while ((c = getopt(argc, argv,
	    "bths:n:d:i:l:f:e:ckwWgCHEATID:RpPo:V")) != EOF) {
		switch (c) {
		case 'b':
			g_recsize = LS_BASIC;
			break;

		case 't':
			g_recsize = LS_TIME;
			break;

		case 'h':
			g_recsize = LS_HIST;
			break;

		case 's':
			if (!isdigit(optarg[0]))
				usage();
			g_stkdepth = atoi(optarg);
			if (g_stkdepth > LS_MAX_STACK_DEPTH)
				fail(0, "max stack depth is %d",
				    LS_MAX_STACK_DEPTH);
			g_recsize = LS_STACK(g_stkdepth);
			break;

		case 'n':
			if (!isdigit(optarg[0]))
				usage();
			g_nrecs = atoi(optarg);
			break;

		case 'd':
			if (!isdigit(optarg[0]))
				usage();
			duration = atoll(optarg);
			/*
			 * XXX -- durations really should be per event
			 * since the units are different, but it's hard
			 * to express this nicely in the interface.
			 * Not clear yet what the cleanest solution is.
			 */
			for (i = 0; i < LS_MAX_EVENTS; i++)
				if (g_event_info[i].ev_type != 'E')
					g_min_duration[i] = duration;

			break;

		case 'i':
			if (!isdigit(optarg[0]))
				usage();
			i = atoi(optarg);
			if (i <= 0)
				usage();
			if (i > MAX_HZ)
				fail(0, "max interrupt rate is %d Hz", MAX_HZ);

			/*
			 * NOTE(review): if no event is described as
			 * "Profiling interrupt", j reaches LS_MAX_EVENTS and
			 * the sprintf below writes past the table -- this
			 * relies on the table always containing that entry.
			 */
			for (j = 0; j < LS_MAX_EVENTS; j++)
				if (strcmp(g_event_info[j].ev_desc,
				    "Profiling interrupt") == 0)
					break;

			(void) sprintf(g_event_info[j].ev_name,
			    "profile:::profile-%d", i);
			break;

		case 'l':
		case 'f':
			/*
			 * optarg is "addr[,size]" where addr may itself be
			 * "sym+off".  The first strtok pass isolates the
			 * size; the second re-tokenizes optarg on ",+" to
			 * split the symbol from the offset.
			 */
			addrp = strtok(optarg, ",");
			sizep = strtok(NULL, ",");
			addrp = strtok(optarg, ",+");
			offp = strtok(NULL, ",");

			size = sizep ? strtoul(sizep, NULL, 0) : 1;
			off = offp ? strtoul(offp, NULL, 0) : 0;

			if (addrp[0] == '0') {
				/* Literal hex address, e.g. 0x.... */
				addr = strtoul(addrp, NULL, 16) + off;
			} else {
				addr = sym_to_addr(addrp) + off;
				if (sizep == NULL)
					size = sym_size(addrp) - off;
				if (addr - off == 0)
					fail(0, "symbol '%s' not found", addrp);
				if (size == 0)
					size = 1;
			}

			if (c == 'l') {
				/* -l filters on the lock address (arg0). */
				filter_add(&filt, "arg0", addr, size);
			} else {
				/* -f filters on the caller's function. */
				filter_add(&filt, "caller", addr, size);
				filter_add(&ifilt, "arg0", addr, size);
			}
			break;

		case 'e':
			/* Comma-separated list of event numbers or N-M ranges. */
			evp = strtok_r(optarg, ",", &lastp);
			while (evp) {
				int ev1, ev2;
				char *evp2;

				(void) strtok(evp, "-");
				evp2 = strtok(NULL, "-");
				ev1 = atoi(evp);
				ev2 = evp2 ? atoi(evp2) : ev1;
				if ((uint_t)ev1 >= LS_MAX_EVENTS ||
				    (uint_t)ev2 >= LS_MAX_EVENTS || ev1 > ev2)
					fail(0, "-e events out of range");
				for (i = ev1; i <= ev2; i++)
					g_enabled[i] = 1;
				evp = strtok_r(NULL, ",", &lastp);
			}
			events_specified = 1;
			break;

		case 'c':
			g_cflag = 1;
			break;

		case 'k':
			g_kflag = 1;
			break;

		case 'w':
			g_wflag = 1;
			break;

		case 'W':
			g_Wflag = 1;
			break;

		case 'g':
			g_gflag = 1;
			break;

		case 'C':
		case 'E':
		case 'H':
		case 'I':
			/* Enable every event whose type letter matches. */
			for (i = 0; i < LS_MAX_EVENTS; i++)
				if (g_event_info[i].ev_type == c)
					g_enabled[i] = 1;
			events_specified = 1;
			break;

		case 'A':
			/* -A enables all contention ('C') and hold ('H') events. */
			for (i = 0; i < LS_MAX_EVENTS; i++)
				if (strchr("CH", g_event_info[i].ev_type))
					g_enabled[i] = 1;
			events_specified = 1;
			break;

		case 'T':
			g_tracing = 1;
			break;

		case 'D':
			if (!isdigit(optarg[0]))
				usage();
			g_topn = atoi(optarg);
/*
 * Page chrome from the code-viewer (keyboard shortcut help), not part of
 * lockstat.c itself:
 *
 * ⌨️ 快捷键说明 (shortcut help)
 * 复制代码 (copy code)           Ctrl + C
 * 搜索代码 (search code)         Ctrl + F
 * 全屏模式 (full-screen mode)    F11
 * 切换主题 (toggle theme)        Ctrl + Shift + D
 * 显示快捷键 (show shortcuts)    ?
 * 增大字号 (increase font size)  Ctrl + =
 * 减小字号 (decrease font size)  Ctrl + -
 */