📄 dt_consume.c
字号:
/*
 * NOTE(review): this span begins in the middle of dt_consume_cpu() -- the
 * function's opening (and the declarations of dtp, fp, buf, epd, rec, act,
 * addr, data, rfunc, arg, offs, i, n, quiet, start, end, last, id, drops,
 * and the "again:" label targeted below) lies before this view.  The code
 * below is the per-record tail of its consume loop.
 */
				if (dt_normalize(dtp,
				    buf->dtbd_data + offs, rec) != 0)
					return (-1);

				i++;
				continue;
			}

			/*
			 * trunc() action: the aggregation to truncate must be
			 * described by a subsequent record, so a trunc() in
			 * the final record slot is malformed.
			 */
			if (rec->dtrd_arg == DT_ACT_TRUNC) {
				if (i == epd->dtepd_nrecs - 1)
					return (dt_set_errno(dtp,
					    EDT_BADTRUNC));

				if (dt_trunc(dtp,
				    buf->dtbd_data + offs, rec) != 0)
					return (-1);

				i++;
				continue;
			}

			/*
			 * ftruncate() action: rewind the output stream to
			 * offset zero (flush first so buffered output does
			 * not survive the truncation).  A NULL stream is
			 * silently ignored.
			 */
			if (rec->dtrd_arg == DT_ACT_FTRUNCATE) {
				if (fp == NULL)
					continue;

				(void) fflush(fp);
				(void) ftruncate(fileno(fp), 0);
				(void) fseeko(fp, 0, SEEK_SET);
				continue;
			}
		}

		/*
		 * Hand the record to the caller's record callback; its return
		 * value dictates whether we skip, abort, or keep processing
		 * this record ourselves.
		 */
		rval = (*rfunc)(&data, rec, arg);

		if (rval == DTRACE_CONSUME_NEXT)
			continue;

		if (rval == DTRACE_CONSUME_ABORT)
			return (dt_set_errno(dtp, EDT_DIRABORT));

		if (rval != DTRACE_CONSUME_THIS)
			return (dt_set_errno(dtp, EDT_BADRVAL));

		if (act == DTRACEACT_STACK) {
			/* Kernel stack: record is an array of pc_t frames. */
			int depth = rec->dtrd_size / sizeof (pc_t);

			if (dt_print_stack(dtp, fp, NULL, addr, depth) < 0)
				return (-1);

			goto nextrec;
		}

		if (act == DTRACEACT_USTACK || act == DTRACEACT_JSTACK) {
			if (dt_print_ustack(dtp, fp, NULL,
			    addr, rec->dtrd_arg) < 0)
				return (-1);

			goto nextrec;
		}

		if (DTRACEACT_ISPRINTFLIKE(act)) {
			void *fmtdata;
			int (*func)(dtrace_hdl_t *, FILE *, void *,
			    const dtrace_recdesc_t *, uint_t,
			    const void *buf, size_t);

			if ((fmtdata = dt_format_lookup(dtp,
			    rec->dtrd_format)) == NULL)
				goto nofmt;

			switch (act) {
			case DTRACEACT_PRINTF:
				func = dtrace_fprintf;
				break;
			case DTRACEACT_PRINTA:
				func = dtrace_fprinta;
				break;
			case DTRACEACT_SYSTEM:
				func = dtrace_system;
				break;
			}

			/*
			 * A printf-like action may consume several records;
			 * the formatter returns how many, and we advance i
			 * past the extras.
			 */
			n = (*func)(dtp, fp, fmtdata, rec,
			    epd->dtepd_nrecs - i,
			    (uchar_t *)buf->dtbd_data + offs,
			    buf->dtbd_size - offs);

			if (n < 0)
				return (-1); /* errno is set for us */

			if (n > 0)
				i += n - 1;

			goto nextrec;
		}
nofmt:
		/*
		 * printa() without a format string: walk the aggregation
		 * identified by the variable ID stored in the record.
		 */
		if (act == DTRACEACT_PRINTA) {
			dt_print_aggdata_t pd;

			bzero(&pd, sizeof (pd));
			pd.dtpa_dtp = dtp;
			pd.dtpa_fp = fp;

			/* LINTED - alignment */
			pd.dtpa_id = *((dtrace_aggvarid_t *)addr);

			if (dt_printf(dtp, fp, "\n") < 0 ||
			    dtrace_aggregate_walk_valsorted(dtp,
			    dt_print_agg, &pd) < 0)
				return (-1);

			goto nextrec;
		}

		/*
		 * Default rendering: print the record as an integer sized by
		 * dtrd_size, or as raw bytes for any other size.
		 */
		switch (rec->dtrd_size) {
		case sizeof (uint64_t):
			n = dt_printf(dtp, fp,
			    quiet ? "%lld" : " %16lld",
			    /* LINTED - alignment */
			    *((unsigned long long *)addr));
			break;
		case sizeof (uint32_t):
			n = dt_printf(dtp, fp, quiet ? "%d" : " %8d",
			    /* LINTED - alignment */
			    *((uint32_t *)addr));
			break;
		case sizeof (uint16_t):
			n = dt_printf(dtp, fp, quiet ? "%d" : " %5d",
			    /* LINTED - alignment */
			    *((uint16_t *)addr));
			break;
		case sizeof (uint8_t):
			n = dt_printf(dtp, fp, quiet ? "%d" : " %3d",
			    *((uint8_t *)addr));
			break;
		default:
			n = dt_print_bytes(dtp, fp, addr,
			    rec->dtrd_size, 33, quiet);
			break;
		}

		if (n < 0)
			return (-1); /* errno is set for us */
nextrec:
		if (dt_buffered_flush(dtp, &data, rec, NULL) < 0)
			return (-1); /* errno is set for us */
		}

		/*
		 * Call the record callback with a NULL record to indicate
		 * that we're done processing this EPID.
		 */
		rval = (*rfunc)(&data, NULL, arg);
nextepid:
		offs += epd->dtepd_size;
		last = id;
	}

	/*
	 * Ring buffer wrap: if we started at the oldest data, go back and
	 * consume the segment from the beginning of the buffer up to it.
	 */
	if (buf->dtbd_oldest != 0 && start == buf->dtbd_oldest) {
		end = buf->dtbd_oldest;
		start = 0;
		goto again;
	}

	if ((drops = buf->dtbd_drops) == 0)
		return (0);

	/*
	 * Explicitly zero the drops to prevent us from processing them again.
	 */
	buf->dtbd_drops = 0;

	return (dt_handle_cpudrop(dtp, cpu, DTRACEDROP_PRINCIPAL, drops));
}

/*
 * State shared between dt_consume_begin() and the probe/error callbacks it
 * interposes: the caller's original callbacks plus a flag selecting whether
 * we are currently passing through only BEGIN records (first pass) or only
 * non-BEGIN records (second pass).
 */
typedef struct dt_begin {
	dtrace_consume_probe_f *dtbgn_probefunc;	/* caller's probe callback */
	void *dtbgn_arg;				/* caller's probe argument */
	dtrace_handle_err_f *dtbgn_errhdlr;		/* saved ERROR handler */
	void *dtbgn_errarg;				/* saved ERROR argument */
	int dtbgn_beginonly;				/* 1: BEGIN pass; 0: everything else */
} dt_begin_t;

/*
 * Interposed probe callback for dt_consume_begin():  filters probe data by
 * whether it came from dtrace:::BEGIN.  In begin-only mode, non-BEGIN data
 * is skipped (DTRACE_CONSUME_NEXT); otherwise BEGIN data is skipped.
 * Matching data is forwarded to the caller's real probe function.
 */
static int
dt_consume_begin_probe(const dtrace_probedata_t *data, void *arg)
{
	dt_begin_t *begin = (dt_begin_t *)arg;
	dtrace_probedesc_t *pd = data->dtpda_pdesc;

	int r1 = (strcmp(pd->dtpd_provider, "dtrace") == 0);
	int r2 = (strcmp(pd->dtpd_name, "BEGIN") == 0);

	if (begin->dtbgn_beginonly) {
		if (!(r1 && r2))
			return (DTRACE_CONSUME_NEXT);
	} else {
		if (r1 && r2)
			return (DTRACE_CONSUME_NEXT);
	}

	/*
	 * We have a record that we're interested in.  Now call the underlying
	 * probe function...
	 */
	return (begin->dtbgn_probefunc(data, begin->dtbgn_arg));
}

/*
 * Interposed ERROR handler for dt_consume_begin():  same filtering logic as
 * dt_consume_begin_probe(), but for ERROR records -- non-matching errors are
 * acknowledged (DTRACE_HANDLE_OK) without invoking the caller's handler.
 */
static int
dt_consume_begin_error(dtrace_errdata_t *data, void *arg)
{
	dt_begin_t *begin = (dt_begin_t *)arg;
	dtrace_probedesc_t *pd = data->dteda_pdesc;

	int r1 = (strcmp(pd->dtpd_provider, "dtrace") == 0);
	int r2 = (strcmp(pd->dtpd_name, "BEGIN") == 0);

	if (begin->dtbgn_beginonly) {
		if (!(r1 && r2))
			return (DTRACE_HANDLE_OK);
	} else {
		if (r1 && r2)
			return (DTRACE_HANDLE_OK);
	}

	return (begin->dtbgn_errhdlr(data, begin->dtbgn_errarg));
}

static int
dt_consume_begin(dtrace_hdl_t *dtp, FILE *fp, dtrace_bufdesc_t *buf,
    dtrace_consume_probe_f *pf, dtrace_consume_rec_f *rf, void *arg)
{
	/*
	 * There's this idea that the BEGIN probe should be processed before
	 * everything else, and that the END probe should be processed after
	 * anything else.  In the common case, this is pretty easy to deal
	 * with.  However, a situation may arise where the BEGIN enabling and
	 * END enabling are on the same CPU, and some enabling in the middle
	 * occurred on a different CPU.  To deal with this (blech!) we need to
	 * consume the BEGIN buffer up until the end of the BEGIN probe, and
	 * then set it aside.  We will then process every other CPU, and then
	 * we'll return to the BEGIN CPU and process the rest of the data
	 * (which will inevitably include the END probe, if any).  Making this
	 * even more complicated (!) is the library's ERROR enabling.  Because
	 * this enabling is processed before we even get into the consume call
	 * back, any ERROR firing would result in the library's ERROR enabling
	 * being processed twice -- once in our first pass (for BEGIN probes),
	 * and again in our second pass (for everything but BEGIN probes).  To
	 * deal with this, we interpose on the ERROR handler to assure that we
	 * only process ERROR enablings induced by BEGIN enablings in the
	 * first pass, and that we only process ERROR enablings _not_ induced
	 * by BEGIN enablings in the second pass.
	 */
	dt_begin_t begin;
	processorid_t cpu = dtp->dt_beganon;
	dtrace_bufdesc_t nbuf;
	int rval, i;
	static int max_ncpus;
	dtrace_optval_t size;

	dtp->dt_beganon = -1;

	if (dt_ioctl(dtp, DTRACEIOC_BUFSNAP, buf) == -1) {
		/*
		 * We really don't expect this to fail, but it is at least
		 * technically possible for this to fail with ENOENT.  In this
		 * case, we just drive on...
		 */
		if (errno == ENOENT)
			return (0);

		return (dt_set_errno(dtp, errno));
	}

	if (!dtp->dt_stopped || buf->dtbd_cpu != dtp->dt_endedon) {
		/*
		 * This is the simple case.  We're either not stopped, or if
		 * we are, we actually processed any END probes on another
		 * CPU.  We can simply consume this buffer and return.
		 */
		return (dt_consume_cpu(dtp, fp, cpu, buf, pf, rf, arg));
	}

	begin.dtbgn_probefunc = pf;
	begin.dtbgn_arg = arg;
	begin.dtbgn_beginonly = 1;

	/*
	 * We need to interpose on the ERROR handler to be sure that we
	 * only process ERRORs induced by BEGIN.
	 */
	begin.dtbgn_errhdlr = dtp->dt_errhdlr;
	begin.dtbgn_errarg = dtp->dt_errarg;
	dtp->dt_errhdlr = dt_consume_begin_error;
	dtp->dt_errarg = &begin;

	rval = dt_consume_cpu(dtp, fp, cpu, buf,
	    dt_consume_begin_probe, rf, &begin);

	dtp->dt_errhdlr = begin.dtbgn_errhdlr;
	dtp->dt_errarg = begin.dtbgn_errarg;

	if (rval != 0)
		return (rval);

	/*
	 * Now allocate a new buffer.  We'll use this to deal with every other
	 * CPU.
	 */
	bzero(&nbuf, sizeof (dtrace_bufdesc_t));
	(void) dtrace_getopt(dtp, "bufsize", &size);
	if ((nbuf.dtbd_data = malloc(size)) == NULL)
		return (dt_set_errno(dtp, EDT_NOMEM));

	if (max_ncpus == 0)
		max_ncpus = dt_sysconf(dtp, _SC_CPUID_MAX) + 1;

	for (i = 0; i < max_ncpus; i++) {
		nbuf.dtbd_cpu = i;

		if (i == cpu)
			continue;

		if (dt_ioctl(dtp, DTRACEIOC_BUFSNAP, &nbuf) == -1) {
			/*
			 * If we failed with ENOENT, it may be because the
			 * CPU was unconfigured -- this is okay.  Any other
			 * error, however, is unexpected.
			 */
			if (errno == ENOENT)
				continue;

			free(nbuf.dtbd_data);
			return (dt_set_errno(dtp, errno));
		}

		if ((rval = dt_consume_cpu(dtp, fp,
		    i, &nbuf, pf, rf, arg)) != 0) {
			free(nbuf.dtbd_data);
			return (rval);
		}
	}

	free(nbuf.dtbd_data);

	/*
	 * Okay -- we're done with the other buffers.  Now we want to
	 * reconsume the first buffer -- but this time we're looking for
	 * everything _but_ BEGIN.  And of course, in order to only consume
	 * those ERRORs _not_ associated with BEGIN, we need to reinstall our
	 * ERROR interposition function...
	 */
	begin.dtbgn_beginonly = 0;
	assert(begin.dtbgn_errhdlr == dtp->dt_errhdlr);
	assert(begin.dtbgn_errarg == dtp->dt_errarg);
	dtp->dt_errhdlr = dt_consume_begin_error;
	dtp->dt_errarg = &begin;

	rval = dt_consume_cpu(dtp, fp, cpu, buf,
	    dt_consume_begin_probe, rf, &begin);

	dtp->dt_errhdlr = begin.dtbgn_errhdlr;
	dtp->dt_errarg = begin.dtbgn_errarg;

	return (rval);
}

/*
 * Public consume entry point:  snapshot and consume the principal buffer of
 * every CPU, throttled by the switchrate option.  Ordering guarantees:  the
 * CPU that ran the BEGIN probe (dt_beganon) is drained first via
 * dt_consume_begin(); once tracing has stopped, the CPU that ran the END
 * probe (dt_endedon) is drained last.  NULL pf/rf default to the library's
 * no-op callbacks.  Returns 0 on success or -1 with the dtrace errno set.
 */
int
dtrace_consume(dtrace_hdl_t *dtp, FILE *fp,
    dtrace_consume_probe_f *pf, dtrace_consume_rec_f *rf, void *arg)
{
	dtrace_bufdesc_t *buf = &dtp->dt_buf;
	dtrace_optval_t size;
	static int max_ncpus;
	int i, rval;
	dtrace_optval_t interval = dtp->dt_options[DTRACEOPT_SWITCHRATE];
	hrtime_t now = gethrtime();

	/*
	 * Rate-limit consumption:  if we were called again before a full
	 * switchrate interval has elapsed, do nothing.
	 */
	if (dtp->dt_lastswitch != 0) {
		if (now - dtp->dt_lastswitch < interval)
			return (0);

		dtp->dt_lastswitch += interval;
	} else {
		dtp->dt_lastswitch = now;
	}

	if (!dtp->dt_active)
		return (dt_set_errno(dtp, EINVAL));

	if (max_ncpus == 0)
		max_ncpus = dt_sysconf(dtp, _SC_CPUID_MAX) + 1;

	if (pf == NULL)
		pf = (dtrace_consume_probe_f *)dt_nullprobe;

	if (rf == NULL)
		rf = (dtrace_consume_rec_f *)dt_nullrec;

	/* Lazily allocate the snapshot buffer on first use. */
	if (buf->dtbd_data == NULL) {
		(void) dtrace_getopt(dtp, "bufsize", &size);
		if ((buf->dtbd_data = malloc(size)) == NULL)
			return (dt_set_errno(dtp, EDT_NOMEM));

		buf->dtbd_size = size;
	}

	/*
	 * If we have just begun, we want to first process the CPU that
	 * executed the BEGIN probe (if any).
	 */
	if (dtp->dt_active && dtp->dt_beganon != -1) {
		buf->dtbd_cpu = dtp->dt_beganon;

		if ((rval = dt_consume_begin(dtp, fp, buf, pf, rf, arg)) != 0)
			return (rval);
	}

	for (i = 0; i < max_ncpus; i++) {
		buf->dtbd_cpu = i;

		/*
		 * If we have stopped, we want to process the CPU on which the
		 * END probe was processed only _after_ we have processed
		 * everything else.
		 */
		if (dtp->dt_stopped && (i == dtp->dt_endedon))
			continue;

		if (dt_ioctl(dtp, DTRACEIOC_BUFSNAP, buf) == -1) {
			/*
			 * If we failed with ENOENT, it may be because the
			 * CPU was unconfigured -- this is okay.  Any other
			 * error, however, is unexpected.
			 */
			if (errno == ENOENT)
				continue;

			return (dt_set_errno(dtp, errno));
		}

		if ((rval = dt_consume_cpu(dtp, fp, i, buf, pf, rf, arg)) != 0)
			return (rval);
	}

	if (!dtp->dt_stopped)
		return (0);

	buf->dtbd_cpu = dtp->dt_endedon;

	if (dt_ioctl(dtp, DTRACEIOC_BUFSNAP, buf) == -1) {
		/*
		 * This _really_ shouldn't fail, but it is strictly speaking
		 * possible for this to return ENOENT if the CPU that called
		 * the END enabling somehow managed to become unconfigured.
		 * It's unclear how the user can possibly expect anything
		 * rational to happen in this case -- the state has been thrown
		 * out along with the unconfigured CPU -- so we'll just drive
		 * on...
		 */
		if (errno == ENOENT)
			return (0);

		return (dt_set_errno(dtp, errno));
	}

	return (dt_consume_cpu(dtp, fp, dtp->dt_endedon, buf, pf, rf, arg));
}
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -