⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 erl_trace.c

📁 OTP是开放电信平台的简称
💻 C
📖 第 1 页 / 共 5 页
字号:
	       && internal_pid_index(*tracer_pid) < erts_max_processes);
	/* NOTE(review): this is the tail of the pid-tracer branch of the
	 * call-trace function (presumably erts_call_trace); its head and
	 * the declarations of tracer_pid, tracee_flags, match_spec, mfa,
	 * args, sizes[], size, hp, bp, etc. lie before this excerpt. */

	/* Look the tracer process up and validate it: it must exist and
	 * still carry the F_TRACER flag. */
	tracer = erts_pid2proc(p, ERTS_PROC_LOCK_MAIN,
			       *tracer_pid, ERTS_PROC_LOCK_STATUS);
	if (!tracer)
	    invalid_tracer = 1;
	else {
	    invalid_tracer = (tracer->trace_flags & F_TRACER) == 0;
	    erts_smp_proc_unlock(tracer, ERTS_PROC_LOCK_STATUS);
	}

	if (invalid_tracer) {
#ifdef ERTS_SMP
	    ASSERT(is_nil(tracee) || tracer_pid == &p->tracer_proc);
	    if (is_not_nil(tracee))
		erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
#endif
	    /* Tracer is dead or no longer a tracer: turn all tracing off
	     * on the tracee and report that no trace message was sent. */
	    *tracee_flags &= ~TRACEE_FLAGS;
	    *tracer_pid = NIL;
#ifdef ERTS_SMP
	    if (is_not_nil(tracee))
		erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR);
#endif
	    return 0;
	}

#ifdef ERTS_SMP
	tpid = *tracer_pid; /* Need to save tracer pid,
			       since *tracer_pid might
			       be reset by erts_match_set_run() */
#endif

	/*
	 * If there is a PAM (match spec) program, run it.  Return if it
	 * fails.
	 *
	 * See the rules above in the port trace code.
	 */

	/* BEGIN this code should be the same for port and pid trace */
	return_flags = 0;
	arity = mfa[2];
	if (match_spec) {
	    pam_result = erts_match_set_run(p, match_spec, args, arity,
					    &return_flags);
	    if (is_non_value(pam_result)) {
		/* Match program failed: release its result and bail out. */
		erts_match_set_release_result(p);
		return 0;
	    }
	}
	if (tracee_flags == &meta_flags) {
	    /* Meta trace */
	    if (pam_result == am_false) {
		erts_match_set_release_result(p);
		return return_flags;
	    }
	} else {
	    /* Non-meta trace */
	    if (*tracee_flags & F_TRACE_SILENT) { 
		/* Silent tracing: run the match spec but send nothing. */
		erts_match_set_release_result(p);
		return 0;
	    }
	    if (pam_result == am_false) {
		erts_match_set_release_result(p);
		return return_flags;
	    }
	    if (local && (*tracee_flags & F_TRACE_RETURN_TO)) {
		return_flags |= MATCH_SET_RETURN_TO_TRACE;
	    }
	}
	/* END this code should be the same for port and pid trace */

	/*
	 * Calculate number of words needed on heap.
	 */

	size = 4 + 5;		/* Trace tuple + MFA tuple. */
	if (!(*tracee_flags & F_TRACE_ARITY_ONLY)) {
	    /* Full argument list: one cons cell per argument plus the
	     * size of each (deep-copied) argument term. */
	    size += 2*arity;
	    for (i = arity-1; i >= 0; i--) {
		sizes[i] = size_object(args[i]);
		size += sizes[i];
	    }
	}
	if (*tracee_flags & F_TIMESTAMP) {
	    size += 1 + 4;
	    /* One element in trace tuple + timestamp tuple. */
	}
	if (pam_result != am_true) {
	    pam_result_size = size_object(pam_result);
	    size += 1 + pam_result_size;
	    /* One element in trace tuple + term size. */
	}

#ifdef ERTS_SMP
	bp = new_message_buffer(size);
	hp = bp->mem;
	off_heap = &bp->off_heap;
#else
	hp = erts_alloc_message_heap(size, &bp, &off_heap, tracer, 0);
#endif
#ifdef DEBUG
	limit = hp + size;	/* Used only to assert that 'size' was exact. */
#endif

	/*
	 * Build the {M,F,A} tuple in the message buffer.
	 * (A is arguments or arity.)
	 */

	if (*tracee_flags & F_TRACE_ARITY_ONLY) {
	    mfa_tuple = make_small(arity);
	} else {
	    /* Build the argument list back-to-front so it ends up in
	     * the original order. */
	    mfa_tuple = NIL;
	    for (i = arity-1; i >= 0; i--) {
		Eterm term = copy_struct(args[i], sizes[i], &hp,
#ifdef ERTS_SMP
					 &bp->off_heap
#else
					 off_heap
#endif
		    );
		mfa_tuple = CONS(hp, term, mfa_tuple);
		hp += 2;
	    }
	}
	mfa_tuple = TUPLE3(hp, mfa[0], mfa[1], mfa_tuple);
	hp += 4;

	/*
	 * Copy the PAM result (if any) onto the heap.
	 */

	if (pam_result != am_true) {
	    pam_result = copy_struct(pam_result, pam_result_size, &hp, off_heap);
	}

	erts_match_set_release_result(p);

	/*
	 * Build the trace tuple and enqueue it.
	 */

	mess = TUPLE4(hp, am_trace, p->id /* Local pid */, am_call, mfa_tuple);
	hp += 5;
	if (pam_result != am_true) {
	    /* Widen the just-built 4-tuple in place to a 5-tuple that
	     * also carries the PAM result. */
	    hp[-5] = make_arityval(5);
	    *hp++ = pam_result;
	}

	erts_smp_mtx_lock(&smq_mtx);
	if (*tracee_flags & F_TIMESTAMP) {
	    hp = patch_ts(mess, hp);
	}
	ASSERT(hp == limit);
#ifdef ERTS_SMP
	enqueue_sys_msg_unlocked(SYS_MSG_TYPE_TRACE, tracee, tpid, mess, bp);
	erts_smp_mtx_unlock(&smq_mtx);
#else
	erts_queue_message(tracer, 0, bp, mess, NIL);
#endif
	return return_flags;
    }
}

/* Sends trace message:
 *    {trace_ts, ProcessPid, What, Data, Timestamp}
 * or {trace, ProcessPid, What, Data}
 *
 * 'what' must be atomic, 'data' may be a deep term.
* 'c_p' is the currently executing process, may be NULL.
 * 't_p' is the traced process.
 */
void
trace_proc(Process *c_p, Process *t_p, Eterm what, Eterm data)
{
    Eterm mess;
    Eterm* hp;
    int need;

    if (is_internal_port(t_p->tracer_proc)) {
	/* Tracer is a port: build the message on a small local heap
	 * (5 words for the 4-tuple + 5 for an optional timestamp) and
	 * hand it to the port under the smq_mtx mutex. */
	Eterm local_heap[5+5];
	hp = local_heap;
	mess = TUPLE4(hp, am_trace, t_p->id, what, data);
	hp += 5;
	erts_smp_mtx_lock(&smq_mtx);
	if (t_p->trace_flags & F_TIMESTAMP) {
	    hp = patch_ts(mess, hp);	/* becomes a trace_ts 5-tuple */
	}
	send_to_port(
#ifndef ERTS_SMP
	    /* No fake schedule out and in again after an exit */
	    what == am_exit ? NULL : c_p,
#else
	    /* Fake schedule out and in are never sent when smp enabled */
	    c_p,
#endif
	    mess, &t_p->tracer_proc, &t_p->trace_flags);
	erts_smp_mtx_unlock(&smq_mtx);
    } else {
	/* Tracer is a process: deep-copy 'data' into a message buffer
	 * (SMP) or onto a heap obtained for the tracer (non-SMP), then
	 * enqueue the trace message. */
	Eterm tmp;
	ErlHeapFragment *bp;
	ErlOffHeap *off_heap;
#ifndef ERTS_SMP
	Process *tracer;
#endif
	size_t sz_data;

	ASSERT(is_internal_pid(t_p->tracer_proc)
	       && internal_pid_index(t_p->tracer_proc) < erts_max_processes);
#ifndef ERTS_SMP
	/* Non-SMP: if the tracer is gone or no longer has F_TRACER set,
	 * disable all tracing on the tracee and send nothing. */
	tracer = process_tab[internal_pid_index(t_p->tracer_proc)];
	if (INVALID_PID(tracer, t_p->tracer_proc)
	    || (tracer->trace_flags & F_TRACER) == 0) {
	    t_p->trace_flags &= ~TRACEE_FLAGS;
	    t_p->tracer_proc = NIL;
	    return;
	}
#endif
	sz_data = size_object(data);
	/* Copied data + 4-tuple (5 words) + optional timestamp. */
	need = sz_data + 5 + TS_SIZE(t_p);
#ifdef ERTS_SMP
	bp = new_message_buffer(need);
	hp = bp->mem;
	off_heap = &bp->off_heap;
#else
	hp = erts_alloc_message_heap(need, &bp, &off_heap, tracer, 0);
#endif
	tmp = copy_struct(data, sz_data, &hp, off_heap);
	mess = TUPLE4(hp, am_trace, t_p->id /* Local pid */, what, tmp);
	hp += 5;
	erts_smp_mtx_lock(&smq_mtx);
	if (t_p->trace_flags & F_TIMESTAMP) {
	    hp = patch_ts(mess, hp);
	}
#ifdef ERTS_SMP
	enqueue_sys_msg_unlocked(SYS_MSG_TYPE_TRACE,
				 t_p->id, t_p->tracer_proc, mess, bp);
	erts_smp_mtx_unlock(&smq_mtx);
#else
	erts_queue_message(tracer, 0, bp, mess, NIL);
#endif
    }
}

/* Sends trace message:
 *    {trace_ts, ParentPid, spawn, ChildPid, {Mod, Func, Args}, Timestamp}
 * or {trace, ParentPid, spawn, ChildPid, {Mod,
Func, Args}} * * 'pid' is the ChildPid, 'mod' and 'func' must be atomic, * and 'args' may be a deep term. */voidtrace_proc_spawn(Process *p, Eterm pid, 		 Eterm mod, Eterm func, Eterm args){    Eterm mfa;    Eterm mess;    Eterm* hp;    if (is_internal_port(p->tracer_proc)) {	Eterm local_heap[4+6+5];	hp = local_heap;	mfa = TUPLE3(hp, mod, func, args);	hp += 4;	mess = TUPLE5(hp, am_trace, p->id, am_spawn, pid, mfa);	hp += 6;	erts_smp_mtx_lock(&smq_mtx);	if (p->trace_flags & F_TIMESTAMP) {	    hp = patch_ts(mess, hp);	}	send_to_port(p, mess, &p->tracer_proc, &p->trace_flags);	erts_smp_mtx_unlock(&smq_mtx);    } else {	Eterm tmp;	ErlHeapFragment *bp;	ErlOffHeap *off_heap;#ifndef ERTS_SMP	Process *tracer;#endif	size_t sz_args, sz_pid;	Uint need;	ASSERT(is_internal_pid(p->tracer_proc)	       && internal_pid_index(p->tracer_proc) < erts_max_processes);#ifndef ERTS_SMP	tracer = process_tab[internal_pid_index(p->tracer_proc)];	if (INVALID_PID(tracer, p->tracer_proc)	    || (tracer->trace_flags & F_TRACER) == 0) {	    p->trace_flags &= ~TRACEE_FLAGS;	    p->tracer_proc = NIL;	    return;	}#endif	sz_args = size_object(args);	sz_pid = size_object(pid);	need = sz_args + 4 + 6 + TS_SIZE(p);#ifdef ERTS_SMP	bp = new_message_buffer(need);	hp = bp->mem;	off_heap = &bp->off_heap;#else	hp = erts_alloc_message_heap(need, &bp, &off_heap, tracer, 0);#endif	tmp = copy_struct(args, sz_args, &hp, off_heap);	mfa = TUPLE3(hp, mod, func, tmp);	hp += 4;	tmp = copy_struct(pid, sz_pid, &hp, off_heap);	mess = TUPLE5(hp, am_trace, p->id, am_spawn, tmp, mfa);	hp += 6;	erts_smp_mtx_lock(&smq_mtx);	if (p->trace_flags & F_TIMESTAMP) {	    hp = patch_ts(mess, hp);	}#ifdef ERTS_SMP	enqueue_sys_msg_unlocked(SYS_MSG_TYPE_TRACE,				 p->id, p->tracer_proc, mess, bp);	erts_smp_mtx_unlock(&smq_mtx);#else	erts_queue_message(tracer, 0, bp, mess, NIL);#endif    }}void save_calls(Process *p, Export *e){   if (p->ct) {     Export **ct = &p->ct->ct[0];     int len = p->ct->len;     ct[p->ct->cur] = e;     if 
(++p->ct->cur >= len) {
       p->ct->cur = 0;		/* wrap the circular buffer index */
     }
     if (p->ct->n < len) {
       p->ct->n++;		/* count entries until the buffer is full */
     }
   }
}

/* 
 * Entry point called by the trace wrap functions in erl_bif_wrap.c
 *
 * The trace wrap functions are themselves called through the export
 * entries instead of the original BIF functions.
 *
 * NOTE(review): this function is truncated in this excerpt (page 1 of 5);
 * the code after the big if/else — including the final return — is not
 * visible here.
 */
Eterm
erts_bif_trace(int bif_index, Process* p, 
	       Eterm arg1, Eterm arg2, Eterm arg3, Uint *I)
{
    Eterm result;
    int meta = !!(erts_bif_trace_flags[bif_index] & BIF_TRACE_AS_META);

    ERTS_CHK_HAVE_ONLY_MAIN_PROC_LOCK(p->id);

    if (!ARE_TRACE_FLAGS_ON(p, F_TRACE_CALLS) && (! meta)) {
	/* Warning! This is an Optimization. 
	 *
	 * If neither meta trace is active nor process trace flags then 
	 * no tracing will occur. Doing the whole else branch will 
	 * also do nothing, only slower.
	 */
	Eterm (*func)(Process*, Eterm, Eterm, Eterm, Uint*) = bif_table[bif_index].f;
	result = func(p, arg1, arg2, arg3, I);
    } else {
	Eterm (*func)(Process*, Eterm, Eterm, Eterm, Uint*);
	Export* ep = bif_export[bif_index];
	Uint32 flags = 0, flags_meta = 0;
	int global = !!(erts_bif_trace_flags[bif_index] & BIF_TRACE_AS_GLOBAL);
	int local  = !!(erts_bif_trace_flags[bif_index] & BIF_TRACE_AS_LOCAL);
	Eterm meta_tracer_pid = NIL;
	int applying = (I == &(ep->code[3])); /* Yup, the apply code for a bif
					       * is actually in the 
					       * export entry */
	Eterm *cp = p->cp;
	
#ifndef _OSE_
	Eterm args[3] = {arg1, arg2, arg3};
#else
	/* OSE: no aggregate initializer for this target. */
	Eterm args[3];
	args[0] = arg1;
	args[1] = arg2;
	args[2] = arg3;
#endif
	
	/* 
	 * Make continuation pointer OK, it is not during direct BIF calls,
	 * but it is correct during apply of bif.
	 */
	if (!applying) { 
	    p->cp = I;
	}
	/* Run the call-trace and/or meta-trace match programs before
	 * invoking the real BIF; the returned flags decide which
	 * return/exception trace messages are sent afterwards. */
	if (global || local) {
	    flags = erts_call_trace(p, ep->code, ep->match_prog_set, args, 
				       local, &p->tracer_proc);
	}
	if (meta) {
	    flags_meta = erts_bif_mtrace(p, ep->code+3, args, local, 
					 &meta_tracer_pid);
	}
	/* Restore original continuation pointer (if changed). 
	 */
	p->cp = cp;
	
	/* Call the real BIF. */
	func = bif_table[bif_index].f;
	result = func(p, arg1, arg2, arg3, I);
	
	if (applying && (flags & MATCH_SET_RETURN_TO_TRACE)) {
	    Uint i_return_trace = beam_return_trace[0];
	    Uint i_return_to_trace = beam_return_to_trace[0];
	    Eterm *cpp;
	    /* Maybe advance cp to skip trace stack frames */
	    for (cpp = p->stop;  ;  cp = cp_val(*cpp++)) {
		ASSERT(is_CP((Eterm) cp));
		if (*cp_val((Eterm) cp) == i_return_trace) {
		    /* Skip stack frame variables */
		    while (is_not_CP(*cpp)) cpp++;
		    cpp += 2; /* Skip return_trace parameters */
		} else if (*cp_val((Eterm) cp) == i_return_to_trace) {
		    /* A return_to trace message is going to be generated
		     * by normal means, so we do not have to.
		     */
		    cp = NULL;
		    break;
		} else break;
	    }
	}
	
	/* Try to get these in the order 
	 * they usually appear in normal code... */
	if (is_non_value(result)) {
	    /* The BIF failed (or trapped): possibly send exception /
	     * return_to trace messages. */
	    Uint reason = p->freason;
	    if (reason != TRAP && reason != RESCHEDULE) {
		Eterm class;
		Eterm value = p->fvalue;
		Eterm nocatch[3];
		/* Expand error value like in handle_error() */
		if (reason & EXF_ARGLIST) {
		    Eterm *tp;
		    ASSERT(is_tuple(value));
		    tp = tuple_val(value);
		    value = tp[1];
		}
		if ((reason & EXF_THROWN) && (p->catches <= 0)) {
		    /* Uncaught throw becomes {nocatch, Value} error. */
		    value = TUPLE2(nocatch, am_nocatch, value);
		    reason = EXC_ERROR;
		}
		/* Note: expand_error_value() could theoretically 
		 * allocate on the heap, but not for any error
		 * returned by a BIF, and it would do no harm,
		 * just be annoying.
		 */
		value = expand_error_value(p, reason, value);
		class = exception_tag[GET_EXC_CLASS(reason)];
		
		if (flags_meta & MATCH_SET_EXCEPTION_TRACE) {
		    erts_trace_exception(p, ep->code, class, value,
					 &meta_tracer_pid);
		}
		if (flags & MATCH_SET_EXCEPTION_TRACE) {
		    erts_trace_exception(p, ep->code, class, value,
					 &p->tracer_proc);
		}
		if ((flags & MATCH_SET_RETURN_TO_TRACE) && p->catches > 0) {
		    /* can only happen if(local)*/
		    Eterm *ptr = p->stop;
		    ASSERT(is_CP(*ptr));
		    ASSERT(ptr <= STACK_START(p));
		    /* Search the nearest stack frame for a catch */
		    while (++ptr < STACK_START(p)) {
			if (is_CP(*ptr)) break;
			if (is_catch(*ptr)) {
			    if (applying) {
				/* Apply of BIF, cp is in calling function */
				if (cp) erts_trace_return_to(p, cp);
			    } else {
				/* Direct bif call, I points into 
				 * calling function */
				erts_trace_return_to(p, I);
			    }
			}
		    }
		}
		if ((flags_meta|flags) & MATCH_SET_EXCEPTION_TRACE) {
		    /* Remember that an exception trace may be pending so
		     * the corresponding message is sent when the
		     * exception is eventually caught or the process
		     * exits. */
		    erts_smp_proc_lock(p, ERTS_PROC_LOCKS_ALL_MINOR);
		    p->trace_flags |= F_EXCEPTION_TRACE;
		    erts_smp_proc_unlock(p, ERTS_PROC_LOCKS_ALL_MINOR);
		}
	    }
	} else {
	    /* The BIF succeeded: possibly send return_from / return_to
	     * trace messages. */
	    if (flags_meta & MATCH_SET_RX_TRACE) {
		erts_trace_return(p, ep->code, result, &meta_tracer_pid);
	    }
	    /* MATCH_SET_RETURN_TO_TRACE cannot occur if(meta) */
	    if (flags & MATCH_SET_RX_TRACE) {
		erts_trace_return(p, ep->code, result, &p->tracer_proc);
	    }
	    if (flags & MATCH_SET_RETURN_TO_TRACE) { 
		/* can only happen if(local)*/
		if (applying) {
		    /* Apply of BIF, cp is in calling function */
		    if (cp) erts_trace_return_to(p, cp);
		} else {
		    /* Direct bif call, I points into calling function */
		    erts_trace_return_to(p, I);
		}
	    }
	}

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -