
📄 dtrace_isa.c

📁 Source code for the DTrace component in Sun Solaris 10. See: http://www.sun.com/software/solaris/observability.jsp
💻 C
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only.
 * See the file usr/src/LICENSING.NOTICE in this distribution or
 * http://www.opensolaris.org/license/ for details.
 */

#pragma ident	"@(#)dtrace_isa.c	1.10	04/12/18 SMI"

#include <sys/dtrace_impl.h>
#include <sys/stack.h>
#include <sys/frame.h>
#include <sys/cmn_err.h>
#include <sys/privregs.h>
#include <sys/sysmacros.h>

/*
 * This is gross knowledge to have to encode here...
 */
extern void _interrupt();
extern void _cmntrap();
extern void _allsyscalls();

extern size_t _interrupt_size;
extern size_t _cmntrap_size;
extern size_t _allsyscalls_size;

extern uintptr_t kernelbase;

/*ARGSUSED*/
void
dtrace_getpcstack(pc_t *pcstack, int pcstack_limit, int aframes,
    uint32_t *ignored)
{
	struct frame *fp = (struct frame *)dtrace_getfp();
	struct frame *nextfp, *minfp, *stacktop;
	int depth = 0;
	int is_intr = 0;
	int on_intr, last = 0;
	uintptr_t pc;
	uintptr_t caller = CPU->cpu_dtrace_caller;

	if ((on_intr = CPU_ON_INTR(CPU)) != 0)
		stacktop = (struct frame *)(CPU->cpu_intr_stack + SA(MINFRAME));
	else
		stacktop = (struct frame *)curthread->t_stk;
	minfp = fp;

	aframes++;

	while (depth < pcstack_limit) {
		if (is_intr) {
			struct regs *rp = (struct regs *)fp;
			nextfp = (struct frame *)rp->r_fp;
			pc = rp->r_pc;
		} else {
			nextfp = (struct frame *)fp->fr_savfp;
			pc = fp->fr_savpc;
		}

		if (nextfp <= minfp || nextfp >= stacktop) {
			if (on_intr) {
				/*
				 * Hop from interrupt stack to thread stack.
				 */
				stacktop = (struct frame *)curthread->t_stk;
				minfp = (struct frame *)curthread->t_stkbase;
				on_intr = 0;
				continue;
			}

			/*
			 * This is the last frame we can process; indicate
			 * that we should return after processing this frame.
			 */
			last = 1;
		}

		if (aframes > 0) {
			if (--aframes == 0 && caller != NULL) {
				/*
				 * We've just run out of artificial frames,
				 * and we have a valid caller -- fill it in
				 * now.
				 */
				ASSERT(depth < pcstack_limit);
				pcstack[depth++] = (pc_t)caller;
				caller = NULL;
			}
		} else {
			if (depth < pcstack_limit)
				pcstack[depth++] = (pc_t)pc;
		}

		if (last) {
			while (depth < pcstack_limit)
				pcstack[depth++] = NULL;
			return;
		}

		if (pc - (uintptr_t)_interrupt < _interrupt_size ||
		    pc - (uintptr_t)_allsyscalls < _allsyscalls_size ||
		    pc - (uintptr_t)_cmntrap < _cmntrap_size) {
			is_intr = 1;
		} else {
			is_intr = 0;
		}

		fp = nextfp;
		minfp = fp;
	}
}
void
dtrace_getupcstack(uint64_t *pcstack, int pcstack_limit)
{
	klwp_t *lwp = ttolwp(curthread);
	proc_t *p = ttoproc(curthread);
	struct regs *rp;
	uintptr_t pc, sp, oldcontext;
	volatile uint8_t *flags =
	    (volatile uint8_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
	size_t s1, s2;

	if (lwp == NULL || p == NULL || (rp = lwp->lwp_regs) == NULL)
		return;

	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	*pcstack++ = (uint64_t)p->p_pid;
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	pc = rp->r_pc;
	sp = rp->r_fp;
	oldcontext = lwp->lwp_oldcontext;

	if (p->p_model == DATAMODEL_NATIVE) {
		s1 = sizeof (struct frame) + 2 * sizeof (long);
		s2 = s1 + sizeof (siginfo_t);
	} else {
		s1 = sizeof (struct frame32) + 3 * sizeof (int);
		s2 = s1 + sizeof (siginfo32_t);
	}

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		*pcstack++ = (uint64_t)pc;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			return;

		if (p->p_model == DATAMODEL_NATIVE)
			pc = dtrace_fulword((void *)rp->r_sp);
		else
			pc = dtrace_fuword32((void *)rp->r_sp);
	}

	while (pc != 0 && sp != 0) {
		*pcstack++ = (uint64_t)pc;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			break;

		if (oldcontext == sp + s1 || oldcontext == sp + s2) {
			if (p->p_model == DATAMODEL_NATIVE) {
				ucontext_t *ucp = (ucontext_t *)oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fulword(&gregs[REG_FP]);
				pc = dtrace_fulword(&gregs[REG_PC]);

				oldcontext = dtrace_fulword(&ucp->uc_link);
			} else {
				ucontext32_t *ucp = (ucontext32_t *)oldcontext;
				greg32_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fuword32(&gregs[EBP]);
				pc = dtrace_fuword32(&gregs[EIP]);

				oldcontext = dtrace_fuword32(&ucp->uc_link);
			}
		} else {
			if (p->p_model == DATAMODEL_NATIVE) {
				struct frame *fr = (struct frame *)sp;

				pc = dtrace_fulword(&fr->fr_savpc);
				sp = dtrace_fulword(&fr->fr_savfp);
			} else {
				struct frame32 *fr = (struct frame32 *)sp;

				pc = dtrace_fuword32(&fr->fr_savpc);
				sp = dtrace_fuword32(&fr->fr_savfp);
			}
		}

		/*
		 * This is totally bogus:  if we faulted, we're going to clear
		 * the fault and break.  This is to deal with the apparently
		 * broken Java stacks on x86.
		 */
		if (*flags & CPU_DTRACE_FAULT) {
			*flags &= ~CPU_DTRACE_FAULT;
			break;
		}
	}

	while (pcstack_limit-- > 0)
		*pcstack++ = NULL;
}

/*ARGSUSED*/
void
dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack, int pcstack_limit)
{
	klwp_t *lwp = ttolwp(curthread);
	proc_t *p = ttoproc(curthread);
	struct regs *rp;
	uintptr_t pc, sp, oldcontext;
	volatile uint8_t *flags =
	    (volatile uint8_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
	size_t s1, s2;

	if (lwp == NULL || p == NULL || (rp = lwp->lwp_regs) == NULL)
		return;

	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	*pcstack++ = (uint64_t)p->p_pid;
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	pc = rp->r_pc;
	sp = rp->r_fp;
	oldcontext = lwp->lwp_oldcontext;

	if (p->p_model == DATAMODEL_NATIVE) {
		s1 = sizeof (struct frame) + 2 * sizeof (long);
		s2 = s1 + sizeof (siginfo_t);
	} else {
		s1 = sizeof (struct frame32) + 3 * sizeof (int);
		s2 = s1 + sizeof (siginfo32_t);
	}

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		*pcstack++ = (uint64_t)pc;
		*fpstack++ = 0;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			return;

		if (p->p_model == DATAMODEL_NATIVE)
			pc = dtrace_fulword((void *)rp->r_sp);
		else
			pc = dtrace_fuword32((void *)rp->r_sp);
	}

	while (pc != 0 && sp != 0) {
		*pcstack++ = (uint64_t)pc;
		*fpstack++ = sp;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			break;

		if (oldcontext == sp + s1 || oldcontext == sp + s2) {
			if (p->p_model == DATAMODEL_NATIVE) {
				ucontext_t *ucp = (ucontext_t *)oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fulword(&gregs[REG_FP]);
				pc = dtrace_fulword(&gregs[REG_PC]);

				oldcontext = dtrace_fulword(&ucp->uc_link);
			} else {
				ucontext32_t *ucp = (ucontext32_t *)oldcontext;
				greg32_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fuword32(&gregs[EBP]);
				pc = dtrace_fuword32(&gregs[EIP]);

				oldcontext = dtrace_fuword32(&ucp->uc_link);
			}
		} else {
			if (p->p_model == DATAMODEL_NATIVE) {
				struct frame *fr = (struct frame *)sp;

				pc = dtrace_fulword(&fr->fr_savpc);
				sp = dtrace_fulword(&fr->fr_savfp);
			} else {
				struct frame32 *fr = (struct frame32 *)sp;

				pc = dtrace_fuword32(&fr->fr_savpc);
				sp = dtrace_fuword32(&fr->fr_savfp);
			}
		}

		/*
		 * This is totally bogus:  if we faulted, we're going to clear
		 * the fault and break.  This is to deal with the apparently
		 * broken Java stacks on x86.
		 */
		if (*flags & CPU_DTRACE_FAULT) {
			*flags &= ~CPU_DTRACE_FAULT;
			break;
		}
	}

	while (pcstack_limit-- > 0)
		*pcstack++ = NULL;
}
/*ARGSUSED*/
uint64_t
dtrace_getarg(int arg, int aframes)
{
	uintptr_t val;
	struct frame *fp = (struct frame *)dtrace_getfp();
	uintptr_t *stack;
	int i;
#if defined(__amd64)
	int inreg = offsetof(struct regs, r_r9) / sizeof (greg_t);
#endif

	for (i = 1; i <= aframes; i++) {
		fp = (struct frame *)(fp->fr_savfp);

		if (fp->fr_savpc == (pc_t)dtrace_invop_callsite) {
#if !defined(__amd64)
			/*
			 * If we pass through the invalid op handler, we will
			 * use the pointer that it passed to the stack as the
			 * second argument to dtrace_invop() as the pointer to
			 * the stack.  When using this stack, we must step
			 * beyond the EIP/RIP that was pushed when the trap was
			 * taken -- hence the "+ 1" below.
			 */
			stack = ((uintptr_t **)&fp[1])[1] + 1;
#else
			/*
			 * In the case of amd64, we will use the pointer to the
			 * regs structure that was pushed when we took the
			 * trap.  To get this structure, we must increment
			 * beyond the frame structure, and then again beyond
			 * the calling RIP stored in dtrace_invop().  If the
			 * argument that we're seeking is passed on the stack,
			 * we'll pull the true stack pointer out of the saved
			 * registers and decrement our argument by the number
			 * of arguments passed in registers; if the argument
			 * we're seeking is passed in registers, we can just
			 * load it directly.
			 */
			struct regs *rp = (struct regs *)((uintptr_t)&fp[1] +
			    sizeof (uintptr_t));

			if (arg <= inreg) {
				stack = (uintptr_t *)rp;
			} else {
				stack = (uintptr_t *)(rp->r_rsp);
				arg -= inreg;
			}
#endif
			goto load;
		}
	}

	/*
	 * We know that we did not come through a trap to get into
	 * dtrace_probe() -- the provider simply called dtrace_probe()
	 * directly.  As this is the case, we need to shift the argument
	 * that we're looking for:  the probe ID is the first argument to
	 * dtrace_probe(), so the argument n will actually be found where
	 * one would expect to find argument (n + 1).
	 */
	arg++;

#if defined(__amd64)
	if (arg <= inreg) {
		/*
		 * This shouldn't happen.  If the argument is passed in a
		 * register then it should have been, well, passed in a
		 * register...
		 */
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	}

	arg -= (inreg + 1);
#endif
	stack = (uintptr_t *)&fp[1];

load:
	DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
	val = stack[arg];
	DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);

	return (val);
}

/*ARGSUSED*/
int
dtrace_getstackdepth(int aframes)
{
	struct frame *fp = (struct frame *)dtrace_getfp();
	struct frame *nextfp, *minfp, *stacktop;
	int depth = 0;
	int is_intr = 0;
	int on_intr;
	uintptr_t pc;

	if ((on_intr = CPU_ON_INTR(CPU)) != 0)
		stacktop = (struct frame *)(CPU->cpu_intr_stack + SA(MINFRAME));
	else
		stacktop = (struct frame *)curthread->t_stk;
	minfp = fp;

	aframes++;

	for (;;) {
		depth++;

		if (is_intr) {
			struct regs *rp = (struct regs *)fp;
			nextfp = (struct frame *)rp->r_fp;
			pc = rp->r_pc;
		} else {
			nextfp = (struct frame *)fp->fr_savfp;
			pc = fp->fr_savpc;
		}

		if (nextfp <= minfp || nextfp >= stacktop) {
			if (on_intr) {
				/*
				 * Hop from interrupt stack to thread stack.
				 */
				stacktop = (struct frame *)curthread->t_stk;
				minfp = (struct frame *)curthread->t_stkbase;
				on_intr = 0;
				continue;
			}
			break;
		}

		is_intr = pc - (uintptr_t)_interrupt < _interrupt_size ||
		    pc - (uintptr_t)_allsyscalls < _allsyscalls_size ||
		    pc - (uintptr_t)_cmntrap < _cmntrap_size;

		fp = nextfp;
		minfp = fp;
	}

	if (depth <= aframes)
		return (0);

	return (depth - aframes);
}
ulong_t
dtrace_getreg(struct regs *rp, uint_t reg)
{
#if defined(__amd64)
	int regmap[] = {
		REG_GS,		/* GS */
		REG_FS,		/* FS */
		REG_ES,		/* ES */
		REG_DS,		/* DS */
		REG_RDI,	/* EDI */
		REG_RSI,	/* ESI */
		REG_RBP,	/* EBP */
		REG_RSP,	/* ESP */
		REG_RBX,	/* EBX */
		REG_RDX,	/* EDX */
		REG_RCX,	/* ECX */
		REG_RAX,	/* EAX */
		REG_TRAPNO,	/* TRAPNO */
		REG_ERR,	/* ERR */
		REG_RIP,	/* EIP */
		REG_CS,		/* CS */
		REG_RFL,	/* EFL */
		REG_RSP,	/* UESP */
		REG_SS		/* SS */
	};

	if (reg <= SS) {
		if (reg >= sizeof (regmap) / sizeof (int)) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
			return (0);
		}

		reg = regmap[reg];
	} else {
		reg -= SS + 1;
	}

	switch (reg) {
	case REG_RDI:
		return (rp->r_rdi);
	case REG_RSI:
		return (rp->r_rsi);
	case REG_RDX:
		return (rp->r_rdx);
	case REG_RCX:
		return (rp->r_rcx);
	case REG_R8:
		return (rp->r_r8);
	case REG_R9:
		return (rp->r_r9);
	case REG_RAX:
		return (rp->r_rax);
	case REG_RBX:
		return (rp->r_rbx);
	case REG_RBP:
		return (rp->r_rbp);
	case REG_R10:
		return (rp->r_r10);
	case REG_R11:
		return (rp->r_r11);
	case REG_R12:
		return (rp->r_r12);
	case REG_R13:
		return (rp->r_r13);
	case REG_R14:
		return (rp->r_r14);
	case REG_R15:
		return (rp->r_r15);
	case REG_DS:
		return (rp->r_ds);
	case REG_ES:
		return (rp->r_es);
	case REG_FS:
		return (rp->r_fs);
	case REG_GS:
		return (rp->r_gs);
	case REG_TRAPNO:
		return (rp->r_trapno);
	case REG_ERR:
		return (rp->r_err);
	case REG_RIP:
		return (rp->r_rip);
	case REG_CS:
		return (rp->r_cs);
	case REG_SS:
		return (rp->r_ss);
	case REG_RFL:
		return (rp->r_rfl);
	case REG_RSP:
		return (rp->r_rsp);
	default:
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	}

#else
	if (reg > SS) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	}

	return ((&rp->r_gs)[reg]);
#endif
}

static int
dtrace_copycheck(uintptr_t uaddr, uintptr_t kaddr, size_t size)
{
	ASSERT(kaddr >= kernelbase && kaddr + size >= kaddr);

	if (uaddr + size >= kernelbase || uaddr + size < uaddr) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr;
		return (0);
	}

	return (1);
}

void
dtrace_copyin(uintptr_t uaddr, uintptr_t kaddr, size_t size)
{
	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copy(uaddr, kaddr, size);
}

void
dtrace_copyout(uintptr_t kaddr, uintptr_t uaddr, size_t size)
{
	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copy(kaddr, uaddr, size);
}

void
dtrace_copyinstr(uintptr_t uaddr, uintptr_t kaddr, size_t size)
{
	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copystr(uaddr, kaddr, size);
}

void
dtrace_copyoutstr(uintptr_t kaddr, uintptr_t uaddr, size_t size)
{
	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copystr(kaddr, uaddr, size);
}

uint8_t
dtrace_fuword8(void *uaddr)
{
	extern uint8_t dtrace_fuword8_nocheck(void *);
	if ((uintptr_t)uaddr >= _userlimit) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	return (dtrace_fuword8_nocheck(uaddr));
}

uint16_t
dtrace_fuword16(void *uaddr)
{
	extern uint16_t dtrace_fuword16_nocheck(void *);
	if ((uintptr_t)uaddr >= _userlimit) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	return (dtrace_fuword16_nocheck(uaddr));
}
uint32_t
dtrace_fuword32(void *uaddr)
{
	extern uint32_t dtrace_fuword32_nocheck(void *);
	if ((uintptr_t)uaddr >= _userlimit) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	return (dtrace_fuword32_nocheck(uaddr));
}

uint64_t
dtrace_fuword64(void *uaddr)
{
	extern uint64_t dtrace_fuword64_nocheck(void *);
	if ((uintptr_t)uaddr >= _userlimit) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	return (dtrace_fuword64_nocheck(uaddr));
}
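
For readers new to the idiom: dtrace_getpcstack() and dtrace_getstackdepth() above walk the classic x86 frame-pointer chain, in which each frame records its caller's frame pointer and return PC, and the walk stops once the chain leaves the (minfp, stacktop) window. The following minimal userland sketch of the same walk is not part of the Solaris source; the struct layout, the __builtin_frame_address() usage, and the build flags assume a GCC/Clang x86 toolchain with frame pointers preserved.

#include <stdio.h>
#include <stdint.h>

/* Mirrors <sys/frame.h>: saved frame pointer, then saved return PC. */
struct frame {
	struct frame *fr_savfp;
	uintptr_t fr_savpc;
};

static void
print_stack(void)
{
	/* Current function's frame pointer (GCC/Clang builtin). */
	struct frame *fp = __builtin_frame_address(0);
	int depth = 0;

	/*
	 * Walk toward main(): like the kernel walker, require the chain
	 * to grow monotonically (cf. the "nextfp <= minfp" test) and cap
	 * the depth instead of testing real stack bounds.
	 */
	while (fp != NULL && fp->fr_savfp > fp && depth < 16) {
		printf("frame %d: return pc %p\n", depth++,
		    (void *)fp->fr_savpc);
		fp = fp->fr_savfp;
	}
}

static void c(void) { print_stack(); }
static void b(void) { c(); }
static void a(void) { b(); }

/* Build with: cc -O0 -fno-omit-frame-pointer walk.c */
int
main(void)
{
	a();
	return (0);
}

Running it prints one line per frame (c, b, a, main), which is the userland analogue of what dtrace_getpcstack() stores into pcstack[].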
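
One detail worth calling out in dtrace_copycheck() above: the test "uaddr + size >= kernelbase || uaddr + size < uaddr" rejects both ranges that reach into kernel space and ranges whose end address wraps past the top of the address space. Below is a self-contained sketch of that wraparound-safe check; USER_LIMIT is a hypothetical stand-in for the kernel's kernelbase, not a real constant.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-in for the kernel's kernelbase boundary. */
#define	USER_LIMIT	((uintptr_t)0xC0000000)

/*
 * Returns 1 if [addr, addr + size) lies entirely below USER_LIMIT.
 * The "addr + size < addr" clause catches integer overflow of the
 * end address, exactly as in dtrace_copycheck().
 */
static int
range_ok(uintptr_t addr, size_t size)
{
	if (addr + size >= USER_LIMIT || addr + size < addr)
		return (0);
	return (1);
}

int
main(void)
{
	printf("%d\n", range_ok(0x1000, 0x100));	/* 1: fits */
	printf("%d\n", range_ok(0xBFFFFFFF, 0x10));	/* 0: crosses limit */
	printf("%d\n", range_ok((uintptr_t)-16, 0x100)); /* 0: wraps */
	return (0);
}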
