📄 fasttrap_isa.c
		uint_t taken;
#ifdef __amd64
		greg_t cx = rp->r_rcx--;
#else
		greg_t cx = rp->r_ecx--;
#endif

		switch (tp->ftt_code) {
		case FASTTRAP_LOOPNZ:
			taken = (rp->r_ps & FASTTRAP_EFLAGS_ZF) == 0 &&
			    cx != 0;
			break;
		case FASTTRAP_LOOPZ:
			taken = (rp->r_ps & FASTTRAP_EFLAGS_ZF) != 0 &&
			    cx != 0;
			break;
		case FASTTRAP_LOOP:
			taken = (cx != 0);
			break;
		}

		if (taken)
			new_pc = tp->ftt_dest;
		else
			new_pc = pc + tp->ftt_size;
		break;
	}

	case FASTTRAP_T_JCXZ:
	{
#ifdef __amd64
		greg_t cx = rp->r_rcx;
#else
		greg_t cx = rp->r_ecx;
#endif

		if (cx == 0)
			new_pc = tp->ftt_dest;
		else
			new_pc = pc + tp->ftt_size;
		break;
	}

	case FASTTRAP_T_PUSHL_EBP:
	{
		int ret;
		uintptr_t addr;
#ifdef __amd64
		if (p->p_model == DATAMODEL_NATIVE) {
#endif
			addr = rp->r_sp - sizeof (uintptr_t);
			ret = fasttrap_sulword((void *)addr, rp->r_fp);
#ifdef __amd64
		} else {
			addr = rp->r_sp - sizeof (uint32_t);
			ret = fasttrap_suword32((void *)addr,
			    (uint32_t)rp->r_fp);
		}
#endif

		if (ret == -1) {
			fasttrap_sigsegv(p, curthread, addr);
			new_pc = pc;
			break;
		}

		rp->r_sp = addr;
		new_pc = pc + tp->ftt_size;
		break;
	}
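	/*
	 * tp->ftt_code encodes how the target of a jmp or call was
	 * specified: 0 is a direct transfer whose destination was computed
	 * when the tracepoint was created; any other value means the
	 * effective address must be rebuilt here from the base and (scaled)
	 * index registers, and a value of 1 additionally means the target
	 * itself must be loaded from that address (a memory-indirect
	 * jmp/call).
	 */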
	case FASTTRAP_T_JMP:
	case FASTTRAP_T_CALL:
		if (tp->ftt_code == 0) {
			new_pc = tp->ftt_dest;
		} else {
			uintptr_t addr = tp->ftt_dest;

			if (tp->ftt_base != FASTTRAP_NOREG)
				addr += fasttrap_getreg(rp, tp->ftt_base);
			if (tp->ftt_index != FASTTRAP_NOREG)
				addr += fasttrap_getreg(rp, tp->ftt_index) <<
				    tp->ftt_scale;

			if (tp->ftt_code == 1) {
#ifdef __amd64
				if (p->p_model == DATAMODEL_NATIVE) {
#endif
					uintptr_t value;

					if (fasttrap_fulword((void *)addr,
					    &value) == -1) {
						fasttrap_sigsegv(p, curthread,
						    addr);
						new_pc = pc;
						break;
					}
					new_pc = value;
#ifdef __amd64
				} else {
					uint32_t value;

					if (fasttrap_fuword32((void *)addr,
					    &value) == -1) {
						fasttrap_sigsegv(p, curthread,
						    addr);
						new_pc = pc;
						break;
					}
					new_pc = value;
				}
#endif
			} else {
				new_pc = addr;
			}
		}

		/*
		 * If this is a call instruction, we need to push the return
		 * address onto the stack. If this fails, we send the process
		 * a SIGSEGV and reset the pc to emulate what would happen if
		 * this instruction weren't traced.
		 */
		if (tp->ftt_type == FASTTRAP_T_CALL) {
			int ret;
			uintptr_t addr;
#ifdef __amd64
			if (p->p_model == DATAMODEL_NATIVE) {
				addr = rp->r_sp - sizeof (uintptr_t);
				ret = fasttrap_sulword((void *)addr,
				    pc + tp->ftt_size);
			} else {
#endif
				addr = rp->r_sp - sizeof (uint32_t);
				ret = fasttrap_suword32((void *)addr,
				    (uint32_t)(pc + tp->ftt_size));
#ifdef __amd64
			}
#endif

			if (ret == -1) {
				fasttrap_sigsegv(p, curthread, addr);
				new_pc = pc;
				break;
			}

			rp->r_sp = addr;
		}
		break;

	case FASTTRAP_T_COMMON:
	{
		uintptr_t addr;
		uint8_t scratch[2 * FASTTRAP_MAX_INSTR_SIZE + 5 + 2];
		uint_t i = 0;
		klwp_t *lwp = ttolwp(curthread);

		/*
		 * Compute the address of the ulwp_t and step over the
		 * ul_self pointer. The method used to store the user-land
		 * thread pointer is very different on 32- and 64-bit
		 * kernels.
		 */
#if defined(__amd64)
		if (p->p_model == DATAMODEL_LP64) {
			addr = lwp->lwp_pcb.pcb_fsbase;
			addr += sizeof (void *);
		} else {
			addr = lwp->lwp_pcb.pcb_gsbase;
			addr += sizeof (caddr32_t);
		}
#elif defined(__i386)
		addr = USEGD_GETBASE(&lwp->lwp_pcb.pcb_gsdesc);
		addr += sizeof (void *);
#endif

		/*
		 * Generic Instruction Tracing
		 * ---------------------------
		 *
		 * This is the layout of the scratch space in the user-land
		 * thread structure for our generated instructions.
		 *
		 *	32-bit mode			bytes
		 *	------------------------	-----
		 * a:	<original instruction>		<= 15
		 *	jmp	<pc + tp->ftt_size>	    5
		 * b:	<original instruction>		<= 15
		 *	int	T_DTRACE_RET		    2
		 *					-----
		 *					<= 37
		 *
		 *	64-bit mode			bytes
		 *	------------------------	-----
		 * a:	<original instruction>		<= 15
		 *	jmp	0(%rip)			    6
		 *	<pc + tp->ftt_size>		    8
		 * b:	<original instruction>		<= 15
		 *	int	T_DTRACE_RET		    2
		 *					-----
		 *					<= 46
		 *
		 * The %pc is set to a, and curthread->t_dtrace_astpc is set
		 * to b. If we encounter a signal on the way out of the
		 * kernel, trap() will set %pc to curthread->t_dtrace_astpc
		 * so that we execute the original instruction and re-enter
		 * the kernel rather than redirecting to the next instruction.
		 *
		 * If there are return probes (so we know that we're going to
		 * need to reenter the kernel after executing the original
		 * instruction), the scratch space will just contain the
		 * original instruction followed by an interrupt -- the same
		 * data as at b.
		 *
		 * %rip-relative Addressing
		 * ------------------------
		 *
		 * There's a further complication in 64-bit mode due to %rip-
		 * relative addressing. While this is clearly a beneficial
		 * architectural decision for position independent code, it's
		 * hard not to see it as a personal attack against the pid
		 * provider since before there was a relatively small set of
		 * instructions to emulate; with %rip-relative addressing,
		 * almost every instruction can potentially depend on the
		 * address at which it's executed. Rather than emulating
		 * the broad spectrum of instructions that can now be
		 * position dependent, we emulate jumps and others as in
		 * 32-bit mode, and take a different tack for instructions
		 * using %rip-relative addressing.
		 *
		 * For every instruction that uses the ModRM byte, the
		 * in-kernel disassembler reports its location. We use the
		 * ModRM byte to identify that an instruction uses
		 * %rip-relative addressing and to see what other registers
		 * the instruction uses. To emulate those instructions,
		 * we modify the instruction to be %rax-relative rather than
		 * %rip-relative (or %rcx-relative if the instruction uses
		 * %rax; or %r8- or %r9-relative if the REX.B is present so
		 * we don't have to rewrite the REX prefix). We then load
		 * the value that %rip would have been into the scratch
		 * register and generate an instruction to reset the scratch
		 * register back to its original value. The instruction
		 * sequence looks like this:
		 *
		 *	64-mode %rip-relative		bytes
		 *	------------------------	-----
		 * a:	<modified instruction>		<= 15
		 *	movq	$<value>, %<scratch>	    6
		 *	jmp	0(%rip)			    6
		 *	<pc + tp->ftt_size>		    8
		 * b:	<modified instruction>		<= 15
		 *	int	T_DTRACE_RET		    2
		 *					-----
		 *					   52
		 *
		 * We set curthread->t_dtrace_regv so that upon receiving
		 * a signal we can reset the value of the scratch register.
		 */
		ASSERT(tp->ftt_size < FASTTRAP_MAX_INSTR_SIZE);

		curthread->t_dtrace_scrpc = addr;
		bcopy(tp->ftt_instr, &scratch[i], tp->ftt_size);
		i += tp->ftt_size;

#ifdef __amd64
		if (tp->ftt_ripmode != 0) {
			greg_t *reg;

			ASSERT(p->p_model == DATAMODEL_LP64);
			ASSERT(tp->ftt_ripmode &
			    (FASTTRAP_RIP_1 | FASTTRAP_RIP_2));

			/*
			 * If this was a %rip-relative instruction, we change
			 * it to be either a %rax- or %rcx-relative
			 * instruction (depending on whether those registers
			 * are used as another operand; or %r8- or %r9-
			 * relative depending on the value of REX.B). We then
			 * set that register and generate a movq instruction
			 * to reset the value.
			 */
			if (tp->ftt_ripmode & FASTTRAP_RIP_X)
				scratch[i++] = FASTTRAP_REX(1, 0, 0, 1);
			else
				scratch[i++] = FASTTRAP_REX(1, 0, 0, 0);

			if (tp->ftt_ripmode & FASTTRAP_RIP_1)
				scratch[i++] = FASTTRAP_MOV_EAX;
			else
				scratch[i++] = FASTTRAP_MOV_ECX;

			switch (tp->ftt_ripmode) {
			case FASTTRAP_RIP_1:
				reg = &rp->r_rax;
				curthread->t_dtrace_reg = REG_RAX;
				break;
			case FASTTRAP_RIP_2:
				reg = &rp->r_rcx;
				curthread->t_dtrace_reg = REG_RCX;
				break;
			case FASTTRAP_RIP_1 | FASTTRAP_RIP_X:
				reg = &rp->r_r8;
				curthread->t_dtrace_reg = REG_R8;
				break;
			case FASTTRAP_RIP_2 | FASTTRAP_RIP_X:
				reg = &rp->r_r9;
				curthread->t_dtrace_reg = REG_R9;
				break;
			}

			*(uint64_t *)&scratch[i] = *reg;
			curthread->t_dtrace_regv = *reg;
			*reg = pc + tp->ftt_size;
			i += sizeof (uint64_t);
		}
#endif
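		/*
		 * For example, an instruction of the form
		 * "movq 0x20(%rip), %rbx" will already have been rewritten
		 * in tp->ftt_instr as "movq 0x20(%rax), %rbx"
		 * (FASTTRAP_RIP_1): %rax is set above to pc + tp->ftt_size,
		 * the value %rip would have held, and the movq generated
		 * here restores %rax as soon as the rewritten instruction
		 * has executed.
		 */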
		/*
		 * Generate the branch instruction to what would have
		 * normally been the subsequent instruction. In 32-bit mode,
		 * this is just a relative branch; in 64-bit mode this is a
		 * %rip-relative branch that loads the 64-bit pc value
		 * immediately after the jmp instruction.
		 */
#ifdef __amd64
		if (p->p_model == DATAMODEL_LP64) {
			scratch[i++] = FASTTRAP_GROUP5_OP;
			scratch[i++] = FASTTRAP_MODRM(0, 4, 5);
			*(uint32_t *)&scratch[i] = 0;
			i += sizeof (uint32_t);
			*(uint64_t *)&scratch[i] = pc + tp->ftt_size;
			i += sizeof (uint64_t);
		} else {
#endif
			/*
			 * Set up the jmp to the next instruction; note that
			 * the size of the traced instruction cancels out.
			 */
			scratch[i++] = FASTTRAP_JMP32;
			*(uint32_t *)&scratch[i] = pc - addr - 5;
			i += sizeof (uint32_t);
#ifdef __amd64
		}
#endif

		curthread->t_dtrace_astpc = addr + i;
		bcopy(tp->ftt_instr, &scratch[i], tp->ftt_size);
		i += tp->ftt_size;
		scratch[i++] = FASTTRAP_INT;
		scratch[i++] = T_DTRACE_RET;

		if (fasttrap_copyout(scratch, (char *)addr, i)) {
			fasttrap_sigtrap(p, curthread, pc);
			new_pc = pc;
			break;
		}

		if (tp->ftt_retids != NULL) {
			curthread->t_dtrace_step = 1;
			curthread->t_dtrace_ret = 1;
			new_pc = curthread->t_dtrace_astpc;
		} else {
			new_pc = curthread->t_dtrace_scrpc;
		}

		curthread->t_dtrace_pc = pc;
		curthread->t_dtrace_npc = pc + tp->ftt_size;
		curthread->t_dtrace_on = 1;
		break;
	}

	default:
		panic("fasttrap: mishandled an instruction");
	}

	/*
	 * If there were no return probes when we first found the tracepoint,
	 * we should feel no obligation to honor any return probes that were
	 * subsequently enabled -- they'll just have to wait until the next
	 * time around.
	 */
	if (tp->ftt_retids != NULL) {
		/*
		 * We need to wait until the results of the instruction are
		 * apparent before invoking any return probes. If this
		 * instruction was emulated we can just call
		 * fasttrap_return_common(); if it needs to be executed, we
		 * need to wait until we return to the kernel.
		 */
		if (tp->ftt_type != FASTTRAP_T_COMMON) {
			fasttrap_return_common(rp, pc, pid, new_pc);
		} else {
			ASSERT(curthread->t_dtrace_ret != 0);
			ASSERT(curthread->t_dtrace_pc == pc);
			ASSERT(curthread->t_dtrace_scrpc != 0);
			ASSERT(new_pc == curthread->t_dtrace_astpc);
		}
	}

	ASSERT(new_pc != 0);
	rp->r_pc = new_pc;

	return (0);
}
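/*
 * Called via dtrace_return_probe() once a thread that single-stepped the
 * copy of a traced instruction re-enters the kernel through the
 * "int T_DTRACE_RET" generated above. The t_dtrace_pc and t_dtrace_npc
 * values saved by fasttrap_pid_probe() identify the traced instruction.
 */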
int
fasttrap_return_probe(struct regs *rp)
{
	proc_t *p = curproc;
	uintptr_t pc = curthread->t_dtrace_pc;
	uintptr_t npc = curthread->t_dtrace_npc;

	curthread->t_dtrace_pc = 0;
	curthread->t_dtrace_npc = 0;
	curthread->t_dtrace_scrpc = 0;
	curthread->t_dtrace_astpc = 0;

	/*
	 * Treat a child created by a call to vfork(2) as if it were its
	 * parent. We know that there's only one thread of control in such a
	 * process: this one.
	 */
	while (p->p_flag & SVFORK) {
		p = p->p_parent;
	}

	/*
	 * We set rp->r_pc to the address of the traced instruction so
	 * that it appears to dtrace_probe() that we're on the original
	 * instruction, and so that the user can't easily detect our
	 * complex web of lies. dtrace_return_probe() (our caller)
	 * will correctly set %pc after we return.
	 */
	rp->r_pc = pc;

	fasttrap_return_common(rp, pc, p->p_pid, npc);

	return (0);
}

/*ARGSUSED*/
uint64_t
fasttrap_getarg(void *arg, dtrace_id_t id, void *parg, int argno, int aframes)
{
	return (fasttrap_anarg(ttolwp(curthread)->lwp_regs, 1, argno));
}

/*ARGSUSED*/
uint64_t
fasttrap_usdt_getarg(void *arg, dtrace_id_t id, void *parg, int argno,
    int aframes)
{
	return (fasttrap_anarg(ttolwp(curthread)->lwp_regs, 0, argno));
}

static ulong_t
fasttrap_getreg(struct regs *rp, uint_t reg)
{
#ifdef __amd64
	switch (reg) {
	case REG_R15:		return (rp->r_r15);
	case REG_R14:		return (rp->r_r14);
	case REG_R13:		return (rp->r_r13);
	case REG_R12:		return (rp->r_r12);
	case REG_R11:		return (rp->r_r11);
	case REG_R10:		return (rp->r_r10);
	case REG_R9:		return (rp->r_r9);
	case REG_R8:		return (rp->r_r8);
	case REG_RDI:		return (rp->r_rdi);
	case REG_RSI:		return (rp->r_rsi);
	case REG_RBP:		return (rp->r_rbp);
	case REG_RBX:		return (rp->r_rbx);
	case REG_RDX:		return (rp->r_rdx);
	case REG_RCX:		return (rp->r_rcx);
	case REG_RAX:		return (rp->r_rax);
	case REG_TRAPNO:	return (rp->r_trapno);
	case REG_ERR:		return (rp->r_err);
	case REG_RIP:		return (rp->r_rip);
	case REG_CS:		return (rp->r_cs);
	case REG_RFL:		return (rp->r_rfl);
	case REG_RSP:		return (rp->r_rsp);
	case REG_SS:		return (rp->r_ss);
	case REG_FS:		return (rp->r_fs);
	case REG_GS:		return (rp->r_gs);
	case REG_DS:		return (rp->r_ds);
	case REG_ES:		return (rp->r_es);
	case REG_FSBASE:	return (rp->r_fsbase);
	case REG_GSBASE:	return (rp->r_gsbase);
	}

	panic("dtrace: illegal register constant");
	/*NOTREACHED*/
#else
	if (reg >= _NGREG)
		panic("dtrace: illegal register constant");

	return (((greg_t *)rp)[reg]);
#endif
}
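The 32-bit scratch-space layout described in the FASTTRAP_T_COMMON comment is easy to reproduce in user-land. Below is a minimal sketch, not part of the driver: build_scratch32() and its arguments are illustrative names, the macro values assume FASTTRAP_MAX_INSTR_SIZE is 15 (the x86 maximum instruction length) and that FASTTRAP_JMP32 and FASTTRAP_INT are the jmp rel32 (0xe9) and int imm8 (0xcd) opcodes, and the T_DTRACE_RET trap number is a placeholder.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define	FASTTRAP_MAX_INSTR_SIZE	15	/* assumed: x86 max instr length */
#define	FASTTRAP_JMP32		0xe9	/* assumed: jmp rel32 opcode */
#define	FASTTRAP_INT		0xcd	/* assumed: int imm8 opcode */
#define	T_DTRACE_RET		0x7f	/* placeholder trap number */

/*
 * Emit the 32-bit scratch sequence from the comment above:
 * a: <original instruction> / jmp <pc + size>
 * b: <original instruction> / int T_DTRACE_RET
 * 'addr' is the user-land address at which the bytes will live.
 */
static size_t
build_scratch32(uint8_t *scratch, uint32_t addr, uint32_t pc,
    const uint8_t *instr, size_t size)
{
	size_t i = 0;
	uint32_t disp;

	memcpy(&scratch[i], instr, size);	/* a: traced instruction */
	i += size;

	/*
	 * The jmp sits at addr + size, so the rel32 displacement is
	 * taken from addr + size + 5 and the instruction size cancels:
	 * (addr + size + 5) + (pc - addr - 5) == pc + size.
	 */
	scratch[i++] = FASTTRAP_JMP32;
	disp = pc - addr - 5;
	memcpy(&scratch[i], &disp, sizeof (disp));
	i += sizeof (disp);

	memcpy(&scratch[i], instr, size);	/* b: traced instruction */
	i += size;
	scratch[i++] = FASTTRAP_INT;
	scratch[i++] = T_DTRACE_RET;

	return (i);
}

int
main(void)
{
	uint8_t scratch[2 * FASTTRAP_MAX_INSTR_SIZE + 5 + 2];
	uint8_t nop = 0x90;	/* pretend the traced instruction is a nop */
	size_t n, i;

	n = build_scratch32(scratch, 0x1000, 0x08048000, &nop, 1);
	for (i = 0; i < n; i++)
		printf("%02x ", scratch[i]);
	printf("\n");
	return (0);
}

For a one-byte nop traced at 0x08048000 with scratch space at 0x1000, this prints "90 e9 fb 6f 04 08 90 cd 7f": the nop, the five-byte jmp whose target works out to pc + 1, and the second copy ending in the interrupt.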