/* toir.c — guest-amd64 instruction-to-IR translation (fragment).
   NOTE(review): the two original lines here ("toir.c", "Font size:")
   were viewer chrome from the code-hosting page, not source text;
   they are preserved here as a comment so the file stays valid C. */
} stmt( IRStmt_Put( OFFB_CC_DEP1, widenUto64(mkexpr(arg1)) )); stmt( IRStmt_Put( OFFB_CC_DEP2, widenUto64(mkexpr(arg2)) ));}/* -------------- Condition codes. -------------- *//* Condition codes, using the AMD encoding. */static HChar* name_AMD64Condcode ( AMD64Condcode cond ){ switch (cond) { case AMD64CondO: return "o"; case AMD64CondNO: return "no"; case AMD64CondB: return "b"; case AMD64CondNB: return "ae"; /*"nb";*/ case AMD64CondZ: return "e"; /*"z";*/ case AMD64CondNZ: return "ne"; /*"nz";*/ case AMD64CondBE: return "be"; case AMD64CondNBE: return "a"; /*"nbe";*/ case AMD64CondS: return "s"; case AMD64CondNS: return "ns"; case AMD64CondP: return "p"; case AMD64CondNP: return "np"; case AMD64CondL: return "l"; case AMD64CondNL: return "ge"; /*"nl";*/ case AMD64CondLE: return "le"; case AMD64CondNLE: return "g"; /*"nle";*/ case AMD64CondAlways: return "ALWAYS"; default: vpanic("name_AMD64Condcode"); }}static AMD64Condcode positiveIse_AMD64Condcode ( AMD64Condcode cond, /*OUT*/Bool* needInvert ){ vassert(cond >= AMD64CondO && cond <= AMD64CondNLE); if (cond & 1) { *needInvert = True; return cond-1; } else { *needInvert = False; return cond; }}/* -------------- Helpers for ADD/SUB with carry. 
-------------- *//* Given ta1, ta2 and tres, compute tres = ADC(ta1,ta2) and set flags appropriately.*/static void helper_ADC ( Int sz, IRTemp tres, IRTemp ta1, IRTemp ta2 ){ UInt thunkOp; IRType ty = szToITy(sz); IRTemp oldc = newTemp(Ity_I64); IRTemp oldcn = newTemp(ty); IROp plus = mkSizedOp(ty, Iop_Add8); IROp xor = mkSizedOp(ty, Iop_Xor8); switch (sz) { case 8: thunkOp = AMD64G_CC_OP_ADCQ; break; case 4: thunkOp = AMD64G_CC_OP_ADCL; break; case 2: thunkOp = AMD64G_CC_OP_ADCW; break; case 1: thunkOp = AMD64G_CC_OP_ADCB; break; default: vassert(0); } /* oldc = old carry flag, 0 or 1 */ assign( oldc, binop(Iop_And64, mk_amd64g_calculate_rflags_c(), mkU64(1)) ); assign( oldcn, narrowTo(ty, mkexpr(oldc)) ); assign( tres, binop(plus, binop(plus,mkexpr(ta1),mkexpr(ta2)), mkexpr(oldcn)) ); stmt( IRStmt_Put( OFFB_CC_OP, mkU64(thunkOp) ) ); stmt( IRStmt_Put( OFFB_CC_DEP1, widenUto64(mkexpr(ta1)) )); stmt( IRStmt_Put( OFFB_CC_DEP2, widenUto64(binop(xor, mkexpr(ta2), mkexpr(oldcn)) )) ); stmt( IRStmt_Put( OFFB_CC_NDEP, mkexpr(oldc) ) );}/* Given ta1, ta2 and tres, compute tres = SBB(ta1,ta2) and set flags appropriately.*/static void helper_SBB ( Int sz, IRTemp tres, IRTemp ta1, IRTemp ta2 ){ UInt thunkOp; IRType ty = szToITy(sz); IRTemp oldc = newTemp(Ity_I64); IRTemp oldcn = newTemp(ty); IROp minus = mkSizedOp(ty, Iop_Sub8); IROp xor = mkSizedOp(ty, Iop_Xor8); switch (sz) { case 8: thunkOp = AMD64G_CC_OP_SBBQ; break; case 4: thunkOp = AMD64G_CC_OP_SBBL; break; case 2: thunkOp = AMD64G_CC_OP_SBBW; break; case 1: thunkOp = AMD64G_CC_OP_SBBB; break; default: vassert(0); } /* oldc = old carry flag, 0 or 1 */ assign( oldc, binop(Iop_And64, mk_amd64g_calculate_rflags_c(), mkU64(1)) ); assign( oldcn, narrowTo(ty, mkexpr(oldc)) ); assign( tres, binop(minus, binop(minus,mkexpr(ta1),mkexpr(ta2)), mkexpr(oldcn)) ); stmt( IRStmt_Put( OFFB_CC_OP, mkU64(thunkOp) ) ); stmt( IRStmt_Put( OFFB_CC_DEP1, widenUto64(mkexpr(ta1) )) ); stmt( IRStmt_Put( OFFB_CC_DEP2, widenUto64(binop(xor, 
mkexpr(ta2), mkexpr(oldcn)) )) ); stmt( IRStmt_Put( OFFB_CC_NDEP, mkexpr(oldc) ) );}/* -------------- Helpers for disassembly printing. -------------- */static HChar* nameGrp1 ( Int opc_aux ){ static HChar* grp1_names[8] = { "add", "or", "adc", "sbb", "and", "sub", "xor", "cmp" }; if (opc_aux < 0 || opc_aux > 7) vpanic("nameGrp1(amd64)"); return grp1_names[opc_aux];}static HChar* nameGrp2 ( Int opc_aux ){ static HChar* grp2_names[8] = { "rol", "ror", "rcl", "rcr", "shl", "shr", "shl", "sar" }; if (opc_aux < 0 || opc_aux > 7) vpanic("nameGrp2(amd64)"); return grp2_names[opc_aux];}static HChar* nameGrp4 ( Int opc_aux ){ static HChar* grp4_names[8] = { "inc", "dec", "???", "???", "???", "???", "???", "???" }; if (opc_aux < 0 || opc_aux > 1) vpanic("nameGrp4(amd64)"); return grp4_names[opc_aux];}static HChar* nameGrp5 ( Int opc_aux ){ static HChar* grp5_names[8] = { "inc", "dec", "call*", "call*", "jmp*", "jmp*", "push", "???" }; if (opc_aux < 0 || opc_aux > 6) vpanic("nameGrp5(amd64)"); return grp5_names[opc_aux];}static HChar* nameGrp8 ( Int opc_aux ){ static HChar* grp8_names[8] = { "???", "???", "???", "???", "bt", "bts", "btr", "btc" }; if (opc_aux < 4 || opc_aux > 7) vpanic("nameGrp8(amd64)"); return grp8_names[opc_aux];}//.. static HChar* nameSReg ( UInt sreg )//.. {//.. switch (sreg) {//.. case R_ES: return "%es";//.. case R_CS: return "%cs";//.. case R_SS: return "%ss";//.. case R_DS: return "%ds";//.. case R_FS: return "%fs";//.. case R_GS: return "%gs";//.. default: vpanic("nameSReg(x86)");//.. }//.. 
}static HChar* nameMMXReg ( Int mmxreg ){ static HChar* mmx_names[8] = { "%mm0", "%mm1", "%mm2", "%mm3", "%mm4", "%mm5", "%mm6", "%mm7" }; if (mmxreg < 0 || mmxreg > 7) vpanic("nameMMXReg(amd64,guest)"); return mmx_names[mmxreg];}static HChar* nameXMMReg ( Int xmmreg ){ static HChar* xmm_names[16] = { "%xmm0", "%xmm1", "%xmm2", "%xmm3", "%xmm4", "%xmm5", "%xmm6", "%xmm7", "%xmm8", "%xmm9", "%xmm10", "%xmm11", "%xmm12", "%xmm13", "%xmm14", "%xmm15" }; if (xmmreg < 0 || xmmreg > 15) vpanic("nameXMMReg(amd64)"); return xmm_names[xmmreg];} static HChar* nameMMXGran ( Int gran ){ switch (gran) { case 0: return "b"; case 1: return "w"; case 2: return "d"; case 3: return "q"; default: vpanic("nameMMXGran(amd64,guest)"); }}static HChar nameISize ( Int size ){ switch (size) { case 8: return 'q'; case 4: return 'l'; case 2: return 'w'; case 1: return 'b'; default: vpanic("nameISize(amd64)"); }}/*------------------------------------------------------------*//*--- JMP helpers ---*//*------------------------------------------------------------*/static void jmp_lit( IRJumpKind kind, Addr64 d64 ){ irbb->next = mkU64(d64); irbb->jumpkind = kind;}static void jmp_treg( IRJumpKind kind, IRTemp t ){ irbb->next = mkexpr(t); irbb->jumpkind = kind;}static void jcc_01 ( AMD64Condcode cond, Addr64 d64_false, Addr64 d64_true ){ Bool invert; AMD64Condcode condPos; condPos = positiveIse_AMD64Condcode ( cond, &invert ); if (invert) { stmt( IRStmt_Exit( mk_amd64g_calculate_condition(condPos), Ijk_Boring, IRConst_U64(d64_false) ) ); irbb->next = mkU64(d64_true); irbb->jumpkind = Ijk_Boring; } else { stmt( IRStmt_Exit( mk_amd64g_calculate_condition(condPos), Ijk_Boring, IRConst_U64(d64_true) ) ); irbb->next = mkU64(d64_false); irbb->jumpkind = Ijk_Boring; }}/* Let new_rsp be the %rsp value after a call/return. This function generates an AbiHint to say that -128(%rsp) .. 
-1(%rsp) should now be regarded as uninitialised.*/static void make_redzone_AbiHint ( IRTemp new_rsp, HChar* who ){ if (0) vex_printf("AbiHint: %s\n", who); vassert(typeOfIRTemp(irbb->tyenv, new_rsp) == Ity_I64); stmt( IRStmt_AbiHint( binop(Iop_Sub64, mkexpr(new_rsp), mkU64(128)), 128 ));}/*------------------------------------------------------------*//*--- Disassembling addressing modes ---*//*------------------------------------------------------------*/static HChar* sorbTxt ( Prefix pfx ){ if (pfx & PFX_CS) return "%cs:"; if (pfx & PFX_DS) return "%ds:"; if (pfx & PFX_ES) return "%es:"; if (pfx & PFX_FS) return "%fs:"; if (pfx & PFX_GS) return "%gs:"; if (pfx & PFX_SS) return "%ss:"; return ""; /* no override */}/* 'virtual' is an IRExpr* holding a virtual address. Convert it to a linear address by adding any required segment override as indicated by sorb, and also dealing with any address size override present. */staticIRExpr* handleAddrOverrides ( Prefix pfx, IRExpr* virtual ){ /* --- segment overrides --- */ if (pfx & PFX_FS) { /* Note that this is a linux-kernel specific hack that relies on the assumption that %fs is always zero. */ /* return virtual + guest_FS_ZERO. */ virtual = binop(Iop_Add64, virtual, IRExpr_Get(OFFB_FS_ZERO, Ity_I64)); } if (pfx & PFX_GS) { unimplemented("amd64 %gs segment override"); } /* cs, ds, es and ss are simply ignored in 64-bit mode. */ /* --- address size override --- */ if (haveASO(pfx)) virtual = unop(Iop_32Uto64, unop(Iop_64to32, virtual)); return virtual;}//.. {//.. Int sreg;//.. IRType hWordTy;//.. IRTemp ldt_ptr, gdt_ptr, seg_selector, r64;//.. //.. if (sorb == 0)//.. /* the common case - no override *///.. return virtual;//.. //.. switch (sorb) {//.. case 0x3E: sreg = R_DS; break;//.. case 0x26: sreg = R_ES; break;//.. case 0x64: sreg = R_FS; break;//.. case 0x65: sreg = R_GS; break;//.. default: vpanic("handleAddrOverrides(x86,guest)");//.. }//.. //.. hWordTy = sizeof(HWord)==4 ? Ity_I32 : Ity_I64;//.. //.. 
seg_selector = newTemp(Ity_I32);//.. ldt_ptr = newTemp(hWordTy);//.. gdt_ptr = newTemp(hWordTy);//.. r64 = newTemp(Ity_I64);//.. //.. assign( seg_selector, unop(Iop_16Uto32, getSReg(sreg)) );//.. assign( ldt_ptr, IRExpr_Get( OFFB_LDT, hWordTy ));//.. assign( gdt_ptr, IRExpr_Get( OFFB_GDT, hWordTy ));//.. //.. /*//.. Call this to do the translation and limit checks: //.. ULong x86g_use_seg_selector ( HWord ldt, HWord gdt,//.. UInt seg_selector, UInt virtual_addr )//.. *///.. assign( //.. r64, //.. mkIRExprCCall( //.. Ity_I64, //.. 0/*regparms*/, //.. "x86g_use_seg_selector", //.. &x86g_use_seg_selector, //.. mkIRExprVec_4( mkexpr(ldt_ptr), mkexpr(gdt_ptr), //.. mkexpr(seg_selector), virtual)//.. )//.. );//.. //.. /* If the high 32 of the result are non-zero, there was a //.. failure in address translation. In which case, make a//.. quick exit.//.. *///.. stmt( //.. IRStmt_Exit(//.. binop(Iop_CmpNE32, unop(Iop_64HIto32, mkexpr(r64)), mkU32(0)),//.. Ijk_MapFail,//.. IRConst_U32( guest_eip_curr_instr )//.. )//.. );//.. //.. /* otherwise, here's the translated result. *///.. return unop(Iop_64to32, mkexpr(r64));//.. }/* Generate IR to calculate an address indicated by a ModRM and following SIB bytes. The expression, and the number of bytes in the address mode, are returned (the latter in *len). Note that this fn should not be called if the R/M part of the address denotes a register instead of memory. If print_codegen is true, text of the addressing mode is placed in buf. The computed address is stored in a new tempreg, and the identity of the tempreg is returned. extra_bytes holds the number of bytes after the amode, as supplied by the caller. This is needed to make sense of %rip-relative addresses. Note that the value that *len is set to is only the length of the amode itself and does not include the value supplied in extra_bytes. */static IRTemp disAMode_copy2tmp ( IRExpr* addr64 ){ IRTemp tmp = newTemp(Ity_I64); assign( tmp, addr64 ); return tmp;}static IRTemp di
/* NOTE(review): the remaining lines were trailing viewer chrome from
   the code-hosting page, not part of the source.  Original text
   (translated from Chinese):
     Keyboard shortcuts:
       Copy code            Ctrl + C
       Search code          Ctrl + F
       Full-screen mode     F11
       Toggle theme         Ctrl + Shift + D
       Show shortcuts       ?
       Increase font size   Ctrl + =
       Decrease font size   Ctrl + -
*/