📄 toir.c
字号:
case 7: return OFFB_XMM7; case 8: return OFFB_XMM8; case 9: return OFFB_XMM9; case 10: return OFFB_XMM10; case 11: return OFFB_XMM11; case 12: return OFFB_XMM12; case 13: return OFFB_XMM13; case 14: return OFFB_XMM14; case 15: return OFFB_XMM15; default: vpanic("xmmGuestRegOffset(amd64)"); }}/* Lanes of vector registers are always numbered from zero being the least significant lane (rightmost in the register). */static Int xmmGuestRegLane16offset ( UInt xmmreg, Int laneno ){ /* Correct for little-endian host only. */ vassert(!host_is_bigendian); vassert(laneno >= 0 && laneno < 8); return xmmGuestRegOffset( xmmreg ) + 2 * laneno;}static Int xmmGuestRegLane32offset ( UInt xmmreg, Int laneno ){ /* Correct for little-endian host only. */ vassert(!host_is_bigendian); vassert(laneno >= 0 && laneno < 4); return xmmGuestRegOffset( xmmreg ) + 4 * laneno;}static Int xmmGuestRegLane64offset ( UInt xmmreg, Int laneno ){ /* Correct for little-endian host only. */ vassert(!host_is_bigendian); vassert(laneno >= 0 && laneno < 2); return xmmGuestRegOffset( xmmreg ) + 8 * laneno;}//.. static IRExpr* getSReg ( UInt sreg )//.. {//.. return IRExpr_Get( segmentGuestRegOffset(sreg), Ity_I16 );//.. }//.. //.. static void putSReg ( UInt sreg, IRExpr* e )//.. {//.. vassert(typeOfIRExpr(irbb->tyenv,e) == Ity_I16);//.. stmt( IRStmt_Put( segmentGuestRegOffset(sreg), e ) );//.. 
}static IRExpr* getXMMReg ( UInt xmmreg ){ return IRExpr_Get( xmmGuestRegOffset(xmmreg), Ity_V128 );}static IRExpr* getXMMRegLane64 ( UInt xmmreg, Int laneno ){ return IRExpr_Get( xmmGuestRegLane64offset(xmmreg,laneno), Ity_I64 );}static IRExpr* getXMMRegLane64F ( UInt xmmreg, Int laneno ){ return IRExpr_Get( xmmGuestRegLane64offset(xmmreg,laneno), Ity_F64 );}static IRExpr* getXMMRegLane32 ( UInt xmmreg, Int laneno ){ return IRExpr_Get( xmmGuestRegLane32offset(xmmreg,laneno), Ity_I32 );}static IRExpr* getXMMRegLane32F ( UInt xmmreg, Int laneno ){ return IRExpr_Get( xmmGuestRegLane32offset(xmmreg,laneno), Ity_F32 );}static void putXMMReg ( UInt xmmreg, IRExpr* e ){ vassert(typeOfIRExpr(irbb->tyenv,e) == Ity_V128); stmt( IRStmt_Put( xmmGuestRegOffset(xmmreg), e ) );}static void putXMMRegLane64 ( UInt xmmreg, Int laneno, IRExpr* e ){ vassert(typeOfIRExpr(irbb->tyenv,e) == Ity_I64); stmt( IRStmt_Put( xmmGuestRegLane64offset(xmmreg,laneno), e ) );}static void putXMMRegLane64F ( UInt xmmreg, Int laneno, IRExpr* e ){ vassert(typeOfIRExpr(irbb->tyenv,e) == Ity_F64); stmt( IRStmt_Put( xmmGuestRegLane64offset(xmmreg,laneno), e ) );}static void putXMMRegLane32F ( UInt xmmreg, Int laneno, IRExpr* e ){ vassert(typeOfIRExpr(irbb->tyenv,e) == Ity_F32); stmt( IRStmt_Put( xmmGuestRegLane32offset(xmmreg,laneno), e ) );}static void putXMMRegLane32 ( UInt xmmreg, Int laneno, IRExpr* e ){ vassert(typeOfIRExpr(irbb->tyenv,e) == Ity_I32); stmt( IRStmt_Put( xmmGuestRegLane32offset(xmmreg,laneno), e ) );}static void putXMMRegLane16 ( UInt xmmreg, Int laneno, IRExpr* e ){ vassert(typeOfIRExpr(irbb->tyenv,e) == Ity_I16); stmt( IRStmt_Put( xmmGuestRegLane16offset(xmmreg,laneno), e ) );}static IRExpr* mkV128 ( UShort mask ){ return IRExpr_Const(IRConst_V128(mask));}static IRExpr* mkAnd1 ( IRExpr* x, IRExpr* y ){ vassert(typeOfIRExpr(irbb->tyenv,x) == Ity_I1); vassert(typeOfIRExpr(irbb->tyenv,y) == Ity_I1); return unop(Iop_64to1, binop(Iop_And64, unop(Iop_1Uto64,x), 
unop(Iop_1Uto64,y)));}/*------------------------------------------------------------*//*--- Helpers for %rflags. ---*//*------------------------------------------------------------*//* -------------- Evaluating the flags-thunk. -------------- *//* Build IR to calculate all the eflags from stored CC_OP/CC_DEP1/CC_DEP2/CC_NDEP. Returns an expression :: Ity_I64. */static IRExpr* mk_amd64g_calculate_rflags_all ( void ){ IRExpr** args = mkIRExprVec_4( IRExpr_Get(OFFB_CC_OP, Ity_I64), IRExpr_Get(OFFB_CC_DEP1, Ity_I64), IRExpr_Get(OFFB_CC_DEP2, Ity_I64), IRExpr_Get(OFFB_CC_NDEP, Ity_I64) ); IRExpr* call = mkIRExprCCall( Ity_I64, 0/*regparm*/, "amd64g_calculate_rflags_all", &amd64g_calculate_rflags_all, args ); /* Exclude OP and NDEP from definedness checking. We're only interested in DEP1 and DEP2. */ call->Iex.CCall.cee->mcx_mask = (1<<0) | (1<<3); return call;}/* Build IR to calculate some particular condition from stored CC_OP/CC_DEP1/CC_DEP2/CC_NDEP. Returns an expression :: Ity_Bit. */static IRExpr* mk_amd64g_calculate_condition ( AMD64Condcode cond ){ IRExpr** args = mkIRExprVec_5( mkU64(cond), IRExpr_Get(OFFB_CC_OP, Ity_I64), IRExpr_Get(OFFB_CC_DEP1, Ity_I64), IRExpr_Get(OFFB_CC_DEP2, Ity_I64), IRExpr_Get(OFFB_CC_NDEP, Ity_I64) ); IRExpr* call = mkIRExprCCall( Ity_I64, 0/*regparm*/, "amd64g_calculate_condition", &amd64g_calculate_condition, args ); /* Exclude the requested condition, OP and NDEP from definedness checking. We're only interested in DEP1 and DEP2. */ call->Iex.CCall.cee->mcx_mask = (1<<0) | (1<<1) | (1<<4); return unop(Iop_64to1, call);}/* Build IR to calculate just the carry flag from stored CC_OP/CC_DEP1/CC_DEP2/CC_NDEP. Returns an expression :: Ity_I64. 
*/static IRExpr* mk_amd64g_calculate_rflags_c ( void ){ IRExpr** args = mkIRExprVec_4( IRExpr_Get(OFFB_CC_OP, Ity_I64), IRExpr_Get(OFFB_CC_DEP1, Ity_I64), IRExpr_Get(OFFB_CC_DEP2, Ity_I64), IRExpr_Get(OFFB_CC_NDEP, Ity_I64) ); IRExpr* call = mkIRExprCCall( Ity_I64, 0/*regparm*/, "amd64g_calculate_rflags_c", &amd64g_calculate_rflags_c, args ); /* Exclude OP and NDEP from definedness checking. We're only interested in DEP1 and DEP2. */ call->Iex.CCall.cee->mcx_mask = (1<<0) | (1<<3); return call;}/* -------------- Building the flags-thunk. -------------- *//* The machinery in this section builds the flag-thunk following a flag-setting operation. Hence the various setFlags_* functions.*/static Bool isAddSub ( IROp op8 ){ return toBool(op8 == Iop_Add8 || op8 == Iop_Sub8);}static Bool isLogic ( IROp op8 ){ return toBool(op8 == Iop_And8 || op8 == Iop_Or8 || op8 == Iop_Xor8);}/* U-widen 8/16/32/64 bit int expr to 64. */static IRExpr* widenUto64 ( IRExpr* e ){ switch (typeOfIRExpr(irbb->tyenv,e)) { case Ity_I64: return e; case Ity_I32: return unop(Iop_32Uto64, e); case Ity_I16: return unop(Iop_16Uto64, e); case Ity_I8: return unop(Iop_8Uto64, e); default: vpanic("widenUto64"); }}/* S-widen 8/16/32/64 bit int expr to 32. */static IRExpr* widenSto64 ( IRExpr* e ){ switch (typeOfIRExpr(irbb->tyenv,e)) { case Ity_I64: return e; case Ity_I32: return unop(Iop_32Sto64, e); case Ity_I16: return unop(Iop_16Sto64, e); case Ity_I8: return unop(Iop_8Sto64, e); default: vpanic("widenSto64"); }}/* Narrow 8/16/32/64 bit int expr to 8/16/32/64. Clearly only some of these combinations make sense. 
*/static IRExpr* narrowTo ( IRType dst_ty, IRExpr* e ){ IRType src_ty = typeOfIRExpr(irbb->tyenv,e); if (src_ty == dst_ty) return e; if (src_ty == Ity_I32 && dst_ty == Ity_I16) return unop(Iop_32to16, e); if (src_ty == Ity_I32 && dst_ty == Ity_I8) return unop(Iop_32to8, e); if (src_ty == Ity_I64 && dst_ty == Ity_I32) return unop(Iop_64to32, e); if (src_ty == Ity_I64 && dst_ty == Ity_I16) return unop(Iop_64to16, e); if (src_ty == Ity_I64 && dst_ty == Ity_I8) return unop(Iop_64to8, e); vex_printf("\nsrc, dst tys are: "); ppIRType(src_ty); vex_printf(", "); ppIRType(dst_ty); vex_printf("\n"); vpanic("narrowTo(amd64)");}/* Set the flags thunk OP, DEP1 and DEP2 fields. The supplied op is auto-sized up to the real op. */static void setFlags_DEP1_DEP2 ( IROp op8, IRTemp dep1, IRTemp dep2, IRType ty ){ Int ccOp = 0; switch (ty) { case Ity_I8: ccOp = 0; break; case Ity_I16: ccOp = 1; break; case Ity_I32: ccOp = 2; break; case Ity_I64: ccOp = 3; break; default: vassert(0); } switch (op8) { case Iop_Add8: ccOp += AMD64G_CC_OP_ADDB; break; case Iop_Sub8: ccOp += AMD64G_CC_OP_SUBB; break; default: ppIROp(op8); vpanic("setFlags_DEP1_DEP2(amd64)"); } stmt( IRStmt_Put( OFFB_CC_OP, mkU64(ccOp)) ); stmt( IRStmt_Put( OFFB_CC_DEP1, widenUto64(mkexpr(dep1))) ); stmt( IRStmt_Put( OFFB_CC_DEP2, widenUto64(mkexpr(dep2))) );}/* Set the OP and DEP1 fields only, and write zero to DEP2. */static void setFlags_DEP1 ( IROp op8, IRTemp dep1, IRType ty ){ Int ccOp = 0; switch (ty) { case Ity_I8: ccOp = 0; break; case Ity_I16: ccOp = 1; break; case Ity_I32: ccOp = 2; break; case Ity_I64: ccOp = 3; break; default: vassert(0); } switch (op8) { case Iop_Or8: case Iop_And8: case Iop_Xor8: ccOp += AMD64G_CC_OP_LOGICB; break; default: ppIROp(op8); vpanic("setFlags_DEP1(amd64)"); } stmt( IRStmt_Put( OFFB_CC_OP, mkU64(ccOp)) ); stmt( IRStmt_Put( OFFB_CC_DEP1, widenUto64(mkexpr(dep1))) ); stmt( IRStmt_Put( OFFB_CC_DEP2, mkU64(0)) );}/* For shift operations, we put in the result and the undershifted result. 
Except if the shift amount is zero, the thunk is left unchanged. */static void setFlags_DEP1_DEP2_shift ( IROp op64, IRTemp res, IRTemp resUS, IRType ty, IRTemp guard ){ Int ccOp = 0; switch (ty) { case Ity_I8: ccOp = 0; break; case Ity_I16: ccOp = 1; break; case Ity_I32: ccOp = 2; break; case Ity_I64: ccOp = 3; break; default: vassert(0); } vassert(guard); /* Both kinds of right shifts are handled by the same thunk operation. */ switch (op64) { case Iop_Shr64: case Iop_Sar64: ccOp += AMD64G_CC_OP_SHRB; break; case Iop_Shl64: ccOp += AMD64G_CC_OP_SHLB; break; default: ppIROp(op64); vpanic("setFlags_DEP1_DEP2_shift(amd64)"); } /* DEP1 contains the result, DEP2 contains the undershifted value. */ stmt( IRStmt_Put( OFFB_CC_OP, IRExpr_Mux0X( mkexpr(guard), IRExpr_Get(OFFB_CC_OP,Ity_I64), mkU64(ccOp))) ); stmt( IRStmt_Put( OFFB_CC_DEP1, IRExpr_Mux0X( mkexpr(guard), IRExpr_Get(OFFB_CC_DEP1,Ity_I64), widenUto64(mkexpr(res)))) ); stmt( IRStmt_Put( OFFB_CC_DEP2, IRExpr_Mux0X( mkexpr(guard), IRExpr_Get(OFFB_CC_DEP2,Ity_I64), widenUto64(mkexpr(resUS)))) );}/* For the inc/dec case, we store in DEP1 the result value and in NDEP the former value of the carry flag, which unfortunately we have to compute. */static void setFlags_INC_DEC ( Bool inc, IRTemp res, IRType ty ){ Int ccOp = inc ? AMD64G_CC_OP_INCB : AMD64G_CC_OP_DECB; switch (ty) { case Ity_I8: ccOp += 0; break; case Ity_I16: ccOp += 1; break; case Ity_I32: ccOp += 2; break; case Ity_I64: ccOp += 3; break; default: vassert(0); } /* This has to come first, because calculating the C flag may require reading all four thunk fields. */ stmt( IRStmt_Put( OFFB_CC_NDEP, mk_amd64g_calculate_rflags_c()) ); stmt( IRStmt_Put( OFFB_CC_OP, mkU64(ccOp)) ); stmt( IRStmt_Put( OFFB_CC_DEP1, mkexpr(res)) ); stmt( IRStmt_Put( OFFB_CC_DEP2, mkU64(0)) );}/* Multiplies are pretty much like add and sub: DEP1 and DEP2 hold the two arguments. 
*/
static
void setFlags_MUL ( IRType ty, IRTemp arg1, IRTemp arg2, ULong base_op )
{
   /* base_op is the 8-bit variant of the multiply thunk op; +1/+2/+3
      select the 16/32/64-bit variants. */
   switch (ty) {
      case Ity_I8:
         stmt( IRStmt_Put( OFFB_CC_OP, mkU64(base_op+0) ) );
         break;
      case Ity_I16:
         stmt( IRStmt_Put( OFFB_CC_OP, mkU64(base_op+1) ) );
         break;
      case Ity_I32:
         stmt( IRStmt_Put( OFFB_CC_OP, mkU64(base_op+2) ) );
         break;
      case Ity_I64:
         stmt( IRStmt_Put( OFFB_CC_OP, mkU64(base_op+3) ) );
         break;
      default:
         vpanic("setFlags_MUL(amd64)");
         /* NOTE(review): the function is truncated here in this chunk --
            the DEP1/DEP2 stores (presumably using arg1/arg2) and the
            closing braces lie beyond the visible text. */
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -