isel.c
         case Iop_V128HIto64:
         case Iop_V128to64: {
            Int  off = e->Iex.Unop.op==Iop_V128HIto64 ? 8 : 0;
            HReg dst = newVRegI(env);
            HReg vec = iselVecExpr(env, e->Iex.Unop.arg);
            AMD64AMode* rsp0 = AMD64AMode_IR(0,   hregAMD64_RSP());
            AMD64AMode* rspN = AMD64AMode_IR(off, hregAMD64_RSP());
            sub_from_rsp(env, 16);
            addInstr(env, AMD64Instr_SseLdSt(False/*store*/, 16, vec, rsp0));
            addInstr(env, AMD64Instr_Alu64R( Aalu_MOV,
                                             AMD64RMI_Mem(rspN), dst ));
            add_to_rsp(env, 16);
            return dst;
         }

         /* ReinterpF64asI64(e) */
         /* Given an IEEE754 double, produce an I64 with the same
            bit pattern. */
         case Iop_ReinterpF64asI64: {
            AMD64AMode* m8_rsp = AMD64AMode_IR(-8, hregAMD64_RSP());
            HReg        dst    = newVRegI(env);
            HReg        src    = iselDblExpr(env, e->Iex.Unop.arg);
            /* paranoia */
            set_SSE_rounding_default(env);
            addInstr(env, AMD64Instr_SseLdSt(False/*store*/, 8, src, m8_rsp));
            addInstr(env, AMD64Instr_Alu64R(
                             Aalu_MOV, AMD64RMI_Mem(m8_rsp), dst));
            return dst;
         }

         case Iop_16to8:
         case Iop_32to8:
         case Iop_64to8:
         case Iop_32to16:
         case Iop_64to16:
         case Iop_64to32:
            /* These are no-ops. */
            return iselIntExpr_R(env, e->Iex.Unop.arg);

         default:
            break;
      }

      /* Deal with unary 64-bit SIMD ops. */
      switch (e->Iex.Unop.op) {
         case Iop_CmpNEZ32x2:
            fn = (HWord)h_generic_calc_CmpNEZ32x2; break;
         case Iop_CmpNEZ16x4:
            fn = (HWord)h_generic_calc_CmpNEZ16x4; break;
         case Iop_CmpNEZ8x8:
            fn = (HWord)h_generic_calc_CmpNEZ8x8; break;
         default:
            fn = (HWord)0; break;
      }
      if (fn != (HWord)0) {
         /* Note: the following assumes all helpers are of signature
               ULong fn ( ULong ),
            and they are not marked as regparm functions. */
         HReg dst = newVRegI(env);
         HReg arg = iselIntExpr_R(env, e->Iex.Unop.arg);
         addInstr(env, mk_iMOVsd_RR(arg, hregAMD64_RDI()) );
         addInstr(env, AMD64Instr_Call( Acc_ALWAYS, (ULong)fn, 1 ));
         addInstr(env, mk_iMOVsd_RR(hregAMD64_RAX(), dst));
         return dst;
      }

      break;
   }

   /* --------- GET --------- */
   case Iex_Get: {
      if (ty == Ity_I64) {
         HReg dst = newVRegI(env);
         addInstr(env, AMD64Instr_Alu64R(
                          Aalu_MOV,
                          AMD64RMI_Mem(
                             AMD64AMode_IR(e->Iex.Get.offset,
                                           hregAMD64_RBP())),
                          dst));
         return dst;
      }
      if (ty == Ity_I8 || ty == Ity_I16 || ty == Ity_I32) {
         HReg dst = newVRegI(env);
         addInstr(env, AMD64Instr_LoadEX(
                          toUChar(ty==Ity_I8 ? 1 : (ty==Ity_I16 ? 2 : 4)),
                          False,
                          AMD64AMode_IR(e->Iex.Get.offset,hregAMD64_RBP()),
                          dst));
         return dst;
      }
      break;
   }

   case Iex_GetI: {
      AMD64AMode* am
         = genGuestArrayOffset(
              env, e->Iex.GetI.descr,
                   e->Iex.GetI.ix, e->Iex.GetI.bias );
      HReg dst = newVRegI(env);
      if (ty == Ity_I8) {
         addInstr(env, AMD64Instr_LoadEX( 1, False, am, dst ));
         return dst;
      }
      if (ty == Ity_I64) {
         addInstr(env, AMD64Instr_Alu64R( Aalu_MOV, AMD64RMI_Mem(am), dst ));
         return dst;
      }
      break;
   }
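   /* In the Iex_Get/Iex_GetI cases above, the guest state is addressed
      relative to %rbp (hregAMD64_RBP), which here acts as the
      guest-state pointer, so a Get at offset N becomes a load from
      N(%rbp).  In the Iex_CCall case below, doHelperCall marshals the
      arguments and emits the call; the result is then picked up from
      %rax and, for a 32-bit return type, zero-extended with a movzlq. */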
   /* --------- CCALL --------- */
   case Iex_CCall: {
      HReg dst = newVRegI(env);
      vassert(ty == e->Iex.CCall.retty);

      /* be very restrictive for now.  Only 64-bit ints allowed
         for args, and 64 or 32 bits for return type. */
      if (e->Iex.CCall.retty != Ity_I64 && e->Iex.CCall.retty != Ity_I32)
         goto irreducible;

      /* Marshal args, do the call. */
      doHelperCall( env, False, NULL, e->Iex.CCall.cee, e->Iex.CCall.args );

      /* Move to dst, and zero out the top 32 bits if the result type
         is Ity_I32.  Probably overkill, but still .. */
      if (e->Iex.CCall.retty == Ity_I64)
         addInstr(env, mk_iMOVsd_RR(hregAMD64_RAX(), dst));
      else
         addInstr(env, AMD64Instr_MovZLQ(hregAMD64_RAX(), dst));
      return dst;
   }

   /* --------- LITERAL --------- */
   /* 64/32/16/8-bit literals */
   case Iex_Const:
      if (ty == Ity_I64) {
         HReg r = newVRegI(env);
         addInstr(env, AMD64Instr_Imm64(e->Iex.Const.con->Ico.U64, r));
         return r;
      } else {
         AMD64RMI* rmi = iselIntExpr_RMI ( env, e );
         HReg      r   = newVRegI(env);
         addInstr(env, AMD64Instr_Alu64R(Aalu_MOV, rmi, r));
         return r;
      }

   /* --------- MULTIPLEX --------- */
   case Iex_Mux0X: {
      if ((ty == Ity_I64 || ty == Ity_I32 || ty == Ity_I16 || ty == Ity_I8)
          && typeOfIRExpr(env->type_env,e->Iex.Mux0X.cond) == Ity_I8) {
         HReg     r8;
         HReg     rX  = iselIntExpr_R(env, e->Iex.Mux0X.exprX);
         AMD64RM* r0  = iselIntExpr_RM(env, e->Iex.Mux0X.expr0);
         HReg     dst = newVRegI(env);
         addInstr(env, mk_iMOVsd_RR(rX,dst));
         r8 = iselIntExpr_R(env, e->Iex.Mux0X.cond);
         addInstr(env, AMD64Instr_Test64(0xFF, r8));
         addInstr(env, AMD64Instr_CMov64(Acc_Z,r0,dst));
         return dst;
      }
      break;
   }

   /* --------- TERNARY OP --------- */
   case Iex_Triop: {
      /* C3210 flags following FPU partial remainder (fprem), both
         IEEE compliant (PREM1) and non-IEEE compliant (PREM). */
      if (e->Iex.Triop.op == Iop_PRemC3210F64) {
         AMD64AMode* m8_rsp = AMD64AMode_IR(-8, hregAMD64_RSP());
         HReg        arg1   = iselDblExpr(env, e->Iex.Triop.arg2);
         HReg        arg2   = iselDblExpr(env, e->Iex.Triop.arg3);
         HReg        dst    = newVRegI(env);
         addInstr(env, AMD64Instr_A87Free(2));

         /* one arg -> top of x87 stack */
         addInstr(env, AMD64Instr_SseLdSt(False/*store*/, 8, arg2, m8_rsp));
         addInstr(env, AMD64Instr_A87PushPop(m8_rsp, True/*push*/));

         /* other arg -> top of x87 stack */
         addInstr(env, AMD64Instr_SseLdSt(False/*store*/, 8, arg1, m8_rsp));
         addInstr(env, AMD64Instr_A87PushPop(m8_rsp, True/*push*/));

         switch (e->Iex.Triop.op) {
            case Iop_PRemC3210F64:
               addInstr(env, AMD64Instr_A87FpOp(Afp_PREM));
               break;
            default:
               vassert(0);
         }
         /* Ignore the result, and instead make off with the FPU's
            C3210 flags (in the status word). */
         addInstr(env, AMD64Instr_A87StSW(m8_rsp));
         addInstr(env, AMD64Instr_Alu64R(Aalu_MOV,AMD64RMI_Mem(m8_rsp),dst));
         addInstr(env, AMD64Instr_Alu64R(Aalu_AND,AMD64RMI_Imm(0x4700),dst));
         return dst;
      }
      break;
   }

   default:
      break;
   } /* switch (e->tag) */

   /* We get here if no pattern matched. */
  irreducible:
   ppIRExpr(e);
   vpanic("iselIntExpr_R(amd64): cannot reduce tree");
}


/*---------------------------------------------------------*/
/*--- ISEL: Integer expression auxiliaries              ---*/
/*---------------------------------------------------------*/

/* --------------------- AMODEs --------------------- */

/* Return an AMode which computes the value of the specified
   expression, possibly also adding insns to the code list as a
   result.  The expression may only be a 64-bit one.
*/

static AMD64AMode* iselIntExpr_AMode ( ISelEnv* env, IRExpr* e )
{
   AMD64AMode* am = iselIntExpr_AMode_wrk(env, e);
   vassert(sane_AMode(am));
   return am;
}
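/* The worker below tries to fold the address expression into the
   richest amd64 addressing form it can.  For example, an IR tree of
   the shape
      Add64( Add64(t1, Shl64(t2, Imm8(3))), Imm64(0x18) )
   (t1 and t2 being illustrative temporaries) matches the first
   pattern and becomes the single amode 0x18(t1,t2,8), that is,
   t1 + t2*8 + 0x18, rather than a chain of shift/add instructions. */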
/* DO NOT CALL THIS DIRECTLY ! */
static AMD64AMode* iselIntExpr_AMode_wrk ( ISelEnv* env, IRExpr* e )
{
   MatchInfo mi;
   DECLARE_PATTERN(p_complex);
   IRType ty = typeOfIRExpr(env->type_env,e);
   vassert(ty == Ity_I64);

   /* Add64( Add64(expr1, Shl64(expr2, imm8)), simm32 ) */
   /*              bind0        bind1  bind2   bind3    */
   DEFINE_PATTERN(p_complex,
      binop( Iop_Add64,
             binop( Iop_Add64,
                    bind(0),
                    binop(Iop_Shl64, bind(1), bind(2))
                  ),
             bind(3)
           )
   );
   if (matchIRExpr(&mi, p_complex, e)) {
      IRExpr* expr1  = mi.bindee[0];
      IRExpr* expr2  = mi.bindee[1];
      IRExpr* imm8   = mi.bindee[2];
      IRExpr* simm32 = mi.bindee[3];
      if (imm8->tag == Iex_Const
          && imm8->Iex.Const.con->tag == Ico_U8
          && imm8->Iex.Const.con->Ico.U8 < 4
          /* imm8 is OK, now check simm32 */
          && simm32->tag == Iex_Const
          && simm32->Iex.Const.con->tag == Ico_U64
          && fitsIn32Bits(simm32->Iex.Const.con->Ico.U64)) {
         UInt shift  = imm8->Iex.Const.con->Ico.U8;
         UInt offset = toUInt(simm32->Iex.Const.con->Ico.U64);
         HReg r1 = iselIntExpr_R(env, expr1);
         HReg r2 = iselIntExpr_R(env, expr2);
         vassert(shift == 0 || shift == 1 || shift == 2 || shift == 3);
         return AMD64AMode_IRRS(offset, r1, r2, shift);
      }
   }

   /* Add64(expr1, Shl64(expr2, imm)) */
   if (e->tag == Iex_Binop
       && e->Iex.Binop.op == Iop_Add64
       && e->Iex.Binop.arg2->tag == Iex_Binop
       && e->Iex.Binop.arg2->Iex.Binop.op == Iop_Shl64
       && e->Iex.Binop.arg2->Iex.Binop.arg2->tag == Iex_Const
       && e->Iex.Binop.arg2->Iex.Binop.arg2->Iex.Const.con->tag == Ico_U8) {
      UInt shift = e->Iex.Binop.arg2->Iex.Binop.arg2->Iex.Const.con->Ico.U8;
      if (shift == 1 || shift == 2 || shift == 3) {
         HReg r1 = iselIntExpr_R(env, e->Iex.Binop.arg1);
         HReg r2 = iselIntExpr_R(env, e->Iex.Binop.arg2->Iex.Binop.arg1 );
         return AMD64AMode_IRRS(0, r1, r2, shift);
      }
   }

   /* Add64(expr,i) */
   if (e->tag == Iex_Binop
       && e->Iex.Binop.op == Iop_Add64
       && e->Iex.Binop.arg2->tag == Iex_Const
       && e->Iex.Binop.arg2->Iex.Const.con->tag == Ico_U64
       && fitsIn32Bits(e->Iex.Binop.arg2->Iex.Const.con->Ico.U64)) {
      HReg r1 = iselIntExpr_R(env, e->Iex.Binop.arg1);
      return AMD64AMode_IR(
                toUInt(e->Iex.Binop.arg2->Iex.Const.con->Ico.U64),
                r1
             );
   }

   /* Doesn't match anything in particular.  Generate it into
      a register and use that. */
   {
      HReg r1 = iselIntExpr_R(env, e);
      return AMD64AMode_IR(0, r1);
   }
}


/* --------------------- RMIs --------------------- */

/* Similarly, calculate an expression into an AMD64RMI operand.  As
   with iselIntExpr_R, the expression can have type 64, 32, 16 or
   8 bits. */

static AMD64RMI* iselIntExpr_RMI ( ISelEnv* env, IRExpr* e )
{
   AMD64RMI* rmi = iselIntExpr_RMI_wrk(env, e);
   /* sanity checks ... */
   switch (rmi->tag) {
      case Armi_Imm:
         return rmi;
      case Armi_Reg:
         vassert(hregClass(rmi->Armi.Reg.reg) == HRcInt64);
         vassert(hregIsVirtual(rmi->Armi.Reg.reg));
         return rmi;
      case Armi_Mem:
         vassert(sane_AMode(rmi->Armi.Mem.am));
         return rmi;
      default:
         vpanic("iselIntExpr_RMI: unknown amd64 RMI tag");
   }
}
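/* An AMD64RMI is the general "second operand" form accepted by the
   64-bit ALU instructions: a register (Armi_Reg), a memory reference
   (Armi_Mem), or an immediate that fits in 32 bits (Armi_Imm).  The
   worker below prefers the immediate and memory forms where the
   expression allows it, which avoids burning a register just to hold
   a constant or a value that is already sitting in the guest state
   or in memory. */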
/* DO NOT CALL THIS DIRECTLY ! */
static AMD64RMI* iselIntExpr_RMI_wrk ( ISelEnv* env, IRExpr* e )
{
   IRType ty = typeOfIRExpr(env->type_env,e);
   vassert(ty == Ity_I64 || ty == Ity_I32
           || ty == Ity_I16 || ty == Ity_I8);

   /* special case: immediate 64/32/16/8 */
   if (e->tag == Iex_Const) {
      switch (e->Iex.Const.con->tag) {
         case Ico_U64:
            if (fitsIn32Bits(e->Iex.Const.con->Ico.U64)) {
               return AMD64RMI_Imm(toUInt(e->Iex.Const.con->Ico.U64));
            }
            break;
         case Ico_U32:
            return AMD64RMI_Imm(e->Iex.Const.con->Ico.U32); break;
         case Ico_U16:
            return AMD64RMI_Imm(0xFFFF & e->Iex.Const.con->Ico.U16); break;
         case Ico_U8:
            return AMD64RMI_Imm(0xFF & e->Iex.Const.con->Ico.U8); break;
         default:
            vpanic("iselIntExpr_RMI.Iex_Const(amd64)");
      }
   }

   /* special case: 64-bit GET */
   if (e->tag == Iex_Get && ty == Ity_I64) {
      return AMD64RMI_Mem(AMD64AMode_IR(e->Iex.Get.offset,
                                        hregAMD64_RBP()));
   }

   /* special case: 64-bit load from memory */
   if (e->tag == Iex_Load && ty == Ity_I6