/* isel.c — VEX x86 host instruction selection (fragment scraped from a
   code-viewer page; original line structure lost).                      */
DEFINE_PATTERN(p_32to1, unop(Iop_32to1,bind(0)) );
/* 32to1(x): the condition is simply bit 0 of x; TEST $1 sets Z/NZ. */
if (matchIRExpr(&mi,p_32to1,e)) {
   HReg r = iselIntExpr_R(env, mi.bindee[0]);
   addInstr(env, X86Instr_Test32(1,r));
   return Xcc_NZ;
}

/* --- patterns rooted at: CmpNEZ8 --- */

/* CmpNEZ8(x): test the low 8 bits only. */
if (e->tag == Iex_Unop
    && e->Iex.Unop.op == Iop_CmpNEZ8) {
   HReg r = iselIntExpr_R(env, e->Iex.Unop.arg);
   addInstr(env, X86Instr_Test32(0xFF,r));
   return Xcc_NZ;
}

/* --- patterns rooted at: CmpNEZ16 --- */

/* CmpNEZ16(x): test the low 16 bits only. */
if (e->tag == Iex_Unop
    && e->Iex.Unop.op == Iop_CmpNEZ16) {
   HReg r = iselIntExpr_R(env, e->Iex.Unop.arg);
   addInstr(env, X86Instr_Test32(0xFFFF,r));
   return Xcc_NZ;
}

/* --- patterns rooted at: CmpNEZ32 --- */

/* CmpNEZ32(1Sto32(b)) ==> b
   (1Sto32 of a bit is nonzero iff the bit is set, so just evaluate b) */
{
   DECLARE_PATTERN(p_CmpNEZ32_1Sto32);
   DEFINE_PATTERN(p_CmpNEZ32_1Sto32,
                  unop(Iop_CmpNEZ32, unop(Iop_1Sto32,bind(0))));
   if (matchIRExpr(&mi, p_CmpNEZ32_1Sto32, e)) {
      return iselCondCode(env, mi.bindee[0]);
   }
}

/* CmpNEZ32(And32(x,y)): AND the operands and use the resulting flags,
   avoiding a separate compare-against-zero. */
{
   DECLARE_PATTERN(p_CmpNEZ32_And32);
   DEFINE_PATTERN(p_CmpNEZ32_And32,
                  unop(Iop_CmpNEZ32, binop(Iop_And32, bind(0), bind(1))));
   if (matchIRExpr(&mi, p_CmpNEZ32_And32, e)) {
      HReg r0 = iselIntExpr_R(env, mi.bindee[0]);
      X86RMI* rmi1 = iselIntExpr_RMI(env, mi.bindee[1]);
      /* work in a fresh vreg so r0 is not clobbered */
      HReg tmp = newVRegI(env);
      addInstr(env, mk_iMOVsd_RR(r0, tmp));
      addInstr(env, X86Instr_Alu32R(Xalu_AND,rmi1,tmp));
      return Xcc_NZ;
   }
}

/* CmpNEZ32(Or32(x,y)): same trick with OR. */
{
   DECLARE_PATTERN(p_CmpNEZ32_Or32);
   DEFINE_PATTERN(p_CmpNEZ32_Or32,
                  unop(Iop_CmpNEZ32, binop(Iop_Or32, bind(0), bind(1))));
   if (matchIRExpr(&mi, p_CmpNEZ32_Or32, e)) {
      HReg r0 = iselIntExpr_R(env, mi.bindee[0]);
      X86RMI* rmi1 = iselIntExpr_RMI(env, mi.bindee[1]);
      HReg tmp = newVRegI(env);
      addInstr(env, mk_iMOVsd_RR(r0, tmp));
      addInstr(env, X86Instr_Alu32R(Xalu_OR,rmi1,tmp));
      return Xcc_NZ;
   }
}

/* CmpNEZ32(x): fallback — compare against immediate 0. */
if (e->tag == Iex_Unop
    && e->Iex.Unop.op == Iop_CmpNEZ32) {
   HReg r1 = iselIntExpr_R(env, e->Iex.Unop.arg);
   X86RMI* rmi2 = X86RMI_Imm(0);
   addInstr(env, X86Instr_Alu32R(Xalu_CMP,rmi2,r1));
   return Xcc_NZ;
}

/* --- patterns rooted at: CmpNEZ64 --- */

/* CmpNEZ64(1Sto64(b)) ==> b */
{
   DECLARE_PATTERN(p_CmpNEZ64_1Sto64);
   DEFINE_PATTERN(
      p_CmpNEZ64_1Sto64,
      unop(Iop_CmpNEZ64, unop(Iop_1Sto64,bind(0))));
   if (matchIRExpr(&mi, p_CmpNEZ64_1Sto64, e)) {
      return iselCondCode(env, mi.bindee[0]);
   }
}

/* CmpNEZ64(Or64(x,y)): OR all four 32-bit halves together; the whole
   64-bit OR is nonzero iff any half is nonzero. */
{
   DECLARE_PATTERN(p_CmpNEZ64_Or64);
   DEFINE_PATTERN(p_CmpNEZ64_Or64,
                  unop(Iop_CmpNEZ64, binop(Iop_Or64, bind(0), bind(1))));
   if (matchIRExpr(&mi, p_CmpNEZ64_Or64, e)) {
      HReg hi1, lo1, hi2, lo2;
      HReg tmp = newVRegI(env);
      iselInt64Expr( &hi1, &lo1, env, mi.bindee[0] );
      addInstr(env, mk_iMOVsd_RR(hi1, tmp));
      addInstr(env, X86Instr_Alu32R(Xalu_OR,X86RMI_Reg(lo1),tmp));
      iselInt64Expr( &hi2, &lo2, env, mi.bindee[1] );
      addInstr(env, X86Instr_Alu32R(Xalu_OR,X86RMI_Reg(hi2),tmp));
      addInstr(env, X86Instr_Alu32R(Xalu_OR,X86RMI_Reg(lo2),tmp));
      return Xcc_NZ;
   }
}

/* CmpNEZ64(x): OR the two halves; NZ iff the 64-bit value is nonzero. */
if (e->tag == Iex_Unop
    && e->Iex.Unop.op == Iop_CmpNEZ64) {
   HReg hi, lo;
   HReg tmp = newVRegI(env);
   iselInt64Expr( &hi, &lo, env, e->Iex.Unop.arg );
   addInstr(env, mk_iMOVsd_RR(hi, tmp));
   addInstr(env, X86Instr_Alu32R(Xalu_OR,X86RMI_Reg(lo), tmp));
   return Xcc_NZ;
}

/* --- patterns rooted at: Cmp{EQ,NE}{8,16} --- */

/* CmpEQ8 / CmpNE8: XOR the operands, then test only the low byte.
   The result is zero iff the low bytes were equal. */
if (e->tag == Iex_Binop
    && (e->Iex.Binop.op == Iop_CmpEQ8
        || e->Iex.Binop.op == Iop_CmpNE8)) {
   HReg r1 = iselIntExpr_R(env, e->Iex.Binop.arg1);
   X86RMI* rmi2 = iselIntExpr_RMI(env, e->Iex.Binop.arg2);
   HReg r = newVRegI(env);
   addInstr(env, mk_iMOVsd_RR(r1,r));
   addInstr(env, X86Instr_Alu32R(Xalu_XOR,rmi2,r));
   addInstr(env, X86Instr_Test32(0xFF,r));
   switch (e->Iex.Binop.op) {
      case Iop_CmpEQ8: return Xcc_Z;
      case Iop_CmpNE8: return Xcc_NZ;
      default: vpanic("iselCondCode(x86): CmpXX8");
   }
}

/* CmpEQ16 / CmpNE16: same XOR trick, testing the low 16 bits. */
if (e->tag == Iex_Binop
    && (e->Iex.Binop.op == Iop_CmpEQ16
        || e->Iex.Binop.op == Iop_CmpNE16)) {
   HReg r1 = iselIntExpr_R(env, e->Iex.Binop.arg1);
   X86RMI* rmi2 = iselIntExpr_RMI(env, e->Iex.Binop.arg2);
   HReg r = newVRegI(env);
   addInstr(env, mk_iMOVsd_RR(r1,r));
   addInstr(env, X86Instr_Alu32R(Xalu_XOR,rmi2,r));
   addInstr(env, X86Instr_Test32(0xFFFF,r));
   switch (e->Iex.Binop.op) {
      case Iop_CmpEQ16: return Xcc_Z;
      case Iop_CmpNE16: return Xcc_NZ;
      default: vpanic("iselCondCode(x86): CmpXX16");
   }
}

/* Cmp*32*(x,y): a single CMP sets all the flags needed for the
   signed (L/LE) and unsigned (B/BE) orderings. */
if (e->tag == Iex_Binop
    && (e->Iex.Binop.op == Iop_CmpEQ32
        || e->Iex.Binop.op == Iop_CmpNE32
        || e->Iex.Binop.op == Iop_CmpLT32S
        || e->Iex.Binop.op == Iop_CmpLT32U
        || e->Iex.Binop.op == Iop_CmpLE32S
        || e->Iex.Binop.op == Iop_CmpLE32U)) {
   HReg r1 = iselIntExpr_R(env, e->Iex.Binop.arg1);
   X86RMI* rmi2 = iselIntExpr_RMI(env, e->Iex.Binop.arg2);
   addInstr(env, X86Instr_Alu32R(Xalu_CMP,rmi2,r1));
   switch (e->Iex.Binop.op) {
      case Iop_CmpEQ32:  return Xcc_Z;
      case Iop_CmpNE32:  return Xcc_NZ;
      case Iop_CmpLT32S: return Xcc_L;
      case Iop_CmpLT32U: return Xcc_B;
      case Iop_CmpLE32S: return Xcc_LE;
      case Iop_CmpLE32U: return Xcc_BE;
      default: vpanic("iselCondCode(x86): CmpXX32");
   }
}

/* CmpNE64 / CmpEQ64: XOR corresponding halves, OR the two XOR results;
   the OR is zero iff both 64-bit values were identical. */
if (e->tag == Iex_Binop
    && (e->Iex.Binop.op == Iop_CmpNE64
        || e->Iex.Binop.op == Iop_CmpEQ64)) {
   HReg hi1, hi2, lo1, lo2;
   HReg tHi = newVRegI(env);
   HReg tLo = newVRegI(env);
   iselInt64Expr( &hi1, &lo1, env, e->Iex.Binop.arg1 );
   iselInt64Expr( &hi2, &lo2, env, e->Iex.Binop.arg2 );
   addInstr(env, mk_iMOVsd_RR(hi1, tHi));
   addInstr(env, X86Instr_Alu32R(Xalu_XOR,X86RMI_Reg(hi2), tHi));
   addInstr(env, mk_iMOVsd_RR(lo1, tLo));
   addInstr(env, X86Instr_Alu32R(Xalu_XOR,X86RMI_Reg(lo2), tLo));
   addInstr(env, X86Instr_Alu32R(Xalu_OR,X86RMI_Reg(tHi), tLo));
   switch (e->Iex.Binop.op) {
      case Iop_CmpNE64: return Xcc_NZ;
      case Iop_CmpEQ64: return Xcc_Z;
      default: vpanic("iselCondCode(x86): CmpXX64");
   }
}

/* No pattern matched: this IRExpr cannot be selected as a condition. */
ppIRExpr(e);
vpanic("iselCondCode");
}


/*---------------------------------------------------------*/
/*--- ISEL: Integer expressions (64 bit)                ---*/
/*---------------------------------------------------------*/

/* Compute a 64-bit value into a register pair, which is returned as
   the first two parameters.
   As with iselIntExpr_R, these may be either real or virtual regs;
   in any case they must not be changed by subsequent code emitted by
   the caller. */

static void iselInt64Expr ( HReg* rHi, HReg* rLo, ISelEnv* env, IRExpr* e )
{
   iselInt64Expr_wrk(rHi, rLo, env, e);
#  if 0
   vex_printf("\n"); ppIRExpr(e); vex_printf("\n");
#  endif
   /* Sanity check the worker's results: both halves must be virtual
      32-bit integer registers. */
   vassert(hregClass(*rHi) == HRcInt32);
   vassert(hregIsVirtual(*rHi));
   vassert(hregClass(*rLo) == HRcInt32);
   vassert(hregIsVirtual(*rLo));
}

/* DO NOT CALL THIS DIRECTLY ! */
static void iselInt64Expr_wrk ( HReg* rHi, HReg* rLo, ISelEnv* env, IRExpr* e )
{
   HWord fn = 0; /* helper fn for most SIMD64 stuff */
   vassert(e);
   vassert(typeOfIRExpr(env->type_env,e) == Ity_I64);

   /* 64-bit literal */
   if (e->tag == Iex_Const) {
      ULong w64 = e->Iex.Const.con->Ico.U64;
      UInt wHi = toUInt(w64 >> 32);
      UInt wLo = toUInt(w64);
      HReg tLo = newVRegI(env);
      HReg tHi = newVRegI(env);
      vassert(e->Iex.Const.con->tag == Ico_U64);
      if (wLo == wHi) {
         /* Save a precious Int register in this special case:
            both halves can share one register. */
         addInstr(env, X86Instr_Alu32R(Xalu_MOV, X86RMI_Imm(wLo), tLo));
         *rHi = tLo;
         *rLo = tLo;
      } else {
         addInstr(env, X86Instr_Alu32R(Xalu_MOV, X86RMI_Imm(wHi), tHi));
         addInstr(env, X86Instr_Alu32R(Xalu_MOV, X86RMI_Imm(wLo), tLo));
         *rHi = tHi;
         *rLo = tLo;
      }
      return;
   }

   /* read 64-bit IRTemp */
   if (e->tag == Iex_Tmp) {
      lookupIRTemp64( rHi, rLo, env, e->Iex.Tmp.tmp);
      return;
   }

   /* 64-bit load (little-endian only): low word at addr, high word
      at addr+4. */
   if (e->tag == Iex_Load && e->Iex.Load.end == Iend_LE) {
      HReg tLo, tHi;
      X86AMode *am0, *am4;
      vassert(e->Iex.Load.ty == Ity_I64);
      tLo = newVRegI(env);
      tHi = newVRegI(env);
      am0 = iselIntExpr_AMode(env, e->Iex.Load.addr);
      am4 = advance4(am0);
      addInstr(env, X86Instr_Alu32R( Xalu_MOV, X86RMI_Mem(am0), tLo ));
      addInstr(env, X86Instr_Alu32R( Xalu_MOV, X86RMI_Mem(am4), tHi ));
      *rHi = tHi;
      *rLo = tLo;
      return;
   }

   /* 64-bit GET: read guest state at offset (EBP-relative). */
   if (e->tag == Iex_Get) {
      X86AMode* am = X86AMode_IR(e->Iex.Get.offset, hregX86_EBP());
      X86AMode* am4 = advance4(am);
      HReg tLo = newVRegI(env);
      HReg tHi = newVRegI(env);
      addInstr(env, X86Instr_Alu32R( Xalu_MOV, X86RMI_Mem(am), tLo ));
      addInstr(env, X86Instr_Alu32R( Xalu_MOV, X86RMI_Mem(am4), tHi ));
      *rHi = tHi;
      *rLo = tLo;
      return;
   }

   /* 64-bit GETI: indexed guest-state read. */
   if (e->tag == Iex_GetI) {
      X86AMode* am
         = genGuestArrayOffset( env, e->Iex.GetI.descr,
                                e->Iex.GetI.ix, e->Iex.GetI.bias );
      X86AMode* am4 = advance4(am);
      HReg tLo = newVRegI(env);
      HReg tHi = newVRegI(env);
      addInstr(env, X86Instr_Alu32R( Xalu_MOV, X86RMI_Mem(am), tLo ));
      addInstr(env, X86Instr_Alu32R( Xalu_MOV, X86RMI_Mem(am4), tHi ));
      *rHi = tHi;
      *rLo = tLo;
      return;
   }

   /* 64-bit Mux0X: start with exprX in the result pair, then
      conditionally overwrite with expr0 when cond's low byte is 0. */
   if (e->tag == Iex_Mux0X) {
      HReg e0Lo, e0Hi, eXLo, eXHi, r8;
      HReg tLo = newVRegI(env);
      HReg tHi = newVRegI(env);
      iselInt64Expr(&e0Hi, &e0Lo, env, e->Iex.Mux0X.expr0);
      iselInt64Expr(&eXHi, &eXLo, env, e->Iex.Mux0X.exprX);
      addInstr(env, mk_iMOVsd_RR(eXHi, tHi));
      addInstr(env, mk_iMOVsd_RR(eXLo, tLo));
      r8 = iselIntExpr_R(env, e->Iex.Mux0X.cond);
      addInstr(env, X86Instr_Test32(0xFF, r8));
      /* This assumes the first cmov32 doesn't trash the condition
         codes, so they are still available for the second cmov32 */
      addInstr(env, X86Instr_CMov32(Xcc_Z,X86RM_Reg(e0Hi),tHi));
      addInstr(env, X86Instr_CMov32(Xcc_Z,X86RM_Reg(e0Lo),tLo));
      *rHi = tHi;
      *rLo = tLo;
      return;
   }

   /* --------- BINARY ops --------- */
   if (e->tag == Iex_Binop) {
      switch (e->Iex.Binop.op) {

         /* 32 x 32 -> 64 multiply */
         case Iop_MullU32:
         case Iop_MullS32: {
            /* get one operand into %eax, and the other into a R/M.
               Need to make an educated guess about which is better in
               which. */
            HReg tLo = newVRegI(env);
            HReg tHi = newVRegI(env);
            Bool syned = toBool(e->Iex.Binop.op == Iop_MullS32);
            X86RM* rmLeft = iselIntExpr_RM(env, e->Iex.Binop.arg1);
            HReg rRight = iselIntExpr_R(env, e->Iex.Binop.arg2);
            addInstr(env, mk_iMOVsd_RR(rRight, hregX86_EAX()));
            addInstr(env, X86Instr_MulL(syned, rmLeft));
            /* Result is now in EDX:EAX. Tell the caller. */
            addInstr(env, mk_iMOVsd_RR(hregX86_EDX(), tHi));
            addInstr(env, mk_iMOVsd_RR(hregX86_EAX(), tLo));
            *rHi = tHi;
            *rLo = tLo;
            return;
         }

         /* 64 x 32 -> (32(rem),32(div)) division */
         case Iop_DivModU64to32:
         case Iop_DivModS64to32: {
            /* Get the 64-bit operand into edx:eax, and the other into
               any old R/M. */
            HReg sHi, sLo;
            HReg tLo = newVRegI(env);
            HReg tHi = newVRegI(env);
            Bool syned = toBool(e->Iex.Binop.op == Iop_DivModS64to32);
            X86RM* rmRight = iselIntExpr_RM(env, e->Iex.Binop.arg2);
            iselInt64Expr(&sHi,&sLo, env, e->Iex.Binop.arg1);
            addInstr(env, mk_iMOVsd_RR(sHi, hregX86_EDX()));
            addInstr(env, mk_iMOVsd_RR(sLo, hregX86_EAX()));
            addInstr(env, X86Instr_Div(syned, rmRight));
            /* remainder in EDX, quotient in EAX */
            addInstr(env, mk_iMOVsd_RR(hregX86_EDX(), tHi));
            addInstr(env, mk_iMOVsd_RR(hregX86_EAX(), tLo));
            *rHi = tHi;
            *rLo = tLo;
            return;
         }

         /* Or64/And64/Xor64: apply the 32-bit ALU op to each half
            independently. */
         case Iop_Or64:
         case Iop_And64:
         case Iop_Xor64: {
            HReg xLo, xHi, yLo, yHi;
            HReg tLo = newVRegI(env);
            HReg tHi = newVRegI(env);
            X86AluOp op = e->Iex.Binop.op==Iop_Or64 ? Xalu_OR
                          : e->Iex.Binop.op==Iop_And64 ? Xalu_AND
                          : Xalu_XOR;
            iselInt64Expr(&xHi, &xLo, env, e->Iex.Binop.arg1);
            addInstr(env, mk_iMOVsd_RR(xHi, tHi));
            addInstr(env, mk_iMOVsd_RR(xLo, tLo));
            iselInt64Expr(&yHi, &yLo, env, e->Iex.Binop.arg2);
            addInstr(env, X86Instr_
/* NOTE(review): the scraped source is truncated at this point — the
   Iop_Or64/And64/Xor64 case of iselInt64Expr_wrk above is cut off
   mid-statement, and the remainder of the file (including the rest of
   the 64-bit binary-op cases) is missing.  Recover the full file from
   the upstream Valgrind/VEX sources before building. */