📄 isel.c
字号:
addInstr(env, mk_iMOVsd_RR(left64, rax)); addInstr(env, AMD64Instr_Sh64(Ash_SHR, 32, rdx)); addInstr(env, AMD64Instr_Div(syned, 4, rmRight)); addInstr(env, AMD64Instr_MovZLQ(rdx,rdx)); addInstr(env, AMD64Instr_MovZLQ(rax,rax)); addInstr(env, AMD64Instr_Sh64(Ash_SHL, 32, rdx)); addInstr(env, mk_iMOVsd_RR(rax, dst)); addInstr(env, AMD64Instr_Alu64R(Aalu_OR, AMD64RMI_Reg(rdx), dst)); return dst; } if (e->Iex.Binop.op == Iop_32HLto64) { HReg hi32 = newVRegI(env); HReg lo32 = newVRegI(env); HReg hi32s = iselIntExpr_R(env, e->Iex.Binop.arg1); HReg lo32s = iselIntExpr_R(env, e->Iex.Binop.arg2); addInstr(env, mk_iMOVsd_RR(hi32s, hi32)); addInstr(env, mk_iMOVsd_RR(lo32s, lo32)); addInstr(env, AMD64Instr_Sh64(Ash_SHL, 32, hi32)); addInstr(env, AMD64Instr_MovZLQ(lo32,lo32)); addInstr(env, AMD64Instr_Alu64R( Aalu_OR, AMD64RMI_Reg(lo32), hi32)); return hi32; } if (e->Iex.Binop.op == Iop_16HLto32) { HReg hi16 = newVRegI(env); HReg lo16 = newVRegI(env); HReg hi16s = iselIntExpr_R(env, e->Iex.Binop.arg1); HReg lo16s = iselIntExpr_R(env, e->Iex.Binop.arg2); addInstr(env, mk_iMOVsd_RR(hi16s, hi16)); addInstr(env, mk_iMOVsd_RR(lo16s, lo16)); addInstr(env, AMD64Instr_Sh64(Ash_SHL, 16, hi16)); addInstr(env, AMD64Instr_Alu64R( Aalu_AND, AMD64RMI_Imm(0xFFFF), lo16)); addInstr(env, AMD64Instr_Alu64R( Aalu_OR, AMD64RMI_Reg(lo16), hi16)); return hi16; } if (e->Iex.Binop.op == Iop_8HLto16) { HReg hi8 = newVRegI(env); HReg lo8 = newVRegI(env); HReg hi8s = iselIntExpr_R(env, e->Iex.Binop.arg1); HReg lo8s = iselIntExpr_R(env, e->Iex.Binop.arg2); addInstr(env, mk_iMOVsd_RR(hi8s, hi8)); addInstr(env, mk_iMOVsd_RR(lo8s, lo8)); addInstr(env, AMD64Instr_Sh64(Ash_SHL, 8, hi8)); addInstr(env, AMD64Instr_Alu64R( Aalu_AND, AMD64RMI_Imm(0xFF), lo8)); addInstr(env, AMD64Instr_Alu64R( Aalu_OR, AMD64RMI_Reg(lo8), hi8)); return hi8; } if (e->Iex.Binop.op == Iop_MullS32 || e->Iex.Binop.op == Iop_MullS16 || e->Iex.Binop.op == Iop_MullS8 || e->Iex.Binop.op == Iop_MullU32 || e->Iex.Binop.op == Iop_MullU16 || 
e->Iex.Binop.op == Iop_MullU8) { HReg a32 = newVRegI(env); HReg b32 = newVRegI(env); HReg a32s = iselIntExpr_R(env, e->Iex.Binop.arg1); HReg b32s = iselIntExpr_R(env, e->Iex.Binop.arg2); Int shift = 0; AMD64ShiftOp shr_op = Ash_SHR; switch (e->Iex.Binop.op) { case Iop_MullS32: shr_op = Ash_SAR; shift = 32; break; case Iop_MullS16: shr_op = Ash_SAR; shift = 48; break; case Iop_MullS8: shr_op = Ash_SAR; shift = 56; break; case Iop_MullU32: shr_op = Ash_SHR; shift = 32; break; case Iop_MullU16: shr_op = Ash_SHR; shift = 48; break; case Iop_MullU8: shr_op = Ash_SHR; shift = 56; break; default: vassert(0); } addInstr(env, mk_iMOVsd_RR(a32s, a32)); addInstr(env, mk_iMOVsd_RR(b32s, b32)); addInstr(env, AMD64Instr_Sh64(Ash_SHL, shift, a32)); addInstr(env, AMD64Instr_Sh64(Ash_SHL, shift, b32)); addInstr(env, AMD64Instr_Sh64(shr_op, shift, a32)); addInstr(env, AMD64Instr_Sh64(shr_op, shift, b32)); addInstr(env, AMD64Instr_Alu64R(Aalu_MUL, AMD64RMI_Reg(a32), b32)); return b32; } if (e->Iex.Binop.op == Iop_CmpF64) { HReg fL = iselDblExpr(env, e->Iex.Binop.arg1); HReg fR = iselDblExpr(env, e->Iex.Binop.arg2); HReg dst = newVRegI(env); addInstr(env, AMD64Instr_SseUComIS(8,fL,fR,dst)); /* Mask out irrelevant parts of the result so as to conform to the CmpF64 definition. */ addInstr(env, AMD64Instr_Alu64R(Aalu_AND, AMD64RMI_Imm(0x45), dst)); return dst; } if (e->Iex.Binop.op == Iop_F64toI32 || e->Iex.Binop.op == Iop_F64toI64) { Int szD = e->Iex.Binop.op==Iop_F64toI32 ? 4 : 8; HReg rf = iselDblExpr(env, e->Iex.Binop.arg2); HReg dst = newVRegI(env); set_SSE_rounding_mode( env, e->Iex.Binop.arg1 ); addInstr(env, AMD64Instr_SseSF2SI( 8, szD, rf, dst )); set_SSE_rounding_default(env); return dst; }//.. if (e->Iex.Binop.op == Iop_F64toI32 || e->Iex.Binop.op == Iop_F64toI16) {//.. Int sz = e->Iex.Binop.op == Iop_F64toI16 ? 2 : 4;//.. HReg rf = iselDblExpr(env, e->Iex.Binop.arg2);//.. HReg dst = newVRegI(env);//.. //.. /* Used several times ... *///.. 
X86AMode* zero_esp = X86AMode_IR(0, hregX86_ESP());//.. //.. /* rf now holds the value to be converted, and rrm holds the//.. rounding mode value, encoded as per the IRRoundingMode//.. enum. The first thing to do is set the FPU's rounding//.. mode accordingly. *///.. //.. /* Create a space for the format conversion. *///.. /* subl $4, %esp *///.. sub_from_esp(env, 4);//.. //.. /* Set host rounding mode *///.. set_FPU_rounding_mode( env, e->Iex.Binop.arg1 );//.. //.. /* gistw/l %rf, 0(%esp) *///.. addInstr(env, X86Instr_FpLdStI(False/*store*/, sz, rf, zero_esp));//.. //.. if (sz == 2) {//.. /* movzwl 0(%esp), %dst *///.. addInstr(env, X86Instr_LoadEX(2,False,zero_esp,dst));//.. } else {//.. /* movl 0(%esp), %dst *///.. vassert(sz == 4);//.. addInstr(env, X86Instr_Alu32R(//.. Xalu_MOV, X86RMI_Mem(zero_esp), dst));//.. }//.. //.. /* Restore default FPU rounding. *///.. set_FPU_rounding_default( env );//.. //.. /* addl $4, %esp *///.. add_to_esp(env, 4);//.. return dst;//.. }//.. //.. /* C3210 flags following FPU partial remainder (fprem), both//.. IEEE compliant (PREM1) and non-IEEE compliant (PREM). *///.. if (e->Iex.Binop.op == Iop_PRemC3210F64//.. || e->Iex.Binop.op == Iop_PRem1C3210F64) {//.. HReg junk = newVRegF(env);//.. HReg dst = newVRegI(env);//.. HReg srcL = iselDblExpr(env, e->Iex.Binop.arg1);//.. HReg srcR = iselDblExpr(env, e->Iex.Binop.arg2);//.. addInstr(env, X86Instr_FpBinary(//.. e->Iex.Binop.op==Iop_PRemC3210F64 //.. ? Xfp_PREM : Xfp_PREM1,//.. srcL,srcR,junk//.. ));//.. /* The previous pseudo-insn will have left the FPU's C3210//.. flags set correctly. So bag them. *///.. addInstr(env, X86Instr_FpStSW_AX());//.. addInstr(env, mk_iMOVsd_RR(hregX86_EAX(), dst));//.. addInstr(env, X86Instr_Alu32R(Xalu_AND, X86RMI_Imm(0x4700), dst));//.. return dst;//.. 
/* End of the Iex_Binop arm.  (A stray '}' that had leaked out of the
   preceding "//.." commented-out x86 block has been removed.) */
      break;
   }

   /* --------- UNARY OP --------- */
   case Iex_Unop: {

      /* 32Uto64(8Uto32(expr8)): zero-extend the low 8 bits of expr8
         directly, by shifting out everything above bit 7. */
      DEFINE_PATTERN(p_8Uto64,
                     unop(Iop_32Uto64, unop(Iop_8Uto32, bind(0)) ) );
      if (matchIRExpr(&mi,p_8Uto64,e)) {
         IRExpr* expr8 = mi.bindee[0];
         HReg    dst   = newVRegI(env);
         HReg    src   = iselIntExpr_R(env, expr8);
         addInstr(env, mk_iMOVsd_RR(src,dst) );
         addInstr(env, AMD64Instr_Sh64(Ash_SHL, 56, dst));
         addInstr(env, AMD64Instr_Sh64(Ash_SHR, 56, dst));
         return dst;
      }

      /* 1Uto8(64to1(expr64)): just mask expr64 down to bit 0. */
      DEFINE_PATTERN( p_1Uto8_64to1,
                      unop(Iop_1Uto8, unop(Iop_64to1, bind(0))) );
      if (matchIRExpr(&mi,p_1Uto8_64to1,e)) {
         IRExpr* expr64 = mi.bindee[0];
         HReg    dst    = newVRegI(env);
         HReg    src    = iselIntExpr_R(env, expr64);
         addInstr(env, mk_iMOVsd_RR(src,dst) );
         addInstr(env, AMD64Instr_Alu64R(Aalu_AND,
                                         AMD64RMI_Imm(1), dst));
         return dst;
      }

      /* (Legacy commented-out x86 code for the 16Uto32(LDle(..))
         pattern removed here.) */
/* Dispatch on the unary op.  (A stray '}' that had leaked out of the
   preceding "//.." commented-out x86 pattern block has been removed.) */
      switch (e->Iex.Unop.op) {

         case Iop_32Uto64: {
            /* movzlq: a 32-bit write zero-extends into the full reg. */
            HReg dst = newVRegI(env);
            HReg src = iselIntExpr_R(env, e->Iex.Unop.arg);
            addInstr(env, AMD64Instr_MovZLQ(src,dst) );
            return dst;
         }

         case Iop_32Sto64: {
            /* Sign-extend: shift left 32, then arithmetic-shift back. */
            HReg dst = newVRegI(env);
            HReg src = iselIntExpr_R(env, e->Iex.Unop.arg);
            UInt amt = 32;
            addInstr(env, mk_iMOVsd_RR(src,dst) );
            addInstr(env, AMD64Instr_Sh64(Ash_SHL, amt, dst));
            addInstr(env, AMD64Instr_Sh64(Ash_SAR, amt, dst));
            return dst;
         }

         case Iop_128HIto64: {
            HReg rHi, rLo;
            iselInt128Expr(&rHi,&rLo, env, e->Iex.Unop.arg);
            return rHi; /* and abandon rLo */
         }
         case Iop_128to64: {
            HReg rHi, rLo;
            iselInt128Expr(&rHi,&rLo, env, e->Iex.Unop.arg);
            return rLo; /* and abandon rHi */
         }

         case Iop_8Uto16:
         case Iop_8Uto32:
         case Iop_8Uto64:
         case Iop_16Uto64:
         case Iop_16Uto32: {
            /* Zero-extend by masking with 0xFF / 0xFFFF. */
            HReg dst     = newVRegI(env);
            HReg src     = iselIntExpr_R(env, e->Iex.Unop.arg);
            Bool srcIs16 = toBool( e->Iex.Unop.op==Iop_16Uto32
                                   || e->Iex.Unop.op==Iop_16Uto64 );
            UInt mask    = srcIs16 ? 0xFFFF : 0xFF;
            addInstr(env, mk_iMOVsd_RR(src,dst) );
            addInstr(env, AMD64Instr_Alu64R(Aalu_AND,
                                            AMD64RMI_Imm(mask), dst));
            return dst;
         }

         case Iop_8Sto16:
         case Iop_8Sto64:
         case Iop_8Sto32:
         case Iop_16Sto32:
         case Iop_16Sto64: {
            /* Sign-extend: shift the value to the top of the register,
               then arithmetic-shift it back down. */
            HReg dst     = newVRegI(env);
            HReg src     = iselIntExpr_R(env, e->Iex.Unop.arg);
            Bool srcIs16 = toBool( e->Iex.Unop.op==Iop_16Sto32
                                   || e->Iex.Unop.op==Iop_16Sto64 );
            UInt amt     = srcIs16 ? 48 : 56;
            addInstr(env, mk_iMOVsd_RR(src,dst) );
            addInstr(env, AMD64Instr_Sh64(Ash_SHL, amt, dst));
            addInstr(env, AMD64Instr_Sh64(Ash_SAR, amt, dst));
            return dst;
         }

         case Iop_Not8:
         case Iop_Not16:
         case Iop_Not32:
         case Iop_Not64: {
            /* A full 64-bit NOT is fine: narrower results only have
               their low lanes observed. */
            HReg dst = newVRegI(env);
            HReg src = iselIntExpr_R(env, e->Iex.Unop.arg);
            addInstr(env, mk_iMOVsd_RR(src,dst) );
            addInstr(env, AMD64Instr_Unary64(Aun_NOT,dst));
            return dst;
         }

         /* (Legacy commented-out x86 code for Iop_64HIto32 / Iop_64to32
            removed here.) */
iselInt64Expr(&rHi,&rLo, env, e->Iex.Unop.arg);//.. return rLo; /* similar stupid comment to the above ... *///.. }//.. case Iop_16HIto8: case Iop_32HIto16: case Iop_64HIto32: { HReg dst = newVRegI(env); HReg src = iselIntExpr_R(env, e->Iex.Unop.arg); Int shift = 0; switch (e->Iex.Unop.op) { case Iop_32HIto16: shift = 16; break; case Iop_64HIto32: shift = 32; break; default: vassert(0); } addInstr(env, mk_iMOVsd_RR(src,dst) ); addInstr(env, AMD64Instr_Sh64(Ash_SHR, shift, dst)); return dst; } case Iop_1Uto64: case Iop_1Uto32: case Iop_1Uto8: { HReg dst = newVRegI(env); AMD64CondCode cond = iselCondCode(env, e->Iex.Unop.arg); addInstr(env, AMD64Instr_Set64(cond,dst)); return dst; } case Iop_1Sto8: case Iop_1Sto16: case Iop_1Sto32: case Iop_1Sto64: { /* could do better than this, but for now ... */ HReg dst = newVRegI(env); AMD64CondCode cond = iselCondCode(env, e->Iex.Unop.arg); addInstr(env, AMD64Instr_Set64(cond,dst)); addInstr(env, AMD64Instr_Sh64(Ash_SHL, 63, dst)); addInstr(env, AMD64Instr_Sh64(Ash_SAR, 63, dst)); return dst; } case Iop_Ctz64: { /* Count trailing zeroes, implemented by amd64 'bsfq' */ HReg dst = newVRegI(env); HReg src = iselIntExpr_R(env, e->Iex.Unop.arg); addInstr(env, AMD64Instr_Bsfr64(True,src,dst)); return dst; } case Iop_Clz64: { /* Count leading zeroes. Do 'bsrq' to establish the index of the highest set bit, and subtract that value from 63. 
*/ HReg tmp = newVRegI(env); HReg dst = newVRegI(env); HReg src = iselIntExpr_R(env, e->Iex.Unop.arg); addInstr(env, AMD64Instr_Bsfr64(False,src,tmp)); addInstr(env, AMD64Instr_Alu64R(Aalu_MOV, AMD64RMI_Imm(63), dst)); addInstr(env, AMD64Instr_Alu64R(Aalu_SUB, AMD64RMI_Reg(tmp), dst)); return dst; } case Iop_Neg8: case Iop_Neg16: case Iop_Neg32: case Iop_Neg64: { HReg dst = newVRegI(env); HReg reg = iselIntExpr_R(env, e->Iex.Unop.arg); addInstr(env, mk_iMOVsd_RR(reg,dst)); addInstr(env, AMD64Instr_Unary64(Aun_NEG,dst)); return dst; } case Iop_V128to32: { HReg dst = newVRegI(env); HReg vec = iselVecExpr(env, e->Iex.Unop.arg); AMD64AMode* rsp_m16 = AMD64AMode_IR(-16, hregAMD64_RSP()); addInstr(env, AMD64Instr_SseLdSt(False/*store*/, 16, vec, rsp_m16)); addInstr(env, AMD64Instr_LoadEX(4, False/*z-widen*/, rsp_m16, dst)); return dst; } /* V128{HI}to64 */
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -