isel.c
   HReg dst = generate_ones_V128(env);
   addInstr(env, AMD64Instr_SseReRg(Asse_XOR, src, dst));
   return dst;
}

//.. /* Round an x87 FPU value to 53-bit-mantissa precision, to be used
//..    after most non-simple FPU operations (simple = +, -, *, / and
//..    sqrt).
//..
//..    This could be done a lot more efficiently if needed, by loading
//..    zero and adding it to the value to be rounded (fldz ; faddp?).
//.. */
//.. static void roundToF64 ( ISelEnv* env, HReg reg )
//.. {
//..    X86AMode* zero_esp = X86AMode_IR(0, hregX86_ESP());
//..    sub_from_esp(env, 8);
//..    addInstr(env, X86Instr_FpLdSt(False/*store*/, 8, reg, zero_esp));
//..    addInstr(env, X86Instr_FpLdSt(True/*load*/, 8, reg, zero_esp));
//..    add_to_esp(env, 8);
//.. }


/*---------------------------------------------------------*/
/*--- ISEL: Integer expressions (64/32/16/8 bit)        ---*/
/*---------------------------------------------------------*/

/* Select insns for an integer-typed expression, and add them to the
   code list.  Return a reg holding the result.  This reg will be a
   virtual register.  THE RETURNED REG MUST NOT BE MODIFIED.  If you
   want to modify it, ask for a new vreg, copy it in there, and modify
   the copy.  The register allocator will do its best to map both
   vregs to the same real register, so the copies will often disappear
   later in the game.

   This should handle expressions of 64, 32, 16 and 8-bit type.  All
   results are returned in a 64-bit register.  For 32-, 16- and 8-bit
   expressions, the upper 32/48/56 bits are arbitrary, so you should
   mask or sign extend partial values if necessary.
*/

static HReg iselIntExpr_R ( ISelEnv* env, IRExpr* e )
{
   HReg r = iselIntExpr_R_wrk(env, e);
   /* sanity checks ... */
#  if 0
   vex_printf("\niselIntExpr_R: "); ppIRExpr(e); vex_printf("\n");
#  endif
   vassert(hregClass(r) == HRcInt64);
   vassert(hregIsVirtual(r));
   return r;
}

/* DO NOT CALL THIS DIRECTLY ! */
static HReg iselIntExpr_R_wrk ( ISelEnv* env, IRExpr* e )
{
   /* Used for unary/binary SIMD64 ops. */
   HWord fn = 0;
   Bool second_is_UInt;

   MatchInfo mi;
   DECLARE_PATTERN(p_8Uto64);
   DECLARE_PATTERN(p_1Uto8_64to1);

   IRType ty = typeOfIRExpr(env->type_env,e);
   vassert(ty == Ity_I64 || ty == Ity_I32 || ty == Ity_I16 || ty == Ity_I8);

   switch (e->tag) {

   /* --------- TEMP --------- */
   case Iex_Tmp: {
      return lookupIRTemp(env, e->Iex.Tmp.tmp);
   }

   /* --------- LOAD --------- */
   case Iex_Load: {
      HReg dst = newVRegI(env);
      AMD64AMode* amode = iselIntExpr_AMode ( env, e->Iex.Load.addr );

      if (e->Iex.Load.end != Iend_LE)
         goto irreducible;

      if (ty == Ity_I64) {
         addInstr(env, AMD64Instr_Alu64R(Aalu_MOV,
                                         AMD64RMI_Mem(amode), dst) );
         return dst;
      }
      if (ty == Ity_I32) {
         addInstr(env, AMD64Instr_LoadEX(4,False,amode,dst));
         return dst;
      }
      if (ty == Ity_I16) {
         addInstr(env, AMD64Instr_LoadEX(2,False,amode,dst));
         return dst;
      }
      if (ty == Ity_I8) {
         addInstr(env, AMD64Instr_LoadEX(1,False,amode,dst));
         return dst;
      }
      break;
   }
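   /* Informal example: an Ity_I8 load above uses LoadEX(1,False,..),
      an unsigned-widening (movzbq-style) load, so the upper 56 bits
      of dst come out as zero here.  Callers still must not rely on
      that: the contract of iselIntExpr_R leaves the upper bits of
      partial-width results arbitrary. */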
   /* --------- BINARY OP --------- */
   case Iex_Binop: {
      AMD64AluOp   aluOp;
      AMD64ShiftOp shOp;

//..       /* Pattern: Sub32(0,x) */
//..       if (e->Iex.Binop.op == Iop_Sub32 && isZero32(e->Iex.Binop.arg1)) {
//..          HReg dst = newVRegI(env);
//..          HReg reg = iselIntExpr_R(env, e->Iex.Binop.arg2);
//..          addInstr(env, mk_iMOVsd_RR(reg,dst));
//..          addInstr(env, X86Instr_Unary32(Xun_NEG,X86RM_Reg(dst)));
//..          return dst;
//..       }

      /* Is it an addition or logical style op? */
      switch (e->Iex.Binop.op) {
         case Iop_Add8: case Iop_Add16: case Iop_Add32: case Iop_Add64:
            aluOp = Aalu_ADD; break;
         case Iop_Sub8: case Iop_Sub16: case Iop_Sub32: case Iop_Sub64:
            aluOp = Aalu_SUB; break;
         case Iop_And8: case Iop_And16: case Iop_And32: case Iop_And64:
            aluOp = Aalu_AND; break;
         case Iop_Or8:  case Iop_Or16:  case Iop_Or32:  case Iop_Or64:
            aluOp = Aalu_OR; break;
         case Iop_Xor8: case Iop_Xor16: case Iop_Xor32: case Iop_Xor64:
            aluOp = Aalu_XOR; break;
         case Iop_Mul16: case Iop_Mul32: case Iop_Mul64:
            aluOp = Aalu_MUL; break;
         default:
            aluOp = Aalu_INVALID; break;
      }
      /* For commutative ops we assume any literal values are on the
         second operand. */
      if (aluOp != Aalu_INVALID) {
         HReg dst      = newVRegI(env);
         HReg reg      = iselIntExpr_R(env, e->Iex.Binop.arg1);
         AMD64RMI* rmi = iselIntExpr_RMI(env, e->Iex.Binop.arg2);
         addInstr(env, mk_iMOVsd_RR(reg,dst));
         addInstr(env, AMD64Instr_Alu64R(aluOp, rmi, dst));
         return dst;
      }

      /* Perhaps a shift op? */
      switch (e->Iex.Binop.op) {
         case Iop_Shl64: case Iop_Shl32: case Iop_Shl16: case Iop_Shl8:
            shOp = Ash_SHL; break;
         case Iop_Shr64: case Iop_Shr32: case Iop_Shr16: case Iop_Shr8:
            shOp = Ash_SHR; break;
         case Iop_Sar64: case Iop_Sar32: case Iop_Sar16: case Iop_Sar8:
            shOp = Ash_SAR; break;
         default:
            shOp = Ash_INVALID; break;
      }
      if (shOp != Ash_INVALID) {
         HReg dst = newVRegI(env);

         /* regL = the value to be shifted */
         HReg regL = iselIntExpr_R(env, e->Iex.Binop.arg1);
         addInstr(env, mk_iMOVsd_RR(regL,dst));

         /* Do any necessary widening for 32/16/8 bit operands */
         switch (e->Iex.Binop.op) {
            case Iop_Shr64: case Iop_Shl64: case Iop_Sar64:
               break;
            case Iop_Shl32: case Iop_Shl16: case Iop_Shl8:
               break;
            case Iop_Shr8:
               addInstr(env, AMD64Instr_Alu64R(
                                Aalu_AND, AMD64RMI_Imm(0xFF), dst));
               break;
            case Iop_Shr16:
               addInstr(env, AMD64Instr_Alu64R(
                                Aalu_AND, AMD64RMI_Imm(0xFFFF), dst));
               break;
            case Iop_Shr32:
               addInstr(env, AMD64Instr_MovZLQ(dst,dst));
               break;
            case Iop_Sar8:
               addInstr(env, AMD64Instr_Sh64(Ash_SHL, 56, dst));
               addInstr(env, AMD64Instr_Sh64(Ash_SAR, 56, dst));
               break;
            case Iop_Sar16:
               addInstr(env, AMD64Instr_Sh64(Ash_SHL, 48, dst));
               addInstr(env, AMD64Instr_Sh64(Ash_SAR, 48, dst));
               break;
            case Iop_Sar32:
               addInstr(env, AMD64Instr_Sh64(Ash_SHL, 32, dst));
               addInstr(env, AMD64Instr_Sh64(Ash_SAR, 32, dst));
               break;
            default:
               ppIROp(e->Iex.Binop.op);
               vassert(0);
         }

         /* Now consider the shift amount.  If it's a literal, we
            can do a much better job than the general case. */
         if (e->Iex.Binop.arg2->tag == Iex_Const) {
            /* assert that the IR is well-typed */
            Int nshift;
            vassert(e->Iex.Binop.arg2->Iex.Const.con->tag == Ico_U8);
            nshift = e->Iex.Binop.arg2->Iex.Const.con->Ico.U8;
            vassert(nshift >= 0);
            if (nshift > 0)
               /* Can't allow nshift==0 since that means %cl */
               addInstr(env, AMD64Instr_Sh64(shOp, nshift, dst));
         } else {
            /* General case; we have to force the amount into %cl. */
            HReg regR = iselIntExpr_R(env, e->Iex.Binop.arg2);
            addInstr(env, mk_iMOVsd_RR(regR,hregAMD64_RCX()));
            addInstr(env, AMD64Instr_Sh64(shOp, 0/* %cl */, dst));
         }
         return dst;
      }
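      /* Worked example of the widening above: for Iop_Sar8, shifting
         the byte 0x80 (-128) left by 56 gives 0x8000000000000000, and
         the arithmetic shift right by 56 then yields
         0xFFFFFFFFFFFFFF80, i.e. the byte sign-extended to 64 bits.
         The real shift, done afterwards as a 64-bit SAR, therefore
         behaves exactly like an 8-bit one. */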
      /* Deal with 64-bit SIMD binary ops */
      second_is_UInt = False;
      switch (e->Iex.Binop.op) {
         case Iop_Add8x8:
            fn = (HWord)h_generic_calc_Add8x8; break;
         case Iop_Add16x4:
            fn = (HWord)h_generic_calc_Add16x4; break;
         case Iop_Add32x2:
            fn = (HWord)h_generic_calc_Add32x2; break;

         case Iop_Avg8Ux8:
            fn = (HWord)h_generic_calc_Avg8Ux8; break;
         case Iop_Avg16Ux4:
            fn = (HWord)h_generic_calc_Avg16Ux4; break;

         case Iop_CmpEQ8x8:
            fn = (HWord)h_generic_calc_CmpEQ8x8; break;
         case Iop_CmpEQ16x4:
            fn = (HWord)h_generic_calc_CmpEQ16x4; break;
         case Iop_CmpEQ32x2:
            fn = (HWord)h_generic_calc_CmpEQ32x2; break;

         case Iop_CmpGT8Sx8:
            fn = (HWord)h_generic_calc_CmpGT8Sx8; break;
         case Iop_CmpGT16Sx4:
            fn = (HWord)h_generic_calc_CmpGT16Sx4; break;
         case Iop_CmpGT32Sx2:
            fn = (HWord)h_generic_calc_CmpGT32Sx2; break;

         case Iop_InterleaveHI8x8:
            fn = (HWord)h_generic_calc_InterleaveHI8x8; break;
         case Iop_InterleaveLO8x8:
            fn = (HWord)h_generic_calc_InterleaveLO8x8; break;
         case Iop_InterleaveHI16x4:
            fn = (HWord)h_generic_calc_InterleaveHI16x4; break;
         case Iop_InterleaveLO16x4:
            fn = (HWord)h_generic_calc_InterleaveLO16x4; break;
         case Iop_InterleaveHI32x2:
            fn = (HWord)h_generic_calc_InterleaveHI32x2; break;
         case Iop_InterleaveLO32x2:
            fn = (HWord)h_generic_calc_InterleaveLO32x2; break;

         case Iop_Max8Ux8:
            fn = (HWord)h_generic_calc_Max8Ux8; break;
         case Iop_Max16Sx4:
            fn = (HWord)h_generic_calc_Max16Sx4; break;
         case Iop_Min8Ux8:
            fn = (HWord)h_generic_calc_Min8Ux8; break;
         case Iop_Min16Sx4:
            fn = (HWord)h_generic_calc_Min16Sx4; break;

         case Iop_Mul16x4:
            fn = (HWord)h_generic_calc_Mul16x4; break;
         case Iop_MulHi16Sx4:
            fn = (HWord)h_generic_calc_MulHi16Sx4; break;
         case Iop_MulHi16Ux4:
            fn = (HWord)h_generic_calc_MulHi16Ux4; break;

         case Iop_QAdd8Sx8:
            fn = (HWord)h_generic_calc_QAdd8Sx8; break;
         case Iop_QAdd16Sx4:
            fn = (HWord)h_generic_calc_QAdd16Sx4; break;
         case Iop_QAdd8Ux8:
            fn = (HWord)h_generic_calc_QAdd8Ux8; break;
         case Iop_QAdd16Ux4:
            fn = (HWord)h_generic_calc_QAdd16Ux4; break;

         case Iop_QNarrow32Sx2:
            fn = (HWord)h_generic_calc_QNarrow32Sx2; break;
         case Iop_QNarrow16Sx4:
            fn = (HWord)h_generic_calc_QNarrow16Sx4; break;
         case Iop_QNarrow16Ux4:
            fn = (HWord)h_generic_calc_QNarrow16Ux4; break;

         case Iop_QSub8Sx8:
            fn = (HWord)h_generic_calc_QSub8Sx8; break;
         case Iop_QSub16Sx4:
            fn = (HWord)h_generic_calc_QSub16Sx4; break;
         case Iop_QSub8Ux8:
            fn = (HWord)h_generic_calc_QSub8Ux8; break;
         case Iop_QSub16Ux4:
            fn = (HWord)h_generic_calc_QSub16Ux4; break;

         case Iop_Sub8x8:
            fn = (HWord)h_generic_calc_Sub8x8; break;
         case Iop_Sub16x4:
            fn = (HWord)h_generic_calc_Sub16x4; break;
         case Iop_Sub32x2:
            fn = (HWord)h_generic_calc_Sub32x2; break;

         case Iop_ShlN32x2:
            fn = (HWord)h_generic_calc_ShlN32x2;
            second_is_UInt = True;
            break;
         case Iop_ShlN16x4:
            fn = (HWord)h_generic_calc_ShlN16x4;
            second_is_UInt = True;
            break;
         case Iop_ShrN32x2:
            fn = (HWord)h_generic_calc_ShrN32x2;
            second_is_UInt = True;
            break;
         case Iop_ShrN16x4:
            fn = (HWord)h_generic_calc_ShrN16x4;
            second_is_UInt = True;
            break;
         case Iop_SarN32x2:
            fn = (HWord)h_generic_calc_SarN32x2;
            second_is_UInt = True;
            break;
         case Iop_SarN16x4:
            fn = (HWord)h_generic_calc_SarN16x4;
            second_is_UInt = True;
            break;

         default:
            fn = (HWord)0; break;
      }

      if (fn != (HWord)0) {
         /* Note: the following assumes all helpers are of signature
               ULong fn ( ULong, ULong ),
            and they are not marked as regparm functions.
         */
         HReg dst  = newVRegI(env);
         HReg argL = iselIntExpr_R(env, e->Iex.Binop.arg1);
         HReg argR = iselIntExpr_R(env, e->Iex.Binop.arg2);
         if (second_is_UInt)
            addInstr(env, AMD64Instr_MovZLQ(argR, argR));
         addInstr(env, mk_iMOVsd_RR(argL, hregAMD64_RDI()) );
         addInstr(env, mk_iMOVsd_RR(argR, hregAMD64_RSI()) );
         addInstr(env, AMD64Instr_Call( Acc_ALWAYS, (ULong)fn, 2 ));
         addInstr(env, mk_iMOVsd_RR(hregAMD64_RAX(), dst));
         return dst;
      }
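      /* The moves to %rdi/%rsi above follow the ELF/SysV AMD64 calling
         convention: the two ULong arguments travel in %rdi and %rsi,
         and the helper's result comes back in %rax, from where it is
         copied into a fresh vreg. */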
      /* Handle misc other ops. */

      if (e->Iex.Binop.op == Iop_DivModS64to32
          || e->Iex.Binop.op == Iop_DivModU64to32) {
         /* 64 x 32 -> (32(rem),32(div)) division */
         /* Get the 64-bit operand into edx:eax, and the other into
            any old R/M. */
         HReg rax = hregAMD64_RAX();
         HReg rdx = hregAMD64_RDX();
         HReg dst = newVRegI(env);
         Bool syned = toBool(e->Iex.Binop.op == Iop_DivModS64to32);
         AMD64RM* rmRight = iselIntExpr_RM(env, e->Iex.Binop.arg2);
         /* Compute the left operand into a reg, and then
            put the top half in edx and the bottom in eax. */
         HReg left64 = iselIntExpr_R(env, e->Iex.Binop.arg1);
         addInstr(env, mk_iMOVsd_RR(left64, rdx));
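         /* From here on the selected code has to spread left64 across
            the %edx:%eax pair: the x86-64 32-bit divide takes its
            64-bit dividend in %edx:%eax and leaves the quotient in
            %eax and the remainder in %edx. */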