hdefs.c
//..          return;
//..       case Xin_FpBinary:
//..          addHRegUse(u, HRmRead, i->Xin.FpBinary.srcL);
//..          addHRegUse(u, HRmRead, i->Xin.FpBinary.srcR);
//..          addHRegUse(u, HRmWrite, i->Xin.FpBinary.dst);
//..          return;
//..       case Xin_FpLdSt:
//..          addRegUsage_AMD64AMode(u, i->Xin.FpLdSt.addr);
//..          addHRegUse(u, i->Xin.FpLdSt.isLoad ? HRmWrite : HRmRead,
//..                        i->Xin.FpLdSt.reg);
//..          return;
//..       case Xin_FpLdStI:
//..          addRegUsage_AMD64AMode(u, i->Xin.FpLdStI.addr);
//..          addHRegUse(u, i->Xin.FpLdStI.isLoad ? HRmWrite : HRmRead,
//..                        i->Xin.FpLdStI.reg);
//..          return;
//..       case Xin_Fp64to32:
//..          addHRegUse(u, HRmRead,  i->Xin.Fp64to32.src);
//..          addHRegUse(u, HRmWrite, i->Xin.Fp64to32.dst);
//..          return;
//..       case Xin_FpCMov:
//..          addHRegUse(u, HRmRead,   i->Xin.FpCMov.src);
//..          addHRegUse(u, HRmModify, i->Xin.FpCMov.dst);
//..          return;
      case Ain_LdMXCSR:
         addRegUsage_AMD64AMode(u, i->Ain.LdMXCSR.addr);
         return;
//..       case Xin_FpStSW_AX:
//..          addHRegUse(u, HRmWrite, hregAMD64_EAX());
//..          return;
      case Ain_SseUComIS:
         addHRegUse(u, HRmRead,  i->Ain.SseUComIS.srcL);
         addHRegUse(u, HRmRead,  i->Ain.SseUComIS.srcR);
         addHRegUse(u, HRmWrite, i->Ain.SseUComIS.dst);
         return;
      case Ain_SseSI2SF:
         addHRegUse(u, HRmRead,  i->Ain.SseSI2SF.src);
         addHRegUse(u, HRmWrite, i->Ain.SseSI2SF.dst);
         return;
      case Ain_SseSF2SI:
         addHRegUse(u, HRmRead,  i->Ain.SseSF2SI.src);
         addHRegUse(u, HRmWrite, i->Ain.SseSF2SI.dst);
         return;
      case Ain_SseSDSS:
         addHRegUse(u, HRmRead,  i->Ain.SseSDSS.src);
         addHRegUse(u, HRmWrite, i->Ain.SseSDSS.dst);
         return;
      case Ain_SseLdSt:
         addRegUsage_AMD64AMode(u, i->Ain.SseLdSt.addr);
         addHRegUse(u, i->Ain.SseLdSt.isLoad ? HRmWrite : HRmRead,
                       i->Ain.SseLdSt.reg);
         return;
      case Ain_SseLdzLO:
         addRegUsage_AMD64AMode(u, i->Ain.SseLdzLO.addr);
         addHRegUse(u, HRmWrite, i->Ain.SseLdzLO.reg);
         return;
//..       case Xin_SseConst:
//..          addHRegUse(u, HRmWrite, i->Xin.SseConst.dst);
//..          return;
      case Ain_Sse32Fx4:
         vassert(i->Ain.Sse32Fx4.op != Asse_MOV);
         unary = toBool( i->Ain.Sse32Fx4.op == Asse_RCPF
                         || i->Ain.Sse32Fx4.op == Asse_RSQRTF
                         || i->Ain.Sse32Fx4.op == Asse_SQRTF );
         addHRegUse(u, HRmRead, i->Ain.Sse32Fx4.src);
         addHRegUse(u, unary ? HRmWrite : HRmModify,
                       i->Ain.Sse32Fx4.dst);
         return;
      case Ain_Sse32FLo:
         vassert(i->Ain.Sse32FLo.op != Asse_MOV);
         unary = toBool( i->Ain.Sse32FLo.op == Asse_RCPF
                         || i->Ain.Sse32FLo.op == Asse_RSQRTF
                         || i->Ain.Sse32FLo.op == Asse_SQRTF );
         addHRegUse(u, HRmRead, i->Ain.Sse32FLo.src);
         addHRegUse(u, unary ? HRmWrite : HRmModify,
                       i->Ain.Sse32FLo.dst);
         return;
      case Ain_Sse64Fx2:
         vassert(i->Ain.Sse64Fx2.op != Asse_MOV);
         unary = toBool( i->Ain.Sse64Fx2.op == Asse_RCPF
                         || i->Ain.Sse64Fx2.op == Asse_RSQRTF
                         || i->Ain.Sse64Fx2.op == Asse_SQRTF );
         addHRegUse(u, HRmRead, i->Ain.Sse64Fx2.src);
         addHRegUse(u, unary ? HRmWrite : HRmModify,
                       i->Ain.Sse64Fx2.dst);
         return;
      case Ain_Sse64FLo:
         vassert(i->Ain.Sse64FLo.op != Asse_MOV);
         unary = toBool( i->Ain.Sse64FLo.op == Asse_RCPF
                         || i->Ain.Sse64FLo.op == Asse_RSQRTF
                         || i->Ain.Sse64FLo.op == Asse_SQRTF );
         addHRegUse(u, HRmRead, i->Ain.Sse64FLo.src);
         addHRegUse(u, unary ? HRmWrite : HRmModify,
                       i->Ain.Sse64FLo.dst);
         return;
      case Ain_SseReRg:
         if ( (i->Ain.SseReRg.op == Asse_XOR
               || i->Ain.SseReRg.op == Asse_CMPEQ32)
              && i->Ain.SseReRg.src == i->Ain.SseReRg.dst) {
            /* reg-alloc needs to understand 'xor r,r' and 'cmpeqd
               r,r' as a write of a value to r, and independent of any
               previous value in r */
            /* (as opposed to a rite of passage :-) */
            addHRegUse(u, HRmWrite, i->Ain.SseReRg.dst);
         } else {
            addHRegUse(u, HRmRead, i->Ain.SseReRg.src);
            addHRegUse(u, i->Ain.SseReRg.op == Asse_MOV
                             ? HRmWrite : HRmModify,
                          i->Ain.SseReRg.dst);
         }
         return;
      case Ain_SseCMov:
         addHRegUse(u, HRmRead,   i->Ain.SseCMov.src);
         addHRegUse(u, HRmModify, i->Ain.SseCMov.dst);
         return;
      case Ain_SseShuf:
         addHRegUse(u, HRmRead,  i->Ain.SseShuf.src);
         addHRegUse(u, HRmWrite, i->Ain.SseShuf.dst);
         return;
      default:
         ppAMD64Instr(i, mode64);
         vpanic("getRegUsage_AMD64Instr");
   }
}
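The Ain_SseReRg case above is the subtle one: a same-register XOR (or CMPEQ32) must be reported as a pure write, because its result does not depend on the old value of the register; reporting it as a read would make the allocator treat that register as live beforehand. A minimal standalone sketch of the classification rule, using a hypothetical classifyDst helper rather than the VEX API:

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

typedef enum { HRmRead, HRmWrite, HRmModify } HRegMode;

/* Hypothetical helper: decide how the destination register of a
   two-operand op should be reported to the register allocator. */
static HRegMode classifyDst(const char* op, int src, int dst)
{
   /* xor r,r / cmpeqd r,r produce their result from scratch, so the
      old value of dst is dead: report a pure write. */
   bool zeroing = (strcmp(op, "xor") == 0 || strcmp(op, "cmpeqd") == 0)
                  && src == dst;
   return zeroing ? HRmWrite : HRmModify;
}

int main(void)
{
   printf("xor r3,r3 -> %s\n",
          classifyDst("xor", 3, 3) == HRmWrite ? "HRmWrite" : "HRmModify");
   printf("xor r2,r3 -> %s\n",
          classifyDst("xor", 2, 3) == HRmWrite ? "HRmWrite" : "HRmModify");
   return 0;
}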
/* local helper */
static inline void mapReg(HRegRemap* m, HReg* r)
{
   *r = lookupHRegRemap(m, *r);
}

void mapRegs_AMD64Instr ( HRegRemap* m, AMD64Instr* i, Bool mode64 )
{
   vassert(mode64 == True);
   switch (i->tag) {
      case Ain_Imm64:
         mapReg(m, &i->Ain.Imm64.dst);
         return;
      case Ain_Alu64R:
         mapRegs_AMD64RMI(m, i->Ain.Alu64R.src);
         mapReg(m, &i->Ain.Alu64R.dst);
         return;
      case Ain_Alu64M:
         mapRegs_AMD64RI(m, i->Ain.Alu64M.src);
         mapRegs_AMD64AMode(m, i->Ain.Alu64M.dst);
         return;
      case Ain_Sh64:
         mapReg(m, &i->Ain.Sh64.dst);
         return;
      case Ain_Test64:
         mapReg(m, &i->Ain.Test64.dst);
         return;
      case Ain_Unary64:
         mapReg(m, &i->Ain.Unary64.dst);
         return;
      case Ain_MulL:
         mapRegs_AMD64RM(m, i->Ain.MulL.src);
         return;
      case Ain_Div:
         mapRegs_AMD64RM(m, i->Ain.Div.src);
         return;
//..       case Xin_Sh3232:
//..          mapReg(m, &i->Xin.Sh3232.src);
//..          mapReg(m, &i->Xin.Sh3232.dst);
//..          return;
      case Ain_Push:
         mapRegs_AMD64RMI(m, i->Ain.Push.src);
         return;
      case Ain_Call:
         return;
      case Ain_Goto:
         mapRegs_AMD64RI(m, i->Ain.Goto.dst);
         return;
      case Ain_CMov64:
         mapRegs_AMD64RM(m, i->Ain.CMov64.src);
         mapReg(m, &i->Ain.CMov64.dst);
         return;
      case Ain_MovZLQ:
         mapReg(m, &i->Ain.MovZLQ.src);
         mapReg(m, &i->Ain.MovZLQ.dst);
         return;
      case Ain_LoadEX:
         mapRegs_AMD64AMode(m, i->Ain.LoadEX.src);
         mapReg(m, &i->Ain.LoadEX.dst);
         return;
      case Ain_Store:
         mapReg(m, &i->Ain.Store.src);
         mapRegs_AMD64AMode(m, i->Ain.Store.dst);
         return;
      case Ain_Set64:
         mapReg(m, &i->Ain.Set64.dst);
         return;
      case Ain_Bsfr64:
         mapReg(m, &i->Ain.Bsfr64.src);
         mapReg(m, &i->Ain.Bsfr64.dst);
         return;
      case Ain_MFence:
         return;
      case Ain_A87Free:
         return;
      case Ain_A87PushPop:
         mapRegs_AMD64AMode(m, i->Ain.A87PushPop.addr);
         return;
      case Ain_A87FpOp:
         return;
      case Ain_A87LdCW:
         mapRegs_AMD64AMode(m, i->Ain.A87LdCW.addr);
         return;
      case Ain_A87StSW:
         mapRegs_AMD64AMode(m, i->Ain.A87StSW.addr);
         return;
//..       case Xin_FpUnary:
//..          mapReg(m, &i->Xin.FpUnary.src);
//..          mapReg(m, &i->Xin.FpUnary.dst);
//..          return;
//..       case Xin_FpBinary:
//..          mapReg(m, &i->Xin.FpBinary.srcL);
//..          mapReg(m, &i->Xin.FpBinary.srcR);
//..          mapReg(m, &i->Xin.FpBinary.dst);
//..          return;
//..       case Xin_FpLdSt:
//..          mapRegs_AMD64AMode(m, i->Xin.FpLdSt.addr);
//..          mapReg(m, &i->Xin.FpLdSt.reg);
//..          return;
//..       case Xin_FpLdStI:
//..          mapRegs_AMD64AMode(m, i->Xin.FpLdStI.addr);
//..          mapReg(m, &i->Xin.FpLdStI.reg);
//..          return;
//..       case Xin_Fp64to32:
//..          mapReg(m, &i->Xin.Fp64to32.src);
//..          mapReg(m, &i->Xin.Fp64to32.dst);
//..          return;
//..       case Xin_FpCMov:
//..          mapReg(m, &i->Xin.FpCMov.src);
//..          mapReg(m, &i->Xin.FpCMov.dst);
//..          return;
      case Ain_LdMXCSR:
         mapRegs_AMD64AMode(m, i->Ain.LdMXCSR.addr);
         return;
//..       case Xin_FpStSW_AX:
//..          return;
      case Ain_SseUComIS:
         mapReg(m, &i->Ain.SseUComIS.srcL);
         mapReg(m, &i->Ain.SseUComIS.srcR);
         mapReg(m, &i->Ain.SseUComIS.dst);
         return;
      case Ain_SseSI2SF:
         mapReg(m, &i->Ain.SseSI2SF.src);
         mapReg(m, &i->Ain.SseSI2SF.dst);
         return;
      case Ain_SseSF2SI:
         mapReg(m, &i->Ain.SseSF2SI.src);
         mapReg(m, &i->Ain.SseSF2SI.dst);
         return;
      case Ain_SseSDSS:
         mapReg(m, &i->Ain.SseSDSS.src);
         mapReg(m, &i->Ain.SseSDSS.dst);
         return;
//..       case Xin_SseConst:
//..          mapReg(m, &i->Xin.SseConst.dst);
//..          return;
      case Ain_SseLdSt:
         mapReg(m, &i->Ain.SseLdSt.reg);
         mapRegs_AMD64AMode(m, i->Ain.SseLdSt.addr);
         break;
      case Ain_SseLdzLO:
         mapReg(m, &i->Ain.SseLdzLO.reg);
         mapRegs_AMD64AMode(m, i->Ain.SseLdzLO.addr);
         break;
      case Ain_Sse32Fx4:
         mapReg(m, &i->Ain.Sse32Fx4.src);
         mapReg(m, &i->Ain.Sse32Fx4.dst);
         return;
      case Ain_Sse32FLo:
         mapReg(m, &i->Ain.Sse32FLo.src);
         mapReg(m, &i->Ain.Sse32FLo.dst);
         return;
      case Ain_Sse64Fx2:
         mapReg(m, &i->Ain.Sse64Fx2.src);
         mapReg(m, &i->Ain.Sse64Fx2.dst);
         return;
      case Ain_Sse64FLo:
         mapReg(m, &i->Ain.Sse64FLo.src);
         mapReg(m, &i->Ain.Sse64FLo.dst);
         return;
      case Ain_SseReRg:
         mapReg(m, &i->Ain.SseReRg.src);
         mapReg(m, &i->Ain.SseReRg.dst);
         return;
      case Ain_SseCMov:
         mapReg(m, &i->Ain.SseCMov.src);
         mapReg(m, &i->Ain.SseCMov.dst);
         return;
      case Ain_SseShuf:
         mapReg(m, &i->Ain.SseShuf.src);
         mapReg(m, &i->Ain.SseShuf.dst);
         return;
      default:
         ppAMD64Instr(i, mode64);
         vpanic("mapRegs_AMD64Instr");
   }
}
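mapRegs_AMD64Instr and the mapReg helper implement one idea: once allocation is done, every register field of every instruction is rewritten in place through a virtual-to-real lookup table. A standalone sketch of that idiom, with a hypothetical Remap type standing in for VEX's HRegRemap:

#include <stdio.h>

/* Hypothetical stand-in for VEX's HRegRemap: a small table mapping
   virtual register numbers picked during instruction selection to the
   real registers chosen by the allocator. */
typedef struct { int orig[8]; int replacement[8]; int n; } Remap;

static int lookupRemap(const Remap* m, int r)
{
   for (int k = 0; k < m->n; k++)
      if (m->orig[k] == r)
         return m->replacement[k];
   return r;  /* already-real registers pass through unchanged */
}

/* The in-place rewrite idiom used by mapReg() above. */
static void mapOne(const Remap* m, int* r)
{
   *r = lookupRemap(m, *r);
}

int main(void)
{
   Remap m = { {100, 101}, {7, 3}, 2 };  /* v100 -> real reg 7, v101 -> real reg 3 */
   int src = 100, dst = 101;
   mapOne(&m, &src);
   mapOne(&m, &dst);
   printf("src=%d dst=%d\n", src, dst);  /* prints src=7 dst=3 */
   return 0;
}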
/* Figure out if i represents a reg-reg move, and if so assign the
   source and destination to *src and *dst.  If in doubt say No.  Used
   by the register allocator to do move coalescing.
*/
Bool isMove_AMD64Instr ( AMD64Instr* i, HReg* src, HReg* dst )
{
   /* Moves between integer regs */
   if (i->tag == Ain_Alu64R) {
      if (i->Ain.Alu64R.op != Aalu_MOV)
         return False;
      if (i->Ain.Alu64R.src->tag != Armi_Reg)
         return False;
      *src = i->Ain.Alu64R.src->Armi.Reg.reg;
      *dst = i->Ain.Alu64R.dst;
      return True;
   }
   /* Moves between vector regs */
   if (i->tag == Ain_SseReRg) {
      if (i->Ain.SseReRg.op != Asse_MOV)
         return False;
      *src = i->Ain.SseReRg.src;
      *dst = i->Ain.SseReRg.dst;
      return True;
   }
   return False;
}

/* Generate amd64 spill/reload instructions under the direction of the
   register allocator.  Note it's critical these don't write the
   condition codes. */

AMD64Instr* genSpill_AMD64 ( HReg rreg, Int offsetB, Bool mode64 )
{
   AMD64AMode* am;
   vassert(offsetB >= 0);
   vassert(!hregIsVirtual(rreg));
   vassert(mode64 == True);
   am = AMD64AMode_IR(offsetB, hregAMD64_RBP());
   switch (hregClass(rreg)) {
      case HRcInt64:
         return AMD64Instr_Alu64M ( Aalu_MOV, AMD64RI_Reg(rreg), am );
      case HRcVec128:
         return AMD64Instr_SseLdSt ( False/*store*/, 16, rreg, am );
      default:
         ppHRegClass(hregClass(rreg));
         vpanic("genSpill_AMD64: unimplemented regclass");
   }
}

AMD64Instr* genReload_AMD64 ( HReg rreg, Int offsetB, Bool mode64 )
{
   AMD64AMode* am;
   vassert(offsetB >= 0);
   vassert(!hregIsVirtual(rreg));
   vassert(mode64 == True);
   am = AMD64AMode_IR(offsetB, hregAMD64_RBP());
   switch (hregClass(rreg)) {
      case HRcInt64:
         return AMD64Instr_Alu64R ( Aalu_MOV, AMD64RMI_Mem(am), rreg );
      case HRcVec128:
         return AMD64Instr_SseLdSt ( True/*load*/, 16, rreg, am );
      default:
         ppHRegClass(hregClass(rreg));
         vpanic("genReload_AMD64: unimplemented regclass");
   }
}

/* --------- The amd64 assembler (bleh.) --------- */
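A closing note on genSpill_AMD64/genReload_AMD64 above: the "don't write the condition codes" requirement exists because the allocator may insert a spill or reload between a flag-setting instruction and the instruction that consumes the flags, and plain MOVs are flag-neutral. A sketch of the shape of the generated code (mnemonics are illustrative; the real encoder picks the exact SSE move):

#include <stdio.h>

typedef enum { RC_Int64, RC_Vec128 } RegClass;

/* Print the kind of instruction a spill to offsetB(%rbp) turns into.
   The key property, shared with the real encoder, is that MOV-family
   instructions never touch %rflags, so a spill can sit between a
   compare and its conditional branch without corrupting the flags. */
static void showSpill(RegClass rc, int offsetB, const char* reg)
{
   if (rc == RC_Int64)
      printf("movq   %%%s, %d(%%rbp)\n", reg, offsetB);
   else
      printf("movups %%%s, %d(%%rbp)\n", reg, offsetB);
}

int main(void)
{
   showSpill(RC_Int64,   0, "r12");
   showSpill(RC_Vec128, 16, "xmm7");
   return 0;
}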