📄 hdefs.c
AMD64RI* AMD64RI_Imm ( UInt imm32 ) {
   AMD64RI* op       = LibVEX_Alloc(sizeof(AMD64RI));
   op->tag           = Ari_Imm;
   op->Ari.Imm.imm32 = imm32;
   return op;
}
AMD64RI* AMD64RI_Reg ( HReg reg ) {
   AMD64RI* op     = LibVEX_Alloc(sizeof(AMD64RI));
   op->tag         = Ari_Reg;
   op->Ari.Reg.reg = reg;
   return op;
}

void ppAMD64RI ( AMD64RI* op ) {
   switch (op->tag) {
      case Ari_Imm:
         vex_printf("$0x%x", op->Ari.Imm.imm32);
         return;
      case Ari_Reg:
         ppHRegAMD64(op->Ari.Reg.reg);
         return;
      default:
         vpanic("ppAMD64RI");
   }
}

/* An AMD64RI can only be used in a "read" context (what would it
   mean to write or modify a literal?) and so we enumerate its
   registers accordingly. */
static void addRegUsage_AMD64RI ( HRegUsage* u, AMD64RI* op ) {
   switch (op->tag) {
      case Ari_Imm:
         return;
      case Ari_Reg:
         addHRegUse(u, HRmRead, op->Ari.Reg.reg);
         return;
      default:
         vpanic("addRegUsage_AMD64RI");
   }
}

static void mapRegs_AMD64RI ( HRegRemap* m, AMD64RI* op ) {
   switch (op->tag) {
      case Ari_Imm:
         return;
      case Ari_Reg:
         op->Ari.Reg.reg = lookupHRegRemap(m, op->Ari.Reg.reg);
         return;
      default:
         vpanic("mapRegs_AMD64RI");
   }
}

/* --------- Operand, which can be reg or memory only. --------- */

AMD64RM* AMD64RM_Reg ( HReg reg ) {
   AMD64RM* op     = LibVEX_Alloc(sizeof(AMD64RM));
   op->tag         = Arm_Reg;
   op->Arm.Reg.reg = reg;
   return op;
}
AMD64RM* AMD64RM_Mem ( AMD64AMode* am ) {
   AMD64RM* op    = LibVEX_Alloc(sizeof(AMD64RM));
   op->tag        = Arm_Mem;
   op->Arm.Mem.am = am;
   return op;
}

void ppAMD64RM ( AMD64RM* op ) {
   switch (op->tag) {
      case Arm_Mem:
         ppAMD64AMode(op->Arm.Mem.am);
         return;
      case Arm_Reg:
         ppHRegAMD64(op->Arm.Reg.reg);
         return;
      default:
         vpanic("ppAMD64RM");
   }
}

/* Because an AMD64RM can be both a source or destination operand, we
   have to supply a mode -- pertaining to the operand as a whole --
   indicating how it's being used. */
static void addRegUsage_AMD64RM ( HRegUsage* u, AMD64RM* op, HRegMode mode ) {
   switch (op->tag) {
      case Arm_Mem:
         /* Memory is read, written or modified.  So we just want to
            know the regs read by the amode. */
         addRegUsage_AMD64AMode(u, op->Arm.Mem.am);
         return;
      case Arm_Reg:
         /* reg is read, written or modified.  Add it in the
            appropriate way. */
         addHRegUse(u, mode, op->Arm.Reg.reg);
         return;
      default:
         vpanic("addRegUsage_AMD64RM");
   }
}

static void mapRegs_AMD64RM ( HRegRemap* m, AMD64RM* op )
{
   switch (op->tag) {
      case Arm_Mem:
         mapRegs_AMD64AMode(m, op->Arm.Mem.am);
         return;
      case Arm_Reg:
         op->Arm.Reg.reg = lookupHRegRemap(m, op->Arm.Reg.reg);
         return;
      default:
         vpanic("mapRegs_AMD64RM");
   }
}
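/* Usage sketch (illustrative only; not part of the original file).
   The helpers above are meant to be driven by an instruction-level
   getRegUsage pass.  For a hypothetical "op src, dst" instruction
   whose src is an AMD64RI (read-only by construction) and whose dst
   is an AMD64RM that is both read and written, that pass would do
   something like:

      HRegUsage u;
      initHRegUsage(&u);
      addRegUsage_AMD64RI(&u, src);             // imm adds nothing;
                                                // reg is added as HRmRead
      addRegUsage_AMD64RM(&u, dst, HRmModify);  // whole operand read+written
*/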
/* --------- Instructions. --------- */

static HChar* showAMD64ScalarSz ( Int sz ) {
   switch (sz) {
      case 2: return "w";
      case 4: return "l";
      case 8: return "q";
      default: vpanic("showAMD64ScalarSz");
   }
}

HChar* showAMD64UnaryOp ( AMD64UnaryOp op ) {
   switch (op) {
      case Aun_NOT: return "not";
      case Aun_NEG: return "neg";
      default: vpanic("showAMD64UnaryOp");
   }
}

HChar* showAMD64AluOp ( AMD64AluOp op ) {
   switch (op) {
      case Aalu_MOV: return "mov";
      case Aalu_CMP: return "cmp";
      case Aalu_ADD: return "add";
      case Aalu_SUB: return "sub";
      case Aalu_ADC: return "adc";
      case Aalu_SBB: return "sbb";
      case Aalu_AND: return "and";
      case Aalu_OR:  return "or";
      case Aalu_XOR: return "xor";
      case Aalu_MUL: return "imul";
      default: vpanic("showAMD64AluOp");
   }
}

HChar* showAMD64ShiftOp ( AMD64ShiftOp op ) {
   switch (op) {
      case Ash_SHL: return "shl";
      case Ash_SHR: return "shr";
      case Ash_SAR: return "sar";
      default: vpanic("showAMD64ShiftOp");
   }
}

HChar* showA87FpOp ( A87FpOp op ) {
   switch (op) {
//..       case Xfp_ADD:    return "add";
//..       case Xfp_SUB:    return "sub";
//..       case Xfp_MUL:    return "mul";
//..       case Xfp_DIV:    return "div";
      case Afp_SCALE:  return "scale";
      case Afp_ATAN:   return "atan";
      case Afp_YL2X:   return "yl2x";
      case Afp_YL2XP1: return "yl2xp1";
      case Afp_PREM:   return "prem";
//..       case Xfp_PREM1:  return "prem1";
      case Afp_SQRT:   return "sqrt";
//..       case Xfp_ABS:    return "abs";
//..       case Xfp_NEG:    return "chs";
//..       case Xfp_MOV:    return "mov";
      case Afp_SIN:    return "sin";
      case Afp_COS:    return "cos";
      case Afp_TAN:    return "tan";
      case Afp_ROUND:  return "round";
      case Afp_2XM1:   return "2xm1";
      default: vpanic("showA87FpOp");
   }
}

HChar* showAMD64SseOp ( AMD64SseOp op ) {
   switch (op) {
      case Asse_MOV:      return "movups";
      case Asse_ADDF:     return "add";
      case Asse_SUBF:     return "sub";
      case Asse_MULF:     return "mul";
      case Asse_DIVF:     return "div";
      case Asse_MAXF:     return "max";
      case Asse_MINF:     return "min";
      case Asse_CMPEQF:   return "cmpFeq";
      case Asse_CMPLTF:   return "cmpFlt";
      case Asse_CMPLEF:   return "cmpFle";
      case Asse_CMPUNF:   return "cmpFun";
      case Asse_RCPF:     return "rcp";
      case Asse_RSQRTF:   return "rsqrt";
      case Asse_SQRTF:    return "sqrt";
      case Asse_AND:      return "and";
      case Asse_OR:       return "or";
      case Asse_XOR:      return "xor";
      case Asse_ANDN:     return "andn";
      case Asse_ADD8:     return "paddb";
      case Asse_ADD16:    return "paddw";
      case Asse_ADD32:    return "paddd";
      case Asse_ADD64:    return "paddq";
      case Asse_QADD8U:   return "paddusb";
      case Asse_QADD16U:  return "paddusw";
      case Asse_QADD8S:   return "paddsb";
      case Asse_QADD16S:  return "paddsw";
      case Asse_SUB8:     return "psubb";
      case Asse_SUB16:    return "psubw";
      case Asse_SUB32:    return "psubd";
      case Asse_SUB64:    return "psubq";
      case Asse_QSUB8U:   return "psubusb";
      case Asse_QSUB16U:  return "psubusw";
      case Asse_QSUB8S:   return "psubsb";
      case Asse_QSUB16S:  return "psubsw";
      case Asse_MUL16:    return "pmullw";
      case Asse_MULHI16U: return "pmulhuw";
      case Asse_MULHI16S: return "pmulhw";
      case Asse_AVG8U:    return "pavgb";
      case Asse_AVG16U:   return "pavgw";
      case Asse_MAX16S:   return "pmaxw";
      case Asse_MAX8U:    return "pmaxub";
      case Asse_MIN16S:   return "pminw";
      case Asse_MIN8U:    return "pminub";
      case Asse_CMPEQ8:   return "pcmpeqb";
      case Asse_CMPEQ16:  return "pcmpeqw";
      case Asse_CMPEQ32:  return "pcmpeqd";
      case Asse_CMPGT8S:  return "pcmpgtb";
      case Asse_CMPGT16S: return "pcmpgtw";
      case Asse_CMPGT32S: return "pcmpgtd";
      case Asse_SHL16:    return "psllw";
      case Asse_SHL32:    return "pslld";
      case Asse_SHL64:    return "psllq";
      case Asse_SHR16:    return "psrlw";
      case Asse_SHR32:    return "psrld";
      case Asse_SHR64:    return "psrlq";
      case Asse_SAR16:    return "psraw";
      case Asse_SAR32:    return "psrad";
      case Asse_PACKSSD:  return "packssdw";
      case Asse_PACKSSW:  return "packsswb";
      case Asse_PACKUSW:  return "packuswb";
      case Asse_UNPCKHB:  return "punpckhb";
      case Asse_UNPCKHW:  return "punpckhw";
      case Asse_UNPCKHD:  return "punpckhd";
      case Asse_UNPCKHQ:  return "punpckhq";
      case Asse_UNPCKLB:  return "punpcklb";
      case Asse_UNPCKLW:  return "punpcklw";
      case Asse_UNPCKLD:  return "punpckld";
      case Asse_UNPCKLQ:  return "punpcklq";
      default: vpanic("showAMD64SseOp");
   }
}
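/* Illustrative sketch (not part of the original file): the show*
   helpers above return bare mnemonic roots, which a pretty-printer
   combines with a size suffix and the operand printers.  Printing a
   64-bit ALU instruction might look like:

      vex_printf("%sq ", showAMD64AluOp(i->Ain.Alu64R.op));
      ppAMD64RMI(i->Ain.Alu64R.src);
      vex_printf(",");
      ppHRegAMD64(i->Ain.Alu64R.dst);
*/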
AMD64Instr* AMD64Instr_Imm64 ( ULong imm64, HReg dst ) {
   AMD64Instr* i      = LibVEX_Alloc(sizeof(AMD64Instr));
   i->tag             = Ain_Imm64;
   i->Ain.Imm64.imm64 = imm64;
   i->Ain.Imm64.dst   = dst;
   return i;
}
AMD64Instr* AMD64Instr_Alu64R ( AMD64AluOp op, AMD64RMI* src, HReg dst ) {
   AMD64Instr* i     = LibVEX_Alloc(sizeof(AMD64Instr));
   i->tag            = Ain_Alu64R;
   i->Ain.Alu64R.op  = op;
   i->Ain.Alu64R.src = src;
   i->Ain.Alu64R.dst = dst;
   return i;
}
AMD64Instr* AMD64Instr_Alu64M ( AMD64AluOp op, AMD64RI* src, AMD64AMode* dst ) {
   AMD64Instr* i     = LibVEX_Alloc(sizeof(AMD64Instr));
   i->tag            = Ain_Alu64M;
   i->Ain.Alu64M.op  = op;
   i->Ain.Alu64M.src = src;
   i->Ain.Alu64M.dst = dst;
   /* amd64 has no memory-destination multiply form. */
   vassert(op != Aalu_MUL);
   return i;
}
AMD64Instr* AMD64Instr_Sh64 ( AMD64ShiftOp op, UInt src, HReg dst ) {
   AMD64Instr* i   = LibVEX_Alloc(sizeof(AMD64Instr));
   i->tag          = Ain_Sh64;
   i->Ain.Sh64.op  = op;
   i->Ain.Sh64.src = src;
   i->Ain.Sh64.dst = dst;
   return i;
}
AMD64Instr* AMD64Instr_Test64 ( UInt imm32, HReg dst ) {
   AMD64Instr* i       = LibVEX_Alloc(sizeof(AMD64Instr));
   i->tag              = Ain_Test64;
   i->Ain.Test64.imm32 = imm32;
   i->Ain.Test64.dst   = dst;
   return i;
}
AMD64Instr* AMD64Instr_Unary64 ( AMD64UnaryOp op, HReg dst ) {
   AMD64Instr* i      = LibVEX_Alloc(sizeof(AMD64Instr));
   i->tag             = Ain_Unary64;
   i->Ain.Unary64.op  = op;
   i->Ain.Unary64.dst = dst;
   return i;
}
AMD64Instr* AMD64Instr_MulL ( Bool syned, AMD64RM* src ) {
   AMD64Instr* i     = LibVEX_Alloc(sizeof(AMD64Instr));
   i->tag            = Ain_MulL;
   i->Ain.MulL.syned = syned;
   i->Ain.MulL.src   = src;
   return i;
}
AMD64Instr* AMD64Instr_Div ( Bool syned, Int sz, AMD64RM* src ) {
   AMD64Instr* i    = LibVEX_Alloc(sizeof(AMD64Instr));
   i->tag           = Ain_Div;
   i->Ain.Div.syned = syned;
   i->Ain.Div.sz    = sz;
   i->Ain.Div.src   = src;
   vassert(sz == 4 || sz == 8);
   return i;
}
//.. AMD64Instr* AMD64Instr_Sh3232 ( AMD64ShiftOp op, UInt amt, HReg src, HReg dst ) {
//..    AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));
//..    i->tag            = Xin_Sh3232;
//..    i->Xin.Sh3232.op  = op;
//..    i->Xin.Sh3232.amt = amt;
//..    i->Xin.Sh3232.src = src;
//..    i->Xin.Sh3232.dst = dst;
//..    vassert(op == Xsh_SHL || op == Xsh_SHR);
//..    return i;
//.. }
AMD64Instr* AMD64Instr_Push( AMD64RMI* src ) {
   AMD64Instr* i   = LibVEX_Alloc(sizeof(AMD64Instr));
   i->tag          = Ain_Push;
   i->Ain.Push.src = src;
   return i;
}
AMD64Instr* AMD64Instr_Call ( AMD64CondCode cond, Addr64 target, Int regparms ) {
   AMD64Instr* i        = LibVEX_Alloc(sizeof(AMD64Instr));
   i->tag               = Ain_Call;
   i->Ain.Call.cond     = cond;
   i->Ain.Call.target   = target;
   i->Ain.Call.regparms = regparms;
   /* At most 6 integer args are passed in registers on amd64. */
   vassert(regparms >= 0 && regparms <= 6);
   return i;
}
AMD64Instr* AMD64Instr_Goto ( IRJumpKind jk, AMD64CondCode cond, AMD64RI* dst ) {
   AMD64Instr* i    = LibVEX_Alloc(sizeof(AMD64Instr));
   i->tag           = Ain_Goto;
   i->Ain.Goto.cond = cond;
   i->Ain.Goto.dst  = dst;
   i->Ain.Goto.jk   = jk;
   return i;
}
AMD64Instr* AMD64Instr_CMov64 ( AMD64CondCode cond, AMD64RM* src, HReg dst ) {
   AMD64Instr* i      = LibVEX_Alloc(sizeof(AMD64Instr));
   i->tag             = Ain_CMov64;
   i->Ain.CMov64.cond = cond;
   i->Ain.CMov64.src  = src;
   i->Ain.CMov64.dst  = dst;
   vassert(cond != Acc_ALWAYS);
   return i;
}
AMD64Instr* AMD64Instr_MovZLQ ( HReg src, HReg dst ) {
   AMD64Instr* i     = LibVEX_Alloc(sizeof(AMD64Instr));
   i->tag            = Ain_MovZLQ;
   i->Ain.MovZLQ.src = src;
   i->Ain.MovZLQ.dst = dst;
   return i;
}
AMD64Instr* AMD64Instr_LoadEX ( UChar szSmall, Bool syned,
                                AMD64AMode* src, HReg dst ) {
   AMD64Instr* i         = LibVEX_Alloc(sizeof(AMD64Instr));
   i->tag                = Ain_LoadEX;
   i->Ain.LoadEX.szSmall = szSmall;
   i->Ain.LoadEX.syned   = syned;
   i->Ain.LoadEX.src     = src;
   i->Ain.LoadEX.dst     = dst;
   return i;
}
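/* Construction sketch (illustrative only; not part of the original
   file).  Building the equivalent of "movabsq $0x42, %rax ; addq $1,
   %rax" as two AMD64Instrs, using the constructors above; the
   hregAMD64_RAX() and AMD64RMI_Imm() helpers are assumed from
   elsewhere in this module:

      HReg rax = hregAMD64_RAX();
      AMD64Instr* i1 = AMD64Instr_Imm64(0x42ULL, rax);
      AMD64Instr* i2 = AMD64Instr_Alu64R(Aalu_ADD, AMD64RMI_Imm(1), rax);

   Note the vasserts above encode real constraints: Alu64M rejects
   Aalu_MUL (no memory-destination multiply on amd64), Div only takes
   operand sizes 4 or 8, and a CMov64 with Acc_ALWAYS would just be a
   plain move. */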