⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 hdefs.c

📁 The Valgrind distribution has multiple tools. The most popular is the memory checking tool (called Memcheck).
💻 C
📖 第 1 页 / 共 5 页
字号:
   i->Ain.LoadEX.src     = src;   i->Ain.LoadEX.dst     = dst;   vassert(szSmall == 1 || szSmall == 2 || szSmall == 4);   return i;}AMD64Instr* AMD64Instr_Store ( UChar sz, HReg src, AMD64AMode* dst ) {   AMD64Instr* i    = LibVEX_Alloc(sizeof(AMD64Instr));   i->tag           = Ain_Store;   i->Ain.Store.sz  = sz;   i->Ain.Store.src = src;   i->Ain.Store.dst = dst;   vassert(sz == 1 || sz == 2 || sz == 4);   return i;}AMD64Instr* AMD64Instr_Set64 ( AMD64CondCode cond, HReg dst ) {   AMD64Instr* i     = LibVEX_Alloc(sizeof(AMD64Instr));   i->tag            = Ain_Set64;   i->Ain.Set64.cond = cond;   i->Ain.Set64.dst  = dst;   return i;}AMD64Instr* AMD64Instr_Bsfr64 ( Bool isFwds, HReg src, HReg dst ) {   AMD64Instr* i        = LibVEX_Alloc(sizeof(AMD64Instr));   i->tag               = Ain_Bsfr64;   i->Ain.Bsfr64.isFwds = isFwds;   i->Ain.Bsfr64.src    = src;   i->Ain.Bsfr64.dst    = dst;   return i;}AMD64Instr* AMD64Instr_MFence ( void ){   AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));   i->tag        = Ain_MFence;   return i;}AMD64Instr* AMD64Instr_A87Free ( Int nregs ){   AMD64Instr* i        = LibVEX_Alloc(sizeof(AMD64Instr));   i->tag               = Ain_A87Free;   i->Ain.A87Free.nregs = nregs;   vassert(nregs >= 1 && nregs <= 7);   return i;}AMD64Instr* AMD64Instr_A87PushPop ( AMD64AMode* addr, Bool isPush ){   AMD64Instr* i            = LibVEX_Alloc(sizeof(AMD64Instr));   i->tag                   = Ain_A87PushPop;   i->Ain.A87PushPop.addr   = addr;   i->Ain.A87PushPop.isPush = isPush;   return i;}AMD64Instr* AMD64Instr_A87FpOp ( A87FpOp op ){   AMD64Instr* i     = LibVEX_Alloc(sizeof(AMD64Instr));   i->tag            = Ain_A87FpOp;   i->Ain.A87FpOp.op = op;   return i;}AMD64Instr* AMD64Instr_A87LdCW ( AMD64AMode* addr ){   AMD64Instr* i       = LibVEX_Alloc(sizeof(AMD64Instr));   i->tag              = Ain_A87LdCW;   i->Ain.A87LdCW.addr = addr;   return i;}AMD64Instr* AMD64Instr_A87StSW ( AMD64AMode* addr ){   AMD64Instr* i       = 
LibVEX_Alloc(sizeof(AMD64Instr));   i->tag              = Ain_A87StSW;   i->Ain.A87StSW.addr = addr;   return i;}//.. AMD64Instr* AMD64Instr_FpUnary ( AMD64FpOp op, HReg src, HReg dst ) {//..    AMD64Instr* i        = LibVEX_Alloc(sizeof(AMD64Instr));//..    i->tag             = Xin_FpUnary;//..    i->Xin.FpUnary.op  = op;//..    i->Xin.FpUnary.src = src;//..    i->Xin.FpUnary.dst = dst;//..    return i;//.. }//.. AMD64Instr* AMD64Instr_FpBinary ( AMD64FpOp op, HReg srcL, HReg srcR, HReg dst ) {//..    AMD64Instr* i          = LibVEX_Alloc(sizeof(AMD64Instr));//..    i->tag               = Xin_FpBinary;//..    i->Xin.FpBinary.op   = op;//..    i->Xin.FpBinary.srcL = srcL;//..    i->Xin.FpBinary.srcR = srcR;//..    i->Xin.FpBinary.dst  = dst;//..    return i;//.. }//.. AMD64Instr* AMD64Instr_FpLdSt ( Bool isLoad, UChar sz, HReg reg, AMD64AMode* addr ) {//..    AMD64Instr* i          = LibVEX_Alloc(sizeof(AMD64Instr));//..    i->tag               = Xin_FpLdSt;//..    i->Xin.FpLdSt.isLoad = isLoad;//..    i->Xin.FpLdSt.sz     = sz;//..    i->Xin.FpLdSt.reg    = reg;//..    i->Xin.FpLdSt.addr   = addr;//..    vassert(sz == 4 || sz == 8);//..    return i;//.. }//.. AMD64Instr* AMD64Instr_FpLdStI ( Bool isLoad, UChar sz,  //..                              HReg reg, AMD64AMode* addr ) {//..    AMD64Instr* i           = LibVEX_Alloc(sizeof(AMD64Instr));//..    i->tag                = Xin_FpLdStI;//..    i->Xin.FpLdStI.isLoad = isLoad;//..    i->Xin.FpLdStI.sz     = sz;//..    i->Xin.FpLdStI.reg    = reg;//..    i->Xin.FpLdStI.addr   = addr;//..    vassert(sz == 2 || sz == 4 || sz == 8);//..    return i;//.. }//.. AMD64Instr* AMD64Instr_Fp64to32 ( HReg src, HReg dst ) {//..    AMD64Instr* i         = LibVEX_Alloc(sizeof(AMD64Instr));//..    i->tag              = Xin_Fp64to32;//..    i->Xin.Fp64to32.src = src;//..    i->Xin.Fp64to32.dst = dst;//..    return i;//.. }//.. AMD64Instr* AMD64Instr_FpCMov ( AMD64CondCode cond, HReg src, HReg dst ) {//..    
AMD64Instr* i        = LibVEX_Alloc(sizeof(AMD64Instr));//..    i->tag             = Xin_FpCMov;//..    i->Xin.FpCMov.cond = cond;//..    i->Xin.FpCMov.src  = src;//..    i->Xin.FpCMov.dst  = dst;//..    vassert(cond != Xcc_ALWAYS);//..    return i;//.. }AMD64Instr* AMD64Instr_LdMXCSR ( AMD64AMode* addr ) {   AMD64Instr* i         = LibVEX_Alloc(sizeof(AMD64Instr));   i->tag                = Ain_LdMXCSR;   i->Ain.LdMXCSR.addr   = addr;   return i;}//.. AMD64Instr* AMD64Instr_FpStSW_AX ( void ) {//..    AMD64Instr* i = LibVEX_Alloc(sizeof(AMD64Instr));//..    i->tag      = Xin_FpStSW_AX;//..    return i;//.. }AMD64Instr* AMD64Instr_SseUComIS ( Int sz, HReg srcL, HReg srcR, HReg dst ) {   AMD64Instr* i         = LibVEX_Alloc(sizeof(AMD64Instr));   i->tag                = Ain_SseUComIS;   i->Ain.SseUComIS.sz   = toUChar(sz);   i->Ain.SseUComIS.srcL = srcL;   i->Ain.SseUComIS.srcR = srcR;   i->Ain.SseUComIS.dst  = dst;   vassert(sz == 4 || sz == 8);   return i;}AMD64Instr* AMD64Instr_SseSI2SF ( Int szS, Int szD, HReg src, HReg dst ) {   AMD64Instr* i       = LibVEX_Alloc(sizeof(AMD64Instr));   i->tag              = Ain_SseSI2SF;   i->Ain.SseSI2SF.szS = toUChar(szS);   i->Ain.SseSI2SF.szD = toUChar(szD);   i->Ain.SseSI2SF.src = src;   i->Ain.SseSI2SF.dst = dst;   vassert(szS == 4 || szS == 8);   vassert(szD == 4 || szD == 8);   return i;}AMD64Instr* AMD64Instr_SseSF2SI ( Int szS, Int szD, HReg src, HReg dst ) {   AMD64Instr* i       = LibVEX_Alloc(sizeof(AMD64Instr));   i->tag              = Ain_SseSF2SI;   i->Ain.SseSF2SI.szS = toUChar(szS);   i->Ain.SseSF2SI.szD = toUChar(szD);   i->Ain.SseSF2SI.src = src;   i->Ain.SseSF2SI.dst = dst;   vassert(szS == 4 || szS == 8);   vassert(szD == 4 || szD == 8);   return i;}AMD64Instr* AMD64Instr_SseSDSS   ( Bool from64, HReg src, HReg dst ){   AMD64Instr* i         = LibVEX_Alloc(sizeof(AMD64Instr));   i->tag                = Ain_SseSDSS;   i->Ain.SseSDSS.from64 = from64;   i->Ain.SseSDSS.src    = src;   i->Ain.SseSDSS.dst    = 
dst;   return i;}//.. AMD64Instr* AMD64Instr_SseConst ( UShort con, HReg dst ) {//..    AMD64Instr* i            = LibVEX_Alloc(sizeof(AMD64Instr));//..    i->tag                 = Xin_SseConst;//..    i->Xin.SseConst.con    = con;//..    i->Xin.SseConst.dst    = dst;//..    vassert(hregClass(dst) == HRcVec128);//..    return i;//.. }AMD64Instr* AMD64Instr_SseLdSt ( Bool isLoad, Int sz,                                  HReg reg, AMD64AMode* addr ) {   AMD64Instr* i         = LibVEX_Alloc(sizeof(AMD64Instr));   i->tag                = Ain_SseLdSt;   i->Ain.SseLdSt.isLoad = isLoad;   i->Ain.SseLdSt.sz     = toUChar(sz);   i->Ain.SseLdSt.reg    = reg;   i->Ain.SseLdSt.addr   = addr;   vassert(sz == 4 || sz == 8 || sz == 16);   return i;}AMD64Instr* AMD64Instr_SseLdzLO  ( Int sz, HReg reg, AMD64AMode* addr ){   AMD64Instr* i         = LibVEX_Alloc(sizeof(AMD64Instr));   i->tag                = Ain_SseLdzLO;   i->Ain.SseLdzLO.sz    = sz;   i->Ain.SseLdzLO.reg   = reg;   i->Ain.SseLdzLO.addr  = addr;   vassert(sz == 4 || sz == 8);   return i;}AMD64Instr* AMD64Instr_Sse32Fx4 ( AMD64SseOp op, HReg src, HReg dst ) {   AMD64Instr* i       = LibVEX_Alloc(sizeof(AMD64Instr));   i->tag              = Ain_Sse32Fx4;   i->Ain.Sse32Fx4.op  = op;   i->Ain.Sse32Fx4.src = src;   i->Ain.Sse32Fx4.dst = dst;   vassert(op != Asse_MOV);   return i;}AMD64Instr* AMD64Instr_Sse32FLo ( AMD64SseOp op, HReg src, HReg dst ) {   AMD64Instr* i       = LibVEX_Alloc(sizeof(AMD64Instr));   i->tag              = Ain_Sse32FLo;   i->Ain.Sse32FLo.op  = op;   i->Ain.Sse32FLo.src = src;   i->Ain.Sse32FLo.dst = dst;   vassert(op != Asse_MOV);   return i;}AMD64Instr* AMD64Instr_Sse64Fx2 ( AMD64SseOp op, HReg src, HReg dst ) {   AMD64Instr* i       = LibVEX_Alloc(sizeof(AMD64Instr));   i->tag              = Ain_Sse64Fx2;   i->Ain.Sse64Fx2.op  = op;   i->Ain.Sse64Fx2.src = src;   i->Ain.Sse64Fx2.dst = dst;   vassert(op != Asse_MOV);   return i;}AMD64Instr* AMD64Instr_Sse64FLo ( AMD64SseOp op, HReg src, HReg dst 
) {   AMD64Instr* i       = LibVEX_Alloc(sizeof(AMD64Instr));   i->tag              = Ain_Sse64FLo;   i->Ain.Sse64FLo.op  = op;   i->Ain.Sse64FLo.src = src;   i->Ain.Sse64FLo.dst = dst;   vassert(op != Asse_MOV);   return i;}AMD64Instr* AMD64Instr_SseReRg ( AMD64SseOp op, HReg re, HReg rg ) {   AMD64Instr* i      = LibVEX_Alloc(sizeof(AMD64Instr));   i->tag             = Ain_SseReRg;   i->Ain.SseReRg.op  = op;   i->Ain.SseReRg.src = re;   i->Ain.SseReRg.dst = rg;   return i;}AMD64Instr* AMD64Instr_SseCMov ( AMD64CondCode cond, HReg src, HReg dst ) {   AMD64Instr* i       = LibVEX_Alloc(sizeof(AMD64Instr));   i->tag              = Ain_SseCMov;   i->Ain.SseCMov.cond = cond;   i->Ain.SseCMov.src  = src;   i->Ain.SseCMov.dst  = dst;   vassert(cond != Acc_ALWAYS);   return i;}AMD64Instr* AMD64Instr_SseShuf ( Int order, HReg src, HReg dst ) {   AMD64Instr* i        = LibVEX_Alloc(sizeof(AMD64Instr));   i->tag               = Ain_SseShuf;   i->Ain.SseShuf.order = order;   i->Ain.SseShuf.src   = src;   i->Ain.SseShuf.dst   = dst;   vassert(order >= 0 && order <= 0xFF);   return i;}void ppAMD64Instr ( AMD64Instr* i, Bool mode64 ) {   vassert(mode64 == True);   switch (i->tag) {      case Ain_Imm64:          vex_printf("movabsq $0x%llx,", i->Ain.Imm64.imm64);         ppHRegAMD64(i->Ain.Imm64.dst);         return;      case Ain_Alu64R:         vex_printf("%sq ", showAMD64AluOp(i->Ain.Alu64R.op));         ppAMD64RMI(i->Ain.Alu64R.src);         vex_printf(",");         ppHRegAMD64(i->Ain.Alu64R.dst);         return;      case Ain_Alu64M:         vex_printf("%sq ", showAMD64AluOp(i->Ain.Alu64M.op));         ppAMD64RI(i->Ain.Alu64M.src);         vex_printf(",");         ppAMD64AMode(i->Ain.Alu64M.dst);         return;      case Ain_Sh64:         vex_printf("%sq ", showAMD64ShiftOp(i->Ain.Sh64.op));         if (i->Ain.Sh64.src == 0)            vex_printf("%%cl,");          else             vex_printf("$%d,", (Int)i->Ain.Sh64.src);         ppHRegAMD64(i->Ain.Sh64.dst);         
return;      case Ain_Test64:         vex_printf("testq $%d,", (Int)i->Ain.Test64.imm32);         ppHRegAMD64(i->Ain.Test64.dst);         return;      case Ain_Unary64:         vex_printf("%sq ", showAMD64UnaryOp(i->Ain.Unary64.op));         ppHRegAMD64(i->Ain.Unary64.dst);         return;      case Ain_MulL:         vex_printf("%cmulq ", i->Ain.MulL.syned ? 's' : 'u');         ppAMD64RM(i->Ain.MulL.src);         return;      case Ain_Div:         vex_printf("%cdiv%s ",                    i->Ain.Div.syned ? 's' : 'u',                    showAMD64ScalarSz(i->Ain.Div.sz));         ppAMD64RM(i->Ain.Div.src);         return;//..       case Xin_Sh3232://..          vex_printf("%sdl ", showAMD64ShiftOp(i->Xin.Sh3232.op));//..          if (i->Xin.Sh3232.amt == 0)//..            vex_printf(" %%cl,"); //..          else //..             vex_printf(" $%d,", i->Xin.Sh3232.amt);//..          ppHRegAMD64(i->Xin.Sh3232.src);//..          vex_printf(",");//..          ppHRegAMD64(i->Xin.Sh3232.dst);//..          return;      case Ain_Push:         vex_printf("pushq ");         ppAMD64RMI(i->Ain.Push.src);         return;      case Ain_Call:         vex_printf("call%s[%d] ",                     i->Ain.Call.cond==Acc_ALWAYS                        ? 
"" : showAMD64CondCode(i->Ain.Call.cond),                    i->Ain.Call.regparms );         vex_printf("0x%llx", i->Ain.Call.target);         break;      case Ain_Goto:         if (i->Ain.Goto.cond != Acc_ALWAYS) {            vex_printf("if (%%rflags.%s) { ",                        showAMD64CondCode(i->Ain.Goto.cond));         }         if (i->Ain.Goto.jk != Ijk_Boring             && i->Ain.Goto.jk != Ijk_Call             && i->Ain.Goto.jk != Ijk_Ret) {            vex_printf("movl $");            ppIRJumpKind(i->Ain.Goto.jk);            vex_printf(",%%ebp ; ");         }         vex_printf("movq ");         ppAMD64RI(i->Ain.Goto.dst);         vex_printf(",%%rax ; movabsq $dispatcher_addr,%%rdx ; jmp *%%rdx");         if (i->Ain.Goto.cond != Acc_ALWAYS) {            vex_printf(" }");         }         return;      case Ain_CMov64:         vex_printf("cmov%s ", showAMD64CondCode(i->Ain.CMov64.cond));         ppAMD64RM(i->Ain.CMov64.src);         vex_printf(",");         ppHRegAMD64(i->Ain.CMov64.dst);         return;      case Ain_MovZLQ:         vex_printf("movzlq ");         ppHRegAMD64_lo32(i->Ain.MovZLQ.src);         vex_printf(",");         ppHRegAMD64(i->Ain.MovZLQ.dst);         return;      case Ain_LoadEX:         if (i->Ain.LoadEX.szSmall==4 && !i->Ain.LoadEX.syned) {            vex_printf("movl ");            ppAMD64AMode(i->Ain.LoadEX.src);            vex_printf(",");            ppHRegAMD64_lo32(i->Ain.LoadEX.dst);         } else {            vex_printf("mov%c%cq ",

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -