
hdefs.h

The Valgrind distribution has multiple tools. The most popular is the memory checking tool (called Memcheck).
      Ain_Sh64,        /* 64-bit shift/rotate, dst=REG or MEM */
      Ain_Test64,      /* 64-bit test (AND, set flags, discard result) */
      Ain_Unary64,     /* 64-bit not and neg */
      Ain_MulL,        /* widening multiply */
      Ain_Div,         /* div and mod */
//..       Xin_Sh3232,    /* shldl or shrdl */
      Ain_Push,        /* push 64-bit value on stack */
      Ain_Call,        /* call to address in register */
      Ain_Goto,        /* conditional/unconditional jmp to dst */
      Ain_CMov64,      /* conditional move */
      Ain_MovZLQ,      /* reg-reg move, zeroing out top half */
      Ain_LoadEX,      /* mov{s,z}{b,w,l}q from mem to reg */
      Ain_Store,       /* store 32/16/8 bit value in memory */
      Ain_Set64,       /* convert condition code to 64-bit value */
      Ain_Bsfr64,      /* 64-bit bsf/bsr */
      Ain_MFence,      /* mem fence */
      Ain_A87Free,     /* free up x87 registers */
      Ain_A87PushPop,  /* x87 loads/stores */
      Ain_A87FpOp,     /* x87 operations */
      Ain_A87LdCW,     /* load x87 control word */
      Ain_A87StSW,     /* store x87 status word */
//..
//..       Xin_FpUnary,   /* FP fake unary op */
//..       Xin_FpBinary,  /* FP fake binary op */
//..       Xin_FpLdSt,    /* FP fake load/store */
//..       Xin_FpLdStI,   /* FP fake load/store, converting to/from Int */
//..       Xin_Fp64to32,  /* FP round IEEE754 double to IEEE754 single */
//..       Xin_FpCMov,    /* FP fake floating point conditional move */
      Ain_LdMXCSR,     /* load %mxcsr */
//..       Xin_FpStSW_AX, /* fstsw %ax */
      Ain_SseUComIS,   /* ucomisd/ucomiss, then get %rflags into int
                          register */
      Ain_SseSI2SF,    /* scalar 32/64 int to 32/64 float conversion */
      Ain_SseSF2SI,    /* scalar 32/64 float to 32/64 int conversion */
      Ain_SseSDSS,     /* scalar float32 to/from float64 */
//..
//..       Xin_SseConst,  /* Generate restricted SSE literal */
      Ain_SseLdSt,     /* SSE load/store 32/64/128 bits, no alignment
                          constraints, upper 96/64/0 bits arbitrary */
      Ain_SseLdzLO,    /* SSE load low 32/64 bits, zero remainder of reg */
      Ain_Sse32Fx4,    /* SSE binary, 32Fx4 */
      Ain_Sse32FLo,    /* SSE binary, 32F in lowest lane only */
      Ain_Sse64Fx2,    /* SSE binary, 64Fx2 */
      Ain_Sse64FLo,    /* SSE binary, 64F in lowest lane only */
      Ain_SseReRg,     /* SSE binary general reg-reg, Re, Rg */
      Ain_SseCMov,     /* SSE conditional move */
      Ain_SseShuf      /* SSE2 shuffle (pshufd) */
   }
   AMD64InstrTag;

/* Destinations are on the RIGHT (second operand) */

typedef
   struct {
      AMD64InstrTag tag;
      union {
         struct {
            ULong imm64;
            HReg  dst;
         } Imm64;
         struct {
            AMD64AluOp op;
            AMD64RMI*  src;
            HReg       dst;
         } Alu64R;
         struct {
            AMD64AluOp  op;
            AMD64RI*    src;
            AMD64AMode* dst;
         } Alu64M;
         struct {
            AMD64ShiftOp op;
            UInt         src;  /* shift amount, or 0 means %cl */
            HReg         dst;
         } Sh64;
         struct {
            UInt   imm32;
            HReg   dst;
         } Test64;
         /* Not and Neg */
         struct {
            AMD64UnaryOp op;
            HReg         dst;
         } Unary64;
         /* 64 x 64 -> 128 bit widening multiply: RDX:RAX = RAX *s/u
            r/m64 */
         struct {
            Bool     syned;
            AMD64RM* src;
         } MulL;
         /* amd64 div/idiv instruction.  Modifies RDX and RAX and
            reads src. */
         struct {
            Bool     syned;
            Int      sz; /* 4 or 8 only */
            AMD64RM* src;
         } Div;
//..          /* shld/shrd.  op may only be Xsh_SHL or Xsh_SHR */
//..          struct {
//..             X86ShiftOp op;
//..             UInt       amt;   /* shift amount, or 0 means %cl */
//..             HReg       src;
//..             HReg       dst;
//..          } Sh3232;
         struct {
            AMD64RMI* src;
         } Push;
         /* Pseudo-insn.  Call target (an absolute address), on given
            condition (which could be Xcc_ALWAYS). */
         struct {
            AMD64CondCode cond;
            Addr64        target;
            Int           regparms; /* 0 .. 6 */
         } Call;
         /* Pseudo-insn.  Goto dst, on given condition (which could be
            Acc_ALWAYS). */
         struct {
            IRJumpKind    jk;
            AMD64CondCode cond;
            AMD64RI*      dst;
         } Goto;
         /* Mov src to dst on the given condition, which may not
            be the bogus Acc_ALWAYS. */
         struct {
            AMD64CondCode cond;
            AMD64RM*      src;
            HReg          dst;
         } CMov64;
         /* reg-reg move, zeroing out top half */
         struct {
            HReg src;
            HReg dst;
         } MovZLQ;
         /* Sign/Zero extending loads.  Dst size is always 64 bits. */
         struct {
            UChar       szSmall; /* only 1, 2 or 4 */
            Bool        syned;
            AMD64AMode* src;
            HReg        dst;
         } LoadEX;
         /* 32/16/8 bit stores. */
         struct {
            UChar       sz; /* only 1, 2 or 4 */
            HReg        src;
            AMD64AMode* dst;
         } Store;
         /* Convert an amd64 condition code to a 64-bit value (0 or 1). */
         struct {
            AMD64CondCode cond;
            HReg          dst;
         } Set64;
         /* 64-bit bsf or bsr. */
         struct {
            Bool isFwds;
            HReg src;
            HReg dst;
         } Bsfr64;
         /* Mem fence.  In short, an insn which flushes all preceding
            loads and stores as much as possible before continuing.
            On AMD64 we emit a real "mfence". */
         struct {
         } MFence;

         /* --- X87 --- */

         /* A very minimal set of x87 insns, that operate exactly in a
            stack-like way so no need to think about x87 registers. */

         /* Do 'ffree' on %st(7) .. %st(7-nregs) */
         struct {
            Int nregs; /* 1 <= nregs <= 7 */
         } A87Free;
         /* Push a 64-bit FP value from memory onto the stack, or move
            a value from the stack to memory and remove it from the
            stack. */
         struct {
            AMD64AMode* addr;
            Bool        isPush;
         } A87PushPop;
         /* Do an operation on the top-of-stack.  This can be unary, in
            which case it is %st0 = OP( %st0 ), or binary: %st0 = OP(
            %st0, %st1 ). */
         struct {
            A87FpOp op;
         } A87FpOp;
         /* Load the FPU control word. */
         struct {
            AMD64AMode* addr;
         } A87LdCW;
         /* Store the FPU status word (fstsw m16) */
         struct {
            AMD64AMode* addr;
         } A87StSW;

         /* --- SSE --- */

         /* Load 32 bits into %mxcsr. */
         struct {
            AMD64AMode* addr;
         }
         LdMXCSR;
//..          /* fstsw %ax */
//..          struct {
//..             /* no fields */
//..          }
//..          FpStSW_AX;
         /* ucomisd/ucomiss, then get %rflags into int register */
         struct {
            UChar   sz;   /* 4 or 8 only */
            HReg    srcL; /* xmm */
            HReg    srcR; /* xmm */
            HReg    dst;  /* int */
         } SseUComIS;
         /* scalar 32/64 int to 32/64 float conversion */
         struct {
            UChar szS; /* 4 or 8 */
            UChar szD; /* 4 or 8 */
            HReg  src; /* i class */
            HReg  dst; /* v class */
         } SseSI2SF;
         /* scalar 32/64 float to 32/64 int conversion */
         struct {
            UChar szS; /* 4 or 8 */
            UChar szD; /* 4 or 8 */
            HReg  src; /* v class */
            HReg  dst; /* i class */
         } SseSF2SI;
         /* scalar float32 to/from float64 */
         struct {
            Bool from64; /* True: 64->32; False: 32->64 */
            HReg src;
            HReg dst;
         } SseSDSS;
//..
//..          /* Simplistic SSE[123] */
//..          struct {
//..             UShort  con;
//..             HReg    dst;
//..          } SseConst;
         struct {
            Bool        isLoad;
            UChar       sz; /* 4, 8 or 16 only */
            HReg        reg;
            AMD64AMode* addr;
         } SseLdSt;
         struct {
            Int         sz; /* 4 or 8 only */
            HReg        reg;
            AMD64AMode* addr;
         } SseLdzLO;
         struct {
            AMD64SseOp op;
            HReg       src;
            HReg       dst;
         } Sse32Fx4;
         struct {
            AMD64SseOp op;
            HReg       src;
            HReg       dst;
         } Sse32FLo;
         struct {
            AMD64SseOp op;
            HReg       src;
            HReg       dst;
         } Sse64Fx2;
         struct {
            AMD64SseOp op;
            HReg       src;
            HReg       dst;
         } Sse64FLo;
         struct {
            AMD64SseOp op;
            HReg       src;
            HReg       dst;
         } SseReRg;
         /* Mov src to dst on the given condition, which may not
            be the bogus Xcc_ALWAYS. */
         struct {
            AMD64CondCode cond;
            HReg          src;
            HReg          dst;
         } SseCMov;
         struct {
            Int    order; /* 0 <= order <= 0xFF */
            HReg   src;
            HReg   dst;
         } SseShuf;
      } Ain;
   }
   AMD64Instr;

extern AMD64Instr* AMD64Instr_Imm64      ( ULong imm64, HReg dst );
extern AMD64Instr* AMD64Instr_Alu64R     ( AMD64AluOp, AMD64RMI*, HReg );
extern AMD64Instr* AMD64Instr_Alu64M     ( AMD64AluOp, AMD64RI*,  AMD64AMode* );
extern AMD64Instr* AMD64Instr_Unary64    ( AMD64UnaryOp op, HReg dst );
extern AMD64Instr* AMD64Instr_Sh64       ( AMD64ShiftOp, UInt, HReg );
extern AMD64Instr* AMD64Instr_Test64     ( UInt imm32, HReg dst );
extern AMD64Instr* AMD64Instr_MulL       ( Bool syned, AMD64RM* );
extern AMD64Instr* AMD64Instr_Div        ( Bool syned, Int sz, AMD64RM* );
//.. extern AMD64Instr* AMD64Instr_Sh3232    ( AMD64ShiftOp, UInt amt, HReg src, HReg dst );
extern AMD64Instr* AMD64Instr_Push       ( AMD64RMI* );
extern AMD64Instr* AMD64Instr_Call       ( AMD64CondCode, Addr64, Int );
extern AMD64Instr* AMD64Instr_Goto       ( IRJumpKind, AMD64CondCode cond, AMD64RI* dst );
extern AMD64Instr* AMD64Instr_CMov64     ( AMD64CondCode, AMD64RM* src, HReg dst );
extern AMD64Instr* AMD64Instr_MovZLQ     ( HReg src, HReg dst );
extern AMD64Instr* AMD64Instr_LoadEX     ( UChar szSmall, Bool syned,
                                           AMD64AMode* src, HReg dst );
extern AMD64Instr* AMD64Instr_Store      ( UChar sz, HReg src, AMD64AMode* dst );
extern AMD64Instr* AMD64Instr_Set64      ( AMD64CondCode cond, HReg dst );
extern AMD64Instr* AMD64Instr_Bsfr64     ( Bool isFwds, HReg src, HReg dst );
extern AMD64Instr* AMD64Instr_MFence     ( void );
extern AMD64Instr* AMD64Instr_A87Free    ( Int nregs );
extern AMD64Instr* AMD64Instr_A87PushPop ( AMD64AMode* addr, Bool isPush );
extern AMD64Instr* AMD64Instr_A87FpOp    ( A87FpOp op );
extern AMD64Instr* AMD64Instr_A87LdCW    ( AMD64AMode* addr );
extern AMD64Instr* AMD64Instr_A87StSW    ( AMD64AMode* addr );
//..
//.. extern AMD64Instr* AMD64Instr_FpUnary   ( AMD64FpOp op, HReg src, HReg dst );
//.. extern AMD64Instr* AMD64Instr_FpBinary  ( AMD64FpOp op, HReg srcL, HReg srcR, HReg dst );
//.. extern AMD64Instr* AMD64Instr_FpLdSt    ( Bool isLoad, UChar sz, HReg reg, AMD64AMode* );
//.. extern AMD64Instr* AMD64Instr_FpLdStI   ( Bool isLoad, UChar sz, HReg reg, AMD64AMode* );
//.. extern AMD64Instr* AMD64Instr_Fp64to32  ( HReg src, HReg dst );
//.. extern AMD64Instr* AMD64Instr_FpCMov    ( AMD64CondCode, HReg src, HReg dst );
extern AMD64Instr* AMD64Instr_LdMXCSR    ( AMD64AMode* );
//.. extern AMD64Instr* AMD64Instr_FpStSW_AX ( void );
extern AMD64Instr* AMD64Instr_SseUComIS  ( Int sz, HReg srcL, HReg srcR, HReg dst );
extern AMD64Instr* AMD64Instr_SseSI2SF   ( Int szS, Int szD, HReg src, HReg dst );
extern AMD64Instr* AMD64Instr_SseSF2SI   ( Int szS, Int szD, HReg src, HReg dst );
extern AMD64Instr* AMD64Instr_SseSDSS    ( Bool from64, HReg src, HReg dst );
//..
//.. extern AMD64Instr* AMD64Instr_SseConst  ( UShort con, HReg dst );
extern AMD64Instr* AMD64Instr_SseLdSt    ( Bool isLoad, Int sz, HReg, AMD64AMode* );
extern AMD64Instr* AMD64Instr_SseLdzLO   ( Int sz, HReg, AMD64AMode* );
extern AMD64Instr* AMD64Instr_Sse32Fx4   ( AMD64SseOp, HReg, HReg );
extern AMD64Instr* AMD64Instr_Sse32FLo   ( AMD64SseOp, HReg, HReg );
extern AMD64Instr* AMD64Instr_Sse64Fx2   ( AMD64SseOp, HReg, HReg );
extern AMD64Instr* AMD64Instr_Sse64FLo   ( AMD64SseOp, HReg, HReg );
extern AMD64Instr* AMD64Instr_SseReRg    ( AMD64SseOp, HReg, HReg );
extern AMD64Instr* AMD64Instr_SseCMov    ( AMD64CondCode, HReg src, HReg dst );
extern AMD64Instr* AMD64Instr_SseShuf    ( Int order, HReg src, HReg dst );

extern void ppAMD64Instr ( AMD64Instr*, Bool );

/* Some functions that insulate the register allocator from details
   of the underlying instruction set. */
extern void         getRegUsage_AMD64Instr ( HRegUsage*, AMD64Instr*, Bool );
extern void         mapRegs_AMD64Instr     ( HRegRemap*, AMD64Instr*, Bool );
extern Bool         isMove_AMD64Instr      ( AMD64Instr*, HReg*, HReg* );
extern Int          emit_AMD64Instr        ( UChar* buf, Int nbuf, AMD64Instr*,
                                             Bool, void* dispatch );
extern AMD64Instr*  genSpill_AMD64         ( HReg rreg, Int offset, Bool );
extern AMD64Instr*  genReload_AMD64        ( HReg rreg, Int offset, Bool );
extern void         getAllocableRegs_AMD64 ( Int*, HReg** );
extern HInstrArray* iselBB_AMD64           ( IRBB*, VexArch, VexArchInfo* );

#endif /* ndef __LIBVEX_HOST_AMD64_HDEFS_H */

/*---------------------------------------------------------------*/
/*--- end                                  host-amd64/hdefs.h ---*/
/*---------------------------------------------------------------*/
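As a usage illustration (not part of hdefs.h itself): the constructors above take the destination as the last argument, matching the "destinations are on the RIGHT" convention noted before the AMD64Instr union. Below is a minimal sketch, assuming the hregAMD64_*() register helpers, the AMD64RMI_Imm()/AMD64AMode_IR() constructors and the Aalu_ADD enumerator declared on the first page of this header, with the usual VEX basic types (Bool, True, ...) in scope.

/* Illustrative sketch only -- not part of hdefs.h.  Assumes
   hregAMD64_RAX(), hregAMD64_RBP(), AMD64RMI_Imm(), AMD64AMode_IR()
   and Aalu_ADD from the first page of this header. */
static void example_build_insns ( void )
{
   HReg rax = hregAMD64_RAX();

   /* addq $0x10, %rax -- the destination register is the final argument. */
   AMD64Instr* i1 = AMD64Instr_Alu64R( Aalu_ADD, AMD64RMI_Imm(0x10), rax );

   /* movl %eax, 16(%rbp) -- a 4-byte store of the low half of %rax. */
   AMD64Instr* i2 = AMD64Instr_Store( 4, rax,
                                      AMD64AMode_IR(16, hregAMD64_RBP()) );

   /* Pretty-print both; the trailing Bool is assumed here to be the
      same 64-bit-mode flag taken by the other *_AMD64Instr functions. */
   ppAMD64Instr(i1, True);
   ppAMD64Instr(i2, True);
}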
