/*---------------------------------------------------------------*/
/*---                                                         ---*/
/*--- This file (host-amd64/hdefs.c) is                       ---*/
/*--- Copyright (C) OpenWorks LLP.  All rights reserved.      ---*/
/*---                                                         ---*/
/*---------------------------------------------------------------*/

/*
   This file is part of LibVEX, a library for dynamic binary
   instrumentation and translation.

   Copyright (C) 2004-2006 OpenWorks LLP.  All rights reserved.

   This library is made available under a dual licensing scheme.

   If you link LibVEX against other code all of which is itself
   licensed under the GNU General Public License, version 2 dated
   June 1991 ("GPL v2"), then you may use LibVEX under the terms of
   the GPL v2, as appearing in the file LICENSE.GPL.  If the file
   LICENSE.GPL is missing, you can obtain a copy of the GPL v2 from
   the Free Software Foundation Inc., 51 Franklin St, Fifth Floor,
   Boston, MA 02110-1301, USA.

   For any other uses of LibVEX, you must first obtain a commercial
   license from OpenWorks LLP.  Please contact info@open-works.co.uk
   for information about commercial licensing.

   This software is provided by OpenWorks LLP "as is" and any express
   or implied warranties, including, but not limited to, the implied
   warranties of merchantability and fitness for a particular purpose
   are disclaimed.  In no event shall OpenWorks LLP be liable for any
   direct, indirect, incidental, special, exemplary, or consequential
   damages (including, but not limited to, procurement of substitute
   goods or services; loss of use, data, or profits; or business
   interruption) however caused and on any theory of liability,
   whether in contract, strict liability, or tort (including
   negligence or otherwise) arising in any way out of the use of this
   software, even if advised of the possibility of such damage.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/

#include "libvex_basictypes.h"
#include "libvex.h"
#include "libvex_trc_values.h"

#include "main/vex_util.h"
#include "host-generic/h_generic_regs.h"
#include "host-amd64/hdefs.h"


/* --------- Registers. --------- */

void ppHRegAMD64 ( HReg reg ) 
{
   Int r;
   static HChar* ireg64_names[16] 
     = { "%rax", "%rcx", "%rdx", "%rbx", "%rsp", "%rbp", "%rsi", "%rdi",
         "%r8",  "%r9",  "%r10", "%r11", "%r12", "%r13", "%r14", "%r15" };
   /* Be generic for all virtual regs. */
   if (hregIsVirtual(reg)) {
      ppHReg(reg);
      return;
   }
   /* But specific for real regs. */
   switch (hregClass(reg)) {
      case HRcInt64:
         r = hregNumber(reg);
         vassert(r >= 0 && r < 16);
         vex_printf("%s", ireg64_names[r]);
         return;
      case HRcFlt64:
         r = hregNumber(reg);
         vassert(r >= 0 && r < 6);
         vex_printf("%%fake%d", r);
         return;
      case HRcVec128:
         r = hregNumber(reg);
         vassert(r >= 0 && r < 16);
         vex_printf("%%xmm%d", r);
         return;
      default:
         vpanic("ppHRegAMD64");
   }
}

static void ppHRegAMD64_lo32 ( HReg reg ) 
{
   Int r;
   static HChar* ireg32_names[16] 
     = { "%eax", "%ecx", "%edx",  "%ebx",  "%esp",  "%ebp",  "%esi",  "%edi",
         "%r8d", "%r9d", "%r10d", "%r11d", "%r12d", "%r13d", "%r14d", "%r15d" };
   /* Be generic for all virtual regs. */
   if (hregIsVirtual(reg)) {
      ppHReg(reg);
      vex_printf("d");
      return;
   }
   /* But specific for real regs. */
   switch (hregClass(reg)) {
      case HRcInt64:
         r = hregNumber(reg);
         vassert(r >= 0 && r < 16);
         vex_printf("%s", ireg32_names[r]);
         return;
      default:
         vpanic("ppHRegAMD64_lo32: invalid regclass");
   }
}

HReg hregAMD64_RAX ( void ) { return mkHReg( 0, HRcInt64, False); }
HReg hregAMD64_RCX ( void ) { return mkHReg( 1, HRcInt64, False); }
HReg hregAMD64_RDX ( void ) { return mkHReg( 2, HRcInt64, False); }
HReg hregAMD64_RBX ( void ) { return mkHReg( 3, HRcInt64, False); }
HReg hregAMD64_RSP ( void ) { return mkHReg( 4, HRcInt64, False); }
HReg hregAMD64_RBP ( void ) { return mkHReg( 5, HRcInt64, False); }
HReg hregAMD64_RSI ( void ) { return mkHReg( 6, HRcInt64, False); }
HReg hregAMD64_RDI ( void ) { return mkHReg( 7, HRcInt64, False); }
HReg hregAMD64_R8  ( void ) { return mkHReg( 8, HRcInt64, False); }
HReg hregAMD64_R9  ( void ) { return mkHReg( 9, HRcInt64, False); }
HReg hregAMD64_R10 ( void ) { return mkHReg(10, HRcInt64, False); }
HReg hregAMD64_R11 ( void ) { return mkHReg(11, HRcInt64, False); }
HReg hregAMD64_R12 ( void ) { return mkHReg(12, HRcInt64, False); }
HReg hregAMD64_R13 ( void ) { return mkHReg(13, HRcInt64, False); }
HReg hregAMD64_R14 ( void ) { return mkHReg(14, HRcInt64, False); }
HReg hregAMD64_R15 ( void ) { return mkHReg(15, HRcInt64, False); }

//.. HReg hregAMD64_FAKE0 ( void ) { return mkHReg(0, HRcFlt64, False); }
//.. HReg hregAMD64_FAKE1 ( void ) { return mkHReg(1, HRcFlt64, False); }
//.. HReg hregAMD64_FAKE2 ( void ) { return mkHReg(2, HRcFlt64, False); }
//.. HReg hregAMD64_FAKE3 ( void ) { return mkHReg(3, HRcFlt64, False); }
//.. HReg hregAMD64_FAKE4 ( void ) { return mkHReg(4, HRcFlt64, False); }
//.. HReg hregAMD64_FAKE5 ( void ) { return mkHReg(5, HRcFlt64, False); }

HReg hregAMD64_XMM0  ( void ) { return mkHReg( 0, HRcVec128, False); }
HReg hregAMD64_XMM1  ( void ) { return mkHReg( 1, HRcVec128, False); }
HReg hregAMD64_XMM2  ( void ) { return mkHReg( 2, HRcVec128, False); }
HReg hregAMD64_XMM3  ( void ) { return mkHReg( 3, HRcVec128, False); }
HReg hregAMD64_XMM4  ( void ) { return mkHReg( 4, HRcVec128, False); }
HReg hregAMD64_XMM5  ( void ) { return mkHReg( 5, HRcVec128, False); }
HReg hregAMD64_XMM6  ( void ) { return mkHReg( 6, HRcVec128, False); }
HReg hregAMD64_XMM7  ( void ) { return mkHReg( 7, HRcVec128, False); }
HReg hregAMD64_XMM8  ( void ) { return mkHReg( 8, HRcVec128, False); }
HReg hregAMD64_XMM9  ( void ) { return mkHReg( 9, HRcVec128, False); }
HReg hregAMD64_XMM10 ( void ) { return mkHReg(10, HRcVec128, False); }
HReg hregAMD64_XMM11 ( void ) { return mkHReg(11, HRcVec128, False); }
HReg hregAMD64_XMM12 ( void ) { return mkHReg(12, HRcVec128, False); }
HReg hregAMD64_XMM13 ( void ) { return mkHReg(13, HRcVec128, False); }
HReg hregAMD64_XMM14 ( void ) { return mkHReg(14, HRcVec128, False); }
HReg hregAMD64_XMM15 ( void ) { return mkHReg(15, HRcVec128, False); }

void getAllocableRegs_AMD64 ( Int* nregs, HReg** arr )
{
#if 0
   *nregs = 6;
   *arr = LibVEX_Alloc(*nregs * sizeof(HReg));
   (*arr)[ 0] = hregAMD64_RSI();
   (*arr)[ 1] = hregAMD64_RDI();
   (*arr)[ 2] = hregAMD64_RBX();
   (*arr)[ 3] = hregAMD64_XMM7();
   (*arr)[ 4] = hregAMD64_XMM8();
   (*arr)[ 5] = hregAMD64_XMM9();
#endif
#if 1
   *nregs = 19;
   *arr = LibVEX_Alloc(*nregs * sizeof(HReg));
   (*arr)[ 0] = hregAMD64_RSI();
   (*arr)[ 1] = hregAMD64_RDI();
   (*arr)[ 2] = hregAMD64_R8();
   (*arr)[ 3] = hregAMD64_R9();
   (*arr)[ 4] = hregAMD64_R12();
   (*arr)[ 5] = hregAMD64_R13();
   (*arr)[ 6] = hregAMD64_R14();
   (*arr)[ 7] = hregAMD64_R15();
   (*arr)[ 8] = hregAMD64_RBX();
   (*arr)[ 9] = hregAMD64_XMM3();
   (*arr)[10] = hregAMD64_XMM4();
   (*arr)[11] = hregAMD64_XMM5();
   (*arr)[12] = hregAMD64_XMM6();
   (*arr)[13] = hregAMD64_XMM7();
   (*arr)[14] = hregAMD64_XMM8();
   (*arr)[15] = hregAMD64_XMM9();
   (*arr)[16] = hregAMD64_XMM10();
   (*arr)[17] = hregAMD64_XMM11();
   (*arr)[18] = hregAMD64_XMM12();
#endif
}
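
/* Illustrative sketch only (disabled; the wrapper name is
   hypothetical, not part of the interface): how a caller might
   enumerate the allocable set defined above and pretty-print each
   register with ppHRegAMD64. */
#if 0
static void showAllocableRegs_AMD64 ( void )
{
   Int   i, nregs;
   HReg* regs;
   getAllocableRegs_AMD64(&nregs, &regs);
   for (i = 0; i < nregs; i++) {
      ppHRegAMD64(regs[i]);   /* prints e.g. "%rsi" or "%xmm3" */
      vex_printf("\n");
   }
}
#endif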

/* --------- Condition codes, Intel encoding. --------- */

HChar* showAMD64CondCode ( AMD64CondCode cond )
{
   switch (cond) {
      case Acc_O:      return "o";
      case Acc_NO:     return "no";
      case Acc_B:      return "b";
      case Acc_NB:     return "nb";
      case Acc_Z:      return "z";
      case Acc_NZ:     return "nz";
      case Acc_BE:     return "be";
      case Acc_NBE:    return "nbe";
      case Acc_S:      return "s";
      case Acc_NS:     return "ns";
      case Acc_P:      return "p";
      case Acc_NP:     return "np";
      case Acc_L:      return "l";
      case Acc_NL:     return "nl";
      case Acc_LE:     return "le";
      case Acc_NLE:    return "nle";
      case Acc_ALWAYS: return "ALWAYS";
      default: vpanic("ppAMD64CondCode");
   }
}


/* --------- AMD64AMode: memory address expressions. --------- */

AMD64AMode* AMD64AMode_IR ( UInt imm32, HReg reg ) {
   AMD64AMode* am = LibVEX_Alloc(sizeof(AMD64AMode));
   am->tag        = Aam_IR;
   am->Aam.IR.imm = imm32;
   am->Aam.IR.reg = reg;
   return am;
}
AMD64AMode* AMD64AMode_IRRS ( UInt imm32, HReg base, HReg indEx, Int shift ) {
   AMD64AMode* am = LibVEX_Alloc(sizeof(AMD64AMode));
   am->tag = Aam_IRRS;
   am->Aam.IRRS.imm   = imm32;
   am->Aam.IRRS.base  = base;
   am->Aam.IRRS.index = indEx;
   am->Aam.IRRS.shift = shift;
   vassert(shift >= 0 && shift <= 3);
   return am;
}

//.. AMD64AMode* dopyAMD64AMode ( AMD64AMode* am ) {
//..    switch (am->tag) {
//..       case Xam_IR: 
//..          return AMD64AMode_IR( am->Xam.IR.imm, am->Xam.IR.reg );
//..       case Xam_IRRS: 
//..          return AMD64AMode_IRRS( am->Xam.IRRS.imm, am->Xam.IRRS.base, 
//..                                  am->Xam.IRRS.index, am->Xam.IRRS.shift );
//..       default:
//..          vpanic("dopyAMD64AMode");
//..    }
//.. }

void ppAMD64AMode ( AMD64AMode* am ) {
   switch (am->tag) {
      case Aam_IR: 
         if (am->Aam.IR.imm == 0)
            vex_printf("(");
         else
            vex_printf("0x%x(", am->Aam.IR.imm);
         ppHRegAMD64(am->Aam.IR.reg);
         vex_printf(")");
         return;
      case Aam_IRRS:
         vex_printf("0x%x(", am->Aam.IRRS.imm);
         ppHRegAMD64(am->Aam.IRRS.base);
         vex_printf(",");
         ppHRegAMD64(am->Aam.IRRS.index);
         vex_printf(",%d)", 1 << am->Aam.IRRS.shift);
         return;
      default:
         vpanic("ppAMD64AMode");
   }
}

static void addRegUsage_AMD64AMode ( HRegUsage* u, AMD64AMode* am ) {
   switch (am->tag) {
      case Aam_IR: 
         addHRegUse(u, HRmRead, am->Aam.IR.reg);
         return;
      case Aam_IRRS:
         addHRegUse(u, HRmRead, am->Aam.IRRS.base);
         addHRegUse(u, HRmRead, am->Aam.IRRS.index);
         return;
      default:
         vpanic("addRegUsage_AMD64AMode");
   }
}

static void mapRegs_AMD64AMode ( HRegRemap* m, AMD64AMode* am ) {
   switch (am->tag) {
      case Aam_IR: 
         am->Aam.IR.reg = lookupHRegRemap(m, am->Aam.IR.reg);
         return;
      case Aam_IRRS:
         am->Aam.IRRS.base  = lookupHRegRemap(m, am->Aam.IRRS.base);
         am->Aam.IRRS.index = lookupHRegRemap(m, am->Aam.IRRS.index);
         return;
      default:
         vpanic("mapRegs_AMD64AMode");
   }
}
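
/* Illustrative sketch only (disabled; the function name is
   hypothetical): building the two amode forms above.  Note that
   AMD64AMode_IRRS takes the log2 of the scale, so the x4 scale in
   0x10(%rax,%rbx,4) is passed as shift 2, and ppAMD64AMode prints
   the scale back as 1 << shift. */
#if 0
static void example_amodes ( void )
{
   /* simple base+offset form: 0x20(%rbp) */
   AMD64AMode* am1 = AMD64AMode_IR(0x20, hregAMD64_RBP());
   /* base+index*scale+offset form: 0x10(%rax,%rbx,4) */
   AMD64AMode* am2 = AMD64AMode_IRRS(0x10, hregAMD64_RAX(),
                                           hregAMD64_RBX(), 2);
   ppAMD64AMode(am1);   /* prints "0x20(%rbp)" */
   vex_printf("\n");
   ppAMD64AMode(am2);   /* prints "0x10(%rax,%rbx,4)" */
   vex_printf("\n");
}
#endif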

/* --------- Operand, which can be reg, immediate or memory. --------- */

AMD64RMI* AMD64RMI_Imm ( UInt imm32 ) {
   AMD64RMI* op       = LibVEX_Alloc(sizeof(AMD64RMI));
   op->tag            = Armi_Imm;
   op->Armi.Imm.imm32 = imm32;
   return op;
}
AMD64RMI* AMD64RMI_Reg ( HReg reg ) {
   AMD64RMI* op     = LibVEX_Alloc(sizeof(AMD64RMI));
   op->tag          = Armi_Reg;
   op->Armi.Reg.reg = reg;
   return op;
}
AMD64RMI* AMD64RMI_Mem ( AMD64AMode* am ) {
   AMD64RMI* op    = LibVEX_Alloc(sizeof(AMD64RMI));
   op->tag         = Armi_Mem;
   op->Armi.Mem.am = am;
   return op;
}

void ppAMD64RMI ( AMD64RMI* op ) {
   switch (op->tag) {
      case Armi_Imm: 
         vex_printf("$0x%x", op->Armi.Imm.imm32);
         return;
      case Armi_Reg: 
         ppHRegAMD64(op->Armi.Reg.reg);
         return;
      case Armi_Mem: 
         ppAMD64AMode(op->Armi.Mem.am);
         return;
      default: 
         vpanic("ppAMD64RMI");
   }
}

/* An AMD64RMI can only be used in a "read" context (what would it
   mean to write or modify a literal?) and so we enumerate its
   registers accordingly. */
static void addRegUsage_AMD64RMI ( HRegUsage* u, AMD64RMI* op ) {
   switch (op->tag) {
      case Armi_Imm: 
         return;
      case Armi_Reg: 
         addHRegUse(u, HRmRead, op->Armi.Reg.reg);
         return;
      case Armi_Mem: 
         addRegUsage_AMD64AMode(u, op->Armi.Mem.am);
         return;
      default: 
         vpanic("addRegUsage_AMD64RMI");
   }
}

static void mapRegs_AMD64RMI ( HRegRemap* m, AMD64RMI* op ) {
   switch (op->tag) {
      case Armi_Imm: 
         return;
      case Armi_Reg: 
         op->Armi.Reg.reg = lookupHRegRemap(m, op->Armi.Reg.reg);
         return;
      case Armi_Mem: 
         mapRegs_AMD64AMode(m, op->Armi.Mem.am);
         return;
      default: 
         vpanic("mapRegs_AMD64RMI");
   }
}
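
/* Illustrative sketch only (disabled; the function name is
   hypothetical): the three forms an AMD64RMI can take.  Any of them
   would be acceptable wherever an instruction accepts a
   reg/mem/immediate source operand. */
#if 0
static void example_rmis ( void )
{
   AMD64RMI* ri = AMD64RMI_Imm(0x2A);                /* literal   */
   AMD64RMI* rr = AMD64RMI_Reg(hregAMD64_RCX());     /* register  */
   AMD64RMI* rm = AMD64RMI_Mem(                      /* memory    */
                     AMD64AMode_IR(8, hregAMD64_RSP()));
   ppAMD64RMI(ri);   /* prints "$0x2a"     */
   vex_printf("\n");
   ppAMD64RMI(rr);   /* prints "%rcx"      */
   vex_printf("\n");
   ppAMD64RMI(rm);   /* prints "0x8(%rsp)" */
   vex_printf("\n");
}
#endif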

/* --------- Operand, which can be reg or immediate only. --------- */