/*
 * amd64-codegen.h: macros for emitting AMD64 (x86-64) machine code.
 */
/*
 * NOTE(review): this chunk begins mid-macro — the "#define" line of this
 * mov-from-membase macro (presumably amd64_mov_reg_membase) lies before the
 * visible region.  Emits an optional 0x66 operand-size prefix for 2-byte
 * operands, a REX prefix, then opcode 0x8a (byte mov) or 0x8b
 * (word/dword/qword mov), followed by the mod/rm + displacement bytes.
 */
do { \
	if ((size) == 2) \
		*(inst)++ = (unsigned char)0x66; \
	amd64_emit_rex(inst, (size), (reg), 0, (basereg)); \
	switch ((size)) { \
	case 1: *(inst)++ = (unsigned char)0x8a; break; \
	case 2: case 4: case 8: *(inst)++ = (unsigned char)0x8b; break; \
	default: assert (0); \
	} \
	amd64_membase_emit ((inst), (reg), (basereg), (disp)); \
} while (0)

/* Zero-extending load of `size` bytes from [basereg+disp] into reg.
 * Sizes 1/2 use movzx (0f b6 / 0f b7); sizes 4/8 use a plain mov (8b),
 * since a 32-bit mov already zero-extends the destination on amd64. */
#define amd64_movzx_reg_membase(inst,reg,basereg,disp,size) \
	do { \
		amd64_emit_rex(inst, (size), (reg), 0, (basereg)); \
		switch ((size)) { \
		case 1: *(inst)++ = (unsigned char)0x0f; *(inst)++ = (unsigned char)0xb6; break; \
		case 2: *(inst)++ = (unsigned char)0x0f; *(inst)++ = (unsigned char)0xb7; break; \
		case 4: case 8: *(inst)++ = (unsigned char)0x8b; break; \
		default: assert (0); \
		} \
		x86_membase_emit ((inst), ((reg)&0x7), ((basereg)&0x7), (disp)); \
	} while (0)

/* movsxd: sign-extend a 32-bit memory operand into a 64-bit register (opcode 63). */
#define amd64_movsxd_reg_membase(inst,reg,basereg,disp) \
	do { \
		amd64_emit_rex(inst,8,(reg),0,(basereg)); \
		*(inst)++ = (unsigned char)0x63; \
		x86_membase_emit ((inst), ((reg)&0x7), ((basereg)&0x7), (disp)); \
	} while (0)

/* movsxd: sign-extend a 32-bit register into a 64-bit register (opcode 63). */
#define amd64_movsxd_reg_reg(inst,dreg,reg) \
	do { \
		amd64_emit_rex(inst,8,(dreg),0,(reg)); \
		*(inst)++ = (unsigned char)0x63; \
		x86_reg_emit ((inst), (dreg), (reg)); \
	} while (0)

/* Pretty much the only instruction that supports a 64-bit immediate. Optimize for common case of
 * 32-bit immediate. Pepper with casts to avoid warnings.
 */
#define amd64_mov_reg_imm_size(inst,reg,imm,size) \
	do { \
		amd64_emit_rex(inst, (size), 0, 0, (reg)); \
		*(inst)++ = (unsigned char)0xb8 + ((reg) & 0x7); \
		if ((size) == 8) \
			x86_imm_emit64 ((inst), (long)(imm)); \
		else \
			x86_imm_emit32 ((inst), (int)(long)(imm)); \
	} while (0)

/* Load an immediate into a register, choosing the 4-byte encoding when the
 * value round-trips through int (i.e. fits in a sign-extended 32 bits). */
#define amd64_mov_reg_imm(inst,reg,imm) \
	do { \
		int _amd64_width_temp = ((long)(imm) == (long)(int)(long)(imm)); \
		amd64_mov_reg_imm_size ((inst), (reg), (imm), (_amd64_width_temp ? 4 : 8)); \
	} while (0)

/* Emit a full 8-byte immediate slot (value 0) to be patched later. */
#define amd64_set_reg_template(inst,reg) amd64_mov_reg_imm_size ((inst),(reg), 0, 8)
#define amd64_set_template(inst,reg) amd64_set_reg_template((inst),(reg))

/* Store an immediate of `size` bytes to [basereg+disp]: opcode c6 for byte,
 * c7 (with optional 0x66 prefix) for word/dword; /0 in the mod/rm reg field. */
#define amd64_mov_membase_imm(inst,basereg,disp,imm,size) \
	do { \
		if ((size) == 2) \
			*(inst)++ = (unsigned char)0x66; \
		amd64_emit_rex(inst, (size) == 1 ? 0 : (size), 0, 0, (basereg)); \
		if ((size) == 1) { \
			*(inst)++ = (unsigned char)0xc6; \
			x86_membase_emit ((inst), 0, (basereg) & 0x7, (disp)); \
			x86_imm_emit8 ((inst), (imm)); \
		} else if ((size) == 2) { \
			*(inst)++ = (unsigned char)0xc7; \
			x86_membase_emit ((inst), 0, (basereg) & 0x7, (disp)); \
			x86_imm_emit16 ((inst), (imm)); \
		} else { \
			*(inst)++ = (unsigned char)0xc7; \
			x86_membase_emit ((inst), 0, (basereg) & 0x7, (disp)); \
			x86_imm_emit32 ((inst), (imm)); \
		} \
	} while (0)

/* lea reg, [basereg+disp] — always 64-bit (opcode 8d). */
#define amd64_lea_membase(inst,reg,basereg,disp) \
	do { \
		amd64_emit_rex(inst, 8, (reg), 0, (basereg)); \
		*(inst)++ = (unsigned char)0x8d; \
		amd64_membase_emit ((inst), (reg), (basereg), (disp)); \
	} while (0)

/* Instructions are implicitly 64-bit so don't generate REX for just the size. */
#define amd64_push_reg(inst,reg) \
	do { \
		amd64_emit_rex(inst, 0, 0, 0, (reg)); \
		*(inst)++ = (unsigned char)0x50 + ((reg) & 0x7); \
	} while (0)

/* Instruction is implicitly 64-bit so don't generate REX for just the size. */
#define amd64_push_membase(inst,basereg,disp) \
	do { \
		amd64_emit_rex(inst, 0, 0, 0, (basereg)); \
		*(inst)++ = (unsigned char)0xff; \
		x86_membase_emit ((inst), 6, (basereg) & 0x7, (disp)); \
	} while (0)

#define amd64_pop_reg(inst,reg) \
	do { \
		amd64_emit_rex(inst, 0, 0, 0, (reg)); \
		*(inst)++ = (unsigned char)0x58 + ((reg) & 0x7); \
	} while (0)

/* Indirect call through a register: ff /2. */
#define amd64_call_reg(inst,reg) \
	do { \
		amd64_emit_rex(inst, 8, 0, 0, (reg)); \
		*(inst)++ = (unsigned char)0xff; \
		x86_reg_emit ((inst), 2, ((reg) & 0x7)); \
	} while (0)

#define amd64_ret(inst) do { *(inst)++ = (unsigned char)0xc3; } while (0)
#define amd64_leave(inst) do { *(inst)++ = (unsigned char)0xc9; } while (0)

/* SSE scalar moves: mandatory prefix 0xf2 = movsd (double), 0xf3 = movss
 * (float); opcode 0f 10 loads into xmm, 0f 11 stores from xmm.  Note the REX
 * prefix is emitted after the f2/f3 prefix, immediately before the 0f escape,
 * as the encoding requires. */
#define amd64_movsd_reg_regp(inst,reg,regp) \
	do { \
		*(inst)++ = (unsigned char)0xf2; \
		amd64_emit_rex(inst, 0, (reg), 0, (regp)); \
		*(inst)++ = (unsigned char)0x0f; \
		*(inst)++ = (unsigned char)0x10; \
		x86_regp_emit ((inst), (reg) & 0x7, (regp) & 0x7); \
	} while (0)

#define amd64_movsd_regp_reg(inst,regp,reg) \
	do { \
		*(inst)++ = (unsigned char)0xf2; \
		amd64_emit_rex(inst, 0, (reg), 0, (regp)); \
		*(inst)++ = (unsigned char)0x0f; \
		*(inst)++ = (unsigned char)0x11; \
		x86_regp_emit ((inst), (reg) & 0x7, (regp) & 0x7); \
	} while (0)

#define amd64_movss_reg_regp(inst,reg,regp) \
	do { \
		*(inst)++ = (unsigned char)0xf3; \
		amd64_emit_rex(inst, 0, (reg), 0, (regp)); \
		*(inst)++ = (unsigned char)0x0f; \
		*(inst)++ = (unsigned char)0x10; \
		x86_regp_emit ((inst), (reg) & 0x7, (regp) & 0x7); \
	} while (0)

#define amd64_movss_regp_reg(inst,regp,reg) \
	do { \
		*(inst)++ = (unsigned char)0xf3; \
		amd64_emit_rex(inst, 0, (reg), 0, (regp)); \
		*(inst)++ = (unsigned char)0x0f; \
		*(inst)++ = (unsigned char)0x11; \
		x86_regp_emit ((inst), (reg) & 0x7, (regp) & 0x7); \
	} while (0)

#define amd64_movsd_reg_membase(inst,reg,basereg,disp) \
	do { \
		*(inst)++ = (unsigned char)0xf2; \
		amd64_emit_rex(inst, 0, (reg), 0, (basereg)); \
		*(inst)++ = (unsigned char)0x0f; \
		*(inst)++ = (unsigned char)0x10; \
		x86_membase_emit ((inst), (reg) & 0x7, (basereg) & 0x7, (disp)); \
	} while (0)

#define amd64_movss_reg_membase(inst,reg,basereg,disp) \
	do { \
		*(inst)++ = (unsigned char)0xf3; \
		amd64_emit_rex(inst, 0, (reg), 0, (basereg)); \
		*(inst)++ = (unsigned char)0x0f; \
		*(inst)++ = (unsigned char)0x10; \
		x86_membase_emit ((inst), (reg) & 0x7, (basereg) & 0x7, (disp)); \
	} while (0)

#define amd64_movsd_membase_reg(inst,basereg,disp,reg) \
	do { \
		*(inst)++ = (unsigned char)0xf2; \
		amd64_emit_rex(inst, 0, (reg), 0, (basereg)); \
		*(inst)++ = (unsigned char)0x0f; \
		*(inst)++ = (unsigned char)0x11; \
		x86_membase_emit ((inst), (reg) & 0x7, (basereg) & 0x7, (disp)); \
	} while (0)

#define amd64_movss_membase_reg(inst,basereg,disp,reg) \
	do { \
		*(inst)++ = (unsigned char)0xf3; \
		amd64_emit_rex(inst, 0, (reg), 0, (basereg)); \
		*(inst)++ = (unsigned char)0x0f; \
		*(inst)++ = (unsigned char)0x11; \
		x86_membase_emit ((inst), (reg) & 0x7, (basereg) & 0x7, (disp)); \
	} while (0)

/* The original inc_reg opcode is used as the REX prefix,
 * so inc/dec are encoded as ff /0 and ff /1 instead. */
#define amd64_inc_reg_size(inst,reg,size) \
	do { \
		amd64_emit_rex ((inst),(size),0,0,(reg)); \
		*(inst)++ = (unsigned char)0xff; \
		x86_reg_emit ((inst),0,(reg) & 0x7); \
	} while (0)

#define amd64_dec_reg_size(inst,reg,size) \
	do { \
		amd64_emit_rex ((inst),(size),0,0,(reg)); \
		*(inst)++ = (unsigned char)0xff; \
		x86_reg_emit ((inst),1,(reg) & 0x7); \
	} while (0)

/* Emit `size` bytes of padding; for size > 1 a REX byte eats one slot. */
#define amd64_padding_size(inst,size) \
	do { if (size == 1) x86_padding ((inst),(size)); else { amd64_emit_rex ((inst),8,0,0,0); x86_padding((inst),(size) - 1); } } while (0)

/* x87 load from memory: dd /0 (double) or d9 /0 (float). */
#define amd64_fld_membase_size(inst,basereg,disp,is_double,size) do { \
	amd64_emit_rex ((inst),0,0,0,(basereg)); \
	*(inst)++ = (is_double) ? (unsigned char)0xdd : (unsigned char)0xd9; \
	amd64_membase_emit ((inst), 0, (basereg), (disp)); \
} while (0)

/* Indirect call through memory: ff /2. */
#define amd64_call_membase_size(inst,basereg,disp,size) do { amd64_emit_rex ((inst),0,0,0,(basereg)); *(inst)++ = (unsigned char)0xff; amd64_membase_emit ((inst),2, (basereg),(disp)); } while (0)

/*
 * SSE
 */

/* NOTE(review): truncated by the chunk boundary — the closing "} while (0)"
 * of this macro lies past the visible region. */
#define emit_opcode3(inst,op1,op2,op3) do { \
	*(inst)++ = (unsigned char)(op1); \
	*(inst)++ = (unsigned char)(op2); \
	*(inst)++ = (unsigned char)(op3); \