# (removed: web-page viewer chrome accidentally captured with this file)
# in_script.txt
# Copyright (C) 2008 Willow Schlanger
# If ever o16/o32/o64 is necessary, make it a _ insn and print it manually.
# --- begin new instructions ---
$ _cmovcc ! size_same
write("cmov");
write_cc(argvalue(2));
space();
write_arg(0);
comma();
write_arg(1);
: 0f 40 /r reg_r:osz,mem_rm:osz,imm_cc:8
asgn(arg(0), quest(_x86_cc(arg(2)), arg(1), arg(0)));
$ emms
: 0f 77 void
$ ldmxcsr ! size_none
: 0f ae /2 mem_rm:32 ! mod_mem
# Table format depends on whether rex.w is used or not.
$ fxrstor ! size_none
: 0f ae /1 mem_rm:512b ! mod_mem
# Table format depends on whether rex.w is used or not.
$ fxsave ! size_none
: 0f ae /0 mem_rm:512b ! mod_mem
$ fxtract
: d9 f4 void
# --- end new instructions ---
# Intel docs say to use /0, but /1../7 seem to work the same.
$ _setcc ! size_none
write("set");
write_cc(argvalue(1));
space();
write_arg(0);
: 0f 90 /z mem_rm:8,imm_cc:8
asgn(arg(0), zx$byte(_x86_cc(arg(1))));
# nasm-style syntax. valid in 64-bit mode? assuming yes.
$ _nopmb
write("nop");
space();
write_args();
: 0f 1f /0 mem_rm:osz
asgn(void); // is implemented - just no-op.
# valid in 64-bit mode? assuming yes.
$ _usalc
write("salc");
: d6 void
# valid in 64-bit mode? assuming yes.
$ _uint1
write("int1");
space();
: f1 void
$ _aad ! size_none
if((U1)get_imm64() == 0x0a)
write("aad");
else
{
write("aad");
space();
write_args();
}
: d5 imm_imm:8 ! o_no64
$ _aam ! size_none
if((U1)get_imm64() == 0x0a)
write("aam");
else
{
write("aam");
space();
write_args();
}
: d4 imm_imm:8 ! o_no64
$ das
: 2f void ! o_no64
$ daa
: 27 void ! o_no64
$ aaa
: 37 void ! o_no64
$ aas
: 3f void ! o_no64
$ _sxacc # (r/e)ax <- sx((r/e)ax.lo)
if(get_osz() == argsize_16)
write("cbw");
else
if(get_osz() == argsize_32)
write("cwde");
else
write("cdqe");
: 98 void
asgn(x86_acc, sx$osz(x86_acc_lo));
$ _sxdax # (r/e)dx:(r/e)ax <- sx((r/e)ax)
if(get_osz() == argsize_16)
write("cwd");
else
if(get_osz() == argsize_32)
write("cdq");
else
write("cqo");
return true;
: 99 void
asgn(x86_dax, sx$osz_times_2(x86_acc));
# According to Wikipedia, some older Intel processors do not support sahf/lahf in 64-bit mode.
$ sahf
: 9e void
# According to Wikipedia, some older Intel processors do not support sahf/lahf in 64-bit mode.
$ lahf
: 9f void
$ cmc
: f5 void
asgn(x86_cf, not(x86_cf));
$ clc
: f8 void
asgn(x86_cf, 0);
$ stc
: f9 void
asgn(x86_cf, 1);
$ cld
: fc void
asgn(x86_df, 0);
$ std
: fd void
asgn(x86_cf, 1);
# diasm to e.g. lea eax,[<size> bx+si]
# -- should this be a special insn?
$ lea ! size_none
: 8d /r reg_r:osz,mem_rm:asz ! mod_mem,ea_itself
asgn(arg(0), arg(1));
$ bound ! size_none
: 62 /r reg_r:osz,mem_rm:osz.osz ! mod_mem,o_no64
$ into
: ce void ! o_no64
# --- begin imul opcodes ---
# _cmul is officially signed but it can be used for unsigned too. destination operand has same size
# as source operands. _cmul is known as 'imul' in intel docs.
$ _cmul3 ! size_same
write("imul");
space();
write_args();
: 69 /r reg_r:osz,mem_rm:osz,imm_imm:osz
: 6b /r reg_r:osz,mem_rm:osz,imm_imm:osz ! sx_yes
asgn(arg(0), cmul(arg(1), arg(2)));
asgn(x86_sf, undefined);
asgn(x86_zf, undefined);
asgn(x86_af, undefined);
asgn(x86_pf, undefined);
asgn(x86_cf, smul_overflow(arg(1), arg(2)));
asgn(x86_of, smul_overflow(arg(1), arg(2)));
$ _cmul2 ! size_same
write("imul");
space();
write_args();
: 0f af /r reg_r:osz,mem_rm:osz
asgn(arg(0), cmul(arg(0), arg(1)));
asgn(x86_sf, undefined);
asgn(x86_zf, undefined);
asgn(x86_af, undefined);
asgn(x86_pf, undefined);
asgn(x86_cf, smul_overflow(arg(0), arg(1)));
asgn(x86_of, smul_overflow(arg(0), arg(1)));
$ _imulb
write("imul");
space();
write_args();
: f6 /5 mem_rm:8
asgn(x86_ax, smul(x86_al, arg(0)));
asgn(x86_sf, undefined);
asgn(x86_zf, undefined);
asgn(x86_af, undefined);
asgn(x86_pf, undefined);
asgn(x86_cf, smul_overflow(x86_al, arg(0)));
asgn(x86_of, smul_overflow(x86_al, arg(0)));
$ imul
: f7 /5 mem_rm:osz
asgn(x86_dax, smul(x86_acc, arg(0)));
asgn(x86_sf, undefined);
asgn(x86_zf, undefined);
asgn(x86_af, undefined);
asgn(x86_pf, undefined);
asgn(x86_cf, smul_overflow(x86_acc, arg(0)));
asgn(x86_of, smul_overflow(x86_acc, arg(0)));
# --- end imul opcodes ---
$ _mulb
write("mul");
space();
write_args();
: f6 /4 mem_rm:8
asgn(x86_ax, umul(x86_al, arg(0)));
asgn(x86_sf, undefined);
asgn(x86_zf, undefined);
asgn(x86_af, undefined);
asgn(x86_pf, undefined);
asgn(x86_cf, umul_overflow(x86_al, arg(0)));
asgn(x86_of, umul_overflow(x86_al, arg(0)));
$ mul
: f7 /4 mem_rm:osz
asgn(x86_dax, umul(x86_acc, arg(0)));
asgn(x86_sf, undefined);
asgn(x86_zf, undefined);
asgn(x86_af, undefined);
asgn(x86_pf, undefined);
asgn(x86_cf, umul_overflow(x86_acc, arg(0)));
asgn(x86_of, umul_overflow(x86_acc, arg(0)));
# bugfix 12/26/2008 (forgot flag outputs)
$ _divb
write("div");
space();
write_args();
: f6 /6 mem_rm:8
asgn(tmp(q), udiv(x86_ax, arg(0)));
asgn(tmp(r), umod(x86_ax, arg(0)));
asgn(x86_al, tmp(q));
asgn(x86_ah, tmp(r));
asgn(x86_of, undefined);
asgn(x86_sf, undefined);
asgn(x86_zf, undefined);
asgn(x86_af, undefined);
asgn(x86_pf, undefined);
asgn(x86_cf, undefined);
# bugfix 12/26/2008 (was using ax)
$ div
: f7 /6 mem_rm:osz
asgn(tmp(q), udiv(x86_dax, arg(0)));
asgn(tmp(r), umod(x86_dax, arg(0)));
asgn(x86_al, tmp(q));
asgn(x86_ah, tmp(r));
asgn(x86_of, undefined);
asgn(x86_sf, undefined);
asgn(x86_zf, undefined);
asgn(x86_af, undefined);
asgn(x86_pf, undefined);
asgn(x86_cf, undefined);
$ _idivb
write("idiv");
space();
write_args();
: f6 /7 mem_rm:8
asgn(tmp(q), sdiv(x86_ax, arg(0)));
asgn(tmp(r), smod(x86_ax, arg(0)));
asgn(x86_al, tmp(q));
asgn(x86_ah, tmp(r));
asgn(x86_of, undefined);
asgn(x86_sf, undefined);
asgn(x86_zf, undefined);
asgn(x86_af, undefined);
asgn(x86_pf, undefined);
asgn(x86_cf, undefined);
$ idiv
: f7 /7 mem_rm:osz
asgn(tmp(q), sdiv(x86_dax, arg(0)));
asgn(tmp(r), smod(x86_dax, arg(0)));
asgn(x86_acc, tmp(q));
asgn(x86_dat, tmp(r));
asgn(x86_of, undefined);
asgn(x86_sf, undefined);
asgn(x86_zf, undefined);
asgn(x86_af, undefined);
asgn(x86_pf, undefined);
asgn(x86_cf, undefined);
# note: order of arguments is arbitrary.
$ test ! size_same
: 84 /r mem_rm:8,reg_r:8
: 85 /r mem_rm:osz,reg_r:osz
: a8 reg_r:8:0,imm_imm:8
: a9 reg_r:osz:0,imm_imm:osz_old
: f6 /0 mem_rm:8,imm_imm:8
: f7 /0 mem_rm:osz,imm_imm:osz_old
: f6 /1 mem_rm:8,imm_imm:8 ! xasm_skip
: f7 /1 mem_rm:osz,imm_imm:osz_old ! xasm_skip
asgn(x86_of, 0);
asgn(x86_af, undefined);
asgn(x86_cf, 0);
asgn(tmp(result), bitand(arg(0), arg(1)));
asgn(x86_sf, sign(tmp(result)));
asgn(x86_zf, is_zero(tmp(result)));
asgn(x86_pf, _x86_parity(trunc$byte(tmp(result))));
# note: order of arguments is arbitrary. but, our code requires rm be first so we can allow lock.
# This is _xchg not _xchg because when disassembling, check to see if basecode is 0x90 and print
# nop if so -- if rex does not make the other argument r8.
# Also because the order of arguments is arbitrary, some disassemblers may flip the
# arguments over what we have here.
$ _xchg ! size_same
if(basecode() == 0x90 && argvalue(1) == 0)
write("nop");
else
{
write("xchg");
space();
write_args();
}
: 86 /r mem_rm:8,reg_r:8 ! fx_locked
: 87 /r mem_rm:osz,reg_r:osz ! fx_locked
: 90 reg_r:osz:0,reg_basecode:osz
asgn(tmp(x), arg(0));
asgn(arg(0), arg(1));
asgn(arg(1), tmp(x));
$ mov ! size_same
: 88 /r mem_rm:8,reg_r:8
: 8a /r reg_r:8,mem_rm:8
: 89 /r mem_rm:osz,reg_r:osz
: 8b /r reg_r:osz,mem_rm:osz
: a0 reg_r:8:0,mem_disp:8
: a2 mem_disp:8,reg_r:8:0
: a1 reg_r:osz:0,mem_disp:osz
: a3 mem_disp:osz,reg_r:osz:0
: b0 reg_basecode:8,imm_imm:8
: b8 reg_basecode:osz,imm_imm:osz
# Note: reportedly, on the 486, /1../7 are the same as /0 for c6. Not on my cpu, however.
: c6 /0 mem_rm:8,imm_imm:8
: c7 /0 mem_rm:osz_old,imm_imm:osz_old
asgn(arg(0), arg(1));
$ not
: f6 /2 mem_rm:8 ! fx_lockable
: f7 /2 mem_rm:osz ! fx_lockable
asgn(arg(0), bitnot(arg(0)));
$ neg
: f6 /3 mem_rm:8 ! fx_lockable
: f7 /3 mem_rm:osz ! fx_lockable
asgn(x86_of, _x86_sub_of(0, arg(0)));
asgn(x86_af, _x86_sub_af(0, trunc$byte(arg(0))));
asgn(x86_cf, _x86_sub_cf(0, arg(0)));
asgn(arg(0), neg(arg(0)));
asgn(x86_sf, sign(arg(0)));
asgn(x86_zf, is_zero(arg(0)));
asgn(x86_pf, _x86_parity(trunc$byte(arg(0))));
# -- begin arithmatic opcodes ---
$ add ! size_same
: 80 /0 mem_rm:8,imm_imm:8 ! fx_lockable
: 82 /0 mem_rm:8,imm_imm:8 ! fx_lockable,xasm_skip
: 81 /0 mem_rm:osz,imm_imm:osz_old ! fx_lockable
: 83 /0 mem_rm:osz,imm_imm:osz_old ! fx_lockable,sx_yes
: 0 /r mem_rm:8,reg_r:8 ! fx_lockable
: 2 /r reg_r:8,mem_rm:8 ! fx_lockable
: 1 /r mem_rm:osz,reg_r:osz ! fx_lockable
: 3 /r reg_r:osz,mem_rm:osz ! fx_lockable
: 4 reg_r:8:0,imm_imm:8
: 5 reg_r:osz:0,imm_imm:osz_old
asgn(x86_of, _x86_add_of(arg(0), arg(1)));
asgn(x86_af, _x86_add_af(trunc$byte(arg(0)), trunc$byte(arg(1))));
asgn(x86_cf, _x86_add_cf(arg(0), arg(1)));
asgn(arg(0), add(arg(0), arg(1)));
asgn(x86_sf, sign(arg(0)));
asgn(x86_zf, is_zero(arg(0)));
asgn(x86_pf, _x86_parity(trunc$byte(arg(0))));
$ or ! size_same
: 80 /1 mem_rm:8,imm_imm:8 ! fx_lockable
: 82 /1 mem_rm:8,imm_imm:8 ! fx_lockable,xasm_skip
: 81 /1 mem_rm:osz,imm_imm:osz_old ! fx_lockable
: 83 /1 mem_rm:osz,imm_imm:osz_old ! fx_lockable,sx_yes
: 8 /r mem_rm:8,reg_r:8 ! fx_lockable
: a /r reg_r:8,mem_rm:8 ! fx_lockable
: 9 /r mem_rm:osz,reg_r:osz ! fx_lockable
: b /r reg_r:osz,mem_rm:osz ! fx_lockable
: c reg_r:8:0,imm_imm:8
: d reg_r:osz:0,imm_imm:osz_old
asgn(x86_of, 0);
asgn(x86_af, undefined);
asgn(x86_cf, 0);
asgn(arg(0), bitor(arg(0), arg(1)));
asgn(x86_sf, sign(arg(0)));
asgn(x86_zf, is_zero(arg(0)));
asgn(x86_pf, _x86_parity(trunc$byte(arg(0))));
$ adc ! size_same
: 80 /2 mem_rm:8,imm_imm:8 ! fx_lockable
: 82 /2 mem_rm:8,imm_imm:8 ! fx_lockable,xasm_skip
: 81 /2 mem_rm:osz,imm_imm:osz_old ! fx_lockable
: 83 /2 mem_rm:osz,imm_imm:osz_old ! fx_lockable,sx_yes
: 10 /r mem_rm:8,reg_r:8 ! fx_lockable
: 12 /r reg_r:8,mem_rm:8 ! fx_lockable
: 11 /r mem_rm:osz,reg_r:osz ! fx_lockable
: 13 /r reg_r:osz,mem_rm:osz ! fx_lockable
: 14 reg_r:8:0,imm_imm:8
: 15 reg_r:osz:0,imm_imm:osz_old
asgn(tmp(old_cf), x86_cf);
# (removed: web-page viewer chrome captured with this file. NOTE: the adc
# semantics above are truncated in this copy -- tmp(old_cf) is saved but the
# remaining asgn lines are missing; recover them from the original source.)