📄 translate.c
字号:
/* NOTE(review): the lines below are the tail of gen_lea_modrm(); its opening
   (ModRM/SIB decode that sets up 'disp', 'base', 'index', 'havesib',
   'must_add_seg', 'override') lies before the visible region, so this
   fragment is reproduced token-for-token with comments only. */
            if ((int32_t)disp == disp)
                gen_op_movq_A0_im(disp); /* displacement fits in 32 bits */
            else
                gen_op_movq_A0_im64(disp >> 32, disp);
        } else
#endif
        {
            gen_op_movl_A0_im(disp);
        }
    }
    /* XXX: index == 4 is always invalid */
    if (havesib && (index != 4 || scale != 0)) {
#ifdef TARGET_X86_64
        if (s->aflag == 2) {
            /* 64-bit address size: add scaled index to A0 */
            gen_op_addq_A0_reg_sN[scale][index]();
        } else
#endif
        {
            gen_op_addl_A0_reg_sN[scale][index]();
        }
    }
    if (must_add_seg) {
        if (override < 0) {
            /* no explicit segment override: EBP/ESP-based accesses
               default to SS, everything else to DS */
            if (base == R_EBP || base == R_ESP)
                override = R_SS;
            else
                override = R_DS;
        }
#ifdef TARGET_X86_64
        if (s->aflag == 2) {
            gen_op_addq_A0_seg(offsetof(CPUX86State,segs[override].base));
        } else
#endif
        {
            gen_op_addl_A0_seg(offsetof(CPUX86State,segs[override].base));
        }
    }
} else {
    /* 16-bit addressing modes */
    switch (mod) {
    case 0:
        if (rm == 6) {
            /* mod=00 rm=110: absolute 16-bit displacement, no base reg */
            disp = lduw_code(s->pc);
            s->pc += 2;
            gen_op_movl_A0_im(disp);
            rm = 0; /* avoid SS override */
            goto no_rm;
        } else {
            disp = 0;
        }
        break;
    case 1:
        /* 8-bit sign-extended displacement */
        disp = (int8_t)ldub_code(s->pc++);
        break;
    default:
    case 2:
        /* 16-bit displacement */
        disp = lduw_code(s->pc);
        s->pc += 2;
        break;
    }
    /* base/index register pair selected by rm (16-bit ModRM table) */
    switch(rm) {
    case 0:
        gen_op_movl_A0_reg[R_EBX]();
        gen_op_addl_A0_reg_sN[0][R_ESI]();
        break;
    case 1:
        gen_op_movl_A0_reg[R_EBX]();
        gen_op_addl_A0_reg_sN[0][R_EDI]();
        break;
    case 2:
        gen_op_movl_A0_reg[R_EBP]();
        gen_op_addl_A0_reg_sN[0][R_ESI]();
        break;
    case 3:
        gen_op_movl_A0_reg[R_EBP]();
        gen_op_addl_A0_reg_sN[0][R_EDI]();
        break;
    case 4:
        gen_op_movl_A0_reg[R_ESI]();
        break;
    case 5:
        gen_op_movl_A0_reg[R_EDI]();
        break;
    case 6:
        gen_op_movl_A0_reg[R_EBP]();
        break;
    default:
    case 7:
        gen_op_movl_A0_reg[R_EBX]();
        break;
    }
    if (disp != 0)
        gen_op_addl_A0_im(disp);
    /* 16-bit effective addresses wrap at 64K */
    gen_op_andl_A0_ffff();
no_rm:
    if (must_add_seg) {
        if (override < 0) {
            /* BP-based forms (rm 2, 3, 6) default to SS, others to DS */
            if (rm == 2 || rm == 3 || rm == 6)
                override = R_SS;
            else
                override = R_DS;
        }
        gen_op_addl_A0_seg(offsetof(CPUX86State,segs[override].base));
    }
}
opreg = OR_A0;
disp = 0;
*reg_ptr = opreg;
*offset_ptr = disp;
}

/* Walk the addressing part of a ModRM byte purely to advance s->pc past any
   SIB byte and displacement; no micro-ops are emitted.  Returns immediately
   for register operands (mod == 3).  (Name suggests it serves NOP-style
   instructions that ignore their memory operand.) */
static void gen_nop_modrm(DisasContext *s, int modrm)
{
    int mod, rm, base, code;
    mod = (modrm >> 6) & 3;
    if (mod == 3)
        return;
    rm = modrm & 7;
    if (s->aflag) {
        /* 32/64-bit addressing: a SIB byte follows when base field is 4 */
        base = rm;
        if (base == 4) {
            code = ldub_code(s->pc++);
            base = (code & 7);
        }
        switch (mod) {
        case 0:
            if (base == 5) {
                s->pc += 4; /* disp32 with no base register */
            }
            break;
        case 1:
            s->pc++;        /* disp8 */
            break;
        default:
        case 2:
            s->pc += 4;     /* disp32 */
            break;
        }
    } else {
        /* 16-bit addressing */
        switch (mod) {
        case 0:
            if (rm == 6) {
                s->pc += 2; /* absolute disp16 */
            }
            break;
        case 1:
            s->pc++;        /* disp8 */
            break;
        default:
        case 2:
            s->pc += 2;     /* disp16 */
            break;
        }
    }
}

/* used for LEA and MOV AX, mem */
/* Add the current data-segment base (or the active override segment's base)
   to A0, when either s->addseg requires it or an explicit override prefix
   is present. */
static void gen_add_A0_ds_seg(DisasContext *s)
{
    int override, must_add_seg;
    must_add_seg = s->addseg;
    override = R_DS;
    if (s->override >= 0) {
        /* an override prefix always forces the segment addition */
        override = s->override;
        must_add_seg = 1;
    } else {
        override = R_DS;
    }
    if (must_add_seg) {
#ifdef TARGET_X86_64
        if (CODE64(s)) {
            gen_op_addq_A0_seg(offsetof(CPUX86State,segs[override].base));
        } else
#endif
        {
            gen_op_addl_A0_seg(offsetof(CPUX86State,segs[override].base));
        }
    }
}

/* generate modrm memory load or store of 'reg'. TMP0 is used if
   reg != OR_TMP0 */
/* mod == 3 is a register-to-register move; otherwise the effective address
   is computed via gen_lea_modrm() and the value is loaded from / stored to
   memory through A0.  'ot' selects the operand size, 'is_store' the
   direction. */
static void gen_ldst_modrm(DisasContext *s, int modrm, int ot, int reg, int is_store)
{
    int mod, rm, opreg, disp;
    mod = (modrm >> 6) & 3;
    rm = (modrm & 7) | REX_B(s);
    if (mod == 3) {
        if (is_store) {
            if (reg != OR_TMP0)
                gen_op_mov_TN_reg[ot][0][reg]();
            gen_op_mov_reg_T0[ot][rm]();
        } else {
            gen_op_mov_TN_reg[ot][0][rm]();
            if (reg != OR_TMP0)
                gen_op_mov_reg_T0[ot][reg]();
        }
    } else {
        gen_lea_modrm(s, modrm, &opreg, &disp);
        if (is_store) {
            if (reg != OR_TMP0)
                gen_op_mov_TN_reg[ot][0][reg]();
            gen_op_st_T0_A0[ot + s->mem_index]();
        } else {
            gen_op_ld_T0_A0[ot + s->mem_index]();
            if (reg != OR_TMP0)
                gen_op_mov_reg_T0[ot][reg]();
        }
    }
}

/* Fetch an immediate operand of size 'ot' from the instruction stream and
   advance s->pc past it. */
static inline uint32_t insn_get(DisasContext *s, int ot)
{
    uint32_t ret;
    switch(ot) {
    case OT_BYTE:
        ret = ldub_code(s->pc);
        s->pc++;
        break;
    case OT_WORD:
        ret = lduw_code(s->pc);
        s->pc += 2;
        break;
    default:
    case OT_LONG:
        ret = ldl_code(s->pc);
        s->pc += 4;
        break;
    }
    return ret;
}

/* Size in bytes of an immediate for operand type 'ot': 1 << ot up to
   OT_LONG, capped at 4 beyond that (quad operands never carry an 8-byte
   immediate here). */
static inline int insn_const_size(unsigned int ot)
{
    if (ot <= OT_LONG)
        return 1 << ot;
    else
        return 4;
}

/* Emit a jump to 'eip'.  When the target lies on the same page as the
   current TB (or the page the instruction ends on), chain directly to the
   next TB via slot 'tb_num' (0 or 1); otherwise fall back to a plain
   gen_jmp_im + end-of-block. */
static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip)
{
    TranslationBlock *tb;
    target_ulong pc;
    pc = s->cs_base + eip;
    tb = s->tb;
    /* NOTE: we handle the case where the TB spans two pages here */
    if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) ||
        (pc & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK)) {
        /* jump to same page: we can use a direct jump */
        if (tb_num == 0)
            gen_op_goto_tb0(TBPARAM(tb));
        else
            gen_op_goto_tb1(TBPARAM(tb));
        gen_jmp_im(eip);
        /* T0 encodes the source TB pointer plus the chain slot index */
        gen_op_movl_T0_im((long)tb + tb_num);
        gen_op_exit_tb();
    } else {
        /* jump to another page: currently not optimized */
        gen_jmp_im(eip);
        gen_eob(s);
    }
}

/* Emit a conditional jump: condition 'b' taken -> 'val', not taken ->
   'next_eip'.  The low bit of 'b' inverts the condition (handled by
   swapping the two targets).  When s->jmp_opt allows TB chaining, a
   specialized test from gen_jcc_sub[] is used for cc_op values whose flags
   can be tested directly; otherwise the generic gen_setcc_slow[] path
   computes the condition into T0. */
static inline void gen_jcc(DisasContext *s, int b, target_ulong val, target_ulong next_eip)
{
    TranslationBlock *tb;
    int inv, jcc_op;
    GenOpFunc1 *func;
    target_ulong tmp;
    int l1, l2;
    inv = b & 1;
    jcc_op = (b >> 1) & 7;
    if (s->jmp_opt) {
        switch(s->cc_op) {
            /* we optimize the cmp/jcc case */
        case CC_OP_SUBB:
        case CC_OP_SUBW:
        case CC_OP_SUBL:
        case CC_OP_SUBQ:
            func = gen_jcc_sub[s->cc_op - CC_OP_SUBB][jcc_op];
            break;
            /* some jumps are easy to compute */
        case CC_OP_ADDB:
        case CC_OP_ADDW:
        case CC_OP_ADDL:
        case CC_OP_ADDQ:
        case CC_OP_ADCB:
        case CC_OP_ADCW:
        case CC_OP_ADCL:
        case CC_OP_ADCQ:
        case CC_OP_SBBB:
        case CC_OP_SBBW:
        case CC_OP_SBBL:
        case CC_OP_SBBQ:
        case CC_OP_LOGICB:
        case CC_OP_LOGICW:
        case CC_OP_LOGICL:
        case CC_OP_LOGICQ:
        case CC_OP_INCB:
        case CC_OP_INCW:
        case CC_OP_INCL:
        case CC_OP_INCQ:
        case CC_OP_DECB:
        case CC_OP_DECW:
        case CC_OP_DECL:
        case CC_OP_DECQ:
        case CC_OP_SHLB:
        case CC_OP_SHLW:
        case CC_OP_SHLL:
        case CC_OP_SHLQ:
        case CC_OP_SARB:
        case CC_OP_SARW:
        case CC_OP_SARL:
        case CC_OP_SARQ:
            /* only Z and S can be read straight from the result; '% 4'
               reduces the cc_op to its operand-size index */
            switch(jcc_op) {
            case JCC_Z:
                func = gen_jcc_sub[(s->cc_op - CC_OP_ADDB) % 4][jcc_op];
                break;
            case JCC_S:
                func = gen_jcc_sub[(s->cc_op - CC_OP_ADDB) % 4][jcc_op];
                break;
            default:
                func = NULL;
                break;
            }
            break;
        default:
            func = NULL;
            break;
        }
        if (s->cc_op != CC_OP_DYNAMIC) {
            gen_op_set_cc_op(s->cc_op);
            s->cc_op = CC_OP_DYNAMIC;
        }
        if (!func) {
            /* generic path: compute the condition into T0, then test T0 */
            gen_setcc_slow[jcc_op]();
            func = gen_op_jnz_T0_label;
        }
        if (inv) {
            /* inverted condition: swap taken/not-taken targets */
            tmp = val;
            val = next_eip;
            next_eip = tmp;
        }
        tb = s->tb;
        l1 = gen_new_label();
        func(l1);
        gen_goto_tb(s, 0, next_eip);
        gen_set_label(l1);
        gen_goto_tb(s, 1, val);
        s->is_jmp = 3;
    } else {
        /* no chaining: emit explicit jumps to immediate EIP values */
        if (s->cc_op != CC_OP_DYNAMIC) {
            gen_op_set_cc_op(s->cc_op);
            s->cc_op = CC_OP_DYNAMIC;
        }
        gen_setcc_slow[jcc_op]();
        if (inv) {
            tmp = val;
            val = next_eip;
            next_eip = tmp;
        }
        l1 = gen_new_label();
        l2 = gen_new_label();
        gen_op_jnz_T0_label(l1);
        gen_jmp_im(next_eip);
        gen_op_jmp_label(l2);
        gen_set_label(l1);
        gen_jmp_im(val);
        gen_set_label(l2);
        gen_eob(s);
    }
}

/* Emit ops that set T0 to the value of condition 'b' (SETcc).  Uses the
   specialized gen_setcc_sub[] table when the current cc_op allows a direct
   test, otherwise the generic gen_setcc_slow[] path; the low bit of 'b'
   inverts the result afterwards. */
static void gen_setcc(DisasContext *s, int b)
{
    int inv, jcc_op;
    GenOpFunc *func;
    inv = b & 1;
    jcc_op = (b >> 1) & 7;
    switch(s->cc_op) {
        /* we optimize the cmp/jcc case */
    case CC_OP_SUBB:
    case CC_OP_SUBW:
    case CC_OP_SUBL:
    case CC_OP_SUBQ:
        func = gen_setcc_sub[s->cc_op - CC_OP_SUBB][jcc_op];
        if (!func)
            goto slow_jcc;
        break;
        /* some jumps are easy to compute */
    case CC_OP_ADDB:
    case CC_OP_ADDW:
    case CC_OP_ADDL:
    case CC_OP_ADDQ:
    case CC_OP_LOGICB:
    case CC_OP_LOGICW:
    case CC_OP_LOGICL:
    case CC_OP_LOGICQ:
    case CC_OP_INCB:
    case CC_OP_INCW:
    case CC_OP_INCL:
    case CC_OP_INCQ:
    case CC_OP_DECB:
    case CC_OP_DECW:
    case CC_OP_DECL:
    case CC_OP_DECQ:
    case CC_OP_SHLB:
    case CC_OP_SHLW:
    case CC_OP_SHLL:
    case CC_OP_SHLQ:
        /* only Z and S are derivable directly from the stored result */
        switch(jcc_op) {
        case JCC_Z:
            func = gen_setcc_sub[(s->cc_op - CC_OP_ADDB) % 4][jcc_op];
            break;
        case JCC_S:
            func = gen_setcc_sub[(s->cc_op - CC_OP_ADDB) % 4][jcc_op];
            break;
        default:
            goto slow_jcc;
        }
        break;
    default:
    slow_jcc:
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        func = gen_setcc_slow[jcc_op];
        break;
    }
    func();
    if (inv) {
        gen_op_xor_T0_1();
    }
}

/* move T0 to seg_reg and compute if the CPU state may change. Never call
   this function with seg_reg == R_CS */
static void gen_movl_seg_T0(DisasContext *s, int seg_reg, target_ulong cur_eip)
{
    if (s->pe && !s->vm86) {
        /* protected mode: the full load (with checks) may fault, so the
           current EIP must be committed first */
        /* XXX: optimize by finding processor state dynamically */
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_jmp_im(cur_eip);
        gen_op_movl_seg_T0(seg_reg);
        /* abort translation because the addseg value may change or
           because ss32 may change. For R_SS, translation must always
           stop as a special handling must be done to disable hardware
           interrupts for the next instruction */
        if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS))
            s->is_jmp = 3;
    } else {
        /* real/vm86 mode: simple base update, no descriptor checks */
        gen_op_movl_seg_T0_vm(offsetof(CPUX86State,segs[seg_reg]));
        if (seg_reg == R_SS)
            s->is_jmp = 3;
    }
}

/* Adjust the stack pointer by 'addend', using the op matching the current
   stack-pointer width (64-bit / 32-bit / 16-bit).
   NOTE(review): the function body continues past the visible region. */
static inline void gen_stack_update(DisasContext *s, int addend)
{
#ifdef TARGET_X86_64
    if (CODE64(s)) {
        if (addend == 8)
            gen_op_addq_ESP_8(); /* common push/pop case gets a dedicated op */
        else
            gen_op_addq_ESP_im(addend);
    } else
#endif
    if (s->ss32) {
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -