📄 translate.c.svn-base
字号:
so we know if this is an EOB or not ... let's assume it's not for now. */ }#endif return 0;}static inline int svm_is_rep(int prefixes){ return ((prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) ? 8 : 0);}static inline intgen_svm_check_intercept_param(DisasContext *s, target_ulong pc_start, uint64_t type, uint64_t param){ if(!(s->flags & (INTERCEPT_SVM_MASK))) /* no SVM activated */ return 0; switch(type) { /* CRx and DRx reads/writes */ case SVM_EXIT_READ_CR0 ... SVM_EXIT_EXCP_BASE - 1: if (s->cc_op != CC_OP_DYNAMIC) { gen_op_set_cc_op(s->cc_op); s->cc_op = CC_OP_DYNAMIC; } gen_jmp_im(pc_start - s->cs_base); SVM_movq_T1_im(param); gen_op_geneflags(); gen_op_svm_check_intercept_param((uint32_t)(type >> 32), (uint32_t)type); /* this is a special case as we do not know if the interception occurs so we assume there was none */ return 0; case SVM_EXIT_MSR: if(s->flags & (1ULL << INTERCEPT_MSR_PROT)) { if (s->cc_op != CC_OP_DYNAMIC) { gen_op_set_cc_op(s->cc_op); s->cc_op = CC_OP_DYNAMIC; } gen_jmp_im(pc_start - s->cs_base); SVM_movq_T1_im(param); gen_op_geneflags(); gen_op_svm_check_intercept_param((uint32_t)(type >> 32), (uint32_t)type); /* this is a special case as we do not know if the interception occurs so we assume there was none */ return 0; } break; default: if(s->flags & (1ULL << ((type - SVM_EXIT_INTR) + INTERCEPT_INTR))) { if (s->cc_op != CC_OP_DYNAMIC) { gen_op_set_cc_op(s->cc_op); s->cc_op = CC_OP_EFLAGS; } gen_jmp_im(pc_start - s->cs_base); SVM_movq_T1_im(param); gen_op_geneflags(); gen_op_svm_vmexit(type >> 32, type); /* we can optimize this one so TBs don't get longer than up to vmexit */ gen_eob(s); return 1; } } return 0;}static inline intgen_svm_check_intercept(DisasContext *s, target_ulong pc_start, uint64_t type){ return gen_svm_check_intercept_param(s, pc_start, type, 0);}static inline void gen_stack_update(DisasContext *s, int addend){#ifdef TARGET_X86_64 if (CODE64(s)) { if (addend == 8) gen_op_addq_ESP_8(); else gen_op_addq_ESP_im(addend); } else#endif 
if (s->ss32) { if (addend == 2) gen_op_addl_ESP_2(); else if (addend == 4) gen_op_addl_ESP_4(); else gen_op_addl_ESP_im(addend); } else { if (addend == 2) gen_op_addw_ESP_2(); else if (addend == 4) gen_op_addw_ESP_4(); else gen_op_addw_ESP_im(addend); }}/* generate a push. It depends on ss32, addseg and dflag */static void gen_push_T0(DisasContext *s){#ifdef TARGET_X86_64 if (CODE64(s)) { gen_op_movq_A0_reg[R_ESP](); if (s->dflag) { gen_op_subq_A0_8(); gen_op_st_T0_A0[OT_QUAD + s->mem_index](); } else { gen_op_subq_A0_2(); gen_op_st_T0_A0[OT_WORD + s->mem_index](); } gen_op_movq_ESP_A0(); } else#endif { gen_op_movl_A0_reg[R_ESP](); if (!s->dflag) gen_op_subl_A0_2(); else gen_op_subl_A0_4(); if (s->ss32) { if (s->addseg) { gen_op_movl_T1_A0(); gen_op_addl_A0_SS(); } } else { gen_op_andl_A0_ffff(); gen_op_movl_T1_A0(); gen_op_addl_A0_SS(); } gen_op_st_T0_A0[s->dflag + 1 + s->mem_index](); if (s->ss32 && !s->addseg) gen_op_movl_ESP_A0(); else gen_op_mov_reg_T1[s->ss32 + 1][R_ESP](); }}/* generate a push. It depends on ss32, addseg and dflag *//* slower version for T1, only used for call Ev */static void gen_push_T1(DisasContext *s){#ifdef TARGET_X86_64 if (CODE64(s)) { gen_op_movq_A0_reg[R_ESP](); if (s->dflag) { gen_op_subq_A0_8(); gen_op_st_T1_A0[OT_QUAD + s->mem_index](); } else { gen_op_subq_A0_2(); gen_op_st_T0_A0[OT_WORD + s->mem_index](); } gen_op_movq_ESP_A0(); } else#endif { gen_op_movl_A0_reg[R_ESP](); if (!s->dflag) gen_op_subl_A0_2(); else gen_op_subl_A0_4(); if (s->ss32) { if (s->addseg) { gen_op_addl_A0_SS(); } } else { gen_op_andl_A0_ffff(); gen_op_addl_A0_SS(); } gen_op_st_T1_A0[s->dflag + 1 + s->mem_index](); if (s->ss32 && !s->addseg) gen_op_movl_ESP_A0(); else gen_stack_update(s, (-2) << s->dflag); }}/* two step pop is necessary for precise exceptions */static void gen_pop_T0(DisasContext *s){#ifdef TARGET_X86_64 if (CODE64(s)) { gen_op_movq_A0_reg[R_ESP](); gen_op_ld_T0_A0[(s->dflag ? 
OT_QUAD : OT_WORD) + s->mem_index]();
    } else
#endif
    {
        gen_op_movl_A0_reg[R_ESP]();
        if (s->ss32) {
            if (s->addseg)
                gen_op_addl_A0_SS();
        } else {
            gen_op_andl_A0_ffff();
            gen_op_addl_A0_SS();
        }
        gen_op_ld_T0_A0[s->dflag + 1 + s->mem_index]();
    }
}

/* Second half of a pop: adjust the stack pointer after the value has
   been loaded (the two step pop is necessary for precise exceptions). */
static void gen_pop_update(DisasContext *s)
{
#ifdef TARGET_X86_64
    if (CODE64(s) && s->dflag) {
        gen_stack_update(s, 8);
    } else
#endif
    {
        gen_stack_update(s, 2 << s->dflag);
    }
}

/* Load A0 with the stack address (SS-segmented when addseg is set);
   the raw, unsegmented ESP value is left in T1. */
static void gen_stack_A0(DisasContext *s)
{
    gen_op_movl_A0_ESP();
    if (!s->ss32)
        gen_op_andl_A0_ffff();
    gen_op_movl_T1_A0();
    if (s->addseg)
        gen_op_addl_A0_seg(offsetof(CPUX86State,segs[R_SS].base));
}

/* PUSHA: store the eight general registers below the current stack top.
   NOTE: wrap around in 16 bit not fully handled */
static void gen_pusha(DisasContext *s)
{
    int i;
    gen_op_movl_A0_ESP();
    gen_op_addl_A0_im(-16 << s->dflag);
    if (!s->ss32)
        gen_op_andl_A0_ffff();
    /* T1 keeps the final (unsegmented) ESP for the writeback below */
    gen_op_movl_T1_A0();
    if (s->addseg)
        gen_op_addl_A0_seg(offsetof(CPUX86State,segs[R_SS].base));
    for(i = 0;i < 8; i++) {
        /* registers 7..0 stored from the lowest address upward */
        gen_op_mov_TN_reg[OT_LONG][0][7 - i]();
        gen_op_st_T0_A0[OT_WORD + s->dflag + s->mem_index]();
        gen_op_addl_A0_im(2 << s->dflag);
    }
    gen_op_mov_reg_T1[OT_WORD + s->ss32][R_ESP]();
}

/* POPA: reload the general registers; the saved ESP slot is skipped.
   NOTE: wrap around in 16 bit not fully handled */
static void gen_popa(DisasContext *s)
{
    int i;
    gen_op_movl_A0_ESP();
    if (!s->ss32)
        gen_op_andl_A0_ffff();
    gen_op_movl_T1_A0();
    gen_op_addl_T1_im(16 << s->dflag);
    if (s->addseg)
        gen_op_addl_A0_seg(offsetof(CPUX86State,segs[R_SS].base));
    for(i = 0;i < 8; i++) {
        /* ESP is not reloaded */
        if (i != 3) {
            gen_op_ld_T0_A0[OT_WORD + s->dflag + s->mem_index]();
            gen_op_mov_reg_T0[OT_WORD + s->dflag][7 - i]();
        }
        gen_op_addl_A0_im(2 << s->dflag);
    }
    gen_op_mov_reg_T1[OT_WORD + s->ss32][R_ESP]();
}

/* ENTER: build a stack frame of 'esp_addend' bytes with 'level' nesting
   levels (level is masked to 5 bits, as on hardware). */
static void gen_enter(DisasContext *s, int esp_addend, int level)
{
    int ot, opsize;

    level &= 0x1f;
#ifdef TARGET_X86_64
    if (CODE64(s)) {
        ot = s->dflag ?
OT_QUAD : OT_WORD;
        opsize = 1 << ot;

        gen_op_movl_A0_ESP();
        gen_op_addq_A0_im(-opsize);
        gen_op_movl_T1_A0();

        /* push bp */
        gen_op_mov_TN_reg[OT_LONG][0][R_EBP]();
        gen_op_st_T0_A0[ot + s->mem_index]();
        if (level) {
            gen_op_enter64_level(level, (ot == OT_QUAD));
        }
        gen_op_mov_reg_T1[ot][R_EBP]();
        /* new RSP = frame base - locals - nesting-level slots */
        gen_op_addl_T1_im( -esp_addend + (-opsize * level) );
        gen_op_mov_reg_T1[OT_QUAD][R_ESP]();
    } else
#endif
    {
        ot = s->dflag + OT_WORD;
        opsize = 2 << s->dflag;

        gen_op_movl_A0_ESP();
        gen_op_addl_A0_im(-opsize);
        if (!s->ss32)
            gen_op_andl_A0_ffff();
        gen_op_movl_T1_A0();
        if (s->addseg)
            gen_op_addl_A0_seg(offsetof(CPUX86State,segs[R_SS].base));
        /* push bp */
        gen_op_mov_TN_reg[OT_LONG][0][R_EBP]();
        gen_op_st_T0_A0[ot + s->mem_index]();
        if (level) {
            gen_op_enter_level(level, s->dflag);
        }
        gen_op_mov_reg_T1[ot][R_EBP]();
        /* new ESP = frame base - locals - nesting-level slots */
        gen_op_addl_T1_im( -esp_addend + (-opsize * level) );
        gen_op_mov_reg_T1[OT_WORD + s->ss32][R_ESP]();
    }
}

/* Raise exception 'trapno' at guest address 'cur_eip' and terminate
   translation of the current block. */
static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip)
{
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);
    gen_jmp_im(cur_eip);
    gen_op_raise_exception(trapno);
    s->is_jmp = 3;
}

/* an interrupt is different from an exception because of the
   privilege checks */
static void gen_interrupt(DisasContext *s, int intno,
                          target_ulong cur_eip, target_ulong next_eip)
{
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);
    gen_jmp_im(cur_eip);
    /* next_eip - cur_eip is the instruction length -- presumably so the
       helper can compute the return address; confirm against the
       raise_interrupt helper. */
    gen_op_raise_interrupt(intno, (int)(next_eip - cur_eip));
    s->is_jmp = 3;
}

/* Raise a debug exception at 'cur_eip' and terminate the block. */
static void gen_debug(DisasContext *s, target_ulong cur_eip)
{
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);
    gen_jmp_im(cur_eip);
    gen_op_debug();
    s->is_jmp = 3;
}

/* generate a generic end of block.
Trace exception is also generated if needed */
static void gen_eob(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);
    if (s->tb->flags & HF_INHIBIT_IRQ_MASK) {
        /* the interrupt-shadow only lasts one instruction */
        gen_op_reset_inhibit_irq();
    }
    if (s->singlestep_enabled) {
        gen_op_debug();
    } else if (s->tf) {
        gen_op_single_step();
    } else {
        gen_op_movl_T0_0();
        gen_op_exit_tb();
    }
    s->is_jmp = 3;
}

/* generate a jump to eip. No segment change must happen before as a
   direct call to the next block may occur */
static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num)
{
    if (s->jmp_opt) {
        /* direct block chaining is possible */
        if (s->cc_op != CC_OP_DYNAMIC) {
            gen_op_set_cc_op(s->cc_op);
            s->cc_op = CC_OP_DYNAMIC;
        }
        gen_goto_tb(s, tb_num, eip);
        s->is_jmp = 3;
    } else {
        gen_jmp_im(eip);
        gen_eob(s);
    }
}

/* Jump to 'eip' using chaining slot 0. */
static void gen_jmp(DisasContext *s, target_ulong eip)
{
    gen_jmp_tb(s, eip, 0);
}

/* Load the target_ulong immediate 'val' into T0; a single 32 bit move
   suffices when the value fits sign-extended in 32 bits. */
static void gen_movtl_T0_im(target_ulong val)
{
#ifdef TARGET_X86_64
    if ((int32_t)val == val) {
        gen_op_movl_T0_im(val);
    } else {
        gen_op_movq_T0_im64(val >> 32, val);
    }
#else
    gen_op_movl_T0_im(val);
#endif
}

/* Same as gen_movtl_T0_im() but targets T1. */
static void gen_movtl_T1_im(target_ulong val)
{
#ifdef TARGET_X86_64
    if ((int32_t)val == val) {
        gen_op_movl_T1_im(val);
    } else {
        gen_op_movq_T1_im64(val >> 32, val);
    }
#else
    gen_op_movl_T1_im(val);
#endif
}

/* Add the immediate 'val' to A0, 64 bit wide in long mode. */
static void gen_add_A0_im(DisasContext *s, int val)
{
#ifdef TARGET_X86_64
    if (CODE64(s))
        gen_op_addq_A0_im(val);
    else
#endif
        gen_op_addl_A0_im(val);
}

/* 64/128 bit env load/store helpers, indexed by s->mem_index
   (raw access, plus kernel/user variants in softmmu builds). */
static GenOpFunc1 *gen_ldq_env_A0[3] = {
    gen_op_ldq_raw_env_A0,
#ifndef CONFIG_USER_ONLY
    gen_op_ldq_kernel_env_A0,
    gen_op_ldq_user_env_A0,
#endif
};

static GenOpFunc1 *gen_stq_env_A0[3] = {
    gen_op_stq_raw_env_A0,
#ifndef CONFIG_USER_ONLY
    gen_op_stq_kernel_env_A0,
    gen_op_stq_user_env_A0,
#endif
};

static GenOpFunc1 *gen_ldo_env_A0[3] = {
    gen_op_ldo_raw_env_A0,
#ifndef CONFIG_USER_ONLY
    gen_op_ldo_kernel_env_A0,
    gen_op_ldo_user_env_A0,
#endif
};

static GenOpFunc1 *gen_sto_env_A0[3] = {
    gen_op_sto_raw_env_A0,
#ifndef CONFIG_USER_ONLY
    gen_op_sto_kernel_env_A0,
    gen_op_sto_user_env_A0,
#endif
};

/* marker for SSE opcodes that need special (non table driven) handling */
#define SSE_SPECIAL ((GenOpFunc2 *)1)

#define \
MMX_OP2(x) { gen_op_ ## x ## _mmx, gen_op_ ## x ## _xmm }
#define SSE_FOP(x) { gen_op_ ## x ## ps, gen_op_ ## x ## pd, \
                     gen_op_ ## x ## ss, gen_op_ ## x ## sd, }

/* Dispatch table for 0x0f-prefixed SSE/MMX opcodes.  Each row holds the
   four prefix variants in order: none, 0x66, 0xf3, 0xf2 (as the trailing
   comments show: movups/movupd/movss/movsd).  SSE_SPECIAL marks opcodes
   decoded by hand elsewhere; NULL / missing entries are invalid
   encodings. */
static GenOpFunc2 *sse_op_table1[256][4] = {
    /* pure SSE operations */
    [0x10] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
    [0x11] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
    [0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */
    [0x13] = { SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd */
    [0x14] = { gen_op_punpckldq_xmm, gen_op_punpcklqdq_xmm },
    [0x15] = { gen_op_punpckhdq_xmm, gen_op_punpckhqdq_xmm },
    [0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd, movshdup */
    [0x17] = { SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd */
    [0x28] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
    [0x29] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
    [0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
    [0x2b] = { SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd */
    [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttsd2si, cvttss2si */
    [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtsd2si, cvtss2si */
    [0x2e] = { gen_op_ucomiss, gen_op_ucomisd },
    [0x2f] = { gen_op_comiss, gen_op_comisd },
    [0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */
    [0x51] = SSE_FOP(sqrt),
    [0x52] = { gen_op_rsqrtps, NULL, gen_op_rsqrtss, NULL },
    [0x53] = { gen_op_rcpps, NULL, gen_op_rcpss, NULL },
    [0x54] = { gen_op_pand_xmm, gen_op_pand_xmm }, /* andps, andpd */
    [0x55] = { gen_op_pandn_xmm, gen_op_pandn_xmm }, /* andnps, andnpd */
    [0x56] = { gen_op_por_xmm, gen_op_por_xmm }, /* orps, orpd */
    [0x57] = { gen_op_pxor_xmm, gen_op_pxor_xmm }, /* xorps, xorpd */
    [0x58] = SSE_FOP(add),
    [0x59] = SSE_FOP(mul),
    [0x5a] = { gen_op_cvtps2pd, gen_op_cvtpd2ps,
               gen_op_cvtss2sd, gen_op_cvtsd2ss },
    [0x5b] = { gen_op_cvtdq2ps, gen_op_cvtps2dq, gen_op_cvttps2dq },
    [0x5c] = SSE_FOP(sub),
    [0x5d] = SSE_FOP(min),
    [0x5e] = SSE_FOP(div),
    [0x5f] = SSE_FOP(max),
    [0xc2] = SSE_FOP(cmpeq),
    [0xc6] = { (GenOpFunc2 *)gen_op_shufps, (GenOpFunc2 *)gen_op_shufpd },

    /* MMX ops and their SSE extensions */
    [0x60] = MMX_OP2(punpcklbw),
    [0x61] = MMX_OP2(p
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -