helper.c.svn-base
}

/* CPUID: return the requested leaf (index in EAX) in EAX/EBX/ECX/EDX */
void helper_cpuid(void)
{
    uint32_t index;
    index = (uint32_t)EAX;

    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel)
            index = env->cpuid_level;
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        EAX = env->cpuid_level;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 1:
        EAX = env->cpuid_version;
        EBX = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        ECX = env->cpuid_ext_features;
        EDX = env->cpuid_features;
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        EAX = 1;
        EBX = 0;
        ECX = 0;
        EDX = 0x2c307d;
        break;
    case 0x80000000:
        EAX = env->cpuid_xlevel;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 0x80000001:
        EAX = env->cpuid_features;
        EBX = 0;
        ECX = env->cpuid_ext3_features;
        EDX = env->cpuid_ext2_features;
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        EAX = 0x01ff01ff;
        EBX = 0x01ff01ff;
        ECX = 0x40020140;
        EDX = 0x40020140;
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        EAX = 0;
        EBX = 0x42004200;
        ECX = 0x02008140;
        EDX = 0;
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        EAX = 0x00003028;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    case 0x8000000A:
        EAX = 0x00000001;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    default:
        /* reserved values: zero */
        EAX = 0;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    }
}

/* ENTER with a non-zero nesting level: copy the outer frame pointers,
   then push the new frame pointer (passed in T1) */
void helper_enter_level(int level, int data32)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        stl(ssp + (esp & esp_mask), T1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        stw(ssp + (esp & esp_mask), T1);
    }
}

#ifdef TARGET_X86_64
/* 64-bit variant of helper_enter_level */
void helper_enter64_level(int level, int data64)
{
    target_ulong esp, ebp;
    ebp = EBP;
    esp = ESP;

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            stq(esp, ldq(ebp));
        }
        esp -= 8;
        stq(esp, T1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(esp, lduw(ebp));
        }
        esp -= 2;
        stw(esp, T1);
    }
}
#endif

/* LLDT: load the LDT register from the selector in T0 */
void helper_lldt_T0(void)
{
    int selector;
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}

/* LTR: load the task register from the selector in T0 and mark
   the TSS descriptor busy */
void helper_ltr_T0(void)
{
    int selector;
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;
            e3 = ldl_kernel(ptr + 8);
            e4 = ldl_kernel(ptr + 12);
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}

/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            )
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {
        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
#if 0
        fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}

/* protected mode jump */
void helper_ljmp_protected_T0_T1(int next_eip_addend)
{
    int new_cs, gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong new_eip, next_eip;

    new_cs = T0;
    new_eip = T1;
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                               get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            next_eip = env->eip + next_eip_addend;
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            CC_OP = CC_OP_EFLAGS;
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);