⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 helper.c

📁 qemu虚拟机代码
💻 C
📖 第 1 页 / 共 5 页
字号:
        }
        /* Segment must be present, otherwise #NP with the selector as error code. */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        /* Limit check is skipped for 64-bit code segments (LMA + L bit):
           in long mode the CS limit is not enforced. */
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* Commit: load CS with RPL forced to the current CPL, then jump. */
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            /* Gate DPL must be >= both CPL and the selector's RPL. */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            next_eip = env->eip + next_eip_addend;
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            /* Target CS selector and entry offset come from the gate itself;
               a 386 gate (type 12) carries the upper 16 offset bits in e2. */
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            /* Conforming: DPL <= CPL required; non-conforming: DPL == CPL. */
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}

/* real mode call
 *
 * Far CALL in real/vm86 mode: pushes the current CS selector and the
 * return EIP (next_eip) on the stack, then loads CS:EIP from T0:T1.
 * No descriptor checks apply; the new CS base is simply selector << 4.
 *
 * shift != 0 selects 32-bit pushes (PUSHL), shift == 0 selects 16-bit
 * pushes (PUSHW). T0/T1 are the dynamic-translation temporaries holding
 * the target selector and offset.
 */
void helper_lcall_real_T0_T1(int shift, int next_eip)
{
    int new_cs, new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_cs = T0;
    new_eip = T1;
    esp = ESP;
    /* esp_mask limits stack arithmetic to the SS segment's address size. */
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }
    /* Write back only the masked bits of the stack pointer. */
    ESP = (ESP & ~esp_mask) | (esp & esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    /* Real-mode segmentation: base = selector * 16. */
    env->segs[R_CS].base = (new_cs << 4);
}

/* protected mode call
 *
 * Far CALL through a code segment descriptor, call gate, task gate or
 * TSS selector (target in T0:T1, return address = env->eip +
 * next_eip_addend). Performs the full privilege checks and, for an
 * inter-privilege call through a call gate, switches to the inner
 * stack from the TSS and copies param_count parameters across.
 * Raises #GP/#NP/#TS on check failures (via raise_exception_err,
 * which does not return).
 */
void helper_lcall_protected_T0_T1(int shift, int next_eip_addend)
{
    int new_cs, new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, next_eip, new_eip;

    new_cs = T0;
    new_eip = T1;
    next_eip = env->eip + next_eip_addend;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lcall %04x:%08x s=%d\n",
                new_cs, (uint32_t)new_eip, shift);
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
#endif
    /* A null selector is never a valid call target. */
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
    }
#endif
    if (e2 & DESC_S_MASK) {
        /* Direct call to a code segment descriptor. */
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;
            /* 64 bit case */
            rsp = ESP;
            PUSHQ(rsp, env->segs[R_CS].selector);
            PUSHQ(rsp, next_eip);
            /* from this point, not restartable */
            ESP = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            EIP = new_eip;
        } else
#endif
        {
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHL(ssp, sp, sp_mask, next_eip);
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHW(ssp, sp, sp_mask, next_eip);
            }

            /* Limit check happens after the pushes but before any
               architectural state is committed. */
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            /* from this point, not restartable */
            ESP = (ESP & ~sp_mask) | (sp & sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        /* Call gate path: type 4 -> shift 0 (16-bit), type 12 -> shift 1
           (32-bit); the gate's width overrides the operand size. */
        shift = type >> 3;
        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG,  new_cs & 0xfffc);
        /* Decode the gate: target selector, entry offset, and the number
           of parameters to copy to the new stack. */
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);
        /* e1/e2 are reused for the target code segment descriptor. */
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege: fetch the new SS:ESP for the target
               privilege level from the current TSS. */
            get_ss_esp_from_tss(&ss, &sp, dpl);
#ifdef DEBUG_PCALL
            if (loglevel & CPU_LOG_PCALL)
                fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
                        ss, sp, param_count, ESP);
#endif
            /* Validate the inner stack segment; failures here raise #TS
               (invalid TSS), not #GP. */
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            /* Must be a writable data segment (not code). */
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);

            //            push_size = ((param_count * 2) + 8) << shift;

            /* Remember the caller's stack so parameters can be copied
               from it onto the inner stack. */
            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                /* 32-bit gate: push old SS:ESP, then copy param_count
                   dwords from the caller's stack (highest first). */
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                /* 16-bit gate: same, with words. */
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege: keep the current stack. */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            //            push_size = (4 << shift);
            new_stack = 0;
        }
        /* Push the return address (old CS:EIP) on whichever stack was
           selected above. */
        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }
        /* from this point, not restartable */
        if (new_stack) {
            /* New SS gets its RPL forced to the new privilege level. */
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }
        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, dpl);
        ESP = (ESP & ~sp_mask) | (sp & sp_mask);
        EIP = offset;
    }
#ifdef USE_KQEMU
    /* NOTE(review): presumably returns control to the kqemu accelerator
       after the privilege transition — confirm against kqemu docs. */
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

/* real and vm86 mode iret
 *
 * Pops EIP, CS and EFLAGS from the stack (32-bit pops when shift == 1,
 * 16-bit otherwise) and reloads them without any descriptor checks.
 * In vm86 mode IOPL is excluded from the writable flag mask.
 */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size ? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    load_seg_vm(R_CS, new_cs);
    env->eip = new_eip;
    /* vm86 code may not change IOPL; real mode may. */
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
    /* A 16-bit IRET only updates the low 16 flag bits. */
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
}

/* On a return to outer privilege, a data (or non-conforming code)
 * segment register whose DPL is below the new CPL is no longer
 * accessible and is nulled out, as real CPUs do on RET/IRET.
 */
static inline void validate_seg(int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0)
        return;
    /* The cached descriptor flags stand in for the e2 descriptor word. */
    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}

/* protected mode iret
 *
 * Common implementation for protected-mode RET-far (is_iret == 0) and
 * IRET (is_iret != 0). shift selects operand size: 0 = 16-bit,
 * 1 = 32-bit, 2 = 64-bit (TARGET_X86_64 only). addend is the extra
 * stack adjustment of RET imm16.
 * NOTE: this function continues past the end of this chunk.
 */
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    /* 64-bit mode uses a full-width stack pointer. */
    if (shift == 2)
        sp_mask = -1;
    else
#endif
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    sp = ESP;
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ(sp, new_eip);
        POPQ(sp, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ(sp, new_eflags);
        }
    } else
#endif
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPL(ssp, sp, sp_mask, new_eflags);
            /* IRET with VM set in the popped flags returns to vm86. */
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        }
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        if (is_iret)
            POPW(ssp, sp, sp_mask, new_eflags);
    }
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
                new_cs, new_eip, shift, addend);

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -