
📄 emulate.c

📁 xen virtual machine source code package
💻 C
📖 Page 1 of 2
    /* (Continued from the previous page: end of the cmpxchg handler.) */
    return hvmemul_write(seg, offset, p_new, bytes, ctxt);
}

/* REP INS: transfer from an I/O port into guest memory. */
static int hvmemul_rep_ins(
    uint16_t src_port,
    enum x86_segment dst_seg,
    unsigned long dst_offset,
    unsigned int bytes_per_rep,
    unsigned long *reps,
    struct x86_emulate_ctxt *ctxt)
{
    struct hvm_emulate_ctxt *hvmemul_ctxt =
        container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
    unsigned long addr;
    uint32_t pfec = PFEC_page_present | PFEC_write_access;
    paddr_t gpa;
    int rc;

    rc = hvmemul_virtual_to_linear(
        dst_seg, dst_offset, bytes_per_rep, reps, hvm_access_write,
        hvmemul_ctxt, &addr);
    if ( rc != X86EMUL_OKAY )
        return rc;

    if ( hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.dpl == 3 )
        pfec |= PFEC_user_mode;

    rc = hvmemul_linear_to_phys(
        addr, &gpa, bytes_per_rep, reps, pfec, hvmemul_ctxt);
    if ( rc != X86EMUL_OKAY )
        return rc;

    return hvmemul_do_pio(src_port, reps, bytes_per_rep, gpa, IOREQ_READ,
                          !!(ctxt->regs->eflags & X86_EFLAGS_DF), NULL);
}

/* REP OUTS: transfer from guest memory to an I/O port. */
static int hvmemul_rep_outs(
    enum x86_segment src_seg,
    unsigned long src_offset,
    uint16_t dst_port,
    unsigned int bytes_per_rep,
    unsigned long *reps,
    struct x86_emulate_ctxt *ctxt)
{
    struct hvm_emulate_ctxt *hvmemul_ctxt =
        container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
    unsigned long addr;
    uint32_t pfec = PFEC_page_present;
    paddr_t gpa;
    int rc;

    rc = hvmemul_virtual_to_linear(
        src_seg, src_offset, bytes_per_rep, reps, hvm_access_read,
        hvmemul_ctxt, &addr);
    if ( rc != X86EMUL_OKAY )
        return rc;

    if ( hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.dpl == 3 )
        pfec |= PFEC_user_mode;

    rc = hvmemul_linear_to_phys(
        addr, &gpa, bytes_per_rep, reps, pfec, hvmemul_ctxt);
    if ( rc != X86EMUL_OKAY )
        return rc;

    return hvmemul_do_pio(dst_port, reps, bytes_per_rep, gpa, IOREQ_WRITE,
                          !!(ctxt->regs->eflags & X86_EFLAGS_DF), NULL);
}

static int hvmemul_rep_movs(
    enum x86_segment src_seg,
    unsigned long src_offset,
    enum x86_segment dst_seg,
    unsigned long dst_offset,
    unsigned int bytes_per_rep,
    unsigned long *reps,
    struct x86_emulate_ctxt *ctxt)
{
    struct hvm_emulate_ctxt *hvmemul_ctxt =
        container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
    unsigned long saddr, daddr;
    paddr_t sgpa, dgpa;
    uint32_t pfec = PFEC_page_present;
    p2m_type_t p2mt;
    int rc;

    rc = hvmemul_virtual_to_linear(
        src_seg, src_offset, bytes_per_rep, reps, hvm_access_read,
        hvmemul_ctxt, &saddr);
    if ( rc != X86EMUL_OKAY )
        return rc;

    rc = hvmemul_virtual_to_linear(
        dst_seg, dst_offset, bytes_per_rep, reps, hvm_access_write,
        hvmemul_ctxt, &daddr);
    if ( rc != X86EMUL_OKAY )
        return rc;

    if ( hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.dpl == 3 )
        pfec |= PFEC_user_mode;

    rc = hvmemul_linear_to_phys(
        saddr, &sgpa, bytes_per_rep, reps, pfec, hvmemul_ctxt);
    if ( rc != X86EMUL_OKAY )
        return rc;

    rc = hvmemul_linear_to_phys(
        daddr, &dgpa, bytes_per_rep, reps,
        pfec | PFEC_write_access, hvmemul_ctxt);
    if ( rc != X86EMUL_OKAY )
        return rc;

    /* MMIO source: issue a read request that targets the destination. */
    (void)gfn_to_mfn_current(sgpa >> PAGE_SHIFT, &p2mt);
    if ( !p2m_is_ram(p2mt) )
        return hvmemul_do_mmio(
            sgpa, reps, bytes_per_rep, dgpa, IOREQ_READ,
            !!(ctxt->regs->eflags & X86_EFLAGS_DF), NULL);

    /* Source is RAM. A RAM-to-RAM copy should never reach this path. */
    (void)gfn_to_mfn_current(dgpa >> PAGE_SHIFT, &p2mt);
    if ( p2m_is_ram(p2mt) )
        return X86EMUL_UNHANDLEABLE;

    /* MMIO destination: issue a write request sourced from RAM. */
    return hvmemul_do_mmio(
        dgpa, reps, bytes_per_rep, sgpa, IOREQ_WRITE,
        !!(ctxt->regs->eflags & X86_EFLAGS_DF), NULL);
}

static int hvmemul_read_segment(
    enum x86_segment seg,
    struct segment_register *reg,
    struct x86_emulate_ctxt *ctxt)
{
    struct hvm_emulate_ctxt *hvmemul_ctxt =
        container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
    struct segment_register *sreg = hvmemul_get_seg_reg(seg, hvmemul_ctxt);
    memcpy(reg, sreg, sizeof(struct segment_register));
    return X86EMUL_OKAY;
}

static int hvmemul_write_segment(
    enum x86_segment seg,
    struct segment_register *reg,
    struct x86_emulate_ctxt *ctxt)
{
    struct hvm_emulate_ctxt *hvmemul_ctxt =
        container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
    struct segment_register *sreg = hvmemul_get_seg_reg(seg, hvmemul_ctxt);

    memcpy(sreg, reg, sizeof(struct segment_register));
    __set_bit(seg, &hvmemul_ctxt->seg_reg_dirty);

    return X86EMUL_OKAY;
}

static int hvmemul_read_io(
    unsigned int port,
    unsigned int bytes,
    unsigned long *val,
    struct x86_emulate_ctxt *ctxt)
{
    unsigned long reps = 1;
    *val = 0;
    return hvmemul_do_pio(port, &reps, bytes, 0, IOREQ_READ, 0, val);
}

static int hvmemul_write_io(
    unsigned int port,
    unsigned int bytes,
    unsigned long val,
    struct x86_emulate_ctxt *ctxt)
{
    unsigned long reps = 1;
    return hvmemul_do_pio(port, &reps, bytes, 0, IOREQ_WRITE, 0, &val);
}

static int hvmemul_read_cr(
    unsigned int reg,
    unsigned long *val,
    struct x86_emulate_ctxt *ctxt)
{
    switch ( reg )
    {
    case 0:
    case 2:
    case 3:
    case 4:
        *val = current->arch.hvm_vcpu.guest_cr[reg];
        return X86EMUL_OKAY;
    default:
        break;
    }

    return X86EMUL_UNHANDLEABLE;
}

static int hvmemul_write_cr(
    unsigned int reg,
    unsigned long val,
    struct x86_emulate_ctxt *ctxt)
{
    switch ( reg )
    {
    case 0:
        return hvm_set_cr0(val);
    case 2:
        current->arch.hvm_vcpu.guest_cr[2] = val;
        return X86EMUL_OKAY;
    case 3:
        return hvm_set_cr3(val);
    case 4:
        return hvm_set_cr4(val);
    default:
        break;
    }

    return X86EMUL_UNHANDLEABLE;
}

static int hvmemul_read_msr(
    unsigned long reg,
    uint64_t *val,
    struct x86_emulate_ctxt *ctxt)
{
    struct cpu_user_regs _regs;
    int rc;

    _regs.ecx = (uint32_t)reg;

    if ( (rc = hvm_msr_read_intercept(&_regs)) != 0 )
        return rc;

    /* Combine EDX:EAX into the 64-bit MSR value. */
    *val = ((uint64_t)(uint32_t)_regs.edx << 32) | (uint32_t)_regs.eax;
    return X86EMUL_OKAY;
}

static int hvmemul_write_msr(
    unsigned long reg,
    uint64_t val,
    struct x86_emulate_ctxt *ctxt)
{
    struct cpu_user_regs _regs;

    _regs.edx = (uint32_t)(val >> 32);
    _regs.eax = (uint32_t)val;
    _regs.ecx = (uint32_t)reg;

    return hvm_msr_write_intercept(&_regs);
}

static int hvmemul_wbinvd(
    struct x86_emulate_ctxt *ctxt)
{
    hvm_funcs.wbinvd_intercept();
    return X86EMUL_OKAY;
}

static int hvmemul_cpuid(
    unsigned int *eax,
    unsigned int *ebx,
    unsigned int *ecx,
    unsigned int *edx,
    struct x86_emulate_ctxt *ctxt)
{
    hvm_funcs.cpuid_intercept(eax, ebx, ecx, edx);
    return X86EMUL_OKAY;
}

static int hvmemul_inject_hw_exception(
    uint8_t vector,
    int32_t error_code,
    struct x86_emulate_ctxt *ctxt)
{
    struct hvm_emulate_ctxt *hvmemul_ctxt =
        container_of(ctxt, struct hvm_emulate_ctxt, ctxt);

    hvmemul_ctxt->exn_pending = 1;
    hvmemul_ctxt->exn_vector = vector;
    hvmemul_ctxt->exn_error_code = error_code;
    hvmemul_ctxt->exn_insn_len = 0;

    return X86EMUL_OKAY;
}

static int hvmemul_inject_sw_interrupt(
    uint8_t vector,
    uint8_t insn_len,
    struct x86_emulate_ctxt *ctxt)
{
    struct hvm_emulate_ctxt *hvmemul_ctxt =
        container_of(ctxt, struct hvm_emulate_ctxt, ctxt);

    hvmemul_ctxt->exn_pending = 1;
    hvmemul_ctxt->exn_vector = vector;
    hvmemul_ctxt->exn_error_code = -1;
    hvmemul_ctxt->exn_insn_len = insn_len;

    return X86EMUL_OKAY;
}

static int hvmemul_get_fpu(
    void (*exception_callback)(void *, struct cpu_user_regs *),
    void *exception_callback_arg,
    enum x86_emulate_fpu_type type,
    struct x86_emulate_ctxt *ctxt)
{
    struct vcpu *curr = current;

    switch ( type )
    {
    case X86EMUL_FPU_fpu:
        break;
    case X86EMUL_FPU_mmx:
        if ( !cpu_has_mmx )
            return X86EMUL_UNHANDLEABLE;
        break;
    default:
        return X86EMUL_UNHANDLEABLE;
    }

    if ( !curr->fpu_dirtied )
        hvm_funcs.fpu_dirty_intercept();

    curr->arch.hvm_vcpu.fpu_exception_callback = exception_callback;
    curr->arch.hvm_vcpu.fpu_exception_callback_arg = exception_callback_arg;

    return X86EMUL_OKAY;
}

static void hvmemul_put_fpu(
    struct x86_emulate_ctxt *ctxt)
{
    struct vcpu *curr = current;
    curr->arch.hvm_vcpu.fpu_exception_callback = NULL;
}

static int hvmemul_invlpg(
    enum x86_segment seg,
    unsigned long offset,
    struct x86_emulate_ctxt *ctxt)
{
    struct hvm_emulate_ctxt *hvmemul_ctxt =
        container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
    unsigned long addr, reps = 1;
    int rc;

    rc = hvmemul_virtual_to_linear(
        seg, offset, 1, &reps, hvm_access_none, hvmemul_ctxt, &addr);

    if ( rc == X86EMUL_OKAY )
        hvm_funcs.invlpg_intercept(addr);

    return rc;
}

/* Callback table handed to the generic x86 instruction emulator. */
static struct x86_emulate_ops hvm_emulate_ops = {
    .read          = hvmemul_read,
    .insn_fetch    = hvmemul_insn_fetch,
    .write         = hvmemul_write,
    .cmpxchg       = hvmemul_cmpxchg,
    .rep_ins       = hvmemul_rep_ins,
    .rep_outs      = hvmemul_rep_outs,
    .rep_movs      = hvmemul_rep_movs,
    .read_segment  = hvmemul_read_segment,
    .write_segment = hvmemul_write_segment,
    .read_io       = hvmemul_read_io,
    .write_io      = hvmemul_write_io,
    .read_cr       = hvmemul_read_cr,
    .write_cr      = hvmemul_write_cr,
    .read_msr      = hvmemul_read_msr,
    .write_msr     = hvmemul_write_msr,
    .wbinvd        = hvmemul_wbinvd,
    .cpuid         = hvmemul_cpuid,
    .inject_hw_exception = hvmemul_inject_hw_exception,
    .inject_sw_interrupt = hvmemul_inject_sw_interrupt,
    .get_fpu       = hvmemul_get_fpu,
    .put_fpu       = hvmemul_put_fpu,
    .invlpg        = hvmemul_invlpg
};

int hvm_emulate_one(
    struct hvm_emulate_ctxt *hvmemul_ctxt)
{
    struct cpu_user_regs *regs = hvmemul_ctxt->ctxt.regs;
    struct vcpu *curr = current;
    uint32_t new_intr_shadow, pfec = PFEC_page_present;
    unsigned long addr;
    int rc;

    /* Derive address and stack sizes from the current CS/SS attributes. */
    if ( hvm_long_mode_enabled(curr) &&
         hvmemul_ctxt->seg_reg[x86_seg_cs].attr.fields.l )
    {
        hvmemul_ctxt->ctxt.addr_size = hvmemul_ctxt->ctxt.sp_size = 64;
    }
    else
    {
        hvmemul_ctxt->ctxt.addr_size =
            hvmemul_ctxt->seg_reg[x86_seg_cs].attr.fields.db ? 32 : 16;
        hvmemul_ctxt->ctxt.sp_size =
            hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.db ? 32 : 16;
    }

    if ( hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.dpl == 3 )
        pfec |= PFEC_user_mode;

    /* Prefetch the instruction bytes at the current EIP, if possible. */
    hvmemul_ctxt->insn_buf_eip = regs->eip;
    hvmemul_ctxt->insn_buf_bytes =
        (hvm_virtual_to_linear_addr(
            x86_seg_cs, &hvmemul_ctxt->seg_reg[x86_seg_cs],
            regs->eip, sizeof(hvmemul_ctxt->insn_buf),
            hvm_access_insn_fetch, hvmemul_ctxt->ctxt.addr_size, &addr) &&
         !hvm_fetch_from_guest_virt_nofault(
             hvmemul_ctxt->insn_buf, addr,
             sizeof(hvmemul_ctxt->insn_buf), pfec))
        ? sizeof(hvmemul_ctxt->insn_buf) : 0;

    hvmemul_ctxt->exn_pending = 0;

    rc = x86_emulate(&hvmemul_ctxt->ctxt, &hvm_emulate_ops);

    if ( rc != X86EMUL_RETRY )
        curr->arch.hvm_vcpu.mmio_large_read_bytes =
            curr->arch.hvm_vcpu.mmio_large_write_bytes = 0;

    if ( rc != X86EMUL_OKAY )
        return rc;

    new_intr_shadow = hvmemul_ctxt->intr_shadow;

    /* MOV-SS instruction toggles MOV-SS shadow, else we just clear it. */
    if ( hvmemul_ctxt->ctxt.retire.flags.mov_ss )
        new_intr_shadow ^= HVM_INTR_SHADOW_MOV_SS;
    else
        new_intr_shadow &= ~HVM_INTR_SHADOW_MOV_SS;

    /* STI instruction toggles STI shadow, else we just clear it. */
    if ( hvmemul_ctxt->ctxt.retire.flags.sti )
        new_intr_shadow ^= HVM_INTR_SHADOW_STI;
    else
        new_intr_shadow &= ~HVM_INTR_SHADOW_STI;

    if ( hvmemul_ctxt->intr_shadow != new_intr_shadow )
    {
        hvmemul_ctxt->intr_shadow = new_intr_shadow;
        hvm_funcs.set_interrupt_shadow(curr, new_intr_shadow);
    }

    if ( hvmemul_ctxt->ctxt.retire.flags.hlt &&
         !hvm_local_events_need_delivery(curr) )
    {
        hvm_hlt(regs->eflags);
    }

    return X86EMUL_OKAY;
}

void hvm_emulate_prepare(
    struct hvm_emulate_ctxt *hvmemul_ctxt,
    struct cpu_user_regs *regs)
{
    hvmemul_ctxt->intr_shadow = hvm_funcs.get_interrupt_shadow(current);
    hvmemul_ctxt->ctxt.regs = regs;
    hvmemul_ctxt->ctxt.force_writeback = 1;
    hvmemul_ctxt->seg_reg_accessed = 0;
    hvmemul_ctxt->seg_reg_dirty = 0;
    hvmemul_get_seg_reg(x86_seg_cs, hvmemul_ctxt);
    hvmemul_get_seg_reg(x86_seg_ss, hvmemul_ctxt);
}

/* Flush dirtied segment registers back into the vcpu state. */
void hvm_emulate_writeback(
    struct hvm_emulate_ctxt *hvmemul_ctxt)
{
    enum x86_segment seg;

    seg = find_first_bit(&hvmemul_ctxt->seg_reg_dirty,
                         ARRAY_SIZE(hvmemul_ctxt->seg_reg));

    while ( seg < ARRAY_SIZE(hvmemul_ctxt->seg_reg) )
    {
        hvm_set_segment_register(current, seg, &hvmemul_ctxt->seg_reg[seg]);
        seg = find_next_bit(&hvmemul_ctxt->seg_reg_dirty,
                            ARRAY_SIZE(hvmemul_ctxt->seg_reg),
                            seg+1);
    }
}

/* Lazily cache a segment register on first access. */
struct segment_register *hvmemul_get_seg_reg(
    enum x86_segment seg,
    struct hvm_emulate_ctxt *hvmemul_ctxt)
{
    if ( !__test_and_set_bit(seg, &hvmemul_ctxt->seg_reg_accessed) )
        hvm_get_segment_register(current, seg, &hvmemul_ctxt->seg_reg[seg]);
    return &hvmemul_ctxt->seg_reg[seg];
}
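
The file ends with the three public entry points: hvm_emulate_prepare binds the register file and snapshots CS/SS plus the interrupt shadow, hvm_emulate_one decodes and runs a single guest instruction through hvm_emulate_ops, and hvm_emulate_writeback flushes dirtied segment registers back to the vcpu. For orientation, here is a minimal sketch of how a caller might chain them; the wrapper function name and the recovery policy are illustrative assumptions, and hvm_inject_exception is assumed to be the usual Xen injection helper rather than something defined in this file.

/* A minimal sketch of driving the emulator for one guest instruction.
 * Assumes it runs in an HVM exit handler with the guest register state
 * at hand; everything except the three hvm_emulate_* entry points and
 * the hvm_emulate_ctxt fields used below is an illustrative assumption. */
static void sketch_emulate_one_insn(struct cpu_user_regs *regs)
{
    struct hvm_emulate_ctxt ctxt;

    /* Snapshot interrupt shadow and CS/SS; bind the register file. */
    hvm_emulate_prepare(&ctxt, regs);

    /* Decode and execute a single instruction via hvm_emulate_ops. */
    switch ( hvm_emulate_one(&ctxt) )
    {
    case X86EMUL_OKAY:
        break;
    case X86EMUL_RETRY:
        /* An I/O request is in flight; re-enter later to retry. */
        return;
    default:
        /* Could not emulate: the caller must pick a recovery policy. */
        return;
    }

    /* Push any segment registers the instruction modified back out. */
    hvm_emulate_writeback(&ctxt);

    /* Deliver any exception the emulator queued via the inject hooks. */
    if ( ctxt.exn_pending )
        hvm_inject_exception(ctxt.exn_vector, ctxt.exn_error_code, 0);
}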
