
📄 domain.c

📁 xen 3.2.2 source
💻 C
📖 Page 1 of 4
    if ( (pg = alloc_domheap_page(NULL)) == NULL )
        goto fail;
    d->arch.mm_perdomain_l3 = page_to_virt(pg);
    clear_page(d->arch.mm_perdomain_l3);
    d->arch.mm_perdomain_l3[l3_table_offset(PERDOMAIN_VIRT_START)] =
        l3e_from_page(virt_to_page(d->arch.mm_perdomain_l2),
                      __PAGE_HYPERVISOR);
#endif /* __x86_64__ */

#ifdef CONFIG_COMPAT
    HYPERVISOR_COMPAT_VIRT_START(d) = __HYPERVISOR_COMPAT_VIRT_START;
#endif

    paging_domain_init(d);
    paging_initialised = 1;

    if ( !is_idle_domain(d) )
    {
        d->arch.ioport_caps =
            rangeset_new(d, "I/O Ports", RANGESETF_prettyprint_hex);
        if ( d->arch.ioport_caps == NULL )
            goto fail;

        if ( (d->shared_info = alloc_xenheap_page()) == NULL )
            goto fail;

        clear_page(d->shared_info);
        share_xen_page_with_guest(
            virt_to_page(d->shared_info), d, XENSHARE_writable);
    }

    if ( (rc = iommu_domain_init(d)) != 0 )
        goto fail;

    if ( is_hvm_domain(d) )
    {
        if ( (rc = hvm_domain_initialise(d)) != 0 )
        {
            iommu_domain_destroy(d);
            goto fail;
        }
    }
    else
    {
        /* 32-bit PV guest by default only if Xen is not 64-bit. */
        d->arch.is_32bit_pv = d->arch.has_32bit_shinfo =
            (CONFIG_PAGING_LEVELS != 4);
    }

    return 0;

 fail:
    free_xenheap_page(d->shared_info);
    if ( paging_initialised )
        paging_final_teardown(d);
#ifdef __x86_64__
    if ( d->arch.mm_perdomain_l2 )
        free_domheap_page(virt_to_page(d->arch.mm_perdomain_l2));
    if ( d->arch.mm_perdomain_l3 )
        free_domheap_page(virt_to_page(d->arch.mm_perdomain_l3));
#endif
    free_xenheap_pages(d->arch.mm_perdomain_pt, pdpt_order);
    return rc;
}

void arch_domain_destroy(struct domain *d)
{
    if ( is_hvm_domain(d) )
        hvm_domain_destroy(d);

    iommu_domain_destroy(d);

    paging_final_teardown(d);

    free_xenheap_pages(
        d->arch.mm_perdomain_pt,
        get_order_from_bytes(PDPT_L1_ENTRIES * sizeof(l1_pgentry_t)));

#ifdef __x86_64__
    free_domheap_page(virt_to_page(d->arch.mm_perdomain_l2));
    free_domheap_page(virt_to_page(d->arch.mm_perdomain_l3));
#endif

    if ( is_pv_32on64_domain(d) )
        release_arg_xlat_area(d);

    free_xenheap_page(d->shared_info);
}

unsigned long pv_guest_cr4_fixup(unsigned long guest_cr4)
{
    unsigned long hv_cr4_mask, hv_cr4 = real_cr4_to_pv_guest_cr4(read_cr4());

    hv_cr4_mask = ~X86_CR4_TSD;
    if ( cpu_has_de )
        hv_cr4_mask &= ~X86_CR4_DE;

    if ( (guest_cr4 & hv_cr4_mask) != (hv_cr4 & hv_cr4_mask) )
        gdprintk(XENLOG_WARNING,
                 "Attempt to change CR4 flags %08lx -> %08lx\n",
                 hv_cr4, guest_cr4);

    return (hv_cr4 & hv_cr4_mask) | (guest_cr4 & ~hv_cr4_mask);
}

/* This is called by arch_final_setup_guest and do_boot_vcpu */
int arch_set_info_guest(
    struct vcpu *v, vcpu_guest_context_u c)
{
    struct domain *d = v->domain;
    unsigned long cr3_pfn = INVALID_MFN;
    unsigned long flags, cr4;
    int i, rc = 0, compat;

    /* The context is a compat-mode one if the target domain is compat-mode;
     * we expect the tools to DTRT even in compat-mode callers. */
    compat = is_pv_32on64_domain(d);

#ifdef CONFIG_COMPAT
#define c(fld) (compat ? (c.cmp->fld) : (c.nat->fld))
#else
#define c(fld) (c.nat->fld)
#endif
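    /*
     * From here on, c(fld) transparently fetches 'fld' from whichever
     * context layout the caller handed us: the 32-bit compat view (c.cmp)
     * for a 32-on-64 PV domain, the native view (c.nat) otherwise.
     */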
    flags = c(flags);

    if ( !is_hvm_vcpu(v) )
    {
        if ( !compat )
        {
            fixup_guest_stack_selector(d, c.nat->user_regs.ss);
            fixup_guest_stack_selector(d, c.nat->kernel_ss);
            fixup_guest_code_selector(d, c.nat->user_regs.cs);
#ifdef __i386__
            fixup_guest_code_selector(d, c.nat->event_callback_cs);
            fixup_guest_code_selector(d, c.nat->failsafe_callback_cs);
#endif

            for ( i = 0; i < 256; i++ )
                fixup_guest_code_selector(d, c.nat->trap_ctxt[i].cs);

            /* LDT safety checks. */
            if ( ((c.nat->ldt_base & (PAGE_SIZE-1)) != 0) ||
                 (c.nat->ldt_ents > 8192) ||
                 !array_access_ok(c.nat->ldt_base,
                                  c.nat->ldt_ents,
                                  LDT_ENTRY_SIZE) )
                return -EINVAL;
        }
#ifdef CONFIG_COMPAT
        else
        {
            fixup_guest_stack_selector(d, c.cmp->user_regs.ss);
            fixup_guest_stack_selector(d, c.cmp->kernel_ss);
            fixup_guest_code_selector(d, c.cmp->user_regs.cs);
            fixup_guest_code_selector(d, c.cmp->event_callback_cs);
            fixup_guest_code_selector(d, c.cmp->failsafe_callback_cs);

            for ( i = 0; i < 256; i++ )
                fixup_guest_code_selector(d, c.cmp->trap_ctxt[i].cs);

            /* LDT safety checks. */
            if ( ((c.cmp->ldt_base & (PAGE_SIZE-1)) != 0) ||
                 (c.cmp->ldt_ents > 8192) ||
                 !compat_array_access_ok(c.cmp->ldt_base,
                                         c.cmp->ldt_ents,
                                         LDT_ENTRY_SIZE) )
                return -EINVAL;
        }
#endif
    }

    v->fpu_initialised = !!(flags & VGCF_I387_VALID);

    v->arch.flags &= ~TF_kernel_mode;
    if ( (flags & VGCF_in_kernel) || is_hvm_vcpu(v)/*???*/ )
        v->arch.flags |= TF_kernel_mode;

    if ( !compat )
        memcpy(&v->arch.guest_context, c.nat, sizeof(*c.nat));
#ifdef CONFIG_COMPAT
    else
        XLAT_vcpu_guest_context(&v->arch.guest_context, c.cmp);
#endif

    v->arch.guest_context.user_regs.eflags |= 2;

    if ( is_hvm_vcpu(v) )
        goto out;

    /* Only CR0.TS is modifiable by guest or admin. */
    v->arch.guest_context.ctrlreg[0] &= X86_CR0_TS;
    v->arch.guest_context.ctrlreg[0] |= read_cr0() & ~X86_CR0_TS;

    init_int80_direct_trap(v);

    /* IOPL privileges are virtualised. */
    v->arch.iopl = (v->arch.guest_context.user_regs.eflags >> 12) & 3;
    v->arch.guest_context.user_regs.eflags &= ~EF_IOPL;

    /* Ensure real hardware interrupts are enabled. */
    v->arch.guest_context.user_regs.eflags |= EF_IE;
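    /*
     * CR4: an all-zero value from the toolstack means "take the hypervisor
     * defaults"; anything non-zero goes through pv_guest_cr4_fixup(),
     * which lets the guest control only TSD (and DE where the CPU
     * supports it) and warns about attempts to change the rest.
     */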
    cr4 = v->arch.guest_context.ctrlreg[4];
    v->arch.guest_context.ctrlreg[4] = cr4 ? pv_guest_cr4_fixup(cr4) :
        real_cr4_to_pv_guest_cr4(mmu_cr4_features);

    memset(v->arch.guest_context.debugreg, 0,
           sizeof(v->arch.guest_context.debugreg));
    for ( i = 0; i < 8; i++ )
        (void)set_debugreg(v, i, c(debugreg[i]));

    if ( v->is_initialised )
        goto out;

    if ( v->vcpu_id == 0 )
        d->vm_assist = c(vm_assist);

    if ( !compat )
        rc = (int)set_gdt(v, c.nat->gdt_frames, c.nat->gdt_ents);
#ifdef CONFIG_COMPAT
    else
    {
        unsigned long gdt_frames[ARRAY_SIZE(c.cmp->gdt_frames)];
        unsigned int i, n = (c.cmp->gdt_ents + 511) / 512;

        if ( n > ARRAY_SIZE(c.cmp->gdt_frames) )
            return -EINVAL;
        for ( i = 0; i < n; ++i )
            gdt_frames[i] = c.cmp->gdt_frames[i];
        rc = (int)set_gdt(v, gdt_frames, c.cmp->gdt_ents);
    }
#endif
    if ( rc != 0 )
        return rc;

    if ( !compat )
    {
        cr3_pfn = gmfn_to_mfn(d, xen_cr3_to_pfn(c.nat->ctrlreg[3]));

        if ( !mfn_valid(cr3_pfn) ||
             (paging_mode_refcounts(d)
              ? !get_page(mfn_to_page(cr3_pfn), d)
              : !get_page_and_type(mfn_to_page(cr3_pfn), d,
                                   PGT_base_page_table)) )
        {
            destroy_gdt(v);
            return -EINVAL;
        }

        v->arch.guest_table = pagetable_from_pfn(cr3_pfn);

#ifdef __x86_64__
        if ( c.nat->ctrlreg[1] )
        {
            cr3_pfn = gmfn_to_mfn(d, xen_cr3_to_pfn(c.nat->ctrlreg[1]));

            if ( !mfn_valid(cr3_pfn) ||
                 (paging_mode_refcounts(d)
                  ? !get_page(mfn_to_page(cr3_pfn), d)
                  : !get_page_and_type(mfn_to_page(cr3_pfn), d,
                                       PGT_base_page_table)) )
            {
                cr3_pfn = pagetable_get_pfn(v->arch.guest_table);
                v->arch.guest_table = pagetable_null();
                if ( paging_mode_refcounts(d) )
                    put_page(mfn_to_page(cr3_pfn));
                else
                    put_page_and_type(mfn_to_page(cr3_pfn));
                destroy_gdt(v);
                return -EINVAL;
            }

            v->arch.guest_table_user = pagetable_from_pfn(cr3_pfn);
        }
#endif
    }
#ifdef CONFIG_COMPAT
    else
    {
        l4_pgentry_t *l4tab;

        cr3_pfn = gmfn_to_mfn(d, compat_cr3_to_pfn(c.cmp->ctrlreg[3]));

        if ( !mfn_valid(cr3_pfn) ||
             (paging_mode_refcounts(d)
              ? !get_page(mfn_to_page(cr3_pfn), d)
              : !get_page_and_type(mfn_to_page(cr3_pfn), d,
                                   PGT_l3_page_table)) )
        {
            destroy_gdt(v);
            return -EINVAL;
        }

        l4tab = __va(pagetable_get_paddr(v->arch.guest_table));
        *l4tab = l4e_from_pfn(
            cr3_pfn, _PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED);
    }
#endif

    if ( v->vcpu_id == 0 )
        update_domain_wallclock_time(d);

    /* Don't redo final setup */
    v->is_initialised = 1;

    if ( paging_mode_enabled(d) )
        paging_update_paging_modes(v);

    update_cr3(v);

 out:
    if ( flags & VGCF_online )
        clear_bit(_VPF_down, &v->pause_flags);
    else
        set_bit(_VPF_down, &v->pause_flags);

    return 0;
#undef c
}

int arch_vcpu_reset(struct vcpu *v)
{
    destroy_gdt(v);
    vcpu_destroy_pagetables(v);
    return 0;
}
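/*
 * The two helpers below back VCPUOP_register_vcpu_info: map_vcpu_info()
 * switches a VCPU's vcpu_info to a guest-chosen page, and
 * unmap_vcpu_info() undoes that at domain destruction, pointing the VCPU
 * back at its slot in shared_info.
 */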
/*
 * Unmap the vcpu info page if the guest decided to place it somewhere
 * else.  This is only used from arch_domain_destroy, so there's no
 * need to do anything clever.
 */
static void
unmap_vcpu_info(struct vcpu *v)
{
    struct domain *d = v->domain;
    unsigned long mfn;

    if ( v->arch.vcpu_info_mfn == INVALID_MFN )
        return;

    mfn = v->arch.vcpu_info_mfn;
    unmap_domain_page_global(v->vcpu_info);

    v->vcpu_info = shared_info_addr(d, vcpu_info[v->vcpu_id]);
    v->arch.vcpu_info_mfn = INVALID_MFN;

    put_page_and_type(mfn_to_page(mfn));
}

/*
 * Map a guest page in and point the vcpu_info pointer at it.  This
 * makes sure that the vcpu_info is always pointing at a valid piece
 * of memory, and it sets a pending event to make sure that a pending
 * event doesn't get missed.
 */
static int
map_vcpu_info(struct vcpu *v, unsigned long mfn, unsigned offset)
{
    struct domain *d = v->domain;
    void *mapping;
    vcpu_info_t *new_info;
    int i;

    if ( offset > (PAGE_SIZE - sizeof(vcpu_info_t)) )
        return -EINVAL;

    if ( v->arch.vcpu_info_mfn != INVALID_MFN )
        return -EINVAL;

    /* Run this command on yourself or on other offline VCPUS. */
    if ( (v != current) && !test_bit(_VPF_down, &v->pause_flags) )
        return -EINVAL;

    mfn = gmfn_to_mfn(d, mfn);
    if ( !mfn_valid(mfn) ||
         !get_page_and_type(mfn_to_page(mfn), d, PGT_writable_page) )
        return -EINVAL;

    mapping = map_domain_page_global(mfn);
    if ( mapping == NULL )
    {
        put_page_and_type(mfn_to_page(mfn));
        return -ENOMEM;
    }

    new_info = (vcpu_info_t *)(mapping + offset);

    memcpy(new_info, v->vcpu_info, sizeof(*new_info));

    v->vcpu_info = new_info;
    v->arch.vcpu_info_mfn = mfn;

    /* Set new vcpu_info pointer /before/ setting pending flags. */
    wmb();

    /*
     * Mark everything as being pending just to make sure nothing gets
     * lost.  The domain will get a spurious event, but it can cope.
     */
    vcpu_info(v, evtchn_upcall_pending) = 1;
    for ( i = 0; i < BITS_PER_GUEST_LONG(d); i++ )
        set_bit(i, vcpu_info_addr(v, evtchn_pending_sel));

    /*
     * Only bother to update time for the current vcpu.  If we're
     * operating on another vcpu, then it had better not be running at
     * the time.
     */
    if ( v == current )
        update_vcpu_system_time(v);

    return 0;
}

long
arch_do_vcpu_op(
    int cmd, struct vcpu *v, XEN_GUEST_HANDLE(void) arg)
{
    long rc = 0;

    switch ( cmd )
    {
    case VCPUOP_register_runstate_memory_area:
    {
        struct vcpu_register_runstate_memory_area area;
        struct vcpu_runstate_info runstate;

        rc = -EFAULT;
        if ( copy_from_guest(&area, arg, 1) )
            break;

        if ( !guest_handle_okay(area.addr.h, 1) )
            break;

        rc = 0;
        runstate_guest(v) = area.addr.h;

        if ( v == current )
        {
            __copy_to_guest(runstate_guest(v), &v->runstate, 1);
        }
        else
        {
            vcpu_runstate_get(v, &runstate);
            __copy_to_guest(runstate_guest(v), &runstate, 1);
        }

        break;
    }

    case VCPUOP_register_vcpu_info:
    {
        struct domain *d = v->domain;
        struct vcpu_register_vcpu_info info;

        rc = -EFAULT;
        if ( copy_from_guest(&info, arg, 1) )
            break;

        LOCK_BIGLOCK(d);
        rc = map_vcpu_info(v, info.mfn, info.offset);
        UNLOCK_BIGLOCK(d);

        break;
    }

    case VCPUOP_get_physid:
    {
        struct vcpu_get_physid cpu_id;

        rc = -EINVAL;
        if ( !v->domain->is_pinned )
            break;

        cpu_id.phys_id =
            (uint64_t)x86_cpu_to_apicid[v->vcpu_id] |
            ((uint64_t)acpi_get_processor_id(v->vcpu_id) << 32);
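
The listing continues on the next page. For context, the VCPUOP_register_vcpu_info path handled above by map_vcpu_info() is driven from inside the guest. Below is a minimal guest-side sketch; the helper name register_my_vcpu_info is hypothetical, and it assumes the guest kernel's HYPERVISOR_vcpu_op hypercall wrapper and a virt_to_mfn() address-translation helper, neither of which is defined in this file.

#include <xen/interface/vcpu.h>  /* struct vcpu_register_vcpu_info */

/* Hypothetical guest-side helper: back 'cpu's vcpu_info with our own page. */
static int register_my_vcpu_info(int cpu, struct vcpu_info *vi)
{
    struct vcpu_register_vcpu_info info;

    info.mfn    = virt_to_mfn(vi);                 /* machine frame of vi */
    info.offset = (unsigned long)vi & ~PAGE_MASK;  /* byte offset in page */

    /*
     * Xen's map_vcpu_info() above returns -EINVAL if the structure would
     * cross a page boundary, if a page was already registered, or if the
     * target is another VCPU that is still online.
     */
    return HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);
}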
