
📄 domain.c

📁 xen 3.2.2 source code
💻 C
📖 Page 1 of 4
    regs->eax = info->func(info->data);

    v->arch.schedule_tail = info->saved_schedule_tail;
    v->arch.continue_info = NULL;

    xfree(info);

    vcpu_unlock_affinity(v, &mask);
    schedule_tail(v);
}

int continue_hypercall_on_cpu(int cpu, long (*func)(void *data), void *data)
{
    struct vcpu *v = current;
    struct migrate_info *info;
    int rc;

    if ( cpu == smp_processor_id() )
        return func(data);

    info = xmalloc(struct migrate_info);
    if ( info == NULL )
        return -ENOMEM;

    info->func = func;
    info->data = data;
    info->saved_schedule_tail = v->arch.schedule_tail;
    info->saved_affinity = cpumask_of_cpu(cpu);

    v->arch.schedule_tail = continue_hypercall_on_cpu_helper;
    v->arch.continue_info = info;

    rc = vcpu_lock_affinity(v, &info->saved_affinity);
    if ( rc )
    {
        v->arch.schedule_tail = info->saved_schedule_tail;
        v->arch.continue_info = NULL;
        xfree(info);
        return rc;
    }

    /* Dummy return value will be overwritten by new schedule_tail. */
    BUG_ON(!test_bit(SCHEDULE_SOFTIRQ, &softirq_pending(smp_processor_id())));
    return 0;
}

#define next_arg(fmt, args) ({                                              \
    unsigned long __arg;                                                    \
    switch ( *(fmt)++ )                                                     \
    {                                                                       \
    case 'i': __arg = (unsigned long)va_arg(args, unsigned int);  break;    \
    case 'l': __arg = (unsigned long)va_arg(args, unsigned long); break;    \
    case 'h': __arg = (unsigned long)va_arg(args, void *);        break;    \
    default:  __arg = 0; BUG();                                             \
    }                                                                       \
    __arg;                                                                  \
})

DEFINE_PER_CPU(char, hc_preempted);

unsigned long hypercall_create_continuation(
    unsigned int op, const char *format, ...)
{
    struct mc_state *mcs = &this_cpu(mc_state);
    struct cpu_user_regs *regs;
    const char *p = format;
    unsigned long arg;
    unsigned int i;
    va_list args;

    va_start(args, format);

    if ( test_bit(_MCSF_in_multicall, &mcs->flags) )
    {
        __set_bit(_MCSF_call_preempted, &mcs->flags);

        for ( i = 0; *p != '\0'; i++ )
            mcs->call.args[i] = next_arg(p, args);
        if ( is_pv_32on64_domain(current->domain) )
        {
            for ( ; i < 6; i++ )
                mcs->call.args[i] = 0;
        }
    }
    else
    {
        regs       = guest_cpu_user_regs();
        regs->eax  = op;

        /*
         * For PV guest, we update EIP to re-execute 'syscall' / 'int 0x82';
         * HVM does not need this since 'vmcall' / 'vmmcall' is fault-like.
         */
        if ( !is_hvm_vcpu(current) )
            regs->eip -= 2;  /* re-execute 'syscall' / 'int 0x82' */

#ifdef __x86_64__
        if ( !is_hvm_vcpu(current) ?
             !is_pv_32on64_vcpu(current) :
             (hvm_guest_x86_mode(current) == 8) )
        {
            for ( i = 0; *p != '\0'; i++ )
            {
                arg = next_arg(p, args);
                switch ( i )
                {
                case 0: regs->rdi = arg; break;
                case 1: regs->rsi = arg; break;
                case 2: regs->rdx = arg; break;
                case 3: regs->r10 = arg; break;
                case 4: regs->r8  = arg; break;
                case 5: regs->r9  = arg; break;
                }
            }
        }
        else
#endif
        {
            if ( supervisor_mode_kernel )
                regs->eip &= ~31; /* re-execute entire hypercall entry stub */

            for ( i = 0; *p != '\0'; i++ )
            {
                arg = next_arg(p, args);
                switch ( i )
                {
                case 0: regs->ebx = arg; break;
                case 1: regs->ecx = arg; break;
                case 2: regs->edx = arg; break;
                case 3: regs->esi = arg; break;
                case 4: regs->edi = arg; break;
                case 5: regs->ebp = arg; break;
                }
            }
        }

        this_cpu(hc_preempted) = 1;
    }

    va_end(args);

    return op;
}

#ifdef CONFIG_COMPAT
int hypercall_xlat_continuation(unsigned int *id, unsigned int mask, ...)
{
    int rc = 0;
    struct mc_state *mcs = &this_cpu(mc_state);
    struct cpu_user_regs *regs;
    unsigned int i, cval = 0;
    unsigned long nval = 0;
    va_list args;

    BUG_ON(*id > 5);
    BUG_ON(mask & (1U << *id));

    va_start(args, mask);

    if ( test_bit(_MCSF_in_multicall, &mcs->flags) )
    {
        if ( !test_bit(_MCSF_call_preempted, &mcs->flags) )
            return 0;
        for ( i = 0; i < 6; ++i, mask >>= 1 )
        {
            if ( mask & 1 )
            {
                nval = va_arg(args, unsigned long);
                cval = va_arg(args, unsigned int);
                if ( cval == nval )
                    mask &= ~1U;
                else
                    BUG_ON(nval == (unsigned int)nval);
            }
            else if ( id && *id == i )
            {
                *id = mcs->call.args[i];
                id = NULL;
            }
            if ( (mask & 1) && mcs->call.args[i] == nval )
            {
                mcs->call.args[i] = cval;
                ++rc;
            }
            else
                BUG_ON(mcs->call.args[i] != (unsigned int)mcs->call.args[i]);
        }
    }
    else
    {
        regs = guest_cpu_user_regs();
        for ( i = 0; i < 6; ++i, mask >>= 1 )
        {
            unsigned long *reg;

            switch ( i )
            {
            case 0: reg = &regs->ebx; break;
            case 1: reg = &regs->ecx; break;
            case 2: reg = &regs->edx; break;
            case 3: reg = &regs->esi; break;
            case 4: reg = &regs->edi; break;
            case 5: reg = &regs->ebp; break;
            default: BUG(); reg = NULL; break;
            }
            if ( (mask & 1) )
            {
                nval = va_arg(args, unsigned long);
                cval = va_arg(args, unsigned int);
                if ( cval == nval )
                    mask &= ~1U;
                else
                    BUG_ON(nval == (unsigned int)nval);
            }
            else if ( id && *id == i )
            {
                *id = *reg;
                id = NULL;
            }
            if ( (mask & 1) && *reg == nval )
            {
                *reg = cval;
                ++rc;
            }
            else
                BUG_ON(*reg != (unsigned int)*reg);
        }
    }

    va_end(args);

    return rc;
}
#endif

static int relinquish_memory(
    struct domain *d, struct list_head *list, unsigned long type)
{
    struct list_head *ent;
    struct page_info  *page;
    unsigned long     x, y;
    int               ret = 0;

    /* Use a recursive lock, as we may enter 'free_domheap_page'. */
    spin_lock_recursive(&d->page_alloc_lock);

    ent = list->next;
    while ( ent != list )
    {
        page = list_entry(ent, struct page_info, list);

        /* Grab a reference to the page so it won't disappear from under us. */
        if ( unlikely(!get_page(page, d)) )
        {
            /* Couldn't get a reference -- someone is freeing this page. */
            ent = ent->next;
            list_move_tail(&page->list, &d->arch.relmem_list);
            continue;
        }

        if ( test_and_clear_bit(_PGT_pinned, &page->u.inuse.type_info) )
            put_page_and_type(page);

        if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
            put_page(page);

        /*
         * Forcibly invalidate top-most, still valid page tables at this point
         * to break circular 'linear page table' references. This is okay
         * because MMU structures are not shared across domains and this domain
         * is now dead. Thus top-most valid tables are not in use so a non-zero
         * count means circular reference.
         */
        y = page->u.inuse.type_info;
        for ( ; ; )
        {
            x = y;
            if ( likely((x & (PGT_type_mask|PGT_validated)) !=
                        (type|PGT_validated)) )
                break;

            y = cmpxchg(&page->u.inuse.type_info, x, x & ~PGT_validated);
            if ( likely(y == x) )
            {
                free_page_type(page, type);
                break;
            }
        }

        /* Follow the list chain and /then/ potentially free the page. */
        ent = ent->next;
        list_move_tail(&page->list, &d->arch.relmem_list);
        put_page(page);

        if ( hypercall_preempt_check() )
        {
            ret = -EAGAIN;
            goto out;
        }
    }

    list_splice_init(&d->arch.relmem_list, list);

 out:
    spin_unlock_recursive(&d->page_alloc_lock);
    return ret;
}

static void vcpu_destroy_pagetables(struct vcpu *v)
{
    struct domain *d = v->domain;
    unsigned long pfn;

#ifdef __x86_64__
    if ( is_pv_32on64_vcpu(v) )
    {
        pfn = l4e_get_pfn(*(l4_pgentry_t *)
                          __va(pagetable_get_paddr(v->arch.guest_table)));

        if ( pfn != 0 )
        {
            if ( paging_mode_refcounts(d) )
                put_page(mfn_to_page(pfn));
            else
                put_page_and_type(mfn_to_page(pfn));
        }

        l4e_write(
            (l4_pgentry_t *)__va(pagetable_get_paddr(v->arch.guest_table)),
            l4e_empty());

        v->arch.cr3 = 0;
        return;
    }
#endif

    pfn = pagetable_get_pfn(v->arch.guest_table);
    if ( pfn != 0 )
    {
        if ( paging_mode_refcounts(d) )
            put_page(mfn_to_page(pfn));
        else
            put_page_and_type(mfn_to_page(pfn));
        v->arch.guest_table = pagetable_null();
    }

#ifdef __x86_64__
    /* Drop ref to guest_table_user (from MMUEXT_NEW_USER_BASEPTR) */
    pfn = pagetable_get_pfn(v->arch.guest_table_user);
    if ( pfn != 0 )
    {
        if ( !is_pv_32bit_vcpu(v) )
        {
            if ( paging_mode_refcounts(d) )
                put_page(mfn_to_page(pfn));
            else
                put_page_and_type(mfn_to_page(pfn));
        }
        v->arch.guest_table_user = pagetable_null();
    }
#endif

    v->arch.cr3 = 0;
}

int domain_relinquish_resources(struct domain *d)
{
    int ret;
    struct vcpu *v;

    BUG_ON(!cpus_empty(d->domain_dirty_cpumask));

    switch ( d->arch.relmem )
    {
    case RELMEM_not_started:
        /* Tear down paging-assistance stuff. */
        paging_teardown(d);

        for_each_vcpu ( d, v )
        {
            /* Drop the in-use references to page-table bases. */
            vcpu_destroy_pagetables(v);

            /*
             * Relinquish GDT mappings. No need for explicit unmapping of the
             * LDT as it automatically gets squashed with the guest mappings.
             */
            destroy_gdt(v);

            unmap_vcpu_info(v);
        }

        d->arch.relmem = RELMEM_xen_l4;
        /* fallthrough */

        /* Relinquish every page of memory. */
    case RELMEM_xen_l4:
#if CONFIG_PAGING_LEVELS >= 4
        ret = relinquish_memory(d, &d->xenpage_list, PGT_l4_page_table);
        if ( ret )
            return ret;
        d->arch.relmem = RELMEM_dom_l4;
        /* fallthrough */
    case RELMEM_dom_l4:
        ret = relinquish_memory(d, &d->page_list, PGT_l4_page_table);
        if ( ret )
            return ret;
        d->arch.relmem = RELMEM_xen_l3;
        /* fallthrough */
#endif
    case RELMEM_xen_l3:
#if CONFIG_PAGING_LEVELS >= 3
        ret = relinquish_memory(d, &d->xenpage_list, PGT_l3_page_table);
        if ( ret )
            return ret;
        d->arch.relmem = RELMEM_dom_l3;
        /* fallthrough */
    case RELMEM_dom_l3:
        ret = relinquish_memory(d, &d->page_list, PGT_l3_page_table);
        if ( ret )
            return ret;
        d->arch.relmem = RELMEM_xen_l2;
        /* fallthrough */
#endif
    case RELMEM_xen_l2:
        ret = relinquish_memory(d, &d->xenpage_list, PGT_l2_page_table);
        if ( ret )
            return ret;
        d->arch.relmem = RELMEM_dom_l2;
        /* fallthrough */
    case RELMEM_dom_l2:
        ret = relinquish_memory(d, &d->page_list, PGT_l2_page_table);
        if ( ret )
            return ret;
        d->arch.relmem = RELMEM_done;
        /* fallthrough */
    case RELMEM_done:
        break;

    default:
        BUG();
    }

    /* Free page used by xen oprofile buffer. */
    free_xenoprof_pages(d);

    if ( is_hvm_domain(d) )
        hvm_domain_relinquish_resources(d);

    return 0;
}

void arch_dump_domain_info(struct domain *d)
{
    paging_dump_domain_info(d);
}

void arch_dump_vcpu_info(struct vcpu *v)
{
    paging_dump_vcpu_info(v);
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */
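
Usage note: hypercall_create_continuation is not invoked by guests directly. A long-running hypercall handler calls it when hypercall_preempt_check() reports a pending softirq; the guest's argument registers are reloaded and EIP rewound so the hypercall transparently re-executes with updated arguments. Below is a minimal caller-side sketch; the handler name do_example_op and the hypercall number __HYPERVISOR_example_op are hypothetical, but real handlers in this tree (e.g. do_mmuext_op) follow the same pattern.

/*
 * Hypothetical sketch -- not part of domain.c. A batched hypercall
 * processes work in chunks and, when preempted, reloads its remaining
 * arguments so the guest re-issues the hypercall where it left off.
 */
static long do_example_op(XEN_GUEST_HANDLE(void) uops, unsigned int count)
{
    long rc = 0;

    while ( count != 0 )
    {
        if ( hypercall_preempt_check() )
        {
            /* Format string: 'h' = guest handle, 'i' = unsigned int
             * (see next_arg() above). */
            rc = hypercall_create_continuation(
                __HYPERVISOR_example_op, "hi", uops, count);
            break;
        }

        /* ... process one unit of work referenced by uops ... */
        count--;
    }

    return rc;
}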

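Similarly, continue_hypercall_on_cpu funnels a function onto a specific physical CPU: if the caller is already there it runs synchronously; otherwise the vcpu's affinity is locked to the target CPU and the function runs from the replacement schedule_tail, with its return value patched into the guest's regs->eax by continue_hypercall_on_cpu_helper. A hedged caller sketch follows, with all names illustrative.

/*
 * Hypothetical sketch -- not part of domain.c. Run op_on_cpu() on
 * physical CPU 'target_cpu', regardless of where we start.
 */
static long op_on_cpu(void *data)
{
    /* By the time this body runs, we are executing on the target CPU. */
    unsigned long *out = data;
    *out = smp_processor_id();   /* illustrative payload */
    return 0;
}

static long dispatch_to_cpu(int target_cpu, unsigned long *out)
{
    /*
     * Returns op_on_cpu()'s result directly when already on target_cpu;
     * otherwise returns a dummy 0 now, and the real return value is
     * written to regs->eax once the migrated continuation completes.
     */
    return continue_hypercall_on_cpu(target_cpu, op_on_cpu, out);
}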