⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 paging.h

📁 xen虚拟机源代码安装包
💻 H
📖 第 1 页 / 共 2 页
字号:
 * Called from pagefault handler in Xen, and from the HVM trap handlers
 * for pagefaults.  Returns 1 if this fault was an artefact of the
 * paging code (and the guest should retry) or 0 if it is not (and the
 * fault should be handled elsewhere or passed to the guest).
 *
 * Note: under shadow paging, this function handles all page faults;
 * however, for hardware-assisted paging, this function handles only
 * host page faults (i.e. nested page faults). */
static inline int
paging_fault(unsigned long va, struct cpu_user_regs *regs)
{
    struct vcpu *v = current;
    /* Dispatch through the per-vcpu paging-mode ops table. */
    return v->arch.paging.mode->page_fault(v, va, regs);
}

/* Handle invlpg requests on vcpus.
 * Returns 1 if the invlpg instruction should be issued on the hardware,
 * or 0 if it's safe not to do so. */
static inline int paging_invlpg(struct vcpu *v, unsigned long va)
{
    return v->arch.paging.mode->invlpg(v, va);
}

/* Translate a guest virtual address to the frame number that the
 * *guest* pagetables would map it to.  Returns INVALID_GFN if the guest
 * tables don't map this address for this kind of access.
 * pfec[0] is used to determine which kind of access this is when
 * walking the tables.  The caller should set the PFEC_page_present bit
 * in pfec[0]; in the failure case, that bit will be cleared if appropriate. */
#define INVALID_GFN (-1UL)
static inline unsigned long paging_gva_to_gfn(struct vcpu *v,
                                              unsigned long va,
                                              uint32_t *pfec)
{
    return v->arch.paging.mode->gva_to_gfn(v, va, pfec);
}

/* Update all the things that are derived from the guest's CR3.
 * Called when the guest changes CR3; the caller can then use v->arch.cr3
 * as the value to load into the host CR3 to schedule this vcpu */
static inline void paging_update_cr3(struct vcpu *v)
{
    v->arch.paging.mode->update_cr3(v, 1);
}

/* Update all the things that are derived from the guest's CR0/CR3/CR4.
 * Called to initialize paging structures if the paging mode
 * has changed, and when bringing up a VCPU for the first time. */
static inline void paging_update_paging_modes(struct vcpu *v)
{
    v->arch.paging.mode->update_paging_modes(v);
}

/* Write a new value into the guest pagetable, and update the
 * paging-assistance state appropriately.  Returns 0 if we page-faulted,
 * 1 for success. */
static inline int paging_write_guest_entry(struct vcpu *v, intpte_t *p,
                                           intpte_t new, mfn_t gmfn)
{
    /* Only route through the paging-mode hook when paging assistance is
     * actually active for this domain; otherwise write directly. */
    if ( unlikely(paging_mode_enabled(v->domain)
                  && v->arch.paging.mode != NULL) )
        return v->arch.paging.mode->write_guest_entry(v, p, new, gmfn);
    else
        /* __copy_to_user returns 0 on success, hence the inversion. */
        return (!__copy_to_user(p, &new, sizeof(new)));
}

/* Cmpxchg a new value into the guest pagetable, and update the
 * paging-assistance state appropriately.  Returns 0 if we page-faulted,
 * 1 if not.  N.B. caller should check the value of "old" to see if the
 * cmpxchg itself was successful. */
static inline int paging_cmpxchg_guest_entry(struct vcpu *v, intpte_t *p,
                                             intpte_t *old, intpte_t new,
                                             mfn_t gmfn)
{
    if ( unlikely(paging_mode_enabled(v->domain)
                  && v->arch.paging.mode != NULL) )
        return v->arch.paging.mode->cmpxchg_guest_entry(v, p, old, new, gmfn);
    else
        return (!cmpxchg_user(p, *old, new));
}

/* Helper function that writes a pte in such a way that a concurrent read
 * never sees a half-written entry that has _PAGE_PRESENT set */
static inline void safe_write_pte(l1_pgentry_t *p, l1_pgentry_t new)
{
#if CONFIG_PAGING_LEVELS == 3
    /* PAE machines write 64bit PTEs as two 32bit writes.  Clear the low
     * word (which holds the present bit) first, then fill in the high
     * word, then the low word: a concurrent reader can therefore never
     * observe the present bit set alongside a stale high half. */
    volatile unsigned long *d = (unsigned long *) p;
    unsigned long *s = (unsigned long *) &new;
    BUILD_BUG_ON(sizeof (l1_pgentry_t) != 2 * sizeof (unsigned long));
    d[0] = 0;
    d[1] = s[1];
    d[0] = s[0];
#else
    /* Non-PAE: the whole entry is written in a single store. */
    *p = new;
#endif
}

/* Atomically write a P2M entry and update the paging-assistance state
 * appropriately.
 * Arguments: the domain in question, the GFN whose mapping is being updated,
 * a pointer to the entry to be written, the MFN in which the entry resides,
 * the new contents of the entry, and the level in the p2m tree at which
 * we are writing. */
static inline void paging_write_p2m_entry(struct domain *d, unsigned long gfn,
                                          l1_pgentry_t *p, mfn_t table_mfn,
                                          l1_pgentry_t new, unsigned int level)
{
    struct vcpu *v = current;
    /* The paging-mode hook needs a vcpu of the target domain; if the
     * current vcpu belongs to another domain, use the target's vcpu 0. */
    if ( v->domain != d )
        v = d->vcpu[0];
    if ( likely(v && paging_mode_enabled(d) && v->arch.paging.mode != NULL) )
    {
        return v->arch.paging.mode->write_p2m_entry(v, gfn, p, table_mfn,
                                                    new, level);
    }
    else
        safe_write_pte(p, new);
}

/* Print paging-assistance info to the console */
void paging_dump_domain_info(struct domain *d);
void paging_dump_vcpu_info(struct vcpu *v);

/*****************************************************************************
 * Access to the guest pagetables */

/* Get a mapping of a PV guest's l1e for this virtual address.
 * On success, *gl1mfn is set to the frame containing the l1e; the caller
 * must release the mapping with guest_unmap_l1e(). */
static inline void *
guest_map_l1e(struct vcpu *v, unsigned long addr, unsigned long *gl1mfn)
{
    l2_pgentry_t l2e;

    /* Translated domains go through the paging-mode hook. */
    if ( unlikely(paging_mode_translate(v->domain)) )
        return v->arch.paging.mode->guest_map_l1e(v, addr, gl1mfn);

    /* Find this l1e and its enclosing l1mfn in the linear map */
    if ( __copy_from_user(&l2e,
                          &__linear_l2_table[l2_linear_offset(addr)],
                          sizeof(l2_pgentry_t)) != 0 )
        return NULL;
    /* Check flags that it will be safe to read the l1e: the entry must
     * be present and must not be a superpage (PSE) mapping. */
    if ( (l2e_get_flags(l2e) & (_PAGE_PRESENT | _PAGE_PSE))
         != _PAGE_PRESENT )
        return NULL;
    *gl1mfn = l2e_get_pfn(l2e);
    return &__linear_l1_table[l1_linear_offset(addr)];
}

/* Pull down the mapping we got from guest_map_l1e() */
static inline void
guest_unmap_l1e(struct vcpu *v, void *p)
{
    /* Only the translated path took a real domain-page mapping;
     * the linear-map path needs no teardown. */
    if ( unlikely(paging_mode_translate(v->domain)) )
        unmap_domain_page(p);
}

/* Read the guest's l1e that maps this address. */
static inline void
guest_get_eff_l1e(struct vcpu *v, unsigned long addr, void *eff_l1e)
{
    if ( likely(!paging_mode_translate(v->domain)) )
    {
        ASSERT(!paging_mode_external(v->domain));
        /* On read failure, report an empty (not-present) entry. */
        if ( __copy_from_user(eff_l1e,
                              &__linear_l1_table[l1_linear_offset(addr)],
                              sizeof(l1_pgentry_t)) != 0 )
            *(l1_pgentry_t *)eff_l1e = l1e_empty();
        return;
    }

    v->arch.paging.mode->guest_get_eff_l1e(v, addr, eff_l1e);
}

/* Read the guest's l1e that maps this address, from the kernel-mode
 * pagetables. */
static inline void
guest_get_eff_kern_l1e(struct vcpu *v, unsigned long addr, void *eff_l1e)
{
#if defined(__x86_64__)
    /* On x86_64 a PV guest may be running on its user-mode pagetables;
     * temporarily switch to kernel mode around the read if so. */
    int user_mode = !(v->arch.flags & TF_kernel_mode);
#define TOGGLE_MODE() if ( user_mode ) toggle_guest_mode(v)
#else
#define TOGGLE_MODE() ((void)0)
#endif

    TOGGLE_MODE();
    guest_get_eff_l1e(v, addr, eff_l1e);
    TOGGLE_MODE();
}

#endif /* XEN_PAGING_H */

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -