📄 vtlb.c
    head = hcb->hash;
    num = (hcb->hash_sz / sizeof(thash_data_t));
    do {
        head->next = 0;
        head++;
        num--;
    } while (num);
    cch_mem_init(hcb);
}

static thash_data_t *__alloc_chain(thash_cb_t *hcb)
{
    thash_data_t *cch;

    cch = cch_alloc(hcb);
    if (cch == NULL) {
        thash_recycle_cch_all(hcb);
        cch = cch_alloc(hcb);
    }
    return cch;
}

/*
 * Insert an entry into the hash TLB or VHPT.
 * NOTES:
 *  1: When inserting a VHPT entry into the thash, "va" must be an address
 *     covered by the inserted machine VHPT entry.
 *  2: The entry is always in TLB format.
 *  3: The caller must make sure the new entry does not overlap
 *     with any existing entry.
 */
static void vtlb_insert(VCPU *v, u64 pte, u64 itir, u64 va)
{
    thash_data_t *hash_table, *cch, *tail;
    /* int flag; */
    ia64_rr vrr;
    /* u64 gppn, ppns, ppne; */
    u64 tag, len;
    thash_cb_t *hcb = &v->arch.vtlb;

    vcpu_quick_region_set(PSCBX(v, tc_regions), va);

    vcpu_get_rr(v, va, &vrr.rrval);
    vrr.ps = itir_ps(itir);
    VMX(v, psbits[va >> 61]) |= (1UL << vrr.ps);
    hash_table = vtlb_thash(hcb->pta, va, vrr.rrval, &tag);
    len = 0;
    cch = hash_table;
    do {
        if (INVALID_TLB(cch)) {
            cch->page_flags = pte;
            cch->itir = itir;
            cch->etag = tag;
            return;
        }
        ++len;
        tail = cch;
        cch = cch->next;
    } while (cch);
    if (len >= MAX_CCN_DEPTH) {
        thash_recycle_cch(hcb, hash_table, tail);
        cch = cch_alloc(hcb);
    } else {
        cch = __alloc_chain(hcb);
    }
    cch->page_flags = pte;
    cch->itir = itir;
    cch->etag = tag;
    cch->next = hash_table->next;
    wmb();
    hash_table->next = cch;
    return;
}

int vtr_find_overlap(VCPU *vcpu, u64 va, u64 ps, int is_data)
{
    thash_data_t *trp;
    int i;
    u64 end, rid;

    vcpu_get_rr(vcpu, va, &rid);
    rid &= RR_RID_MASK;
    end = va + PSIZE(ps);
    if (is_data) {
        if (vcpu_quick_region_check(vcpu->arch.dtr_regions, va)) {
            trp = (thash_data_t *)vcpu->arch.dtrs;
            for (i = 0; i < NDTRS; i++, trp++) {
                if (__is_tr_overlap(trp, rid, va, end))
                    return i;
            }
        }
    } else {
        if (vcpu_quick_region_check(vcpu->arch.itr_regions, va)) {
            trp = (thash_data_t *)vcpu->arch.itrs;
            for (i = 0; i < NITRS; i++, trp++) {
                if (__is_tr_overlap(trp, rid, va, end))
                    return i;
            }
        }
    }
    return -1;
}

/*
 * Purge entries in the VTLB and VHPT.
 */
void thash_purge_entries(VCPU *v, u64 va, u64 ps)
{
    if (vcpu_quick_region_check(v->arch.tc_regions, va))
        vtlb_purge(v, va, ps);
    vhpt_purge(v, va, ps);
}

void thash_purge_entries_remote(VCPU *v, u64 va, u64 ps)
{
    u64 old_va = va;

    va = REGION_OFFSET(va);
    if (vcpu_quick_region_check(v->arch.tc_regions, old_va))
        vtlb_purge(v, va, ps);
    vhpt_purge(v, va, ps);
}

static u64 translate_phy_pte(VCPU *v, u64 pte, u64 itir, u64 va)
{
    u64 ps, ps_mask, paddr, maddr;
    union pte_flags phy_pte;
    struct domain *d = v->domain;

    ps = itir_ps(itir);
    ps_mask = ~((1UL << ps) - 1);
    phy_pte.val = pte;
    paddr = ((pte & _PAGE_PPN_MASK) & ps_mask) | (va & ~ps_mask);
    maddr = lookup_domain_mpa(d, paddr, NULL);
    if (maddr & _PAGE_IO)
        return -1;

    /* Ensure the WB attribute if the pte refers to a normal memory page,
     * which is required by vga acceleration since qemu maps the shared
     * vram buffer with WB. */
    if (phy_pte.ma != VA_MATTR_NATPAGE)
        phy_pte.ma = VA_MATTR_WB;

    maddr = ((maddr & _PAGE_PPN_MASK) & PAGE_MASK) | (paddr & ~PAGE_MASK);
    phy_pte.ppn = maddr >> ARCH_PAGE_SHIFT;

    /* If shadow mode is enabled, virtualize the dirty bit. */
    if (shadow_mode_enabled(d) && phy_pte.d) {
        u64 gpfn = paddr >> PAGE_SHIFT;

        phy_pte.val |= _PAGE_VIRT_D;
        /* If the page is not already dirty, don't set the dirty bit! */
        if (gpfn < d->arch.shadow_bitmap_size * 8 &&
            !test_bit(gpfn, d->arch.shadow_bitmap))
            phy_pte.d = 0;
    }

    return phy_pte.val;
}
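/*
 * Illustrative sketch (not part of vtlb.c): the bucket-plus-collision-chain
 * insert pattern used by vtlb_insert() above, reduced to plain C with
 * hypothetical types.  "entry_t", "demo_hash" and the modulo hash below are
 * placeholders, not the Xen structures.  As in the real code, an invalidated
 * slot already on the chain is reused when possible, and a new node is fully
 * filled in before it is linked after the bucket head; vtlb_insert() issues
 * wmb() at that point so concurrent lookups never see a half-written entry.
 */
#if 0
#include <stdlib.h>

#define NBUCKETS    256
#define INVALID_TAG (1UL << 63)     /* same convention as etag = 1UL << 63 */

typedef struct entry {
    unsigned long tag;              /* lookup key; INVALID_TAG marks a free slot */
    unsigned long pte;              /* payload */
    struct entry *next;             /* collision chain */
} entry_t;

static entry_t demo_hash[NBUCKETS]; /* each element is a bucket head */

static void demo_init(void)
{
    /* Mirrors thash_init(): every bucket head starts out invalid. */
    for (int i = 0; i < NBUCKETS; i++) {
        demo_hash[i].tag = INVALID_TAG;
        demo_hash[i].next = NULL;
    }
}

static void demo_insert(unsigned long va, unsigned long tag, unsigned long pte)
{
    entry_t *head = &demo_hash[(va >> 12) % NBUCKETS];
    entry_t *cch;

    /* First try to reuse an invalidated slot already on the chain. */
    for (cch = head; cch != NULL; cch = cch->next) {
        if (cch->tag == INVALID_TAG) {
            cch->pte = pte;
            cch->tag = tag;
            return;
        }
    }

    /* No free slot: allocate a node and link it right after the head.
     * The real code recycles the chain via __alloc_chain() /
     * thash_recycle_cch() instead of failing. */
    cch = malloc(sizeof(*cch));
    if (cch == NULL)
        return;
    cch->pte = pte;
    cch->tag = tag;
    cch->next = head->next;
    /* vtlb_insert() issues wmb() here before publishing the node. */
    head->next = cch;
}
#endif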
/*
 * Purge overlapping TCs and then insert the new entry to emulate itc ops.
 * Notes: only TC entries can be purged and inserted here.
 * Returns 1 to indicate that the address is MMIO.
 */
int thash_purge_and_insert(VCPU *v, u64 pte, u64 itir, u64 ifa, int type)
{
    u64 ps, phy_pte, psr;
    ia64_rr mrr;

    ps = itir_ps(itir);
    mrr.rrval = ia64_get_rr(ifa);

    phy_pte = translate_phy_pte(v, pte, itir, ifa);

    vtlb_purge(v, ifa, ps);
    vhpt_purge(v, ifa, ps);

    if (phy_pte == -1) {
        vtlb_insert(v, pte, itir, ifa);
        return 1;
    }

    if (ps != mrr.ps)
        vtlb_insert(v, pte, itir, ifa);

    if (ps >= mrr.ps) {
        vmx_vhpt_insert(&v->arch.vhpt, phy_pte, itir, ifa);
    } else { /* Subpaging */
        phy_pte &= ~PAGE_FLAGS_RV_MASK;
        psr = ia64_clear_ic();
        ia64_itc(type + 1, ifa, phy_pte, IA64_ITIR_PS_KEY(ps, 0));
        ia64_set_psr(psr);
        ia64_srlz_i();
    }
    return 0;
}

/*
 * Purge all TC and VHPT entries, including those in the hash table.
 */
//TODO: add sections.
void thash_purge_all(VCPU *v)
{
    int num;
    thash_data_t *head;
    thash_cb_t *vtlb, *vhpt;

    vtlb = &v->arch.vtlb;
    vhpt = &v->arch.vhpt;

    for (num = 0; num < 8; num++)
        VMX(v, psbits[num]) = 0;

    head = vtlb->hash;
    num = (vtlb->hash_sz / sizeof(thash_data_t));
    do {
        head->page_flags = 0;
        head->etag = 1UL << 63;
        head->itir = 0;
        head->next = 0;
        head++;
        num--;
    } while (num);
    cch_mem_init(vtlb);

    head = vhpt->hash;
    num = (vhpt->hash_sz / sizeof(thash_data_t));
    do {
        head->page_flags = 0;
        head->etag = 1UL << 63;
        head->next = 0;
        head++;
        num--;
    } while (num);
    cch_mem_init(vhpt);

    local_flush_tlb_all();
}

static void __thash_purge_all(void *arg)
{
    struct vcpu *v = arg;

    BUG_ON(vcpu_runnable(v) || v->is_running);
    thash_purge_all(v);
}

void vmx_vcpu_flush_vtlb_all(VCPU *v)
{
    if (v == current) {
        thash_purge_all(v);
        return;
    }

    /* SMP safe */
    vcpu_pause(v);
    if (v->processor == smp_processor_id())
        __thash_purge_all(v);
    else
        smp_call_function_single(v->processor, __thash_purge_all, v, 1, 1);
    vcpu_unpause(v);
}

/*
 * Lookup the hash table and its collision chain to find an entry
 * covering this address rid:va.
 *
 * INPUT:
 *  Entries are in TLB format for both VHPT & TLB.
 */
thash_data_t *vtlb_lookup(VCPU *v, u64 va, int is_data)
{
    thash_data_t *cch;
    u64 psbits, ps, tag;
    ia64_rr vrr;
    thash_cb_t *hcb = &v->arch.vtlb;

    cch = __vtr_lookup(v, va, is_data);
    if (cch)
        return cch;

    if (vcpu_quick_region_check(v->arch.tc_regions, va) == 0)
        return NULL;

    psbits = VMX(v, psbits[(va >> 61)]);
    vcpu_get_rr(v, va, &vrr.rrval);
    while (psbits) {
        ps = __ffs(psbits);
        psbits &= ~(1UL << ps);
        vrr.ps = ps;
        cch = vtlb_thash(hcb->pta, va, vrr.rrval, &tag);
        do {
            if (cch->etag == tag && cch->ps == ps)
                return cch;
            cch = cch->next;
        } while (cch);
    }
    return NULL;
}
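/*
 * Illustrative sketch (not part of vtlb.c): how vtlb_lookup() walks the
 * per-region page-size bitmap.  Each set bit in "psbits" records a page size
 * that was ever inserted for that region, so the lookup only rehashes for
 * page sizes that can actually exist.  Xen's __ffs() returns the index of the
 * lowest set bit; __builtin_ctzl() is used below as a stand-in, and the
 * printf payload is purely for demonstration.
 */
#if 0
#include <stdio.h>

static void walk_page_size_bits(unsigned long psbits)
{
    while (psbits) {
        unsigned long ps = __builtin_ctzl(psbits);  /* lowest set bit, like __ffs() */
        psbits &= ~(1UL << ps);                     /* consume it */
        /* vtlb_lookup() recomputes the hash and tag with vrr.ps = ps here
         * and then scans that bucket's collision chain. */
        printf("candidate page size: 2^%lu bytes\n", ps);
    }
}

int main(void)
{
    /* e.g. 16KB (2^14) and 16MB (2^24) pages were inserted for this region */
    walk_page_size_bits((1UL << 14) | (1UL << 24));
    return 0;
}
#endif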
/*
 * Initialize internal control data before service.
 */
static void thash_init(thash_cb_t *hcb, u64 sz)
{
    int num;
    thash_data_t *head;

    hcb->pta.val = (unsigned long)hcb->hash;
    hcb->pta.vf = 1;
    hcb->pta.ve = 1;
    hcb->pta.size = sz;

    head = hcb->hash;
    num = (hcb->hash_sz / sizeof(thash_data_t));
    do {
        head->page_flags = 0;
        head->itir = 0;
        head->etag = 1UL << 63;
        head->next = 0;
        head++;
        num--;
    } while (num);

    hcb->cch_free_idx = 0;
    hcb->cch_freelist = NULL;
}

int thash_alloc(thash_cb_t *hcb, u64 sz_log2, char *what)
{
    struct page_info *page;
    void *vbase;
    u64 sz = 1UL << sz_log2;

    page = alloc_domheap_pages(NULL, (sz_log2 + 1 - PAGE_SHIFT), 0);
    if (page == NULL) {
        /* sz >> (10 - 1) == (2 * sz) in KB: the message reports the full
         * hash + collision-chain allocation. */
        printk("Not enough contiguous memory(%ldKB) for init_domain_%s\n",
               sz >> (10 - 1), what);
        return -ENOMEM;
    }
    vbase = page_to_virt(page);
    memset(vbase, 0, sz + sz); // hash + collision chain
    if (sz_log2 >= 20 - 1)
        printk(XENLOG_DEBUG "Allocate domain %s at 0x%p(%ldMB)\n",
               what, vbase, sz >> (20 - 1));
    else
        printk(XENLOG_DEBUG "Allocate domain %s at 0x%p(%ldKB)\n",
               what, vbase, sz >> (10 - 1));
    hcb->hash = vbase;
    hcb->hash_sz = sz;
    hcb->cch_buf = (void *)((u64)vbase + hcb->hash_sz);
    hcb->cch_sz = sz;
    thash_init(hcb, sz_log2);
    return 0;
}

void thash_free(thash_cb_t *hcb)
{
    struct page_info *page;

    if (hcb->hash) {
        page = virt_to_page(hcb->hash);
        free_domheap_pages(page, hcb->pta.size + 1 - PAGE_SHIFT);
        hcb->hash = 0;
    }
}
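/*
 * Illustrative sketch (not part of vtlb.c): the memory layout produced by
 * thash_alloc()/thash_init() above.  One contiguous block of 2 * sz bytes is
 * obtained (alloc_domheap_pages() with order sz_log2 + 1 - PAGE_SHIFT); the
 * first sz bytes become the hash buckets and the second sz bytes the
 * collision-chain node pool, and every bucket starts out tagged invalid.
 * The types and malloc() below are stand-ins for the Xen allocator and
 * thash_data_t, used only to show the split.
 */
#if 0
#include <stdlib.h>
#include <string.h>

#define DEMO_INVALID_TAG (1UL << 63)

typedef struct demo_entry {
    unsigned long pte, itir, tag;
    struct demo_entry *next;
} demo_entry_t;

typedef struct {
    demo_entry_t *hash;      /* first half of the block: the bucket array */
    demo_entry_t *cch_buf;   /* second half: pool for collision-chain nodes */
    unsigned long hash_sz;   /* bytes in each half */
} demo_cb_t;

static int demo_thash_alloc(demo_cb_t *cb, unsigned long sz_log2)
{
    unsigned long sz = 1UL << sz_log2;
    void *vbase = malloc(sz + sz);          /* hash + collision-chain pool */

    if (vbase == NULL)
        return -1;
    memset(vbase, 0, sz + sz);

    cb->hash = vbase;
    cb->hash_sz = sz;
    cb->cch_buf = (demo_entry_t *)((char *)vbase + sz);

    /* Mirror thash_init(): mark every bucket head as invalid. */
    for (unsigned long n = 0; n < sz / sizeof(demo_entry_t); n++) {
        cb->hash[n].tag = DEMO_INVALID_TAG;
        cb->hash[n].next = NULL;
    }
    return 0;
}
#endif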