vhpt.c
/*
 * Initialize VHPT support.
 *
 * Copyright (C) 2004 Hewlett-Packard Co
 *	Dan Magenheimer <dan.magenheimer@hp.com>
 *
 * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 *                    per vcpu vhpt support
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/vhpt.h>
#include <asm/vcpu.h>
#include <asm/vcpumask.h>
#include <asm/vmmu.h>

DEFINE_PER_CPU (unsigned long, vhpt_paddr);
DEFINE_PER_CPU (unsigned long, vhpt_pend);
#ifdef CONFIG_XEN_IA64_TLBFLUSH_CLOCK
DEFINE_PER_CPU(volatile u32, vhpt_tlbflush_timestamp);
#endif

static void
__vhpt_flush(unsigned long vhpt_maddr, unsigned long vhpt_size_log2)
{
        struct vhpt_lf_entry *v = (struct vhpt_lf_entry*)__va(vhpt_maddr);
        unsigned long num_entries = 1 << (vhpt_size_log2 - 5);
        int i;

        for (i = 0; i < num_entries; i++, v++)
                v->ti_tag = INVALID_TI_TAG;
}

void
local_vhpt_flush(void)
{
        /* increment flush clock before flush */
        u32 flush_time = tlbflush_clock_inc_and_return();

        __vhpt_flush(__ia64_per_cpu_var(vhpt_paddr), VHPT_SIZE_LOG2);
        /* this must be after flush */
        tlbflush_update_time(&__get_cpu_var(vhpt_tlbflush_timestamp),
                             flush_time);
        perfc_incr(local_vhpt_flush);
}

void
vcpu_vhpt_flush(struct vcpu* v)
{
        unsigned long vhpt_size_log2 = VHPT_SIZE_LOG2;

#ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
        if (HAS_PERVCPU_VHPT(v->domain))
                vhpt_size_log2 = v->arch.pta.size;
#endif
        __vhpt_flush(vcpu_vhpt_maddr(v), vhpt_size_log2);
        perfc_incr(vcpu_vhpt_flush);
}

static void
vhpt_erase(unsigned long vhpt_maddr, unsigned long vhpt_size_log2)
{
        struct vhpt_lf_entry *v = (struct vhpt_lf_entry*)__va(vhpt_maddr);
        unsigned long num_entries = 1 << (vhpt_size_log2 - 5);
        int i;

        for (i = 0; i < num_entries; i++, v++) {
                v->itir = 0;
                v->CChain = 0;
                v->page_flags = 0;
                v->ti_tag = INVALID_TI_TAG;
        }
        // initialize cache too???
}

void vhpt_insert (unsigned long vadr, unsigned long pte, unsigned long itir)
{
        struct vhpt_lf_entry *vlfe = (struct vhpt_lf_entry *)ia64_thash(vadr);
        unsigned long tag = ia64_ttag (vadr);

        /* Even though VHPT is per VCPU, still need to first disable the entry,
         * because the processor may support speculative VHPT walk.  */
        vlfe->ti_tag = INVALID_TI_TAG;
        wmb();
        vlfe->itir = itir;
        vlfe->page_flags = pte | _PAGE_P;
        *(volatile unsigned long*)&vlfe->ti_tag = tag;
}
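/*
 * Illustrative sketch (not part of the original file): vhpt_insert() above
 * follows the usual lock-free "invalidate, fill, publish" update order for
 * an entry that the hardware VHPT walker may read at any time.  The entry is
 * parked at INVALID_TI_TAG while its payload is rewritten, so a speculative
 * walk can only miss, never consume a half-written translation.  The names
 * demo_vhpt_entry and demo_vhpt_update are hypothetical; the original relies
 * on a volatile store for the final write, while this sketch uses an
 * explicit barrier instead.
 */
struct demo_vhpt_entry {
        unsigned long itir;
        unsigned long CChain;
        unsigned long page_flags;
        unsigned long ti_tag;
};

static inline void
demo_vhpt_update(struct demo_vhpt_entry *e, unsigned long tag,
                 unsigned long pte, unsigned long itir)
{
        e->ti_tag = INVALID_TI_TAG;     /* 1. walker now misses on this slot */
        wmb();                          /* 2. invalidation visible first */
        e->itir = itir;                 /* 3. fill the payload */
        e->page_flags = pte | _PAGE_P;
        wmb();                          /* 4. payload visible before the tag */
        e->ti_tag = tag;                /* 5. publish the entry */
}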
void vhpt_multiple_insert(unsigned long vaddr, unsigned long pte,
                          unsigned long itir)
{
        unsigned char ps = current->arch.vhpt_pg_shift;
        ia64_itir_t _itir = {.itir = itir};
        unsigned long mask = (1L << _itir.ps) - 1;
        int i;

        if (_itir.ps - ps > 10 && !running_on_sim) {
                // if this happens, we may want to revisit this algorithm
                panic("vhpt_multiple_insert:logps-PAGE_SHIFT>10,spinning..\n");
        }
        if (_itir.ps - ps > 2) {
                // FIXME: Should add counter here to see how often this
                // happens (e.g. for 16MB pages!) and determine if it
                // is a performance problem.  On a quick look, it takes
                // about 39000 instrs for a 16MB page and it seems to occur
                // only a few times/second, so OK for now.
                // An alternate solution would be to just insert the one
                // 16KB in the vhpt (but with the full mapping)?
                //printk("vhpt_multiple_insert: logps-PAGE_SHIFT==%d,"
                //       "va=%p, pa=%p, pa-masked=%p\n",
                //       logps-PAGE_SHIFT,vaddr,pte&_PFN_MASK,
                //       (pte&_PFN_MASK)&~mask);
        }
        vaddr &= ~mask;
        pte = ((pte & _PFN_MASK) & ~mask) | (pte & ~_PFN_MASK);

        for (i = 1L << (_itir.ps - ps); i > 0; i--) {
                vhpt_insert(vaddr, pte, _itir.itir);
                vaddr += (1L << ps);
        }
}
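/*
 * Illustrative helper (not part of the original file): the cost model behind
 * the loop in vhpt_multiple_insert() above.  A translation of 2^itir_ps
 * bytes entered over a base page shift of vhpt_pg_shift takes one
 * long-format insertion per base-size chunk of the virtual range, i.e.
 * 1 << (itir_ps - vhpt_pg_shift) calls to vhpt_insert().  For example, a
 * 16MB page (itir_ps == 24) over 16KB base pages (vhpt_pg_shift == 14)
 * costs 1024 insertions, consistent with the ~39000 instruction estimate
 * in the FIXME above.  The name demo_vhpt_insert_count is hypothetical.
 */
static inline unsigned long
demo_vhpt_insert_count(unsigned char itir_ps, unsigned char vhpt_pg_shift)
{
        return 1UL << (itir_ps - vhpt_pg_shift);
}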
void __init vhpt_init(void)
{
        unsigned long paddr;
        struct page_info *page;
#if !VHPT_ENABLED
        return;
#endif
        /* This allocation only holds if the vhpt table is unique for
         * all domains.  Otherwise a new vhpt table should be allocated
         * from the domain heap when each domain is created.  Assume the
         * xen buddy allocator can provide naturally aligned pages by
         * order? */
        page = alloc_domheap_pages(NULL, VHPT_SIZE_LOG2 - PAGE_SHIFT, 0);
        if (!page)
                panic("vhpt_init: can't allocate VHPT!\n");
        paddr = page_to_maddr(page);
        if (paddr & ((1 << VHPT_SIZE_LOG2) - 1))
                panic("vhpt_init: bad VHPT alignment!\n");
        __get_cpu_var(vhpt_paddr) = paddr;
        __get_cpu_var(vhpt_pend) = paddr + (1 << VHPT_SIZE_LOG2) - 1;
        printk(XENLOG_DEBUG "vhpt_init: vhpt paddr=0x%lx, end=0x%lx\n",
               paddr, __get_cpu_var(vhpt_pend));
        vhpt_erase(paddr, VHPT_SIZE_LOG2);
        // we don't enable VHPT here.
        // context_switch() or schedule_tail() does it.
}

#ifdef CONFIG_XEN_IA64_PERVCPU_VHPT
void
domain_set_vhpt_size(struct domain *d, int8_t vhpt_size_log2)
{
        if (vhpt_size_log2 == -1) {
                d->arch.has_pervcpu_vhpt = 0;
                printk(XENLOG_INFO "XEN_DOMCTL_arch_setup: "
                       "domain %d VHPT is global.\n", d->domain_id);
        } else {
                d->arch.has_pervcpu_vhpt = 1;
                d->arch.vhpt_size_log2 = vhpt_size_log2;
                printk(XENLOG_INFO "XEN_DOMCTL_arch_setup: "
                       "domain %d VHPT is per vcpu. size=2**%d\n",
                       d->domain_id, vhpt_size_log2);
        }
}

int
pervcpu_vhpt_alloc(struct vcpu *v)
{
        unsigned long vhpt_size_log2 = VHPT_SIZE_LOG2;

        if (v->domain->arch.vhpt_size_log2 > 0)
                vhpt_size_log2 =
                        canonicalize_vhpt_size(v->domain->arch.vhpt_size_log2);
        printk(XENLOG_DEBUG "%s vhpt_size_log2=%ld\n",
               __func__, vhpt_size_log2);
        v->arch.vhpt_entries =
                (1UL << vhpt_size_log2) / sizeof(struct vhpt_lf_entry);
        v->arch.vhpt_page =
                alloc_domheap_pages(NULL, vhpt_size_log2 - PAGE_SHIFT, 0);
        if (!v->arch.vhpt_page)
                return -ENOMEM;

        v->arch.vhpt_maddr = page_to_maddr(v->arch.vhpt_page);
        if (v->arch.vhpt_maddr & ((1 << VHPT_SIZE_LOG2) - 1))
                panic("pervcpu_vhpt_init: bad VHPT alignment!\n");

        v->arch.pta.val = 0; // to zero reserved bits
        v->arch.pta.ve = 1; // enable vhpt
        v->arch.pta.size = vhpt_size_log2;
        v->arch.pta.vf = 1; // long format
        v->arch.pta.base = __va_ul(v->arch.vhpt_maddr) >> 15;

        vhpt_erase(v->arch.vhpt_maddr, vhpt_size_log2);
        smp_mb(); // the per vcpu vhpt may be used by another physical cpu.
        return 0;
}

void
pervcpu_vhpt_free(struct vcpu *v)
{
        if (likely(v->arch.vhpt_page != NULL))
                free_domheap_pages(v->arch.vhpt_page,
                                   v->arch.pta.size - PAGE_SHIFT);
}
#endif

void
domain_purge_swtc_entries(struct domain *d)
{
        struct vcpu* v;

        for_each_vcpu(d, v) {
                if (!v->is_initialised)
                        continue;

                /* Purge TC entries.
                   FIXME: clear only if match.  */
                vcpu_purge_tr_entry(&PSCBX(v,dtlb));
                vcpu_purge_tr_entry(&PSCBX(v,itlb));
        }
}

void
domain_purge_swtc_entries_vcpu_dirty_mask(struct domain* d,
                                          vcpumask_t vcpu_dirty_mask)
{
        int vcpu;

        for_each_vcpu_mask(vcpu, vcpu_dirty_mask) {
                struct vcpu* v = d->vcpu[vcpu];
                if (!v->is_initialised)
                        continue;

                /* Purge TC entries.
                   FIXME: clear only if match.  */
                vcpu_purge_tr_entry(&PSCBX(v, dtlb));
                vcpu_purge_tr_entry(&PSCBX(v, itlb));
        }
}

// SMP: we can't assume v == current; the vcpu might move to another
// physical cpu, so a memory barrier is necessary.
// If we can guarantee that the vcpu runs only on this physical cpu
// (e.g. vcpu == current), smp_mb() is unnecessary.
void vcpu_flush_vtlb_all(struct vcpu *v)
{
        /* First VCPU tlb.  */
        vcpu_purge_tr_entry(&PSCBX(v,dtlb));
        vcpu_purge_tr_entry(&PSCBX(v,itlb));
        smp_mb();

        /* Then VHPT.  */
        if (HAS_PERVCPU_VHPT(v->domain))
                vcpu_vhpt_flush(v);
        else
                local_vhpt_flush();
        smp_mb();

        /* Then mTLB.  */
        local_flush_tlb_all();

        /* We could clear the bit in d->domain_dirty_cpumask only if domain d
           is not running on this processor.  There is currently no easy way
           to check this.  */

        perfc_incr(vcpu_flush_vtlb_all);
}

static void __vcpu_flush_vtlb_all(void *vcpu)
{
        vcpu_flush_vtlb_all((struct vcpu*)vcpu);
}

// The caller must have incremented the reference count on d somehow.
void domain_flush_vtlb_all(struct domain* d)
{
        int cpu = smp_processor_id ();
        struct vcpu *v;

        for_each_vcpu(d, v) {
                if (!v->is_initialised)
                        continue;

                if (VMX_DOMAIN(v)) {
                        // This code may be called for remapping shared_info
                        // and grant_table from guest_physmap_remove_page()
                        // in arch_memory_op() XENMEM_add_to_physmap to
                        // realize the PV-on-HVM feature.
                        vmx_vcpu_flush_vtlb_all(v);
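/*
 * Illustrative helpers (not part of the original file): the sizing rules
 * used by vhpt_init() and pervcpu_vhpt_alloc() earlier in this file.  A VHPT
 * of 2^size_log2 bytes holds 2^(size_log2 - 5) long-format entries (each
 * struct vhpt_lf_entry is 32 bytes), is allocated as an order
 * (size_log2 - PAGE_SHIFT) block from the domain heap, and its base is
 * expected to be naturally aligned before it is programmed into pta.base.
 * The demo_* names are hypothetical.
 */
static inline unsigned long
demo_vhpt_num_entries(unsigned long size_log2)
{
        return 1UL << (size_log2 - 5);  /* 32-byte long-format entries */
}

static inline int
demo_vhpt_naturally_aligned(unsigned long maddr, unsigned long size_log2)
{
        return (maddr & ((1UL << size_log2) - 1)) == 0;
}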