
📄 vhpt.c

📁 Xen virtual machine source code package
💻 C
📖 Page 1 / 2
			continue;
		}
		if (v->processor == cpu)
			vcpu_flush_vtlb_all(v);
		else
			// SMP: it is racy to reference v->processor.
			// The vcpu scheduler may move this vcpu to another
			// physical processor and change the value
			// using a plain store.
			// We may be seeing the old value of it.
			// In such a case, flush_vtlb_for_context_switch()
			// takes care of the mTLB flush.
			smp_call_function_single(v->processor,
						 __vcpu_flush_vtlb_all,
						 v, 1, 1);
	}
	perfc_incr(domain_flush_vtlb_all);
}

// Callers may need to call smp_mb() before/after calling this.
// Be careful.
static void
__flush_vhpt_range(unsigned long vhpt_maddr, u64 vadr, u64 addr_range)
{
	void *vhpt_base = __va(vhpt_maddr);
	u64 pgsz = 1L << current->arch.vhpt_pg_shift;
	u64 purge_addr = vadr & PAGE_MASK;

	/* Widen the range so it covers the partially-touched first and
	   last pages, then invalidate one hashed entry per page.  */
	addr_range += vadr - purge_addr;
	addr_range = PAGE_ALIGN(addr_range);
	while ((long)addr_range > 0) {
		/* Get the VHPT entry.  */
		unsigned int off = ia64_thash(purge_addr) -
			__va_ul(vcpu_vhpt_maddr(current));
		struct vhpt_lf_entry *v = vhpt_base + off;
		v->ti_tag = INVALID_TI_TAG;
		addr_range -= pgsz;
		purge_addr += pgsz;
	}
}

static void
cpu_flush_vhpt_range(int cpu, u64 vadr, u64 addr_range)
{
	__flush_vhpt_range(per_cpu(vhpt_paddr, cpu), vadr, addr_range);
}

static void
vcpu_flush_vhpt_range(struct vcpu* v, u64 vadr, u64 addr_range)
{
	__flush_vhpt_range(vcpu_vhpt_maddr(v), vadr, addr_range);
}

void vcpu_flush_tlb_vhpt_range (u64 vadr, u64 log_range)
{
	if (HAS_PERVCPU_VHPT(current->domain))
		vcpu_flush_vhpt_range(current, vadr, 1UL << log_range);
	else
		cpu_flush_vhpt_range(current->processor,
		                     vadr, 1UL << log_range);
	ia64_ptcl(vadr, log_range << 2);
	ia64_srlz_i();
	perfc_incr(vcpu_flush_tlb_vhpt_range);
}

void domain_flush_vtlb_range (struct domain *d, u64 vadr, u64 addr_range)
{
	struct vcpu *v;

#if 0
	// this only seems to occur at shutdown, but it does occur
	if ((!addr_range) || addr_range & (addr_range - 1)) {
		printk("vhpt_flush_address: weird range, spinning...\n");
		while(1);
	}
#endif

	domain_purge_swtc_entries(d);
	smp_mb();

	for_each_vcpu (d, v) {
		if (!v->is_initialised)
			continue;

		if (HAS_PERVCPU_VHPT(d)) {
			vcpu_flush_vhpt_range(v, vadr, addr_range);
		} else {
			// SMP: it is racy to reference v->processor.
			// The vcpu scheduler may move this vcpu to another
			// physical processor and change the value
			// using a plain store.
			// We may be seeing the old value of it.
			// In such a case, flush_vtlb_for_context_switch()
			// takes care of the mTLB flush.

			/* Invalidate VHPT entries.  */
			cpu_flush_vhpt_range(v->processor, vadr, addr_range);
		}
	}
	// ptc.ga has release semantics.

	/* ptc.ga  */
	platform_global_tlb_purge(vadr, vadr + addr_range,
				  current->arch.vhpt_pg_shift);

	perfc_incr(domain_flush_vtlb_range);
}

#ifdef CONFIG_XEN_IA64_TLB_TRACK
#include <asm/tlb_track.h>
#include <asm/vmx_vcpu.h>

void
__domain_flush_vtlb_track_entry(struct domain* d,
                                const struct tlb_track_entry* entry)
{
	unsigned long rr7_rid;
	int swap_rr0 = 0;
	unsigned long old_rid;
	unsigned long vaddr = entry->vaddr;
	struct vcpu* v;
	int cpu;
	int vcpu;
	int local_purge = 1;

	/* TLB insert tracking is done in PAGE_SIZE units.  */
	unsigned char ps = max_t(unsigned char,
				 current->arch.vhpt_pg_shift, PAGE_SHIFT);
	/* This case isn't supported (yet).  */
	BUG_ON(current->arch.vhpt_pg_shift > PAGE_SHIFT);

	BUG_ON((vaddr >> VRN_SHIFT) != VRN7);
	/*
	 * heuristic:
	 * dom0linux accesses grant mapped pages via the kernel
	 * straight mapped area and it doesn't change the rr7 rid.
	 * So it is likely that rr7 == entry->rid, so that
	 * we can avoid a rid change.
	 * When blktap is supported, this heuristic should be revised.
	 */
	vcpu_get_rr(current, VRN7 << VRN_SHIFT, &rr7_rid);
	if (likely(rr7_rid == entry->rid)) {
		perfc_incr(tlb_track_use_rr7);
	} else {
		swap_rr0 = 1;
		vaddr = (vaddr << 3) >> 3;	/* force vrn0 */
		perfc_incr(tlb_track_swap_rr0);
	}

	// tlb_track_entry_printf(entry);
	if (swap_rr0) {
		vcpu_get_rr(current, 0, &old_rid);
		vcpu_set_rr(current, 0, entry->rid);
	}

	if (HAS_PERVCPU_VHPT(d)) {
		for_each_vcpu_mask(vcpu, entry->vcpu_dirty_mask) {
			v = d->vcpu[vcpu];
			if (!v->is_initialised)
				continue;

			/* Invalidate VHPT entries.  */
			vcpu_flush_vhpt_range(v, vaddr, 1L << ps);

			/*
			 * current->processor == v->processor
			 * is racy. We may see an old v->processor, and
			 * a new physical processor of v might see an old
			 * vhpt entry and insert the tlb.
			 */
			if (v != current)
				local_purge = 0;
		}
	} else {
		for_each_cpu_mask(cpu, entry->pcpu_dirty_mask) {
			/* Invalidate VHPT entries.  */
			cpu_flush_vhpt_range(cpu, vaddr, 1L << ps);

			if (d->vcpu[cpu] != current)
				local_purge = 0;
		}
	}

	/* ptc.ga  */
	if (local_purge) {
		ia64_ptcl(vaddr, ps << 2);
		perfc_incr(domain_flush_vtlb_local);
	} else {
		/* ptc.ga has release semantics. */
		platform_global_tlb_purge(vaddr, vaddr + (1L << ps), ps);
		perfc_incr(domain_flush_vtlb_global);
	}

	if (swap_rr0) {
		vcpu_set_rr(current, 0, old_rid);
	}
	perfc_incr(domain_flush_vtlb_track_entry);
}

void
domain_flush_vtlb_track_entry(struct domain* d,
                              const struct tlb_track_entry* entry)
{
	domain_purge_swtc_entries_vcpu_dirty_mask(d, entry->vcpu_dirty_mask);
	smp_mb();

	__domain_flush_vtlb_track_entry(d, entry);
}
#endif

static void flush_tlb_vhpt_all (struct domain *d)
{
	/* First VHPT.  */
	local_vhpt_flush ();

	/* Then mTLB.  */
	local_flush_tlb_all ();
}

void domain_flush_tlb_vhpt(struct domain *d)
{
	/* Very heavy...  */
	if (HAS_PERVCPU_VHPT(d) || is_hvm_domain(d))
		on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1, 1);
	else
		on_each_cpu((void (*)(void *))flush_tlb_vhpt_all, d, 1, 1);
	cpus_clear (d->domain_dirty_cpumask);
}

void flush_tlb_for_log_dirty(struct domain *d)
{
	struct vcpu *v;

	/* NB. There is no race because all vcpus are paused. */
	if (is_hvm_domain(d)) {
		for_each_vcpu (d, v) {
			if (!v->is_initialised)
				continue;
			/* XXX: local_flush_tlb_all is called redundantly */
			thash_purge_all(v);
		}
		smp_call_function((void (*)(void *))local_flush_tlb_all,
					NULL, 1, 1);
	} else if (HAS_PERVCPU_VHPT(d)) {
		for_each_vcpu (d, v) {
			if (!v->is_initialised)
				continue;
			vcpu_purge_tr_entry(&PSCBX(v,dtlb));
			vcpu_purge_tr_entry(&PSCBX(v,itlb));
			vcpu_vhpt_flush(v);
		}
		on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1, 1);
	} else {
		on_each_cpu((void (*)(void *))flush_tlb_vhpt_all, d, 1, 1);
	}
	cpus_clear (d->domain_dirty_cpumask);
}

void flush_tlb_mask(cpumask_t mask)
{
	int cpu;

	cpu = smp_processor_id();
	if (cpu_isset (cpu, mask)) {
		cpu_clear(cpu, mask);
		flush_tlb_vhpt_all (NULL);
	}

	if (cpus_empty(mask))
		return;

	for_each_cpu_mask (cpu, mask)
		smp_call_function_single
			(cpu, (void (*)(void *))flush_tlb_vhpt_all, NULL, 1, 1);
}

#ifdef PERF_COUNTERS
void gather_vhpt_stats(void)
{
	int i, cpu;

	perfc_set(vhpt_nbr_entries, VHPT_NUM_ENTRIES);

	for_each_present_cpu (cpu) {
		struct vhpt_lf_entry *v = __va(per_cpu(vhpt_paddr, cpu));
		unsigned long vhpt_valid = 0;

		for (i = 0; i < VHPT_NUM_ENTRIES; i++, v++)
			if (!(v->ti_tag & INVALID_TI_TAG))
				vhpt_valid++;
		per_cpu(perfcounters, cpu)[PERFC_vhpt_valid_entries] = vhpt_valid;
	}
}
#endif
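
A note on the range arithmetic in __flush_vhpt_range above: the purge start is rounded down to a page boundary, the length is padded by the amount rounded off and then page-aligned upward, so every page the range touches gets exactly one hash-entry invalidation. The standalone user-space sketch below (not Xen code; the purge_range helper is hypothetical, and a 16KB page size is assumed) replays just that loop with the hashing and tag write stubbed out, to show how many entries a given (vadr, addr_range) pair costs.

/* Standalone sketch: replays the page-rounding loop from
 * __flush_vhpt_range with stubbed invalidation. Assumes
 * PAGE_SHIFT = 14 (16KB pages); purge_range is a made-up name. */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 14
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

/* Count the VHPT entries the real loop would tag INVALID_TI_TAG. */
static unsigned long purge_range(uint64_t vadr, uint64_t addr_range)
{
	uint64_t pgsz = PAGE_SIZE;           /* 1L << vhpt_pg_shift */
	uint64_t purge_addr = vadr & PAGE_MASK;
	unsigned long entries = 0;

	/* Widen the range over the partially-touched first page,
	 * then round up to whole pages, as the real code does. */
	addr_range += vadr - purge_addr;
	addr_range = PAGE_ALIGN(addr_range);
	while ((long)addr_range > 0) {
		entries++;                   /* v->ti_tag = INVALID_TI_TAG; */
		addr_range -= pgsz;
		purge_addr += pgsz;
	}
	return entries;
}

int main(void)
{
	printf("%lu\n", purge_range(0x4000, 1));  /* -> 1 */
	printf("%lu\n", purge_range(0x7fff, 2));  /* -> 2 */
	return 0;
}

The widening step is why even a one-byte purge invalidates a full page's hash entry, and why a two-byte range straddling a page boundary invalidates two.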
