⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 enlighten.c

📁 linux 内核源代码
💻 C
📖 第 1 页 / 共 2 页
字号:
}	/* closes a function that begins in the previous chunk of this file */

/* Record a page-fault address in the guest's vcpu_info (Xen's stand-in
   for the cr2 register). */
static void xen_write_cr2(unsigned long cr2)
{
	x86_read_percpu(xen_vcpu)->arch.cr2 = cr2;
}

/* Read the fault address back from the vcpu_info copy of cr2. */
static unsigned long xen_read_cr2(void)
{
	return x86_read_percpu(xen_vcpu)->arch.cr2;
}

/* Faster variant: read cr2 straight from the percpu vcpu_info copy,
   used once vcpu_info placement is enabled (see
   xen_setup_vcpu_info_placement below). */
static unsigned long xen_read_cr2_direct(void)
{
	return x86_read_percpu(xen_vcpu_info.arch.cr2);
}

static void xen_write_cr4(unsigned long cr4)
{
	/* Just ignore cr4 changes; Xen doesn't allow us to do
	   anything anyway. */
}

/* cr3 is shadowed in the percpu variable xen_cr3 (see xen_write_cr3). */
static unsigned long xen_read_cr3(void)
{
	return x86_read_percpu(xen_cr3);
}

/* Multicall callback: records the cr3 value actually installed by the
   hypervisor batch.  Takes void* because it is passed to
   xen_mc_callback. */
static void set_current_cr3(void *v)
{
	x86_write_percpu(xen_current_cr3, (unsigned long)v);
}

/* Switch pagetables by asking the hypervisor for a new base pointer,
   batched through the multicall machinery.  Keeps the percpu shadows
   xen_cr3 (intended value) and xen_current_cr3 (installed value) in
   sync. */
static void xen_write_cr3(unsigned long cr3)
{
	struct mmuext_op *op;
	struct multicall_space mcs;
	unsigned long mfn = pfn_to_mfn(PFN_DOWN(cr3));

	BUG_ON(preemptible());

	mcs = xen_mc_entry(sizeof(*op));  /* disables interrupts */

	/* Update while interrupts are disabled, so its atomic with
	   respect to ipis */
	x86_write_percpu(xen_cr3, cr3);

	op = mcs.args;
	op->cmd = MMUEXT_NEW_BASEPTR;
	op->arg1.mfn = mfn;

	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	/* Update xen_update_cr3 once the batch has actually
	   been submitted. */
	xen_mc_callback(set_current_cr3, (void *)cr3);

	xen_mc_issue(PARAVIRT_LAZY_CPU);  /* interrupts restored */
}

/* Early in boot, while setting up the initial pagetable, assume
   everything is pinned. */
static __init void xen_alloc_pt_init(struct mm_struct *mm, u32 pfn)
{
	BUG_ON(mem_map);	/* should only be used early */
	make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
}

/* Issue a single (un)pin mmuext hypercall for the given pagetable
   page; 'level' is an MMUEXT_PIN_*/MMUEXT_UNPIN command code. */
static void pin_pagetable_pfn(unsigned level, unsigned long pfn)
{
	struct mmuext_op op;
	op.cmd = level;
	op.arg1.mfn = pfn_to_mfn(pfn);
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
		BUG();
}

/* This needs to make sure the new pte page is pinned iff its being
   attached to a pinned pagetable. */
static void xen_alloc_pt(struct mm_struct *mm, u32 pfn)
{
	struct page *page = pfn_to_page(pfn);

	if (PagePinned(virt_to_page(mm->pgd))) {
		SetPagePinned(page);

		if (!PageHighMem(page)) {
			/* RO-map before pinning: Xen requires pinned
			   pagetable pages not be writably mapped */
			make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
			pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
		} else
			/* make sure there are no stray mappings of
			   this page */
			kmap_flush_unused();
	}
}

/* This should never happen until we're OK to use struct page */
static void xen_release_pt(u32 pfn)
{
	struct page *page = pfn_to_page(pfn);

	if (PagePinned(page)) {
		if (!PageHighMem(page)) {
			/* reverse of xen_alloc_pt: unpin, then allow RW */
			pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
			make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
		}
	}
}

#ifdef CONFIG_HIGHPTE
/* kmap a highmem pte page; pinned pages must be mapped read-only so
   Xen can keep validating them. */
static void *xen_kmap_atomic_pte(struct page *page, enum km_type type)
{
	pgprot_t prot = PAGE_KERNEL;

	if (PagePinned(page))
		prot = PAGE_KERNEL_RO;

	/* disabled debugging aid (note the "0 &&") */
	if (0 && PageHighMem(page))
		printk("mapping highpte %lx type %d prot %s\n",
		       page_to_pfn(page), type,
		       (unsigned long)pgprot_val(prot) & _PAGE_RW ? "WRITE" : "READ");

	return kmap_atomic_prot(page, type, prot);
}
#endif

/* Strip _PAGE_RW from 'pte' unless the existing entry already had it;
   prevents re-mapping RO pagetable pages as writable during init. */
static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
{
	/* If there's an existing pte, then don't allow _PAGE_RW to be set */
	if (pte_val_ma(*ptep) & _PAGE_PRESENT)
		pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
			       pte_val_ma(pte));

	return pte;
}

/* Init-time set_pte while constructing initial pagetables, which
   doesn't allow RO pagetable pages to be remapped RW */
static __init void xen_set_pte_init(pte_t *ptep, pte_t pte)
{
	pte = mask_rw_pte(ptep, pte);

	xen_set_pte(ptep, pte);
}

/* Take over the pagetable supplied by Xen at boot: copy its top level
   into 'base' (cloning the pmds for PAE), protect the new tables, and
   switch to them. */
static __init void xen_pagetable_setup_start(pgd_t *base)
{
	pgd_t *xen_pgd = (pgd_t *)xen_start_info->pt_base;

	/* special set_pte for pagetable initialization */
	pv_mmu_ops.set_pte = xen_set_pte_init;

	init_mm.pgd = base;
	/*
	 * copy top-level of Xen-supplied pagetable into place.	 For
	 * !PAE we can use this as-is, but for PAE it is a stand-in
	 * while we copy the pmd pages.
	 */
	memcpy(base, xen_pgd, PTRS_PER_PGD * sizeof(pgd_t));

	if (PTRS_PER_PMD > 1) {
		int i;
		/*
		 * For PAE, need to allocate new pmds, rather than
		 * share Xen's, since Xen doesn't like pmd's being
		 * shared between address spaces.
		 */
		for (i = 0; i < PTRS_PER_PGD; i++) {
			if (pgd_val_ma(xen_pgd[i]) & _PAGE_PRESENT) {
				pmd_t *pmd = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);

				memcpy(pmd, (void *)pgd_page_vaddr(xen_pgd[i]),
				       PAGE_SIZE);

				make_lowmem_page_readonly(pmd);

				/* NOTE(review): the "1 +" presumably sets the
				   present bit in the new pgd entry — confirm
				   against _PAGE_PRESENT's value */
				set_pgd(&base[i], __pgd(1 + __pa(pmd)));
			} else
				pgd_clear(&base[i]);
		}
	}

	/* make sure zero_page is mapped RO so we can use it in pagetables */
	make_lowmem_page_readonly(empty_zero_page);
	make_lowmem_page_readonly(base);
	/*
	 * Switch to new pagetable.  This is done before
	 * pagetable_init has done anything so that the new pages
	 * added to the table can be prepared properly for Xen.
	 */
	xen_write_cr3(__pa(base));
}

/* Finish pagetable takeover: install the normal pte/pt ops, map the
   shared-info page, and pin the new top-level pagetable. */
static __init void xen_pagetable_setup_done(pgd_t *base)
{
	/* This will work as long as patching hasn't happened yet
	   (which it hasn't) */
	pv_mmu_ops.alloc_pt = xen_alloc_pt;
	pv_mmu_ops.set_pte = xen_set_pte;

	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
		/*
		 * Create a mapping for the shared info page.
		 * Should be set_fixmap(), but shared_info is a machine
		 * address with no corresponding pseudo-phys address.
		 */
		set_pte_mfn(fix_to_virt(FIX_PARAVIRT_BOOTMAP),
			    PFN_DOWN(xen_start_info->shared_info),
			    PAGE_KERNEL);

		HYPERVISOR_shared_info =
			(struct shared_info *)fix_to_virt(FIX_PARAVIRT_BOOTMAP);

	} else
		HYPERVISOR_shared_info =
			(struct shared_info *)__va(xen_start_info->shared_info);

	/* Actually pin the pagetable down, but we can't set PG_pinned
	   yet because the page structures don't exist yet. */
	{
		unsigned level;

#ifdef CONFIG_X86_PAE
		level = MMUEXT_PIN_L3_TABLE;	/* PAE top level is an L3 */
#else
		level = MMUEXT_PIN_L2_TABLE;
#endif

		pin_pagetable_pfn(level, PFN_DOWN(__pa(base)));
	}
}

/* This is called once we have the cpu_possible_map */
void __init xen_setup_vcpu_info_placement(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		xen_vcpu_setup(cpu);

	/* xen_vcpu_setup managed to place the vcpu_info within the
	   percpu area for all cpus, so make use of it */
	if (have_vcpu_info_placement) {
		printk(KERN_INFO "Xen: using vcpu_info placement\n");

		/* switch hot ops to the *_direct variants, which read
		   the percpu vcpu_info copy without a hypercall */
		pv_irq_ops.save_fl = xen_save_fl_direct;
		pv_irq_ops.restore_fl = xen_restore_fl_direct;
		pv_irq_ops.irq_disable = xen_irq_disable_direct;
		pv_irq_ops.irq_enable = xen_irq_enable_direct;
		pv_mmu_ops.read_cr2 = xen_read_cr2_direct;
		pv_cpu_ops.iret = xen_iret_direct;
	}
}

/* Paravirt patch hook: for the four hot irq ops, inline the *_direct
   code (with relocation fixup) at the call site when vcpu_info
   placement is active; everything else gets the default patch. */
static unsigned xen_patch(u8 type, u16 clobbers, void *insnbuf,
			  unsigned long addr, unsigned len)
{
	char *start, *end, *reloc;
	unsigned ret;

	start = end = reloc = NULL;

#define SITE(op, x)							\
	case PARAVIRT_PATCH(op.x):					\
	if (have_vcpu_info_placement) {					\
		start = (char *)xen_##x##_direct;			\
		end = xen_##x##_direct_end;				\
		reloc = xen_##x##_direct_reloc;				\
	}								\
	goto patch_site

	switch (type) {
		SITE(pv_irq_ops, irq_enable);
		SITE(pv_irq_ops, irq_disable);
		SITE(pv_irq_ops, save_fl);
		SITE(pv_irq_ops, restore_fl);
#undef SITE

	patch_site:
		if (start == NULL || (end-start) > len)
			goto default_patch;

		ret = paravirt_patch_insns(insnbuf, len, start, end);

		/* Note: because reloc is assigned from something that
		   appears to be an array, gcc assumes it's non-null,
		   but doesn't know its relationship with start and
		   end. */
		if (reloc > start && reloc < end) {
			int reloc_off = reloc - start;
			long *relocp = (long *)(insnbuf + reloc_off);
			long delta = start - (char *)addr;

			*relocp += delta;
		}
		break;

	default_patch:
	default:
		ret = paravirt_patch_default(type, clobbers, insnbuf,
					     addr, len);
		break;
	}

	return ret;
}

static const struct pv_info xen_info __initdata = {
	.paravirt_enabled = 1,
	.shared_kernel_pmd = 0,

	.name = "Xen",
};

static const struct pv_init_ops xen_init_ops __initdata = {
	.patch = xen_patch,

	.banner = xen_banner,
	.memory_setup = xen_memory_setup,
	.arch_setup = xen_arch_setup,
	.post_allocator_init = xen_mark_init_mm_pinned,
};

static const struct pv_time_ops xen_time_ops __initdata = {
	.time_init = xen_time_init,

	.set_wallclock = xen_set_wallclock,
	.get_wallclock = xen_get_wallclock,
	.get_cpu_khz = xen_cpu_khz,
	.sched_clock = xen_sched_clock,
};

/* CPU ops: a mix of native implementations (where Xen permits direct
   execution) and Xen-specific replacements. */
static const struct pv_cpu_ops xen_cpu_ops __initdata = {
	.cpuid = xen_cpuid,

	.set_debugreg = xen_set_debugreg,
	.get_debugreg = xen_get_debugreg,

	.clts = native_clts,

	.read_cr0 = native_read_cr0,
	.write_cr0 = native_write_cr0,

	.read_cr4 = native_read_cr4,
	.read_cr4_safe = native_read_cr4_safe,
	.write_cr4 = xen_write_cr4,

	.wbinvd = native_wbinvd,

	.read_msr = native_read_msr_safe,
	.write_msr = native_write_msr_safe,
	.read_tsc = native_read_tsc,
	.read_pmc = native_read_pmc,

	/* jump into the hypercall page's iret slot; replaced by
	   xen_iret_direct when vcpu_info placement is enabled */
	.iret = (void *)&hypercall_page[__HYPERVISOR_iret],
	.irq_enable_sysexit = NULL,  /* never called */

	.load_tr_desc = paravirt_nop,
	.set_ldt = xen_set_ldt,
	.load_gdt = xen_load_gdt,
	.load_idt = xen_load_idt,
	.load_tls = xen_load_tls,

	.store_gdt = native_store_gdt,
	.store_idt = native_store_idt,
	.store_tr = xen_store_tr,

	.write_ldt_entry = xen_write_ldt_entry,
	.write_gdt_entry = xen_write_gdt_entry,
	.write_idt_entry = xen_write_idt_entry,
	.load_esp0 = xen_load_esp0,

	.set_iopl_mask = xen_set_iopl_mask,
	.io_delay = xen_io_delay,

	.lazy_mode = {
		.enter = paravirt_enter_lazy_cpu,
		.leave = xen_leave_lazy,
	},
};

static const struct pv_irq_ops xen_irq_ops __initdata = {
	.init_IRQ = xen_init_IRQ,
	.save_fl = xen_save_fl,
	.restore_fl = xen_restore_fl,
	.irq_disable = xen_irq_disable,
	.irq_enable = xen_irq_enable,
	.safe_halt = xen_safe_halt,
	.halt = xen_halt,
};

static const struct pv_apic_ops xen_apic_ops __initdata = {
#ifdef CONFIG_X86_LOCAL_APIC
	.apic_write = xen_apic_write,
	.apic_write_atomic = xen_apic_write,
	.apic_read = xen_apic_read,
	.setup_boot_clock = paravirt_nop,
	.setup_secondary_clock = paravirt_nop,
	.startup_ipi_hook = paravirt_nop,
#endif
};

static const struct pv_mmu_ops xen_mmu_ops __initdata = {
	.pagetable_setup_start = xen_pagetable_setup_start,
	.pagetable_setup_done = xen_pagetable_setup_done,

	.read_cr2 = xen_read_cr2,
	.write_cr2 = xen_write_cr2,

	.read_cr3 = xen_read_cr3,
	.write_cr3 = xen_write_cr3,

	.flush_tlb_user = xen_flush_tlb,
	.flush_tlb_kernel = xen_flush_tlb,
	.flush_tlb_single = xen_flush_tlb_single,
	.flush_tlb_others = xen_flush_tlb_others,

	.pte_update = paravirt_nop,
	.pte_update_defer = paravirt_nop,

	/* alloc_pt starts as the "assume pinned" init version and is
	   switched to xen_alloc_pt in xen_pagetable_setup_done */
	.alloc_pt = xen_alloc_pt_init,
	.release_pt = xen_release_pt,
	.alloc_pd = paravirt_nop,
	.alloc_pd_clone = paravirt_nop,
	.release_pd = paravirt_nop,

#ifdef CONFIG_HIGHPTE
	.kmap_atomic_pte = xen_kmap_atomic_pte,
#endif

	.set_pte = NULL,	/* see xen_pagetable_setup_* */
	.set_pte_at = xen_set_pte_at,
	.set_pmd = xen_set_pmd,

	.pte_val = xen_pte_val,
	.pgd_val = xen_pgd_val,

	.make_pte = xen_make_pte,
	.make_pgd = xen_make_pgd,

#ifdef CONFIG_X86_PAE
	.set_pte_atomic = xen_set_pte_atomic,
	.set_pte_present = xen_set_pte_at,
	.set_pud = xen_set_pud,
	.pte_clear = xen_pte_clear,
	.pmd_clear = xen_pmd_clear,

	.make_pmd = xen_make_pmd,
	.pmd_val = xen_pmd_val,
#endif	/* PAE */

	.activate_mm = xen_activate_mm,
	.dup_mmap = xen_dup_mmap,
	.exit_mmap = xen_exit_mmap,

	.lazy_mode = {
		.enter = paravirt_enter_lazy_mmu,
		.leave = xen_leave_lazy,
	},
};

#ifdef CONFIG_SMP
static const struct smp_ops xen_smp_ops __initdata = {
	.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
	.smp_prepare_cpus = xen_smp_prepare_cpus,
	.cpu_up = xen_cpu_up,
	.smp_cpus_done = xen_smp_cpus_done,

	.smp_send_stop = xen_smp_send_stop,
	.smp_send_reschedule = xen_smp_send_reschedule,
	.smp_call_function_mask = xen_smp_call_function_mask,
};
#endif	/* CONFIG_SMP */

/* Ask the hypervisor to shut this domain down; 'reason' is a
   SCHEDOP_shutdown code (reboot/poweroff/crash). */
static void xen_reboot(int reason)
{
#ifdef CONFIG_SMP
	smp_send_stop();
#endif

	if (HYPERVISOR_sched_op(SCHEDOP_shutdown, reason))
		BUG();
}

static void xen_restart(char *msg)
{
	xen_reboot(SHUTDOWN_reboot);
}

static void xen_emergency_restart(void)
{
	xen_reboot(SHUTDOWN_reboot);
}

static void xen_machine_halt(void)
{
	xen_reboot(SHUTDOWN_poweroff);
}

static void xen_crash_shutdown(struct pt_regs *regs)
{
	xen_reboot(SHUTDOWN_crash);
}

static const struct machine_ops __initdata xen_machine_ops = {
	.restart = xen_restart,
	.halt = xen_machine_halt,
	.power_off = xen_machine_halt,
	.shutdown = xen_machine_halt,
	.crash_shutdown = xen_crash_shutdown,
	.emergency_restart = xen_emergency_restart,
};

/* Reserve the top of the virtual address space for the hypervisor,
   querying Xen for the actual boundary when it can report one. */
static void __init xen_reserve_top(void)
{
	unsigned long top = HYPERVISOR_VIRT_START;
	struct xen_platform_parameters pp;

	if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
		top = pp.virt_start;

	reserve_top_address(-top + 2 * PAGE_SIZE);
}

/* First C function to be called on Xen boot */
asmlinkage void __init xen_start_kernel(void)
{
	pgd_t *pgd;

	if (!xen_start_info)
		return;

	/* refuse to run under anything but a "xen-3" ABI */
	BUG_ON(memcmp(xen_start_info->magic, "xen-3", 5) != 0);

	/* Install Xen paravirt ops */
	pv_info = xen_info;
	pv_init_ops = xen_init_ops;
	pv_time_ops = xen_time_ops;
	pv_cpu_ops = xen_cpu_ops;
	pv_irq_ops = xen_irq_ops;
	pv_apic_ops = xen_apic_ops;
	pv_mmu_ops = xen_mmu_ops;
	machine_ops = xen_machine_ops;

#ifdef CONFIG_SMP
	smp_ops = xen_smp_ops;
#endif

	xen_setup_features();

	/* Get mfn list */
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		phys_to_machine_mapping = (unsigned long *)xen_start_info->mfn_list;

	pgd = (pgd_t *)xen_start_info->pt_base;

	init_pg_tables_end = __pa(pgd) + xen_start_info->nr_pt_frames*PAGE_SIZE;

	init_mm.pgd = pgd; /* use the Xen pagetables to start */

	/* keep using Xen gdt for now; no urgent need to change it */

	x86_write_percpu(xen_cr3, __pa(pgd));
	x86_write_percpu(xen_current_cr3, __pa(pgd));

#ifdef CONFIG_SMP
	/* Don't do the full vcpu_info placement stuff until we have a
	   possible map. */
	per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];
#else
	/* May as well do it now, since there's no good time to call
	   it later on UP. */
	xen_setup_vcpu_info_placement();
#endif

	/* kernel runs in ring 1 under Xen, unless the hypervisor
	   supports supervisor-mode kernels */
	pv_info.kernel_rpl = 1;
	if (xen_feature(XENFEAT_supervisor_mode_kernel))
		pv_info.kernel_rpl = 0;

	/* set the limit of our address space */
	xen_reserve_top();

	/* set up basic CPUID stuff */
	cpu_detect(&new_cpu_data);
	new_cpu_data.hard_math = 1;
	new_cpu_data.x86_capability[0] = cpuid_edx(1);

	/* Poke various useful things into boot_params */
	boot_params.hdr.type_of_loader = (9 << 4) | 0;
	boot_params.hdr.ramdisk_image = xen_start_info->mod_start
		? __pa(xen_start_info->mod_start) : 0;
	boot_params.hdr.ramdisk_size = xen_start_info->mod_len;

	/* Start the world */
	start_kernel();
}

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -