
📄 vmi_32.c

📁 Linux kernel source code
💻 C
	vmi_check_page_type(__pa(ptep) >> PAGE_SHIFT, VMI_PAGE_PTE);
	vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
}

static void vmi_pmd_clear(pmd_t *pmd)
{
	const pte_t pte = { 0 };
	vmi_check_page_type(__pa(pmd) >> PAGE_SHIFT, VMI_PAGE_PMD);
	vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
}
#endif

#ifdef CONFIG_SMP
static void __devinit
vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip,
		     unsigned long start_esp)
{
	struct vmi_ap_state ap;

	/* Default everything to zero.  This is fine for most GPRs. */
	memset(&ap, 0, sizeof(struct vmi_ap_state));

	ap.gdtr_limit = GDT_SIZE - 1;
	ap.gdtr_base = (unsigned long) get_cpu_gdt_table(phys_apicid);

	ap.idtr_limit = IDT_ENTRIES * 8 - 1;
	ap.idtr_base = (unsigned long) idt_table;

	ap.ldtr = 0;

	ap.cs = __KERNEL_CS;
	ap.eip = (unsigned long) start_eip;
	ap.ss = __KERNEL_DS;
	ap.esp = (unsigned long) start_esp;

	ap.ds = __USER_DS;
	ap.es = __USER_DS;
	ap.fs = __KERNEL_PERCPU;
	ap.gs = 0;

	ap.eflags = 0;

#ifdef CONFIG_X86_PAE
	/* efer should match BSP efer. */
	if (cpu_has_nx) {
		unsigned l, h;
		rdmsr(MSR_EFER, l, h);
		ap.efer = (unsigned long long) h << 32 | l;
	}
#endif

	ap.cr3 = __pa(swapper_pg_dir);
	/* Protected mode, paging, AM, WP, NE, MP. */
	ap.cr0 = 0x80050023;
	ap.cr4 = mmu_cr4_features;
	vmi_ops.set_initial_ap_state((u32)&ap, phys_apicid);
}
#endif

static void vmi_enter_lazy_cpu(void)
{
	paravirt_enter_lazy_cpu();
	vmi_ops.set_lazy_mode(2);
}

static void vmi_enter_lazy_mmu(void)
{
	paravirt_enter_lazy_mmu();
	vmi_ops.set_lazy_mode(1);
}

static void vmi_leave_lazy(void)
{
	paravirt_leave_lazy(paravirt_get_lazy_mode());
	vmi_ops.set_lazy_mode(0);
}

static inline int __init check_vmi_rom(struct vrom_header *rom)
{
	struct pci_header *pci;
	struct pnp_header *pnp;
	const char *manufacturer = "UNKNOWN";
	const char *product = "UNKNOWN";
	const char *license = "unspecified";

	if (rom->rom_signature != 0xaa55)
		return 0;
	if (rom->vrom_signature != VMI_SIGNATURE)
		return 0;
	if (rom->api_version_maj != VMI_API_REV_MAJOR ||
	    rom->api_version_min+1 < VMI_API_REV_MINOR+1) {
		printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n",
				rom->api_version_maj,
				rom->api_version_min);
		return 0;
	}

	/*
	 * Relying on the VMI_SIGNATURE field is not 100% safe, so check
	 * the PCI header and device type to make sure this is really a
	 * VMI device.
	 */
	if (!rom->pci_header_offs) {
		printk(KERN_WARNING "VMI: ROM does not contain PCI header.\n");
		return 0;
	}

	pci = (struct pci_header *)((char *)rom+rom->pci_header_offs);
	if (pci->vendorID != PCI_VENDOR_ID_VMWARE ||
	    pci->deviceID != PCI_DEVICE_ID_VMWARE_VMI) {
		/* Allow it to run... anyways, but warn */
		printk(KERN_WARNING "VMI: ROM from unknown manufacturer\n");
	}

	if (rom->pnp_header_offs) {
		pnp = (struct pnp_header *)((char *)rom+rom->pnp_header_offs);
		if (pnp->manufacturer_offset)
			manufacturer = (const char *)rom+pnp->manufacturer_offset;
		if (pnp->product_offset)
			product = (const char *)rom+pnp->product_offset;
	}

	if (rom->license_offs)
		license = (char *)rom+rom->license_offs;

	printk(KERN_INFO "VMI: Found %s %s, API version %d.%d, ROM version %d.%d\n",
		manufacturer, product,
		rom->api_version_maj, rom->api_version_min,
		pci->rom_version_maj, pci->rom_version_min);

	/* Don't allow BSD/MIT here for now because we don't want to end up
	   with any binary only shim layers */
	if (strcmp(license, "GPL") && strcmp(license, "GPL v2")) {
		printk(KERN_WARNING "VMI: Non GPL license `%s' found for ROM. Not used.\n",
			license);
		return 0;
	}

	return 1;
}
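/*
 * For reference: a standard PC option ROM begins with the bytes
 * 0x55, 0xAA, which read as the little-endian u16 0xaa55 checked in
 * check_vmi_rom() above.  The probe below walks the legacy option
 * ROM window (0xC0000-0xDFFFF) in 2KB steps looking for a ROM that
 * also carries the VMI signature.
 */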
/*
 * Probe for the VMI option ROM
 */
static inline int __init probe_vmi_rom(void)
{
	unsigned long base;

	/* VMI ROM is in option ROM area, check signature */
	for (base = 0xC0000; base < 0xE0000; base += 2048) {
		struct vrom_header *romstart;
		romstart = (struct vrom_header *)isa_bus_to_virt(base);
		if (check_vmi_rom(romstart)) {
			vmi_rom = romstart;
			return 1;
		}
	}
	return 0;
}

/*
 * VMI setup common to all processors
 */
void vmi_bringup(void)
{
	/* We must establish the lowmem mapping for MMU ops to work */
	if (vmi_ops.set_linear_mapping)
		vmi_ops.set_linear_mapping(0, (void *)__PAGE_OFFSET, max_low_pfn, 0);
}

/*
 * Return a pointer to a VMI function or NULL if unimplemented
 */
static void *vmi_get_function(int vmicall)
{
	u64 reloc;
	const struct vmi_relocation_info *rel = (struct vmi_relocation_info *)&reloc;
	reloc = call_vrom_long_func(vmi_rom, get_reloc, vmicall);
	BUG_ON(rel->type == VMI_RELOCATION_JUMP_REL);
	if (rel->type == VMI_RELOCATION_CALL_REL)
		return (void *)rel->eip;
	else
		return NULL;
}

/*
 * Helper macro for making the VMI paravirt-ops fill code readable.
 * For unimplemented operations, fall back to default, unless nop
 * is returned by the ROM.
 */
#define para_fill(opname, vmicall)				\
do {								\
	reloc = call_vrom_long_func(vmi_rom, get_reloc,		\
				    VMI_CALL_##vmicall);	\
	if (rel->type == VMI_RELOCATION_CALL_REL)		\
		opname = (void *)rel->eip;			\
	else if (rel->type == VMI_RELOCATION_NOP)		\
		opname = (void *)vmi_nop;			\
	else if (rel->type != VMI_RELOCATION_NONE)		\
		printk(KERN_WARNING "VMI: Unknown relocation "	\
				    "type %d for " #vmicall"\n",\
					rel->type);		\
} while (0)

/*
 * Helper macro for making the VMI paravirt-ops fill code readable.
 * For cached operations which do not match the VMI ROM ABI and must
 * go through a translation stub.  Ignore NOPs, since it is not clear
 * a NOP VMI function corresponds to a NOP paravirt-op when the
 * functions are not in 1-1 correspondence.
 */
#define para_wrap(opname, wrapper, cache, vmicall)		\
do {								\
	reloc = call_vrom_long_func(vmi_rom, get_reloc,		\
				    VMI_CALL_##vmicall);	\
	BUG_ON(rel->type == VMI_RELOCATION_JUMP_REL);		\
	if (rel->type == VMI_RELOCATION_CALL_REL) {		\
		opname = wrapper;				\
		vmi_ops.cache = (void *)rel->eip;		\
	}							\
} while (0)
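/*
 * As an illustration, a call such as para_fill(pv_cpu_ops.clts, CLTS)
 * expands to roughly:
 *
 *	reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_CLTS);
 *	if (rel->type == VMI_RELOCATION_CALL_REL)
 *		pv_cpu_ops.clts = (void *)rel->eip;
 *	else if (rel->type == VMI_RELOCATION_NOP)
 *		pv_cpu_ops.clts = (void *)vmi_nop;
 *	else if (rel->type != VMI_RELOCATION_NONE)
 *		printk(KERN_WARNING "VMI: Unknown relocation type %d for CLTS\n",
 *		       rel->type);
 *
 * while para_wrap() additionally caches the ROM entry point in a
 * vmi_ops slot and installs a Linux-side wrapper in the paravirt-op.
 */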
/*
 * Activate the VMI interface and switch into paravirtualized mode
 */
static inline int __init activate_vmi(void)
{
	short kernel_cs;
	u64 reloc;
	const struct vmi_relocation_info *rel = (struct vmi_relocation_info *)&reloc;

	if (call_vrom_func(vmi_rom, vmi_init) != 0) {
		printk(KERN_ERR "VMI ROM failed to initialize!");
		return 0;
	}
	savesegment(cs, kernel_cs);

	pv_info.paravirt_enabled = 1;
	pv_info.kernel_rpl = kernel_cs & SEGMENT_RPL_MASK;
	pv_info.name = "vmi";

	pv_init_ops.patch = vmi_patch;

	/*
	 * Many of these operations are ABI compatible with VMI.
	 * This means we can fill in the paravirt-ops with direct
	 * pointers into the VMI ROM.  If the calling convention for
	 * these operations changes, this code needs to be updated.
	 *
	 * Exceptions
	 *  CPUID paravirt-op uses pointers, not the native ISA
	 *  halt has no VMI equivalent; all VMI halts are "safe"
	 *  no MSR support yet - just trap and emulate.  VMI uses the
	 *    same ABI as the native ISA, but Linux wants exceptions
	 *    from bogus MSR read / write handled
	 *  rdpmc is not yet used in Linux
	 */

	/* CPUID is special, so very special it gets wrapped like a present */
	para_wrap(pv_cpu_ops.cpuid, vmi_cpuid, cpuid, CPUID);

	para_fill(pv_cpu_ops.clts, CLTS);
	para_fill(pv_cpu_ops.get_debugreg, GetDR);
	para_fill(pv_cpu_ops.set_debugreg, SetDR);
	para_fill(pv_cpu_ops.read_cr0, GetCR0);
	para_fill(pv_mmu_ops.read_cr2, GetCR2);
	para_fill(pv_mmu_ops.read_cr3, GetCR3);
	para_fill(pv_cpu_ops.read_cr4, GetCR4);
	para_fill(pv_cpu_ops.write_cr0, SetCR0);
	para_fill(pv_mmu_ops.write_cr2, SetCR2);
	para_fill(pv_mmu_ops.write_cr3, SetCR3);
	para_fill(pv_cpu_ops.write_cr4, SetCR4);
	para_fill(pv_irq_ops.save_fl, GetInterruptMask);
	para_fill(pv_irq_ops.restore_fl, SetInterruptMask);
	para_fill(pv_irq_ops.irq_disable, DisableInterrupts);
	para_fill(pv_irq_ops.irq_enable, EnableInterrupts);
	para_fill(pv_cpu_ops.wbinvd, WBINVD);
	para_fill(pv_cpu_ops.read_tsc, RDTSC);

	/* The following we emulate with trap and emulate for now */
	/* paravirt_ops.read_msr = vmi_rdmsr */
	/* paravirt_ops.write_msr = vmi_wrmsr */
	/* paravirt_ops.rdpmc = vmi_rdpmc */

	/* TR interface doesn't pass TR value, wrap */
	para_wrap(pv_cpu_ops.load_tr_desc, vmi_set_tr, set_tr, SetTR);

	/* LDT is special, too */
	para_wrap(pv_cpu_ops.set_ldt, vmi_set_ldt, _set_ldt, SetLDT);

	para_fill(pv_cpu_ops.load_gdt, SetGDT);
	para_fill(pv_cpu_ops.load_idt, SetIDT);
	para_fill(pv_cpu_ops.store_gdt, GetGDT);
	para_fill(pv_cpu_ops.store_idt, GetIDT);
	para_fill(pv_cpu_ops.store_tr, GetTR);
	pv_cpu_ops.load_tls = vmi_load_tls;
	para_fill(pv_cpu_ops.write_ldt_entry, WriteLDTEntry);
	para_fill(pv_cpu_ops.write_gdt_entry, WriteGDTEntry);
	para_fill(pv_cpu_ops.write_idt_entry, WriteIDTEntry);
	para_wrap(pv_cpu_ops.load_esp0, vmi_load_esp0, set_kernel_stack, UpdateKernelStack);
	para_fill(pv_cpu_ops.set_iopl_mask, SetIOPLMask);
	para_fill(pv_cpu_ops.io_delay, IODelay);

	para_wrap(pv_cpu_ops.lazy_mode.enter, vmi_enter_lazy_cpu,
		  set_lazy_mode, SetLazyMode);
	para_wrap(pv_cpu_ops.lazy_mode.leave, vmi_leave_lazy,
		  set_lazy_mode, SetLazyMode);

	para_wrap(pv_mmu_ops.lazy_mode.enter, vmi_enter_lazy_mmu,
		  set_lazy_mode, SetLazyMode);
	para_wrap(pv_mmu_ops.lazy_mode.leave, vmi_leave_lazy,
		  set_lazy_mode, SetLazyMode);

	/* user and kernel flush are just handled with different flags to FlushTLB */
	para_wrap(pv_mmu_ops.flush_tlb_user, vmi_flush_tlb_user, _flush_tlb, FlushTLB);
	para_wrap(pv_mmu_ops.flush_tlb_kernel, vmi_flush_tlb_kernel, _flush_tlb, FlushTLB);
	para_fill(pv_mmu_ops.flush_tlb_single, InvalPage);

	/*
	 * Until a standard flag format can be agreed on, we need to
	 * implement these as wrappers in Linux.  Get the VMI ROM
	 * function pointers for the two backend calls.
	 */
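	/*
	 * Note the two-level dispatch here: vmi_ops.* caches the raw
	 * ROM entry point, while pv_mmu_ops.* points at a Linux-side
	 * wrapper (vmi_set_pte, vmi_set_pte_at, ...) that translates
	 * Linux PTE flags and page types into the VMI ABI before
	 * calling through vmi_ops.  Under PAE the 64-bit "Long"
	 * variants of the page table calls are selected instead.
	 */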
#ifdef CONFIG_X86_PAE
	vmi_ops.set_pte = vmi_get_function(VMI_CALL_SetPxELong);
	vmi_ops.update_pte = vmi_get_function(VMI_CALL_UpdatePxELong);
#else
	vmi_ops.set_pte = vmi_get_function(VMI_CALL_SetPxE);
	vmi_ops.update_pte = vmi_get_function(VMI_CALL_UpdatePxE);
#endif

	if (vmi_ops.set_pte) {
		pv_mmu_ops.set_pte = vmi_set_pte;
		pv_mmu_ops.set_pte_at = vmi_set_pte_at;
		pv_mmu_ops.set_pmd = vmi_set_pmd;
#ifdef CONFIG_X86_PAE
		pv_mmu_ops.set_pte_atomic = vmi_set_pte_atomic;
		pv_mmu_ops.set_pte_present = vmi_set_pte_present;
		pv_mmu_ops.set_pud = vmi_set_pud;
		pv_mmu_ops.pte_clear = vmi_pte_clear;
		pv_mmu_ops.pmd_clear = vmi_pmd_clear;
#endif
	}

	if (vmi_ops.update_pte) {
		pv_mmu_ops.pte_update = vmi_update_pte;
		pv_mmu_ops.pte_update_defer = vmi_update_pte_defer;
	}

	vmi_ops.allocate_page = vmi_get_function(VMI_CALL_AllocatePage);
	if (vmi_ops.allocate_page) {
		pv_mmu_ops.alloc_pt = vmi_allocate_pt;
		pv_mmu_ops.alloc_pd = vmi_allocate_pd;
		pv_mmu_ops.alloc_pd_clone = vmi_allocate_pd_clone;
	}

	vmi_ops.release_page = vmi_get_function(VMI_CALL_ReleasePage);
	if (vmi_ops.release_page) {
		pv_mmu_ops.release_pt = vmi_release_pt;
		pv_mmu_ops.release_pd = vmi_release_pd;
	}

	/* Set linear is needed in all cases */
	vmi_ops.set_linear_mapping = vmi_get_function(VMI_CALL_SetLinearMapping);
#ifdef CONFIG_HIGHPTE
	if (vmi_ops.set_linear_mapping)
		pv_mmu_ops.kmap_atomic_pte = vmi_kmap_atomic_pte;
#endif

	/*
	 * These MUST always be patched.  Don't support indirect jumps
	 * through these operations, as the VMI interface may use either
	 * a jump or a call to get to these operations, depending on
	 * the backend.  They are performance critical anyway, so requiring
	 * a patch is not a big problem.
	 */
	pv_cpu_ops.irq_enable_sysexit = (void *)0xfeedbab0;
	pv_cpu_ops.iret = (void *)0xbadbab0;

#ifdef CONFIG_SMP
	para_wrap(pv_apic_ops.startup_ipi_hook, vmi_startup_ipi_hook, set_initial_ap_state, SetInitialAPState);
#endif

#ifdef CONFIG_X86_LOCAL_APIC
	para_fill(pv_apic_ops.apic_read, APICRead);
	para_fill(pv_apic_ops.apic_write, APICWrite);
	para_fill(pv_apic_ops.apic_write_atomic, APICWrite);
#endif

	/*
	 * Check for VMI timer functionality by probing for a cycle frequency method
	 */
	reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_GetCycleFrequency);
	if (!disable_vmi_timer && rel->type != VMI_RELOCATION_NONE) {
		vmi_timer_ops.get_cycle_frequency = (void *)rel->eip;
		vmi_timer_ops.get_cycle_counter =
			vmi_get_function(VMI_CALL_GetCycleCounter);
		vmi_timer_ops.get_wallclock =
			vmi_get_function(VMI_CALL_GetWallclockTime);
		vmi_timer_ops.wallclock_updated =
			vmi_get_function(VMI_CALL_WallclockUpdated);
		vmi_timer_ops.set_alarm = vmi_get_function(VMI_CALL_SetAlarm);
		vmi_timer_ops.cancel_alarm =
			 vmi_get_function(VMI_CALL_CancelAlarm);
		pv_time_ops.time_init = vmi_time_init;
		pv_time_ops.get_wallclock = vmi_get_wallclock;
		pv_time_ops.set_wallclock = vmi_set_wallclock;
#ifdef CONFIG_X86_LOCAL_APIC
		pv_apic_ops.setup_boot_clock = vmi_time_bsp_init;
		pv_apic_ops.setup_secondary_clock = vmi_time_ap_init;
#endif
		pv_time_ops.sched_clock = vmi_sched_clock;
		pv_time_ops.get_cpu_khz = vmi_cpu_khz;

		/* We have true wallclock functions; disable CMOS clock sync */
		no_sync_cmos_clock = 1;
	} else {
		disable_noidle = 1;
		disable_vmi_timer = 1;
	}

	para_fill(pv_irq_ops.safe_halt, Halt);

	/*
	 * Alternative instruction rewriting doesn't happen soon enough
	 * to convert VMI_IRET to a call instead of a jump; so we have
	 * to do this before IRQs get reenabled.  Fortunately, it is
	 * idempotent.
	 */
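	/*
	 * apply_paravirt() walks the .parainstructions section emitted
	 * for this kernel and patches each recorded call site through
	 * pv_init_ops.patch (vmi_patch above), so the iret / sysexit
	 * slots poisoned with magic constants earlier are rewritten
	 * before they can ever be executed.
	 */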
	apply_paravirt(__parainstructions, __parainstructions_end);

	vmi_bringup();

	return 1;
}

#undef para_fill

void __init vmi_init(void)
{
	unsigned long flags;

	if (!vmi_rom)
		probe_vmi_rom();
	else
		check_vmi_rom(vmi_rom);

	/* In case probing for or validating the ROM failed, bail */
	if (!vmi_rom)
		return;

	reserve_top_address(-vmi_rom->virtual_top);

	local_irq_save(flags);
	activate_vmi();

#ifdef CONFIG_X86_IO_APIC
	/* This is virtual hardware; timer routing is wired correctly */
	no_timer_check = 1;
#endif
	local_irq_restore(flags & X86_EFLAGS_IF);
}

static int __init parse_vmi(char *arg)
{
	if (!arg)
		return -EINVAL;

	if (!strcmp(arg, "disable_pge")) {
		clear_bit(X86_FEATURE_PGE, boot_cpu_data.x86_capability);
		disable_pge = 1;
	} else if (!strcmp(arg, "disable_pse")) {
		clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
		disable_pse = 1;
	} else if (!strcmp(arg, "disable_sep")) {
		clear_bit(X86_FEATURE_SEP, boot_cpu_data.x86_capability);
		disable_sep = 1;
	} else if (!strcmp(arg, "disable_tsc")) {
		clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
		disable_tsc = 1;
	} else if (!strcmp(arg, "disable_mtrr")) {
		clear_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability);
		disable_mtrr = 1;
	} else if (!strcmp(arg, "disable_timer")) {
		disable_vmi_timer = 1;
		disable_noidle = 1;
	} else if (!strcmp(arg, "disable_noidle"))
		disable_noidle = 1;
	return 0;
}

early_param("vmi", parse_vmi);
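/*
 * Usage sketch: the options parsed above are passed on the kernel
 * command line through the "vmi" early parameter, one option per
 * instance, e.g.:
 *
 *	vmi=disable_tsc
 *	vmi=disable_timer
 *
 * Most options clear the corresponding CPU feature bit before it is
 * consulted during early boot; disable_timer / disable_noidle turn
 * off the VMI timer and idle handling instead.
 */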
