⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 efi.c

📁 linux 内核源代码
💻 C
📖 第 1 页 / 共 3 页
字号:
/*
 * NOTE(review): this chunk opens in the middle of efi_init() -- the function
 * header and the declarations of cp, mem_limit, max_addr, min_addr, vendor,
 * c16, i, config_tables, runtime, efi_map_start/end and efi_desc_size are
 * above the visible region.
 */
	/* it's too early to be able to use the standard kernel command line support... */
	for (cp = boot_command_line; *cp; ) {
		if (memcmp(cp, "mem=", 4) == 0) {
			mem_limit = memparse(cp + 4, &cp);
		} else if (memcmp(cp, "max_addr=", 9) == 0) {
			max_addr = GRANULEROUNDDOWN(memparse(cp + 9, &cp));
		} else if (memcmp(cp, "min_addr=", 9) == 0) {
			min_addr = GRANULEROUNDDOWN(memparse(cp + 9, &cp));
		} else {
			/* not one of ours: skip the option and any trailing blanks */
			while (*cp != ' ' && *cp)
				++cp;
			while (*cp == ' ')
				++cp;
		}
	}
	if (min_addr != 0UL)
		printk(KERN_INFO "Ignoring memory below %luMB\n", min_addr >> 20);
	if (max_addr != ~0UL)
		printk(KERN_INFO "Ignoring memory above %luMB\n", max_addr >> 20);

	efi.systab = __va(ia64_boot_param->efi_systab);

	/*
	 * Verify the EFI Table
	 */
	if (efi.systab == NULL)
		panic("Woah! Can't find EFI system table.\n");
	if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
		panic("Woah! EFI system table signature incorrect\n");
	if ((efi.systab->hdr.revision >> 16) == 0)
		printk(KERN_WARNING "Warning: EFI system table version "
		       "%d.%02d, expected 1.00 or greater\n",
		       efi.systab->hdr.revision >> 16,
		       efi.systab->hdr.revision & 0xffff);

	config_tables = __va(efi.systab->tables);

	/* Show what we know for posterity */
	c16 = __va(efi.systab->fw_vendor);
	if (c16) {
		/* copy the UCS-2 vendor string, naively truncating each char to 8 bits */
		for (i = 0;i < (int) sizeof(vendor) - 1 && *c16; ++i)
			vendor[i] = *c16++;
		vendor[i] = '\0';
	}

	printk(KERN_INFO "EFI v%u.%.02u by %s:",
	       efi.systab->hdr.revision >> 16, efi.systab->hdr.revision & 0xffff, vendor);

	/* mark every well-known config table as absent until found below */
	efi.mps        = EFI_INVALID_TABLE_ADDR;
	efi.acpi       = EFI_INVALID_TABLE_ADDR;
	efi.acpi20     = EFI_INVALID_TABLE_ADDR;
	efi.smbios     = EFI_INVALID_TABLE_ADDR;
	efi.sal_systab = EFI_INVALID_TABLE_ADDR;
	efi.boot_info  = EFI_INVALID_TABLE_ADDR;
	efi.hcdp       = EFI_INVALID_TABLE_ADDR;
	efi.uga        = EFI_INVALID_TABLE_ADDR;

	/* scan the firmware's configuration-table array for tables we recognize by GUID */
	for (i = 0; i < (int) efi.systab->nr_tables; i++) {
		if (efi_guidcmp(config_tables[i].guid, MPS_TABLE_GUID) == 0) {
			efi.mps = config_tables[i].table;
			printk(" MPS=0x%lx", config_tables[i].table);
		} else if (efi_guidcmp(config_tables[i].guid, ACPI_20_TABLE_GUID) == 0) {
			efi.acpi20 = config_tables[i].table;
			printk(" ACPI 2.0=0x%lx", config_tables[i].table);
		} else if (efi_guidcmp(config_tables[i].guid, ACPI_TABLE_GUID) == 0) {
			efi.acpi = config_tables[i].table;
			printk(" ACPI=0x%lx", config_tables[i].table);
		} else if (efi_guidcmp(config_tables[i].guid, SMBIOS_TABLE_GUID) == 0) {
			efi.smbios = config_tables[i].table;
			printk(" SMBIOS=0x%lx", config_tables[i].table);
		} else if (efi_guidcmp(config_tables[i].guid, SAL_SYSTEM_TABLE_GUID) == 0) {
			efi.sal_systab = config_tables[i].table;
			printk(" SALsystab=0x%lx", config_tables[i].table);
		} else if (efi_guidcmp(config_tables[i].guid, HCDP_TABLE_GUID) == 0) {
			efi.hcdp = config_tables[i].table;
			printk(" HCDP=0x%lx", config_tables[i].table);
		}
	}
	printk("\n");

	runtime = __va(efi.systab->runtime);

	/*
	 * Until efi_enter_virtual_mode() succeeds, all runtime services must be
	 * invoked through the physical-mode wrappers.
	 */
	efi.get_time = phys_get_time;
	efi.set_time = phys_set_time;
	efi.get_wakeup_time = phys_get_wakeup_time;
	efi.set_wakeup_time = phys_set_wakeup_time;
	efi.get_variable = phys_get_variable;
	efi.get_next_variable = phys_get_next_variable;
	efi.set_variable = phys_set_variable;
	efi.get_next_high_mono_count = phys_get_next_high_mono_count;
	efi.reset_system = phys_reset_system;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

#if EFI_DEBUG
	/* print EFI memory map: */
	{
		efi_memory_desc_t *md;
		void *p;

		for (i = 0, p = efi_map_start; p < efi_map_end; ++i, p += efi_desc_size) {
			md = p;
			printk("mem%02u: type=%u, attr=0x%lx, range=[0x%016lx-0x%016lx) (%luMB)\n",
			       i, md->type, md->attribute, md->phys_addr,
			       md->phys_addr + efi_md_size(md),
			       md->num_pages >> (20 - EFI_PAGE_SHIFT));
		}
	}
#endif

	efi_map_pal_code();
	efi_enter_virtual_mode();
}

/*
 * Switch EFI runtime services from physical to virtual addressing: give every
 * RUNTIME descriptor a kernel virtual address, hand the updated map to the
 * firmware via SetVirtualAddressMap, and on success repoint the efi.* hooks
 * at the virtual-mode wrappers.
 */
void
efi_enter_virtual_mode (void)
{
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;
	efi_status_t status;
	u64 efi_desc_size;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		md = p;
		if (md->attribute & EFI_MEMORY_RUNTIME) {
			/*
			 * Some descriptors have multiple bits set, so the order of
			 * the tests is relevant.
			 */
			if (md->attribute & EFI_MEMORY_WB) {
				md->virt_addr = (u64) __va(md->phys_addr);
			} else if (md->attribute & EFI_MEMORY_UC) {
				md->virt_addr = (u64) ioremap(md->phys_addr, 0);
			} else if (md->attribute & EFI_MEMORY_WC) {
#if 0
				md->virt_addr = ia64_remap(md->phys_addr, (_PAGE_A | _PAGE_P
									   | _PAGE_D
									   | _PAGE_MA_WC
									   | _PAGE_PL_0
									   | _PAGE_AR_RW));
#else
				/* WC remap disabled above; fall back to an uncached mapping */
				printk(KERN_INFO "EFI_MEMORY_WC mapping\n");
				md->virt_addr = (u64) ioremap(md->phys_addr, 0);
#endif
			} else if (md->attribute & EFI_MEMORY_WT) {
#if 0
				md->virt_addr = ia64_remap(md->phys_addr, (_PAGE_A | _PAGE_P
									   | _PAGE_D | _PAGE_MA_WT
									   | _PAGE_PL_0
									   | _PAGE_AR_RW));
#else
				/* WT remap disabled above; fall back to an uncached mapping */
				printk(KERN_INFO "EFI_MEMORY_WT mapping\n");
				md->virt_addr = (u64) ioremap(md->phys_addr, 0);
#endif
			}
		}
	}

	/* last physical-mode call: tell the firmware about the virtual map */
	status = efi_call_phys(__va(runtime->set_virtual_address_map),
			       ia64_boot_param->efi_memmap_size,
			       efi_desc_size, ia64_boot_param->efi_memdesc_version,
			       ia64_boot_param->efi_memmap);
	if (status != EFI_SUCCESS) {
		/* stay on the phys_* wrappers installed by efi_init() */
		printk(KERN_WARNING "warning: unable to switch EFI into virtual mode "
		       "(status=%lu)\n", status);
		return;
	}

	/*
	 * Now that EFI is in virtual mode, we call the EFI functions more efficiently:
	 */
	efi.get_time = virt_get_time;
	efi.set_time = virt_set_time;
	efi.get_wakeup_time = virt_get_wakeup_time;
	efi.set_wakeup_time = virt_set_wakeup_time;
	efi.get_variable = virt_get_variable;
	efi.get_next_variable = virt_get_next_variable;
	efi.set_variable = virt_set_variable;
	efi.get_next_high_mono_count = virt_get_next_high_mono_count;
	efi.reset_system = virt_reset_system;
}

/*
 * Walk the EFI memory map looking for the I/O port range.  There can only be one entry of
 * this type, other I/O port ranges should be described via ACPI.
 */
u64
efi_get_iobase (void)
{
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;
	u64 efi_desc_size;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		md = p;
		if (md->type == EFI_MEMORY_MAPPED_IO_PORT_SPACE) {
			if (md->attribute & EFI_MEMORY_UC)
				return md->phys_addr;
		}
	}
	/* no I/O port space descriptor found */
	return 0;
}

/*
 * Return the kern_memmap entry containing phys_addr, or NULL if none does.
 * The unsigned subtraction doubles as the lower-bound check: when phys_addr
 * is below md->start it wraps to a huge value and fails the comparison.
 */
static struct kern_memdesc *
kern_memory_descriptor (unsigned long phys_addr)
{
	struct kern_memdesc *md;

	/* kern_memmap is terminated by a sentinel entry with start == ~0UL */
	for (md = kern_memmap; md->start != ~0UL; md++) {
		if (phys_addr - md->start < (md->num_pages << EFI_PAGE_SHIFT))
			 return md;
	}
	return NULL;
}

/*
 * Return the EFI memory-map descriptor covering phys_addr, or NULL if no
 * descriptor covers it.  Same unsigned-wraparound range test as above.
 */
static efi_memory_desc_t *
efi_memory_descriptor (unsigned long phys_addr)
{
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;
	u64 efi_desc_size;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		md = p;
		if (phys_addr - md->phys_addr < efi_md_size(md))
			 return md;
	}
	return NULL;
}

/*
 * Return 1 if [phys_addr, phys_addr + size) overlaps any entry of the EFI
 * memory map, 0 otherwise.
 */
static int
efi_memmap_intersects (unsigned long phys_addr, unsigned long size)
{
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;
	u64 efi_desc_size;
	unsigned long end;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	end = phys_addr + size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		md = p;
		/* standard half-open interval overlap test */
		if (md->phys_addr < end && efi_md_end(md) > phys_addr)
			return 1;
	}
	return 0;
}

/*
 * EFI memory type of the descriptor covering phys_addr; 0 if the address is
 * not described by the EFI memory map.
 */
u32
efi_mem_type (unsigned long phys_addr)
{
	efi_memory_desc_t *md = efi_memory_descriptor(phys_addr);

	if (md)
		return md->type;
	return 0;
}

/*
 * EFI attribute bits of the descriptor covering phys_addr; 0 if the address
 * is not described by the EFI memory map.
 */
u64
efi_mem_attributes (unsigned long phys_addr)
{
	efi_memory_desc_t *md = efi_memory_descriptor(phys_addr);

	if (md)
		return md->attribute;
	return 0;
}
EXPORT_SYMBOL(efi_mem_attributes);

/*
 * Attributes of the whole range [phys_addr, phys_addr + size), walking
 * consecutive EFI descriptors.  Returns 0 unless the entire range is covered
 * by descriptors that all share the same attributes.
 */
u64
efi_mem_attribute (unsigned long phys_addr, unsigned long size)
{
	unsigned long end = phys_addr + size;
	efi_memory_desc_t *md = efi_memory_descriptor(phys_addr);
	u64 attr;

	if (!md)
		return 0;

	/*
	 * EFI_MEMORY_RUNTIME is not a memory attribute; it just tells
	 * the kernel that firmware needs this region mapped.
	 */
	attr = md->attribute & ~EFI_MEMORY_RUNTIME;
	do {
		unsigned long md_end = efi_md_end(md);

		if (end <= md_end)
			return attr;

		/* advance to the descriptor that starts where this one ends */
		md = efi_memory_descriptor(md_end);
		if (!md || (md->attribute & ~EFI_MEMORY_RUNTIME) != attr)
			return 0;
	} while (md);
	return 0;	/* never reached */
}

/*
 * Like efi_mem_attribute(), but against the kernel's trimmed kern_memmap
 * (granule-aligned WB/UC regions) rather than the raw EFI map.
 */
u64
kern_mem_attribute (unsigned long phys_addr, unsigned long size)
{
	unsigned long end = phys_addr + size;
	struct kern_memdesc *md;
	u64 attr;

	/*
	 * This is a hack for ioremap calls before we set up kern_memmap.
	 * Maybe we should do efi_memmap_init() earlier instead.
	 */
	if (!kern_memmap) {
		attr = efi_mem_attribute(phys_addr, size);
		if (attr & EFI_MEMORY_WB)
			return EFI_MEMORY_WB;
		return 0;
	}

	md = kern_memory_descriptor(phys_addr);
	if (!md)
		return 0;

	attr = md->attribute;
	do {
		unsigned long md_end = kmd_end(md);

		if (end <= md_end)
			return attr;

		md = kern_memory_descriptor(md_end);
		if (!md || md->attribute != attr)
			return 0;
	} while (md);
	return 0;	/* never reached */
}
EXPORT_SYMBOL(kern_mem_attribute);

int
valid_phys_addr_range (unsigned long phys_addr, unsigned long size)
{
	u64 attr;

	/*
	 * /dev/mem reads and writes use copy_to_user(), which implicitly
	 * uses a granule-sized kernel identity mapping.  It's really
	 * only safe to do this for regions in kern_memmap.  For more
	 * details, see Documentation/ia64/aliasing.txt.
	 */
	attr = kern_mem_attribute(phys_addr, size);
	if (attr & EFI_MEMORY_WB || attr & EFI_MEMORY_UC)
		return 1;
	return 0;
}

int
valid_mmap_phys_addr_range (unsigned long pfn, unsigned long size)
{
	unsigned long phys_addr = pfn << PAGE_SHIFT;
	u64 attr;

	attr = efi_mem_attribute(phys_addr, size);

	/*
	 * /dev/mem mmap uses normal user pages, so we don't need the entire
	 * granule, but the entire region we're mapping must support the same
	 * attribute.
	 */
	if (attr & EFI_MEMORY_WB || attr & EFI_MEMORY_UC)
		return 1;

	/*
	 * Intel firmware doesn't tell us about all the MMIO regions, so
	 * in general we have to allow mmap requests.  But if EFI *does*
	 * tell us about anything inside this region, we should deny it.
	 * The user can always map a smaller region to avoid the overlap.
	 */
	if (efi_memmap_intersects(phys_addr, size))
		return 0;

	return 1;
}

pgprot_t
phys_mem_access_prot(struct file *file, unsigned long pfn, unsigned long size,
		     pgprot_t vma_prot)
{
	unsigned long phys_addr = pfn << PAGE_SHIFT;
	u64 attr;

	/*
	 * For /dev/mem mmap, we use user mappings, but if the region is
	 * in kern_memmap (and hence may be covered by a kernel mapping),
	 * we must use the same attribute as the kernel mapping.
	 */
	attr = kern_mem_attribute(phys_addr, size);
	if (attr & EFI_MEMORY_WB)
		return pgprot_cacheable(vma_prot);
	else if (attr & EFI_MEMORY_UC)
		return pgprot_noncached(vma_prot);

	/*
	 * Some chipsets don't support UC access to memory.  If
	 * WB is supported, we prefer that.
	 */

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -