
📄 efi.c

📁 linux2.6.16 version
💻 C
			 * Some descriptors have multiple bits set, so the order of
			 * the tests is relevant.
			 */
			if (md->attribute & EFI_MEMORY_WB) {
				md->virt_addr = (u64) __va(md->phys_addr);
			} else if (md->attribute & EFI_MEMORY_UC) {
				md->virt_addr = (u64) ioremap(md->phys_addr, 0);
			} else if (md->attribute & EFI_MEMORY_WC) {
#if 0
				md->virt_addr = ia64_remap(md->phys_addr, (_PAGE_A | _PAGE_P
									   | _PAGE_D
									   | _PAGE_MA_WC
									   | _PAGE_PL_0
									   | _PAGE_AR_RW));
#else
				printk(KERN_INFO "EFI_MEMORY_WC mapping\n");
				md->virt_addr = (u64) ioremap(md->phys_addr, 0);
#endif
			} else if (md->attribute & EFI_MEMORY_WT) {
#if 0
				md->virt_addr = ia64_remap(md->phys_addr, (_PAGE_A | _PAGE_P
									   | _PAGE_D | _PAGE_MA_WT
									   | _PAGE_PL_0
									   | _PAGE_AR_RW));
#else
				printk(KERN_INFO "EFI_MEMORY_WT mapping\n");
				md->virt_addr = (u64) ioremap(md->phys_addr, 0);
#endif
			}
		}
	}

	status = efi_call_phys(__va(runtime->set_virtual_address_map),
			       ia64_boot_param->efi_memmap_size,
			       efi_desc_size, ia64_boot_param->efi_memdesc_version,
			       ia64_boot_param->efi_memmap);
	if (status != EFI_SUCCESS) {
		printk(KERN_WARNING "warning: unable to switch EFI into virtual mode "
		       "(status=%lu)\n", status);
		return;
	}

	/*
	 * Now that EFI is in virtual mode, we call the EFI functions more efficiently:
	 */
	efi.get_time = virt_get_time;
	efi.set_time = virt_set_time;
	efi.get_wakeup_time = virt_get_wakeup_time;
	efi.set_wakeup_time = virt_set_wakeup_time;
	efi.get_variable = virt_get_variable;
	efi.get_next_variable = virt_get_next_variable;
	efi.set_variable = virt_set_variable;
	efi.get_next_high_mono_count = virt_get_next_high_mono_count;
	efi.reset_system = virt_reset_system;
}
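/*
 * Illustrative sketch (not part of the original file): once the
 * virtual-mode wrappers above are installed, callers reach the EFI
 * runtime through the global "efi" table.  efi_debug_print_time() is
 * a hypothetical helper; the (*efi.get_time)() call form matches how
 * efi_gettimeofday() in this file invokes the service.
 */
static void
efi_debug_print_time (void)
{
	efi_time_t tm;

	if ((*efi.get_time)(&tm, NULL) != EFI_SUCCESS)
		return;
	printk(KERN_INFO "EFI time: %u-%02u-%02u %02u:%02u:%02u\n",
	       tm.year, tm.month, tm.day, tm.hour, tm.minute, tm.second);
}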
/*
 * Walk the EFI memory map looking for the I/O port range.  There can only be one entry of
 * this type, other I/O port ranges should be described via ACPI.
 */
u64
efi_get_iobase (void)
{
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;
	u64 efi_desc_size;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		md = p;
		if (md->type == EFI_MEMORY_MAPPED_IO_PORT_SPACE) {
			if (md->attribute & EFI_MEMORY_UC)
				return md->phys_addr;
		}
	}
	return 0;
}

static efi_memory_desc_t *
efi_memory_descriptor (unsigned long phys_addr)
{
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;
	u64 efi_desc_size;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		md = p;

		if (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT))
			return md;
	}
	return 0;
}

static int
efi_memmap_has_mmio (void)
{
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;
	u64 efi_desc_size;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		md = p;

		if (md->type == EFI_MEMORY_MAPPED_IO)
			return 1;
	}
	return 0;
}

u32
efi_mem_type (unsigned long phys_addr)
{
	efi_memory_desc_t *md = efi_memory_descriptor(phys_addr);

	if (md)
		return md->type;
	return 0;
}

u64
efi_mem_attributes (unsigned long phys_addr)
{
	efi_memory_desc_t *md = efi_memory_descriptor(phys_addr);

	if (md)
		return md->attribute;
	return 0;
}
EXPORT_SYMBOL(efi_mem_attributes);
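/*
 * Illustrative sketch (not part of the original file):
 * efi_mem_attributes() is exported above so drivers can ask what the
 * firmware says about a physical range before mapping it.
 * my_map_mmio() is a hypothetical helper that refuses to map a region
 * unless the EFI memory map marks it uncacheable.
 */
static void __iomem *
my_map_mmio (unsigned long phys_addr, unsigned long size)
{
	/* only map regions the firmware marked UC */
	if (!(efi_mem_attributes(phys_addr) & EFI_MEMORY_UC))
		return NULL;
	return ioremap(phys_addr, size);
}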
/*
 * Determines whether the memory at phys_addr supports the desired
 * attribute (WB, UC, etc).  If this returns 1, the caller can safely
 * access *size bytes at phys_addr with the specified attribute.
 */
static int
efi_mem_attribute_range (unsigned long phys_addr, unsigned long *size, u64 attr)
{
	efi_memory_desc_t *md = efi_memory_descriptor(phys_addr);
	unsigned long md_end;

	if (!md || (md->attribute & attr) != attr)
		return 0;

	do {
		md_end = efi_md_end(md);
		if (phys_addr + *size <= md_end)
			return 1;

		md = efi_memory_descriptor(md_end);
		if (!md || (md->attribute & attr) != attr) {
			*size = md_end - phys_addr;
			return 1;
		}
	} while (md);
	return 0;
}

/*
 * For /dev/mem, we only allow read & write system calls to access
 * write-back memory, because read & write don't allow the user to
 * control access size.
 */
int
valid_phys_addr_range (unsigned long phys_addr, unsigned long *size)
{
	return efi_mem_attribute_range(phys_addr, size, EFI_MEMORY_WB);
}

/*
 * We allow mmap of anything in the EFI memory map that supports
 * either write-back or uncacheable access.  For uncacheable regions,
 * the supported access sizes are system-dependent, and the user is
 * responsible for using the correct size.
 *
 * Note that this doesn't currently allow access to hot-added memory,
 * because that doesn't appear in the boot-time EFI memory map.
 */
int
valid_mmap_phys_addr_range (unsigned long phys_addr, unsigned long *size)
{
	if (efi_mem_attribute_range(phys_addr, size, EFI_MEMORY_WB))
		return 1;

	if (efi_mem_attribute_range(phys_addr, size, EFI_MEMORY_UC))
		return 1;

	/*
	 * Some firmware doesn't report MMIO regions in the EFI memory map.
	 * The Intel BigSur (a.k.a. HP i2000) has this problem.  In this
	 * case, we can't use the EFI memory map to validate mmap requests.
	 */
	if (!efi_memmap_has_mmio())
		return 1;

	return 0;
}

int __init
efi_uart_console_only(void)
{
	efi_status_t status;
	char *s, name[] = "ConOut";
	efi_guid_t guid = EFI_GLOBAL_VARIABLE_GUID;
	efi_char16_t *utf16, name_utf16[32];
	unsigned char data[1024];
	unsigned long size = sizeof(data);
	struct efi_generic_dev_path *hdr, *end_addr;
	int uart = 0;

	/* Convert to UTF-16 */
	utf16 = name_utf16;
	s = name;
	while (*s)
		*utf16++ = *s++ & 0x7f;
	*utf16 = 0;

	status = efi.get_variable(name_utf16, &guid, NULL, &size, data);
	if (status != EFI_SUCCESS) {
		printk(KERN_ERR "No EFI %s variable?\n", name);
		return 0;
	}

	hdr = (struct efi_generic_dev_path *) data;
	end_addr = (struct efi_generic_dev_path *) ((u8 *) data + size);
	while (hdr < end_addr) {
		if (hdr->type == EFI_DEV_MSG &&
		    hdr->sub_type == EFI_DEV_MSG_UART)
			uart = 1;
		else if (hdr->type == EFI_DEV_END_PATH ||
			 hdr->type == EFI_DEV_END_PATH2) {
			if (!uart)
				return 0;
			if (hdr->sub_type == EFI_DEV_END_ENTIRE)
				return 1;
			uart = 0;
		}
		hdr = (struct efi_generic_dev_path *) ((u8 *) hdr + hdr->length);
	}
	printk(KERN_ERR "Malformed %s value\n", name);
	return 0;
}

/*
 * Look for the first granule-aligned memory descriptor that is big
 * enough to hold the EFI memory map.  Make sure this descriptor is
 * at least granule sized so it does not get trimmed.
 */
struct kern_memdesc *
find_memmap_space (void)
{
	u64	contig_low=0, contig_high=0;
	u64	as = 0, ae;
	void *efi_map_start, *efi_map_end, *p, *q;
	efi_memory_desc_t *md, *pmd = NULL, *check_md;
	u64	space_needed, efi_desc_size;
	unsigned long total_mem = 0;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	/*
	 * Worst case: we need 3 kernel descriptors for each efi descriptor
	 * (if every entry has a WB part in the middle, and UC head and tail),
	 * plus one for the end marker.
	 */
	space_needed = sizeof(kern_memdesc_t) *
		(3 * (ia64_boot_param->efi_memmap_size/efi_desc_size) + 1);

	for (p = efi_map_start; p < efi_map_end; pmd = md, p += efi_desc_size) {
		md = p;
		if (!efi_wb(md)) {
			continue;
		}
		if (pmd == NULL || !efi_wb(pmd) || efi_md_end(pmd) != md->phys_addr) {
			contig_low = GRANULEROUNDUP(md->phys_addr);
			contig_high = efi_md_end(md);
			for (q = p + efi_desc_size; q < efi_map_end; q += efi_desc_size) {
				check_md = q;
				if (!efi_wb(check_md))
					break;
				if (contig_high != check_md->phys_addr)
					break;
				contig_high = efi_md_end(check_md);
			}
			contig_high = GRANULEROUNDDOWN(contig_high);
		}
		if (!is_available_memory(md) || md->type == EFI_LOADER_DATA)
			continue;

		/* Round ends inward to granule boundaries */
		as = max(contig_low, md->phys_addr);
		ae = min(contig_high, efi_md_end(md));

		/* keep within max_addr= command line arg */
		ae = min(ae, max_addr);
		if (ae <= as)
			continue;

		/* avoid going over mem= command line arg */
		if (total_mem + (ae - as) > mem_limit)
			ae -= total_mem + (ae - as) - mem_limit;

		if (ae <= as)
			continue;

		if (ae - as > space_needed)
			break;
	}
	if (p >= efi_map_end)
		panic("Can't allocate space for kernel memory descriptors");

	return __va(as);
}
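/*
 * Illustrative sketch (not part of the original file): the granule
 * rounding used by find_memmap_space() and efi_memmap_init(), shown in
 * isolation.  GRANULEROUNDUP()/GRANULEROUNDDOWN() round to
 * IA64_GRANULE_SIZE boundaries (typically 16 MB), trimming a WB range
 * inward so the kernel claims only whole granules.
 * print_usable_range() is a hypothetical helper.
 */
static void
print_usable_range (u64 start, u64 end)
{
	u64 as = GRANULEROUNDUP(start);		/* first granule boundary >= start */
	u64 ae = GRANULEROUNDDOWN(end);		/* last granule boundary <= end */

	if (ae > as)
		printk(KERN_INFO "usable WB range: %lx-%lx\n", as, ae);
}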
/*
 * Walk the EFI memory map and gather all memory available for kernel
 * to use.  We can allocate partial granules only if the unavailable
 * parts exist, and are WB.
 */
void
efi_memmap_init(unsigned long *s, unsigned long *e)
{
	struct kern_memdesc *k, *prev = 0;
	u64	contig_low=0, contig_high=0;
	u64	as, ae, lim;
	void *efi_map_start, *efi_map_end, *p, *q;
	efi_memory_desc_t *md, *pmd = NULL, *check_md;
	u64	efi_desc_size;
	unsigned long total_mem = 0;

	k = kern_memmap = find_memmap_space();

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; pmd = md, p += efi_desc_size) {
		md = p;
		if (!efi_wb(md)) {
			if (efi_uc(md) && (md->type == EFI_CONVENTIONAL_MEMORY ||
					   md->type == EFI_BOOT_SERVICES_DATA)) {
				k->attribute = EFI_MEMORY_UC;
				k->start = md->phys_addr;
				k->num_pages = md->num_pages;
				k++;
			}
			continue;
		}
		if (pmd == NULL || !efi_wb(pmd) || efi_md_end(pmd) != md->phys_addr) {
			contig_low = GRANULEROUNDUP(md->phys_addr);
			contig_high = efi_md_end(md);
			for (q = p + efi_desc_size; q < efi_map_end; q += efi_desc_size) {
				check_md = q;
				if (!efi_wb(check_md))
					break;
				if (contig_high != check_md->phys_addr)
					break;
				contig_high = efi_md_end(check_md);
			}
			contig_high = GRANULEROUNDDOWN(contig_high);
		}
		if (!is_available_memory(md))
			continue;

		/*
		 * Round ends inward to granule boundaries
		 * Give trimmings to uncached allocator
		 */
		if (md->phys_addr < contig_low) {
			lim = min(efi_md_end(md), contig_low);
			if (efi_uc(md)) {
				if (k > kern_memmap && (k-1)->attribute == EFI_MEMORY_UC &&
				    kmd_end(k-1) == md->phys_addr) {
					(k-1)->num_pages += (lim - md->phys_addr) >> EFI_PAGE_SHIFT;
				} else {
					k->attribute = EFI_MEMORY_UC;
					k->start = md->phys_addr;
					k->num_pages = (lim - md->phys_addr) >> EFI_PAGE_SHIFT;
					k++;
				}
			}
			as = contig_low;
		} else
			as = md->phys_addr;

		if (efi_md_end(md) > contig_high) {
			lim = max(md->phys_addr, contig_high);
			if (efi_uc(md)) {
				if (lim == md->phys_addr && k > kern_memmap &&
				    (k-1)->attribute == EFI_MEMORY_UC &&
				    kmd_end(k-1) == md->phys_addr) {
					(k-1)->num_pages += md->num_pages;
				} else {
					k->attribute = EFI_MEMORY_UC;
					k->start = lim;
					k->num_pages = (efi_md_end(md) - lim) >> EFI_PAGE_SHIFT;
					k++;
				}
			}
			ae = contig_high;
		} else
			ae = efi_md_end(md);

		/* keep within max_addr= command line arg */
		ae = min(ae, max_addr);
		if (ae <= as)
			continue;

		/* avoid going over mem= command line arg */
		if (total_mem + (ae - as) > mem_limit)
			ae -= total_mem + (ae - as) - mem_limit;

		if (ae <= as)
			continue;

		if (prev && kmd_end(prev) == md->phys_addr) {
			prev->num_pages += (ae - as) >> EFI_PAGE_SHIFT;
			total_mem += ae - as;
			continue;
		}

		k->attribute = EFI_MEMORY_WB;
		k->start = as;
		k->num_pages = (ae - as) >> EFI_PAGE_SHIFT;
		total_mem += ae - as;
		prev = k++;
	}
	k->start = ~0L; /* end-marker */

	/* reserve the memory we are using for kern_memmap */
	*s = (u64)kern_memmap;
	*e = (u64)++k;
}
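/*
 * Illustrative sketch (not part of the original file): the two
 * out-parameters return the range occupied by the kern_memdesc array
 * that efi_memmap_init() just built, so early boot code can reserve it
 * before handing memory to the allocator.  record_kern_memmap() is a
 * hypothetical caller.
 */
static void __init
record_kern_memmap (void)
{
	unsigned long start, end;

	efi_memmap_init(&start, &end);
	printk(KERN_INFO "kern_memmap at %lx-%lx (must be reserved)\n",
	       start, end);
}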
void
efi_initialize_iomem_resources(struct resource *code_resource,
			       struct resource *data_resource)
{
	struct resource *res;
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;
	u64 efi_desc_size;
	char *name;
	unsigned long flags;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	res = NULL;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		md = p;

		if (md->num_pages == 0) /* should not happen */
			continue;

		flags = IORESOURCE_MEM;
		switch (md->type) {

			case EFI_MEMORY_MAPPED_IO:
			case EFI_MEMORY_MAPPED_IO_PORT_SPACE:
				continue;

			case EFI_LOADER_CODE:
			case EFI_LOADER_DATA:
			case EFI_BOOT_SERVICES_DATA:
			case EFI_BOOT_SERVICES_CODE:
			case EFI_CONVENTIONAL_MEMORY:
				if (md->attribute & EFI_MEMORY_WP) {
					name = "System ROM";
					flags |= IORESOURCE_READONLY;
				} else {
					name = "System RAM";
				}
				break;

			case EFI_ACPI_MEMORY_NVS:
				name = "ACPI Non-volatile Storage";
				flags |= IORESOURCE_BUSY;
				break;

			case EFI_UNUSABLE_MEMORY:
				name = "reserved";
				flags |= IORESOURCE_BUSY | IORESOURCE_DISABLED;
				break;

			case EFI_RESERVED_TYPE:
			case EFI_RUNTIME_SERVICES_CODE:
			case EFI_RUNTIME_SERVICES_DATA:
			case EFI_ACPI_RECLAIM_MEMORY:
			default:
				name = "reserved";
				flags |= IORESOURCE_BUSY;
				break;
		}

		if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) {
			printk(KERN_ERR "failed to allocate resource for iomem\n");
			return;
		}

		res->name = name;
		res->start = md->phys_addr;
		res->end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1;
		res->flags = flags;

		if (insert_resource(&iomem_resource, res) < 0)
			kfree(res);
		else {
			/*
			 * We don't know which region contains
			 * kernel data so we try it repeatedly and
			 * let the resource manager test it.
			 */
			insert_resource(res, code_resource);
			insert_resource(res, data_resource);
		}
	}
}
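/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * caller of efi_initialize_iomem_resources().  The kernel-image
 * resources are passed in so the insert_resource() calls above can
 * nest them inside whichever "System RAM" entry actually contains the
 * kernel.  ia64_tpa() translates a mapped kernel address to physical;
 * _text/_etext/_edata are the usual linker symbols (asm/sections.h).
 */
static struct resource my_code_resource = {
	.name	= "Kernel code",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};
static struct resource my_data_resource = {
	.name	= "Kernel data",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};

static void __init
my_register_iomem_resources (void)
{
	my_code_resource.start = ia64_tpa(_text);
	my_code_resource.end   = ia64_tpa(_etext) - 1;
	my_data_resource.start = ia64_tpa(_etext);
	my_data_resource.end   = ia64_tpa(_edata) - 1;
	efi_initialize_iomem_resources(&my_code_resource, &my_data_resource);
}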
