
📄 efi.c

📁 This file is part of rt_linux
💻 C
📖 Page 1 of 2
/*
 * Extensible Firmware Interface
 *
 * Based on Extensible Firmware Interface Specification version 0.9 April 30, 1999
 *
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 1999-2002 Hewlett-Packard Co.
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 *
 * All EFI Runtime Services are not implemented yet as EFI only
 * supports physical mode addressing on SoftSDV. This is to be fixed
 * in a future version.  --drummond 1999-07-20
 *
 * Implemented EFI runtime services and virtual mode calls.  --davidm
 *
 * Goutham Rao: <goutham.rao@intel.com>
 *	Skip non-WB memory and ignore empty memory ranges.
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/time.h>
#include <linux/proc_fs.h>
#include <linux/efi.h>

#include <asm/io.h>
#include <asm/kregs.h>
#include <asm/pgtable.h>
#include <asm/processor.h>

#define EFI_DEBUG	0

extern efi_status_t efi_call_phys (void *, ...);

struct efi efi;
static efi_runtime_services_t *runtime;

/*
 * efi_dir is allocated here, but the directory isn't created
 * here, as proc_mkdir() doesn't work this early in the bootup
 * process.  Therefore, each module, like efivars, must test for
 *    if (!efi_dir)  efi_dir = proc_mkdir("efi", NULL);
 * prior to creating their own entries under /proc/efi.
 */
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *efi_dir = NULL;
#endif

static unsigned long mem_limit = ~0UL;

static efi_status_t
phys_get_time (efi_time_t *tm, efi_time_cap_t *tc)
{
	return efi_call_phys(__va(runtime->get_time), __pa(tm), __pa(tc));
}

static efi_status_t
phys_set_time (efi_time_t *tm)
{
	return efi_call_phys(__va(runtime->set_time), __pa(tm));
}

static efi_status_t
phys_get_wakeup_time (efi_bool_t *enabled, efi_bool_t *pending, efi_time_t *tm)
{
	return efi_call_phys(__va(runtime->get_wakeup_time), __pa(enabled), __pa(pending),
			     __pa(tm));
}

static efi_status_t
phys_set_wakeup_time (efi_bool_t enabled, efi_time_t *tm)
{
	return efi_call_phys(__va(runtime->set_wakeup_time), enabled, __pa(tm));
}

static efi_status_t
phys_get_variable (efi_char16_t *name, efi_guid_t *vendor, u32 *attr,
		   unsigned long *data_size, void *data)
{
	return efi_call_phys(__va(runtime->get_variable), __pa(name), __pa(vendor), __pa(attr),
			     __pa(data_size), __pa(data));
}

static efi_status_t
phys_get_next_variable (unsigned long *name_size, efi_char16_t *name, efi_guid_t *vendor)
{
	return efi_call_phys(__va(runtime->get_next_variable), __pa(name_size), __pa(name),
			     __pa(vendor));
}

static efi_status_t
phys_set_variable (efi_char16_t *name, efi_guid_t *vendor, u32 attr,
		   unsigned long data_size, void *data)
{
	return efi_call_phys(__va(runtime->set_variable), __pa(name), __pa(vendor), attr,
			     data_size, __pa(data));
}

static efi_status_t
phys_get_next_high_mono_count (u64 *count)
{
	return efi_call_phys(__va(runtime->get_next_high_mono_count), __pa(count));
}

static void
phys_reset_system (int reset_type, efi_status_t status,
		   unsigned long data_size, efi_char16_t *data)
{
	efi_call_phys(__va(runtime->reset_system), status, data_size, __pa(data));
}

void
efi_gettimeofday (struct timeval *tv)
{
	efi_time_t tm;

	memset(tv, 0, sizeof(*tv));
	if ((*efi.get_time)(&tm, 0) != EFI_SUCCESS)
		return;

	tv->tv_sec = mktime(tm.year, tm.month, tm.day, tm.hour, tm.minute, tm.second);
	tv->tv_usec = tm.nanosecond / 1000;
}

static int
is_available_memory (efi_memory_desc_t *md)
{
	if (!(md->attribute & EFI_MEMORY_WB))
		return 0;

	switch (md->type) {
	      case EFI_LOADER_CODE:
	      case EFI_LOADER_DATA:
	      case EFI_BOOT_SERVICES_CODE:
	      case EFI_BOOT_SERVICES_DATA:
	      case EFI_CONVENTIONAL_MEMORY:
		return 1;
	}
	return 0;
}

/*
 * Trim descriptor MD so it starts at address START_ADDR.  If the descriptor covers
 * memory that is normally available to the kernel, issue a warning that some memory
 * is being ignored.
 */
static void
trim_bottom (efi_memory_desc_t *md, u64 start_addr)
{
	u64 num_skipped_pages;

	if (md->phys_addr >= start_addr || !md->num_pages)
		return;

	num_skipped_pages = (start_addr - md->phys_addr) >> EFI_PAGE_SHIFT;
	if (num_skipped_pages > md->num_pages)
		num_skipped_pages = md->num_pages;

	if (is_available_memory(md))
		printk(KERN_NOTICE "efi.%s: ignoring %luKB of memory at 0x%lx due to granule hole "
		       "at 0x%lx\n", __FUNCTION__,
		       (num_skipped_pages << EFI_PAGE_SHIFT) >> 10,
		       md->phys_addr, start_addr - IA64_GRANULE_SIZE);
	/*
	 * NOTE: Don't set md->phys_addr to START_ADDR because that could cause the memory
	 * descriptor list to become unsorted.  In such a case, md->num_pages will be
	 * zero, so the Right Thing will happen.
	 */
	md->phys_addr += num_skipped_pages << EFI_PAGE_SHIFT;
	md->num_pages -= num_skipped_pages;
}

static void
trim_top (efi_memory_desc_t *md, u64 end_addr)
{
	u64 num_dropped_pages, md_end_addr;

	md_end_addr = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);

	if (md_end_addr <= end_addr || !md->num_pages)
		return;

	num_dropped_pages = (md_end_addr - end_addr) >> EFI_PAGE_SHIFT;
	if (num_dropped_pages > md->num_pages)
		num_dropped_pages = md->num_pages;

	if (is_available_memory(md))
		printk(KERN_NOTICE "efi.%s: ignoring %luKB of memory at 0x%lx due to granule hole "
		       "at 0x%lx\n", __FUNCTION__,
		       (num_dropped_pages << EFI_PAGE_SHIFT) >> 10,
		       md->phys_addr, end_addr);
	md->num_pages -= num_dropped_pages;
}

/*
 * Walks the EFI memory map and calls CALLBACK once for each EFI memory descriptor that
 * has memory that is available for OS use.
 */
void
efi_memmap_walk (efi_freemem_callback_t callback, void *arg)
{
	int prev_valid = 0;
	struct range {
		u64 start;
		u64 end;
	} prev, curr;
	void *efi_map_start, *efi_map_end, *p, *q;
	efi_memory_desc_t *md, *check_md;
	u64 efi_desc_size, start, end, granule_addr, first_non_wb_addr = 0;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		md = p;

		/* skip over non-WB memory descriptors; that's all we're interested in... */
		if (!(md->attribute & EFI_MEMORY_WB))
			continue;

		if (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) > first_non_wb_addr) {
			/*
			 * Search for the next run of contiguous WB memory.  Start search
			 * at first granule boundary covered by md.
			 */
			granule_addr = ((md->phys_addr + IA64_GRANULE_SIZE - 1)
					& -IA64_GRANULE_SIZE);
			first_non_wb_addr = granule_addr;
			for (q = p; q < efi_map_end; q += efi_desc_size) {
				check_md = q;

				if (check_md->attribute & EFI_MEMORY_WB)
					trim_bottom(md, granule_addr);

				if (check_md->phys_addr < granule_addr)
					continue;

				if (!(check_md->attribute & EFI_MEMORY_WB))
					break;	/* hit a non-WB region; stop search */
				if (check_md->phys_addr != first_non_wb_addr)
					break;	/* hit a memory hole; stop search */

				first_non_wb_addr += check_md->num_pages << EFI_PAGE_SHIFT;
			}
			/* round it down to the previous granule-boundary: */
			first_non_wb_addr &= -IA64_GRANULE_SIZE;

			if (!(first_non_wb_addr > granule_addr))
				continue;	/* couldn't find enough contiguous memory */
		}

		/* BUG_ON((md->phys_addr >> IA64_GRANULE_SHIFT) < first_non_wb_addr); */

		trim_top(md, first_non_wb_addr);
		if (is_available_memory(md)) {
			if (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) > mem_limit) {
				if (md->phys_addr > mem_limit)
					continue;
				md->num_pages = (mem_limit - md->phys_addr) >> EFI_PAGE_SHIFT;
			}

			if (md->num_pages == 0)
				continue;

			curr.start = PAGE_OFFSET + md->phys_addr;
			curr.end   = curr.start + (md->num_pages << EFI_PAGE_SHIFT);

			if (!prev_valid) {
				prev = curr;
				prev_valid = 1;
			} else {
				if (curr.start < prev.start)
					printk("Oops: EFI memory table not ordered!\n");

				if (prev.end == curr.start) {
					/* merge two consecutive memory ranges */
					prev.end = curr.end;
				} else {
					start = PAGE_ALIGN(prev.start);
					end = prev.end & PAGE_MASK;
					if ((end > start) && (*callback)(start, end, arg) < 0)
						return;
					prev = curr;
				}
			}
		}
	}
	if (prev_valid) {
		start = PAGE_ALIGN(prev.start);
		end = prev.end & PAGE_MASK;
		if (end > start)
			(*callback)(start, end, arg);
	}
}

/*
 * Look for the PAL_CODE region reported by EFI and map it using an
 * ITR to enable safe PAL calls in virtual mode.  See IA-64 Processor
 * Abstraction Layer chapter 11 in ADAG
 */
void
efi_map_pal_code (void)
{
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;
	u64 efi_desc_size;
	int pal_code_count = 0;
	u64 mask, psr;
	u64 vaddr;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;
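For context on how efi_memmap_walk() above is meant to be consumed: the walker passes each coalesced, page-aligned range of kernel-usable memory to a caller-supplied callback, and a negative return value from the callback aborts the walk (see the (*callback)(start, end, arg) < 0 check in the loop). The fragment below is a minimal, hypothetical usage sketch, not part of efi.c: the names count_pages and total_pages are illustrative, and it assumes the efi_freemem_callback_t prototype from <linux/efi.h> takes (u64 start, u64 end, void *arg), as the call sites in the listing suggest.

/*
 * Hypothetical caller of efi_memmap_walk() -- illustration only, not part of
 * this file.  The callback is invoked once per contiguous range of available
 * memory; returning a negative value would stop the walk early.
 */
static unsigned long total_pages;	/* illustrative name */

static int
count_pages (u64 start, u64 end, void *arg)
{
	unsigned long *count = arg;

	*count += (end - start) >> PAGE_SHIFT;	/* ranges arrive page-aligned */
	return 0;				/* non-negative: keep walking */
}

	/* e.g., somewhere in the boot path: */
	efi_memmap_walk(count_pages, &total_pages);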
