
📄 efi.c

📁 Xen virtual machine source package
💻 C
📖 Page 1 of 3
	u64 attr;

	/*
	 * For /dev/mem mmap, we use user mappings, but if the region is
	 * in kern_memmap (and hence may be covered by a kernel mapping),
	 * we must use the same attribute as the kernel mapping.
	 */
	attr = kern_mem_attribute(phys_addr, size);
	if (attr & EFI_MEMORY_WB)
		return pgprot_cacheable(vma_prot);
	else if (attr & EFI_MEMORY_UC)
		return pgprot_noncached(vma_prot);

	/*
	 * Some chipsets don't support UC access to memory.  If
	 * WB is supported, we prefer that.
	 */
	if (efi_mem_attribute(phys_addr, size) & EFI_MEMORY_WB)
		return pgprot_cacheable(vma_prot);

	return pgprot_noncached(vma_prot);
}
#endif

int __init
efi_uart_console_only(void)
{
	efi_status_t status;
	char *s, name[] = "ConOut";
	efi_guid_t guid = EFI_GLOBAL_VARIABLE_GUID;
	efi_char16_t *utf16, name_utf16[32];
	unsigned char data[1024];
	unsigned long size = sizeof(data);
	struct efi_generic_dev_path *hdr, *end_addr;
	int uart = 0;

	/* Convert to UTF-16 */
	utf16 = name_utf16;
	s = name;
	while (*s)
		*utf16++ = *s++ & 0x7f;
	*utf16 = 0;

	status = efi.get_variable(name_utf16, &guid, NULL, &size, data);
	if (status != EFI_SUCCESS) {
		printk(KERN_ERR "No EFI %s variable?\n", name);
		return 0;
	}

	hdr = (struct efi_generic_dev_path *) data;
	end_addr = (struct efi_generic_dev_path *) ((u8 *) data + size);
	while (hdr < end_addr) {
		if (hdr->type == EFI_DEV_MSG &&
		    hdr->sub_type == EFI_DEV_MSG_UART)
			uart = 1;
		else if (hdr->type == EFI_DEV_END_PATH ||
			 hdr->type == EFI_DEV_END_PATH2) {
			if (!uart)
				return 0;
			if (hdr->sub_type == EFI_DEV_END_ENTIRE)
				return 1;
			uart = 0;
		}
		hdr = (struct efi_generic_dev_path *) ((u8 *) hdr + hdr->length);
	}
	printk(KERN_ERR "Malformed %s value\n", name);
	return 0;
}
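
A note for readers following the ConOut parsing above: it rests on two idioms, widening an ASCII name to UCS-2 code units and walking a packed device-path buffer by its self-describing length field. The standalone userspace sketch below isolates both. The struct layout and the type/sub-type codes are illustrative stand-ins, not the kernel's efi_generic_dev_path definitions, and the buffer contents are invented.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct dev_path_hdr {		/* hypothetical stand-in for efi_generic_dev_path */
	uint8_t  type;
	uint8_t  sub_type;
	uint16_t length;	/* total node size in bytes, header included */
};

enum { DP_MSG = 3, DP_MSG_UART = 14, DP_END = 0x7f, DP_END_ENTIRE = 0xff };

int main(void)
{
	/* ASCII -> UCS-2: each code unit is the 7-bit character, zero-extended */
	char name[] = "ConOut";
	uint16_t name_ucs2[32], *u = name_ucs2;
	for (char *s = name; *s; )
		*u++ = *s++ & 0x7f;
	*u = 0;
	printf("first UCS-2 unit: 0x%04x\n", (unsigned) name_ucs2[0]);

	/* A two-node path: one UART message node, then an end-of-path node */
	uint8_t buf[sizeof(struct dev_path_hdr) * 2];
	struct dev_path_hdr uart = { DP_MSG, DP_MSG_UART, sizeof uart };
	struct dev_path_hdr end  = { DP_END, DP_END_ENTIRE, sizeof end };
	memcpy(buf, &uart, sizeof uart);
	memcpy(buf + sizeof uart, &end, sizeof end);

	/* Walk exactly as the kernel loop does: advance by hdr->length */
	struct dev_path_hdr *hdr = (void *) buf;
	struct dev_path_hdr *stop = (void *) (buf + sizeof buf);
	while (hdr < stop) {
		printf("node type=0x%02x sub=0x%02x len=%u\n",
		       hdr->type, hdr->sub_type, hdr->length);
		hdr = (void *) ((uint8_t *) hdr + hdr->length);
	}
	return 0;
}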
/*
 * Look for the first granule-aligned memory descriptor that is big
 * enough to hold the EFI memory map.  Make sure this descriptor is
 * at least granule sized so it does not get trimmed.
 */
struct kern_memdesc *
find_memmap_space (void)
{
	u64	contig_low = 0, contig_high = 0;
	u64	as = 0, ae;
	void *efi_map_start, *efi_map_end, *p, *q;
	efi_memory_desc_t *md, *pmd = NULL, *check_md;
	u64	space_needed, efi_desc_size;
	unsigned long total_mem = 0;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	/*
	 * Worst case: we need 3 kernel descriptors for each efi descriptor
	 * (if every entry has a WB part in the middle, and UC head and tail),
	 * plus one for the end marker.
	 */
	space_needed = sizeof(kern_memdesc_t) *
		(3 * (ia64_boot_param->efi_memmap_size/efi_desc_size) + 1);

	for (p = efi_map_start; p < efi_map_end; pmd = md, p += efi_desc_size) {
		md = p;
		if (!efi_wb(md))
			continue;
		if (pmd == NULL || !efi_wb(pmd) || efi_md_end(pmd) != md->phys_addr) {
			contig_low = GRANULEROUNDUP(md->phys_addr);
			contig_high = efi_md_end(md);
			for (q = p + efi_desc_size; q < efi_map_end; q += efi_desc_size) {
				check_md = q;
				if (!efi_wb(check_md))
					break;
				if (contig_high != check_md->phys_addr)
					break;
				contig_high = efi_md_end(check_md);
			}
			contig_high = GRANULEROUNDDOWN(contig_high);
		}
		if (!is_memory_available(md) || md->type == EFI_LOADER_DATA)
			continue;

		/* Round ends inward to granule boundaries */
		as = max(contig_low, md->phys_addr);
		ae = min(contig_high, efi_md_end(md));

		/* keep within max_addr= and min_addr= command line arg */
		as = max(as, min_addr);
		ae = min(ae, max_addr);
		if (ae <= as)
			continue;

		/* avoid going over mem= command line arg */
		if (total_mem + (ae - as) > mem_limit)
			ae -= total_mem + (ae - as) - mem_limit;

		if (ae <= as)
			continue;
		if (ae - as > space_needed)
			break;
	}
	if (p >= efi_map_end)
		panic("Can't allocate space for kernel memory descriptors");

	return __va(as);
}
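
The heart of find_memmap_space() is the inner q-loop, which coalesces physically adjacent WB descriptors into one run and then rounds the run inward to granule boundaries. The userspace sketch below isolates just that arithmetic; the 16 MB granule and the sample ranges are assumptions for illustration, not values taken from this code.

#include <stdio.h>
#include <stdint.h>

#define GRANULE (16UL << 20)			/* assumed ia64 granule size */
#define ROUNDUP(x)   (((x) + GRANULE - 1) & ~(GRANULE - 1))
#define ROUNDDOWN(x) ((x) & ~(GRANULE - 1))

struct range { uint64_t start, end; };		/* [start, end), WB memory */

int main(void)
{
	/* Three physically adjacent WB descriptors forming one run */
	struct range md[] = {
		{ 0x0100000, 0x4000000 },
		{ 0x4000000, 0x9000000 },
		{ 0x9000000, 0xc500000 },
	};
	int n = sizeof md / sizeof md[0];

	/* Coalesce as the q-loop does: extend contig_high while the
	 * next descriptor starts exactly where the run currently ends. */
	uint64_t contig_low = ROUNDUP(md[0].start);
	uint64_t contig_high = md[0].end;
	for (int i = 1; i < n && md[i].start == contig_high; i++)
		contig_high = md[i].end;
	contig_high = ROUNDDOWN(contig_high);

	printf("usable granule-aligned span: [%#lx, %#lx)\n",
	       (unsigned long) contig_low, (unsigned long) contig_high);
	return 0;
}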
/*
 * Walk the EFI memory map and gather all memory available for the kernel
 * to use.  We can allocate partial granules only if the unavailable
 * parts exist, and are WB.
 */
void
efi_memmap_init(unsigned long *s, unsigned long *e)
{
	struct kern_memdesc *k, *prev = NULL;
	u64	contig_low = 0, contig_high = 0;
	u64	as, ae, lim;
	void *efi_map_start, *efi_map_end, *p, *q;
	efi_memory_desc_t *md, *pmd = NULL, *check_md;
	u64	efi_desc_size;
	unsigned long total_mem = 0;

	k = kern_memmap = find_memmap_space();

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; pmd = md, p += efi_desc_size) {
		md = p;
		if (!efi_wb(md)) {
			if (efi_uc(md) && (md->type == EFI_CONVENTIONAL_MEMORY ||
					   md->type == EFI_BOOT_SERVICES_DATA)) {
				k->attribute = EFI_MEMORY_UC;
				k->start = md->phys_addr;
				k->num_pages = md->num_pages;
				k++;
			}
			continue;
		}
#ifdef XEN
		/* this works around a problem in the ski bootloader */
		if (running_on_sim && md->type != EFI_CONVENTIONAL_MEMORY)
			continue;
#endif
		if (pmd == NULL || !efi_wb(pmd) || efi_md_end(pmd) != md->phys_addr) {
			contig_low = GRANULEROUNDUP(md->phys_addr);
			contig_high = efi_md_end(md);
			for (q = p + efi_desc_size; q < efi_map_end; q += efi_desc_size) {
				check_md = q;
				if (!efi_wb(check_md))
					break;
				if (contig_high != check_md->phys_addr)
					break;
				contig_high = efi_md_end(check_md);
			}
			contig_high = GRANULEROUNDDOWN(contig_high);
		}
		if (!is_memory_available(md))
			continue;

#ifdef CONFIG_CRASH_DUMP
		/* saved_max_pfn should ignore max_addr= command line arg */
		if (saved_max_pfn < (efi_md_end(md) >> PAGE_SHIFT))
			saved_max_pfn = (efi_md_end(md) >> PAGE_SHIFT);
#endif
		/*
		 * Round ends inward to granule boundaries
		 * Give trimmings to uncached allocator
		 */
		if (md->phys_addr < contig_low) {
			lim = min(efi_md_end(md), contig_low);
			if (efi_uc(md)) {
				if (k > kern_memmap && (k-1)->attribute == EFI_MEMORY_UC &&
				    kmd_end(k-1) == md->phys_addr) {
					(k-1)->num_pages += (lim - md->phys_addr) >> EFI_PAGE_SHIFT;
				} else {
					k->attribute = EFI_MEMORY_UC;
					k->start = md->phys_addr;
					k->num_pages = (lim - md->phys_addr) >> EFI_PAGE_SHIFT;
					k++;
				}
			}
			as = contig_low;
		} else
			as = md->phys_addr;

		if (efi_md_end(md) > contig_high) {
			lim = max(md->phys_addr, contig_high);
			if (efi_uc(md)) {
				if (lim == md->phys_addr && k > kern_memmap &&
				    (k-1)->attribute == EFI_MEMORY_UC &&
				    kmd_end(k-1) == md->phys_addr) {
					(k-1)->num_pages += md->num_pages;
				} else {
					k->attribute = EFI_MEMORY_UC;
					k->start = lim;
					k->num_pages = (efi_md_end(md) - lim) >> EFI_PAGE_SHIFT;
					k++;
				}
			}
			ae = contig_high;
		} else
			ae = efi_md_end(md);

		/* keep within max_addr= and min_addr= command line arg */
		as = max(as, min_addr);
		ae = min(ae, max_addr);
		if (ae <= as)
			continue;

		/* avoid going over mem= command line arg */
		if (total_mem + (ae - as) > mem_limit)
			ae -= total_mem + (ae - as) - mem_limit;

		if (ae <= as)
			continue;
		if (prev && kmd_end(prev) == md->phys_addr) {
			prev->num_pages += (ae - as) >> EFI_PAGE_SHIFT;
			total_mem += ae - as;
			continue;
		}
		k->attribute = EFI_MEMORY_WB;
		k->start = as;
		k->num_pages = (ae - as) >> EFI_PAGE_SHIFT;
		total_mem += ae - as;
		prev = k++;
	}
	k->start = ~0L; /* end-marker */

	/* reserve the memory we are using for kern_memmap */
	*s = (u64) kern_memmap;
	*e = (u64) ++k;
}
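
The "worst case" sizing comment in find_memmap_space() becomes concrete in efi_memmap_init(): when granule rounding trims a WB descriptor, the head and tail are handed to the uncached allocator as separate UC entries, so one EFI descriptor can yield up to three kernel descriptors. A small worked example, with an assumed 16 MB granule and invented addresses:

#include <stdio.h>
#include <stdint.h>

#define GRANULE (16UL << 20)			/* assumed granule size */
#define ROUNDUP(x)   (((x) + GRANULE - 1) & ~(GRANULE - 1))
#define ROUNDDOWN(x) ((x) & ~(GRANULE - 1))

int main(void)
{
	/* One EFI descriptor whose ends are not granule aligned */
	uint64_t phys = 0x0080000, end = 0x3faa000;
	uint64_t wb_lo = ROUNDUP(phys), wb_hi = ROUNDDOWN(end);

	/* The granule-rounded middle stays WB; the trimmings go UC */
	printf("UC head : [%#lx, %#lx)\n", (unsigned long) phys,  (unsigned long) wb_lo);
	printf("WB body : [%#lx, %#lx)\n", (unsigned long) wb_lo, (unsigned long) wb_hi);
	printf("UC tail : [%#lx, %#lx)\n", (unsigned long) wb_hi, (unsigned long) end);

	/* Hence a map of N descriptors needs at most 3*N + 1 records,
	 * the "+ 1" being the end marker. */
	unsigned n_desc = 64;			/* hypothetical map size */
	printf("worst-case records for %u descriptors: %u\n", n_desc, 3 * n_desc + 1);
	return 0;
}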
#ifndef XEN
void
efi_initialize_iomem_resources(struct resource *code_resource,
			       struct resource *data_resource)
{
	struct resource *res;
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;
	u64 efi_desc_size;
	char *name;
	unsigned long flags;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	res = NULL;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		md = p;

		if (md->num_pages == 0) /* should not happen */
			continue;

		flags = IORESOURCE_MEM;
		switch (md->type) {

			case EFI_MEMORY_MAPPED_IO:
			case EFI_MEMORY_MAPPED_IO_PORT_SPACE:
				continue;

			case EFI_LOADER_CODE:
			case EFI_LOADER_DATA:
			case EFI_BOOT_SERVICES_DATA:
			case EFI_BOOT_SERVICES_CODE:
			case EFI_CONVENTIONAL_MEMORY:
				if (md->attribute & EFI_MEMORY_WP) {
					name = "System ROM";
					flags |= IORESOURCE_READONLY;
				} else {
					name = "System RAM";
				}
				break;

			case EFI_ACPI_MEMORY_NVS:
				name = "ACPI Non-volatile Storage";
				flags |= IORESOURCE_BUSY;
				break;

			case EFI_UNUSABLE_MEMORY:
				name = "reserved";
				flags |= IORESOURCE_BUSY | IORESOURCE_DISABLED;
				break;

			case EFI_RESERVED_TYPE:
			case EFI_RUNTIME_SERVICES_CODE:
			case EFI_RUNTIME_SERVICES_DATA:
			case EFI_ACPI_RECLAIM_MEMORY:
			default:
				name = "reserved";
				flags |= IORESOURCE_BUSY;
				break;
		}

		if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) {
			printk(KERN_ERR "failed to allocate resource for iomem\n");
			return;
		}

		res->name = name;
		res->start = md->phys_addr;
		res->end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1;
		res->flags = flags;

		if (insert_resource(&iomem_resource, res) < 0)
			kfree(res);
		else {
			/*
			 * We don't know which region contains
			 * kernel data so we try it repeatedly and
			 * let the resource manager test it.
			 */
			insert_resource(res, code_resource);
			insert_resource(res, data_resource);
#ifdef CONFIG_KEXEC
			insert_resource(res, &efi_memmap_res);
			insert_resource(res, &boot_param_res);
			if (crashk_res.end > crashk_res.start)
				insert_resource(res, &crashk_res);
#endif
		}
	}
}
#endif /* XEN */

#if defined(CONFIG_KEXEC) || defined(XEN)
/*
 * Find a block of memory aligned to 64M, excluding reserved regions.
 * rsvd_regions are sorted.
 */
unsigned long __init
kdump_find_rsvd_region (unsigned long size,
		struct rsvd_region *r, int n)
{
	int i;
	u64 start, end;
	u64 alignment = 1UL << _PAGE_SIZE_64M;
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;
	u64 efi_desc_size;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		md = p;
		if (!efi_wb(md))
			continue;
		start = ALIGN(md->phys_addr, alignment);
		end = efi_md_end(md);
		for (i = 0; i < n; i++) {
			if (__pa(r[i].start) >= start && __pa(r[i].end) < end) {
				if (__pa(r[i].start) > start + size)
					return start;
				start = ALIGN(__pa(r[i].end), alignment);
				if (i < n-1 && __pa(r[i+1].start) < start + size)
					continue;
				else
					break;
			}
		}
		if (end > start + size)
			return start;
	}

	printk(KERN_WARNING "Cannot reserve 0x%lx bytes of memory for crashdump\n",
	       size);
	return ~0UL;
}
#endif

#ifndef XEN
#ifdef CONFIG_PROC_VMCORE
/* Find the size of the EFI memory descriptor at a certain address. */
unsigned long
vmcore_find_descriptor_size (unsigned long address)
{
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;
	u64 efi_desc_size;
	unsigned long ret = 0;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		md = p;
		if (efi_wb(md) && md->type == EFI_LOADER_DATA
		    && md->phys_addr == address) {
			ret = efi_md_size(md);
			break;
		}
	}

	if (ret == 0)
		printk(KERN_WARNING "Cannot locate EFI vmcore descriptor\n");

	return ret;
}
#endif
#endif /* XEN */
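
Finally, a compact userspace sketch of the search strategy in kdump_find_rsvd_region(): within one free WB range, advance a 64 MB-aligned candidate past each sorted reserved sub-region until a block of the requested size fits, or give up. The ranges and the helper name find_block are invented for illustration; this is not the kernel's implementation, just the idea behind it.

#include <stdio.h>
#include <stdint.h>

#define ALIGN64M (1UL << 26)
#define ALIGN_UP(x) (((x) + ALIGN64M - 1) & ~(ALIGN64M - 1))

struct region { uint64_t start, end; };		/* [start, end) */

static uint64_t find_block(struct region free_rg, uint64_t size,
			   const struct region *rsvd, int n)
{
	uint64_t start = ALIGN_UP(free_rg.start);

	for (int i = 0; i < n; i++) {
		if (rsvd[i].end <= start)
			continue;		/* already past this reservation */
		if (rsvd[i].start >= start + size)
			break;			/* candidate fits before it */
		start = ALIGN_UP(rsvd[i].end);	/* jump past the reservation */
	}
	return (start + size <= free_rg.end) ? start : ~0UL;
}

int main(void)
{
	struct region free_rg = { 0x04000000, 0x40000000 };
	struct region rsvd[] = { { 0x04000000, 0x05000000 } };	/* sorted */

	uint64_t got = find_block(free_rg, 0x8000000, rsvd, 1);
	printf("64MB-aligned block: %#lx\n", (unsigned long) got);
	return 0;
}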
