⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 setup.c

📁 xen虚拟机源代码安装包
💻 C
📖 第 1 页 / 共 2 页
字号:
/*
 * Architecture-specific setup.
 *
 * Copyright (C) 1998-2001, 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2000, 2004 Intel Corp
 * 	Rohit Seth <rohit.seth@intel.com>
 * 	Suresh Siddha <suresh.b.siddha@intel.com>
 * 	Gordon Jin <gordon.jin@intel.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 *
 * 12/26/04 S.Siddha, G.Jin, R.Seth
 *			Add multi-threading and multi-core detection
 * 11/12/01 D.Mosberger Convert get_cpuinfo() to seq_file based show_cpuinfo().
 * 04/04/00 D.Mosberger renamed cpu_initialized to cpu_online_map
 * 03/31/00 R.Seth	cpu_initialized and current->processor fixes
 * 02/04/00 D.Mosberger	some more get_cpuinfo fixes...
 * 02/01/00 R.Seth	fixed get_cpuinfo for SMP
 * 01/07/99 S.Eranian	added the support for command line argument
 * 06/24/99 W.Drummond	added boot_cpu_data.
 * 05/28/05 Z. Menyhart	Dynamic stride size for "flush_icache_range()"
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/shutdown.h>	/* NOTE(review): unusual header name - confirm it exists in this tree */
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/threads.h>
#include <linux/tty.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/efi.h>
#include <linux/initrd.h>
#include <linux/platform.h>
#include <linux/pm.h>
#include <asm/ia32.h>
#include <asm/machvec.h>
#include <asm/mca.h>
#include <asm/meminit.h>
#include <asm/page.h>
#include <asm/patch.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/serial.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/system.h>
#include <asm/unistd.h>
#ifdef XEN
#include <asm/vmx.h>
#include <asm/io.h>
#include <asm/kexec.h>
#include <public/kexec.h>
#include <xen/kexec.h>
#endif

/* The per-CPU area must hold struct cpuinfo_ia64; fail the build otherwise. */
#if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
# error "struct cpuinfo_ia64 too big!"
#endif

/* Per-CPU data definitions. */
#ifdef CONFIG_SMP
unsigned long __per_cpu_offset[NR_CPUS];
EXPORT_SYMBOL(__per_cpu_offset);
#endif
DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info);
#ifdef XEN
DEFINE_PER_CPU(cpu_kr_ia64_t, cpu_kr);
#endif
DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
DEFINE_PER_CPU(unsigned long, ia64_phys_stacked_size_p8);

/* Global boot/platform state, filled in during early setup. */
unsigned long ia64_cycles_per_usec;
struct ia64_boot_param *ia64_boot_param;
struct screen_info screen_info;
unsigned long vga_console_iobase;
unsigned long vga_console_membase;

unsigned long ia64_max_cacheline_size;
unsigned long ia64_iobase;	/* virtual address for I/O accesses */
EXPORT_SYMBOL(ia64_iobase);
struct io_space io_space[MAX_IO_SPACES];
EXPORT_SYMBOL(io_space);
unsigned int num_io_spaces;

#ifdef XEN
extern void early_cmdline_parse(char **);
extern unsigned int ns16550_com1_gsi;
#endif

/*
 * "flush_icache_range()" needs to know what processor dependent stride size to use
 * when it makes i-cache(s) coherent with d-caches.
 */
#define	I_CACHE_STRIDE_SHIFT	5	/* Safest way to go: 32 bytes by 32 bytes */
unsigned long ia64_i_cache_stride_shift = ~0;
#ifdef XEN
#define D_CACHE_STRIDE_SHIFT	5	/* Safest.  */
unsigned long ia64_d_cache_stride_shift = ~0;
#endif

/*
 * The merge_mask variable needs to be set to (max(iommu_page_size(iommu)) - 1).  This
 * mask specifies a mask of address bits that must be 0 in order for two buffers to be
 * mergeable by the I/O MMU (i.e., the end address of the first buffer and the start
 * address of the second buffer must be aligned to (merge_mask+1) in order to be
 * mergeable).  By default, we assume there is no I/O MMU which can merge physically
 * discontiguous buffers, so we set the merge_mask to ~0UL, which corresponds to a iommu
 * page-size of 2^64.
*/
unsigned long ia64_max_iommu_merge_mask = ~0UL;
EXPORT_SYMBOL(ia64_max_iommu_merge_mask);

/*
 * We use a special marker for the end of memory and it uses the extra (+1) slot
 */
struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1];
int num_rsvd_regions;

/*
 * Filter incoming memory segments based on the primitive map created from the boot
 * parameters. Segments contained in the map are removed from the memory ranges. A
 * caller-specified function is called with the memory ranges that remain after filtering.
 * This routine does not assume the incoming segments are sorted.
 */
int
filter_rsvd_memory (unsigned long start, unsigned long end, void *arg)
{
	unsigned long range_start, range_end, prev_start;
	void (*func)(unsigned long, unsigned long, int);
	int i;

#if IGNORE_PFN0
	/* Physical page 0 is skipped so it is never handed to the caller. */
	if (start == PAGE_OFFSET) {
		printk(KERN_WARNING "warning: skipping physical page 0\n");
		start += PAGE_SIZE;
		if (start >= end) return 0;
	}
#endif
	/*
	 * lowest possible address(walker uses virtual)
	 */
	prev_start = PAGE_OFFSET;
	/* arg carries the per-range callback; funnel it through a typed pointer. */
	func = arg;

	/* Walk rsvd_region[]; assumes it is sorted by start (see sort_regions()). */
	for (i = 0; i < num_rsvd_regions; ++i) {
		/* The usable gap is between the previous region's end and this one's start. */
		range_start = max(start, prev_start);
		range_end   = min(end, rsvd_region[i].start);

		if (range_start < range_end)
#ifdef XEN
		{
		/* init_boot_pages requires "ps, pe" */
			printk("Init boot pages: 0x%lx -> 0x%lx.\n",
				__pa(range_start), __pa(range_end));
			(*func)(__pa(range_start), __pa(range_end), 0);
		}
#else
			call_pernode_memory(__pa(range_start), range_end - range_start, func);
#endif

		/* nothing more available in this segment */
		if (range_end == end) return 0;

		prev_start = rsvd_region[i].end;
	}
	/* end of memory marker allows full processing inside loop body */
	return 0;
}

/*
 * Sort reserved regions by ascending start address.  Bubble sort is fine here:
 * the array is bounded by IA64_MAX_RSVD_REGIONS + 1 entries.
 */
static void
sort_regions (struct rsvd_region *rsvd_region, int max)
{
	int j;

	/* simple bubble sorting */
	while (max--) {
		for (j = 0; j < max; ++j) {
			if (rsvd_region[j].start > rsvd_region[j+1].start) {
				struct rsvd_region tmp;
				tmp = rsvd_region[j];
				rsvd_region[j] = rsvd_region[j + 1];
				rsvd_region[j + 1] = tmp;
			}
		}
	}
}

/**
 * reserve_memory -
setup reserved memory areas
 *
 * Setup the reserved memory areas set aside for the boot parameters,
 * initrd, etc.  There are currently %IA64_MAX_RSVD_REGIONS defined,
 * see include/asm-ia64/meminit.h if you need to define more.
 */
void
reserve_memory (void)
{
	/* n counts the slots consumed in rsvd_region[]; each entry below bumps it. */
	int n = 0;

	/*
	 * none of the entries in this table overlap
	 */
	/* The boot parameter block itself. */
	rsvd_region[n].start = (unsigned long) ia64_boot_param;
	rsvd_region[n].end   = rsvd_region[n].start + sizeof(*ia64_boot_param);
	n++;

	/* The EFI memory map passed in by the boot loader. */
	rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->efi_memmap);
	rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->efi_memmap_size;
	n++;

	/* The kernel command line, including its NUL terminator. */
	rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->command_line);
	rsvd_region[n].end   = (rsvd_region[n].start
				+ strlen(__va(ia64_boot_param->command_line)) + 1);
	n++;

	rsvd_region[n].start = (unsigned long) ia64_imva((void *)KERNEL_START);
#ifdef XEN
	/* Reserve xen image/bitmap/xen-heap */
	rsvd_region[n].end   = rsvd_region[n].start + xenheap_size;
#else
	rsvd_region[n].end   = (unsigned long) ia64_imva(_end);
#endif
	n++;

#ifdef XEN
	/* The initial domain image loaded by the boot loader. */
	rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->domain_start);
	rsvd_region[n].end   = (rsvd_region[n].start + ia64_boot_param->domain_size);
	n++;
#endif

#if defined(XEN)||defined(CONFIG_BLK_DEV_INITRD)
	if (ia64_boot_param->initrd_start) {
		rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start);
		rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->initrd_size;
		n++;
	}
#endif

	/* efi_memmap_init() fills in this slot's start/end itself. */
	efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end);
	n++;

#ifdef XEN
	/* crashkernel=size@offset specifies the size to reserve for a crash
	 * kernel. If offset is 0, then it is determined automatically.
	 * By reserving this memory we guarantee that linux never set's it
	 * up as a DMA target. Useful for holding code to do something
	 * appropriate after a kernel panic.
	 */
	if (kexec_crash_area.size > 0) {
		if (!kexec_crash_area.start) {
			/* No explicit offset: pick a free spot among the regions so far. */
			sort_regions(rsvd_region, n);
			kexec_crash_area.start =
				kdump_find_rsvd_region(kexec_crash_area.size,
						       rsvd_region, n);
		}
		if (kexec_crash_area.start != ~0UL) {
			printk("Kdump: %luMB (%lukB) at 0x%lx\n",
			       kexec_crash_area.size >> 20,
			       kexec_crash_area.size >> 10,
			       kexec_crash_area.start);
			rsvd_region[n].start =
				(unsigned long)__va(kexec_crash_area.start);
			rsvd_region[n].end =
				(unsigned long)__va(kexec_crash_area.start +
						    kexec_crash_area.size);
			n++;
		}
		else {
			/* No suitable area found: disable the crash kernel reservation. */
			kexec_crash_area.size = 0;
			kexec_crash_area.start = 0;
		}
	}
#endif

	/* end of memory marker */
	rsvd_region[n].start = ~0UL;
	rsvd_region[n].end   = ~0UL;
	n++;

	num_rsvd_regions = n;

	/* filter_rsvd_memory() relies on the table being sorted by start. */
	sort_regions(rsvd_region, num_rsvd_regions);
}

/**
 * find_initrd - get initrd parameters from the boot parameter structure
 *
 * Grab the initrd start and end from the boot parameter struct given us by
 * the boot loader.
 */
void
find_initrd (void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (ia64_boot_param->initrd_start) {
		initrd_start = (unsigned long)__va(ia64_boot_param->initrd_start);
		initrd_end   = initrd_start+ia64_boot_param->initrd_size;

		printk(KERN_INFO "Initial ramdisk at: 0x%lx (%lu bytes)\n",
		       initrd_start, ia64_boot_param->initrd_size);
	}
#endif
}

static void __init
io_port_init (void)
{
	extern unsigned long ia64_iobase;
	unsigned long phys_iobase;

	/*
	 *  Set `iobase' to the appropriate address in region 6 (uncached access range).
	 *
	 *  The EFI memory map is the "preferred" location to get the I/O port space base,
	 *  rather the relying on AR.KR0. This should become more clear in future SAL
	 *  specs. We'll fall back to getting it out of AR.KR0 if no appropriate entry is
	 *  found in the memory map.
*/
	phys_iobase = efi_get_iobase();
	if (phys_iobase)
		/* set AR.KR0 since this is all we use it for anyway */
		ia64_set_kr(IA64_KR_IO_BASE, phys_iobase);
	else {
		/* No EFI entry: fall back to whatever firmware left in AR.KR0. */
		phys_iobase = ia64_get_kr(IA64_KR_IO_BASE);
		printk(KERN_INFO "No I/O port range found in EFI memory map, falling back "
		       "to AR.KR0\n");
		printk(KERN_INFO "I/O port base = 0x%lx\n", phys_iobase);
	}
	ia64_iobase = (unsigned long) ioremap(phys_iobase, 0);

	/* setup legacy IO port space */
	io_space[0].mmio_base = ia64_iobase;
	io_space[0].sparse = 1;
	num_io_spaces = 1;
}

#ifdef XEN
/*
 * Probe ACPI tables for known OEM systems and configure the early ns16550
 * console accordingly.  Returns 0 if a console was configured, -ENODEV
 * otherwise (including when an HCDP table already describes the console).
 */
static int __init
acpi_oem_console_setup(void)
{
	extern struct ns16550_defaults ns16550_com1;
	efi_system_table_t *systab;
	efi_config_table_t *tables;
	struct acpi_table_rsdp *rsdp = NULL;
	struct acpi_table_xsdt *xsdt;
	struct acpi_table_header *hdr;
	int i;

	/* Don't duplicate setup if an HCDP table is present */
	if (efi.hcdp != EFI_INVALID_TABLE_ADDR)
		return -ENODEV;

	/* Manually walk firmware provided tables to get to the XSDT.  */
	systab = __va(ia64_boot_param->efi_systab);
	if (!systab || systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
		return -ENODEV;

	tables = __va(systab->tables);

	/* Scan the EFI configuration tables for the ACPI 2.0 RSDP. */
	for (i = 0 ; i < (int)systab->nr_tables && !rsdp ; i++) {
		if (efi_guidcmp(tables[i].guid, ACPI_20_TABLE_GUID) == 0)
			rsdp =
			     (struct acpi_table_rsdp *)__va(tables[i].table);
	}

	if (!rsdp ||
	    strncmp(rsdp->signature, ACPI_SIG_RSDP, sizeof(ACPI_SIG_RSDP) - 1))
		return -ENODEV;

	xsdt = (struct acpi_table_xsdt *)__va(rsdp->xsdt_physical_address);
	hdr = &xsdt->header;

	if (strncmp(hdr->signature, ACPI_SIG_XSDT, sizeof(ACPI_SIG_XSDT) - 1))
		return -ENODEV;

	/* Looking for Fujitsu PRIMEQUEST systems */
	if (!strncmp(hdr->oem_id, "FUJITSPQ", 8) &&
	    (!strncmp(hdr->oem_table_id, "PQ", 2))){
		ns16550_com1.baud = BAUD_AUTO;
		ns16550_com1.io_base =	0x3f8;
		ns16550_com1.irq = ns16550_com1_gsi = 4;
		return 0;
	}

	/*
	 * Looking for Intel Tiger systems
	 * Tiger 2: SR870BH2
	 * Tiger 4: SR870BN4
	 */
	if (!strncmp(hdr->oem_id, "INTEL", 5)) {
		if (!strncmp(hdr->oem_table_id, "SR870BH2", 8) ||
		    !strncmp(hdr->oem_table_id, "SR870BN4", 8)) {
			ns16550_com1.baud = BAUD_AUTO;
			ns16550_com1.io_base = 0x2f8;
			ns16550_com1.irq = 3;
			return 0;
		} else {
			/* Any other Intel OEM table id gets the COM1 defaults. */
			ns16550_com1.baud = BAUD_AUTO;
			ns16550_com1.io_base = 0x3f8;
			ns16550_com1.irq = ns16550_com1_gsi = 4;
			return 0;
		}
	}
	return -ENODEV;
}
#endif

/**
 * early_console_setup - setup debugging console
 *
 * Consoles started here require little enough setup that we can start using
 * them very early in the boot process, either right after the machine
 * vector initialization, or even before if the drivers can detect their hw.
 *
 * Returns non-zero if a console couldn't be setup.
 */
static inline int __init
early_console_setup (char *cmdline)
{
	int earlycons = 0;

#ifdef CONFIG_SERIAL_SGI_L1_CONSOLE
	{
		extern int sn_serial_console_early_setup(void);
		if (!sn_serial_console_early_setup())
			earlycons++;
	}
#endif
#ifdef CONFIG_EFI_PCDP
	if (!efi_setup_pcdp_console(cmdline))
		earlycons++;
#endif
#ifdef CONFIG_SERIAL_8250_CONSOLE
	if (!early_serial_console_init(cmdline))
		earlycons++;
#endif
#ifdef XEN
	if (!acpi_oem_console_setup())
		earlycons++;
#endif

	/* Zero means at least one console came up. */
	return (earlycons) ? 0 : -1;
}

static inline void
mark_bsp_online (void)
{
#ifdef CONFIG_SMP
	/* If we register an early console, allow CPU 0 to printk */
	cpu_set(smp_processor_id(), cpu_online_map);
#endif
}

#ifdef CONFIG_SMP
/*
 * Query PAL for the logical-to-physical CPU mapping and record the
 * thread/core counts of the boot CPU.
 */
static void
check_for_logical_procs (void)
{
	pal_logical_to_physical_t info;
	s64 status;

	status = ia64_pal_logical_to_phys(0, &info);
	if (status == -1) {
		/* PAL call not implemented on this processor. */
		printk(KERN_INFO "No logical to physical processor mapping "
		       "available\n");
		return;
	}
	if (status) {
		printk(KERN_ERR "ia64_pal_logical_to_phys failed with %ld\n",
		       status);
		return;
	}
	/*
	 * Total number of siblings that BSP has.  Though not all of them
	 * may have booted successfully. The correct number of siblings
	 * booted is in info.overview_num_log.
*/
	smp_num_siblings = info.overview_tpc;
	smp_num_cpucores = info.overview_cpp;
}
#endif

/*
 * Early architecture setup: unwinder init, vtop patch application,
 * command-line handling, then EFI and I/O port initialization.
 * (Function continues beyond this excerpt.)
 */
void __init
#ifdef XEN
early_setup_arch (char **cmdline_p)
#else
setup_arch (char **cmdline_p)
#endif
{
	unw_init();

	ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);

	*cmdline_p = __va(ia64_boot_param->command_line);
#ifndef XEN
	strlcpy(saved_command_line, *cmdline_p, COMMAND_LINE_SIZE);
#else
	/* Xen parses the command line itself, both early and generic options. */
	early_cmdline_parse(cmdline_p);
	cmdline_parse(*cmdline_p);
#endif

	efi_init();
	io_port_init();

#ifdef CONFIG_IA64_GENERIC
	{
		/* Honor an explicit "machvec=" override on the command line. */
		const char *mvec_name = strstr (*cmdline_p, "machvec=");
		char str[64];

		if (mvec_name) {
			const char *end;
			size_t len;

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -