
setup.c
linux-2.6.15.6
C
Page 1 of 2
/*
 * Architecture-specific setup.
 *
 * Copyright (C) 1998-2001, 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2000, 2004 Intel Corp
 * 	Rohit Seth <rohit.seth@intel.com>
 * 	Suresh Siddha <suresh.b.siddha@intel.com>
 * 	Gordon Jin <gordon.jin@intel.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 *
 * 12/26/04 S.Siddha, G.Jin, R.Seth
 *			Add multi-threading and multi-core detection
 * 11/12/01 D.Mosberger Convert get_cpuinfo() to seq_file based show_cpuinfo().
 * 04/04/00 D.Mosberger renamed cpu_initialized to cpu_online_map
 * 03/31/00 R.Seth	cpu_initialized and current->processor fixes
 * 02/04/00 D.Mosberger	some more get_cpuinfo fixes...
 * 02/01/00 R.Seth	fixed get_cpuinfo for SMP
 * 01/07/99 S.Eranian	added the support for command line argument
 * 06/24/99 W.Drummond	added boot_cpu_data.
 * 05/28/05 Z. Menyhart	Dynamic stride size for "flush_icache_range()"
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/threads.h>
#include <linux/tty.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/efi.h>
#include <linux/initrd.h>
#include <linux/platform.h>
#include <linux/pm.h>

#include <asm/ia32.h>
#include <asm/machvec.h>
#include <asm/mca.h>
#include <asm/meminit.h>
#include <asm/page.h>
#include <asm/patch.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/serial.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/system.h>
#include <asm/unistd.h>

#if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
# error "struct cpuinfo_ia64 too big!"
#endif

#ifdef CONFIG_SMP
unsigned long __per_cpu_offset[NR_CPUS];
EXPORT_SYMBOL(__per_cpu_offset);
#endif

DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info);
DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
DEFINE_PER_CPU(unsigned long, ia64_phys_stacked_size_p8);
unsigned long ia64_cycles_per_usec;
struct ia64_boot_param *ia64_boot_param;
struct screen_info screen_info;
unsigned long vga_console_iobase;
unsigned long vga_console_membase;

static struct resource data_resource = {
	.name	= "Kernel data",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};

static struct resource code_resource = {
	.name	= "Kernel code",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};
extern void efi_initialize_iomem_resources(struct resource *,
		struct resource *);
extern char _text[], _end[], _etext[];

unsigned long ia64_max_cacheline_size;
int dma_get_cache_alignment(void)
{
        return ia64_max_cacheline_size;
}
EXPORT_SYMBOL(dma_get_cache_alignment);

unsigned long ia64_iobase;	/* virtual address for I/O accesses */
EXPORT_SYMBOL(ia64_iobase);
struct io_space io_space[MAX_IO_SPACES];
EXPORT_SYMBOL(io_space);
unsigned int num_io_spaces;

/*
 * "flush_icache_range()" needs to know what processor dependent stride size to use
 * when it makes i-cache(s) coherent with d-caches.
 */
#define	I_CACHE_STRIDE_SHIFT	5	/* Safest way to go: 32 bytes by 32 bytes */
unsigned long ia64_i_cache_stride_shift = ~0;

/*
 * The merge_mask variable needs to be set to (max(iommu_page_size(iommu)) - 1).  This
 * mask specifies a mask of address bits that must be 0 in order for two buffers to be
 * mergeable by the I/O MMU (i.e., the end address of the first buffer and the start
 * address of the second buffer must be aligned to (merge_mask+1) in order to be
 * mergeable).  By default, we assume there is no I/O MMU which can merge physically
 * discontiguous buffers, so we set the merge_mask to ~0UL, which corresponds to a iommu
 * page-size of 2^64.
 */
unsigned long ia64_max_iommu_merge_mask = ~0UL;
EXPORT_SYMBOL(ia64_max_iommu_merge_mask);

/*
 * We use a special marker for the end of memory and it uses the extra (+1) slot
 */
struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1];
int num_rsvd_regions;

/*
 * Filter incoming memory segments based on the primitive map created from the boot
 * parameters. Segments contained in the map are removed from the memory ranges. A
 * caller-specified function is called with the memory ranges that remain after filtering.
 * This routine does not assume the incoming segments are sorted.
 */
int
filter_rsvd_memory (unsigned long start, unsigned long end, void *arg)
{
	unsigned long range_start, range_end, prev_start;
	void (*func)(unsigned long, unsigned long, int);
	int i;

#if IGNORE_PFN0
	if (start == PAGE_OFFSET) {
		printk(KERN_WARNING "warning: skipping physical page 0\n");
		start += PAGE_SIZE;
		if (start >= end) return 0;
	}
#endif
	/*
	 * lowest possible address(walker uses virtual)
	 */
	prev_start = PAGE_OFFSET;
	func = arg;

	for (i = 0; i < num_rsvd_regions; ++i) {
		range_start = max(start, prev_start);
		range_end   = min(end, rsvd_region[i].start);

		if (range_start < range_end)
			call_pernode_memory(__pa(range_start), range_end - range_start, func);

		/* nothing more available in this segment */
		if (range_end == end) return 0;

		prev_start = rsvd_region[i].end;
	}
	/* end of memory marker allows full processing inside loop body */
	return 0;
}

static void
sort_regions (struct rsvd_region *rsvd_region, int max)
{
	int j;

	/* simple bubble sorting */
	while (max--) {
		for (j = 0; j < max; ++j) {
			if (rsvd_region[j].start > rsvd_region[j+1].start) {
				struct rsvd_region tmp;
				tmp = rsvd_region[j];
				rsvd_region[j] = rsvd_region[j + 1];
				rsvd_region[j + 1] = tmp;
			}
		}
	}
}

/*
 * Request address space for all standard resources
 */
static int __init register_memory(void)
{
	code_resource.start = ia64_tpa(_text);
	code_resource.end   = ia64_tpa(_etext) - 1;
	data_resource.start = ia64_tpa(_etext);
	data_resource.end   = ia64_tpa(_end) - 1;
	efi_initialize_iomem_resources(&code_resource, &data_resource);

	return 0;
}

__initcall(register_memory);

/**
 * reserve_memory - setup reserved memory areas
 *
 * Setup the reserved memory areas set aside for the boot parameters,
 * initrd, etc.  There are currently %IA64_MAX_RSVD_REGIONS defined,
 * see include/asm-ia64/meminit.h if you need to define more.
 */
void
reserve_memory (void)
{
	int n = 0;

	/*
	 * none of the entries in this table overlap
	 */
	rsvd_region[n].start = (unsigned long) ia64_boot_param;
	rsvd_region[n].end   = rsvd_region[n].start + sizeof(*ia64_boot_param);
	n++;

	rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->efi_memmap);
	rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->efi_memmap_size;
	n++;

	rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->command_line);
	rsvd_region[n].end   = (rsvd_region[n].start
				+ strlen(__va(ia64_boot_param->command_line)) + 1);
	n++;

	rsvd_region[n].start = (unsigned long) ia64_imva((void *)KERNEL_START);
	rsvd_region[n].end   = (unsigned long) ia64_imva(_end);
	n++;

#ifdef CONFIG_BLK_DEV_INITRD
	if (ia64_boot_param->initrd_start) {
		rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start);
		rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->initrd_size;
		n++;
	}
#endif

	efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end);
	n++;

	/* end of memory marker */
	rsvd_region[n].start = ~0UL;
	rsvd_region[n].end   = ~0UL;
	n++;

	num_rsvd_regions = n;

	sort_regions(rsvd_region, num_rsvd_regions);
}

/**
 * find_initrd - get initrd parameters from the boot parameter structure
 *
 * Grab the initrd start and end from the boot parameter struct given us by
 * the boot loader.
 */
void
find_initrd (void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (ia64_boot_param->initrd_start) {
		initrd_start = (unsigned long)__va(ia64_boot_param->initrd_start);
		initrd_end   = initrd_start+ia64_boot_param->initrd_size;

		printk(KERN_INFO "Initial ramdisk at: 0x%lx (%lu bytes)\n",
		       initrd_start, ia64_boot_param->initrd_size);
	}
#endif
}

static void __init
io_port_init (void)
{
	unsigned long phys_iobase;

	/*
	 * Set `iobase' based on the EFI memory map or, failing that, the
	 * value firmware left in ar.k0.
	 *
	 * Note that in ia32 mode, IN/OUT instructions use ar.k0 to compute
	 * the port's virtual address, so ia32_load_state() loads it with a
	 * user virtual address.  But in ia64 mode, glibc uses the
	 * *physical* address in ar.k0 to mmap the appropriate area from
	 * /dev/mem, and the inX()/outX() interfaces use MMIO.  In both
	 * cases, user-mode can only use the legacy 0-64K I/O port space.
	 *
	 * ar.k0 is not involved in kernel I/O port accesses, which can use
	 * any of the I/O port spaces and are done via MMIO using the
	 * virtual mmio_base from the appropriate io_space[].
	 */
	phys_iobase = efi_get_iobase();
	if (!phys_iobase) {
		phys_iobase = ia64_get_kr(IA64_KR_IO_BASE);
		printk(KERN_INFO "No I/O port range found in EFI memory map, "
			"falling back to AR.KR0 (0x%lx)\n", phys_iobase);
	}
	ia64_iobase = (unsigned long) ioremap(phys_iobase, 0);
	ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));

	/* setup legacy IO port space */
	io_space[0].mmio_base = ia64_iobase;
	io_space[0].sparse = 1;
	num_io_spaces = 1;
}

/**
 * early_console_setup - setup debugging console
 *
 * Consoles started here require little enough setup that we can start using
 * them very early in the boot process, either right after the machine
 * vector initialization, or even before if the drivers can detect their hw.
 *
 * Returns non-zero if a console couldn't be setup.
 */
static inline int __init
early_console_setup (char *cmdline)
{
	int earlycons = 0;

#ifdef CONFIG_SERIAL_SGI_L1_CONSOLE
	{
		extern int sn_serial_console_early_setup(void);
		if (!sn_serial_console_early_setup())
			earlycons++;
	}
#endif
#ifdef CONFIG_EFI_PCDP
	if (!efi_setup_pcdp_console(cmdline))
		earlycons++;
#endif
#ifdef CONFIG_SERIAL_8250_CONSOLE
	if (!early_serial_console_init(cmdline))
		earlycons++;
#endif

	return (earlycons) ? 0 : -1;
}

static inline void
mark_bsp_online (void)
{
#ifdef CONFIG_SMP
	/* If we register an early console, allow CPU 0 to printk */
	cpu_set(smp_processor_id(), cpu_online_map);
#endif
}

#ifdef CONFIG_SMP
static void
check_for_logical_procs (void)
{
	pal_logical_to_physical_t info;
	s64 status;

	status = ia64_pal_logical_to_phys(0, &info);
	if (status == -1) {
		printk(KERN_INFO "No logical to physical processor mapping "
		       "available\n");
		return;
	}
	if (status) {
		printk(KERN_ERR "ia64_pal_logical_to_phys failed with %ld\n",
		       status);
		return;
	}
	/*
	 * Total number of siblings that BSP has.  Though not all of them
	 * may have booted successfully. The correct number of siblings
	 * booted is in info.overview_num_log.
	 */
	smp_num_siblings = info.overview_tpc;
	smp_num_cpucores = info.overview_cpp;
}
#endif

void __init
setup_arch (char **cmdline_p)
{
	unw_init();

	ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);

	*cmdline_p = __va(ia64_boot_param->command_line);
	strlcpy(saved_command_line, *cmdline_p, COMMAND_LINE_SIZE);

	efi_init();
	io_port_init();

#ifdef CONFIG_IA64_GENERIC
	{
		const char *mvec_name = strstr (*cmdline_p, "machvec=");
		char str[64];

		if (mvec_name) {
			const char *end;
			size_t len;

			mvec_name += 8;
			end = strchr (mvec_name, ' ');
			if (end)
				len = end - mvec_name;
			else
				len = strlen (mvec_name);
			len = min(len, sizeof (str) - 1);
			strncpy (str, mvec_name, len);
			str[len] = '\0';
			mvec_name = str;
		} else
			mvec_name = acpi_get_sysname();
		machvec_init(mvec_name);
	}
#endif

	if (early_console_setup(*cmdline_p) == 0)
		mark_bsp_online();

#ifdef CONFIG_ACPI
	/* Initialize the ACPI boot-time table parser */
	acpi_table_init();
# ifdef CONFIG_ACPI_NUMA
	acpi_numa_init();
# endif
#else
# ifdef CONFIG_SMP
	smp_build_cpu_map();	/* happens, e.g., with the Ski simulator */
