/*
 * setup.c — x86-64 architecture boot-time setup (Linux kernel 2.6.9).
 *
 * NOTE(review): extracted from a web code viewer ("Linux Kernel 2.6.9",
 * 1,210 lines total, page 1 of 3) — viewer chrome removed. The viewer's
 * "for OMAP1710" label is inconsistent with the content, which is
 * x86-64 (CPUID / APIC / e820 / EBDA) code — verify provenance.
 */
#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)struct edd edd;#ifdef CONFIG_EDD_MODULEEXPORT_SYMBOL(edd);#endif/** * copy_edd() - Copy the BIOS EDD information *              from boot_params into a safe place. * */static inline void copy_edd(void){     memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature));     memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info));     edd.mbr_signature_nr = EDD_MBR_SIG_NR;     edd.edd_info_nr = EDD_NR;}#elsestatic inline void copy_edd(void){}#endif#define EBDA_ADDR_POINTER 0x40Estatic void __init reserve_ebda_region(void){	unsigned int addr;	/** 	 * there is a real-mode segmented pointer pointing to the 	 * 4K EBDA area at 0x40E	 */	addr = *(unsigned short *)phys_to_virt(EBDA_ADDR_POINTER);	addr <<= 4;	if (addr)		reserve_bootmem_generic(addr, PAGE_SIZE);}void __init setup_arch(char **cmdline_p){	unsigned long low_mem_size;	unsigned long kernel_end; 	ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV); 	drive_info = DRIVE_INFO; 	screen_info = SCREEN_INFO;	edid_info = EDID_INFO;	aux_device_present = AUX_DEVICE_INFO;	saved_video_mode = SAVED_VIDEO_MODE;#ifdef CONFIG_BLK_DEV_RAM	rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;	rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);	rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);#endif	setup_memory_region();	copy_edd();	if (!MOUNT_ROOT_RDONLY)		root_mountflags &= ~MS_RDONLY;	init_mm.start_code = (unsigned long) &_text;	init_mm.end_code = (unsigned long) &_etext;	init_mm.end_data = (unsigned long) &_edata;	init_mm.brk = (unsigned long) &_end;	code_resource.start = virt_to_phys(&_text);	code_resource.end = virt_to_phys(&_etext)-1;	data_resource.start = virt_to_phys(&_etext);	data_resource.end = virt_to_phys(&_edata)-1;	parse_cmdline_early(cmdline_p);	/*	 * partially used pages are not usable - thus	 * we are rounding upwards:	 */	end_pfn = e820_end_of_ram();	check_efer();	init_memory_mapping(); #ifdef CONFIG_DISCONTIGMEM	numa_initmem_init(0, end_pfn); #else	
contig_initmem_init(); #endif	/* Reserve direct mapping */	reserve_bootmem_generic(table_start << PAGE_SHIFT, 				(table_end - table_start) << PAGE_SHIFT);	/* reserve kernel */	kernel_end = round_up(__pa_symbol(&_end),PAGE_SIZE);	reserve_bootmem_generic(HIGH_MEMORY, kernel_end - HIGH_MEMORY);	/*	 * reserve physical page 0 - it's a special BIOS page on many boxes,	 * enabling clean reboots, SMP operation, laptop functions.	 */	reserve_bootmem_generic(0, PAGE_SIZE);	/* reserve ebda region */	reserve_ebda_region();#ifdef CONFIG_SMP	/*	 * But first pinch a few for the stack/trampoline stuff	 * FIXME: Don't need the extra page at 4K, but need to fix	 * trampoline before removing it. (see the GDT stuff)	 */	reserve_bootmem_generic(PAGE_SIZE, PAGE_SIZE);	/* Reserve SMP trampoline */	reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, PAGE_SIZE);#endif#ifdef CONFIG_ACPI_SLEEP       /*        * Reserve low memory region for sleep support.        */       acpi_reserve_bootmem();#endif#ifdef CONFIG_X86_LOCAL_APIC	/*	 * Find and reserve possible boot-time SMP configuration:	 */	find_smp_config();#endif#ifdef CONFIG_BLK_DEV_INITRD	if (LOADER_TYPE && INITRD_START) {		if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {			reserve_bootmem_generic(INITRD_START, INITRD_SIZE);			initrd_start =				INITRD_START ? INITRD_START + PAGE_OFFSET : 0;			initrd_end = initrd_start+INITRD_SIZE;		}		else {			printk(KERN_ERR "initrd extends beyond end of memory "			    "(0x%08lx > 0x%08lx)\ndisabling initrd\n",			    (unsigned long)(INITRD_START + INITRD_SIZE),			    (unsigned long)(end_pfn << PAGE_SHIFT));			initrd_start = 0;		}	}#endif	paging_init();		check_ioapic();#ifdef CONFIG_ACPI_BOOT       /*        * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).        * Must do this after paging_init (due to reliance on fixmap, and thus        * the bootmem allocator) but before get_smp_config (to allow parsing        * of MADT).        
*/	acpi_boot_init();#endif#ifdef CONFIG_X86_LOCAL_APIC	/*	 * get boot-time SMP configuration:	 */	if (smp_found_config)		get_smp_config();	init_apic_mappings();#endif	/*	 * Request address space for all standard RAM and ROM resources	 * and also for regions reported as reserved by the e820.	 */	probe_roms();	e820_reserve_resources(); 	request_resource(&iomem_resource, &video_ram_resource);	{	unsigned i;	/* request I/O space for devices used on all i[345]86 PCs */	for (i = 0; i < STANDARD_IO_RESOURCES; i++)		request_resource(&ioport_resource, &standard_io_resources[i]);	}	/* Will likely break when you have unassigned resources with more	   than 4GB memory and bridges that don't support more than 4GB. 	   Doing it properly would require to use pci_alloc_consistent	   in this case. */	low_mem_size = ((end_pfn << PAGE_SHIFT) + 0xfffff) & ~0xfffff;	if (low_mem_size > pci_mem_start)		pci_mem_start = low_mem_size;#ifdef CONFIG_GART_IOMMU       iommu_hole_init();#endif#ifdef CONFIG_VT#if defined(CONFIG_VGA_CONSOLE)	conswitchp = &vga_con;#elif defined(CONFIG_DUMMY_CONSOLE)	conswitchp = &dummy_con;#endif#endif}static int __init get_model_name(struct cpuinfo_x86 *c){	unsigned int *v;	if (cpuid_eax(0x80000000) < 0x80000004)		return 0;	v = (unsigned int *) c->x86_model_id;	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);	c->x86_model_id[48] = 0;	return 1;}static void __init display_cacheinfo(struct cpuinfo_x86 *c){	unsigned int n, dummy, eax, ebx, ecx, edx;	n = cpuid_eax(0x80000000);	if (n >= 0x80000005) {		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",			edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);		c->x86_cache_size=(ecx>>24)+(edx>>24);			/* DTLB and ITLB together, but only 4K */		c->x86_tlbsize = ((ebx>>16)&0xff) + (ebx&0xff);	}	if (n >= 0x80000006) {		cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);	ecx = 
cpuid_ecx(0x80000006);	c->x86_cache_size = ecx >> 16;		c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);	printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",		c->x86_cache_size, ecx & 0xFF);	}	if (n >= 0x80000007)		cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power); 	if (n >= 0x80000008) {		cpuid(0x80000008, &eax, &dummy, &dummy, &dummy); 		c->x86_virt_bits = (eax >> 8) & 0xff;		c->x86_phys_bits = eax & 0xff;	}}static int __init init_amd(struct cpuinfo_x86 *c){	int r;	int level;	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */	clear_bit(0*32+31, &c->x86_capability);		/* C-stepping K8? */	level = cpuid_eax(1);	if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)		set_bit(X86_FEATURE_K8_C, &c->x86_capability);	r = get_model_name(c);	if (!r) { 		switch (c->x86) { 		case 15:			/* Should distinguish Models here, but this is only			   a fallback anyways. */			strcpy(c->x86_model_id, "Hammer");			break; 		} 	} 	display_cacheinfo(c);	if (c->cpuid_level >= 0x80000008) {		c->x86_num_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;		if (c->x86_num_cores & (c->x86_num_cores - 1))			c->x86_num_cores = 1;#ifdef CONFIG_NUMA		/* On a dual core setup the lower bits of apic id		   distingush the cores. Fix up the CPU<->node mappings		   here based on that.		   Assumes number of cores is a power of two. 
*/		if (c->x86_num_cores > 1) {			int cpu = c->x86_apicid;			cpu_to_node[cpu] = cpu >> hweight32(c->x86_num_cores - 1);			printk(KERN_INFO "CPU %d -> Node %d\n",			       cpu, cpu_to_node[cpu]);		}#endif	}	return r;}static void __init detect_ht(struct cpuinfo_x86 *c){#ifdef CONFIG_SMP	u32 	eax, ebx, ecx, edx;	int 	index_lsb, index_msb, tmp;	int	initial_apic_id;	int 	cpu = smp_processor_id();		if (!cpu_has(c, X86_FEATURE_HT))		return;	cpuid(1, &eax, &ebx, &ecx, &edx);	smp_num_siblings = (ebx & 0xff0000) >> 16;		if (smp_num_siblings == 1) {		printk(KERN_INFO  "CPU: Hyper-Threading is disabled\n");	} else if (smp_num_siblings > 1) {		index_lsb = 0;		index_msb = 31;		/*		 * At this point we only support two siblings per		 * processor package.		 */		if (smp_num_siblings > NR_CPUS) {			printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings);			smp_num_siblings = 1;			return;		}		tmp = smp_num_siblings;		while ((tmp & 1) == 0) {			tmp >>=1 ;			index_lsb++;		}		tmp = smp_num_siblings;		while ((tmp & 0x80000000 ) == 0) {			tmp <<=1 ;			index_msb--;		}		if (index_lsb != index_msb )			index_msb++;		initial_apic_id = hard_smp_processor_id();		phys_proc_id[cpu] = initial_apic_id >> index_msb;				printk(KERN_INFO  "CPU: Physical Processor ID: %d\n",		       phys_proc_id[cpu]);	}#endif}	#define LVL_1_INST	1#define LVL_1_DATA	2#define LVL_2		3#define LVL_3		4#define LVL_TRACE	5struct _cache_table{	unsigned char descriptor;	char cache_type;	short size;};/* all the cache descriptor types we care about (no TLB or trace cache entries) */static struct _cache_table cache_table[] __initdata ={	{ 0x06, LVL_1_INST, 8 },	{ 0x08, LVL_1_INST, 16 },	{ 0x0a, LVL_1_DATA, 8 },	{ 0x0c, LVL_1_DATA, 16 },	{ 0x22, LVL_3,      512 },	{ 0x23, LVL_3,      1024 },	{ 0x25, LVL_3,      2048 },	{ 0x29, LVL_3,      4096 },	{ 0x2c, LVL_1_DATA, 32 },	{ 0x30, LVL_1_INST, 32 },	{ 0x39, LVL_2,      128 },	{ 0x3b, LVL_2,      128 },	{ 0x3c, LVL_2,      256 },	{ 0x41, LVL_2,      128 },	
{ 0x42, LVL_2,      256 },	{ 0x43, LVL_2,      512 },	{ 0x44, LVL_2,      1024 },	{ 0x45, LVL_2,      2048 },	{ 0x60, LVL_1_DATA, 16 },	{ 0x66, LVL_1_DATA, 8 },	{ 0x67, LVL_1_DATA, 16 },	{ 0x68, LVL_1_DATA, 32 },	{ 0x70, LVL_TRACE,  12 },	{ 0x71, LVL_TRACE,  16 },	{ 0x72, LVL_TRACE,  32 },	{ 0x79, LVL_2,      128 },	{ 0x7a, LVL_2,      256 },	{ 0x7b, LVL_2,      512 },	{ 0x7c, LVL_2,      1024 },	{ 0x82, LVL_2,      256 },	{ 0x83, LVL_2,      512 },	{ 0x84, LVL_2,      1024 },	{ 0x85, LVL_2,      2048 },	{ 0x86, LVL_2,      512 },	{ 0x87, LVL_2,      1024 },	{ 0x00, 0, 0}

/* (source truncated: page 1 of 3 of the original 1,210-line file —
   the remainder, including the end of cache_table[], follows on the
   next pages; code-viewer shortcut-key help text removed) */