
setup.c

This Linux kernel source is quite comprehensive and essentially complete; it is written in C. Owing to time constraints I have not tested it myself, but even as reference material it is very good.
Language: C
Page 1 of 5
#ifdef CONFIG_HIGHMEM
	highstart_pfn = highend_pfn = max_pfn;
	if (max_pfn > max_low_pfn) {
		highstart_pfn = max_low_pfn;
	}
	printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
		pages_to_mb(highend_pfn - highstart_pfn));
#endif
	printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
			pages_to_mb(max_low_pfn));
	/*
	 * Initialize the boot-time allocator (with low memory only):
	 */
	bootmap_size = init_bootmem(start_pfn, max_low_pfn);
	register_bootmem_low_pages(max_low_pfn);

	/*
	 * Reserve the bootmem bitmap itself as well. We do this in two
	 * steps (first step was init_bootmem()) because this catches
	 * the (very unlikely) case of us accidentally initializing the
	 * bootmem allocator with an invalid RAM area.
	 */
	reserve_bootmem(HIGH_MEMORY, (PFN_PHYS(start_pfn) +
			 bootmap_size + PAGE_SIZE-1) - (HIGH_MEMORY));

	/*
	 * reserve physical page 0 - it's a special BIOS page on many boxes,
	 * enabling clean reboots, SMP operation, laptop functions.
	 */
	reserve_bootmem(0, PAGE_SIZE);

#ifdef CONFIG_SMP
	/*
	 * But first pinch a few for the stack/trampoline stuff
	 * FIXME: Don't need the extra page at 4K, but need to fix
	 * trampoline before removing it. (see the GDT stuff)
	 */
	reserve_bootmem(PAGE_SIZE, PAGE_SIZE);
#endif
#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * Find and reserve possible boot-time SMP configuration:
	 */
	find_smp_config();
#endif
#ifdef CONFIG_BLK_DEV_INITRD
	if (LOADER_TYPE && INITRD_START) {
		if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
			reserve_bootmem(INITRD_START, INITRD_SIZE);
			initrd_start =
				INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
			initrd_end = initrd_start+INITRD_SIZE;
		}
		else {
			printk(KERN_ERR "initrd extends beyond end of memory "
			    "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
			    INITRD_START + INITRD_SIZE,
			    max_low_pfn << PAGE_SHIFT);
			initrd_start = 0;
		}
	}
#endif

	return max_low_pfn;
}
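
/*
 * Illustrative sketch, not part of the original setup.c: how early boot
 * code uses the bootmem allocator once init_bootmem() has run, as above.
 * In 2.4, alloc_bootmem() returns zeroed low memory (and panics on
 * failure) before the buddy allocator exists; reserve_bootmem() fences
 * off a physical range. The names my_early_table/my_table_bytes are
 * hypothetical. Wrapped in "#if 0" so it cannot affect compilation.
 */
#if 0
static void *my_early_table;

static void __init my_early_alloc(void)
{
	unsigned long my_table_bytes = 2 * PAGE_SIZE;	/* hypothetical size */

	/* Zeroed, suitably aligned low memory from the bootmem pool. */
	my_early_table = alloc_bootmem(my_table_bytes);

	/* Keep the page allocator away from a fixed physical range. */
	reserve_bootmem(0x9f000, PAGE_SIZE);	/* e.g. a BIOS/EBDA page */
}
#endif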

/*
 * Request address space for all standard RAM and ROM resources
 * and also for regions reported as reserved by the e820.
 */
static void __init register_memory(unsigned long max_low_pfn)
{
	unsigned long low_mem_size;
	int i;

	probe_roms();
	for (i = 0; i < e820.nr_map; i++) {
		struct resource *res;
		if (e820.map[i].addr + e820.map[i].size > 0x100000000ULL)
			continue;
		res = alloc_bootmem_low(sizeof(struct resource));
		switch (e820.map[i].type) {
		case E820_RAM:	res->name = "System RAM"; break;
		case E820_ACPI:	res->name = "ACPI Tables"; break;
		case E820_NVS:	res->name = "ACPI Non-volatile Storage"; break;
		default:	res->name = "reserved";
		}
		res->start = e820.map[i].addr;
		res->end = res->start + e820.map[i].size - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
		request_resource(&iomem_resource, res);
		if (e820.map[i].type == E820_RAM) {
			/*
			 *  We don't know which RAM region contains kernel data,
			 *  so we try it repeatedly and let the resource manager
			 *  test it.
			 */
			request_resource(res, &code_resource);
			request_resource(res, &data_resource);
		}
	}
	request_resource(&iomem_resource, &vram_resource);

	/* request I/O space for devices used on all i[345]86 PCs */
	for (i = 0; i < STANDARD_IO_RESOURCES; i++)
		request_resource(&ioport_resource, standard_io_resources+i);

	/* Tell the PCI layer not to allocate too close to the RAM area.. */
	low_mem_size = ((max_low_pfn << PAGE_SHIFT) + 0xfffff) & ~0xfffff;
	if (low_mem_size > pci_mem_start)
		pci_mem_start = low_mem_size;
}
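
/*
 * Illustrative sketch, not part of the original setup.c: the resource-tree
 * API that register_memory() uses above. A claimant fills in a struct
 * resource and hangs it off iomem_resource (or ioport_resource for port
 * I/O); request_resource() returns non-zero when the range collides with
 * an existing sibling, which is exactly how the code/data resources are
 * probed against each RAM region. The device name and addresses below are
 * made up. Wrapped in "#if 0" so it cannot affect compilation.
 */
#if 0
static struct resource my_mmio_resource = {
	"hypothetical-device MMIO",	/* name */
	0xd0000000, 0xd0000fff,		/* start, end (inclusive) */
	IORESOURCE_MEM | IORESOURCE_BUSY
};

static int __init my_claim_mmio(void)
{
	/* Non-zero return: somebody already owns part of this range. */
	return request_resource(&iomem_resource, &my_mmio_resource);
}
#endif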

void __init setup_arch(char **cmdline_p)
{
	unsigned long max_low_pfn;

#ifdef CONFIG_VISWS
	visws_get_board_type_and_rev();
#endif

#ifndef CONFIG_HIGHIO
	blk_nohighio = 1;
#endif

	ROOT_DEV = to_kdev_t(ORIG_ROOT_DEV);
	drive_info = DRIVE_INFO;
	screen_info = SCREEN_INFO;
	apm_info.bios = APM_BIOS_INFO;
	if( SYS_DESC_TABLE.length != 0 ) {
		MCA_bus = SYS_DESC_TABLE.table[3] &0x2;
		machine_id = SYS_DESC_TABLE.table[0];
		machine_submodel_id = SYS_DESC_TABLE.table[1];
		BIOS_revision = SYS_DESC_TABLE.table[2];
	}
	aux_device_present = AUX_DEVICE_INFO;

#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
#endif
	setup_memory_region();
	copy_edd();

	if (!MOUNT_ROOT_RDONLY)
		root_mountflags &= ~MS_RDONLY;
	init_mm.start_code = (unsigned long) &_text;
	init_mm.end_code = (unsigned long) &_etext;
	init_mm.end_data = (unsigned long) &_edata;
	init_mm.brk = (unsigned long) &_end;

	code_resource.start = virt_to_bus(&_text);
	code_resource.end = virt_to_bus(&_etext)-1;
	data_resource.start = virt_to_bus(&_etext);
	data_resource.end = virt_to_bus(&_edata)-1;

	parse_cmdline_early(cmdline_p);

	max_low_pfn = setup_memory();

	/*
	 * If enable_acpi_smp_table and HT feature present, acpitable.c
	 * will find all logical cpus despite disable_x86_ht: so if both
	 * "noht" and "acpismp=force" are specified, let "noht" override
	 * "acpismp=force" cleanly.  Why retain "acpismp=force"? because
	 * parsing ACPI SMP table might prove useful on some non-HT cpu.
	 */
	if (disable_x86_ht) {
		clear_bit(X86_FEATURE_HT, &boot_cpu_data.x86_capability[0]);
		set_bit(X86_FEATURE_HT, disabled_x86_caps);
		enable_acpi_smp_table = 0;
	}
	if (test_bit(X86_FEATURE_HT, &boot_cpu_data.x86_capability[0]))
		enable_acpi_smp_table = 1;

	/*
	 * NOTE: before this point _nobody_ is allowed to allocate
	 * any memory using the bootmem allocator.
	 */
#ifdef CONFIG_SMP
	smp_alloc_memory(); /* AP processor realmode stacks in low memory*/
#endif
	paging_init();
#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * get boot-time SMP configuration:
	 */
	if (smp_found_config)
		get_smp_config();
#endif

	register_memory(max_low_pfn);

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
	dmi_scan_machine();
}

static int cachesize_override __initdata = -1;
static int __init cachesize_setup(char *str)
{
	get_option(&str, &cachesize_override);
	return 1;
}
__setup("cachesize=", cachesize_setup);

#ifndef CONFIG_X86_TSC
static int tsc_disable __initdata = 0;
static int __init notsc_setup(char *str)
{
	tsc_disable = 1;
	return 1;
}
#else
static int __init notsc_setup(char *str)
{
	printk("notsc: Kernel compiled with CONFIG_X86_TSC, cannot disable TSC.\n");
	return 1;
}
#endif
__setup("notsc", notsc_setup);

static int __init highio_setup(char *str)
{
	printk("i386: disabling HIGHMEM block I/O\n");
	blk_nohighio = 1;
	return 1;
}
__setup("nohighio", highio_setup);

static int __init get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;
	char *p, *q;

	if (cpuid_eax(0x80000000) < 0x80000004)
		return 0;

	v = (unsigned int *) c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;

	/* Intel chips right-justify this string for some dumb reason;
	   undo that brain damage */
	p = q = &c->x86_model_id[0];
	while ( *p == ' ' )
	     p++;
	if ( p != q ) {
	     while ( *p )
		  *q++ = *p++;
	     while ( q <= &c->x86_model_id[48] )
		  *q++ = '\0';	/* Zero-pad the rest */
	}

	return 1;
}

static void __init display_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, ecx, edx, l2size;

	n = cpuid_eax(0x80000000);

	if (n >= 0x80000005) {
		cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
			edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
		c->x86_cache_size = (ecx>>24)+(edx>>24);
	}

	if (n < 0x80000006)	/* Some chips just have a large L1. */
		return;

	ecx = cpuid_ecx(0x80000006);
	l2size = ecx >> 16;

	/* AMD errata T13 (order #21922) */
	if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) {
		if (c->x86_model == 3 && c->x86_mask == 0)	/* Duron Rev A0 */
			l2size = 64;
		if (c->x86_model == 4 &&
			(c->x86_mask==0 || c->x86_mask==1))	/* Tbird rev A1/A2 */
			l2size = 256;
	}

	/* VIA C3 CPUs (670-68F) need further shifting. */
	if (c->x86_vendor == X86_VENDOR_CENTAUR && (c->x86 == 6) &&
		((c->x86_model == 7) || (c->x86_model == 8))) {
		l2size = l2size >> 8;
	}

	/* Allow user to override all this if necessary. */
	if (cachesize_override != -1)
		l2size = cachesize_override;

	if ( l2size == 0 )
		return;		/* Again, no L2 cache is possible */

	c->x86_cache_size = l2size;

	printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
	       l2size, ecx & 0xFF);
}
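
/*
 * Illustrative sketch, not part of the original setup.c: the register
 * layout that display_cacheinfo() above decodes. For AMD-style extended
 * CPUID, leaf 0x80000005 reports the L1 D-cache in ECX and the L1 I-cache
 * in EDX (size in KB in bits 31..24, line size in bits 7..0), and leaf
 * 0x80000006 reports the L2 size in KB in ECX bits 31..16 with line size
 * in bits 7..0. A hypothetical stand-alone decoder using the same cpuid()
 * helpers, wrapped in "#if 0" so it cannot affect compilation:
 */
#if 0
static void __init decode_amd_caches(void)
{
	unsigned int dummy, ecx, edx, l2;

	cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
	printk(KERN_INFO "L1D %uK/%uB lines, L1I %uK/%uB lines\n",
	       ecx >> 24, ecx & 0xFF, edx >> 24, edx & 0xFF);

	l2 = cpuid_ecx(0x80000006);
	printk(KERN_INFO "L2 %uK/%uB lines\n", l2 >> 16, l2 & 0xFF);
}
#endif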

/*
 *	B step AMD K6 before B 9730xxxx have hardware bugs that can cause
 *	misexecution of code under Linux. Owners of such processors should
 *	contact AMD for precise details and a CPU swap.
 *
 *	See	http://www.multimania.com/poulot/k6bug.html
 *		http://www.amd.com/K6/k6docs/revgd.html
 *
 *	The following test is erm.. interesting. AMD neglected to up
 *	the chip setting when fixing the bug but they also tweaked some
 *	performance at the same time..
 */

extern void vide(void);
__asm__(".align 4\nvide: ret");

static int __init init_amd(struct cpuinfo_x86 *c)
{
	u32 l, h;
	int mbytes = max_mapnr >> (20-PAGE_SHIFT);
	int r;

	/*
	 *	FIXME: We should handle the K5 here. Set up the write
	 *	range and also turn on MSR 83 bits 4 and 31 (write alloc,
	 *	no bus pipeline)
	 */

	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	   3DNow is ID'd by bit 31 in extended CPUID (1*32+31) anyway */
	clear_bit(0*32+31, &c->x86_capability);

	r = get_model_name(c);

	switch(c->x86)
	{
		case 5:
			if( c->x86_model < 6 )
			{
				/* Based on AMD doc 20734R - June 2000 */
				if ( c->x86_model == 0 ) {
					clear_bit(X86_FEATURE_APIC, &c->x86_capability);
					set_bit(X86_FEATURE_PGE, &c->x86_capability);
				}
				break;
			}

			if ( c->x86_model == 6 && c->x86_mask == 1 ) {
				const int K6_BUG_LOOP = 1000000;
				int n;
				void (*f_vide)(void);
				unsigned long d, d2;

				printk(KERN_INFO "AMD K6 stepping B detected - ");

				/*
				 * It looks like AMD fixed the 2.6.2 bug and improved indirect
				 * calls at the same time.
				 */
				n = K6_BUG_LOOP;
				f_vide = vide;
				rdtscl(d);
				while (n--)
					f_vide();
				rdtscl(d2);
				d = d2-d;

				/* Knock these two lines out if it debugs out ok */
				printk(KERN_INFO "K6 BUG %ld %d (Report these if test report is incorrect)\n", d, 20*K6_BUG_LOOP);
				printk(KERN_INFO "AMD K6 stepping B detected - ");
				/* -- cut here -- */
				if (d > 20*K6_BUG_LOOP)
					printk("system stability may be impaired when more than 32 MB are used.\n");
				else
					printk("probably OK (after B9730xxxx).\n");
				printk(KERN_INFO "Please see http://www.mygale.com/~poulot/k6bug.html\n");
			}

			/* K6 with old style WHCR */
			if (c->x86_model < 8 ||
			   (c->x86_model == 8 && c->x86_mask < 8)) {
				/* We can only write allocate on the low 508Mb */
				if(mbytes>508)
					mbytes=508;

				rdmsr(MSR_K6_WHCR, l, h);
				if ((l&0x0000FFFF)==0) {
					unsigned long flags;
					l=(1<<0)|((mbytes/4)<<1);
					local_irq_save(flags);
					wbinvd();
					wrmsr(MSR_K6_WHCR, l, h);
					local_irq_restore(flags);
					printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n",
						mbytes);
				}
				break;
			}

			if ((c->x86_model == 8 && c->x86_mask > 7) ||
			     c->x86_model == 9 || c->x86_model == 13) {
				/* The more serious chips .. */

				if(mbytes>4092)
					mbytes=4092;

				rdmsr(MSR_K6_WHCR, l, h);
				if ((l&0xFFFF0000)==0) {
					unsigned long flags;
					l=((mbytes>>2)<<22)|(1<<16);
					local_irq_save(flags);
					wbinvd();
					wrmsr(MSR_K6_WHCR, l, h);
					local_irq_restore(flags);
					printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n",
						mbytes);
				}

				/*  Set MTRR capability flag if appropriate */
				if (c->x86_model == 13 || c->x86_model == 9 ||
				   (c->x86_model == 8 && c->x86_mask >= 8))
					set_bit(X86_FEATURE_K6_MTRR, &c->x86_capability);
				break;
			}

			break;

		case 6: /* An Athlon/Duron */

			/* Bit 15 of Athlon specific MSR 15, needs to be 0
			 * to enable SSE on Palomino/Morgan CPU's.
			 * If the BIOS didn't enable it already, enable it
			 * here.
			 */
			if (c->x86_model == 6 || c->x86_model == 7) {
				if (!test_bit(X86_FEATURE_XMM,
					      &c->x86_capability)) {
					printk(KERN_INFO
					       "Enabling Disabled K7/SSE Support...\n");
					rdmsr(MSR_K7_HWCR, l, h);
					l &= ~0x00008000;
					wrmsr(MSR_K7_HWCR, l, h);
					set_bit(X86_FEATURE_XMM,
						&c->x86_capability);
				}
			}

			/* It's been determined by AMD that Athlons since model 8 stepping 1
			 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
			 * As per AMD technical note 27212 0.2
			 */
			if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
				rdmsr(MSR_K7_CLK_CTL, l, h);
				if ((l & 0xfff00000) != 0x20000000) {
					printk ("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n", l,
						((l & 0x000fffff)|0x20000000));
					wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
				}
			}
			break;
	}

	display_cacheinfo(c);
	return r;
}
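
/*
 * Illustrative sketch, not part of the original setup.c: the WHCR
 * arithmetic in init_amd() above, worked through. WHCR limits K6 write
 * allocation to the first N megabytes of RAM. The old-style layout (before
 * model 8 stepping 8) keeps the limit in 4MB units in bits 15..1 with bit 0
 * as the enable; the new-style layout keeps 4MB units in bits 31..22 with
 * bit 16 as the enable. Worked for a hypothetical 128MB machine, wrapped in
 * "#if 0" so it cannot affect compilation:
 */
#if 0
static void __init whcr_example(void)
{
	int mbytes = 128;	/* hypothetical memory size */
	u32 old_style = (1<<0) | ((mbytes/4) << 1);	/* 0x00000041: enable + 32 units */
	u32 new_style = ((mbytes>>2) << 22) | (1<<16);	/* 0x08010000: 32 units + enable */

	/*
	 * Writes follow the same safe pattern as above: disable interrupts,
	 * flush caches with wbinvd(), then wrmsr(), so no stale
	 * write-allocated lines survive the policy change.
	 */
	printk(KERN_INFO "WHCR old=%08x new=%08x\n", old_style, new_style);
}
#endif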

/*
 * Read NSC/Cyrix DEVID registers (DIR) to get more detailed info about the CPU
 */
static void __init do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
{
	unsigned char ccr2, ccr3;
	unsigned long flags;
