
setup.c
Source package: microwindows port to the S3C44B0 (language: C)
Page 1 of 5
#if CONFIG_HIGHMEM
		if (highmem_pages >= max_pfn) {
			printk(KERN_ERR "highmem size specified (%uMB) is bigger than pages available (%luMB)!\n", pages_to_mb(highmem_pages), pages_to_mb(max_pfn));
			highmem_pages = 0;
		}
		if (highmem_pages) {
			if (max_low_pfn - highmem_pages < 64*1024*1024/PAGE_SIZE) {
				printk(KERN_ERR "highmem size %uMB results in smaller than 64MB lowmem, ignoring it.\n", pages_to_mb(highmem_pages));
				highmem_pages = 0;
			}
			max_low_pfn -= highmem_pages;
		}
#else
		if (highmem_pages)
			printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n");
#endif
	}

#ifdef CONFIG_HIGHMEM
	highstart_pfn = highend_pfn = max_pfn;
	if (max_pfn > max_low_pfn) {
		highstart_pfn = max_low_pfn;
	}
	printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
		pages_to_mb(highend_pfn - highstart_pfn));
#endif
	printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
			pages_to_mb(max_low_pfn));

	/*
	 * Initialize the boot-time allocator (with low memory only):
	 */
	bootmap_size = init_bootmem(start_pfn, max_low_pfn);

	/*
	 * Register fully available low RAM pages with the bootmem allocator.
	 */
	for (i = 0; i < e820.nr_map; i++) {
		unsigned long curr_pfn, last_pfn, size;

		/*
		 * Reserve usable low memory
		 */
		if (e820.map[i].type != E820_RAM)
			continue;
		/*
		 * We are rounding up the start address of usable memory:
		 */
		curr_pfn = PFN_UP(e820.map[i].addr);
		if (curr_pfn >= max_low_pfn)
			continue;
		/*
		 * ... and at the end of the usable range downwards:
		 */
		last_pfn = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
		if (last_pfn > max_low_pfn)
			last_pfn = max_low_pfn;
		/*
		 * .. finally, did all the rounding and playing
		 * around just make the area go away?
		 */
		if (last_pfn <= curr_pfn)
			continue;
		size = last_pfn - curr_pfn;
		free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
	}

	/*
	 * Reserve the bootmem bitmap itself as well. We do this in two
	 * steps (first step was init_bootmem()) because this catches
	 * the (very unlikely) case of us accidentally initializing the
	 * bootmem allocator with an invalid RAM area.
	 */
	reserve_bootmem(HIGH_MEMORY, (PFN_PHYS(start_pfn) +
			bootmap_size + PAGE_SIZE - 1) - (HIGH_MEMORY));

	/*
	 * reserve physical page 0 - it's a special BIOS page on many boxes,
	 * enabling clean reboots, SMP operation, laptop functions.
	 */
	reserve_bootmem(0, PAGE_SIZE);

#ifdef CONFIG_SMP
	/*
	 * But first pinch a few for the stack/trampoline stuff
	 * FIXME: Don't need the extra page at 4K, but need to fix
	 * trampoline before removing it. (see the GDT stuff)
	 */
	reserve_bootmem(PAGE_SIZE, PAGE_SIZE);
#endif
#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * Find and reserve possible boot-time SMP configuration:
	 */
	find_smp_config();
#endif
#ifdef CONFIG_BLK_DEV_INITRD
	if (LOADER_TYPE && INITRD_START) {
		if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
			reserve_bootmem(INITRD_START, INITRD_SIZE);
			initrd_start =
				INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
			initrd_end = initrd_start + INITRD_SIZE;
		}
		else {
			printk(KERN_ERR "initrd extends beyond end of memory "
			    "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
			    INITRD_START + INITRD_SIZE,
			    max_low_pfn << PAGE_SHIFT);
			initrd_start = 0;
		}
	}
#endif
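
	/*
	 * Note: the PFN_UP()/PFN_DOWN() rounding in the e820 loop above is
	 * deliberately inward; with the i386 4 KiB pages (PAGE_SHIFT == 12),
	 * PFN_UP(0x1234) == 2 while PFN_DOWN(0x1234) == 1, so a partially
	 * usable page at either end of a RAM range is never freed into the
	 * bootmem allocator.  (worked example with an arbitrary address)
	 */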
	/*
	 * If enable_acpi_smp_table and HT feature present, acpitable.c
	 * will find all logical cpus despite disable_x86_ht: so if both
	 * "noht" and "acpismp=force" are specified, let "noht" override
	 * "acpismp=force" cleanly.  Why retain "acpismp=force"? because
	 * parsing ACPI SMP table might prove useful on some non-HT cpu.
	 */
	if (disable_x86_ht) {
		clear_bit(X86_FEATURE_HT, &boot_cpu_data.x86_capability[0]);
		enable_acpi_smp_table = 0;
	}
	if (test_bit(X86_FEATURE_HT, &boot_cpu_data.x86_capability[0]))
		enable_acpi_smp_table = 1;

	/*
	 * NOTE: before this point _nobody_ is allowed to allocate
	 * any memory using the bootmem allocator.
	 */
#ifdef CONFIG_SMP
	smp_alloc_memory(); /* AP processor realmode stacks in low memory */
#endif

	/*
	 * short-term fix for a conflicting cache attribute bug in the
	 * kernel that is exposed by advanced speculative caching on
	 * newer AMD Athlon processors.
	 */
	if (disable_adv_spec_cache && amd_adv_spec_cache_feature())
		clear_bit(X86_FEATURE_PSE, &boot_cpu_data.x86_capability);

	paging_init();

#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * get boot-time SMP configuration:
	 */
	if (smp_found_config)
		get_smp_config();
#endif

	/*
	 * Request address space for all standard RAM and ROM resources
	 * and also for regions reported as reserved by the e820.
	 */
	probe_roms();
	for (i = 0; i < e820.nr_map; i++) {
		struct resource *res;

		if (e820.map[i].addr + e820.map[i].size > 0x100000000ULL)
			continue;
		res = alloc_bootmem_low(sizeof(struct resource));
		switch (e820.map[i].type) {
		case E820_RAM:	res->name = "System RAM"; break;
		case E820_ACPI:	res->name = "ACPI Tables"; break;
		case E820_NVS:	res->name = "ACPI Non-volatile Storage"; break;
		default:	res->name = "reserved";
		}
		res->start = e820.map[i].addr;
		res->end = res->start + e820.map[i].size - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
		request_resource(&iomem_resource, res);
		if (e820.map[i].type == E820_RAM) {
			/*
			 * We don't know which RAM region contains kernel data,
			 * so we try it repeatedly and let the resource manager
			 * test it.
			 */
			request_resource(res, &code_resource);
			request_resource(res, &data_resource);
		}
	}
	request_resource(&iomem_resource, &vram_resource);

	/* request I/O space for devices used on all i[345]86 PCs */
	for (i = 0; i < STANDARD_IO_RESOURCES; i++)
		request_resource(&ioport_resource, standard_io_resources + i);
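
	/*
	 * Note: request_resource() inserts each region into the tree rooted
	 * at iomem_resource (or ioport_resource), which is what /proc/iomem
	 * and /proc/ioports later print.  Claiming code_resource and
	 * data_resource against every E820_RAM entry above is deliberate
	 * trial and error: only the region that actually contains the
	 * kernel image accepts the nested request.
	 */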
	/* Tell the PCI layer not to allocate too close to the RAM area.. */
	low_mem_size = ((max_low_pfn << PAGE_SHIFT) + 0xfffff) & ~0xfffff;
	if (low_mem_size > pci_mem_start)
		pci_mem_start = low_mem_size;

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
	dmi_scan_machine();
}

static int cachesize_override __initdata = -1;
static int __init cachesize_setup(char *str)
{
	get_option(&str, &cachesize_override);
	return 1;
}
__setup("cachesize=", cachesize_setup);

#ifndef CONFIG_X86_TSC
static int tsc_disable __initdata = 0;
static int __init tsc_setup(char *str)
{
	tsc_disable = 1;
	return 1;
}
__setup("notsc", tsc_setup);
#endif

static int __init get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;
	char *p, *q;

	if (cpuid_eax(0x80000000) < 0x80000004)
		return 0;

	v = (unsigned int *) c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;

	/* Intel chips right-justify this string for some dumb reason;
	   undo that brain damage */
	p = q = &c->x86_model_id[0];
	while (*p == ' ')
		p++;
	if (p != q) {
		while (*p)
			*q++ = *p++;
		while (q <= &c->x86_model_id[48])
			*q++ = '\0';	/* Zero-pad the rest */
	}

	return 1;
}

static void __init display_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, ecx, edx, l2size;

	n = cpuid_eax(0x80000000);

	if (n >= 0x80000005) {
		cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
			edx >> 24, edx & 0xFF, ecx >> 24, ecx & 0xFF);
		c->x86_cache_size = (ecx >> 24) + (edx >> 24);
	}

	if (n < 0x80000006)	/* Some chips just have a large L1. */
		return;

	ecx = cpuid_ecx(0x80000006);
	l2size = ecx >> 16;

	/* AMD errata T13 (order #21922) */
	if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) {
		if (c->x86_model == 3 && c->x86_mask == 0)	/* Duron Rev A0 */
			l2size = 64;
		if (c->x86_model == 4 &&
			(c->x86_mask == 0 || c->x86_mask == 1))	/* Tbird rev A1/A2 */
			l2size = 256;
	}

	/* Intel PIII Tualatin. This comes in two flavours.
	 * One has 256kb of cache, the other 512. We have no way
	 * to determine which, so we use a boottime override
	 * for the 512kb model, and assume 256 otherwise.
	 */
	if ((c->x86_vendor == X86_VENDOR_INTEL) && (c->x86 == 6) &&
		(c->x86_model == 11) && (l2size == 0))
		l2size = 256;

	/* VIA C3 CPUs (670-68F) need further shifting. */
	if (c->x86_vendor == X86_VENDOR_CENTAUR && (c->x86 == 6) &&
		((c->x86_model == 7) || (c->x86_model == 8))) {
		l2size = l2size >> 8;
	}

	/* Allow user to override all this if necessary. */
	if (cachesize_override != -1)
		l2size = cachesize_override;

	if (l2size == 0)
		return;		/* Again, no L2 cache is possible */

	c->x86_cache_size = l2size;

	printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
	       l2size, ecx & 0xFF);
}
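
/*
 * Note on the decoding above: for extended CPUID leaf 0x80000006, AMD
 * documents ECX[31:16] as the unified L2 size in KB and ECX[7:0] as the
 * L2 line size in bytes.  Worked example with an illustrative register
 * value: ecx == 0x02004140 decodes as a 512K L2 with 64-byte lines.
 */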
/*=======================================================================
 * amd_adv_spec_cache_disable
 * Setting a special MSR bit that disables a small part of advanced
 * speculative caching as part of a short-term fix for a conflicting cache
 * attribute bug in the kernel that is exposed by advanced speculative
 * caching in newer AMD Athlon processors.
 *=======================================================================*/
static void amd_adv_spec_cache_disable(void)
{
	printk(KERN_INFO "Disabling advanced speculative caching\n");
	__asm__ __volatile__ (
		" movl   $0x9c5a203a,%%edi   \n"	/* msr enable       */
		" movl   $0xc0011022,%%ecx   \n"	/* msr addr         */
		" rdmsr                      \n"	/* get reg val      */
		" orl    $0x00010000,%%eax   \n"	/* set bit 16       */
		" wrmsr                      \n"	/* put it back      */
		" xorl   %%edi, %%edi        \n"	/* clear msr enable */
		: /* no outputs */
		: /* no inputs, either */
		: "%eax", "%ecx", "%edx", "%edi" /* clobbered regs */ );
}

/*
 *	B step AMD K6 before B 9730xxxx have hardware bugs that can cause
 *	misexecution of code under Linux. Owners of such processors should
 *	contact AMD for precise details and a CPU swap.
 *
 *	See	http://www.multimania.com/poulot/k6bug.html
 *		http://www.amd.com/K6/k6docs/revgd.html
 *
 *	The following test is erm.. interesting. AMD neglected to up
 *	the chip setting when fixing the bug but they also tweaked some
 *	performance at the same time..
 */

extern void vide(void);
__asm__(".align 4\nvide: ret");

static int __init init_amd(struct cpuinfo_x86 *c)
{
	u32 l, h;
	int mbytes = max_mapnr >> (20 - PAGE_SHIFT);
	int r;

	/*
	 *	FIXME: We should handle the K5 here. Set up the write
	 *	range and also turn on MSR 83 bits 4 and 31 (write alloc,
	 *	no bus pipeline)
	 */

	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
	clear_bit(0*32+31, &c->x86_capability);

	r = get_model_name(c);

	switch(c->x86)
	{
		case 5:
			if( c->x86_model < 6 )
			{
				/* Based on AMD doc 20734R - June 2000 */
				if ( c->x86_model == 0 ) {
					clear_bit(X86_FEATURE_APIC, &c->x86_capability);
					set_bit(X86_FEATURE_PGE, &c->x86_capability);
				}
				break;
			}

			if ( c->x86_model == 6 && c->x86_mask == 1 ) {
				const int K6_BUG_LOOP = 1000000;
				int n;
				void (*f_vide)(void);
				unsigned long d, d2;

				printk(KERN_INFO "AMD K6 stepping B detected - ");

				/*
				 * It looks like AMD fixed the 2.6.2 bug and improved indirect
				 * calls at the same time.
				 */

				n = K6_BUG_LOOP;
				f_vide = vide;
				rdtscl(d);
				while (n--)
					f_vide();
				rdtscl(d2);
				d = d2 - d;

				/* Knock these two lines out if it debugs out ok */
				printk(KERN_INFO "K6 BUG %ld %d (Report these if test report is incorrect)\n", d, 20*K6_BUG_LOOP);
				printk(KERN_INFO "AMD K6 stepping B detected - ");
				/* -- cut here -- */
				if (d > 20*K6_BUG_LOOP)
					printk("system stability may be impaired when more than 32 MB are used.\n");
				else
					printk("probably OK (after B9730xxxx).\n");
				printk(KERN_INFO "Please see http://www.mygale.com/~poulot/k6bug.html\n");
			}

			/* K6 with old style WHCR */
			if (c->x86_model < 8 ||
			   (c->x86_model == 8 && c->x86_mask < 8)) {
				/* We can only write allocate on the low 508Mb */
				if (mbytes > 508)
					mbytes = 508;

				rdmsr(MSR_K6_WHCR, l, h);
				if ((l & 0x0000FFFF) == 0) {
					unsigned long flags;
					l = (1<<0)|((mbytes/4)<<1);
					local_irq_save(flags);
					wbinvd();
					wrmsr(MSR_K6_WHCR, l, h);
					local_irq_restore(flags);
					printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n",
						mbytes);
				}
				break;
			}
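
			/*
			 * Note on the WHCR encodings used here: in the old
			 * style layout above, bit 0 enables write allocation
			 * and bits 1..7 hold the limit in 4 MB units, so
			 * mbytes == 508 gives l == (1<<0)|(127<<1) == 0xff;
			 * in the new style layout below, bit 16 is the
			 * enable and bits 22..31 hold the limit, again in
			 * 4 MB units (mbytes>>2).  (worked example only,
			 * assuming the encoding implied by this code)
			 */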
			if ((c->x86_model == 8 && c->x86_mask > 7) ||
			     c->x86_model == 9 || c->x86_model == 13) {
				/* The more serious chips .. */
				if (mbytes > 4092)
					mbytes = 4092;

				rdmsr(MSR_K6_WHCR, l, h);
				if ((l & 0xFFFF0000) == 0) {
					unsigned long flags;
					l = ((mbytes>>2)<<22)|(1<<16);
					local_irq_save(flags);
					wbinvd();
					wrmsr(MSR_K6_WHCR, l, h);
					local_irq_restore(flags);
					printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n",
						mbytes);
				}

				/* Set MTRR capability flag if appropriate */
				if (c->x86_model == 13 || c->x86_model == 9 ||
				   (c->x86_model == 8 && c->x86_mask >= 8))
					set_bit(X86_FEATURE_K6_MTRR, &c->x86_capability);
				break;
			}
			break;

		case 6: /* An Athlon/Duron */

			/* Bit 15 of Athlon specific MSR 15, needs to be 0
			 * to enable SSE on Palomino/Morgan CPU's.
			 * If the BIOS didn't enable it already, enable it
			 * here.
			 *
			 * Avoiding the use of 4MB/2MB pages along with
			 * setting a special MSR bit that disables a small
			 * part of advanced speculative caching as part of a
			 * short-term fix for a conflicting cache attribute
			 * bug in the kernel that is exposed by advanced
			 * speculative caching in newer AMD Athlon processors.
			 *
			 * If we cleared the PSE bit earlier as part
			 * of the workaround for this problem, we need
			 * to clear it again, as our caller may have
			 * clobbered it if uniprocessor APIC is enabled.
			 */
			if (c->x86_model >= 6) {
				if (!cpu_has_xmm) {
					printk(KERN_INFO
					       "Enabling Disabled K7/SSE Support...\n");
					rdmsr(MSR_K7_HWCR, l, h);
