setup.c
	probe_roms();
	for (i = 0; i < e820.nr_map; i++) {
		struct resource *res;
		if (e820.map[i].addr + e820.map[i].size > 0x100000000ULL)
			continue;
		res = alloc_bootmem_low(sizeof(struct resource));
		switch (e820.map[i].type) {
		case E820_RAM:	res->name = "System RAM"; break;
		case E820_ACPI:	res->name = "ACPI Tables"; break;
		case E820_NVS:	res->name = "ACPI Non-volatile Storage"; break;
		default:	res->name = "reserved";
		}
		res->start = e820.map[i].addr;
		res->end = res->start + e820.map[i].size - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
		request_resource(&iomem_resource, res);
		if (e820.map[i].type == E820_RAM) {
			/*
			 * We don't know which RAM region contains kernel data,
			 * so we try it repeatedly and let the resource manager
			 * test it.
			 */
			request_resource(res, &code_resource);
			request_resource(res, &data_resource);
		}
	}
	request_resource(&iomem_resource, &vram_resource);

	/* request I/O space for devices used on all i[345]86 PCs */
	for (i = 0; i < STANDARD_IO_RESOURCES; i++)
		request_resource(&ioport_resource, standard_io_resources+i);

	/* Tell the PCI layer not to allocate too close to the RAM area.. */
	low_mem_size = ((max_low_pfn << PAGE_SHIFT) + 0xfffff) & ~0xfffff;
	if (low_mem_size > pci_mem_start)
		pci_mem_start = low_mem_size;

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
}

static int cachesize_override __initdata = -1;
static int __init cachesize_setup(char *str)
{
	get_option(&str, &cachesize_override);
	return 1;
}
__setup("cachesize=", cachesize_setup);

#ifndef CONFIG_X86_TSC
static int tsc_disable __initdata = 0;
static int __init tsc_setup(char *str)
{
	tsc_disable = 1;
	return 1;
}
__setup("notsc", tsc_setup);
#endif

static int __init get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;
	char *p, *q;

	if (cpuid_eax(0x80000000) < 0x80000004)
		return 0;

	v = (unsigned int *) c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;

	/* Intel chips right-justify this string for some dumb reason;
	   undo that brain damage */
	p = q = &c->x86_model_id[0];
	while (*p == ' ')
		p++;
	if (p != q) {
		while (*p)
			*q++ = *p++;
		while (q <= &c->x86_model_id[48])
			*q++ = '\0';	/* Zero-pad the rest */
	}

	return 1;
}

static void __init display_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, ecx, edx, l2size;

	n = cpuid_eax(0x80000000);

	if (n >= 0x80000005) {
		cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
			edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
		c->x86_cache_size = (ecx>>24)+(edx>>24);
	}

	if (n < 0x80000006)	/* Some chips just have a large L1. */
		return;

	ecx = cpuid_ecx(0x80000006);
	l2size = ecx >> 16;

	/* AMD errata T13 (order #21922) */
	if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) {
		if (c->x86_model == 3 && c->x86_mask == 0)	/* Duron Rev A0 */
			l2size = 64;
		if (c->x86_model == 4 &&
		    (c->x86_mask == 0 || c->x86_mask == 1))	/* Tbird rev A1/A2 */
			l2size = 256;
	}

	/* Intel PIII Tualatin. This comes in two flavours.
	 * One has 256kb of cache, the other 512. We have no way
	 * to determine which, so we use a boottime override
	 * for the 512kb model, and assume 256 otherwise.
	 */
	if ((c->x86_vendor == X86_VENDOR_INTEL) && (c->x86 == 6) &&
	    (c->x86_model == 11) && (l2size == 0))
		l2size = 256;
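	/*
	 * (The boot-time override referred to above is the "cachesize="
	 *  command-line parameter parsed by cachesize_setup() earlier in
	 *  this file; a value such as "cachesize=512" ends up in
	 *  cachesize_override and replaces l2size a few lines below.)
	 */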
	/* VIA C3 CPUs (670-68F) need further shifting. */
	if (c->x86_vendor == X86_VENDOR_CENTAUR && (c->x86 == 6) &&
	    ((c->x86_model == 7) || (c->x86_model == 8))) {
		l2size = l2size >> 8;
	}

	/* Allow user to override all this if necessary. */
	if (cachesize_override != -1)
		l2size = cachesize_override;

	if (l2size == 0)
		return;		/* Again, no L2 cache is possible */

	c->x86_cache_size = l2size;

	printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
	       l2size, ecx & 0xFF);
}

/*
 * B step AMD K6 before B 9730xxxx have hardware bugs that can cause
 * misexecution of code under Linux. Owners of such processors should
 * contact AMD for precise details and a CPU swap.
 *
 * See	http://www.multimania.com/poulot/k6bug.html
 *	http://www.amd.com/K6/k6docs/revgd.html
 *
 * The following test is erm.. interesting. AMD neglected to up
 * the chip setting when fixing the bug but they also tweaked some
 * performance at the same time..
 */

extern void vide(void);
__asm__(".align 4\nvide: ret");

static int __init init_amd(struct cpuinfo_x86 *c)
{
	u32 l, h;
	int mbytes = max_mapnr >> (20-PAGE_SHIFT);
	int r;

	/*
	 * FIXME: We should handle the K5 here. Set up the write
	 * range and also turn on MSR 83 bits 4 and 31 (write alloc,
	 * no bus pipeline)
	 */

	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
	clear_bit(0*32+31, &c->x86_capability);

	r = get_model_name(c);

	switch (c->x86) {
	case 5:
		if (c->x86_model < 6) {
			/* Based on AMD doc 20734R - June 2000 */
			if (c->x86_model == 0) {
				clear_bit(X86_FEATURE_APIC, &c->x86_capability);
				set_bit(X86_FEATURE_PGE, &c->x86_capability);
			}
			break;
		}

		if (c->x86_model == 6 && c->x86_mask == 1) {
			const int K6_BUG_LOOP = 1000000;
			int n;
			void (*f_vide)(void);
			unsigned long d, d2;

			printk(KERN_INFO "AMD K6 stepping B detected - ");

			/*
			 * It looks like AMD fixed the 2.6.2 bug and improved indirect
			 * calls at the same time.
			 */

			n = K6_BUG_LOOP;
			f_vide = vide;
			rdtscl(d);
			while (n--)
				f_vide();
			rdtscl(d2);
			d = d2-d;

			/* Knock these two lines out if it debugs out ok */
			printk(KERN_INFO "K6 BUG %ld %d (Report these if test report is incorrect)\n", d, 20*K6_BUG_LOOP);
			printk(KERN_INFO "AMD K6 stepping B detected - ");
			/* -- cut here -- */
			if (d > 20*K6_BUG_LOOP)
				printk("system stability may be impaired when more than 32 MB are used.\n");
			else
				printk("probably OK (after B9730xxxx).\n");
			printk(KERN_INFO "Please see http://www.mygale.com/~poulot/k6bug.html\n");
		}

		/* K6 with old style WHCR */
		if (c->x86_model < 8 ||
		    (c->x86_model == 8 && c->x86_mask < 8)) {
			/* We can only write allocate on the low 508Mb */
			if (mbytes > 508)
				mbytes = 508;

			rdmsr(MSR_K6_WHCR, l, h);
			if ((l & 0x0000FFFF) == 0) {
				unsigned long flags;
				l = (1<<0)|((mbytes/4)<<1);
				local_irq_save(flags);
				wbinvd();
				wrmsr(MSR_K6_WHCR, l, h);
				local_irq_restore(flags);
				printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n",
					mbytes);
			}
			break;
		}

		if ((c->x86_model == 8 && c->x86_mask > 7) ||
		    c->x86_model == 9 || c->x86_model == 13) {
			/* The more serious chips .. */
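			/*
			 * (A note derived from the code itself rather than from
			 *  AMD documentation: the old-style path above encodes
			 *  mbytes/4 into bits 7:1 of WHCR and sets bit 0, which
			 *  is why mbytes is capped at 508 (127 * 4 MB); the
			 *  new-style path below encodes mbytes/4 into bits 31:22
			 *  and additionally sets bit 16, which is why its cap is
			 *  4092 (1023 * 4 MB).)
			 */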
			if (mbytes > 4092)
				mbytes = 4092;

			rdmsr(MSR_K6_WHCR, l, h);
			if ((l & 0xFFFF0000) == 0) {
				unsigned long flags;
				l = ((mbytes>>2)<<22)|(1<<16);
				local_irq_save(flags);
				wbinvd();
				wrmsr(MSR_K6_WHCR, l, h);
				local_irq_restore(flags);
				printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n",
					mbytes);
			}

			/* Set MTRR capability flag if appropriate */
			if (c->x86_model == 13 || c->x86_model == 9 ||
			    (c->x86_model == 8 && c->x86_mask >= 8))
				set_bit(X86_FEATURE_K6_MTRR, &c->x86_capability);
			break;
		}
		break;

	case 6:	/* An Athlon/Duron */

		/* Bit 15 of Athlon specific MSR 15, needs to be 0
		 * to enable SSE on Palomino/Morgan CPU's.
		 * If the BIOS didn't enable it already, enable it
		 * here.
		 */
		if (c->x86_model == 6 || c->x86_model == 7) {
			if (!test_bit(X86_FEATURE_XMM, &c->x86_capability)) {
				printk(KERN_INFO "Enabling Disabled K7/SSE Support...\n");
				rdmsr(MSR_K7_HWCR, l, h);
				l &= ~0x00008000;
				wrmsr(MSR_K7_HWCR, l, h);
				set_bit(X86_FEATURE_XMM, &c->x86_capability);
			}
		}
		break;
	}

	display_cacheinfo(c);
	return r;
}

/*
 * Read Cyrix DEVID registers (DIR) to get more detailed info. about the CPU
 */
static void __init do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
{
	unsigned char ccr2, ccr3;
	unsigned long flags;

	/* we test for DEVID by checking whether CCR3 is writable */
	local_irq_save(flags);
	ccr3 = getCx86(CX86_CCR3);
	setCx86(CX86_CCR3, ccr3 ^ 0x80);
	getCx86(0xc0);			/* dummy to change bus */

	if (getCx86(CX86_CCR3) == ccr3) {	/* no DEVID regs. */
		ccr2 = getCx86(CX86_CCR2);
		setCx86(CX86_CCR2, ccr2 ^ 0x04);
		getCx86(0xc0);		/* dummy */

		if (getCx86(CX86_CCR2) == ccr2)	/* old Cx486SLC/DLC */
			*dir0 = 0xfd;
		else {				/* Cx486S A step */
			setCx86(CX86_CCR2, ccr2);
			*dir0 = 0xfe;
		}
	} else {
		setCx86(CX86_CCR3, ccr3);	/* restore CCR3 */

		/* read DIR0 and DIR1 CPU registers */
		*dir0 = getCx86(CX86_DIR0);
		*dir1 = getCx86(CX86_DIR1);
	}
	local_irq_restore(flags);
}

/*
 * Cx86_dir0_msb is a HACK needed by check_cx686_cpuid/slop in bugs.h in
 * order to identify the Cyrix CPU model after we're out of setup.c
 *
 * Actually since bugs.h doesn't even reference this perhaps someone should
 * fix the documentation ???
 */
static unsigned char Cx86_dir0_msb __initdata = 0;

static char Cx86_model[][9] __initdata = {
	"Cx486", "Cx486", "5x86 ", "6x86", "MediaGX ", "6x86MX ", "M II ", "Unknown"
};
static char Cx486_name[][5] __initdata = {
	"SLC", "DLC", "SLC2", "DLC2", "SRx", "DRx", "SRx2", "DRx2"
};
static char Cx486S_name[][4] __initdata = {
	"S", "S2", "Se", "S2e"
};
static char Cx486D_name[][4] __initdata = {
	"DX", "DX2", "?", "?", "?", "DX4"
};
static char Cx86_cb[] __initdata = "?.5x Core/Bus Clock";
static char cyrix_model_mult1[] __initdata = "12??43";
static char cyrix_model_mult2[] __initdata = "12233445";

/*
 * Reset the slow-loop (SLOP) bit on the 686(L) which is set by some old
 * BIOSes for compatibility with DOS games. This makes the udelay loop
 * work correctly, and improves performance.
 *
 * FIXME: our newer udelay uses the tsc. We don't need to frob with SLOP.
 */
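/*
 * (check_cx686_slop() below re-runs calibrate_delay() after clearing SLOP
 *  because, presumably, the original BogoMIPS calibration ran while the CPU
 *  was still in its slow-loop mode; keeping that stale loops_per_jiffy would
 *  make udelay() return early once the bit is reset.)
 */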
extern void calibrate_delay(void) __init;

static void __init check_cx686_slop(struct cpuinfo_x86 *c)
{
	unsigned long flags;

	if (Cx86_dir0_msb == 3) {
		unsigned char ccr3, ccr5;

		local_irq_save(flags);
		ccr3 = getCx86(CX86_CCR3);
		setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);	/* enable MAPEN  */
		ccr5 = getCx86(CX86_CCR5);
		if (ccr5 & 2)
			setCx86(CX86_CCR5, ccr5 & 0xfd);	/* reset SLOP */
		setCx86(CX86_CCR3, ccr3);			/* disable MAPEN */
		local_irq_restore(flags);

		if (ccr5 & 2) {	/* possible wrong calibration done */
			printk(KERN_INFO "Recalibrating delay loop with SLOP bit reset\n");
			calibrate_delay();
			c->loops_per_jiffy = loops_per_jiffy;
		}
	}
}

static void __init init_cyrix(struct cpuinfo_x86 *c)
{
	unsigned char dir0, dir0_msn, dir0_lsn, dir1 = 0;
	char *buf = c->x86_model_id;
	const char *p = NULL;

	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
	clear_bit(0*32+31, &c->x86_capability);

	/* Cyrix used bit 24 in extended (AMD) CPUID for Cyrix MMX extensions */
	if (test_bit(1*32+24, &c->x86_capability)) {
		clear_bit(1*32+24, &c->x86_capability);
		set_bit(X86_FEATURE_CXMMX, &c->x86_capability);
	}

	do_cyrix_devid(&dir0, &dir1);

	check_cx686_slop(c);

	Cx86_dir0_msb = dir0_msn = dir0 >> 4;	/* identifies CPU "family"   */
	dir0_lsn = dir0 & 0xf;			/* model or clock multiplier */

	/* common case step number/rev -- exceptions handled below */
	c->x86_model = (dir1 >> 4) + 1;
	c->x86_mask = dir1 & 0xf;

	/* Now cook; the original recipe is by Channing Corn, from Cyrix.
	 * We do the same thing for each generation: we work out
	 * the model, multiplier and stepping. Black magic included,
	 * to make the silicon step/rev numbers match the printed ones.
	 */
	switch (dir0_msn) {
		unsigned char tmp;

	case 0: /* Cx486SLC/DLC/SRx/DRx */
		p = Cx486_name[dir0_lsn & 7];
		break;

	case 1: /* Cx486S/DX/DX2/DX4 */
		p = (dir0_lsn & 8) ? Cx486D_name[dir0_lsn & 5]
				   : Cx486S_name[dir0_lsn & 3];
		break;

	case 2: /* 5x86 */
		Cx86_cb[2] = cyrix_model_mult1[dir0_lsn & 5];
		p = Cx86_cb+2;
		break;

	case 3: /* 6x86/6x86L */
		Cx86_cb[1] = ' ';
		Cx86_cb[2] = cyrix_model_mult1[dir0_lsn & 5];
		if (dir1 > 0x21) {	/* 686L */
			Cx86_cb[0] = 'L';
			p = Cx86_cb;
			(c->x86_model)++;
		} else			/* 686 */
			p = Cx86_cb+1;
		/* Emulate MTRRs using Cyrix's ARRs. */
		set_bit(X86_FEATURE_CYRIX_ARR, &c->x86_capability);
		/* 6x86's contain this bug */
		c->coma_bug = 1;
		break;

	case 4: /* MediaGX/GXm */
		/*
		 * Life sometimes gets weiiiiiiiird if we use this
		 * on the MediaGX. So we turn it off for now.
		 */

#ifdef CONFIG_PCI
		/* It isn't really a PCI quirk directly, but the cure is the
		   same. The MediaGX has deep magic SMM stuff that handles the
		   SB emulation. It throws away the fifo on disable_dma() which
		   is wrong and ruins the audio.

		   Bug2: VSA1 has a wrap bug so that using maximum sized DMA
		   causes bad things. According to NatSemi VSA2 has another
		   bug to do with 'hlt'. I've not seen any boards using VSA2
		   and X doesn't seem to support it either so who cares 8).
		   VSA1 we work around however.
		*/

		printk(KERN_INFO "Working around Cyrix MediaGX virtual DMA bugs.\n");
		isa_dma_bridge_buggy = 2;
#endif
		c->x86_cache_size = 16;	/* Yep 16K integrated cache that's it */

		/* GXm supports extended cpuid levels 'ala' AMD */
		if (c->cpuid_level == 2) {
			get_model_name(c);	/* get CPU marketing name */
			clear_bit(X86_FEATURE_TSC, c->x86_capability);
			return;
		} else {	/* MediaGX */