📄 setup_64.c
字号:
"(0x%08lx > 0x%08lx)\ndisabling initrd\n",
			       ramdisk_end, end_of_mem);
			initrd_start = 0;	/* initrd would overrun RAM: drop it */
		}
	}
#endif
	reserve_crashkernel();
	paging_init();
#ifdef CONFIG_PCI
	early_quirks();
#endif
	/*
	 * set this early, so we dont allocate cpu0
	 * if MADT list doesnt list BSP first
	 * mpparse.c/MP_processor_info() allocates logical cpu numbers.
	 */
	cpu_set(0, cpu_present_map);
#ifdef CONFIG_ACPI
	/*
	 * Read APIC and some other early information from ACPI tables.
	 */
	acpi_boot_init();
#endif
	init_cpu_to_node();
	/*
	 * get boot-time SMP configuration:
	 */
	if (smp_found_config)
		get_smp_config();
	init_apic_mappings();
	/*
	 * We trust e820 completely. No explicit ROM probing in memory.
	 */
	e820_reserve_resources();
	e820_mark_nosave_regions();
	{
		unsigned i;
		/* request I/O space for devices used on all i[345]86 PCs */
		for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
			request_resource(&ioport_resource, &standard_io_resources[i]);
	}
	e820_setup_gap();
#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
}

/*
 * Fill c->x86_model_id (the 48-byte CPU brand string) from CPUID
 * leaves 0x80000002..0x80000004.
 *
 * Returns 1 on success, 0 when the extended leaves are not implemented
 * on this CPU.
 */
static int __cpuinit get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;

	if (c->extended_cpuid_level < 0x80000004)
		return 0;

	/* Each of the three leaves fills 16 bytes of the brand string. */
	v = (unsigned int *) c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;	/* force NUL termination */
	return 1;
}

/*
 * Print L1/L2 cache information from the extended CPUID leaves and
 * record cache size, TLB size and physical/virtual address widths
 * in *c.
 */
static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, eax, ebx, ecx, edx;

	n = c->extended_cpuid_level;

	if (n >= 0x80000005) {
		/* Leaf 0x80000005: L1 cache/TLB (ecx = D-cache, edx = I-cache) */
		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
		       edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
		/* I+D total; overwritten below by the L2 size when leaf 0x80000006 exists */
		c->x86_cache_size=(ecx>>24)+(edx>>24);
		/* On K8 L1 TLB is inclusive, so don't count it */
		c->x86_tlbsize = 0;
	}

	if (n >= 0x80000006) {
		/* Leaf 0x80000006: L2 cache/TLB */
		cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
		ecx = cpuid_ecx(0x80000006);
		c->x86_cache_size = ecx >> 16;	/* L2 size in KB */
		c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);

		printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
		c->x86_cache_size, ecx & 0xFF);
	}

	if (n >= 0x80000007)
		/* edx of leaf 0x80000007 = advanced power management flags */
		cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power);
	if (n >= 0x80000008) {
		/* Leaf 0x80000008: physical/virtual address widths */
		cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
	}
}

#ifdef CONFIG_NUMA
/*
 * Find an online node "close" to @apicid by scanning neighbouring
 * APIC ids downwards, then upwards.  Used when the CPU's own APIC id
 * has no usable node.  Falls back to the first online node.
 */
static int nearby_node(int apicid)
{
	int i;
	for (i = apicid - 1; i >= 0; i--) {
		int node = apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		int node = apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}
#endif

/*
 * On a AMD dual core setup the lower bits of the APIC id distingush the cores.
 * Assumes number of cores is a power of two.
 */
static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned bits;
#ifdef CONFIG_NUMA
	int cpu = smp_processor_id();
	int node = 0;
	unsigned apicid = hard_smp_processor_id();
#endif
	unsigned ecx = cpuid_ecx(0x80000008);

	c->x86_max_cores = (ecx & 0xff) + 1;

	/* CPU telling us the core id bits shift? */
	bits = (ecx >> 12) & 0xF;

	/* Otherwise recompute */
	if (bits == 0) {
		while ((1 << bits) < c->x86_max_cores)
			bits++;
	}

	/* Low order bits define the core id (index of core in socket) */
	c->cpu_core_id = c->phys_proc_id & ((1 << bits)-1);
	/* Convert the APIC ID into the socket ID */
	c->phys_proc_id = phys_pkg_id(bits);

#ifdef CONFIG_NUMA
	/* Default: socket id doubles as node id; prefer the SRAT mapping. */
	node = c->phys_proc_id;
	if (apicid_to_node[apicid] != NUMA_NO_NODE)
		node = apicid_to_node[apicid];
	if (!node_online(node)) {
		/* Two possibilities here:
		   - The CPU is missing memory and no node was created.
		   In that case try picking one from a nearby CPU
		   - The APIC IDs differ from the HyperTransport node IDs
		   which the K8 northbridge parsing fills in.
		   Assume they are all increased by a constant offset,
		   but in the same order as the HT nodeids.
		   If that doesn't result in a usable node fall back to the
		   path for the previous case.  */
		int ht_nodeid = apicid - (cpu_data(0).phys_proc_id << bits);
		if (ht_nodeid >= 0 && apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);

	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
#endif
#endif
}

#define ENABLE_C1E_MASK		0x18000000
#define CPUID_PROCESSOR_SIGNATURE	1
#define CPUID_XFAM		0x0ff00000	/* extended family mask */
#define CPUID_XFAM_K8		0x00000000
#define CPUID_XFAM_10H		0x00100000
#define CPUID_XFAM_11H		0x00200000
#define CPUID_XMOD		0x000f0000	/* extended model mask */
#define CPUID_XMOD_REV_F	0x00040000

/* AMD systems with C1E don't have a working lAPIC timer. Check for that. */
static __cpuinit int amd_apic_timer_broken(void)
{
	u32 lo, hi;
	u32 eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
	switch (eax & CPUID_XFAM) {
	case CPUID_XFAM_K8:
		if ((eax & CPUID_XMOD) < CPUID_XMOD_REV_F)
			break;	/* pre-rev-F K8: no C1E, timer is fine */
		/* fall through: rev F+ K8 gets the same MSR check as 10h/11h */
	case CPUID_XFAM_10H:
	case CPUID_XFAM_11H:
		rdmsr(MSR_K8_ENABLE_C1E, lo, hi);
		if (lo & ENABLE_C1E_MASK)
			return 1;
		break;
	default:
		/* err on the side of caution */
		return 1;
	}
	return 0;
}

/*
 * AMD-specific CPU setup: errata workarounds, feature-bit fixups,
 * cache/core topology detection and the C1E lAPIC-timer check.
 */
static void __cpuinit init_amd(struct cpuinfo_x86 *c)
{
	unsigned level;

#ifdef CONFIG_SMP
	unsigned long value;

	/*
	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
	 * bit 6 of msr C001_0015
	 *
	 * Errata 63 for SH-B3 steppings
	 * Errata 122 for all steppings (F+ have it disabled by default)
	 */
	if (c->x86 == 15) {
		rdmsrl(MSR_K8_HWCR, value);
		value |= 1 << 6;
		wrmsrl(MSR_K8_HWCR, value);
	}
#endif

	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
	clear_bit(0*32+31, &c->x86_capability);

	/* On C+ stepping K8 rep microcode works well for copy/memset */
	level = cpuid_eax(1);
	if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58))
		set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
	if (c->x86 == 0x10 || c->x86 == 0x11)
		set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);

	/* Enable workaround for FXSAVE leak */
	if (c->x86 >= 6)
		set_bit(X86_FEATURE_FXSAVE_LEAK, &c->x86_capability);

	level = get_model_name(c);
	if (!level) {
		switch (c->x86) {
		case 15:
			/* Should distinguish Models here, but this is only
			   a fallback anyways. */
			strcpy(c->x86_model_id, "Hammer");
			break;
		}
	}
	display_cacheinfo(c);

	/* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
	if (c->x86_power & (1<<8))
		set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);

	/* Multi core CPU? */
	if (c->extended_cpuid_level >= 0x80000008)
		amd_detect_cmp(c);

	/* L3 present (leaf 0x80000006 edx) means one extra cache leaf */
	if (c->extended_cpuid_level >= 0x80000006 &&
		(cpuid_edx(0x80000006) & 0xf000))
		num_cache_leaves = 4;
	else
		num_cache_leaves = 3;

	if (c->x86 == 0xf || c->x86 == 0x10 || c->x86 == 0x11)
		set_bit(X86_FEATURE_K8, &c->x86_capability);

	/* RDTSC can be speculated around */
	clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);

	/* Family 10 doesn't support C states in MWAIT so don't use it */
	if (c->x86 == 0x10 && !force_mwait)
		clear_bit(X86_FEATURE_MWAIT, &c->x86_capability);

	if (amd_apic_timer_broken())
		disable_apic_timer = 1;
}

/*
 * Derive HT/multi-core topology (phys_proc_id, cpu_core_id) from the
 * sibling count in CPUID leaf 1.  Relies on c->x86_max_cores having
 * been set by the vendor-specific core detection.
 */
static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	u32 eax, ebx, ecx, edx;
	int index_msb, core_bits;

	cpuid(1, &eax, &ebx, &ecx, &edx);

	if (!cpu_has(c, X86_FEATURE_HT))
		return;
	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
		goto out;	/* AMD CMP: topology already set elsewhere */

	/* ebx[23:16] = logical processors per physical package */
	smp_num_siblings = (ebx & 0xff0000) >> 16;

	if (smp_num_siblings == 1) {
		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
	} else if (smp_num_siblings > 1 ) {

		if (smp_num_siblings > NR_CPUS) {
			printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings);
			smp_num_siblings = 1;
			return;
		}

		index_msb = get_count_order(smp_num_siblings);
		c->phys_proc_id = phys_pkg_id(index_msb);

		/* Convert package-wide sibling count to siblings per core */
		smp_num_siblings = smp_num_siblings / c->x86_max_cores;

		index_msb = get_count_order(smp_num_siblings) ;

		core_bits = get_count_order(c->x86_max_cores);

		c->cpu_core_id = phys_pkg_id(index_msb) & ((1 << core_bits) - 1);
	}
out:
	if ((c->x86_max_cores * smp_num_siblings) > 1) {
		printk(KERN_INFO "CPU: Physical Processor ID: %d\n", c->phys_proc_id);
		printk(KERN_INFO "CPU: Processor Core ID: %d\n", c->cpu_core_id);
	}
#endif
}

/*
 * find out the number of processor cores on the die
 */
static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
{
	unsigned int eax, t;

	if (c->cpuid_level < 4)
		return 1;

	/* Leaf 4, subleaf 0: eax[31:26] = max core id in the package */
	cpuid_count(4, 0, &eax, &t, &t, &t);

	if (eax & 0x1f)
		return ((eax >> 26) + 1);
	else
		return 1;
}

/*
 * Bind the current CPU to the NUMA node named by the SRAT
 * APIC-id -> node table; falls back to the first online node.
 */
static void srat_detect_node(void)
{
#ifdef CONFIG_NUMA
	unsigned node;
	int cpu = smp_processor_id();
	int apicid = hard_smp_processor_id();

	/* Don't do the funky fallback heuristics the AMD version employs
	   for now. */
	node = apicid_to_node[apicid];
	if (node == NUMA_NO_NODE)
		node = first_node(node_online_map);
	numa_set_node(cpu, node);

	printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
#endif
}

/*
 * Intel-specific CPU setup: cache info, architectural perfmon and
 * debug-store (BTS/PEBS) feature detection.
 * NOTE(review): this function continues past the end of this excerpt.
 */
static void __cpuinit init_intel(struct cpuinfo_x86 *c)
{
	/* Cache sizes */
	unsigned n;

	init_intel_cacheinfo(c);
	if (c->cpuid_level > 9 ) {
		unsigned eax = cpuid_eax(10);
		/* Check for version and the number of counters */
		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
			set_bit(X86_FEATURE_ARCH_PERFMON, &c->x86_capability);
	}

	if (cpu_has_ds) {
		unsigned int l1, l2;
		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
		/* MISC_ENABLE bit 11 clear => BTS available, bit 12 clear => PEBS */
		if (!(l1 & (1<<11)))
			set_bit(X86_FEATURE_BTS, c->x86_capability);
		if (!(l1 & (1<<12)))
			set_bit(X86_FEATURE_PEBS, c->x86_capability);
	}
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -