
smpboot_32.c

Linux kernel source code · C · Page 1 of 3
/*
 *	x86 SMP booting functions
 *
 *	(c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 *	(c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
 *
 *	Much of the core SMP work is based on previous work by Thomas Radke, to
 *	whom a great many thanks are extended.
 *
 *	Thanks to Intel for making available several different Pentium,
 *	Pentium Pro and Pentium-II/Xeon MP machines.
 *	Original development of Linux SMP code supported by Caldera.
 *
 *	This code is released under the GNU General Public License version 2 or
 *	later.
 *
 *	Fixes
 *		Felix Koop	:	NR_CPUS used properly
 *		Jose Renau	:	Handle single CPU case.
 *		Alan Cox	:	By repeated request 8) - Total BogoMIPS report.
 *		Greg Wright	:	Fix for kernel stacks panic.
 *		Erich Boleyn	:	MP v1.4 and additional changes.
 *	Matthias Sattler	:	Changes for 2.1 kernel map.
 *	Michel Lespinasse	:	Changes for 2.1 kernel map.
 *	Michael Chastain	:	Change trampoline.S to gnu as.
 *		Alan Cox	:	Dumb bug: 'B' step PPro's are fine
 *		Ingo Molnar	:	Added APIC timers, based on code
 *					from Jose Renau
 *		Ingo Molnar	:	various cleanups and rewrites
 *		Tigran Aivazian	:	fixed "0.00 in /proc/uptime on SMP" bug.
 *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs
 *		Martin J. Bligh	:	Added support for multi-quad systems
 *		Dave Jones	:	Report invalid combinations of Athlon CPUs.
 *		Rusty Russell	:	Hacked into shape for new "hotplug" boot process.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/bootmem.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/percpu.h>
#include <linux/nmi.h>

#include <linux/delay.h>
#include <linux/mc146818rtc.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/arch_hooks.h>
#include <asm/nmi.h>

#include <mach_apic.h>
#include <mach_wakecpu.h>
#include <smpboot_hooks.h>
#include <asm/vmi.h>
#include <asm/mtrr.h>

/* Set if we find a B stepping CPU */
static int __cpuinitdata smp_b_stepping;

/* Number of siblings per CPU package */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* Last level cache ID of each logical CPU */
DEFINE_PER_CPU(u8, cpu_llc_id) = BAD_APICID;

/* representing HT siblings of each logical CPU */
DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);

/* representing HT and core siblings of each logical CPU */
DEFINE_PER_CPU(cpumask_t, cpu_core_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/* bitmap of online cpus */
cpumask_t cpu_online_map __read_mostly;
EXPORT_SYMBOL(cpu_online_map);

cpumask_t cpu_callin_map;
cpumask_t cpu_callout_map;
EXPORT_SYMBOL(cpu_callout_map);
cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map);
static cpumask_t smp_commenced_mask;

/* Per CPU bogomips and other parameters */
DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
EXPORT_PER_CPU_SYMBOL(cpu_info);

/*
 * The following static array is used during kernel startup
 * and the x86_cpu_to_apicid_ptr contains the address of the
 * array during this time.  It is zeroed when the per_cpu
 * data area is removed.
 */
u8 x86_cpu_to_apicid_init[NR_CPUS] __initdata =
			{ [0 ... NR_CPUS-1] = BAD_APICID };
void *x86_cpu_to_apicid_ptr;
DEFINE_PER_CPU(u8, x86_cpu_to_apicid) = BAD_APICID;
EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid);

u8 apicid_2_node[MAX_APICID];
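The cpumask_t objects declared above (cpu_online_map, cpu_callin_map, cpu_callout_map, cpu_possible_map) are bitmaps with one bit per possible CPU. As a rough illustration (not part of smpboot_32.c), here is a minimal userspace model of the cpu_set()/cpu_isset() operations used throughout this file, assuming at most 64 CPUs; the kernel's real cpumask_t scales to any NR_CPUS and pairs with iterators such as for_each_cpu_mask().

#include <stdint.h>
#include <stdio.h>

typedef uint64_t cpumask_model_t;	/* toy stand-in for cpumask_t, <= 64 CPUs */

static void model_cpu_set(int cpu, cpumask_model_t *mask)
{
	*mask |= (uint64_t)1 << cpu;	/* set the bit for this CPU */
}

static int model_cpu_isset(int cpu, const cpumask_model_t *mask)
{
	return (*mask >> cpu) & 1;	/* test the bit for this CPU */
}

int main(void)
{
	cpumask_model_t online = 0;

	model_cpu_set(0, &online);	/* boot CPU comes up first */
	model_cpu_set(2, &online);	/* a secondary CPU reports in */
	printf("cpu1 online? %d, cpu2 online? %d\n",
	       model_cpu_isset(1, &online), model_cpu_isset(2, &online));
	return 0;
}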
/*
 * Trampoline 80x86 program as an array.
 */
extern const unsigned char trampoline_data [];
extern const unsigned char trampoline_end  [];
static unsigned char *trampoline_base;
static int trampoline_exec;

static void map_cpu_to_logical_apicid(void);

/* State of each CPU. */
DEFINE_PER_CPU(int, cpu_state) = { 0 };

/*
 * Currently trivial. Write the real->protected mode
 * bootstrap into the page concerned. The caller
 * has made sure it's suitably aligned.
 */
static unsigned long __cpuinit setup_trampoline(void)
{
	memcpy(trampoline_base, trampoline_data, trampoline_end - trampoline_data);
	return virt_to_phys(trampoline_base);
}

/*
 * We are called very early to get the low memory for the
 * SMP bootup trampoline page.
 */
void __init smp_alloc_memory(void)
{
	trampoline_base = (void *) alloc_bootmem_low_pages(PAGE_SIZE);
	/*
	 * Has to be in very low memory so we can execute
	 * real-mode AP code.
	 */
	if (__pa(trampoline_base) >= 0x9F000)
		BUG();
	/*
	 * Make the SMP trampoline executable:
	 */
	trampoline_exec = set_kernel_exec((unsigned long)trampoline_base, 1);
}

/*
 * The bootstrap kernel entry code has set these up. Save them for
 * a given CPU
 */
void __cpuinit smp_store_cpu_info(int id)
{
	struct cpuinfo_x86 *c = &cpu_data(id);

	*c = boot_cpu_data;
	c->cpu_index = id;
	if (id != 0)
		identify_secondary_cpu(c);
	/*
	 * Mask B, Pentium, but not Pentium MMX
	 */
	if (c->x86_vendor == X86_VENDOR_INTEL &&
	    c->x86 == 5 &&
	    c->x86_mask >= 1 && c->x86_mask <= 4 &&
	    c->x86_model <= 3)
		/*
		 * Remember we have B step Pentia with bugs
		 */
		smp_b_stepping = 1;

	/*
	 * Certain Athlons might work (for various values of 'work') in SMP
	 * but they are not certified as MP capable.
	 */
	if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) {

		if (num_possible_cpus() == 1)
			goto valid_k7;

		/* Athlon 660/661 is valid. */
		if ((c->x86_model==6) && ((c->x86_mask==0) || (c->x86_mask==1)))
			goto valid_k7;

		/* Duron 670 is valid */
		if ((c->x86_model==7) && (c->x86_mask==0))
			goto valid_k7;

		/*
		 * Athlon 662, Duron 671, and Athlon >model 7 have capability bit.
		 * It's worth noting that the A5 stepping (662) of some Athlon XP's
		 * have the MP bit set.
		 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for more.
		 */
		if (((c->x86_model==6) && (c->x86_mask>=2)) ||
		    ((c->x86_model==7) && (c->x86_mask>=1)) ||
		     (c->x86_model> 7))
			if (cpu_has_mp)
				goto valid_k7;

		/* If we get here, it's not a certified SMP capable AMD system. */
		add_taint(TAINT_UNSAFE_SMP);
	}

valid_k7:
	;
}

extern void calibrate_delay(void);

static atomic_t init_deasserted;
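The Pentium and Athlon checks in smp_store_cpu_info() above key off x86 (family), x86_model, and x86_mask (stepping), which the kernel decodes from CPUID leaf 1. The sketch below (not part of this file) shows that decoding in userspace with GCC's <cpuid.h>; it ignores the extended family/model bits that matter on family 0xf and later, so treat it as an approximation.

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 1;	/* CPUID leaf 1 not supported */

	/* CPUID leaf 1, EAX: stepping [3:0], model [7:4], family [11:8] */
	unsigned int stepping = eax & 0xf;
	unsigned int model    = (eax >> 4) & 0xf;
	unsigned int family   = (eax >> 8) & 0xf;

	printf("family %u, model %u, stepping %u\n", family, model, stepping);
	return 0;
}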
static void __cpuinit smp_callin(void)
{
	int cpuid, phys_id;
	unsigned long timeout;

	/*
	 * If woken up by an INIT in an 82489DX configuration
	 * we may get here before an INIT-deassert IPI reaches
	 * our local APIC.  We have to wait for the IPI or we'll
	 * lock up on an APIC access.
	 */
	wait_for_init_deassert(&init_deasserted);

	/*
	 * (This works even if the APIC is not enabled.)
	 */
	phys_id = GET_APIC_ID(apic_read(APIC_ID));
	cpuid = smp_processor_id();
	if (cpu_isset(cpuid, cpu_callin_map)) {
		printk("huh, phys CPU#%d, CPU#%d already present??\n",
					phys_id, cpuid);
		BUG();
	}
	Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);

	/*
	 * STARTUP IPIs are fragile beasts as they might sometimes
	 * trigger some glue motherboard logic. Complete APIC bus
	 * silence for 1 second, this overestimates the time the
	 * boot CPU is spending to send the up to 2 STARTUP IPIs
	 * by a factor of two. This should be enough.
	 */

	/*
	 * Waiting 2s total for startup (udelay is not yet working)
	 */
	timeout = jiffies + 2*HZ;
	while (time_before(jiffies, timeout)) {
		/*
		 * Has the boot CPU finished its STARTUP sequence?
		 */
		if (cpu_isset(cpuid, cpu_callout_map))
			break;
		rep_nop();
	}

	if (!time_before(jiffies, timeout)) {
		printk("BUG: CPU%d started up but did not get a callout!\n",
			cpuid);
		BUG();
	}

	/*
	 * the boot CPU has finished the init stage and is spinning
	 * on callin_map until we finish. We are free to set up this
	 * CPU, first the APIC. (this is probably redundant on most
	 * boards)
	 */

	Dprintk("CALLIN, before setup_local_APIC().\n");
	smp_callin_clear_local_apic();
	setup_local_APIC();
	map_cpu_to_logical_apicid();

	/*
	 * Get our bogomips.
	 */
	calibrate_delay();
	Dprintk("Stack at about %p\n", &cpuid);

	/*
	 * Save our processor parameters
	 */
	smp_store_cpu_info(cpuid);

	/*
	 * Allow the master to continue.
	 */
	cpu_set(cpuid, cpu_callin_map);
}

static int cpucount;

/* maps the cpu to the sched domain representing multi-core */
cpumask_t cpu_coregroup_map(int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	/*
	 * For perf, we return last level cache shared map.
	 * And for power savings, we return cpu_core_map
	 */
	if (sched_mc_power_savings || sched_smt_power_savings)
		return per_cpu(cpu_core_map, cpu);
	else
		return c->llc_shared_map;
}

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

void __cpuinit set_cpu_sibling_map(int cpu)
{
	int i;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	cpu_set(cpu, cpu_sibling_setup_map);

	if (smp_num_siblings > 1) {
		for_each_cpu_mask(i, cpu_sibling_setup_map) {
			if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
			    c->cpu_core_id == cpu_data(i).cpu_core_id) {
				cpu_set(i, per_cpu(cpu_sibling_map, cpu));
				cpu_set(cpu, per_cpu(cpu_sibling_map, i));
				cpu_set(i, per_cpu(cpu_core_map, cpu));
				cpu_set(cpu, per_cpu(cpu_core_map, i));
				cpu_set(i, c->llc_shared_map);
				cpu_set(cpu, cpu_data(i).llc_shared_map);
			}
		}
	} else {
		cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
	}

	cpu_set(cpu, c->llc_shared_map);

	if (current_cpu_data.x86_max_cores == 1) {
		per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
		c->booted_cores = 1;
		return;
	}

	for_each_cpu_mask(i, cpu_sibling_setup_map) {
		if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
		    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
			cpu_set(i, c->llc_shared_map);
			cpu_set(cpu, cpu_data(i).llc_shared_map);
		}
		if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
			cpu_set(i, per_cpu(cpu_core_map, cpu));
			cpu_set(cpu, per_cpu(cpu_core_map, i));
			/*
			 * Does this new cpu bring up a new core?
			 */
			if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) {
				/*
				 * for each core in package, increment
				 * the booted_cores for this new cpu
				 */
				if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
					c->booted_cores++;
				/*
				 * increment the core count for all
				 * the other cpus in this package
				 */
				if (i != cpu)
					cpu_data(i).booted_cores++;
			} else if (i != cpu && !c->booted_cores)
				c->booted_cores = cpu_data(i).booted_cores;
		}
	}
}
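smp_callin() is one half of a two-way rendezvous: the boot CPU sets the AP's bit in cpu_callout_map, the AP spins until it sees that bit, runs its local setup, and finally sets its own bit in cpu_callin_map to let the master continue. The pthread sketch below (not part of this file; names are illustrative) models that handshake with two atomic flags standing in for the cpumasks; the kernel spins on jiffies with a 2s timeout rather than waiting unconditionally.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int callout;	/* boot CPU -> AP: "you may proceed" */
static atomic_int callin;	/* AP -> boot CPU: "I am set up" */

static void *ap_thread(void *unused)
{
	(void)unused;
	while (!atomic_load(&callout))	/* wait for the callout */
		;
	/* ... per-CPU setup would happen here ... */
	atomic_store(&callin, 1);	/* allow the master to continue */
	return NULL;
}

int main(void)
{
	pthread_t ap;

	pthread_create(&ap, NULL, ap_thread, NULL);
	atomic_store(&callout, 1);	/* boot CPU kicks the AP */
	while (!atomic_load(&callin))	/* boot CPU spins on callin */
		;
	pthread_join(ap, NULL);
	puts("AP called in");
	return 0;
}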
/*
 * Activate a secondary processor.
 */
static void __cpuinit start_secondary(void *unused)
{
	/*
	 * Don't put *anything* before cpu_init(); SMP booting is
	 * fragile enough that we want to limit the things done here
	 * to the most necessary things.
	 */
#ifdef CONFIG_VMI
	vmi_bringup();
#endif
	cpu_init();
	preempt_disable();
	smp_callin();
	while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
		rep_nop();
	/*
	 * Check TSC synchronization with the BP:
	 */
	check_tsc_sync_target();

	setup_secondary_clock();
	if (nmi_watchdog == NMI_IO_APIC) {
		disable_8259A_irq(0);
		enable_NMI_through_LVT0(NULL);
		enable_8259A_irq(0);
	}
	/*
	 * low-memory mappings have been cleared, flush them from
	 * the local TLBs too.
	 */
	local_flush_tlb();

	/* This must be done before setting cpu_online_map */
	set_cpu_sibling_map(raw_smp_processor_id());
	wmb();

	/*
	 * We need to hold call_lock, so there is no inconsistency
	 * between the time smp_call_function() determines number of
	 * IPI recipients, and the time when the determination is made
	 * for which cpus receive the IPI. Holding this
	 * lock helps us to not include this cpu in a currently in progress
	 * smp_call_function().
	 */
	lock_ipi_call_lock();
	cpu_set(smp_processor_id(), cpu_online_map);
	unlock_ipi_call_lock();
	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;

	/* We can take interrupts now: we're officially "up". */
	local_irq_enable();

	wmb();
	cpu_idle();
}

/*
 * Everything has been set up for the secondary
 * CPUs - they just need to reload everything
 * from the task structure
 * This function must not return.
 */
void __devinit initialize_secondary(void)
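In start_secondary() above, the wmb() between set_cpu_sibling_map() and the cpu_set() into cpu_online_map ensures that anyone who observes the CPU as online also observes its sibling maps. The fragment below (not part of this file) is a loose C11 analogue of that publish/observe pattern using a release fence; the kernel's wmb() is a hardware store barrier, so this is only an approximation.

#include <stdatomic.h>
#include <stdio.h>

static int sibling_data;	/* stands in for the per-cpu sibling maps */
static atomic_int cpu_online;	/* stands in for this CPU's online bit */

static void publish_cpu(void)
{
	sibling_data = 42;		/* set_cpu_sibling_map(...) analogue */
	atomic_thread_fence(memory_order_release);	/* wmb() analogue */
	atomic_store_explicit(&cpu_online, 1, memory_order_relaxed);
}

static int observe_cpu(void)
{
	if (atomic_load_explicit(&cpu_online, memory_order_acquire))
		return sibling_data;	/* ordered: sees 42 once online */
	return -1;
}

int main(void)
{
	publish_cpu();
	printf("observer sees %d\n", observe_cpu());
	return 0;
}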
