⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 smpboot_64.c

📁 linux 内核源代码
💻 C
📖 第 1 页 / 共 2 页
字号:
	/* allocate memory for gdts of secondary cpus. Hotplug is considered */
	if (!cpu_gdt_descr[cpu].address &&
		!(cpu_gdt_descr[cpu].address = get_zeroed_page(GFP_KERNEL))) {
		printk(KERN_ERR "Failed to allocate GDT for CPU %d\n", cpu);
		return -1;
	}

	/*
	 * Allocate node local memory for AP pdas.  Only done while the AP is
	 * still using the static boot_cpu_pda[] slot; on allocation failure
	 * we keep the static PDA instead of failing the bringup.
	 */
	if (cpu_pda(cpu) == &boot_cpu_pda[cpu]) {
		struct x8664_pda *newpda, *pda;
		int node = cpu_to_node(cpu);
		pda = cpu_pda(cpu);
		/* GFP_ATOMIC: must not sleep here */
		newpda = kmalloc_node(sizeof (struct x8664_pda), GFP_ATOMIC,
				      node);
		if (newpda) {
			memcpy(newpda, pda, sizeof (struct x8664_pda));
			cpu_pda(cpu) = newpda;
		} else
			printk(KERN_ERR
		"Could not allocate node local PDA for CPU %d on node %d\n",
				cpu, node);
	}

	alternatives_smp_switch(1);

	/* Reuse the idle task cached by a previous bringup of this cpu, if any */
	c_idle.idle = get_idle_for_cpu(cpu);

	if (c_idle.idle) {
		/* Reset the saved stack pointer to the top of the idle stack */
		c_idle.idle->thread.rsp = (unsigned long) (((struct pt_regs *)
			(THREAD_SIZE +  task_stack_page(c_idle.idle))) - 1);
		init_idle(c_idle.idle, cpu);
		goto do_rest;
	}

	/*
	 * During cold boot process, keventd thread is not spun up yet.
	 * When we do cpu hot-add, we create idle threads on the fly, we should
	 * not acquire any attributes from the calling context. Hence the clean
	 * way to create kernel_threads() is to do that from keventd().
	 * We do the current_is_keventd() due to the fact that ACPI notifier
	 * was also queuing to keventd() and when the caller is already running
	 * in context of keventd(), we would end up with locking up the keventd
	 * thread.
	 */
	if (!keventd_up() || current_is_keventd())
		c_idle.work.func(&c_idle.work);
	else {
		schedule_work(&c_idle.work);
		wait_for_completion(&c_idle.done);
	}

	if (IS_ERR(c_idle.idle)) {
		printk("failed fork for CPU %d\n", cpu);
		return PTR_ERR(c_idle.idle);
	}

	/* Cache the idle task so a later hot re-add can reuse it (see above) */
	set_idle_for_cpu(cpu, c_idle.idle);

do_rest:

	cpu_pda(cpu)->pcurrent = c_idle.idle;

	/* Real-mode address the AP starts executing at */
	start_rip = setup_trampoline();

	/* Hand the AP its stack and C entry point via these globals */
	init_rsp = c_idle.idle->thread.rsp;
	per_cpu(init_tss,cpu).rsp0 = init_rsp;
	initial_code = start_secondary;
	clear_tsk_thread_flag(c_idle.idle, TIF_FORK);

	printk(KERN_INFO "Booting processor %d/%d APIC 0x%x\n", cpu,
		cpus_weight(cpu_present_map),
		apicid);

	/*
	 * This grunge runs the startup process for
	 * the targeted processor.
	 */

	atomic_set(&init_deasserted, 0);

	Dprintk("Setting warm reset code and vector.\n");

	/* Warm reset: shutdown code 0xa in CMOS 0xf, vector at 0x467/0x469 */
	CMOS_WRITE(0xa, 0xf);
	local_flush_tlb();
	Dprintk("1.\n");
	*((volatile unsigned short *) phys_to_virt(0x469)) = start_rip >> 4;
	Dprintk("2.\n");
	*((volatile unsigned short *) phys_to_virt(0x467)) = start_rip & 0xf;
	Dprintk("3.\n");

	/*
	 * Be paranoid about clearing APIC errors.
	 */
	apic_write(APIC_ESR, 0);
	apic_read(APIC_ESR);

	/*
	 * Status is now clean
	 */
	boot_error = 0;

	/*
	 * Starting actual IPI sequence...
	 */
	boot_error = wakeup_secondary_via_INIT(apicid, start_rip);

	if (!boot_error) {
		/*
		 * allow APs to start initializing.
		 */
		Dprintk("Before Callout %d.\n", cpu);
		cpu_set(cpu, cpu_callout_map);
		Dprintk("After Callout %d.\n", cpu);

		/*
		 * Wait 5s total for a response (50000 * 100us)
		 */
		for (timeout = 0; timeout < 50000; timeout++) {
			if (cpu_isset(cpu, cpu_callin_map))
				break;	/* It has booted */
			udelay(100);
		}

		if (cpu_isset(cpu, cpu_callin_map)) {
			/* number CPUs logically, starting from 1 (BSP is 0) */
			Dprintk("CPU has booted.\n");
		} else {
			boot_error = 1;
			/*
			 * 0xA5 at the trampoline base distinguishes "trampoline
			 * ran but the CPU got stuck" from "never started".
			 */
			if (*((volatile unsigned char *)phys_to_virt(SMP_TRAMPOLINE_BASE))
					== 0xA5)
				/* trampoline started but...? */
				printk("Stuck ??\n");
			else
				/* trampoline code not run */
				printk("Not responding.\n");
#ifdef APIC_DEBUG
			inquire_remote_apic(apicid);
#endif
		}
	}
	if (boot_error) {
		/* Undo everything the failed bringup set up */
		cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
		clear_bit(cpu, &cpu_initialized); /* was set by cpu_init() */
		clear_node_cpumask(cpu); /* was set by numa_add_cpu */
		cpu_clear(cpu, cpu_present_map);
		cpu_clear(cpu, cpu_possible_map);
		per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID;
		return -EIO;
	}

	return 0;
}

/* NOTE(review): these appear unused within this chunk -- confirm callers */
cycles_t cacheflush_time;
unsigned long cache_decay_ticks;

/*
 * Cleanup possible dangling ends...
 */
static __cpuinit void smp_cleanup_boot(void)
{
	/*
	 * Paranoid:  Set warm reset code and vector here back
	 * to default values.
	 */
	CMOS_WRITE(0, 0xf);

	/*
	 * Reset trampoline flag
	 */
	*((volatile int *) phys_to_virt(0x467)) = 0;
}

/*
 * Fall back to non SMP mode after errors.
 *
 * RED-PEN audit/test this more. I bet there is more state messed up here.
 */
static __init void disable_smp(void)
{
	/* Shrink all topology maps down to just the boot CPU */
	cpu_present_map = cpumask_of_cpu(0);
	cpu_possible_map = cpumask_of_cpu(0);
	if (smp_found_config)
		phys_cpu_present_map = physid_mask_of_physid(boot_cpu_id);
	else
		phys_cpu_present_map = physid_mask_of_physid(0);
	cpu_set(0, per_cpu(cpu_sibling_map, 0));
	cpu_set(0, per_cpu(cpu_core_map, 0));
}

#ifdef CONFIG_HOTPLUG_CPU

int additional_cpus __initdata = -1;

/*
 * cpu_possible_map should be static, it cannot change as cpu's
 * are onlined, or offlined. The reason is per-cpu data-structures
 * are allocated by some modules at init time, and dont expect to
 * do this dynamically on cpu arrival/departure.
 * cpu_present_map on the other hand can change dynamically.
 * In case when cpu_hotplug is not compiled, then we resort to current
 * behaviour, which is cpu_possible == cpu_present.
 * - Ashok Raj
 *
 * Three ways to find out the number of additional hotplug CPUs:
 * - If the BIOS specified disabled CPUs in ACPI/mptables use that.
* - The user can overwrite it with additional_cpus=NUM
 * - Otherwise don't reserve additional CPUs.
 * We do this because additional CPUs waste a lot of memory.
 * -AK
 */
__init void prefill_possible_map(void)
{
	int i;
	int possible;

	/* Default: reserve one possible slot per BIOS-disabled CPU */
	if (additional_cpus == -1) {
		if (disabled_cpus > 0)
			additional_cpus = disabled_cpus;
		else
			additional_cpus = 0;
	}
	possible = num_processors + additional_cpus;
	if (possible > NR_CPUS)
		possible = NR_CPUS;

	printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
		possible,
	        max_t(int, possible - num_processors, 0));

	for (i = 0; i < possible; i++)
		cpu_set(i, cpu_possible_map);
}
#endif

/*
 * Various sanity checks.  Returns 0 when SMP bringup may proceed,
 * -1 after having forced the system into uniprocessor mode.
 */
static int __init smp_sanity_check(unsigned max_cpus)
{
	/* The boot CPU must be present in the physical APIC map */
	if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) {
		printk("weird, boot CPU (#%d) not listed by the BIOS.\n",
		       hard_smp_processor_id());
		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}

	/*
	 * If we couldn't find an SMP configuration at boot time,
	 * get out of here now!
	 */
	if (!smp_found_config) {
		printk(KERN_NOTICE "SMP motherboard not detected.\n");
		disable_smp();
		if (APIC_init_uniprocessor())
			printk(KERN_NOTICE "Local APIC not detected."
					   " Using dummy APIC emulation.\n");
		return -1;
	}

	/*
	 * Should not be necessary because the MP table should list the boot
	 * CPU too, but we do it for the sake of robustness anyway.
	 */
	if (!physid_isset(boot_cpu_id, phys_cpu_present_map)) {
		printk(KERN_NOTICE "weird, boot CPU (#%d) not listed by the BIOS.\n",
								 boot_cpu_id);
		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}

	/*
	 * If we couldn't find a local APIC, then get out of here now!
	 */
	if (!cpu_has_apic) {
		printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
			boot_cpu_id);
		printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
		nr_ioapics = 0;
		return -1;
	}

	/*
	 * If SMP should be disabled, then really disable it!
 */
	if (!max_cpus) {
		printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n");
		nr_ioapics = 0;
		return -1;
	}

	return 0;
}

/*
 * Copy apicid's found by MP_processor_info from initial array to the per cpu
 * data area.  The x86_cpu_to_apicid_init array is then expendable and the
 * x86_cpu_to_apicid_ptr is zeroed indicating that the static array is no
 * longer available.
 */
void __init smp_set_apicids(void)
{
	int cpu;

	for_each_cpu_mask(cpu, cpu_possible_map) {
		/* Only CPUs whose per-cpu area is already set up */
		if (per_cpu_offset(cpu))
			per_cpu(x86_cpu_to_apicid, cpu) =
						x86_cpu_to_apicid_init[cpu];
	}

	/* indicate the static array will be going away soon */
	x86_cpu_to_apicid_ptr = NULL;
}

/*
 * Prepare for SMP bootup.  The MP table or ACPI has been read
 * earlier.  Just do some sanity checking here and enable APIC mode.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	nmi_watchdog_default();
	current_cpu_data = boot_cpu_data;
	current_thread_info()->cpu = 0;  /* needed? */
	smp_set_apicids();
	set_cpu_sibling_map(0);

	if (smp_sanity_check(max_cpus) < 0) {
		printk(KERN_INFO "SMP disabled\n");
		disable_smp();
		return;
	}

	/*
	 * Switch from PIC to APIC mode.
	 */
	setup_local_APIC();

	/* The hardware-reported APIC ID must match the one we booted with */
	if (GET_APIC_ID(apic_read(APIC_ID)) != boot_cpu_id) {
		panic("Boot APIC ID in local APIC unexpected (%d vs %d)",
		      GET_APIC_ID(apic_read(APIC_ID)), boot_cpu_id);
		/* Or can we switch back to PIC here? */
	}

	/*
	 * Now start the IO-APICs
	 */
	if (!skip_ioapic_setup && nr_ioapics)
		setup_IO_APIC();
	else
		nr_ioapics = 0;

	/*
	 * Set up local APIC timer on boot CPU.
	 */
	setup_boot_APIC_clock();
}

/*
 * Early setup to make printk work: mark the boot CPU online/called-out
 * and record its hotplug state.
 */
void __init smp_prepare_boot_cpu(void)
{
	int me = smp_processor_id();
	cpu_set(me, cpu_online_map);
	cpu_set(me, cpu_callout_map);
	per_cpu(cpu_state, me) = CPU_ONLINE;
}

/*
 * Entry point to boot a CPU.
 */
int __cpuinit __cpu_up(unsigned int cpu)
{
	int apicid = cpu_present_to_apicid(cpu);
	unsigned long flags;
	int err;

	WARN_ON(irqs_disabled());

	Dprintk("++++++++++++++++++++=_---CPU UP  %u\n", cpu);

	/* Reject CPUs with no valid APIC id, the BSP, or absent CPUs */
	if (apicid == BAD_APICID || apicid == boot_cpu_id ||
	    !physid_isset(apicid, phys_cpu_present_map)) {
		printk("__cpu_up: bad cpu %d\n", cpu);
		return -EINVAL;
	}

	/*
	 * Already booted CPU?
	 */
	if (cpu_isset(cpu, cpu_callin_map)) {
		Dprintk("do_boot_cpu %d Already started\n", cpu);
		return -ENOSYS;
	}

	/*
	 * Save current MTRR state in case it was changed since early boot
	 * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync:
	 */
	mtrr_save_state();

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
	/* Boot it! */
	err = do_boot_cpu(cpu, apicid);
	if (err < 0) {
		Dprintk("do_boot_cpu failed %d\n", err);
		return err;
	}

	/* Unleash the CPU! */
	Dprintk("waiting for cpu %d\n", cpu);

	/*
	 * Make sure and check TSC sync: run with interrupts off so the
	 * measurement is not perturbed.
	 */
	local_irq_save(flags);
	check_tsc_sync_source(cpu);
	local_irq_restore(flags);

	/* Busy-wait until the AP marks itself online before returning */
	while (!cpu_isset(cpu, cpu_online_map))
		cpu_relax();
	err = 0;

	return err;
}

/*
 * Finish the SMP boot.
 */
void __init smp_cpus_done(unsigned int max_cpus)
{
	smp_cleanup_boot();
	setup_ioapic_dest();
	check_nmi_watchdog();
}

#ifdef CONFIG_HOTPLUG_CPU

/* Tear down sibling/core topology links for a CPU going offline */
static void remove_siblinginfo(int cpu)
{
	int sibling;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
		cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
		/*
		 * last thread sibling in this cpu core going down
		 */
		if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
			cpu_data(sibling).booted_cores--;
	}

	for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
		cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
	cpus_clear(per_cpu(cpu_sibling_map, cpu));
	cpus_clear(per_cpu(cpu_core_map, cpu));
	c->phys_proc_id = 0;
	c->cpu_core_id = 0;
	cpu_clear(cpu, cpu_sibling_setup_map);
}

/* Remove the calling CPU from the bookkeeping masks set during bringup */
void remove_cpu_from_maps(void)
{
	int cpu = smp_processor_id();

	cpu_clear(cpu, cpu_callout_map);
	cpu_clear(cpu, cpu_callin_map);
	clear_bit(cpu, &cpu_initialized); /* was set by cpu_init() */
	clear_node_cpumask(cpu);
}

/*
 * Take the calling CPU out of service.  Returns 0 on success,
 * -EBUSY for the boot processor (which cannot be offlined).
 */
int __cpu_disable(void)
{
	int cpu = smp_processor_id();

	/*
	 * Perhaps use cpufreq to drop frequency, but that could go
	 * into generic code.
	 *
	 * We won't take down the boot processor on i386 due to some
	 * interrupts only being able to be serviced by the BSP.
	 * Especially so if we're not using an IOAPIC	-zwane
	 */
	if (cpu == 0)
		return -EBUSY;

	if (nmi_watchdog == NMI_LOCAL_APIC)
		stop_apic_nmi_watchdog(NULL);
	clear_local_APIC();

	/*
	 * HACK:
	 * Allow any queued timer interrupts to get serviced
	 * This is only a temporary solution until we cleanup
	 * fixup_irqs as we do for IA64.
	 */
	local_irq_enable();
	mdelay(1);

	local_irq_disable();
	remove_siblinginfo(cpu);

	spin_lock(&vector_lock);
	/* It's now safe to remove this processor from the online map */
	cpu_clear(cpu, cpu_online_map);
	spin_unlock(&vector_lock);
	remove_cpu_from_maps();
	fixup_irqs(cpu_online_map);
	return 0;
}

void __cpu_die(unsigned int cpu)
{
	/* We don't do anything here: idle task is faking death itself.
 */
	unsigned int i;

	/* Poll up to 1s (10 * 100ms) for the CPU to reach CPU_DEAD */
	for (i = 0; i < 10; i++) {
		/* They ack this in play_dead by setting CPU_DEAD */
		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
			printk ("CPU %d is now offline\n", cpu);
			/* Last AP gone: fall back to UP alternatives */
			if (1 == num_online_cpus())
				alternatives_smp_switch(0);
			return;
		}
		msleep(100);
	}
	printk(KERN_ERR "CPU %u didn't die...\n", cpu);
}

/* Parse the "additional_cpus=NUM" early boot parameter */
static __init int setup_additional_cpus(char *s)
{
	return s && get_option(&s, &additional_cpus) ? 0 : -EINVAL;
}
early_param("additional_cpus", setup_additional_cpus);

#else /* ... !CONFIG_HOTPLUG_CPU */

int __cpu_disable(void)
{
	return -ENOSYS;
}

void __cpu_die(unsigned int cpu)
{
	/* We said "no" in __cpu_disable */
	BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -