⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 smpboot_32.c

📁 linux 内核源代码
💻 C
📖 第 1 页 / 共 3 页
字号:
	/*
	 * NOTE(review): this chunk opens mid-function. The statements below
	 * are the tail of a CPU-teardown routine whose head lies above this
	 * view -- presumably cpu_exit_clear(); confirm against the full file.
	 * It removes the departing CPU from the boot-time bookkeeping masks
	 * and unhooks its logical APIC id.
	 */
	cpucount --;
	cpu_uninit();
	irq_ctx_exit(cpu);
	cpu_clear(cpu, cpu_callout_map);
	cpu_clear(cpu, cpu_callin_map);
	cpu_clear(cpu, smp_commenced_mask);
	unmap_cpu_to_logical_apicid(cpu);
}

/*
 * Parameter block handed to a work item so that the "warm boot" of a
 * previously-offlined CPU runs in workqueue context; the caller blocks
 * on @complete until the boot attempt has finished.
 */
struct warm_boot_cpu_info {
	struct completion *complete;	/* signalled by do_warm_boot_cpu() when done */
	struct work_struct task;	/* the deferred do_warm_boot_cpu() work item */
	int apicid;			/* APIC id of the CPU to kick */
	int cpu;			/* logical CPU number to boot */
};

/* Workqueue callback: boot the requested CPU, then wake the waiter. */
static void __cpuinit do_warm_boot_cpu(struct work_struct *work)
{
	struct warm_boot_cpu_info *info =
		container_of(work, struct warm_boot_cpu_info, task);
	do_boot_cpu(info->apicid, info->cpu);
	complete(info->complete);
}

/*
 * Warm-boot a CPU that was brought down earlier (hotplug re-online path).
 * Temporarily recreates the low identity mapping that the real-mode
 * trampoline needs, runs do_boot_cpu() via a work item and waits for it
 * to finish, then tears the low mapping down again.
 *
 * Returns 0 on success, -ENODEV if the CPU has no valid APIC id recorded.
 */
static int __cpuinit __smp_prepare_cpu(int cpu)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct warm_boot_cpu_info info;
	int	apicid, ret;

	apicid = per_cpu(x86_cpu_to_apicid, cpu);
	if (apicid == BAD_APICID) {
		ret = -ENODEV;
		goto exit;
	}

	info.complete = &done;
	info.apicid = apicid;
	info.cpu = cpu;
	INIT_WORK(&info.task, do_warm_boot_cpu);

	/* init low mem mapping */
	clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS,
			min_t(unsigned long, KERNEL_PGD_PTRS, USER_PGD_PTRS));
	flush_tlb_all();
	schedule_work(&info.task);
	wait_for_completion(&done);

	/* The trampoline is done with the identity mapping; remove it. */
	zap_low_mappings();
	ret = 0;
exit:
	return ret;
}
#endif

/*
 * Cycle through the processors sending APIC IPIs to boot each.
 */

static int boot_cpu_logical_apicid;
/* Where the IO area was mapped on multiquad, always 0 otherwise */
void *xquad_portio;
#ifdef CONFIG_X86_NUMAQ
EXPORT_SYMBOL(xquad_portio);
#endif

/*
 * Boot-time bring-up of all secondary CPUs (up to @max_cpus).
 *
 * Records the boot CPU's data, sanity-checks the MP/ACPI configuration
 * and the local APIC, then walks the physical-CPU present map kicking
 * each AP via do_boot_cpu().  On any of the "can't do SMP" early exits
 * it falls back to a uniprocessor setup (dummy APIC emulation where
 * needed) and returns with only CPU 0 configured.
 */
static void __init smp_boot_cpus(unsigned int max_cpus)
{
	int apicid, cpu, bit, kicked;
	unsigned long bogosum = 0;

	/*
	 * Setup boot CPU information
	 */
	smp_store_cpu_info(0); /* Final full version of the data */
	printk("CPU%d: ", 0);
	print_cpu_info(&cpu_data(0));

	boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
	boot_cpu_logical_apicid = logical_smp_processor_id();
	per_cpu(x86_cpu_to_apicid, 0) = boot_cpu_physical_apicid;

	current_thread_info()->cpu = 0;

	set_cpu_sibling_map(0);

	/*
	 * If we couldn't find an SMP configuration at boot time,
	 * get out of here now!
	 */
	if (!smp_found_config && !acpi_lapic) {
		printk(KERN_NOTICE "SMP motherboard not detected.\n");
		smpboot_clear_io_apic_irqs();
		phys_cpu_present_map = physid_mask_of_physid(0);
		if (APIC_init_uniprocessor())
			printk(KERN_NOTICE "Local APIC not detected."
					   " Using dummy APIC emulation.\n");
		map_cpu_to_logical_apicid();
		cpu_set(0, per_cpu(cpu_sibling_map, 0));
		cpu_set(0, per_cpu(cpu_core_map, 0));
		return;
	}

	/*
	 * Should not be necessary because the MP table should list the boot
	 * CPU too, but we do it for the sake of robustness anyway.
	 * Makes no sense to do this check in clustered apic mode, so skip it
	 */
	if (!check_phys_apicid_present(boot_cpu_physical_apicid)) {
		printk("weird, boot CPU (#%d) not listed by the BIOS.\n",
				boot_cpu_physical_apicid);
		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}

	/*
	 * If we couldn't find a local APIC, then get out of here now!
	 */
	if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) && !cpu_has_apic) {
		printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
			boot_cpu_physical_apicid);
		printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
		smpboot_clear_io_apic_irqs();
		phys_cpu_present_map = physid_mask_of_physid(0);
		map_cpu_to_logical_apicid();
		cpu_set(0, per_cpu(cpu_sibling_map, 0));
		cpu_set(0, per_cpu(cpu_core_map, 0));
		return;
	}

	verify_local_APIC();

	/*
	 * If SMP should be disabled, then really disable it!
	 */
	if (!max_cpus) {
		smp_found_config = 0;
		printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n");

		/* Keep just enough APIC alive for the NMI watchdog, if in use. */
		if (nmi_watchdog == NMI_LOCAL_APIC) {
			printk(KERN_INFO "activating minimal APIC for NMI watchdog use.\n");
			connect_bsp_APIC();
			setup_local_APIC();
		}
		smpboot_clear_io_apic_irqs();
		phys_cpu_present_map = physid_mask_of_physid(0);
		map_cpu_to_logical_apicid();
		cpu_set(0, per_cpu(cpu_sibling_map, 0));
		cpu_set(0, per_cpu(cpu_core_map, 0));
		return;
	}

	connect_bsp_APIC();
	setup_local_APIC();
	map_cpu_to_logical_apicid();

	setup_portio_remap();

	/*
	 * Scan the CPU present map and fire up the other CPUs via do_boot_cpu
	 *
	 * In clustered apic mode, phys_cpu_present_map is a constructed thus:
	 * bits 0-3 are quad0, 4-7 are quad1, etc. A perverse twist on the
	 * clustered apic ID.
	 */
	Dprintk("CPU present map: %lx\n", physids_coerce(phys_cpu_present_map));

	kicked = 1;	/* the boot CPU counts as already "kicked" */
	for (bit = 0; kicked < NR_CPUS && bit < MAX_APICS; bit++) {
		apicid = cpu_present_to_apicid(bit);
		/*
		 * Don't even attempt to start the boot CPU!
		 */
		if ((apicid == boot_cpu_apicid) || (apicid == BAD_APICID))
			continue;

		if (!check_apicid_present(bit))
			continue;
		if (max_cpus <= cpucount+1)
			continue;

		if (((cpu = alloc_cpu_id()) <= 0) || do_boot_cpu(apicid, cpu))
			printk("CPU #%d not responding - cannot use it.\n",
								apicid);
		else
			++kicked;
	}

	/*
	 * Cleanup possible dangling ends...
	 */
	smpboot_restore_warm_reset_vector();

	/*
	 * Allow the user to impress friends.
	 */
	Dprintk("Before bogomips.\n");
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_isset(cpu, cpu_callout_map))
			bogosum += cpu_data(cpu).loops_per_jiffy;
	printk(KERN_INFO
		"Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
		cpucount+1,
		bogosum/(500000/HZ),
		(bogosum/(5000/HZ))%100);

	Dprintk("Before bogocount - setting activated=1.\n");

	if (smp_b_stepping)
		printk(KERN_WARNING "WARNING: SMP operation may be unreliable with B stepping processors.\n");

	/*
	 * Don't taint if we are running SMP kernel on a single non-MP
	 * approved Athlon
	 */
	if (tainted & TAINT_UNSAFE_SMP) {
		if (cpucount)
			printk (KERN_INFO "WARNING: This combination of AMD processors is not suitable for SMP.\n");
		else
			tainted &= ~TAINT_UNSAFE_SMP;
	}

	Dprintk("Boot done.\n");

	/*
	 * construct cpu_sibling_map, so that we can tell sibling CPUs
	 * efficiently.
	 */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		cpus_clear(per_cpu(cpu_sibling_map, cpu));
		cpus_clear(per_cpu(cpu_core_map, cpu));
	}

	cpu_set(0, per_cpu(cpu_sibling_map, 0));
	cpu_set(0, per_cpu(cpu_core_map, 0));

	smpboot_setup_io_apic();

	setup_boot_clock();
}

/* These are wrappers to interface to the new boot process.  Someone
   who understands all this stuff should rewrite it properly.
   --RR 15/Jul/02 */

/* smp_ops hook: seed the commenced/callin masks with CPU 0, then boot the APs. */
void __init native_smp_prepare_cpus(unsigned int max_cpus)
{
	smp_commenced_mask = cpumask_of_cpu(0);
	cpu_callin_map = cpumask_of_cpu(0);
	mb();
	smp_boot_cpus(max_cpus);
}

/* smp_ops hook: set up the boot CPU's GDT and mark it in every cpu mask. */
void __init native_smp_prepare_boot_cpu(void)
{
	unsigned int cpu = smp_processor_id();
	init_gdt(cpu);
	switch_to_new_gdt();
	cpu_set(cpu, cpu_online_map);
	cpu_set(cpu, cpu_callout_map);
	cpu_set(cpu, cpu_present_map);
	cpu_set(cpu, cpu_possible_map);
	__get_cpu_var(cpu_state) = CPU_ONLINE;
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Remove @cpu from its siblings' core/sibling maps and reset its own
 * topology bookkeeping (phys_proc_id, cpu_core_id) before it goes offline.
 */
void remove_siblinginfo(int cpu)
{
	int sibling;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
		cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
		/*
		 * last thread sibling in this cpu core going down
		 */
		if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
			cpu_data(sibling).booted_cores--;
	}

	for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
		cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
	cpus_clear(per_cpu(cpu_sibling_map, cpu));
	cpus_clear(per_cpu(cpu_core_map, cpu));
	c->phys_proc_id = 0;
	c->cpu_core_id = 0;
	cpu_clear(cpu, cpu_sibling_setup_map);
}

/*
 * Take the calling CPU out of service: quiesce its APIC, drain pending
 * timer interrupts, drop it from the topology maps and reroute its irqs.
 * Returns 0 on success, -EBUSY when asked to take down the boot CPU.
 */
int __cpu_disable(void)
{
	cpumask_t map = cpu_online_map;
	int cpu = smp_processor_id();

	/*
	 * Perhaps use cpufreq to drop frequency, but that could go
	 * into generic code.
	 *
	 * We won't take down the boot processor on i386 due to some
	 * interrupts only being able to be serviced by the BSP.
	 * Especially so if we're not using an IOAPIC	-zwane
	 */
	if (cpu == 0)
		return -EBUSY;
	if (nmi_watchdog == NMI_LOCAL_APIC)
		stop_apic_nmi_watchdog(NULL);
	clear_local_APIC();
	/* Allow any queued timer interrupts to get serviced */
	local_irq_enable();
	mdelay(1);
	local_irq_disable();

	remove_siblinginfo(cpu);

	cpu_clear(cpu, map);
	fixup_irqs(map);
	/* It's now safe to remove this processor from the online map */
	cpu_clear(cpu, cpu_online_map);
	return 0;
}

/*
 * Wait (up to ~1 second, polling at 100 ms) for a disabled CPU to report
 * CPU_DEAD from play_dead(); log an error if it never does.
 */
void __cpu_die(unsigned int cpu)
{
	/* We don't do anything here: idle task is faking death itself.
	 */
	unsigned int i;

	for (i = 0; i < 10; i++) {
		/* They ack this in play_dead by setting CPU_DEAD */
		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
			printk ("CPU %d is now offline\n", cpu);
			/* Last AP gone: switch back to UP alternatives. */
			if (1 == num_online_cpus())
				alternatives_smp_switch(0);
			return;
		}
		msleep(100);
	}
	printk(KERN_ERR "CPU %u didn't die...\n", cpu);
}
#else /* ... !CONFIG_HOTPLUG_CPU */
/* Hotplug disabled: refuse to take any CPU down. */
int __cpu_disable(void)
{
	return -ENOSYS;
}

void __cpu_die(unsigned int cpu)
{
	/* We said "no" in __cpu_disable */
	BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * smp_ops hook: bring @cpu online.  Cold boot of APs happened earlier in
 * smp_boot_cpus(); this path warm-boots a hot-replugged CPU if needed,
 * releases it via smp_commenced_mask, synchronizes TSCs with it, and spins
 * until it shows up in cpu_online_map.  Returns 0 on success, -EIO if the
 * CPU never called in.
 */
int __cpuinit native_cpu_up(unsigned int cpu)
{
	unsigned long flags;
#ifdef CONFIG_HOTPLUG_CPU
	int ret = 0;

	/*
	 * We do warm boot only on cpus that had booted earlier
	 * Otherwise cold boot is all handled from smp_boot_cpus().
	 * cpu_callin_map is set during AP kickstart process. Its reset
	 * when a cpu is taken offline from cpu_exit_clear().
	 */
	if (!cpu_isset(cpu, cpu_callin_map))
		ret = __smp_prepare_cpu(cpu);

	if (ret)
		return -EIO;
#endif

	/* In case one didn't come up */
	if (!cpu_isset(cpu, cpu_callin_map)) {
		printk(KERN_DEBUG "skipping cpu%d, didn't come online\n", cpu);
		return -EIO;
	}

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
	/* Unleash the CPU! */
	cpu_set(cpu, smp_commenced_mask);

	/*
	 * Check TSC synchronization with the AP (keep irqs disabled
	 * while doing so):
	 */
	local_irq_save(flags);
	check_tsc_sync_source(cpu);
	local_irq_restore(flags);

	while (!cpu_isset(cpu, cpu_online_map)) {
		cpu_relax();
		touch_nmi_watchdog();
	}

	return 0;
}

/* smp_ops hook: finish SMP bring-up after all CPUs are online. */
void __init native_smp_cpus_done(unsigned int max_cpus)
{
#ifdef CONFIG_X86_IO_APIC
	setup_ioapic_dest();
#endif
	zap_low_mappings();
#ifndef CONFIG_HOTPLUG_CPU
	/*
	 * Disable executability of the SMP trampoline:
	 */
	set_kernel_exec((unsigned long)trampoline_base, trampoline_exec);
#endif
}

/* Install the IDT gates for the timer and the SMP IPI vectors. */
void __init smp_intr_init(void)
{
	/*
	 * IRQ0 must be given a fixed assignment and initialized,
	 * because it's used before the IO-APIC is set up.
	 */
	set_intr_gate(FIRST_DEVICE_VECTOR, interrupt[0]);

	/*
	 * The reschedule interrupt is a CPU-to-CPU reschedule-helper
	 * IPI, driven by wakeup.
	 */
	set_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);

	/* IPI for invalidation */
	set_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt);

	/* IPI for generic function call */
	set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
}

/*
 * If the BIOS enumerates physical processors before logical,
 * maxcpus=N at enumeration-time can be used to disable HT.
 */
static int __init parse_maxcpus(char *arg)
{
	extern unsigned int maxcpus;

	maxcpus = simple_strtoul(arg, NULL, 0);
	return 0;
}
early_param("maxcpus", parse_maxcpus);

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -