⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 smpboot.c

📁 linux-2.6.15.6
💻 C
📖 第 1 页 / 共 3 页
字号:
		}
		/*
		 * Tail of do_boot_cpu(): the loop above polled for the AP to
		 * set itself in cpu_callin_map after the startup IPI.
		 * (The head of do_boot_cpu() is above this excerpt.)
		 */
		if (cpu_isset(cpu, cpu_callin_map)) {
			/* number CPUs logically, starting from 1 (BSP is 0) */
			Dprintk("OK.\n");
			printk("CPU%d: ", cpu);
			print_cpu_info(&cpu_data[cpu]);
			Dprintk("CPU has booted.\n");
		} else {
			boot_error= 1;
			/*
			 * Distinguish "trampoline ran but AP hung" from
			 * "trampoline never executed" by checking the marker
			 * byte the trampoline writes at trampoline_base.
			 */
			if (*((volatile unsigned char *)trampoline_base)
					== 0xA5)
				/* trampoline started but...? */
				printk("Stuck ??\n");
			else
				/* trampoline code not run */
				printk("Not responding.\n");
			inquire_remote_apic(apicid);
		}
	}

	if (boot_error) {
		/* Try to put things back the way they were before ... */
		unmap_cpu_to_logical_apicid(cpu);
		cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
		cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */
		cpucount--;
	} else {
		/* Success: publish the cpu -> APIC id mapping. */
		x86_cpu_to_apicid[cpu] = apicid;
		cpu_set(cpu, cpu_present_map);
	}

	/* mark "stuck" area as not stuck */
	*((volatile unsigned long *)trampoline_base) = 0;

	return boot_error;
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * cpu_exit_clear - undo per-CPU boot bookkeeping for an offlining CPU.
 *
 * Runs on the CPU that is going down (hence raw_smp_processor_id()):
 * it drops the CPU from every bring-up cpumask that do_boot_cpu()/
 * cpu_init() set, so a later warm re-boot starts from a clean state.
 */
void cpu_exit_clear(void)
{
	int cpu = raw_smp_processor_id();

	idle_task_exit();

	cpucount --;
	cpu_uninit();
	irq_ctx_exit(cpu);

	cpu_clear(cpu, cpu_callout_map);
	cpu_clear(cpu, cpu_callin_map);
	cpu_clear(cpu, cpu_present_map);
	cpu_clear(cpu, smp_commenced_mask);

	unmap_cpu_to_logical_apicid(cpu);
}

/*
 * Arguments handed from smp_prepare_cpu() to the workqueue callback
 * below; @complete signals the caller when the boot attempt finishes.
 */
struct warm_boot_cpu_info {
	struct completion *complete;
	int apicid;
	int cpu;
};

/*
 * Workqueue callback: perform the actual (warm) boot of one AP and
 * wake whoever is blocked in smp_prepare_cpu().
 * NOTE(review): do_boot_cpu()'s return value is ignored here — failure
 * is presumably detected later via cpu_callin_map; confirm against
 * __cpu_up().
 */
static void __devinit do_warm_boot_cpu(void *p)
{
	struct warm_boot_cpu_info *info = p;
	do_boot_cpu(info->apicid, info->cpu);
	complete(info->complete);
}

/*
 * smp_prepare_cpu - warm-boot a previously offlined CPU.
 * @cpu: logical CPU number to bring back.
 *
 * Returns 0 on success, -ENODEV if @cpu has no known APIC id.
 * Temporarily restores the low identity mapping (the real-mode
 * trampoline needs it) and disables TSC sync while the AP comes up;
 * the boot itself is pushed through a workqueue so it runs with the
 * right context.
 */
int __devinit smp_prepare_cpu(int cpu)
{
	DECLARE_COMPLETION(done);
	struct warm_boot_cpu_info info;
	struct work_struct task;
	int	apicid, ret;

	lock_cpu_hotplug();
	apicid = x86_cpu_to_apicid[cpu];
	if (apicid == BAD_APICID) {
		ret = -ENODEV;
		goto exit;
	}

	info.complete = &done;
	info.apicid = apicid;
	info.cpu = cpu;
	INIT_WORK(&task, do_warm_boot_cpu, &info);

	tsc_sync_disabled = 1;

	/* init low mem mapping */
	clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS,
			KERNEL_PGD_PTRS);
	flush_tlb_all();
	schedule_work(&task);
	wait_for_completion(&done);

	tsc_sync_disabled = 0;
	zap_low_mappings();
	ret = 0;
exit:
	unlock_cpu_hotplug();
	return ret;
}
#endif

/*
 * smp_tune_scheduling - legacy cache/bandwidth estimation.
 *
 * NOTE(review): cachesize and bandwidth are computed but never used or
 * stored — the body is effectively dead apart from the early return on
 * a TSC-less system (which itself has no observable effect here).
 * Looks like a leftover from an older migration-cost calculation.
 */
static void smp_tune_scheduling (void)
{
	unsigned long cachesize;       /* kB   */
	unsigned long bandwidth = 350; /* MB/s */
	/*
	 * Rough estimation for SMP scheduling, this is the number of
	 * cycles it takes for a fully memory-limited process to flush
	 * the SMP-local cache.
	 *
	 * (For a P5 this pretty much means we will choose another idle
	 *  CPU almost always at wakeup time (this is due to the small
	 *  L1 cache), on PIIs it's around 50-100 usecs, depending on
	 *  the cache size)
	 */

	if (!cpu_khz) {
		/*
		 * this basically disables processor-affinity
		 * scheduling on SMP without a TSC.
		 */
		return;
	} else {
		cachesize = boot_cpu_data.x86_cache_size;
		if (cachesize == -1) {
			cachesize = 16;	/* Pentiums, 2x8kB cache */
			bandwidth = 100;
		}
	}
}

/*
 * Cycle through the processors sending APIC IPIs to boot each.
 */

static int boot_cpu_logical_apicid;
/* Where the IO area was mapped on multiquad, always 0 otherwise */
void *xquad_portio;
#ifdef CONFIG_X86_NUMAQ
EXPORT_SYMBOL(xquad_portio);
#endif

/*
 * smp_boot_cpus - boot every secondary CPU, up to @max_cpus total.
 *
 * Runs on the BSP. Records the boot CPU's APIC ids, bails out to
 * uniprocessor operation when no SMP configuration / local APIC is
 * usable (or SMP was disabled with max_cpus == 0), otherwise walks the
 * physical present map and fires up each AP via do_boot_cpu().
 * Finishes with sibling-map setup, IO-APIC setup, the boot APIC clock,
 * and BSP-side TSC synchronization.
 */
static void __init smp_boot_cpus(unsigned int max_cpus)
{
	int apicid, cpu, bit, kicked;
	unsigned long bogosum = 0;

	/*
	 * Setup boot CPU information
	 */
	smp_store_cpu_info(0); /* Final full version of the data */
	printk("CPU%d: ", 0);
	print_cpu_info(&cpu_data[0]);

	boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
	boot_cpu_logical_apicid = logical_smp_processor_id();
	x86_cpu_to_apicid[0] = boot_cpu_physical_apicid;

	current_thread_info()->cpu = 0;
	smp_tune_scheduling();

	set_cpu_sibling_map(0);

	/*
	 * If we couldn't find an SMP configuration at boot time,
	 * get out of here now!
	 */
	if (!smp_found_config && !acpi_lapic) {
		printk(KERN_NOTICE "SMP motherboard not detected.\n");
		smpboot_clear_io_apic_irqs();
		phys_cpu_present_map = physid_mask_of_physid(0);
		if (APIC_init_uniprocessor())
			printk(KERN_NOTICE "Local APIC not detected."
					   " Using dummy APIC emulation.\n");
		map_cpu_to_logical_apicid();
		cpu_set(0, cpu_sibling_map[0]);
		cpu_set(0, cpu_core_map[0]);
		return;
	}

	/*
	 * Should not be necessary because the MP table should list the boot
	 * CPU too, but we do it for the sake of robustness anyway.
	 * Makes no sense to do this check in clustered apic mode, so skip it
	 */
	if (!check_phys_apicid_present(boot_cpu_physical_apicid)) {
		printk("weird, boot CPU (#%d) not listed by the BIOS.\n",
				boot_cpu_physical_apicid);
		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}

	/*
	 * If we couldn't find a local APIC, then get out of here now!
	 */
	if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) && !cpu_has_apic) {
		printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
			boot_cpu_physical_apicid);
		printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
		smpboot_clear_io_apic_irqs();
		phys_cpu_present_map = physid_mask_of_physid(0);
		cpu_set(0, cpu_sibling_map[0]);
		cpu_set(0, cpu_core_map[0]);
		return;
	}

	verify_local_APIC();

	/*
	 * If SMP should be disabled, then really disable it!
	 */
	if (!max_cpus) {
		smp_found_config = 0;
		printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n");
		smpboot_clear_io_apic_irqs();
		phys_cpu_present_map = physid_mask_of_physid(0);
		cpu_set(0, cpu_sibling_map[0]);
		cpu_set(0, cpu_core_map[0]);
		return;
	}

	connect_bsp_APIC();
	setup_local_APIC();
	map_cpu_to_logical_apicid();

	setup_portio_remap();

	/*
	 * Scan the CPU present map and fire up the other CPUs via do_boot_cpu
	 *
	 * In clustered apic mode, phys_cpu_present_map is a constructed thus:
	 * bits 0-3 are quad0, 4-7 are quad1, etc. A perverse twist on the 
	 * clustered apic ID.
	 */
	Dprintk("CPU present map: %lx\n", physids_coerce(phys_cpu_present_map));

	/* kicked counts booted CPUs including the BSP, hence starts at 1. */
	kicked = 1;
	for (bit = 0; kicked < NR_CPUS && bit < MAX_APICS; bit++) {
		apicid = cpu_present_to_apicid(bit);
		/*
		 * Don't even attempt to start the boot CPU!
		 */
		if ((apicid == boot_cpu_apicid) || (apicid == BAD_APICID))
			continue;

		if (!check_apicid_present(bit))
			continue;
		/* Respect the maxcpus= limit requested by the user. */
		if (max_cpus <= cpucount+1)
			continue;

		if (((cpu = alloc_cpu_id()) <= 0) || do_boot_cpu(apicid, cpu))
			printk("CPU #%d not responding - cannot use it.\n",
								apicid);
		else
			++kicked;
	}

	/*
	 * Cleanup possible dangling ends...
	 */
	smpboot_restore_warm_reset_vector();

	/*
	 * Allow the user to impress friends.
	 */
	Dprintk("Before bogomips.\n");
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_isset(cpu, cpu_callout_map))
			bogosum += cpu_data[cpu].loops_per_jiffy;
	printk(KERN_INFO
		"Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
		cpucount+1,
		bogosum/(500000/HZ),
		(bogosum/(5000/HZ))%100);

	Dprintk("Before bogocount - setting activated=1.\n");

	if (smp_b_stepping)
		printk(KERN_WARNING "WARNING: SMP operation may be unreliable with B stepping processors.\n");

	/*
	 * Don't taint if we are running SMP kernel on a single non-MP
	 * approved Athlon
	 */
	if (tainted & TAINT_UNSAFE_SMP) {
		if (cpucount)
			printk (KERN_INFO "WARNING: This combination of AMD processors is not suitable for SMP.\n");
		else
			tainted &= ~TAINT_UNSAFE_SMP;
	}

	Dprintk("Boot done.\n");

	/*
	 * construct cpu_sibling_map[], so that we can tell sibling CPUs
	 * efficiently.
	 */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		cpus_clear(cpu_sibling_map[cpu]);
		cpus_clear(cpu_core_map[cpu]);
	}

	cpu_set(0, cpu_sibling_map[0]);
	cpu_set(0, cpu_core_map[0]);

	smpboot_setup_io_apic();

	setup_boot_APIC_clock();

	/*
	 * Synchronize the TSC with the AP
	 */
	if (cpu_has_tsc && cpucount && cpu_khz)
		synchronize_tsc_bp();
}

/* These are wrappers to interface to the new boot process.  Someone
   who understands all this stuff should rewrite it properly. --RR 15/Jul/02 */

/*
 * Generic-boot-protocol entry point: seed the commenced/callin masks
 * with the BSP, then run the legacy bring-up above.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	smp_commenced_mask = cpumask_of_cpu(0);
	cpu_callin_map = cpumask_of_cpu(0);
	mb();
	smp_boot_cpus(max_cpus);
}

/* Mark the boot CPU online/present/possible and record its state. */
void __devinit smp_prepare_boot_cpu(void)
{
	cpu_set(smp_processor_id(), cpu_online_map);
	cpu_set(smp_processor_id(), cpu_callout_map);
	cpu_set(smp_processor_id(), cpu_present_map);
	cpu_set(smp_processor_id(), cpu_possible_map);
	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * remove_siblinginfo - unlink @cpu from the sibling/core topology maps.
 *
 * Drops @cpu from every sibling's core map (decrementing the sibling's
 * booted_cores when @cpu was the last thread of its core), from every
 * sibling's thread map, and invalidates its phys/core ids.
 */
static void
remove_siblinginfo(int cpu)
{
	int sibling;
	struct cpuinfo_x86 *c = cpu_data;

	for_each_cpu_mask(sibling, cpu_core_map[cpu]) {
		cpu_clear(cpu, cpu_core_map[sibling]);
		/*
		 * last thread sibling in this cpu core going down
		 */
		if (cpus_weight(cpu_sibling_map[cpu]) == 1)
			c[sibling].booted_cores--;
	}

	for_each_cpu_mask(sibling, cpu_sibling_map[cpu])
		cpu_clear(cpu, cpu_sibling_map[sibling]);
	cpus_clear(cpu_sibling_map[cpu]);
	cpus_clear(cpu_core_map[cpu]);
	phys_proc_id[cpu] = BAD_APICID;
	cpu_core_id[cpu] = BAD_APICID;
	cpu_clear(cpu, cpu_sibling_setup_map);
}

/*
 * __cpu_disable - take the current CPU out of service.
 *
 * Returns -EBUSY for CPU 0 (the BSP must stay up), 0 otherwise.
 * Runs on the CPU being offlined: quiesces its local APIC, briefly
 * re-enables interrupts so queued timer interrupts drain, reroutes
 * IRQs away via fixup_irqs(), and finally clears the online bit.
 */
int __cpu_disable(void)
{
	cpumask_t map = cpu_online_map;
	int cpu = smp_processor_id();

	/*
	 * Perhaps use cpufreq to drop frequency, but that could go
	 * into generic code.
	 *
	 * We won't take down the boot processor on i386 due to some
	 * interrupts only being able to be serviced by the BSP.
	 * Especially so if we're not using an IOAPIC	-zwane
	 */
	if (cpu == 0)
		return -EBUSY;

	clear_local_APIC();
	/* Allow any queued timer interrupts to get serviced */
	local_irq_enable();
	mdelay(1);
	local_irq_disable();

	remove_siblinginfo(cpu);

	cpu_clear(cpu, map);
	fixup_irqs(map);
	/* It's now safe to remove this processor from the online map */
	cpu_clear(cpu, cpu_online_map);
	return 0;
}

/*
 * __cpu_die - wait (up to ~1s) for the offlined @cpu to report death.
 */
void __cpu_die(unsigned int cpu)
{
	/* We don't do anything here: idle task is faking death itself. */
	unsigned int i;

	for (i = 0; i < 10; i++) {
		/* They ack this in play_dead by setting CPU_DEAD */
		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
			printk ("CPU %d is now offline\n", cpu);
			return;
		}
		msleep(100);
	}
	printk(KERN_ERR "CPU %u didn't die...\n", cpu);
}
#else /* ... !CONFIG_HOTPLUG_CPU */
/* Hotplug disabled: offlining is not supported. */
int __cpu_disable(void)
{
	return -ENOSYS;
}

void __cpu_die(unsigned int cpu)
{
	/* We said "no" in __cpu_disable */
	BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * __cpu_up - release an already-booted @cpu into the scheduler.
 *
 * Returns -EIO if the CPU never checked in during boot, 0 once the CPU
 * has appeared in cpu_online_map. Setting smp_commenced_mask is the
 * signal the AP spins on before completing its own initialization.
 */
int __devinit __cpu_up(unsigned int cpu)
{
	/* In case one didn't come up */
	if (!cpu_isset(cpu, cpu_callin_map)) {
		printk(KERN_DEBUG "skipping cpu%d, didn't come online\n", cpu);
		local_irq_enable();
		return -EIO;
	}

	local_irq_enable();
	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
	/* Unleash the CPU! */
	cpu_set(cpu, smp_commenced_mask);
	while (!cpu_isset(cpu, cpu_online_map))
		mb();
	return 0;
}

/*
 * smp_cpus_done - final SMP bring-up fixups after all CPUs are online.
 *
 * Low identity mappings are only needed by the boot trampoline; the
 * trampoline page itself stays around (executable bit restored only
 * when hotplug is compiled out, since hotplug may reuse it).
 */
void __init smp_cpus_done(unsigned int max_cpus)
{
#ifdef CONFIG_X86_IO_APIC
	setup_ioapic_dest();
#endif
	zap_low_mappings();
#ifndef CONFIG_HOTPLUG_CPU
	/*
	 * Disable executability of the SMP trampoline:
	 */
	set_kernel_exec((unsigned long)trampoline_base, trampoline_exec);
#endif
}

/*
 * smp_intr_init - install the IDT gates for SMP cross-CPU interrupts.
 */
void __init smp_intr_init(void)
{
	/*
	 * IRQ0 must be given a fixed assignment and initialized,
	 * because it's used before the IO-APIC is set up.
	 */
	set_intr_gate(FIRST_DEVICE_VECTOR, interrupt[0]);

	/*
	 * The reschedule interrupt is a CPU-to-CPU reschedule-helper
	 * IPI, driven by wakeup.
	 */
	set_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);

	/* IPI for invalidation */
	set_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt);

	/* IPI for generic function call */
	set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
}

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -