smpboot.c

来自「Linux Kernel 2.6.9 for OMAP1710」· C语言 代码 · 共 1,160 行 · 第 1/2 页

C
1,160
字号
	/* Poll the ICR delivery-status bit; give up after ~100ms (1000 x 100us). */
	timeout = 0;
	do {
		Dprintk("+");
		udelay(100);
		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
	} while (send_status && (timeout++ < 1000));

	/*
	 * Give the other CPU some time to accept the IPI.
	 */
	udelay(200);
	/*
	 * Due to the Pentium erratum 3AP.
	 */
	maxlvt = get_maxlvt();
	if (maxlvt > 3) {
		apic_read_around(APIC_SPIV);
		apic_write(APIC_ESR, 0);
	}
	/* 0xEF keeps all ESR bits except bit 4 — NOTE(review): confirm against
	 * the local APIC ESR bit layout for the CPUs this targets. */
	accept_status = (apic_read(APIC_ESR) & 0xEF);
	Dprintk("NMI sent.\n");

	if (send_status)
		printk("APIC never delivered???\n");
	if (accept_status)
		printk("APIC delivery error (%lx).\n", accept_status);

	/* Non-zero means either the send never completed or the APIC
	 * logged a delivery error. */
	return (send_status | accept_status);
}
#endif	/* WAKE_SECONDARY_VIA_NMI */

#ifdef WAKE_SECONDARY_VIA_INIT
/*
 * Boot one AP with the standard INIT / INIT-deassert / STARTUP sequence:
 * assert INIT on the target, wait, deassert it, then (only on integrated
 * APICs) send two STARTUP IPIs whose vector is the physical page of the
 * real-mode trampoline (start_eip >> 12).
 *
 * @phys_apicid: APIC ID of the CPU to wake (physical on most systems).
 * @start_eip:   physical address the AP starts executing at; must be
 *               page-aligned since only the page number is sent.
 *
 * Returns 0 on success, otherwise the OR of the ICR send status and the
 * ESR accept status.
 */
static int __init
wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
{
	unsigned long send_status = 0, accept_status = 0;
	int maxlvt, timeout, num_starts, j;

	/*
	 * Be paranoid about clearing APIC errors.
	 */
	if (APIC_INTEGRATED(apic_version[phys_apicid])) {
		apic_read_around(APIC_SPIV);
		apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
	}

	Dprintk("Asserting INIT.\n");

	/*
	 * Turn INIT on target chip
	 */
	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

	/*
	 * Send IPI
	 */
	apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_INT_ASSERT
				| APIC_DM_INIT);

	Dprintk("Waiting for send to finish...\n");
	timeout = 0;
	do {
		Dprintk("+");
		udelay(100);
		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
	} while (send_status && (timeout++ < 1000));

	/* Hold INIT asserted for 10ms before deasserting. */
	mdelay(10);

	Dprintk("Deasserting INIT.\n");

	/* Target chip */
	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

	/* Send IPI */
	apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT);

	Dprintk("Waiting for send to finish...\n");
	timeout = 0;
	do {
		Dprintk("+");
		udelay(100);
		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
	} while (send_status && (timeout++ < 1000));

	/* Let waiters (see do_boot_cpu/trampoline path) know INIT is done. */
	atomic_set(&init_deasserted, 1);

	/*
	 * Should we send STARTUP IPIs ?
	 *
	 * Determine this based on the APIC version.
	 * If we don't have an integrated APIC, don't send the STARTUP IPIs.
	 */
	if (APIC_INTEGRATED(apic_version[phys_apicid]))
		num_starts = 2;
	else
		num_starts = 0;

	/*
	 * Run STARTUP IPI loop.
	 */
	Dprintk("#startup loops: %d.\n", num_starts);

	maxlvt = get_maxlvt();

	for (j = 1; j <= num_starts; j++) {
		Dprintk("Sending STARTUP #%d.\n",j);
		/* Clear stale APIC errors before each attempt. */
		apic_read_around(APIC_SPIV);
		apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
		Dprintk("After apic_write.\n");

		/*
		 * STARTUP IPI
		 */

		/* Target chip */
		apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

		/* Boot on the stack */
		/* Kick the second */
		apic_write_around(APIC_ICR, APIC_DM_STARTUP
					| (start_eip >> 12));

		/*
		 * Give the other CPU some time to accept the IPI.
		 */
		udelay(300);

		Dprintk("Startup point 1.\n");

		Dprintk("Waiting for send to finish...\n");
		timeout = 0;
		do {
			Dprintk("+");
			udelay(100);
			send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
		} while (send_status && (timeout++ < 1000));

		/*
		 * Give the other CPU some time to accept the IPI.
		 */
		udelay(200);
		/*
		 * Due to the Pentium erratum 3AP.
		 */
		if (maxlvt > 3) {
			apic_read_around(APIC_SPIV);
			apic_write(APIC_ESR, 0);
		}
		accept_status = (apic_read(APIC_ESR) & 0xEF);
		if (send_status || accept_status)
			break;
	}
	Dprintk("After Startup.\n");

	if (send_status)
		printk("APIC never delivered???\n");
	if (accept_status)
		printk("APIC delivery error (%lx).\n", accept_status);

	return (send_status | accept_status);
}
#endif	/* WAKE_SECONDARY_VIA_INIT */

extern cpumask_t cpu_initialized;
/*
 * Fork an idle task for the new CPU, point it at start_secondary, set up
 * the warm-reset vector and trampoline, fire the wakeup IPI sequence,
 * then wait up to 5s for the AP to check in via cpu_callin_map.
 * On failure, undoes the bookkeeping so the CPU slot can be reclaimed.
 */
static int __init do_boot_cpu(int apicid)
/*
 * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
 * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
 * Returns zero if CPU booted OK, else error code from wakeup_secondary_cpu.
 */
{
	struct task_struct *idle;
	unsigned long boot_error;
	int timeout, cpu;
	unsigned long start_eip;
	unsigned short nmi_high = 0, nmi_low = 0;

	/* Allocate the next logical CPU number (BSP is 0). */
	cpu = ++cpucount;
	/*
	 * We can't use kernel_thread since we must avoid to
	 * reschedule the child.
	 */
	idle = fork_idle(cpu);
	if (IS_ERR(idle))
		panic("failed fork for CPU %d", cpu);
	idle->thread.eip = (unsigned long) start_secondary;
	/* start_eip had better be page-aligned! */
	start_eip = setup_trampoline();

	/* So we see what's up   */
	printk("Booting processor %d/%d eip %lx\n", cpu, apicid, start_eip);
	/* Stack for startup_32 can be just as for start_secondary onwards */
	stack_start.esp = (void *) idle->thread.esp;

	irq_ctx_init(cpu);

	/*
	 * This grunge runs the startup process for
	 * the targeted processor.
	 */

	atomic_set(&init_deasserted, 0);

	Dprintk("Setting warm reset code and vector.\n");

	store_NMI_vector(&nmi_high, &nmi_low);

	smpboot_setup_warm_reset_vector(start_eip);

	/*
	 * Starting actual IPI sequence...
	 */
	boot_error = wakeup_secondary_cpu(apicid, start_eip);

	if (!boot_error) {
		/*
		 * allow APs to start initializing.
		 */
		Dprintk("Before Callout %d.\n", cpu);
		cpu_set(cpu, cpu_callout_map);
		Dprintk("After Callout %d.\n", cpu);

		/*
		 * Wait 5s total for a response (50000 x 100us).
		 */
		for (timeout = 0; timeout < 50000; timeout++) {
			if (cpu_isset(cpu, cpu_callin_map))
				break;	/* It has booted */
			udelay(100);
		}

		if (cpu_isset(cpu, cpu_callin_map)) {
			/* number CPUs logically, starting from 1 (BSP is 0) */
			Dprintk("OK.\n");
			printk("CPU%d: ", cpu);
			print_cpu_info(&cpu_data[cpu]);
			Dprintk("CPU has booted.\n");
		} else {
			boot_error= 1;
			/* 0xA5 is the magic the trampoline writes on entry. */
			if (*((volatile unsigned char *)trampoline_base)
					== 0xA5)
				/* trampoline started but...? */
				printk("Stuck ??\n");
			else
				/* trampoline code not run */
				printk("Not responding.\n");
			inquire_remote_apic(apicid);
		}
	}
	x86_cpu_to_apicid[cpu] = apicid;
	if (boot_error) {
		/* Try to put things back the way they were before ... */
		unmap_cpu_to_logical_apicid(cpu);
		cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
		cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */
		cpucount--;
	}

	/* mark "stuck" area as not stuck */
	*((volatile unsigned long *)trampoline_base) = 0;

	return boot_error;
}

cycles_t cacheflush_time;
unsigned long cache_decay_ticks;

/*
 * Estimate how long a fully memory-bound task needs to flush the
 * CPU-local cache, and derive the scheduler's migration decay timeout
 * from it.  Without a TSC (cpu_khz == 0) the estimate is disabled.
 */
static void smp_tune_scheduling (void)
{
	unsigned long cachesize;       /* kB   */
	unsigned long bandwidth = 350; /* MB/s */
	/*
	 * Rough estimation for SMP scheduling, this is the number of
	 * cycles it takes for a fully memory-limited process to flush
	 * the SMP-local cache.
	 *
	 * (For a P5 this pretty much means we will choose another idle
	 *  CPU almost always at wakeup time (this is due to the small
	 *  L1 cache), on PIIs it's around 50-100 usecs, depending on
	 *  the cache size)
	 */

	if (!cpu_khz) {
		/*
		 * this basically disables processor-affinity
		 * scheduling on SMP without a TSC.
		 */
		cacheflush_time = 0;
		return;
	} else {
		cachesize = boot_cpu_data.x86_cache_size;
		if (cachesize == -1) {
			cachesize = 16; /* Pentiums, 2x8kB cache */
			bandwidth = 100;
		}

		/* cycles = kHz * kB-converted-to-bytes / (MB/s) */
		cacheflush_time = (cpu_khz>>10) * (cachesize<<10) / bandwidth;
	}

	cache_decay_ticks = (long)cacheflush_time/cpu_khz + 1;

	printk("per-CPU timeslice cutoff: %ld.%02ld usecs.\n",
		(long)cacheflush_time/(cpu_khz/1000),
		((long)cacheflush_time*100/(cpu_khz/1000)) % 100);
	printk("task migration cache decay timeout: %ld msecs.\n",
		cache_decay_ticks);
}

/*
 * Cycle through the processors sending APIC IPIs to boot each.
 */
static int boot_cpu_logical_apicid;
/* Where the IO area was mapped on multiquad, always 0 otherwise */
void *xquad_portio;

cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;

/*
 * Bring up the application processors: record the boot CPU's identity,
 * validate the SMP/APIC configuration (bailing out to dummy-APIC
 * uniprocessor mode on any failure), then walk the physical present map
 * and do_boot_cpu() each AP.  Afterwards build cpu_sibling_map[], start
 * the NMI watchdog/IO-APIC/APIC clock, and sync the TSC with the APs.
 */
static void __init smp_boot_cpus(unsigned int max_cpus)
{
	int apicid, cpu, bit, kicked;
	unsigned long bogosum = 0;

	/*
	 * Setup boot CPU information
	 */
	smp_store_cpu_info(0); /* Final full version of the data */
	printk("CPU%d: ", 0);
	print_cpu_info(&cpu_data[0]);

	boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
	boot_cpu_logical_apicid = logical_smp_processor_id();
	x86_cpu_to_apicid[0] = boot_cpu_physical_apicid;

	current_thread_info()->cpu = 0;
	smp_tune_scheduling();
	cpus_clear(cpu_sibling_map[0]);
	cpu_set(0, cpu_sibling_map[0]);

	/*
	 * If we couldn't find an SMP configuration at boot time,
	 * get out of here now!
	 */
	if (!smp_found_config && !acpi_lapic) {
		printk(KERN_NOTICE "SMP motherboard not detected.\n");
		smpboot_clear_io_apic_irqs();
		phys_cpu_present_map = physid_mask_of_physid(0);
		if (APIC_init_uniprocessor())
			printk(KERN_NOTICE "Local APIC not detected."
					   " Using dummy APIC emulation.\n");
		map_cpu_to_logical_apicid();
		return;
	}

	/*
	 * Should not be necessary because the MP table should list the boot
	 * CPU too, but we do it for the sake of robustness anyway.
	 * Makes no sense to do this check in clustered apic mode, so skip it
	 */
	if (!check_phys_apicid_present(boot_cpu_physical_apicid)) {
		printk("weird, boot CPU (#%d) not listed by the BIOS.\n",
				boot_cpu_physical_apicid);
		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}

	/*
	 * If we couldn't find a local APIC, then get out of here now!
	 */
	if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) && !cpu_has_apic) {
		printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
			boot_cpu_physical_apicid);
		printk(KERN_ERR "... forcing use of dummy APIC emulation. "
			"(tell your hw vendor)\n");
		smpboot_clear_io_apic_irqs();
		phys_cpu_present_map = physid_mask_of_physid(0);
		return;
	}

	verify_local_APIC();

	/*
	 * If SMP should be disabled, then really disable it!
	 */
	if (!max_cpus) {
		smp_found_config = 0;
		printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n");
		smpboot_clear_io_apic_irqs();
		phys_cpu_present_map = physid_mask_of_physid(0);
		return;
	}

	connect_bsp_APIC();
	setup_local_APIC();
	map_cpu_to_logical_apicid();

	setup_portio_remap();

	/*
	 * Scan the CPU present map and fire up the other CPUs via do_boot_cpu
	 *
	 * In clustered apic mode, phys_cpu_present_map is a constructed thus:
	 * bits 0-3 are quad0, 4-7 are quad1, etc. A perverse twist on the 
	 * clustered apic ID.
	 */
	Dprintk("CPU present map: %lx\n", physids_coerce(phys_cpu_present_map));

	/* kicked counts CPUs successfully started, including the BSP. */
	kicked = 1;
	for (bit = 0; kicked < NR_CPUS && bit < MAX_APICS; bit++) {
		apicid = cpu_present_to_apicid(bit);
		/*
		 * Don't even attempt to start the boot CPU!
		 */
		if ((apicid == boot_cpu_apicid) || (apicid == BAD_APICID))
			continue;

		if (!check_apicid_present(bit))
			continue;
		/* Respect the maxcpus= limit. */
		if (max_cpus <= cpucount+1)
			continue;

		if (do_boot_cpu(apicid))
			printk("CPU #%d not responding - cannot use it.\n",
								apicid);
		else
			++kicked;
	}

	/*
	 * Cleanup possible dangling ends...
	 */
	smpboot_restore_warm_reset_vector();

	/*
	 * Allow the user to impress friends.
	 */
	Dprintk("Before bogomips.\n");
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_isset(cpu, cpu_callout_map))
			bogosum += cpu_data[cpu].loops_per_jiffy;
	printk(KERN_INFO
		"Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
		cpucount+1,
		bogosum/(500000/HZ),
		(bogosum/(5000/HZ))%100);

	Dprintk("Before bogocount - setting activated=1.\n");

	if (smp_b_stepping)
		printk(KERN_WARNING "WARNING: SMP operation may be unreliable with B stepping processors.\n");

	/*
	 * Don't taint if we are running SMP kernel on a single non-MP
	 * approved Athlon
	 */
	if (tainted & TAINT_UNSAFE_SMP) {
		if (cpucount)
			printk (KERN_INFO "WARNING: This combination of AMD processors is not suitable for SMP.\n");
		else
			tainted &= ~TAINT_UNSAFE_SMP;
	}

	Dprintk("Boot done.\n");

	/*
	 * construct cpu_sibling_map[], so that we can tell sibling CPUs
	 * efficiently.
	 */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		cpus_clear(cpu_sibling_map[cpu]);

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		int siblings = 0;
		int i;
		if (!cpu_isset(cpu, cpu_callout_map))
			continue;
		if (smp_num_siblings > 1) {
			/* HT: CPUs sharing phys_proc_id are siblings. */
			for (i = 0; i < NR_CPUS; i++) {
				if (!cpu_isset(i, cpu_callout_map))
					continue;
				if (phys_proc_id[cpu] == phys_proc_id[i]) {
					siblings++;
					cpu_set(i, cpu_sibling_map[cpu]);
				}
			}
		} else {
			/* No HT: each CPU is its own sibling. */
			siblings++;
			cpu_set(cpu, cpu_sibling_map[cpu]);
		}

		if (siblings != smp_num_siblings)
			printk(KERN_WARNING "WARNING: %d siblings found for CPU%d, should be %d\n", siblings, cpu, smp_num_siblings);
	}

	if (nmi_watchdog == NMI_LOCAL_APIC)
		check_nmi_watchdog();

	smpboot_setup_io_apic();

	setup_boot_APIC_clock();

	/*
	 * Synchronize the TSC with the AP
	 */
	if (cpu_has_tsc && cpucount && cpu_khz)
		synchronize_tsc_bp();
}

/* These are wrappers to interface to the new boot process.  Someone
   who understands all this stuff should rewrite it properly.
 --RR 15/Jul/02 */
/* Generic hotplug entry point: boot all APs (see smp_boot_cpus above). */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	smp_boot_cpus(max_cpus);
}

/* Mark the boot CPU as online and called-out before APs are started. */
void __devinit smp_prepare_boot_cpu(void)
{
	cpu_set(smp_processor_id(), cpu_online_map);
	cpu_set(smp_processor_id(), cpu_callout_map);
}

/*
 * Release an already-booted AP into the scheduler.
 * Returns -ENOSYS if it was already commenced, -EIO if it never
 * checked in during boot, 0 once the CPU shows up online.
 */
int __devinit __cpu_up(unsigned int cpu)
{
	/* This only works at boot for x86.  See "rewrite" above. */
	if (cpu_isset(cpu, smp_commenced_mask)) {
		local_irq_enable();
		return -ENOSYS;
	}

	/* In case one didn't come up */
	if (!cpu_isset(cpu, cpu_callin_map)) {
		local_irq_enable();
		return -EIO;
	}

	local_irq_enable();
	/* Unleash the CPU! */
	cpu_set(cpu, smp_commenced_mask);
	/* Spin until the AP sets itself in cpu_online_map; the barrier
	 * forces the mask to be re-read each iteration. */
	while (!cpu_isset(cpu, cpu_online_map))
		mb();
	return 0;
}

/* Final SMP bringup fixups once every CPU is up. */
void __init smp_cpus_done(unsigned int max_cpus)
{
#ifdef CONFIG_X86_IO_APIC
	setup_ioapic_dest();
#endif
	zap_low_mappings();
	/*
	 * Disable executability of the SMP trampoline:
	 */
	set_kernel_exec((unsigned long)trampoline_base, trampoline_exec);
}

/* Install the IDT gates for the cross-CPU IPIs (and IRQ0). */
void __init smp_intr_init(void)
{
	/*
	 * IRQ0 must be given a fixed assignment and initialized,
	 * because it's used before the IO-APIC is set up.
	 */
	set_intr_gate(FIRST_DEVICE_VECTOR, interrupt[0]);

	/*
	 * The reschedule interrupt is a CPU-to-CPU reschedule-helper
	 * IPI, driven by wakeup.
	 */
	set_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);

	/* IPI for invalidation */
	set_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt);

	/* IPI for generic function call */
	set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
}

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?