⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 smp.c

📁 ARM 嵌入式 系统 设计与实例开发 实验教材 二源码
💻 C
📖 第 1 页 / 共 2 页
字号:
/* *	Intel SMP support routines. * *	(c) 1995 Alan Cox, Building #3 <alan@redhat.com> *	(c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com> * *	This code is released under the GNU General Public License version 2 or *	later. */#include <linux/init.h>#include <linux/mm.h>#include <linux/irq.h>#include <linux/delay.h>#include <linux/spinlock.h>#include <linux/smp_lock.h>#include <linux/kernel_stat.h>#include <linux/mc146818rtc.h>#include <linux/cache.h>#include <asm/mtrr.h>#include <asm/pgalloc.h>#include <asm/smpboot.h>/* *	Some notes on x86 processor bugs affecting SMP operation: * *	Pentium, Pentium Pro, II, III (and all CPUs) have bugs. *	The Linux implications for SMP are handled as follows: * *	Pentium III / [Xeon] *		None of the E1AP-E3AP errata are visible to the user. * *	E1AP.	see PII A1AP *	E2AP.	see PII A2AP *	E3AP.	see PII A3AP * *	Pentium II / [Xeon] *		None of the A1AP-A3AP errata are visible to the user. * *	A1AP.	see PPro 1AP *	A2AP.	see PPro 2AP *	A3AP.	see PPro 7AP * *	Pentium Pro *		None of 1AP-9AP errata are visible to the normal user, *	except occasional delivery of 'spurious interrupt' as trap #15. *	This is very rare and a non-problem. * *	1AP.	Linux maps APIC as non-cacheable *	2AP.	worked around in hardware *	3AP.	fixed in C0 and above steppings microcode update. *		Linux does not use excessive STARTUP_IPIs. *	4AP.	worked around in hardware *	5AP.	symmetric IO mode (normal Linux operation) not affected. *		'noapic' mode has vector 0xf filled out properly. *	6AP.	'noapic' mode might be affected - fixed in later steppings *	7AP.	We do not assume writes to the LVT deassering IRQs *	8AP.	We do not enable low power mode (deep sleep) during MP bootup *	9AP.	We do not use mixed mode * *	Pentium *		There is a marginal case where REP MOVS on 100MHz SMP *	machines with B stepping processors can fail. XXX should provide *	an L1cache=Writethrough or L1cache=off option. * *		B stepping CPUs may hang. There are hardware work arounds *	for this. 
We warn about it in case your board doesn't have the work
 *	arounds. Basically that's so I can tell anyone with a B stepping
 *	CPU and SMP problems "tough".
 *
 *	Specific items [From Pentium Processor Specification Update]
 *
 *	1AP.	Linux doesn't use remote read
 *	2AP.	Linux doesn't trust APIC errors
 *	3AP.	We work around this
 *	4AP.	Linux never generated 3 interrupts of the same priority
 *		to cause a lost local interrupt.
 *	5AP.	Remote read is never used
 *	6AP.	not affected - worked around in hardware
 *	7AP.	not affected - worked around in hardware
 *	8AP.	worked around in hardware - we get explicit CS errors if not
 *	9AP.	only 'noapic' mode affected. Might generate spurious
 *		interrupts, we log only the first one and count the
 *		rest silently.
 *	10AP.	not affected - worked around in hardware
 *	11AP.	Linux reads the APIC between writes to avoid this, as per
 *		the documentation. Make sure you preserve this as it affects
 *		the C stepping chips too.
 *	12AP.	not affected - worked around in hardware
 *	13AP.	not affected - worked around in hardware
 *	14AP.	we always deassert INIT during bootup
 *	15AP.	not affected - worked around in hardware
 *	16AP.	not affected - worked around in hardware
 *	17AP.	not affected - worked around in hardware
 *	18AP.	not affected - worked around in hardware
 *	19AP.	not affected - worked around in BIOS
 *
 *	If this sounds worrying believe me these bugs are either ___RARE___,
 *	or are signal timing bugs worked around in hardware and there's
 *	about nothing of note with C stepping upwards.
 */

/*
 * The 'big kernel lock': the global lock taken by lock_kernel().
 * Cacheline-aligned under SMP so contention on it does not
 * false-share with neighbouring data.
 */
spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;

/*
 * Per-CPU TLB state: every CPU starts out attached to init_mm with a
 * state of 0 (i.e. not TLBSTATE_OK — see leave_mm() below, which
 * BUGs on TLBSTATE_OK).
 */
struct tlb_state cpu_tlbstate[NR_CPUS] = {[0 ... NR_CPUS-1] = { &init_mm, 0 }};

/*
 * the following functions deal with sending IPIs between CPUs.
 *
 * We use 'broadcast', CPU->CPU IPIs and self-IPIs too.
*/

/*
 * Build the low 32 bits of the APIC Interrupt Command Register:
 * fixed delivery mode, logical destination mode, plus the requested
 * destination-shortcut bits and interrupt vector.
 */
static inline int __prepare_ICR (unsigned int shortcut, int vector)
{
	return APIC_DM_FIXED | shortcut | vector | APIC_DEST_LOGICAL;
}

/*
 * Build the high 32 bits of the ICR (the destination field) from a
 * logical CPU mask.
 */
static inline int __prepare_ICR2 (unsigned int mask)
{
	return SET_APIC_DEST_FIELD(mask);
}

/*
 * Send an IPI using one of the ICR destination shortcuts (self,
 * all-including-self, all-but-self) — no ICR2 write needed.
 */
static inline void __send_IPI_shortcut(unsigned int shortcut, int vector)
{
	/*
	 * Subtle. In the case of the 'never do double writes' workaround
	 * we have to lock out interrupts to be safe.  As we don't care
	 * of the value read we use an atomic rmw access to avoid costly
	 * cli/sti.  Otherwise we use an even cheaper single atomic write
	 * to the APIC.
	 */
	unsigned int cfg;

	/*
	 * Wait for idle.
	 */
	apic_wait_icr_idle();

	/*
	 * No need to touch the target chip field
	 */
	cfg = __prepare_ICR(shortcut, vector);

	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	apic_write_around(APIC_ICR, cfg);
}

/* Send the given vector to the local CPU itself. */
void send_IPI_self(int vector)
{
	__send_IPI_shortcut(APIC_DEST_SELF, vector);
}

/*
 * Send an IPI to every CPU in 'mask' with a single ICR2/ICR write
 * pair (flat logical addressing).  Interrupts are disabled around the
 * two ICR writes so the sequence is not torn.
 */
static inline void send_IPI_mask_bitmask(int mask, int vector)
{
	unsigned long cfg;
	unsigned long flags;

	__save_flags(flags);
	__cli();

	/*
	 * Wait for idle.
	 */
	apic_wait_icr_idle();

	/*
	 * prepare target chip field
	 */
	cfg = __prepare_ICR2(mask);
	apic_write_around(APIC_ICR2, cfg);

	/*
	 * program the ICR 
	 */
	cfg = __prepare_ICR(0, vector);

	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	apic_write_around(APIC_ICR, cfg);

	__restore_flags(flags);
}

/*
 * Clustered-APIC variant: one unicast ICR2/ICR write pair per CPU set
 * in 'mask', since clustered addressing cannot express an arbitrary
 * mask in one shot.
 */
static inline void send_IPI_mask_sequence(int mask, int vector)
{
	unsigned long cfg, flags;
	unsigned int query_cpu, query_mask;

	/*
	 * Hack. The clustered APIC addressing mode doesn't allow us to send 
	 * to an arbitrary mask, so I do a unicasts to each CPU instead. This 
	 * should be modified to do 1 message per cluster ID - mbligh
	 */ 

	__save_flags(flags);
	__cli();

	for (query_cpu = 0; query_cpu < NR_CPUS; ++query_cpu) {
		query_mask = 1 << query_cpu;
		if (query_mask & mask) {

			/*
			 * Wait for idle.
			 */
			apic_wait_icr_idle();

			/*
			 * prepare target chip field
			 */
			cfg = __prepare_ICR2(cpu_to_logical_apicid(query_cpu));
			apic_write_around(APIC_ICR2, cfg);

			/*
			 * program the ICR 
			 */
			cfg = __prepare_ICR(0, vector);

			/*
			 * Send the IPI. The write to APIC_ICR fires this off.
			 */
			apic_write_around(APIC_ICR, cfg);
		}
	}
	__restore_flags(flags);
}

/* Dispatch to the addressing-mode-appropriate mask sender. */
static inline void send_IPI_mask(int mask, int vector)
{
	if (clustered_apic_mode) 
		send_IPI_mask_sequence(mask, vector);
	else
		send_IPI_mask_bitmask(mask, vector);
}

/* Send 'vector' to every CPU except the current one. */
static inline void send_IPI_allbutself(int vector)
{
	/*
	 * if there are no other CPUs in the system then
	 * we get an APIC send error if we try to broadcast.
	 * thus we have to avoid sending IPIs in this case.
	 */
	if (!(smp_num_cpus > 1))
		return;

	if (clustered_apic_mode) {
		// Pointless. Use send_IPI_mask to do this instead
		int cpu;

		/* NOTE(review): this inner check is redundant — the early
		   return above already guarantees smp_num_cpus > 1. */
		if (smp_num_cpus > 1) {
			for (cpu = 0; cpu < smp_num_cpus; ++cpu) {
				if (cpu != smp_processor_id())
					send_IPI_mask(1 << cpu, vector);
			}
		}
	} else {
		__send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
		return;
	}
}

/* Send 'vector' to every CPU, including the current one. */
static inline void send_IPI_all(int vector)
{
	if (clustered_apic_mode) {
		// Pointless. Use send_IPI_mask to do this instead
		int cpu;

		for (cpu = 0; cpu < smp_num_cpus; ++cpu) {
			send_IPI_mask(1 << cpu, vector);
		}
	} else {
		__send_IPI_shortcut(APIC_DEST_ALLINC, vector);
	}
}

/*
 *	Smarter SMP flushing macros. 
 *		c/o Linus Torvalds.
 *
 *	These mean you can really definitely utterly forget about
 *	writing to user space from interrupts. (Its not allowed anyway).
 *
 *	Optimizations Manfred Spraul <manfred@colorfullife.com>
 */

/* Parameters of the TLB-flush IPI currently in flight, protected by
   tlbstate_lock; flush_cpumask is the set of CPUs yet to acknowledge. */
static volatile unsigned long flush_cpumask;
static struct mm_struct * flush_mm;
static unsigned long flush_va;
static spinlock_t tlbstate_lock = SPIN_LOCK_UNLOCKED;
#define FLUSH_ALL	0xffffffff

/*
 * We cannot call mmdrop() because we are in interrupt context, 
 * instead update mm->cpu_vm_mask.
*/

/*
 * Detach CPU 'cpu' from the mm it is lazily holding on to.
 *
 * Called from interrupt context (see comment above: mmdrop() is not
 * allowed here), so instead of dropping the reference we just clear
 * our bit in active_mm->cpu_vm_mask, which stops further flush IPIs
 * being aimed at this CPU for that mm.
 *
 * The CPU's tlb state must not be TLBSTATE_OK — i.e. it must not be
 * actively using the mm — otherwise this is a bug.
 *
 * (Fix: declaration specifiers reordered from 'static void inline' to
 * the idiomatic 'static inline void'; C99 requires/GCC prefers the
 * function specifier before the type.)
 */
static inline void leave_mm (unsigned long cpu)
{
	if (cpu_tlbstate[cpu].state == TLBSTATE_OK)
		BUG();
	clear_bit(cpu, &cpu_tlbstate[cpu].active_mm->cpu_vm_mask);
}

/*
 *
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -