
📄 smp.c

📁 This Linux source code is quite comprehensive and essentially complete. It is written in C. Owing to time constraints I have not tested it myself, but even purely as reference material it is very good.
💻 C
📖 Page 1 of 2
/*
** SMP Support
**
** Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
** Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
** Copyright (C) 2001 Grant Grundler <grundler@parisc-linux.org>
**
** Lots of stuff stolen from arch/alpha/kernel/smp.c
** ...and then parisc stole from arch/ia64/kernel/smp.c. Thanks David! :^)
**
** Thanks to John Curry and Ullas Ponnadi. I learned alot from their work.
** -grant (1/12/2001)
**
**	This program is free software; you can redistribute it and/or modify
**	it under the terms of the GNU General Public License as published by
**      the Free Software Foundation; either version 2 of the License, or
**      (at your option) any later version.
*/
#define __KERNEL_SYSCALLS__
#undef ENTRY_SYS_CPUS	/* syscall support for iCOD-like functionality */

#include <linux/autoconf.h>

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/delay.h>

#include <asm/system.h>
#include <asm/atomic.h>
#include <asm/bitops.h>
#include <asm/current.h>
#include <asm/delay.h>
#include <asm/pgalloc.h>	/* for flush_tlb_all() proto/macro */

#include <asm/io.h>
#include <asm/irq.h>		/* for CPU_IRQ_REGION and friends */
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/unistd.h>

#define kDEBUG 0

spinlock_t pa_dbit_lock = SPIN_LOCK_UNLOCKED;

spinlock_t smp_lock = SPIN_LOCK_UNLOCKED;

volatile struct task_struct *smp_init_current_idle_task;

spinlock_t kernel_flag = SPIN_LOCK_UNLOCKED;

static volatile int smp_commenced = 0;   /* Set when the idlers are all forked */
static volatile int cpu_now_booting = 0;      /* track which CPU is booting */
volatile unsigned long cpu_online_map = 0;   /* Bitmap of online CPUs */
#define IS_LOGGED_IN(cpunum) (test_bit(cpunum, (atomic_t *)&cpu_online_map))

int smp_num_cpus = 1;
int smp_threads_ready = 0;
static int max_cpus = -1;			     /* Command line */

struct smp_call_struct {
	void (*func) (void *info);
	void *info;
	long wait;
	atomic_t unstarted_count;
	atomic_t unfinished_count;
};
static volatile struct smp_call_struct *smp_call_function_data;

enum ipi_message_type {
	IPI_NOP=0,
	IPI_RESCHEDULE=1,
	IPI_CALL_FUNC,
	IPI_CPU_START,
	IPI_CPU_STOP,
	IPI_CPU_TEST
};


/********** SMP inter processor interrupt and communication routines */

#undef PER_CPU_IRQ_REGION
#ifdef PER_CPU_IRQ_REGION
/* XXX REVISIT Ignore for now.
**    *May* need this "hook" to register IPI handler
**    once we have perCPU ExtIntr switch tables.
*/
static void
ipi_init(int cpuid)
{
	/* If CPU is present ... */
#ifdef ENTRY_SYS_CPUS
	/* *and* running (not stopped) ... */
#error iCOD support wants state checked here.
#endif

#error verify IRQ_OFFSET(IPI_IRQ) is ipi_interrupt() in new IRQ region

	if(IS_LOGGED_IN(cpuid) )
	{
		switch_to_idle_task(current);
	}

	return;
}
#endif


/*
** Yoink this CPU from the runnable list...
**
*/
static void
halt_processor(void)
{
#ifdef ENTRY_SYS_CPUS
#error halt_processor() needs rework
/*
** o migrate I/O interrupts off this CPU.
** o leave IPI enabled - __cli() will disable IPI.
** o leave CPU in online map - just change the state
*/
	cpu_data[this_cpu].state = STATE_STOPPED;
	mark_bh(IPI_BH);
#else
	/* REVISIT : redirect I/O Interrupts to another CPU? */
	/* REVISIT : does PM *know* this CPU isn't available? */
	clear_bit(smp_processor_id(), (void *)&cpu_online_map);
	__cli();
	for (;;)
		;
#endif
}


void
ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	int this_cpu = smp_processor_id();
	struct cpuinfo_parisc *p = &cpu_data[this_cpu];
	unsigned long ops;
	unsigned long flags;

	/* Count this now; we may make a call that never returns. */
	p->ipi_count++;

	mb();	/* Order interrupt and bit testing. */

	for (;;) {
		spin_lock_irqsave(&(p->lock),flags);
		ops = p->pending_ipi;
		p->pending_ipi = 0;
		spin_unlock_irqrestore(&(p->lock),flags);

		mb(); /* Order bit clearing and data access. */

		if (!ops)
		    break;

		while (ops) {
			unsigned long which = ffz(~ops);

			switch (which) {
			case IPI_RESCHEDULE:
#if (kDEBUG>=100)
				printk(KERN_DEBUG "CPU%d IPI_RESCHEDULE\n",this_cpu);
#endif /* kDEBUG */
				ops &= ~(1 << IPI_RESCHEDULE);
				/*
				 * Reschedule callback.  Everything to be
				 * done is done by the interrupt return path.
				 */
				break;

			case IPI_CALL_FUNC:
#if (kDEBUG>=100)
				printk(KERN_DEBUG "CPU%d IPI_CALL_FUNC\n",this_cpu);
#endif /* kDEBUG */
				ops &= ~(1 << IPI_CALL_FUNC);
				{
					volatile struct smp_call_struct *data;
					void (*func)(void *info);
					void *info;
					int wait;

					data = smp_call_function_data;
					func = data->func;
					info = data->info;
					wait = data->wait;

					mb();
					atomic_dec ((atomic_t *)&data->unstarted_count);

					/* At this point, *data can't
					 * be relied upon.
					 */

					(*func)(info);

					/* Notify the sending CPU that the
					 * task is done.
					 */
					mb();
					if (wait)
						atomic_dec ((atomic_t *)&data->unfinished_count);
				}
				break;

			case IPI_CPU_START:
#if (kDEBUG>=100)
				printk(KERN_DEBUG "CPU%d IPI_CPU_START\n",this_cpu);
#endif /* kDEBUG */
				ops &= ~(1 << IPI_CPU_START);
#ifdef ENTRY_SYS_CPUS
				p->state = STATE_RUNNING;
#endif
				break;

			case IPI_CPU_STOP:
#if (kDEBUG>=100)
				printk(KERN_DEBUG "CPU%d IPI_CPU_STOP\n",this_cpu);
#endif /* kDEBUG */
				ops &= ~(1 << IPI_CPU_STOP);
#ifdef ENTRY_SYS_CPUS
#else
				halt_processor();
#endif
				break;

			case IPI_CPU_TEST:
#if (kDEBUG>=100)
				printk(KERN_DEBUG "CPU%d is alive!\n",this_cpu);
#endif /* kDEBUG */
				ops &= ~(1 << IPI_CPU_TEST);
				break;

			default:
				printk(KERN_CRIT "Unknown IPI num on CPU%d: %lu\n",
					this_cpu, which);
				ops &= ~(1 << which);
				return;
			} /* Switch */
		} /* while (ops) */
	}
	return;
}


static inline void
ipi_send(int cpu, enum ipi_message_type op)
{
	struct cpuinfo_parisc *p = &cpu_data[cpu];
	unsigned long flags;

	spin_lock_irqsave(&(p->lock),flags);
	p->pending_ipi |= 1 << op;
	__raw_writel(IRQ_OFFSET(IPI_IRQ), cpu_data[cpu].hpa);
	spin_unlock_irqrestore(&(p->lock),flags);
}


static inline void
send_IPI_single(int dest_cpu, enum ipi_message_type op)
{
	if (dest_cpu == NO_PROC_ID) {
		BUG();
		return;
	}

	ipi_send(dest_cpu, op);
}

static inline void
send_IPI_allbutself(enum ipi_message_type op)
{
	int i;

	for (i = 0; i < smp_num_cpus; i++) {
		if (i != smp_processor_id())
			send_IPI_single(i, op);
	}
}

inline void
smp_send_stop(void)	{ send_IPI_allbutself(IPI_CPU_STOP); }

static inline void
smp_send_start(void)	{ send_IPI_allbutself(IPI_CPU_START); }

void
smp_send_reschedule(int cpu) { send_IPI_single(cpu, IPI_RESCHEDULE); }


/**
 * Run a function on all other CPUs.
 *  <func>	The function to run. This must be fast and non-blocking.
 *  <info>	An arbitrary pointer to pass to the function.
 *  <retry>	If true, keep retrying until ready.
 *  <wait>	If true, wait until function has completed on other CPUs.
 *  [RETURNS]   0 on success, else a negative status code.
 *
 * Does not return until remote CPUs are nearly ready to execute <func>
 * or have executed.
 */

int
smp_call_function (void (*func) (void *info), void *info, int retry, int wait)
{
	struct smp_call_struct data;
	long timeout;
	static spinlock_t lock = SPIN_LOCK_UNLOCKED;

	data.func = func;
	data.info = info;
	data.wait = wait;
	atomic_set(&data.unstarted_count, smp_num_cpus - 1);
	atomic_set(&data.unfinished_count, smp_num_cpus - 1);

	if (retry) {
		spin_lock (&lock);
		while (smp_call_function_data != 0)
			barrier();
	}
	else {
		spin_lock (&lock);
		if (smp_call_function_data) {
			spin_unlock (&lock);
			return -EBUSY;
		}
	}

	smp_call_function_data = &data;
	spin_unlock (&lock);

	/*  Send a message to all other CPUs and wait for them to respond  */
	send_IPI_allbutself(IPI_CALL_FUNC);

	/*  Wait for response  */
	timeout = jiffies + HZ;
	while ( (atomic_read (&data.unstarted_count) > 0) &&
		time_before (jiffies, timeout) )
		barrier ();

	/* We either got one or timed out. Release the lock */

	mb();
	smp_call_function_data = NULL;
	if (atomic_read (&data.unstarted_count) > 0) {
		printk(KERN_CRIT "SMP CALL FUNCTION TIMED OUT! (cpu=%d)\n",
		      smp_processor_id());
		return -ETIMEDOUT;
	}

	while (wait && atomic_read (&data.unfinished_count) > 0)
		barrier ();

	return 0;
}


/*
 *	Setup routine for controlling SMP activation
 *
 *	Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 *	activation entirely (the MPS table probe still happens, though).
 *
 *	Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 *	greater than 0, limits the maximum number of CPUs activated in
 *	SMP mode to <NUM>.
 */

static int __init nosmp(char *str)
{
	max_cpus = 0;
	return 1;
}

__setup("nosmp", nosmp);

static int __init maxcpus(char *str)
{
	get_option(&str, &max_cpus);
	return 1;
}

__setup("maxcpus=", maxcpus);
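
For orientation, here is a minimal sketch of how a caller might use the smp_call_function() entry point documented above. It is not part of smp.c: the per_cpu_hits array and the drain_*() functions are hypothetical names invented for illustration, and the sketch assumes a 2.4-era kernel context where <linux/smp.h> and <asm/atomic.h> are available.

#include <linux/smp.h>		/* smp_call_function(), smp_processor_id() */
#include <asm/atomic.h>		/* atomic_t, atomic_add(), ATOMIC_INIT */

/* Hypothetical per-CPU statistics, one slot per possible CPU. */
static int per_cpu_hits[NR_CPUS];

/* Runs on every *other* CPU from IPI_CALL_FUNC context, so it must be
 * fast and non-blocking, as the comment block above demands. */
static void drain_local_hits(void *info)
{
	atomic_t *total = (atomic_t *)info;

	atomic_add(per_cpu_hits[smp_processor_id()], total);
}

static int drain_all_hits(atomic_t *total)
{
	int ret;

	/* retry=1: busy-wait if another caller currently owns
	 * smp_call_function_data; wait=1: also block until every
	 * remote CPU has finished running drain_local_hits(). */
	ret = smp_call_function(drain_local_hits, total, 1, 1);
	if (ret != 0)
		return ret;	/* -ETIMEDOUT: a remote CPU never started */

	/* smp_call_function() skips the calling CPU, so run the
	 * function locally to cover all CPUs. */
	drain_local_hits(total);
	return 0;
}

The last step reflects a design point visible in the code above: both unstarted_count and unfinished_count are initialized to smp_num_cpus - 1 and the IPI is sent with send_IPI_allbutself(), so func never runs on the calling CPU unless the caller invokes it directly.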
