smp.c (linux-2.6.15.6, C source, part 1 of 2)
		cpu_set(i, cpu_possible_map);
		cpu_count++;
	}

	smp_num_cpus = cpu_count;
}

void __devinit
smp_prepare_boot_cpu(void)
{
	/*
	 * Mark the boot cpu (current cpu) as both present and online
	 */
	cpu_set(smp_processor_id(), cpu_present_mask);
	cpu_set(smp_processor_id(), cpu_online_map);
}

int __devinit
__cpu_up(unsigned int cpu)
{
	smp_boot_one_cpu(cpu);

	return cpu_online(cpu) ? 0 : -ENOSYS;
}

void __init
smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for(cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_online(cpu))
			bogosum += cpu_data[cpu].loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       (bogosum + 2500) / (500000/HZ),
	       ((bogosum + 2500) / (5000/HZ)) % 100);
}

void
smp_percpu_timer_interrupt(struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	unsigned long user = user_mode(regs);
	struct cpuinfo_alpha *data = &cpu_data[cpu];

	/* Record kernel PC.  */
	profile_tick(CPU_PROFILING, regs);

	if (!--data->prof_counter) {
		/* We need to make like a normal interrupt -- otherwise
		   timer interrupts ignore the global interrupt lock,
		   which would be a Bad Thing.  */
		irq_enter();

		update_process_times(user);

		data->prof_counter = data->prof_multiplier;

		irq_exit();
	}
}

int __init
setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

static void
send_ipi_message(cpumask_t to_whom, enum ipi_message_type operation)
{
	int i;

	mb();
	for_each_cpu_mask(i, to_whom)
		set_bit(operation, &ipi_data[i].bits);

	mb();
	for_each_cpu_mask(i, to_whom)
		wripir(i);
}

/* Structure and data for smp_call_function.  This is designed to
   minimize static memory requirements.  Plus it looks cleaner.  */

struct smp_call_struct {
	void (*func) (void *info);
	void *info;
	long wait;
	atomic_t unstarted_count;
	atomic_t unfinished_count;
};

static struct smp_call_struct *smp_call_function_data;

/* Atomically drop data into a shared pointer.  The pointer is free if
   it is initially locked.  If retry, spin until free.  */

static int
pointer_lock (void *lock, void *data, int retry)
{
	void *old, *tmp;

	mb();
 again:
	/* Compare and swap with zero.  */
	asm volatile (
	"1:	ldq_l	%0,%1\n"
	"	mov	%3,%2\n"
	"	bne	%0,2f\n"
	"	stq_c	%2,%1\n"
	"	beq	%2,1b\n"
	"2:"
	: "=&r"(old), "=m"(*(void **)lock), "=&r"(tmp)
	: "r"(data)
	: "memory");

	if (old == 0)
		return 0;
	if (! retry)
		return -EBUSY;

	while (*(void **)lock)
		barrier();
	goto again;
}
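/*
 * Illustrative sketch (not from the original source): the ldq_l/stq_c
 * pair in pointer_lock() is Alpha's load-locked/store-conditional
 * primitive, used here as a compare-and-swap against NULL.  In terms of
 * two hypothetical helpers, the loop is roughly:
 *
 *	for (;;) {
 *		old = load_locked(lock);		// ldq_l
 *		if (old != NULL)
 *			break;				// slot already owned
 *		if (store_conditional(lock, data))	// stq_c
 *			return 0;			// we installed data
 *		// stq_c lost the reservation: retry from the load
 *	}
 */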
void
handle_ipi(struct pt_regs *regs)
{
	int this_cpu = smp_processor_id();
	unsigned long *pending_ipis = &ipi_data[this_cpu].bits;
	unsigned long ops;

#if 0
	DBGS(("handle_ipi: on CPU %d ops 0x%lx PC 0x%lx\n",
	      this_cpu, *pending_ipis, regs->pc));
#endif

	mb();	/* Order interrupt and bit testing. */
	while ((ops = xchg(pending_ipis, 0)) != 0) {
	  mb();	/* Order bit clearing and data access. */
	  do {
		unsigned long which;

		which = ops & -ops;
		ops &= ~which;
		which = __ffs(which);

		switch (which) {
		case IPI_RESCHEDULE:
			/* Reschedule callback.  Everything to be done
			   is done by the interrupt return path.  */
			break;

		case IPI_CALL_FUNC:
		    {
			struct smp_call_struct *data;
			void (*func)(void *info);
			void *info;
			int wait;

			data = smp_call_function_data;
			func = data->func;
			info = data->info;
			wait = data->wait;

			/* Notify the sending CPU that the data has been
			   received, and execution is about to begin.  */
			mb();
			atomic_dec (&data->unstarted_count);

			/* At this point the structure may be gone unless
			   wait is true.  */
			(*func)(info);

			/* Notify the sending CPU that the task is done.  */
			mb();
			if (wait) atomic_dec (&data->unfinished_count);
			break;
		    }

		case IPI_CPU_STOP:
			halt();

		default:
			printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
			       this_cpu, which);
			break;
		}
	  } while (ops);

	  mb();	/* Order data access and bit testing. */
	}

	cpu_data[this_cpu].ipi_count++;

	if (hwrpb->txrdy)
		recv_secondary_console_msg();
}

void
smp_send_reschedule(int cpu)
{
#ifdef DEBUG_IPI_MSG
	if (cpu == hard_smp_processor_id())
		printk(KERN_WARNING
		       "smp_send_reschedule: Sending IPI to self.\n");
#endif
	send_ipi_message(cpumask_of_cpu(cpu), IPI_RESCHEDULE);
}

void
smp_send_stop(void)
{
	cpumask_t to_whom = cpu_possible_map;
	cpu_clear(smp_processor_id(), to_whom);
#ifdef DEBUG_IPI_MSG
	if (hard_smp_processor_id() != boot_cpu_id)
		printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n");
#endif
	send_ipi_message(to_whom, IPI_CPU_STOP);
}

/*
 * Run a function on all other CPUs.
 *  <func>	The function to run. This must be fast and non-blocking.
 *  <info>	An arbitrary pointer to pass to the function.
 *  <retry>	If true, keep retrying until ready.
 *  <wait>	If true, wait until function has completed on other CPUs.
 *  [RETURNS]   0 on success, else a negative status code.
 *
 * Does not return until remote CPUs are nearly ready to execute <func>,
 * are executing it, or have already executed it.
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int
smp_call_function_on_cpu (void (*func) (void *info), void *info, int retry,
			  int wait, cpumask_t to_whom)
{
	struct smp_call_struct data;
	unsigned long timeout;
	int num_cpus_to_call;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	data.wait = wait;

	cpu_clear(smp_processor_id(), to_whom);
	num_cpus_to_call = cpus_weight(to_whom);

	atomic_set(&data.unstarted_count, num_cpus_to_call);
	atomic_set(&data.unfinished_count, num_cpus_to_call);

	/* Acquire the smp_call_function_data mutex.  */
	if (pointer_lock(&smp_call_function_data, &data, retry))
		return -EBUSY;

	/* Send a message to the requested CPUs.  */
	send_ipi_message(to_whom, IPI_CALL_FUNC);

	/* Wait for a minimal response.  */
	timeout = jiffies + HZ;
	while (atomic_read (&data.unstarted_count) > 0
	       && time_before (jiffies, timeout))
		barrier();

	/* If there's no response yet, log a message but allow a longer
	 * timeout period -- if we get a response this time, log
	 * a message saying when we got it.
	 */
	if (atomic_read(&data.unstarted_count) > 0) {
		long start_time = jiffies;
		printk(KERN_ERR "%s: initial timeout -- trying long wait\n",
		       __FUNCTION__);
		timeout = jiffies + 30 * HZ;
		while (atomic_read(&data.unstarted_count) > 0
		       && time_before(jiffies, timeout))
			barrier();
		if (atomic_read(&data.unstarted_count) <= 0) {
			long delta = jiffies - start_time;
			printk(KERN_ERR
			       "%s: response %ld.%ld seconds into long wait\n",
			       __FUNCTION__, delta / HZ,
			       (100 * (delta - ((delta / HZ) * HZ))) / HZ);
		}
	}

	/* We either got one or timed out -- clear the lock. */
	mb();
	smp_call_function_data = NULL;

	/*
	 * If after both the initial and long timeout periods we still don't
	 * have a response, something is very wrong...
	 */
	BUG_ON(atomic_read (&data.unstarted_count) > 0);

	/* Wait for a complete response, if needed.  */
	if (wait) {
		while (atomic_read (&data.unfinished_count) > 0)
			barrier();
	}

	return 0;
}
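/*
 * Illustrative usage sketch (not part of the original file): a caller of
 * the interface documented above.  "count_self" and "count_other_cpus"
 * are hypothetical names.  The callback runs in interrupt context on the
 * remote CPUs, so it must be fast, non-blocking, and must not sleep:
 *
 *	static void count_self(void *info)
 *	{
 *		atomic_inc((atomic_t *)info);
 *	}
 *
 *	int count_other_cpus(void)
 *	{
 *		atomic_t hits = ATOMIC_INIT(0);
 *		// retry=1: spin for the data lock; wait=1: block until done
 *		if (smp_call_function(count_self, &hits, 1, 1))
 *			return -EBUSY;
 *		return atomic_read(&hits);
 *	}
 */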
int
smp_call_function (void (*func) (void *info), void *info, int retry, int wait)
{
	return smp_call_function_on_cpu (func, info, retry, wait,
					 cpu_online_map);
}

static void
ipi_imb(void *ignored)
{
	imb();
}

void
smp_imb(void)
{
	/* Must wait for other processors to flush their icache before
	   continuing. */
	if (on_each_cpu(ipi_imb, NULL, 1, 1))
		printk(KERN_CRIT "smp_imb: timed out\n");
}

static void
ipi_flush_tlb_all(void *ignored)
{
	tbia();
}

void
flush_tlb_all(void)
{
	/* Although we don't have any data to pass, we do want to
	   synchronize with the other processors.  */
	if (on_each_cpu(ipi_flush_tlb_all, NULL, 1, 1)) {
		printk(KERN_CRIT "flush_tlb_all: timed out\n");
	}
}

#define asn_locked() (cpu_data[smp_processor_id()].asn_lock)

static void
ipi_flush_tlb_mm(void *x)
{
	struct mm_struct *mm = (struct mm_struct *) x;
	if (mm == current->active_mm && !asn_locked())
		flush_tlb_current(mm);
	else
		flush_tlb_other(mm);
}

void
flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if (mm == current->active_mm) {
		flush_tlb_current(mm);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	if (smp_call_function(ipi_flush_tlb_mm, mm, 1, 1)) {
		printk(KERN_CRIT "flush_tlb_mm: timed out\n");
	}

	preempt_enable();
}

struct flush_tlb_page_struct {
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr;
};

static void
ipi_flush_tlb_page(void *x)
{
	struct flush_tlb_page_struct *data = (struct flush_tlb_page_struct *)x;
	struct mm_struct * mm = data->mm;

	if (mm == current->active_mm && !asn_locked())
		flush_tlb_current_page(mm, data->vma, data->addr);
	else
		flush_tlb_other(mm);
}

void
flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct flush_tlb_page_struct data;
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();

	if (mm == current->active_mm) {
		flush_tlb_current_page(mm, vma, addr);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	data.vma = vma;
	data.mm = mm;
	data.addr = addr;

	if (smp_call_function(ipi_flush_tlb_page, &data, 1, 1)) {
		printk(KERN_CRIT "flush_tlb_page: timed out\n");
	}

	preempt_enable();
}

void
flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	/* On the Alpha we always flush the whole user tlb.  */
	flush_tlb_mm(vma->vm_mm);
}
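/*
 * Illustrative summary (not part of the original file): the flush_tlb_*
 * routines above share one shootdown pattern:
 *
 *	flush locally (flush_tlb_current / flush_tlb_current_page);
 *	if (atomic_read(&mm->mm_users) <= 1)
 *		// sole user: no IPI needed; zeroing mm->context[cpu]
 *		// forces a fresh ASN if the mm is ever scheduled there
 *		// again, so stale TLB entries can never match
 *		mm->context[cpu] = 0 on every other online cpu;
 *	else
 *		// shared mm: broadcast, remote CPUs run ipi_flush_tlb_*
 *		smp_call_function(ipi_flush_tlb_..., ..., 1, 1);
 */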
static void
ipi_flush_icache_page(void *x)
{
	struct mm_struct *mm = (struct mm_struct *) x;
	if (mm == current->active_mm && !asn_locked())
		__load_new_mm_context(mm);
	else
		flush_tlb_other(mm);
}

void
flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			unsigned long addr, int len)
{
	struct mm_struct *mm = vma->vm_mm;

	if ((vma->vm_flags & VM_EXEC) == 0)
		return;

	preempt_disable();

	if (mm == current->active_mm) {
		__load_new_mm_context(mm);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	if (smp_call_function(ipi_flush_icache_page, mm, 1, 1)) {
		printk(KERN_CRIT "flush_icache_page: timed out\n");
	}

	preempt_enable();
}
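
The IPI transport in this file is a per-CPU atomic bitmask: send_ipi_message() sets an operation bit and rings the target with wripir(), while handle_ipi() claims all pending bits at once with xchg() and dispatches them lowest bit first. The following stand-alone sketch (hypothetical names and GCC atomic builtins, not kernel code) simulates that mailbox protocol in user space:

#include <stdio.h>

enum { IPI_RESCHEDULE, IPI_CALL_FUNC, IPI_CPU_STOP };

static unsigned long mailbox;	/* stands in for ipi_data[cpu].bits */

static void post_ipi(int op)
{
	/* Sender side: atomically set the operation bit (cf. set_bit);
	   a real sender would then write the interprocessor IR (wripir).  */
	__atomic_fetch_or(&mailbox, 1UL << op, __ATOMIC_SEQ_CST);
}

static void drain_ipis(void)
{
	unsigned long ops;

	/* Receiver side: atomically claim every pending bit (cf. xchg),
	   then dispatch lowest bit first, as handle_ipi() does.  */
	while ((ops = __atomic_exchange_n(&mailbox, 0, __ATOMIC_SEQ_CST)) != 0) {
		do {
			unsigned long which = ops & -ops;	/* lowest set bit */
			ops &= ~which;
			printf("got IPI %d\n", __builtin_ctzl(which));
		} while (ops);
	}
}

int main(void)
{
	post_ipi(IPI_CALL_FUNC);
	post_ipi(IPI_RESCHEDULE);
	drain_ipis();	/* prints IPI 0 (RESCHEDULE) before IPI 1 (CALL_FUNC) */
	return 0;
}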
