⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 smp.c

📁 该文件是rt_linux
💻 C
📖 第 1 页 / 共 3 页
字号:
		/* Dispatch never completed (busy bit stuck): report it and
		 * give up on this delivery rather than spinning forever.
		 */
		printk("CPU[%d]: mondo stuckage result[%016lx]\n",
		       smp_processor_id(), result);
	} else {
		/* Target NACKed us: back off briefly and retry. */
		udelay(2);
		goto again;
	}
}

/* Deliver the three mondo data words to every cpu whose bit is set in
 * MASK, one cpu at a time via spitfire_xcall_helper().  %pstate is read
 * once here and passed down so the helper can toggle interrupt state.
 * ncpus (total cpus minus self) bounds the scan so we stop as soon as
 * all possible targets have been serviced.
 */
static __inline__ void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, unsigned long mask)
{
	int ncpus = smp_num_cpus - 1;
	int i;
	u64 pstate;

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
	for (i = 0; (i < NR_CPUS) && ncpus; i++) {
		if (mask & (1UL << i)) {
			spitfire_xcall_helper(data0, data1, data2, pstate, i);
			ncpus--;
		}
	}
}

/* Cheetah now allows to send the whole 64-bytes of data in the interrupt
 * packet, but we have no use for that.  However we do take advantage of
 * the new pipelining feature (ie. dispatch to multiple cpus simultaneously).
 */
#if NR_CPUS > 32
#error Fixup cheetah_xcall_deliver Dave...
#endif
/* Deliver the mondo to all cpus in MASK at once: load the three data
 * words into the dispatch data registers, fire one dispatch per target
 * cpu (each tagged with a distinct busy/nack id), then poll the
 * dispatch status register.  Even status bits (0x5555...) are "busy",
 * the odd bits are "NACKed"; NACKed cpus are retried, stuck-busy
 * delivery is reported and abandoned.  Interrupts are disabled (PSTATE_IE
 * cleared) for the duration of each dispatch attempt.
 */
static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, unsigned long mask)
{
	u64 pstate;
	int nack_busy_id;

	if (!mask)
		return;

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
retry:
	__asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
			     : : "r" (pstate), "i" (PSTATE_IE));

	/* Setup the dispatch data registers. */
	__asm__ __volatile__("stxa	%0, [%3] %6\n\t"
			     "stxa	%1, [%4] %6\n\t"
			     "stxa	%2, [%5] %6\n\t"
			     "membar	#Sync\n\t"
			     : /* no outputs */
			     : "r" (data0), "r" (data1), "r" (data2),
			       "r" (0x40), "r" (0x50), "r" (0x60),
			       "i" (ASI_INTR_W));

	nack_busy_id = 0;
	{
		int i, ncpus = smp_num_cpus - 1;

		for (i = 0; (i < NR_CPUS) && ncpus; i++) {
			if (mask & (1UL << i)) {
				/* Dispatch address: target cpu id in
				 * bits 14+, busy/nack slot id in bits 24+.
				 */
				u64 target = (i << 14) | 0x70;

				target |= (nack_busy_id++ << 24);
				__asm__ __volatile__("stxa	%%g0, [%0] %1\n\t"
						     "membar	#Sync\n\t"
						     : /* no outputs */
						     : "r" (target), "i" (ASI_INTR_W));
				ncpus--;
			}
		}
	}

	/* Now, poll for completion. */
	{
		u64 dispatch_stat;
		long stuck;

		stuck = 100000 * nack_busy_id;
		do {
			__asm__ __volatile__("ldxa	[%%g0] %1, %0"
					     : "=r" (dispatch_stat)
					     : "i" (ASI_INTR_DISPATCH_STAT));
			if (dispatch_stat == 0UL) {
				/* All dispatches completed: restore
				 * interrupt state and we are done.
				 */
				__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
						     : : "r" (pstate));
				return;
			}
			if (!--stuck)
				break;
		} while (dispatch_stat & 0x5555555555555555UL);

		__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
				     : : "r" (pstate));

		if ((dispatch_stat & ~(0x5555555555555555UL)) == 0) {
			/* Busy bits will not clear, continue instead
			 * of freezing up on this cpu.
			 */
			printk("CPU[%d]: mondo stuckage result[%016lx]\n",
			       smp_processor_id(), dispatch_stat);
		} else {
			int i, this_busy_nack = 0;

			/* Delay some random time with interrupts enabled
			 * to prevent deadlock.
			 */
			udelay(2 * nack_busy_id);

			/* Clear out the mask bits for cpus which did not
			 * NACK us.
			 */
			for (i = 0; i < NR_CPUS; i++) {
				if (mask & (1UL << i)) {
					if ((dispatch_stat & (0x2 << this_busy_nack)) == 0)
						mask &= ~(1UL << i);
					this_busy_nack += 2;
				}
			}

			goto retry;
		}
	}
}

/* Send cross call to all processors mentioned in MASK
 * except self.
 *
 * data0 packs the TLB context number into the high 32 bits and the
 * low 32 bits of the xcall handler address into the low word; the
 * sparc64 trap glue decodes it on the receiving cpu.
 */
static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, unsigned long mask)
{
	if (smp_processors_ready) {
		u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));

		/* Never cross-call ourselves. */
		mask &= ~(1UL<<smp_processor_id());

		if (tlb_type == spitfire)
			spitfire_xcall_deliver(data0, data1, data2, mask);
		else
			cheetah_xcall_deliver(data0, data1, data2, mask);

		/* NOTE: Caller runs local copy on master. */
	}
}

/* Send cross call to all processors except self.
 */
#define smp_cross_call(func, ctx, data1, data2) \
	smp_cross_call_masked(func, ctx, data1, data2, cpu_present_map)

/* Rendezvous data for smp_call_function(): the function to run, its
 * argument, a counter of cpus that have picked it up / finished, and
 * whether the initiator must wait for completion.
 */
struct call_data_struct {
	void (*func) (void *info);
	void *info;
	atomic_t finished;
	int wait;
};

/* call_lock serializes initiators; call_data points at the current
 * initiator's on-stack call_data_struct while a call is in flight.
 */
static spinlock_t call_lock = SPIN_LOCK_UNLOCKED;
static struct call_data_struct *call_data;

extern unsigned long xcall_call_function;

/* Run func(info) on every other cpu via the call-function cross call.
 * Always returns 0, even if remote cpus time out (the timeout is only
 * reported via printk).  nonatomic is accepted for interface
 * compatibility but unused here.  With wait != 0 we return only after
 * every cpu has finished running func; otherwise as soon as every cpu
 * has snapped the call data.
 *
 * NOTE(review): on the timeout path call_data is left pointing at this
 * function's stack frame after return; a late-arriving cpu would then
 * dereference a dead frame — confirm whether the 1-second timeout makes
 * this unreachable in practice.
 */
int smp_call_function(void (*func)(void *info), void *info,
		      int nonatomic, int wait)
{
	struct call_data_struct data;
	int cpus = smp_num_cpus - 1;
	long timeout;

	/* Uniprocessor: nothing to cross call. */
	if (!cpus)
		return 0;

	data.func = func;
	data.info = info;
	atomic_set(&data.finished, 0);
	data.wait = wait;

	spin_lock_bh(&call_lock);

	call_data = &data;

	smp_cross_call(&xcall_call_function, 0, 0, 0);

	/* 
	 * Wait for other cpus to complete function or at
	 * least snap the call data.
	 */
	timeout = 1000000;
	while (atomic_read(&data.finished) != cpus) {
		if (--timeout <= 0)
			goto out_timeout;
		barrier();
		udelay(1);
	}

	spin_unlock_bh(&call_lock);

	return 0;

out_timeout:
	spin_unlock_bh(&call_lock);
	printk("XCALL: Remote cpus not responding, ncpus=%d finished=%d\n",
	       smp_num_cpus - 1, atomic_read(&data.finished));
	return 0;
}

/* Receiving side of smp_call_function(): runs in the softint handler on
 * each target cpu.  The order of func() versus the finished increment
 * implements the wait/no-wait contract with the initiator.
 */
void smp_call_function_client(int irq, struct pt_regs *regs)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;

	clear_softint(1 << irq);
	if (call_data->wait) {
		/* let initiator proceed only after completion */
		func(info);
		atomic_inc(&call_data->finished);
	} else {
		/* let initiator proceed after getting data */
		atomic_inc(&call_data->finished);
		func(info);
	}
}

/* Assembler entry points (in the sparc64 trap glue) whose addresses are
 * handed to the xcall delivery routines above.
 */
extern unsigned long xcall_flush_tlb_page;
extern unsigned long xcall_flush_tlb_mm;
extern unsigned long xcall_flush_tlb_range;
extern unsigned long xcall_flush_tlb_all_spitfire;
extern unsigned long xcall_flush_tlb_all_cheetah;
extern unsigned long xcall_flush_cache_all_spitfire;
extern unsigned long xcall_report_regs;
extern unsigned long xcall_receive_signal;
extern unsigned long xcall_flush_dcache_page_cheetah;
extern unsigned long xcall_flush_dcache_page_spitfire;

#ifdef CONFIG_DEBUG_DCFLUSH
extern atomic_t dcpage_flushes;
extern atomic_t dcpage_flushes_xcall;
#endif

/* Flush this cpu's D-cache copy of PAGE.  When the L1 D-cache is larger
 * than a page, the full D-cache flush is needed (with I-cache flushing
 * too on spitfire when the page is mapped); otherwise only spitfire
 * needs its I-cache flushed for mapped pages.
 */
static __inline__ void __local_flush_dcache_page(struct page *page)
{
#if (L1DCACHE_SIZE > PAGE_SIZE)
	__flush_dcache_page(page->virtual,
			    ((tlb_type == spitfire) &&
			     page->mapping != NULL));
#else
	if (page->mapping != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page->virtual));
#endif
}

/* Flush the D-cache copy of PAGE on a single cpu: locally when CPU is
 * ourselves, otherwise via a dcache-flush cross call to that cpu.  On
 * spitfire, bit 32 of data0 tells the handler whether the page is
 * mapped (so the I-cache needs flushing too).
 */
void smp_flush_dcache_page_impl(struct page *page, int cpu)
{
	if (smp_processors_ready) {
		unsigned long mask = 1UL << cpu;

#ifdef CONFIG_DEBUG_DCFLUSH
		atomic_inc(&dcpage_flushes);
#endif
		if (cpu == smp_processor_id()) {
			__local_flush_dcache_page(page);
		} else if ((cpu_present_map & mask) != 0) {
			u64 data0;

			if (tlb_type == spitfire) {
				data0 = ((u64)&xcall_flush_dcache_page_spitfire);
				if (page->mapping != NULL)
					data0 |= ((u64)1 << 32);
				spitfire_xcall_deliver(data0,
						       __pa(page->virtual),
						       (u64) page->virtual,
						       mask);
			} else {
				data0 = ((u64)&xcall_flush_dcache_page_cheetah);
				cheetah_xcall_deliver(data0,
						      __pa(page->virtual),
						      0, mask);
			}
#ifdef CONFIG_DEBUG_DCFLUSH
			atomic_inc(&dcpage_flushes_xcall);
#endif
		}
	}
}

/* Flush the D-cache copy of PAGE on every cpu: cross call all other
 * present cpus (skipped entirely when we are the only one), then flush
 * locally.  The mm argument is unused here.
 */
void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
{
	if (smp_processors_ready) {
		unsigned long mask = cpu_present_map & ~(1UL << smp_processor_id());
		u64 data0;

#ifdef CONFIG_DEBUG_DCFLUSH
		atomic_inc(&dcpage_flushes);
#endif
		if (mask == 0UL)
			goto flush_self;
		if (tlb_type == spitfire) {
			data0 = ((u64)&xcall_flush_dcache_page_spitfire);
			if (page->mapping != NULL)
				data0 |= ((u64)1 << 32);
			spitfire_xcall_deliver(data0,
					       __pa(page->virtual),
					       (u64) page->virtual,
					       mask);
		} else {
			data0 = ((u64)&xcall_flush_dcache_page_cheetah);
			cheetah_xcall_deliver(data0,
					      __pa(page->virtual),
					      0, mask);
		}
#ifdef CONFIG_DEBUG_DCFLUSH
		atomic_inc(&dcpage_flushes_xcall);
#endif
	flush_self:
		__local_flush_dcache_page(page);
	}
}

/* Kick CPU so it re-examines its pending signals: deliver the
 * receive-signal xcall directly to that one cpu (no-op if the cpu is
 * not present).  Only the low 32 bits of the handler address are sent,
 * matching the data0 encoding used by smp_cross_call_masked().
 */
void smp_receive_signal(int cpu)
{
	if (smp_processors_ready) {
		unsigned long mask = 1UL << cpu;

		if ((cpu_present_map & mask) != 0) {
			u64 data0 = (((u64)&xcall_receive_signal) & 0xffffffff);

			if (tlb_type == spitfire)
				spitfire_xcall_deliver(data0, 0, 0, mask);
			else
				cheetah_xcall_deliver(data0, 0, 0, mask);
		}
	}
}

/* Receiving side of smp_receive_signal(): just ack the softint. */
void smp_receive_signal_client(int irq, struct pt_regs *regs)
{
	/* Just return, rtrap takes care of the rest. */
	clear_softint(1 << irq);
}

/* Ask every other cpu to dump its registers (debugging aid). */
void smp_report_regs(void)
{
	smp_cross_call(&xcall_report_regs, 0, 0, 0);
}

/* Flush all caches on all cpus (remote via xcall, then locally). */
void smp_flush_cache_all(void)
{
	/* Cheetah need do nothing. */
	if (tlb_type == spitfire) {
		smp_cross_call(&xcall_flush_cache_all_spitfire, 0, 0, 0);
		__flush_cache_all();
	}
}

/* Flush the entire TLB on all cpus, picking the per-chip handler for
 * the remote cpus and finishing with a local flush.
 */
void smp_flush_tlb_all(void)
{
	if (tlb_type == spitfire)
		smp_cross_call(&xcall_flush_tlb_all_spitfire, 0, 0, 0);
	else
		smp_cross_call(&xcall_flush_tlb_all_cheetah, 0, 0, 0);
	__flush_tlb_all();
}

/* We know that the window frames of the user have been flushed
 * to the stack before we get here because all callers of us
 * are flush_tlb_*() routines, and these run after flush_cache_*()
 * which performs the flushw.
 *
 * The SMP TLB coherency scheme we use works as follows:
 *
 * 1) mm->cpu_vm_mask is a bit mask of which cpus an address
 *    space has (potentially) executed on, this is the heuristic
 *    we use to avoid doing cross calls.
 *
 *    Also, for flushing from kswapd and also for clones, we
 *    use cpu_vm_mask as the list of cpus to make run the TLB.
 *
 * 2) TLB context numbers are shared globally across all processors
 *    in the system, this allows us to play several games to avoid
 *    cross calls.
 *
 *    One invariant is that when a cpu switches to a process, and
 *    that processes tsk->active_mm->cpu_vm_mask does not have the
 *    current cpu's bit set, that tlb context is flushed locally.
 *
 *    If the address space is non-shared (ie. mm->count == 1) we avoid
 *    cross calls when we want to flush the currently running process's
 *    tlb state.  
This is done by clearing all cpu bits except the current *    processor's in current->active_mm->cpu_vm_mask and performing the *    flush locally only.  This will force any subsequent cpus which run *    this task to flush the context from the local tlb if the process *    migrates to another cpu (again). * * 3) For shared address spaces (threads) and swapping we bite the *    bullet for most cases and perform the cross call (but only to *    the cpus listed in cpu_vm_mask). * *    The performance gain from "optimizing" away the cross call for threads is *    questionable (in theory the big win for threads is the massive sharing of *    address space state across processors). */void smp_flush_tlb_mm(struct mm_struct *mm){

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -