/* smp.c */
			continue;

		if (smp_boot_one_cpu(i, cpu_count))
			continue;

		cpu_present_mask |= 1UL << i;
		cpu_count++;
	}

	if (cpu_count == 1) {
		printk(KERN_ERR "SMP: Only one lonely processor alive.\n");
		return;
	}

	bogosum = 0;
	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_present_mask & (1UL << i))
			bogosum += cpu_data[i].loops_per_jiffy;
	}
	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       cpu_count, (bogosum + 2500) / (500000/HZ),
	       ((bogosum + 2500) / (5000/HZ)) % 100);

	smp_num_cpus = cpu_count;
}

/*
 * Called by smp_init to release the blocking online cpus once they
 * are all started.
 */
void __init
smp_commence(void)
{
	/* smp_init sets smp_threads_ready -- that's enough.  */
	mb();
}

void
smp_percpu_timer_interrupt(struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	unsigned long user = user_mode(regs);
	struct cpuinfo_alpha *data = &cpu_data[cpu];

	/* Record kernel PC.  */
	if (!user)
		alpha_do_profile(regs->pc);

	if (!--data->prof_counter) {
		/* We need to make like a normal interrupt -- otherwise
		   timer interrupts ignore the global interrupt lock,
		   which would be a Bad Thing.  */
		irq_enter(cpu, RTC_IRQ);

		update_process_times(user);

		data->prof_counter = data->prof_multiplier;
		irq_exit(cpu, RTC_IRQ);

		if (softirq_pending(cpu))
			do_softirq();
	}
}

int __init
setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

static void
send_ipi_message(unsigned long to_whom, enum ipi_message_type operation)
{
	long i, j;

	/* Reduce the number of memory barriers by doing two loops,
	   one to set the bits, one to invoke the interrupts.  */

	mb();	/* Order out-of-band data and bit setting. */

	for (i = 0, j = 1; i < NR_CPUS; ++i, j <<= 1) {
		if (to_whom & j)
			set_bit(operation, &ipi_data[i].bits);
	}

	mb();	/* Order bit setting and interrupt. */

	for (i = 0, j = 1; i < NR_CPUS; ++i, j <<= 1) {
		if (to_whom & j)
			wripir(i);
	}
}

/* Structure and data for smp_call_function.  This is designed to
   minimize static memory requirements.  Plus it looks cleaner.  */

struct smp_call_struct {
	void (*func) (void *info);
	void *info;
	long wait;
	atomic_t unstarted_count;
	atomic_t unfinished_count;
};

static struct smp_call_struct *smp_call_function_data;

/* Atomically drop data into a shared pointer.  The slot is ours if it
   was initially zero (unlocked).  If retry, spin until it is free.  */

static inline int
pointer_lock (void *lock, void *data, int retry)
{
	void *old, *tmp;

	mb();
 again:
	/* Compare and swap with zero.  */
	asm volatile (
	"1:	ldq_l	%0,%1\n"
	"	mov	%3,%2\n"
	"	bne	%0,2f\n"
	"	stq_c	%2,%1\n"
	"	beq	%2,1b\n"
	"2:"
	: "=&r"(old), "=m"(*(void **)lock), "=&r"(tmp)
	: "r"(data)
	: "memory");

	if (old == 0)
		return 0;
	if (! retry)
		return -EBUSY;

	while (*(void **)lock)
		barrier();
	goto again;
}
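#if 0
/* Editorial sketch, not part of the original file: the usage pattern
   pointer_lock() is designed for, mirroring what
   smp_call_function_on_cpu() does below.  The slot and data names
   here are hypothetical.  */
static void *example_slot;

static void
example_pointer_lock_usage(void *my_data)
{
	/* With retry != 0 this spins until the slot is free, so it
	   cannot return -EBUSY here.  */
	if (pointer_lock(&example_slot, my_data, 1))
		return;

	/* The slot is ours; other CPUs can now observe my_data.  */

	mb();			/* Order the work above with the release.  */
	example_slot = 0;	/* Release the slot for the next owner.  */
}
#endif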
void
handle_ipi(struct pt_regs *regs)
{
	int this_cpu = smp_processor_id();
	unsigned long *pending_ipis = &ipi_data[this_cpu].bits;
	unsigned long ops;

#if 0
	DBGS(("handle_ipi: on CPU %d ops 0x%lx PC 0x%lx\n",
	      this_cpu, *pending_ipis, regs->pc));
#endif

	mb();	/* Order interrupt and bit testing. */
	while ((ops = xchg(pending_ipis, 0)) != 0) {
		mb();	/* Order bit clearing and data access. */
		do {
			unsigned long which;

			/* Isolate and clear the lowest set bit, then
			   convert the mask into a bit number.  */
			which = ops & -ops;
			ops &= ~which;
			which = ffz(~which);

			if (which == IPI_RESCHEDULE) {
				/* Reschedule callback.  Everything to be
				   done is done by the interrupt return
				   path.  */
			}
			else if (which == IPI_CALL_FUNC) {
				struct smp_call_struct *data;
				void (*func)(void *info);
				void *info;
				int wait;

				data = smp_call_function_data;
				func = data->func;
				info = data->info;
				wait = data->wait;

				/* Notify the sending CPU that the data has
				   been received, and execution is about to
				   begin.  */
				mb();
				atomic_dec (&data->unstarted_count);

				/* At this point the structure may be gone
				   unless wait is true.  */
				(*func)(info);

				/* Notify the sending CPU that the task is
				   done.  */
				mb();
				if (wait)
					atomic_dec (&data->unfinished_count);
			}
			else if (which == IPI_CPU_STOP) {
				halt();
			}
			else {
				printk(KERN_CRIT
				       "Unknown IPI on CPU %d: %lu\n",
				       this_cpu, which);
			}
		} while (ops);

		mb();	/* Order data access and bit testing. */
	}

	cpu_data[this_cpu].ipi_count++;

	if (hwrpb->txrdy)
		recv_secondary_console_msg();
}

void
smp_send_reschedule(int cpu)
{
#if DEBUG_IPI_MSG
	if (cpu == hard_smp_processor_id())
		printk(KERN_WARNING
		       "smp_send_reschedule: Sending IPI to self.\n");
#endif
	send_ipi_message(1UL << cpu, IPI_RESCHEDULE);
}

void
smp_send_stop(void)
{
	unsigned long to_whom = cpu_present_mask ^ (1UL << smp_processor_id());
#if DEBUG_IPI_MSG
	if (hard_smp_processor_id() != boot_cpu_id)
		printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n");
#endif
	send_ipi_message(to_whom, IPI_CPU_STOP);
}

/*
 * Run a function on all other CPUs.
 *  <func>	The function to run.  This must be fast and non-blocking.
 *  <info>	An arbitrary pointer to pass to the function.
 *  <retry>	If true, keep retrying until ready.
 *  <wait>	If true, wait until function has completed on other CPUs.
 *  [RETURNS]	0 on success, else a negative status code.
 *
 * Does not return until the remote CPUs are nearly ready to execute
 * <func>, are executing it, or have already finished.
 */

int
smp_call_function_on_cpu (void (*func) (void *info), void *info, int retry,
			  int wait, unsigned long to_whom)
{
	struct smp_call_struct data;
	long timeout;
	int num_cpus_to_call;
	long i,j;

	data.func = func;
	data.info = info;
	data.wait = wait;

	to_whom &= ~(1L << smp_processor_id());
	for (i = 0, j = 1, num_cpus_to_call = 0; i < NR_CPUS; ++i, j <<= 1)
		if (to_whom & j)
			num_cpus_to_call++;

	atomic_set(&data.unstarted_count, num_cpus_to_call);
	atomic_set(&data.unfinished_count, num_cpus_to_call);

	/* Acquire the smp_call_function_data mutex.  */
	if (pointer_lock(&smp_call_function_data, &data, retry))
		return -EBUSY;

	/* Send a message to the requested CPUs.  */
	send_ipi_message(to_whom, IPI_CALL_FUNC);

	/* Wait for a minimal response.  */
	timeout = jiffies + HZ;
	while (atomic_read (&data.unstarted_count) > 0
	       && time_before (jiffies, timeout))
		barrier();

	/* We either got one or timed out -- clear the lock.  */
	mb();
	smp_call_function_data = 0;
	if (atomic_read (&data.unstarted_count) > 0)
		return -ETIMEDOUT;

	/* Wait for a complete response, if needed.  */
	if (wait) {
		while (atomic_read (&data.unfinished_count) > 0)
			barrier();
	}

	return 0;
}

int
smp_call_function (void (*func) (void *info), void *info, int retry, int wait)
{
	return smp_call_function_on_cpu (func, info, retry, wait,
					 cpu_present_mask);
}
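#if 0
/* Editorial sketch, not part of the original file: a typical
   smp_call_function() call.  The callback runs from handle_ipi() on
   every other CPU, so it must be fast, must not block, and must not
   take a lock the sender could be holding.  Function names here are
   hypothetical.  */
static void
example_remote_poke(void *info)
{
	printk(KERN_DEBUG "CPU %d handled IPI_CALL_FUNC\n",
	       smp_processor_id());
}

static void
example_poke_others(void)
{
	/* retry=1: spin for the smp_call_function_data slot;
	   wait=1: do not return until every target CPU has finished.  */
	if (smp_call_function(example_remote_poke, NULL, 1, 1))
		printk(KERN_CRIT "example_poke_others: timed out\n");
}
#endif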
static void
ipi_imb(void *ignored)
{
	imb();
}

void
smp_imb(void)
{
	/* We must wait for the other processors to flush their icaches
	   before continuing.  */
	if (smp_call_function(ipi_imb, NULL, 1, 1))
		printk(KERN_CRIT "smp_imb: timed out\n");

	imb();
}

static void
ipi_flush_tlb_all(void *ignored)
{
	tbia();
}

void
flush_tlb_all(void)
{
	/* Although we don't have any data to pass, we do want to
	   synchronize with the other processors.  */
	if (smp_call_function(ipi_flush_tlb_all, NULL, 1, 1)) {
		printk(KERN_CRIT "flush_tlb_all: timed out\n");
	}

	tbia();
}

#define asn_locked() (cpu_data[smp_processor_id()].asn_lock)

static void
ipi_flush_tlb_mm(void *x)
{
	struct mm_struct *mm = (struct mm_struct *) x;
	if (mm == current->active_mm && !asn_locked())
		flush_tlb_current(mm);
	else
		flush_tlb_other(mm);
}

void
flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->active_mm) {
		flush_tlb_current(mm);
		if (atomic_read(&mm->mm_users) <= 1) {
			int i, cpu, this_cpu = smp_processor_id();
			for (i = 0; i < smp_num_cpus; i++) {
				cpu = cpu_logical_map(i);
				if (cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			return;
		}
	}

	if (smp_call_function(ipi_flush_tlb_mm, mm, 1, 1)) {
		printk(KERN_CRIT "flush_tlb_mm: timed out\n");
	}
}

struct flush_tlb_page_struct {
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr;
};

static void
ipi_flush_tlb_page(void *x)
{
	struct flush_tlb_page_struct *data = (struct flush_tlb_page_struct *)x;
	struct mm_struct * mm = data->mm;

	if (mm == current->active_mm && !asn_locked())
		flush_tlb_current_page(mm, data->vma, data->addr);
	else
		flush_tlb_other(mm);
}

void
flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct flush_tlb_page_struct data;
	struct mm_struct *mm = vma->vm_mm;

	if (mm == current->active_mm) {
		flush_tlb_current_page(mm, vma, addr);
		if (atomic_read(&mm->mm_users) <= 1) {
			int i, cpu, this_cpu = smp_processor_id();
			for (i = 0; i < smp_num_cpus; i++) {
				cpu = cpu_logical_map(i);
				if (cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			return;
		}
	}

	data.vma = vma;
	data.mm = mm;
	data.addr = addr;

	if (smp_call_function(ipi_flush_tlb_page, &data, 1, 1)) {
		printk(KERN_CRIT "flush_tlb_page: timed out\n");
	}
}

void
flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{
	/* On the Alpha we always flush the whole user tlb.  */
	flush_tlb_mm(mm);
}

static void
ipi_flush_icache_page(void *x)
{
	struct mm_struct *mm = (struct mm_struct *) x;
	if (mm == current->active_mm && !asn_locked())
		__load_new_mm_context(mm);
	else
		flush_tlb_other(mm);
}

void
flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
	struct mm_struct *mm = vma->vm_mm;

	if ((vma->vm_flags & VM_EXEC) == 0)
		return;

	if (mm == current->active_mm) {
		__load_new_mm_context(mm);
		if (atomic_read(&mm->mm_users) <= 1) {
			int i, cpu, this_cpu = smp_processor_id();
			for (i = 0; i < smp_num_cpus; i++) {
				cpu = cpu_logical_map(i);
				if (cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			return;
		}
	}

	if (smp_call_function(ipi_flush_icache_page, mm, 1, 1)) {
		printk(KERN_CRIT "flush_icache_page: timed out\n");
	}
}
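#if 0
/* Editorial sketch, not part of the original file: the fast path that
   flush_tlb_mm(), flush_tlb_page() and flush_icache_page() above all
   share, factored out for illustration.  When no other thread can be
   using the mm (mm_users <= 1), no IPIs are needed; zeroing the other
   CPUs' cached context forces them to load a fresh ASN the next time
   they activate this mm.  */
static int
example_invalidate_other_contexts(struct mm_struct *mm)
{
	int i, cpu, this_cpu = smp_processor_id();

	if (atomic_read(&mm->mm_users) > 1)
		return 0;	/* Other users: the caller must send IPIs.  */

	for (i = 0; i < smp_num_cpus; i++) {
		cpu = cpu_logical_map(i);
		if (cpu == this_cpu)
			continue;
		if (mm->context[cpu])
			mm->context[cpu] = 0;
	}
	return 1;		/* No IPIs needed.  */
}
#endif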
#ifdef CONFIG_DEBUG_SPINLOCK
void
spin_unlock(spinlock_t * lock)
{
	mb();
	lock->lock = 0;

	lock->on_cpu = -1;
	lock->previous = NULL;
	lock->task = NULL;
	lock->base_file = "none";
	lock->line_no = 0;
}

void
debug_spin_lock(spinlock_t * lock, const char *base_file, int line_no)
{
	long tmp;
	long stuck;
	void *inline_pc = __builtin_return_address(0);
	unsigned long started = jiffies;
	int printed = 0;
	int cpu = smp_processor_id();

	stuck = 1L << 30;
 try_again:

	/* Use sub-sections to put the actual loop at the end of this
	   object file's text section so as to perfect branch prediction.  */
	__asm__ __volatile__(
	"1:	ldl_l	%0,%1\n"
	"	subq	%2,1,%2\n"
	"	blbs	%0,2f\n"
	"	or	%0,1,%0\n"
	"	stl_c	%0,%1\n"
	"	beq	%0,3f\n"
	"4:	mb\n"
	".subsection 2\n"
	"2:	ldl	%0,%1\n"
	"	subq	%2,1,%2\n"
	"3:	blt	%2,4b\n"
	"	blbs	%0,2b\n"
	"	br	1b\n"
	".previous"
	: "=r" (tmp), "=m" (lock->lock), "=r" (stuck)
	: "1" (lock->lock), "2" (stuck)
	: "memory");

	if (stuck < 0) {
		printk(KERN_WARNING
		       "%s:%d spinlock stuck in %s at %p(%d)"
		       " owner %s at %p(%d) %s:%d\n",
		       base_file, line_no,
		       current->comm, inline_pc, cpu,
		       lock->task->comm, lock->previous,
		       lock->on_cpu, lock->base_file, lock->line_no);
		stuck = 1L << 36;
		printed = 1;
		goto try_again;
	}

	/* Exiting.  Got the lock.  */
	lock->on_cpu = cpu;
	lock->previous = inline_pc;
	lock->task = current;
	lock->base_file = base_file;
	lock->line_no = line_no;

	if (printed) {
		printk(KERN_WARNING
		       "%s:%d spinlock grabbed in %s at %p(%d) %ld ticks\n",
		       base_file, line_no, current->comm, inline_pc, cpu,
		       jiffies - started);
	}
}

int
debug_spin_trylock(spinlock_t * lock, const char *base_file, int line_no)
{
	int ret;
	if ((ret = !test_and_set_bit(0, lock))) {
		lock->on_cpu = smp_processor_id();
		lock->previous = __builtin_return_address(0);
		lock->task = current;
	} else {
		lock->base_file = base_file;
		lock->line_no = line_no;
	}
	return ret;
}
#endif /* CONFIG_DEBUG_SPINLOCK */

#ifdef CONFIG_DEBUG_RWLOCK
void write_lock(rwlock_t * lock)
{
	long regx, regy;
	int stuck_lock, stuck_reader;
	void *inline_pc = __builtin_return_address(0);

 try_again:

	stuck_lock = 1<<30;
	stuck_reader = 1<<30;

	__asm__ __volatile__(
	"1:	ldl_l	%1,%0\n"
	"	blbs	%1,6f\n"
	"	blt	%1,8f\n"
	"	mov	1,%1\n"
	"	stl_c	%1,%0\n"
	"	beq	%1,6f\n"
	"4:	mb\n"
	".subsection 2\n"
	"6:	blt	%3,4b	# debug\n"
	"	subl	%3,1,%3	# debug\n"
	"	ldl	%1,%0\n"
	"	blbs	%1,6b\n"
	"8:	blt	%4,4b	# debug\n"
	"	subl	%4,1,%4	# debug\n"
	"	ldl	%1,%0\n"
	"	blt	%1,8b\n"
	"	br	1b\n"
	".previous"
	: "=m" (*(volatile int *)lock), "=&r" (regx), "=&r" (regy),
	  "=&r" (stuck_lock), "=&r" (stuck_reader)
	: "0" (*(volatile int *)lock), "3" (stuck_lock), "4" (stuck_reader)
	: "memory");

	if (stuck_lock < 0) {
		printk(KERN_WARNING "write_lock stuck at %p\n", inline_pc);
		goto try_again;
	}
	if (stuck_reader < 0) {
		printk(KERN_WARNING "write_lock stuck on readers at %p\n",
		       inline_pc);
		goto try_again;
	}
}

void read_lock(rwlock_t * lock)
{
	long regx;
	int stuck_lock;
	void *inline_pc = __builtin_return_address(0);

 try_again:

	stuck_lock = 1<<30;

	__asm__ __volatile__(
	"1:	ldl_l	%1,%0;"
	"	blbs	%1,6f;"
	"	subl	%1,2,%1;"
	"	stl_c	%1,%0;"
	"	beq	%1,6f;"
	"4:	mb\n"
	".subsection 2\n"
	"6:	ldl	%1,%0;"
	"	blt	%2,4b	# debug\n"
	"	subl	%2,1,%2	# debug\n"
	"	blbs	%1,6b;"
	"	br	1b\n"
	".previous"
	: "=m" (*(volatile int *)lock), "=&r" (regx), "=&r" (stuck_lock)
	: "0" (*(volatile int *)lock), "2" (stuck_lock)
	: "memory");

	if (stuck_lock < 0) {
		printk(KERN_WARNING "read_lock stuck at %p\n", inline_pc);
		goto try_again;
	}
}
#endif /* CONFIG_DEBUG_RWLOCK */
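#if 0
/* Editorial sketch, not part of the original file: a plain-C model of
   the rwlock word encoding that write_lock() and read_lock() above
   assume.  Bit 0 set means a writer holds the lock (trapped by blbs),
   a negative value means readers hold it (trapped by blt; each reader
   subtracts 2), and zero means free.  This model is NOT atomic; the
   real code relies on ldl_l/stl_c.  */
static int example_rw_word;

static int
example_try_write(void)
{
	if (example_rw_word != 0)	/* writer (bit 0) or readers (< 0) */
		return 0;
	example_rw_word = 1;		/* set the writer bit */
	return 1;
}

static int
example_try_read(void)
{
	if (example_rw_word & 1)	/* a writer holds the lock */
		return 0;
	example_rw_word -= 2;		/* count ourselves as a reader */
	return 1;
}
#endif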