smp.c
From "Linux Kernel 2.6.9 for OMAP1710" · C code · 1,218 lines · page 1 of 2
		update_process_times(user);

		data->prof_counter = data->prof_multiplier;

		irq_exit();
	}
}

int __init
setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

static void
send_ipi_message(cpumask_t to_whom, enum ipi_message_type operation)
{
	int i;

	mb();
	for_each_cpu_mask(i, to_whom)
		set_bit(operation, &ipi_data[i].bits);

	mb();
	for_each_cpu_mask(i, to_whom)
		wripir(i);
}

/* Structure and data for smp_call_function.  This is designed to
   minimize static memory requirements.  Plus it looks cleaner.  */

struct smp_call_struct {
	void (*func) (void *info);
	void *info;
	long wait;
	atomic_t unstarted_count;
	atomic_t unfinished_count;
};

static struct smp_call_struct *smp_call_function_data;

/* Atomically drop data into a shared pointer.  The pointer is free if
   it is initially NULL.  If retry, spin until free.  */

static int
pointer_lock (void *lock, void *data, int retry)
{
	void *old, *tmp;

	mb();
 again:
	/* Compare and swap with zero.  */
	asm volatile (
	"1:	ldq_l	%0,%1\n"
	"	mov	%3,%2\n"
	"	bne	%0,2f\n"
	"	stq_c	%2,%1\n"
	"	beq	%2,1b\n"
	"2:"
	: "=&r"(old), "=m"(*(void **)lock), "=&r"(tmp)
	: "r"(data)
	: "memory");

	if (old == 0)
		return 0;
	if (! retry)
		return -EBUSY;

	while (*(void **)lock)
		barrier();
	goto again;
}
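/*
 * Editorial aside (not part of the original file): the ldq_l/stq_c
 * sequence above is Alpha's load-locked/store-conditional pair.  A
 * minimal portable sketch of the same acquire logic, assuming the
 * kernel's generic cmpxchg() with (ptr, old, new) semantics and a
 * hypothetical helper name:
 */
#if 0
static int
pointer_lock_sketch(void **lock, void *data, int retry)
{
	for (;;) {
		/* Try to swap NULL -> data; success means we own the slot.  */
		if (cmpxchg(lock, NULL, data) == NULL)
			return 0;
		if (!retry)
			return -EBUSY;
		/* Spin until the current owner clears the pointer.  */
		while (*lock)
			barrier();
	}
}
#endif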
void
handle_ipi(struct pt_regs *regs)
{
	int this_cpu = smp_processor_id();
	unsigned long *pending_ipis = &ipi_data[this_cpu].bits;
	unsigned long ops;

#if 0
	DBGS(("handle_ipi: on CPU %d ops 0x%lx PC 0x%lx\n",
	      this_cpu, *pending_ipis, regs->pc));
#endif

	mb();	/* Order interrupt and bit testing. */
	while ((ops = xchg(pending_ipis, 0)) != 0) {
		mb();	/* Order bit clearing and data access. */
		do {
			unsigned long which;

			which = ops & -ops;	/* isolate lowest pending bit */
			ops &= ~which;
			which = __ffs(which);

			switch (which) {
			case IPI_RESCHEDULE:
				/* Reschedule callback.  Everything to be done
				   is done by the interrupt return path.  */
				break;

			case IPI_CALL_FUNC:
			    {
				struct smp_call_struct *data;
				void (*func)(void *info);
				void *info;
				int wait;

				data = smp_call_function_data;
				func = data->func;
				info = data->info;
				wait = data->wait;

				/* Notify the sending CPU that the data has
				   been received, and execution is about to
				   begin.  */
				mb();
				atomic_dec (&data->unstarted_count);

				/* At this point the structure may be gone
				   unless wait is true.  */
				(*func)(info);

				/* Notify the sending CPU that the task is
				   done.  */
				mb();
				if (wait) atomic_dec (&data->unfinished_count);
				break;
			    }

			case IPI_CPU_STOP:
				halt();	/* never returns, so the fall-through
					   into default is unreachable */

			default:
				printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
				       this_cpu, which);
				break;
			}
		} while (ops);

		mb();	/* Order data access and bit testing. */
	}

	cpu_data[this_cpu].ipi_count++;

	if (hwrpb->txrdy)
		recv_secondary_console_msg();
}

void
smp_send_reschedule(int cpu)
{
#ifdef DEBUG_IPI_MSG
	if (cpu == hard_smp_processor_id())
		printk(KERN_WARNING
		       "smp_send_reschedule: Sending IPI to self.\n");
#endif
	send_ipi_message(cpumask_of_cpu(cpu), IPI_RESCHEDULE);
}

void
smp_send_stop(void)
{
	cpumask_t to_whom = cpu_possible_map;
	cpu_clear(smp_processor_id(), to_whom);
#ifdef DEBUG_IPI_MSG
	if (hard_smp_processor_id() != boot_cpu_id)
		printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n");
#endif
	send_ipi_message(to_whom, IPI_CPU_STOP);
}

/*
 * Run a function on all other CPUs.
 *  <func>	The function to run. This must be fast and non-blocking.
 *  <info>	An arbitrary pointer to pass to the function.
 *  <retry>	If true, keep retrying until ready.
 *  <wait>	If true, wait until function has completed on other CPUs.
 *  [RETURNS]	0 on success, else a negative status code.
 *
 * Does not return until remote CPUs are nearly ready to execute <func>,
 * are executing it, or have already executed it.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int
smp_call_function_on_cpu (void (*func) (void *info), void *info, int retry,
			  int wait, cpumask_t to_whom)
{
	struct smp_call_struct data;
	unsigned long timeout;
	int num_cpus_to_call;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	data.wait = wait;

	cpu_clear(smp_processor_id(), to_whom);
	num_cpus_to_call = cpus_weight(to_whom);

	atomic_set(&data.unstarted_count, num_cpus_to_call);
	atomic_set(&data.unfinished_count, num_cpus_to_call);

	/* Acquire the smp_call_function_data mutex.  */
	if (pointer_lock(&smp_call_function_data, &data, retry))
		return -EBUSY;

	/* Send a message to the requested CPUs.  */
	send_ipi_message(to_whom, IPI_CALL_FUNC);

	/* Wait for a minimal response.  */
	timeout = jiffies + HZ;
	while (atomic_read (&data.unstarted_count) > 0
	       && time_before (jiffies, timeout))
		barrier();

	/* If there's no response yet, log a message but allow a longer
	 * timeout period -- if we get a response this time, log
	 * a message saying when we got it.
	 */
	if (atomic_read(&data.unstarted_count) > 0) {
		long start_time = jiffies;
		printk(KERN_ERR "%s: initial timeout -- trying long wait\n",
		       __FUNCTION__);
		timeout = jiffies + 30 * HZ;
		while (atomic_read(&data.unstarted_count) > 0
		       && time_before(jiffies, timeout))
			barrier();
		if (atomic_read(&data.unstarted_count) <= 0) {
			long delta = jiffies - start_time;
			printk(KERN_ERR
			       "%s: response %ld.%ld seconds into long wait\n",
			       __FUNCTION__, delta / HZ,
			       (100 * (delta - ((delta / HZ) * HZ))) / HZ);
		}
	}

	/* We either got one or timed out -- clear the lock. */
	mb();
	smp_call_function_data = NULL;

	/*
	 * If after both the initial and long timeout periods we still don't
	 * have a response, something is very wrong...
	 */
	BUG_ON(atomic_read (&data.unstarted_count) > 0);

	/* Wait for a complete response, if needed.  */
	if (wait) {
		while (atomic_read (&data.unfinished_count) > 0)
			barrier();
	}

	return 0;
}

int
smp_call_function (void (*func) (void *info), void *info, int retry, int wait)
{
	return smp_call_function_on_cpu (func, info, retry, wait,
					 cpu_online_map);
}
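/*
 * Editorial aside (not part of the original file): a usage sketch for
 * smp_call_function().  The callback runs in interrupt context on each
 * remote CPU, so it must be fast and must not block, and per the rules
 * above the caller must have interrupts enabled.  The names below are
 * hypothetical.
 */
#if 0
static void
count_hit(void *info)
{
	atomic_inc((atomic_t *)info);	/* runs on every other online CPU */
}

static void
poke_other_cpus(void)
{
	atomic_t hits = ATOMIC_INIT(0);

	/* retry=1: spin for the shared data slot; wait=1: do not return
	   until every targeted CPU has finished running count_hit(), so
	   the on-stack atomic stays valid for the callback's lifetime.  */
	if (smp_call_function(count_hit, &hits, 1, 1))
		printk(KERN_ERR "poke_other_cpus: timed out\n");
}
#endif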
static void
ipi_imb(void *ignored)
{
	imb();
}

void
smp_imb(void)
{
	/* Must wait for the other processors to flush their icache
	   before continuing.  */
	if (on_each_cpu(ipi_imb, NULL, 1, 1))
		printk(KERN_CRIT "smp_imb: timed out\n");
}

static void
ipi_flush_tlb_all(void *ignored)
{
	tbia();
}

void
flush_tlb_all(void)
{
	/* Although we don't have any data to pass, we do want to
	   synchronize with the other processors.  */
	if (on_each_cpu(ipi_flush_tlb_all, NULL, 1, 1)) {
		printk(KERN_CRIT "flush_tlb_all: timed out\n");
	}
}

#define asn_locked() (cpu_data[smp_processor_id()].asn_lock)

static void
ipi_flush_tlb_mm(void *x)
{
	struct mm_struct *mm = (struct mm_struct *) x;
	if (mm == current->active_mm && !asn_locked())
		flush_tlb_current(mm);
	else
		flush_tlb_other(mm);
}

void
flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if (mm == current->active_mm) {
		flush_tlb_current(mm);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	if (smp_call_function(ipi_flush_tlb_mm, mm, 1, 1)) {
		printk(KERN_CRIT "flush_tlb_mm: timed out\n");
	}

	preempt_enable();
}

struct flush_tlb_page_struct {
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr;
};

static void
ipi_flush_tlb_page(void *x)
{
	struct flush_tlb_page_struct *data = (struct flush_tlb_page_struct *)x;
	struct mm_struct * mm = data->mm;

	if (mm == current->active_mm && !asn_locked())
		flush_tlb_current_page(mm, data->vma, data->addr);
	else
		flush_tlb_other(mm);
}

void
flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct flush_tlb_page_struct data;
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();

	if (mm == current->active_mm) {
		flush_tlb_current_page(mm, vma, addr);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	data.vma = vma;
	data.mm = mm;
	data.addr = addr;

	if (smp_call_function(ipi_flush_tlb_page, &data, 1, 1)) {
		printk(KERN_CRIT "flush_tlb_page: timed out\n");
	}

	preempt_enable();
}

void
flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		unsigned long end)
{
	/* On the Alpha we always flush the whole user tlb.  */
	flush_tlb_mm(vma->vm_mm);
}

static void
ipi_flush_icache_page(void *x)
{
	struct mm_struct *mm = (struct mm_struct *) x;
	if (mm == current->active_mm && !asn_locked())
		__load_new_mm_context(mm);
	else
		flush_tlb_other(mm);
}

void
flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			unsigned long addr, int len)
{
	struct mm_struct *mm = vma->vm_mm;

	if ((vma->vm_flags & VM_EXEC) == 0)
		return;

	preempt_disable();

	if (mm == current->active_mm) {
		__load_new_mm_context(mm);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	if (smp_call_function(ipi_flush_icache_page, mm, 1, 1)) {
		printk(KERN_CRIT "flush_icache_page: timed out\n");
	}

	preempt_enable();
}
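/*
 * Editorial aside (not part of the original file): flush_tlb_mm(),
 * flush_tlb_page() and flush_icache_user_range() above all repeat the
 * same single-user fast path -- when no other thread can be using the
 * mm, simply zero its per-CPU ASN context on the other CPUs instead of
 * sending IPIs, forcing a fresh context on next use there.  A sketch
 * of that shared pattern as a hypothetical helper:
 */
#if 0
static void
invalidate_other_contexts(struct mm_struct *mm)
{
	int cpu, this_cpu = smp_processor_id();

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (!cpu_online(cpu) || cpu == this_cpu)
			continue;
		if (mm->context[cpu])
			mm->context[cpu] = 0;	/* new ASN allocated on next use */
	}
}
#endif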
#ifdef CONFIG_DEBUG_SPINLOCK
void
_raw_spin_unlock(spinlock_t * lock)
{
	mb();
	lock->lock = 0;

	lock->on_cpu = -1;
	lock->previous = NULL;
	lock->task = NULL;
	lock->base_file = "none";
	lock->line_no = 0;
}

void
debug_spin_lock(spinlock_t * lock, const char *base_file, int line_no)
{
	long tmp;
	long stuck;
	void *inline_pc = __builtin_return_address(0);
	unsigned long started = jiffies;
	int printed = 0;
	int cpu = smp_processor_id();

	stuck = 1L << 30;
 try_again:

	/* Use sub-sections to put the actual loop at the end of this
	   object file's text section so as to perfect branch
	   prediction.  */
	__asm__ __volatile__(
	"1:	ldl_l	%0,%1\n"
	"	subq	%2,1,%2\n"
	"	blbs	%0,2f\n"
	"	or	%0,1,%0\n"
	"	stl_c	%0,%1\n"
	"	beq	%0,3f\n"
	"4:	mb\n"
	".subsection 2\n"
	"2:	ldl	%0,%1\n"
	"	subq	%2,1,%2\n"
	"3:	blt	%2,4b\n"
	"	blbs	%0,2b\n"
	"	br	1b\n"
	".previous"
	: "=r" (tmp), "=m" (lock->lock), "=r" (stuck)
	: "1" (lock->lock), "2" (stuck)
	: "memory");

	if (stuck < 0) {
		printk(KERN_WARNING
		       "%s:%d spinlock stuck in %s at %p(%d)"
		       " owner %s at %p(%d) %s:%d\n",
		       base_file, line_no,
		       current->comm, inline_pc, cpu,
		       lock->task->comm, lock->previous,
		       lock->on_cpu, lock->base_file, lock->line_no);
		stuck = 1L << 36;
		printed = 1;
		goto try_again;
	}

	/* Exiting.  Got the lock.  */
	lock->on_cpu = cpu;
	lock->previous = inline_pc;
	lock->task = current;
	lock->base_file = base_file;
	lock->line_no = line_no;

	if (printed) {
		printk(KERN_WARNING
		       "%s:%d spinlock grabbed in %s at %p(%d) %ld ticks\n",
		       base_file, line_no, current->comm, inline_pc, cpu,
		       jiffies - started);
	}
}

int
debug_spin_trylock(spinlock_t * lock, const char *base_file, int line_no)
{
	int ret;
	if ((ret = !test_and_set_bit(0, lock))) {
		lock->on_cpu = smp_processor_id();
		lock->previous = __builtin_return_address(0);
		lock->task = current;
	} else {
		lock->base_file = base_file;
		lock->line_no = line_no;
	}
	return ret;
}
#endif /* CONFIG_DEBUG_SPINLOCK */

#ifdef CONFIG_DEBUG_RWLOCK
void _raw_write_lock(rwlock_t * lock)
{
	long regx, regy;
	int stuck_lock, stuck_reader;
	void *inline_pc = __builtin_return_address(0);

 try_again:

	stuck_lock = 1<<30;
	stuck_reader = 1<<30;

	__asm__ __volatile__(
	"1:	ldl_l	%1,%0\n"
	"	blbs	%1,6f\n"
	"	blt	%1,8f\n"
	"	mov	1,%1\n"
	"	stl_c	%1,%0\n"
	"	beq	%1,6f\n"
	"4:	mb\n"
	".subsection 2\n"
	"6:	blt	%3,4b	# debug\n"
	"	subl	%3,1,%3	# debug\n"
	"	ldl	%1,%0\n"
	"	blbs	%1,6b\n"
	"8:	blt	%4,4b	# debug\n"
	"	subl	%4,1,%4	# debug\n"
	"	ldl	%1,%0\n"
	"	blt	%1,8b\n"
	"	br	1b\n"
	".previous"
	: "=m" (*(volatile int *)lock), "=&r" (regx), "=&r" (regy),
	  "=&r" (stuck_lock), "=&r" (stuck_reader)
	: "0" (*(volatile int *)lock), "3" (stuck_lock), "4" (stuck_reader)
	: "memory");

	if (stuck_lock < 0) {
		printk(KERN_WARNING "write_lock stuck at %p\n", inline_pc);
		goto try_again;
	}
	if (stuck_reader < 0) {
		printk(KERN_WARNING "write_lock stuck on readers at %p\n",
		       inline_pc);
		goto try_again;
	}
}

void _raw_read_lock(rwlock_t * lock)
{
	long regx;
	int stuck_lock;
	void *inline_pc = __builtin_return_address(0);

 try_again:

	stuck_lock = 1<<30;

	__asm__ __volatile__(
	"1:	ldl_l	%1,%0;"
	"	blbs	%1,6f;"
	"	subl	%1,2,%1;"
	"	stl_c	%1,%0;"
	"	beq	%1,6f;"
	"4:	mb\n"
	".subsection 2\n"
	"6:	ldl	%1,%0;"
	"	blt	%2,4b	# debug\n"
	"	subl	%2,1,%2	# debug\n"
	"	blbs	%1,6b;"
	"	br	1b\n"
	".previous"
	: "=m" (*(volatile int *)lock), "=&r" (regx), "=&r" (stuck_lock)
	: "0" (*(volatile int *)lock), "2" (stuck_lock)
	: "memory");

	if (stuck_lock < 0) {
		printk(KERN_WARNING "read_lock stuck at %p\n", inline_pc);
		goto try_again;
	}
}
#endif /* CONFIG_DEBUG_RWLOCK */
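/*
 * Editorial aside (not part of the original file): in this debug rwlock
 * encoding the low bit of the 32-bit lock word is the writer bit (the
 * blbs instructions test it), and each reader subtracts 2, driving the
 * word negative (the blt instructions test that).  A sketch of decoding
 * the lock state, with a hypothetical helper name:
 */
#if 0
static int
rwlock_readers(int lockval)
{
	if (lockval & 1)
		return -1;		/* write-locked */
	return -lockval / 2;		/* 0 if free, else number of readers */
}
#endif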