voyager_smp.c
	}
	vic_irq_enable_mask[cpu] = 0;
	spin_unlock(&vic_irq_lock);

	put_cpu_no_resched();
}

/*
 * CPU halt call-back
 */
static void
smp_stop_cpu_function(void *dummy)
{
	VDEBUG(("VOYAGER SMP: CPU%d is STOPPING\n", smp_processor_id()));
	cpu_clear(smp_processor_id(), cpu_online_map);
	local_irq_disable();
	for(;;)
		halt();
}

static DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	volatile unsigned long started;
	volatile unsigned long finished;
	int wait;
};

static struct call_data_struct *call_data;

/* execute a thread on a new CPU.  The function to be called must be
 * previously set up.  This is used to schedule a function for
 * execution on all CPUs - set up the function then broadcast a
 * function_interrupt CPI to come here on each CPU */
static void
smp_call_function_interrupt(void)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	/* must take copy of wait because call_data may be replaced
	 * unless the function is waiting for us to finish */
	int wait = call_data->wait;
	__u8 cpu = smp_processor_id();

	/*
	 * Notify initiating CPU that I've grabbed the data and am
	 * about to execute the function
	 */
	mb();
	if(!test_and_clear_bit(cpu, &call_data->started)) {
		/* If the bit wasn't set, this could be a replay */
		printk(KERN_WARNING "VOYAGER SMP: CPU %d received call function with no call pending\n", cpu);
		return;
	}
	/*
	 * At this point the info structure may be out of scope unless wait==1
	 */
	irq_enter();
	(*func)(info);
	__get_cpu_var(irq_stat).irq_call_count++;
	irq_exit();
	if (wait) {
		mb();
		clear_bit(cpu, &call_data->finished);
	}
}

static int
voyager_smp_call_function_mask (cpumask_t cpumask,
				void (*func) (void *info), void *info,
				int wait)
{
	struct call_data_struct data;
	u32 mask = cpus_addr(cpumask)[0];

	mask &= ~(1<<smp_processor_id());

	if (!mask)
		return 0;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	data.started = mask;
	data.wait = wait;
	if (wait)
		data.finished = mask;

	spin_lock(&call_lock);
	call_data = &data;
	wmb();
	/* Send a message to all other CPUs and wait for them to respond */
	send_CPI(mask, VIC_CALL_FUNCTION_CPI);

	/* Wait for response */
	while (data.started)
		barrier();

	if (wait)
		while (data.finished)
			barrier();

	spin_unlock(&call_lock);

	return 0;
}

/* Sorry about the name.  In an APIC based system, the APICs
 * themselves are programmed to send a timer interrupt.  This is used
 * by linux to reschedule the processor.  Voyager doesn't have this,
 * so we use the system clock to interrupt one processor, which in
 * turn, broadcasts a timer CPI to all the others --- we receive that
 * CPI here.  We don't use this actually for counting so losing
 * ticks doesn't matter
 *
 * FIXME: For those CPUs which actually have a local APIC, we could
 * try to use it to trigger this interrupt instead of having to
 * broadcast the timer tick.
 * Unfortunately, all my pentium DYADs have
 * no local APIC, so I can't do this
 *
 * This function is currently a placeholder and is unused in the code */
fastcall void
smp_apic_timer_interrupt(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	wrapper_smp_local_timer_interrupt();
	set_irq_regs(old_regs);
}

/* All of the QUAD interrupt GATES */
fastcall void
smp_qic_timer_interrupt(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	ack_QIC_CPI(QIC_TIMER_CPI);
	wrapper_smp_local_timer_interrupt();
	set_irq_regs(old_regs);
}

fastcall void
smp_qic_invalidate_interrupt(struct pt_regs *regs)
{
	ack_QIC_CPI(QIC_INVALIDATE_CPI);
	smp_invalidate_interrupt();
}

fastcall void
smp_qic_reschedule_interrupt(struct pt_regs *regs)
{
	ack_QIC_CPI(QIC_RESCHEDULE_CPI);
	smp_reschedule_interrupt();
}

fastcall void
smp_qic_enable_irq_interrupt(struct pt_regs *regs)
{
	ack_QIC_CPI(QIC_ENABLE_IRQ_CPI);
	smp_enable_irq_interrupt();
}

fastcall void
smp_qic_call_function_interrupt(struct pt_regs *regs)
{
	ack_QIC_CPI(QIC_CALL_FUNCTION_CPI);
	smp_call_function_interrupt();
}

fastcall void
smp_vic_cpi_interrupt(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	__u8 cpu = smp_processor_id();

	if(is_cpu_quad())
		ack_QIC_CPI(VIC_CPI_LEVEL0);
	else
		ack_VIC_CPI(VIC_CPI_LEVEL0);

	if(test_and_clear_bit(VIC_TIMER_CPI, &vic_cpi_mailbox[cpu]))
		wrapper_smp_local_timer_interrupt();
	if(test_and_clear_bit(VIC_INVALIDATE_CPI, &vic_cpi_mailbox[cpu]))
		smp_invalidate_interrupt();
	if(test_and_clear_bit(VIC_RESCHEDULE_CPI, &vic_cpi_mailbox[cpu]))
		smp_reschedule_interrupt();
	if(test_and_clear_bit(VIC_ENABLE_IRQ_CPI, &vic_cpi_mailbox[cpu]))
		smp_enable_irq_interrupt();
	if(test_and_clear_bit(VIC_CALL_FUNCTION_CPI, &vic_cpi_mailbox[cpu]))
		smp_call_function_interrupt();
	set_irq_regs(old_regs);
}

static void
do_flush_tlb_all(void* info)
{
	unsigned long cpu = smp_processor_id();

	__flush_tlb_all();
	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY)
		leave_mm(cpu);
}

/* flush the TLB of every active CPU in the system */
void
flush_tlb_all(void)
{
	on_each_cpu(do_flush_tlb_all, 0, 1, 1);
}

/* used to set up the trampoline for other CPUs when the memory manager
 * is sorted out */
void __init
smp_alloc_memory(void)
{
	trampoline_base = (__u32)alloc_bootmem_low_pages(PAGE_SIZE);
	if(__pa(trampoline_base) >= 0x93000)
		BUG();
}

/* send a reschedule CPI to one CPU by physical CPU number*/
static void
voyager_smp_send_reschedule(int cpu)
{
	send_one_CPI(cpu, VIC_RESCHEDULE_CPI);
}

int
hard_smp_processor_id(void)
{
	__u8 i;
	__u8 cpumask = inb(VIC_PROC_WHO_AM_I);
	if((cpumask & QUAD_IDENTIFIER) == QUAD_IDENTIFIER)
		return cpumask & 0x1F;
	for(i = 0; i < 8; i++) {
		if(cpumask & (1<<i))
			return i;
	}
	printk("** WARNING ** Illegal cpuid returned by VIC: %d", cpumask);
	return 0;
}

int
safe_smp_processor_id(void)
{
	return hard_smp_processor_id();
}

/* broadcast a halt to all other CPUs */
static void
voyager_smp_send_stop(void)
{
	smp_call_function(smp_stop_cpu_function, NULL, 1, 1);
}

/* this function is triggered in time.c when a clock tick fires
 * we need to re-broadcast the tick to all CPUs */
void
smp_vic_timer_interrupt(void)
{
	send_CPI_allbutself(VIC_TIMER_CPI);
	smp_local_timer_interrupt();
}

/* local (per CPU) timer interrupt.  It does both profiling and
 * process statistics/rescheduling.
 *
 * We do profiling in every local tick, statistics/rescheduling
 * happen only every 'profiling multiplier' ticks.  The default
 * multiplier is 1 and it can be changed by writing the new multiplier
 * value into /proc/profile.
 */
void
smp_local_timer_interrupt(void)
{
	int cpu = smp_processor_id();
	long weight;

	profile_tick(CPU_PROFILING);
	if (--per_cpu(prof_counter, cpu) <= 0) {
		/*
		 * The multiplier may have changed since the last time we got
		 * to this point as a result of the user writing to
		 * /proc/profile. In this case we need to adjust the APIC
		 * timer accordingly.
		 *
		 * Interrupts are already masked off at this point.
		 */
		per_cpu(prof_counter, cpu) = per_cpu(prof_multiplier, cpu);
		if (per_cpu(prof_counter, cpu) !=
					per_cpu(prof_old_multiplier, cpu)) {
			/* FIXME: need to update the vic timer tick here */
			per_cpu(prof_old_multiplier, cpu) =
						per_cpu(prof_counter, cpu);
		}

		update_process_times(user_mode_vm(get_irq_regs()));
	}

	if( ((1<<cpu) & voyager_extended_vic_processors) == 0)
		/* only extended VIC processors participate in
		 * interrupt distribution */
		return;

	/*
	 * We take the 'long' return path, and there every subsystem
	 * grabs the appropriate locks (kernel lock/ irq lock).
	 *
	 * we might want to decouple profiling from the 'long path',
	 * and do the profiling totally in assembly.
	 *
	 * Currently this isn't too much of an issue (performance wise),
	 * we can take more than 100K local irqs per second on a 100 MHz P5.
	 */

	if((++vic_tick[cpu] & 0x7) != 0)
		return;
	/* get here every 16 ticks (about every 1/6 of a second) */

	/* Change our priority to give someone else a chance at getting
	 * the IRQ. The algorithm goes like this:
	 *
	 * In the VIC, the dynamically routed interrupt is always
	 * handled by the lowest priority eligible (i.e. receiving
	 * interrupts) CPU.  If >1 eligible CPUs are equal lowest, the
	 * lowest processor number gets it.
	 *
	 * The priority of a CPU is controlled by a special per-CPU
	 * VIC priority register which is 3 bits wide 0 being lowest
	 * and 7 highest priority..
	 *
	 * Therefore we subtract the average number of interrupts from
	 * the number we've fielded.  If this number is negative, we
	 * lower the activity count and if it is positive, we raise
	 * it.
	 *
	 * I'm afraid this still leads to odd looking interrupt counts:
	 * the totals are all roughly equal, but the individual ones
	 * look rather skewed.
	 *
	 * FIXME: This algorithm is total crap when mixed with SMP
	 * affinity code since we now try to even up the interrupt
	 * counts when an affinity binding is keeping them on a
	 * particular CPU*/
	weight = (vic_intr_count[cpu]*voyager_extended_cpus
		  - vic_intr_total) >> 4;
	weight += 4;
	if(weight > 7)
		weight = 7;
	if(weight < 0)
		weight = 0;

	outb((__u8)weight, VIC_PRIORITY_REGISTER);

#ifdef VOYAGER_DEBUG
	if((vic_tick[cpu] & 0xFFF) == 0) {
		/* print this message roughly every 25 secs */
		printk("VOYAGER SMP: vic_tick[%d] = %lu, weight = %ld\n",
		       cpu, vic_tick[cpu], weight);
	}
#endif
}

/* setup the profiling timer */
int
setup_profiling_timer(unsigned int multiplier)
{
	int i;

	if ( (!multiplier))
		return -EINVAL;

	/*
	 * Set the new multiplier for each CPU. CPUs don't start using the
	 * new values until the next timer interrupt in which they do process
	 * accounting.
	 */
	for (i = 0; i < NR_CPUS; ++i)
		per_cpu(prof_multiplier, i) = multiplier;

	return 0;
}

/* This is a bit of a mess, but forced on us by the genirq changes
 * there's no genirq handler that really does what voyager wants
 * so hack it up with the simple IRQ handler */
static void fastcall
handle_vic_irq(unsigned int irq, struct irq_desc *desc)
{
	before_handle_vic_irq(irq);
	handle_simple_irq(irq, desc);
	after_handle_vic_irq(irq);
}

/* The CPIs are handled in the per cpu 8259s, so they must be
 * enabled to be received: FIX: enabling the CPIs in the early
 * boot sequence interferes with bug checking; enable them later
 * on in smp_init */
#define VIC_SET_GATE(cpi, vector) \
	set_intr_gate((cpi) + VIC_DEFAULT_CPI_BASE, (vector))
#define QIC_SET_GATE(cpi, vector) \
	set_intr_gate((cpi) + QIC_DEFAULT_CPI_BASE, (vector))

void __init
smp_intr_init(void)
{
	int i;

	/* initialize the per cpu irq mask to all disabled */
	for(i = 0; i < NR_CPUS; i++)
		vic_irq_mask[i] = 0xFFFF;

	VIC_SET_GATE(VIC_CPI_LEVEL0, vic_cpi_interrupt);

	VIC_SET_GATE(VIC_SYS_INT, vic_sys_interrupt);
	VIC_SET_GATE(VIC_CMN_INT, vic_cmn_interrupt);

	QIC_SET_GATE(QIC_TIMER_CPI, qic_timer_interrupt);
	QIC_SET_GATE(QIC_INVALIDATE_CPI, qic_invalidate_interrupt);
	QIC_SET_GATE(QIC_RESCHEDULE_CPI, qic_reschedule_interrupt);
	QIC_SET_GATE(QIC_ENABLE_IRQ_CPI, qic_enable_irq_interrupt);
	QIC_SET_GATE(QIC_CALL_FUNCTION_CPI, qic_call_function_interrupt);

	/* now put the VIC descriptor into the first 48 IRQs
	 *
	 * This is for later: first 16 correspond to PC IRQs; next 16
	 * are Primary MC IRQs and final 16 are Secondary MC IRQs */
	for(i = 0; i < 48; i++)
		set_irq_chip_and_handler(i, &vic_chip, handle_vic_irq);
}

/* send a CPI at level cpi to a set of cpus in cpuset (set 1 bit per
 * processor to receive CPI */
static void
send_CPI(__u32 cpuset, __u8 cpi)
{
	int cpu;
	__u32 quad_cpuset = (cpuset & voyager_quad_processors);

	if(cpi < VIC_START_FAKE_CPI) {
		/* fake CPI are only used for booting, so send to the
		 * extended quads as well---Quads must be VIC booted */
		outb((__u8)(cpuset), VIC_CPI_Registers[cpi]);
		return;
	}
	if(quad_cpuset)
		send_QIC_CPI(quad_cpuset, cpi);
	cpuset &= ~quad_cpuset;
	cpuset &= 0xff;		/* only first 8 CPUs valid for VIC CPI */
	if(cpuset == 0)
		return;
	for_each_online_cpu(cpu) {
		if(cpuset & (1<<cpu))
			set_bit(cpi, &vic_cpi_mailbox[cpu]);
	}
	if(cpuset)
		outb((__u8)cpuset, VIC_CPI_Registers[VIC_CPI_LEVEL0]);
}

/* Acknowledge receipt of CPI in the QIC, clear in QIC hardware and
 * set the cache line to shared by reading it.
 *
 * DON'T make this inline otherwise the cache line read will be
 * optimised away
 * */
static int
ack_QIC_CPI(__u8 cpi) {
	__u8 cpu = hard_smp_processor_id();

	cpi &= 7;

	outb(1<<cpi, QIC_INTERRUPT_CLEAR1);
	return voyager_quad_cpi_addr[cpu]->qic_cpi[cpi].cpi;
}

static void
ack_special_QIC_CPI(__u8 cpi)
{
	switch(cpi) {
	case VIC_CMN_INT:
		outb(QIC_CMN_INT, QIC_INTERRUPT_CLEAR0);
		break;