📄 voyager_smp.c
	case VIC_SYS_INT:
		outb(QIC_SYS_INT, QIC_INTERRUPT_CLEAR0);
		break;
	}
	/* also clear at the VIC, just in case (nop for non-extended proc) */
	ack_VIC_CPI(cpi);
}

/* Acknowledge receipt of CPI in the VIC (essentially an EOI) */
static void
ack_VIC_CPI(__u8 cpi)
{
#ifdef VOYAGER_DEBUG
	unsigned long flags;
	__u16 isr;
	__u8 cpu = smp_processor_id();

	local_irq_save(flags);
	isr = vic_read_isr();
	if((isr & (1 << (cpi & 7))) == 0) {
		printk("VOYAGER SMP: CPU%d lost CPI%d\n", cpu, cpi);
	}
#endif
	/* send specific EOI; the two system interrupts have
	 * bit 4 set for a separate vector but behave as the
	 * corresponding 3 bit intr */
	outb_p(0x60 | (cpi & 7), 0x20);

#ifdef VOYAGER_DEBUG
	if((vic_read_isr() & (1 << (cpi & 7))) != 0) {
		printk("VOYAGER SMP: CPU%d still asserting CPI%d\n",
		       cpu, cpi);
	}
	local_irq_restore(flags);
#endif
}

/* cribbed with thanks from irq.c */
#define __byte(x,y)	(((unsigned char *)&(y))[x])
#define cached_21(cpu)	(__byte(0, vic_irq_mask[cpu]))
#define cached_A1(cpu)	(__byte(1, vic_irq_mask[cpu]))

static unsigned int
startup_vic_irq(unsigned int irq)
{
	unmask_vic_irq(irq);

	return 0;
}

/* The enable and disable routines.  This is where we run into
 * conflicting architectural philosophy.  Fundamentally, the voyager
 * architecture does not expect to have to disable interrupts globally
 * (the IRQ controllers belong to each CPU).  The processor masquerade
 * which is used to start the system shouldn't be used in a running OS
 * since it will cause great confusion if two separate CPUs drive to
 * the same IRQ controller (I know, I've tried it).
 *
 * The solution is a variant on the NCR lazy SPL design:
 *
 * 1) To disable an interrupt, do nothing (other than set the
 *    IRQ_DISABLED flag).  This dares the interrupt actually to arrive.
 *
 * 2) If the interrupt dares to come in, raise the local mask against
 *    it (this will result in all the CPU masks being raised
 *    eventually).
 *
 * 3) To enable the interrupt, lower the mask on the local CPU and
 *    broadcast an Interrupt enable CPI which causes all other CPUs to
 *    adjust their masks accordingly. */
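/* Illustrative walk-through of the lazy scheme above (added note, not
 * in the original source): suppose IRQ 9 is disabled while CPU1 has
 * it in its affinity mask.  mask_vic_irq() does nothing beyond the
 * core's setting of IRQ_DISABLED.  If IRQ 9 then arrives on CPU1,
 * before_handle_vic_irq() sees IRQ_DISABLED, masks the line in CPU1's
 * local controller and marks it IRQ_REPLAY instead of running the
 * handler.  When the IRQ is re-enabled, unmask_vic_irq() lowers the
 * local mask directly and sends VIC_ENABLE_IRQ_CPI to every other CPU
 * that masked the line lazily in the meantime. */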
static void
unmask_vic_irq(unsigned int irq)
{
	/* linux doesn't do processor-irq affinity, so enable on
	 * all CPUs we know about */
	int cpu = smp_processor_id(), real_cpu;
	__u16 mask = (1 << irq);
	__u32 processorList = 0;
	unsigned long flags;

	VDEBUG(("VOYAGER: unmask_vic_irq(%d) CPU%d affinity 0x%lx\n",
		irq, cpu, cpu_irq_affinity[cpu]));
	spin_lock_irqsave(&vic_irq_lock, flags);
	for_each_online_cpu(real_cpu) {
		if(!(voyager_extended_vic_processors & (1 << real_cpu)))
			continue;
		if(!(cpu_irq_affinity[real_cpu] & mask)) {
			/* irq has no affinity for this CPU, ignore */
			continue;
		}
		if(real_cpu == cpu) {
			enable_local_vic_irq(irq);
		} else if(vic_irq_mask[real_cpu] & mask) {
			vic_irq_enable_mask[real_cpu] |= mask;
			processorList |= (1 << real_cpu);
		}
	}
	spin_unlock_irqrestore(&vic_irq_lock, flags);
	if(processorList)
		send_CPI(processorList, VIC_ENABLE_IRQ_CPI);
}

static void
mask_vic_irq(unsigned int irq)
{
	/* lazy disable, do nothing */
}

static void
enable_local_vic_irq(unsigned int irq)
{
	__u8 cpu = smp_processor_id();
	__u16 mask = ~(1 << irq);
	__u16 old_mask = vic_irq_mask[cpu];

	vic_irq_mask[cpu] &= mask;
	if(vic_irq_mask[cpu] == old_mask)
		return;

	VDEBUG(("VOYAGER DEBUG: Enabling irq %d in hardware on CPU %d\n",
		irq, cpu));

	if (irq & 8) {
		outb_p(cached_A1(cpu), 0xA1);
		(void)inb_p(0xA1);
	} else {
		outb_p(cached_21(cpu), 0x21);
		(void)inb_p(0x21);
	}
}

static void
disable_local_vic_irq(unsigned int irq)
{
	__u8 cpu = smp_processor_id();
	__u16 mask = (1 << irq);
	__u16 old_mask = vic_irq_mask[cpu];

	if(irq == 7)
		return;

	vic_irq_mask[cpu] |= mask;
	if(old_mask == vic_irq_mask[cpu])
		return;

	VDEBUG(("VOYAGER DEBUG: Disabling irq %d in hardware on CPU %d\n",
		irq, cpu));

	if (irq & 8) {
		outb_p(cached_A1(cpu), 0xA1);
		(void)inb_p(0xA1);
	} else {
		outb_p(cached_21(cpu), 0x21);
		(void)inb_p(0x21);
	}
}
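/* Added note (not in the original source): vic_irq_mask[cpu] is a
 * 16-bit shadow of both 8259 interrupt mask registers.  cached_21()
 * is its low byte, written to the master controller's mask port at
 * 0x21; cached_A1() is the high byte for the slave at 0xA1 ("irq & 8"
 * picks the controller).  The dummy inb_p() after each write reads
 * the mask register back, apparently to make sure the write has
 * reached the controller before the routine returns. */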
/* The VIC is level triggered, so the ack can only be issued after the
 * interrupt completes.  However, we do Voyager lazy interrupt
 * handling here: It is an extremely expensive operation to mask an
 * interrupt in the vic, so we merely set a flag (IRQ_DISABLED).  If
 * this interrupt actually comes in, then we mask and ack here to push
 * the interrupt off to another CPU */
static void
before_handle_vic_irq(unsigned int irq)
{
	irq_desc_t *desc = irq_desc + irq;
	__u8 cpu = smp_processor_id();

	_raw_spin_lock(&vic_irq_lock);
	vic_intr_total++;
	vic_intr_count[cpu]++;

	if(!(cpu_irq_affinity[cpu] & (1 << irq))) {
		/* The irq is not in our affinity mask, push it off
		 * onto another CPU */
		VDEBUG(("VOYAGER DEBUG: affinity triggered disable of irq %d on cpu %d\n",
			irq, cpu));
		disable_local_vic_irq(irq);
		/* set IRQ_INPROGRESS to prevent the handler in irq.c from
		 * actually calling the interrupt routine */
		desc->status |= IRQ_REPLAY | IRQ_INPROGRESS;
	} else if(desc->status & IRQ_DISABLED) {
		/* Damn, the interrupt actually arrived, do the lazy
		 * disable thing.  The interrupt routine in irq.c will
		 * not handle an IRQ_DISABLED interrupt, so nothing more
		 * need be done here */
		VDEBUG(("VOYAGER DEBUG: lazy disable of irq %d on CPU %d\n",
			irq, cpu));
		disable_local_vic_irq(irq);
		desc->status |= IRQ_REPLAY;
	} else {
		desc->status &= ~IRQ_REPLAY;
	}

	_raw_spin_unlock(&vic_irq_lock);
}

/* Finish the VIC interrupt: basically mask */
static void
after_handle_vic_irq(unsigned int irq)
{
	irq_desc_t *desc = irq_desc + irq;

	_raw_spin_lock(&vic_irq_lock);
	{
		unsigned int status = desc->status & ~IRQ_INPROGRESS;
#ifdef VOYAGER_DEBUG
		__u16 isr;
#endif

		desc->status = status;
		if ((status & IRQ_DISABLED))
			disable_local_vic_irq(irq);
#ifdef VOYAGER_DEBUG
		/* DEBUG: before we ack, check what's in progress */
		isr = vic_read_isr();
		if((isr & (1 << irq) && !(status & IRQ_REPLAY)) == 0) {
			__u8 cpu = smp_processor_id();
			__u8 real_cpu;

			printk("VOYAGER SMP: CPU%d lost interrupt %d\n",
			       cpu, irq);
			for_each_possible_cpu(real_cpu) {
				outb(VIC_CPU_MASQUERADE_ENABLE | real_cpu,
				     VIC_PROCESSOR_ID);
				isr = vic_read_isr();
				if(isr & (1 << irq)) {
					printk("VOYAGER SMP: CPU%d ack irq %d\n",
					       real_cpu, irq);
					ack_vic_irq(irq);
				}
				outb(cpu, VIC_PROCESSOR_ID);
			}
		}
#endif /* VOYAGER_DEBUG */
		/* as soon as we ack, the interrupt is eligible for
		 * receipt by another CPU so everything must be in
		 * order here */
		ack_vic_irq(irq);
		if(status & IRQ_REPLAY) {
			/* replay is set if we disable the interrupt
			 * in the before_handle_vic_irq() routine, so
			 * clear the in progress bit here to allow the
			 * next CPU to handle this correctly */
			desc->status &= ~(IRQ_REPLAY | IRQ_INPROGRESS);
		}
#ifdef VOYAGER_DEBUG
		isr = vic_read_isr();
		if((isr & (1 << irq)) != 0)
			printk("VOYAGER SMP: after_handle_vic_irq() after ack irq=%d, isr=0x%x\n",
			       irq, isr);
#endif /* VOYAGER_DEBUG */
	}
	_raw_spin_unlock(&vic_irq_lock);

	/* All code after this point is out of the main path - the IRQ
	 * may be intercepted by another CPU if reasserted */
}
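/* Added note (not in the original source): because the VIC lines are
 * level triggered, the specific EOI issued by ack_vic_irq() above is
 * the hand-off point -- once acked, a still-asserted line is
 * immediately eligible for delivery again.  A CPU that lazily masked
 * the line will not see the reassertion, so it lands on another CPU
 * whose local mask is still open; hence the insistence above that
 * everything be in order before the ack. */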
/* Linux processor - interrupt affinity manipulations.
 *
 * For each processor, we maintain a 32 bit irq affinity mask.
 * Initially it is set to all 1's so every processor accepts every
 * interrupt.  In this call, we change the processor's affinity mask:
 *
 * Change from enable to disable:
 *
 * If the interrupt ever comes in to the processor, we will disable it
 * and ack it to push it off to another CPU, so just accept the mask here.
 *
 * Change from disable to enable:
 *
 * change the mask and then do an interrupt enable CPI to re-enable on
 * the selected processors */
void
set_vic_irq_affinity(unsigned int irq, cpumask_t mask)
{
	/* Only extended processors handle interrupts */
	unsigned long real_mask;
	unsigned long irq_mask = 1 << irq;
	int cpu;

	real_mask = cpus_addr(mask)[0] & voyager_extended_vic_processors;

	if(cpus_addr(mask)[0] == 0)
		/* can't have no CPUs to accept the interrupt -- extremely
		 * bad things will happen */
		return;

	if(irq == 0)
		/* can't change the affinity of the timer IRQ.  This
		 * is due to the constraint in the voyager
		 * architecture that the CPI also comes in on an IRQ
		 * line and we have chosen IRQ0 for this.  If you
		 * raise the mask on this interrupt, the processor
		 * will no longer be able to accept VIC CPIs */
		return;

	if(irq >= 32)
		/* You can only have 32 interrupts in a voyager system
		 * (and 32 only if you have a secondary microchannel
		 * bus) */
		return;

	for_each_online_cpu(cpu) {
		unsigned long cpu_mask = 1 << cpu;

		if(cpu_mask & real_mask) {
			/* enable the interrupt for this cpu */
			cpu_irq_affinity[cpu] |= irq_mask;
		} else {
			/* disable the interrupt for this cpu */
			cpu_irq_affinity[cpu] &= ~irq_mask;
		}
	}
	/* this is magic, we now have the correct affinity maps, so
	 * enable the interrupt.  This will send an enable CPI to
	 * those CPUs who need to enable it in their local masks,
	 * causing them to correct for the new affinity.  If the
	 * interrupt is currently globally disabled, it will simply be
	 * disabled again as it comes in (voyager lazy disable).  If
	 * the affinity map is tightened to disable the interrupt on a
	 * cpu, it will be pushed off when it comes in */
	unmask_vic_irq(irq);
}
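/* Usage sketch (illustrative, not from the original file): to direct
 * IRQ 9 at CPU 2 only, build a single-CPU cpumask and hand it over:
 *
 *	cpumask_t m = CPU_MASK_NONE;
 *
 *	cpu_set(2, m);
 *	set_vic_irq_affinity(9, m);
 *
 * CPU 2 gets its bit set in cpu_irq_affinity[2] (and an enable CPI if
 * it had lazily masked the line); every other CPU will lazily mask
 * IRQ 9 the next time it fires there. */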
static void
ack_vic_irq(unsigned int irq)
{
	if (irq & 8) {
		outb(0x62, 0x20);	/* Specific EOI to cascade */
		outb(0x60 | (irq & 7), 0xA0);
	} else {
		outb(0x60 | (irq & 7), 0x20);
	}
}

/* enable the CPIs.  In the VIC, the CPIs are delivered by the 8259
 * but are not vectored by it.  This means that the 8259 mask must be
 * lowered to receive them */
static __init void
vic_enable_cpi(void)
{
	__u8 cpu = smp_processor_id();

	/* just take a copy of the current mask (nop for boot cpu) */
	vic_irq_mask[cpu] = vic_irq_mask[boot_cpu_id];

	enable_local_vic_irq(VIC_CPI_LEVEL0);
	enable_local_vic_irq(VIC_CPI_LEVEL1);
	/* for sys int and cmn int */
	enable_local_vic_irq(7);

	if(is_cpu_quad()) {
		outb(QIC_DEFAULT_MASK0, QIC_MASK_REGISTER0);
		outb(QIC_CPI_ENABLE, QIC_MASK_REGISTER1);
		VDEBUG(("VOYAGER SMP: QIC ENABLE CPI: CPU%d: MASK 0x%x\n",
			cpu, QIC_CPI_ENABLE));
	}

	VDEBUG(("VOYAGER SMP: ENABLE CPI: CPU%d: MASK 0x%x\n",
		cpu, vic_irq_mask[cpu]));
}

void
voyager_smp_dump(void)
{
	int old_cpu = smp_processor_id(), cpu;

	/* dump the interrupt masks of each processor */
	for_each_online_cpu(cpu) {
		__u16 imr, isr, irr;
		unsigned long flags;

		local_irq_save(flags);
		outb(VIC_CPU_MASQUERADE_ENABLE | cpu, VIC_PROCESSOR_ID);
		imr = (inb(0xa1) << 8) | inb(0x21);
		outb(0x0a, 0xa0);	/* OCW3: select IRR for reading */
		irr = inb(0xa0) << 8;
		outb(0x0a, 0x20);
		irr |= inb(0x20);
		outb(0x0b, 0xa0);	/* OCW3: select ISR for reading */
		isr = inb(0xa0) << 8;
		outb(0x0b, 0x20);
		isr |= inb(0x20);
		outb(old_cpu, VIC_PROCESSOR_ID);
		local_irq_restore(flags);
		printk("\tCPU%d: mask=0x%x, IMR=0x%x, IRR=0x%x, ISR=0x%x\n",
		       cpu, vic_irq_mask[cpu], imr, irr, isr);
#if 0
		/* These lines are put in to try to unstick an un-ack'd irq */
		if(isr != 0) {
			int irq;

			for(irq = 0; irq < 16; irq++) {
				if(isr & (1 << irq)) {
					printk("\tCPU%d: ack irq %d\n",
					       cpu, irq);
					local_irq_save(flags);
					outb(VIC_CPU_MASQUERADE_ENABLE | cpu,
					     VIC_PROCESSOR_ID);
					ack_vic_irq(irq);
					outb(old_cpu, VIC_PROCESSOR_ID);
					local_irq_restore(flags);
				}
			}
		}
#endif
	}
}

void
smp_voyager_power_off(void *dummy)
{
	if(smp_processor_id() == boot_cpu_id)
		voyager_power_off();
	else
		smp_stop_cpu_function(NULL);
}

static void __init
voyager_smp_prepare_cpus(unsigned int max_cpus)
{
	/* FIXME: ignore max_cpus for now */
	smp_boot_cpus();
}

static void __cpuinit voyager_smp_prepare_boot_cpu(void)
{
	init_gdt(smp_processor_id());
	switch_to_new_gdt();

	cpu_set(smp_processor_id(), cpu_online_map);
	cpu_set(smp_processor_id(), cpu_callout_map);
	cpu_set(smp_processor_id(), cpu_possible_map);
	cpu_set(smp_processor_id(), cpu_present_map);
}

static int __cpuinit
voyager_cpu_up(unsigned int cpu)
{
	/* This only works at boot for x86.  See "rewrite" above. */
	if (cpu_isset(cpu, smp_commenced_mask))
		return -ENOSYS;

	/* In case one didn't come up */
	if (!cpu_isset(cpu, cpu_callin_map))
		return -EIO;

	/* Unleash the CPU! */
	cpu_set(cpu, smp_commenced_mask);
	while (!cpu_isset(cpu, cpu_online_map))
		mb();

	return 0;
}

static void __init
voyager_smp_cpus_done(unsigned int max_cpus)
{
	zap_low_mappings();
}

void __init
smp_setup_processor_id(void)
{
	current_thread_info()->cpu = hard_smp_processor_id();
	x86_write_percpu(cpu_number, hard_smp_processor_id());
}

struct smp_ops smp_ops = {
	.smp_prepare_boot_cpu = voyager_smp_prepare_boot_cpu,
	.smp_prepare_cpus = voyager_smp_prepare_cpus,
	.cpu_up = voyager_cpu_up,
	.smp_cpus_done = voyager_smp_cpus_done,
	.smp_send_stop = voyager_smp_send_stop,
	.smp_send_reschedule = voyager_smp_send_reschedule,
	.smp_call_function_mask = voyager_smp_call_function_mask,
};
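/* Added note (not in the original source): smp_ops is the i386 SMP
 * indirection layer; generic code reaches these routines through the
 * wrappers in <asm/smp.h> (e.g. __cpu_up() calling smp_ops.cpu_up()),
 * which is how this Voyager implementation substitutes for the
 * default APIC-based one. */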