irq.c
	if ((unsigned char)cpu == global_irq_holder)
		return;

	count = MAXCOUNT;
again:
	br_write_lock(BR_GLOBALIRQ_LOCK);
	for (;;) {
		spinlock_t *lock;

		if (!irqs_running() &&
		    (local_bh_count(smp_processor_id()) ||
		     !spin_is_locked(&global_bh_lock)))
			break;

		br_write_unlock(BR_GLOBALIRQ_LOCK);
		lock = &__br_write_locks[BR_GLOBALIRQ_LOCK].lock;
		while (irqs_running() ||
		       spin_is_locked(lock) ||
		       (!local_bh_count(smp_processor_id()) &&
			spin_is_locked(&global_bh_lock))) {
			if (!--count) {
				show("get_irqlock");
				count = (~0 >> 1);
			}
			__sti();
			SYNC_OTHER_ULTRAS(cpu);
			__cli();
		}
		goto again;
	}

	global_irq_holder = cpu;
}

void __global_cli(void)
{
	unsigned long flags;

	__save_flags(flags);
	if(flags == 0) {
		int cpu = smp_processor_id();
		__cli();
		if (! local_irq_count(cpu))
			get_irqlock(cpu);
	}
}

void __global_sti(void)
{
	int cpu = smp_processor_id();

	if (! local_irq_count(cpu))
		release_irqlock(cpu);
	__sti();
}

unsigned long __global_save_flags(void)
{
	unsigned long flags, local_enabled, retval;

	__save_flags(flags);
	local_enabled = ((flags == 0) ? 1 : 0);
	retval = 2 + local_enabled;
	if (! local_irq_count(smp_processor_id())) {
		if (local_enabled)
			retval = 1;
		if (global_irq_holder == (unsigned char) smp_processor_id())
			retval = 0;
	}
	return retval;
}

void __global_restore_flags(unsigned long flags)
{
	switch (flags) {
	case 0:
		__global_cli();
		break;
	case 1:
		__global_sti();
		break;
	case 2:
		__cli();
		break;
	case 3:
		__sti();
		break;
	default:
	{
		unsigned long pc;
		__asm__ __volatile__("mov %%i7, %0" : "=r" (pc));
		printk("global_restore_flags: Bogon flags(%016lx) caller %016lx\n",
		       flags, pc);
	}
	}
}
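
/* A summary of the flags encoding used above, as inferred from
 * __global_save_flags() and the switch in __global_restore_flags():
 *
 *	0 - this CPU held the global IRQ lock (restore via __global_cli())
 *	1 - globally enabled, lock not held   (restore via __global_sti())
 *	2 - local interrupts were disabled    (restore via __cli())
 *	3 - local interrupts were enabled     (restore via __sti())
 */
#if 0
	/* Illustrative only: the save/restore pattern these helpers
	 * back, assuming the usual SMP mapping of save_flags() and
	 * restore_flags() onto the __global_*() routines.
	 */
	unsigned long flags;

	flags = __global_save_flags();	/* one of 0, 1, 2, 3 per the table */
	__global_cli();			/* acquire the global IRQ lock */
	/* ... critical section ... */
	__global_restore_flags(flags);	/* put things back as we found them */
#endif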
#endif /* CONFIG_SMP */

void catch_disabled_ivec(struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	struct ino_bucket *bucket = __bucket(*irq_work(cpu, 0));

	/* We can actually see this on Ultra/PCI PCI cards, which are bridges
	 * to other devices.  Here a single IMAP can enable potentially multiple
	 * unique interrupt sources (each of which has a unique ICLR register).
	 *
	 * So all we do is note that the IVEC arrived; when the IRQ is
	 * registered for real, the request_irq() code will check the pending
	 * bit and signal a local CPU interrupt for it.
	 */
#if 0
	printk("IVEC: Spurious interrupt vector (%x) received at (%016lx)\n",
	       bucket - &ivector_table[0], regs->tpc);
#endif
	*irq_work(cpu, 0) = 0;
	bucket->pending = 1;
}

/* Tune this... */
#define FORWARD_VOLUME		12

void handler_irq(int irq, struct pt_regs *regs)
{
	struct ino_bucket *bp, *nbp;
	int cpu = smp_processor_id();
#ifdef CONFIG_SMP
	int should_forward = (this_is_starfire == 0 &&
			      irq < 10 &&
			      current->pid != 0);
	unsigned int buddy = 0;

	/* 'cpu' is the MID (ie. UPAID), calculate the MID
	 * of our buddy.
	 */
	if (should_forward != 0) {
		buddy = cpu_number_map(cpu) + 1;
		if (buddy >= NR_CPUS ||
		    (buddy = cpu_logical_map(buddy)) == -1)
			buddy = cpu_logical_map(0);

		/* Voo-doo programming. */
		if (cpu_data[buddy].idle_volume < FORWARD_VOLUME)
			should_forward = 0;
		buddy <<= 26;
	}
#endif

#ifndef CONFIG_SMP
	/*
	 * Check for TICK_INT on level 14 softint.
	 */
	if ((irq == 14) && (get_softint() & (1UL << 0)))
		irq = 0;
#endif
	clear_softint(1 << irq);

	irq_enter(cpu, irq);
	kstat.irqs[cpu][irq]++;

	/* Sliiiick... */
#ifndef CONFIG_SMP
	bp = ((irq != 0) ?
	      __bucket(xchg32(irq_work(cpu, irq), 0)) :
	      &pil0_dummy_bucket);
#else
	bp = __bucket(xchg32(irq_work(cpu, irq), 0));
#endif
	for ( ; bp != NULL; bp = nbp) {
		unsigned char flags = bp->flags;

		nbp = __bucket(bp->irq_chain);
		if ((flags & IBF_ACTIVE) != 0) {
#ifdef CONFIG_PCI
			if ((flags & IBF_DMA_SYNC) != 0) {
				upa_readl(dma_sync_reg_table[bp->synctab_ent]);
				upa_readq(pci_dma_wsync);
			}
#endif
			if ((flags & IBF_MULTI) == 0) {
				struct irqaction *ap = bp->irq_info;
				ap->handler(__irq(bp), ap->dev_id, regs);
			} else {
				void **vector = (void **)bp->irq_info;
				int ent;

				for (ent = 0; ent < 4; ent++) {
					struct irqaction *ap = vector[ent];
					if (ap != NULL)
						ap->handler(__irq(bp), ap->dev_id, regs);
				}
			}

			/* Only the dummy bucket lacks IMAP/ICLR. */
			if (bp->pil != 0) {
#ifdef CONFIG_SMP
				/* Ok, here is what is going on:
				 * 1) Retargeting IRQs on Starfire is very
				 *    expensive so just forget about it on them.
				 * 2) Moving around very high priority interrupts
				 *    is a losing game.
				 * 3) If the current cpu is idle, interrupts are
				 *    useful work, so keep them here.  But do not
				 *    pass to our neighbour if he is not very idle.
				 */
				if (should_forward != 0) {
					/* Push it to our buddy. */
					should_forward = 0;
					upa_writel(buddy | IMAP_VALID, bp->imap);
				}
#endif
				upa_writel(ICLR_IDLE, bp->iclr);
			}
		} else
			bp->pending = 1;
	}
	irq_exit(cpu, irq);
}

#ifdef CONFIG_BLK_DEV_FD
extern void floppy_interrupt(int irq, void *dev_cookie, struct pt_regs *regs);

void sparc_floppy_irq(int irq, void *dev_cookie, struct pt_regs *regs)
{
	struct irqaction *action = *(irq + irq_action);
	struct ino_bucket *bucket;
	int cpu = smp_processor_id();

	irq_enter(cpu, irq);
	kstat.irqs[cpu][irq]++;

	*(irq_work(cpu, irq)) = 0;
	bucket = (struct ino_bucket *)action->mask;

	floppy_interrupt(irq, dev_cookie, regs);
	upa_writel(ICLR_IDLE, bucket->iclr);

	irq_exit(cpu, irq);
}
#endif

/* The following assumes that the branch lies before the place we
 * are branching to.  This is the case for a trap vector...
 * You have been warned.
 */
#define SPARC_BRANCH(dest_addr, inst_addr) \
          (0x10800000 | ((((dest_addr)-(inst_addr))>>2)&0x3fffff))

#define SPARC_NOP (0x01000000)
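
/* How that encoding works (a sketch based on the SPARC format-2
 * "Bicc" instruction layout, for illustration): 0x10800000 supplies
 * the fixed opcode bits of `ba' (branch always, no annul), and the
 * low 22 bits hold the displacement from the branch instruction to
 * its target, counted in 32-bit words (hence the >>2).  For example,
 * a handler sitting 0x420 bytes past its trap table entry encodes as:
 *
 *	0x10800000 | ((0x420 >> 2) & 0x3fffff) == 0x10800108
 *
 * which disassembles as a forward `ba' of 0x108 instructions.
 */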
static void install_fast_irq(unsigned int cpu_irq,
			     void (*handler)(int, void *, struct pt_regs *))
{
	extern unsigned long sparc64_ttable_tl0;
	unsigned long ttent = (unsigned long) &sparc64_ttable_tl0;
	unsigned int *insns;

	ttent += 0x820;
	ttent += (cpu_irq - 1) << 5;
	insns = (unsigned int *) ttent;
	insns[0] = SPARC_BRANCH(((unsigned long) handler),
				((unsigned long)&insns[0]));
	insns[1] = SPARC_NOP;
	__asm__ __volatile__("membar #StoreStore; flush %0" : : "r" (ttent));
}

int request_fast_irq(unsigned int irq,
		     void (*handler)(int, void *, struct pt_regs *),
		     unsigned long irqflags, const char *name, void *dev_id)
{
	struct irqaction *action;
	struct ino_bucket *bucket = __bucket(irq);
	unsigned long flags;

	/* No pil0 dummy buckets allowed here. */
	if (bucket < &ivector_table[0] ||
	    bucket >= &ivector_table[NUM_IVECS]) {
		unsigned int *caller;

		__asm__ __volatile__("mov %%i7, %0" : "=r" (caller));
		printk(KERN_CRIT "request_fast_irq: Old style IRQ registry attempt "
		       "from %p, irq %08x.\n", caller, irq);
		return -EINVAL;
	}

	/* Only IMAP style interrupts can be registered as fast. */
	if(bucket->pil == 0)
		return -EINVAL;

	if(!handler)
		return -EINVAL;

	if ((bucket->pil == 0) || (bucket->pil == 14)) {
		printk("request_fast_irq: Trying to register shared IRQ 0 or 14.\n");
		return -EBUSY;
	}

	action = *(bucket->pil + irq_action);
	if(action) {
		if(action->flags & SA_SHIRQ)
			panic("Trying to register fast irq when already shared.\n");
		if(irqflags & SA_SHIRQ)
			panic("Trying to register fast irq as shared.\n");
		printk("request_fast_irq: Trying to register yet already owned.\n");
		return -EBUSY;
	}

	save_and_cli(flags);
	if(irqflags & SA_STATIC_ALLOC) {
		if(static_irq_count < MAX_STATIC_ALLOC)
			action = &static_irqaction[static_irq_count++];
		else
			printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed "
			       "using kmalloc\n", bucket->pil, name);
	}
	if(action == NULL)
		action = (struct irqaction *)kmalloc(sizeof(struct irqaction),
						     GFP_KERNEL);
	if(!action) {
		restore_flags(flags);
		return -ENOMEM;
	}
	install_fast_irq(bucket->pil, handler);

	bucket->irq_info = action;
	bucket->flags |= IBF_ACTIVE;

	action->mask = (unsigned long) bucket;
	action->handler = handler;
	action->flags = irqflags | SA_IMAP_MASKED;
	action->dev_id = NULL;
	action->name = name;
	action->next = NULL;

	*(bucket->pil + irq_action) = action;
	enable_irq(irq);

	restore_flags(flags);
#ifdef CONFIG_SMP
	distribute_irqs();
#endif
	return 0;
}

/* We really don't need these at all on the Sparc.  We only have
 * stubs here because they are exported to modules.
 */
unsigned long probe_irq_on(void)
{
	return 0;
}

int probe_irq_off(unsigned long mask)
{
	return 0;
}

/* This gets the master TICK_INT timer going. */
void init_timers(void (*cfunc)(int, void *, struct pt_regs *),
		 unsigned long *clock)
{
	unsigned long pstate;
	extern unsigned long timer_tick_offset;
	int node, err;
#ifdef CONFIG_SMP
	extern void smp_tick_init(void);
#endif

	node = linux_cpus[0].prom_node;
	*clock = prom_getint(node, "clock-frequency");
	timer_tick_offset = *clock / HZ;
#ifdef CONFIG_SMP
	smp_tick_init();
#endif

	/* Register IRQ handler. */
	err = request_irq(build_irq(0, 0, 0UL, 0UL), cfunc,
			  (SA_INTERRUPT | SA_STATIC_ALLOC), "timer", NULL);

	if(err) {
		prom_printf("Serious problem, cannot register TICK_INT\n");
		prom_halt();
	}

	/* Guarantee that the following sequences execute
	 * uninterrupted.
	 */
	__asm__ __volatile__("rdpr	%%pstate, %0\n\t"
			     "wrpr	%0, %1, %%pstate"
			     : "=r" (pstate)
			     : "i" (PSTATE_IE));

	/* Set things up so user can access tick register for profiling
	 * purposes.  Also workaround BB_ERRATA_1 by doing a dummy
	 * read back of %tick after writing it.
	 */
	__asm__ __volatile__("
	sethi	%%hi(0x80000000), %%g1
	ba,pt	%%xcc, 1f
	 sllx	%%g1, 32, %%g1
	.align	64
1:	rd	%%tick, %%g2
	add	%%g2, 6, %%g2
	andn	%%g2, %%g1, %%g2
	wrpr	%%g2, 0, %%tick
	rdpr	%%tick, %%g0"
	: /* no outputs */
	: /* no inputs */
	: "g1", "g2");

	/* Workaround for Spitfire Errata (#54 I think??), I discovered
	 * this via Sun BugID 4008234, mentioned in Solaris-2.5.1 patch
	 * number 103640.
	 *
	 * On Blackbird writes to %tick_cmpr can fail, the
	 * workaround seems to be to execute the wr instruction
	 * at the start of an I-cache line, and perform a dummy
	 * read back from %tick_cmpr right after writing to it. -DaveM
	 */
	__asm__ __volatile__("
	rd	%%tick, %%g1
	ba,pt	%%xcc, 1f
	 add	%%g1, %0, %%g1
	.align	64
1:	wr	%%g1, 0x0, %%tick_cmpr
	rd	%%tick_cmpr, %%g0"
	: /* no outputs */
	: "r" (timer_tick_offset)
	: "g1");

	/* Restore PSTATE_IE. */
	__asm__ __volatile__("wrpr	%0, 0x0, %%pstate"
			     : /* no outputs */
			     : "r" (pstate));

	sti();
}
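
/* Retargeting an interrupt means rewriting its IMAP register: the
 * target CPU's UPA module ID (MID) goes into the IMAP TID field,
 * which starts at bit 26 (hence the << 26 shifts in handler_irq()
 * above and retarget_one_irq() below), with IMAP_VALID keeping the
 * mapping enabled.  On Starfire the MID must first be run through
 * starfire_translate().  An illustrative sketch (the MID value here
 * is made up):
 */
#if 0
	unsigned int tid = 6 << 26;	/* aim at the CPU whose MID is 6 */

	upa_writel(IMAP_VALID | (tid & IMAP_TID), imap);
#endif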
*/ __asm__ __volatile__("wrpr %0, 0x0, %%pstate" : /* no outputs */ : "r" (pstate)); sti();}#ifdef CONFIG_SMPstatic int retarget_one_irq(struct irqaction *p, int goal_cpu){ struct ino_bucket *bucket = __bucket(p->mask); unsigned long imap = bucket->imap; unsigned int tid; /* Never change this, it causes problems on Ex000 systems. */ if (bucket->pil == 12) return goal_cpu; if(this_is_starfire == 0) { tid = __cpu_logical_map[goal_cpu] << 26; } else { tid = (starfire_translate(imap, __cpu_logical_map[goal_cpu]) << 26); } upa_writel(IMAP_VALID | (tid & IMAP_TID), imap); goal_cpu++; if(goal_cpu >= NR_CPUS || __cpu_logical_map[goal_cpu] == -1) goal_cpu = 0; return goal_cpu;}/* Called from request_irq. */static void distribute_irqs(void){ unsigned long flags; int cpu, level; save_and_cli(flags); cpu = 0; for(level = 0; level < NR_IRQS; level++) { struct irqaction *p = irq_action[level]; while(p) { if(p->flags & SA_IMAP_MASKED) cpu = retarget_one_irq(p, cpu); p = p->next; } } restore_flags(flags);}#endifstruct sun5_timer *prom_timers;static u64 prom_limit0, prom_limit1;static void map_prom_timers(void){ unsigned int addr[3]; int tnode, err; /* PROM timer node hangs out in the top level of device siblings... */ tnode = prom_finddevice("/counter-timer"); /* Assume if node is not present, PROM uses different tick mechanism * which we should not care about. */ if(tnode == 0 || tnode == -1) { prom_timers = (struct sun5_timer *) 0; return; } /* If PROM is really using this, it must be mapped by him. */ err = prom_getproperty(tnode, "address", (char *)addr, sizeof(addr)); if(err == -1) { prom_printf("PROM does not have timer mapped, trying to continue.\n"); prom_timers = (struct sun5_timer *) 0; return; } prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);}static void kill_prom_timer(void){ if(!prom_timers) return; /* Save them away for later. */ prom_limit0 = prom_timers->limit0; prom_limit1 = prom_timers->limit1; /* Just as in sun4c/sun4m PROM uses timer which ticks at IRQ 14. * We turn both off here just to be paranoid. */ prom_timers->limit0 = 0; prom_timers->limit1 = 0; /* Wheee, eat the interrupt packet too... */ __asm__ __volatile__(" mov 0x40, %%g2 ldxa [%%g0] %0, %%g1 ldxa [%%g2] %1, %%g1 stxa %%g0, [%%g0] %0 membar #Sync" : /* no outputs */ : "i" (ASI_INTR_RECEIVE), "i" (ASI_UDB_INTR_R) : "g1", "g2");}void enable_prom_timer(void){ if(!prom_timers) return; /* Set it to whatever was there before. */ prom_timers->limit1 = prom_limit1; prom_timers->count1 = 0; prom_timers->limit0 = prom_limit0; prom_timers->count0 = 0;}void __init init_IRQ(void){ static int called = 0; if (called == 0) { called = 1; map_prom_timers(); kill_prom_timer(); memset(&ivector_table[0], 0, sizeof(ivector_table));#ifndef CONFIG_SMP memset(&__up_workvec[0], 0, sizeof(__up_workvec));#endif } /* We need to clear any IRQ's pending in the soft interrupt * registers, a spurious one could be left around from the * PROM timer which we just disabled. */ clear_softint(get_softint()); /* Now that ivector table is initialized, it is safe * to receive IRQ vector traps. We will normally take * one or two right now, in case some device PROM used * to boot us wants to speak to us. We just ignore them. */ __asm__ __volatile__("rdpr %%pstate, %%g1\n\t" "or %%g1, %0, %%g1\n\t" "wrpr %%g1, 0x0, %%pstate" : /* No outputs */ : "i" (PSTATE_IE) : "g1");}void init_irq_proc(void){ /* For now, nothing... */}