📄 irq.c
void do_cpu_irq_mask(struct pt_regs *regs)
{
	unsigned long eirr_val;
	unsigned int i = 3;	/* limit time spent in interrupt context */

	/*
	 * PSW_I or EIEM bits cannot be enabled until after the
	 * interrupts are processed.
	 * timer_interrupt() assumes it won't get interrupted when it
	 * holds the xtime_lock...an unmasked interrupt source could
	 * interrupt and deadlock by trying to grab xtime_lock too.
	 * Keeping PSW_I and EIEM disabled avoids this.
	 */
	set_eiem(0UL);	/* disable all external interrupts for now */

	/* 1) only process IRQs that are enabled/unmasked (cpu_eiem)
	 * 2) We loop here on EIRR contents in order to avoid
	 *    nested interrupts or having to take another interrupt
	 *    when we could have just handled it right away.
	 * 3) Limit the number of times we loop to make sure other
	 *    processing can occur.
	 */
	while ((eirr_val = (mfctl(23) & cpu_eiem)) && --i) {
		unsigned long bit = (1UL << MAX_CPU_IRQ);
		unsigned int irq;

		mtctl(eirr_val, 23);	/* reset bits we are going to process */

#ifdef DEBUG_IRQ
		if (eirr_val != (1UL << MAX_CPU_IRQ))
			printk(KERN_DEBUG "do_cpu_irq_mask %lx\n", eirr_val);
#endif

		for (irq = 0; eirr_val && bit; bit >>= 1, irq++) {
			if (!(bit & eirr_val & cpu_eiem))
				continue;

			/* clear bit in mask - can exit loop sooner */
			eirr_val &= ~bit;

			do_irq(&cpu_irq_actions[irq], TIMER_IRQ + irq, regs);
		}
	}
	set_eiem(cpu_eiem);
}

/* Called from second level IRQ regions: eg dino or iosapic. */
void do_irq_mask(unsigned long mask, struct irq_region *region, struct pt_regs *regs)
{
	unsigned long bit;
	unsigned int irq;

#ifdef DEBUG_IRQ
	if (mask != (1L << MAX_CPU_IRQ))
		printk(KERN_DEBUG "do_irq_mask %08lx %p %p\n", mask, region, regs);
#endif

	for (bit = (1L << MAX_CPU_IRQ), irq = 0; mask && bit; bit >>= 1, irq++) {
		unsigned int irq_num;

		if (!(bit & mask))
			continue;

		mask &= ~bit;	/* clear bit in mask - can exit loop sooner */
		irq_num = region->data.irqbase + irq;

		mask_irq(irq_num);
		do_irq(&region->action[irq], irq_num, regs);
		unmask_irq(irq_num);
	}
}

static inline int find_free_region(void)
{
	int irqreg;

	for (irqreg = 1; irqreg <= NR_IRQ_REGS; irqreg++) {
		if (irq_region[irqreg] == NULL)
			return irqreg;
	}

	return 0;
}

/*****
 * alloc_irq_region - allocate/init a new IRQ region
 * @count: number of IRQs in this region.
 * @ops: function table with request/release/mask/unmask/etc.. entries.
 * @name: name of region owner for /proc/interrupts output.
 * @dev: private data to associate with the new IRQ region.
 *
 * Every IRQ must become an MMIO write to the CPU's EIRR in
 * order to get CPU service. The IRQ region represents the
 * number of unique events the region handler can (or must)
 * identify. For the PARISC CPU, that's the width of the EIR Register.
 * IRQ regions virtualize IRQs (eg EISA or PCI host bus controllers)
 * for line based devices.
 */
struct irq_region *alloc_irq_region(int count, struct irq_region_ops *ops,
		const char *name, void *dev)
{
	struct irq_region *region;
	int index;

	index = find_free_region();
	if (index == 0) {
		printk(KERN_ERR "Maximum number of irq regions exceeded. Increase NR_IRQ_REGS!\n");
		return NULL;
	}

	/* count must fit within a single region */
	if (IRQ_REGION(count - 1))
		return NULL;

	if (count < IRQ_PER_REGION) {
		DBG_IRQ(0, ("alloc_irq_region() using minimum of %d irq lines for %s (%d)\n",
				IRQ_PER_REGION, name, count));
		count = IRQ_PER_REGION;
	}

	/* if either mask *or* unmask is set, both have to be set. */
	if ((ops->mask_irq || ops->unmask_irq) &&
	    !(ops->mask_irq && ops->unmask_irq))
		return NULL;

	/* ditto for enable/disable */
	if ((ops->disable_irq || ops->enable_irq) &&
	    !(ops->disable_irq && ops->enable_irq))
		return NULL;

	region = kmalloc(sizeof(*region), GFP_ATOMIC);
	if (!region)
		return NULL;
	memset(region, 0, sizeof(*region));

	region->action = kmalloc(count * sizeof(*region->action), GFP_ATOMIC);
	if (!region->action) {
		kfree(region);
		return NULL;
	}
	memset(region->action, 0, count * sizeof(*region->action));

	region->ops = *ops;
	region->data.irqbase = IRQ_FROM_REGION(index);
	region->data.name = name;
	region->data.dev = dev;

	irq_region[index] = region;

	return irq_region[index];
}
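As a sketch of how a bus-controller driver might consume alloc_irq_region(): the ops signatures below are inferred from call sites such as region->ops.enable_irq(region->data.dev, i), and every "mybus" name is hypothetical, not taken from this file.

/* Region-registration sketch (hypothetical bus driver, not part of
 * irq.c). The (void *dev, int irq) signatures are inferred from the
 * call sites above. */
static void mybus_mask_irq(void *dev, int irq)   { /* poke ctrl reg */ }
static void mybus_unmask_irq(void *dev, int irq) { /* poke ctrl reg */ }

static struct irq_region_ops mybus_ops = {
	/* mask and unmask must be supplied as a pair (or both left
	 * NULL), or alloc_irq_region() above rejects the table */
	.mask_irq   = mybus_mask_irq,
	.unmask_irq = mybus_unmask_irq,
};

static struct irq_region *mybus_region;

static int __init mybus_init(void)
{
	/* 8 hypothetical lines behind this controller */
	mybus_region = alloc_irq_region(8, &mybus_ops, "mybus", NULL);
	return mybus_region ? 0 : -ENOMEM;
}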
/* FIXME: SMP, flags, bottom halves, rest */

int request_irq(unsigned int irq,
		void (*handler)(int, void *, struct pt_regs *),
		unsigned long irqflags,
		const char *devname,
		void *dev_id)
{
	struct irqaction *action;

#if 0
	printk(KERN_INFO "request_irq(%d, %p, 0x%lx, %s, %p)\n",
	       irq, handler, irqflags, devname, dev_id);
#endif

	irq = irq_cannonicalize(irq);

	/* request_irq()/free_irq() may not be called from interrupt context. */
	if (in_interrupt())
		BUG();

	if (!handler) {
		printk(KERN_ERR "request_irq(%d,...): Augh! No handler for irq!\n",
		       irq);
		return -EINVAL;
	}

	if (irq_region[IRQ_REGION(irq)] == NULL) {
		/*
		** Bug catcher for drivers which use "char" or u8 for
		** the IRQ number. They lose the region number which
		** is in pcidev->irq (an int).
		*/
		printk(KERN_ERR "%p (%s?) called request_irq with an invalid irq %d\n",
		       __builtin_return_address(0), devname, irq);
		return -EINVAL;
	}

	spin_lock(&irq_lock);
	action = &(irq_region[IRQ_REGION(irq)]->action[IRQ_OFFSET(irq)]);

	/* First one is preallocated. */
	if (action->handler) {
		/* But it's in use...find the tail and allocate a new one */
		while (action->next)
			action = action->next;

		action->next = kmalloc(sizeof(*action), GFP_ATOMIC);
		action = action->next;
		if (action)	/* only zero it if the allocation succeeded */
			memset(action, 0, sizeof(*action));
	}

	if (!action) {
		spin_unlock(&irq_lock);
		printk(KERN_ERR "request_irq(): Augh! No action!\n");
		return -ENOMEM;
	}

	action->handler = handler;
	action->flags = irqflags;
	action->mask = 0;
	action->name = devname;
	action->next = NULL;
	action->dev_id = dev_id;

	spin_unlock(&irq_lock);

	enable_irq(irq);
	return 0;
}

void free_irq(unsigned int irq, void *dev_id)
{
	struct irqaction *action, **p;

	/* See comments in request_irq() about interrupt context */
	irq = irq_cannonicalize(irq);
	if (in_interrupt())
		BUG();

	spin_lock(&irq_lock);
	action = &irq_region[IRQ_REGION(irq)]->action[IRQ_OFFSET(irq)];

	if (action->dev_id == dev_id) {
		if (action->next == NULL) {
			action->handler = NULL;
		} else {
			/* promote the second entry into the preallocated
			 * slot and free the now-unused node */
			struct irqaction *next = action->next;
			memcpy(action, next, sizeof(*action));
			kfree(next);
		}

		spin_unlock(&irq_lock);
		return;
	}

	/* walk the chain looking for a matching dev_id */
	for (p = &action->next; (action = *p) != NULL; p = &action->next) {
		if (action->dev_id != dev_id)
			continue;

		/* Found it - now free it */
		*p = action->next;
		kfree(action);
		spin_unlock(&irq_lock);
		return;
	}

	spin_unlock(&irq_lock);
	printk(KERN_ERR "Trying to free free IRQ%d\n", irq);
}
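A minimal driver-side sketch of the request_irq()/free_irq() pair above; MYDEV_IRQ, the "mydev" name, and the handler body are made up for illustration.

/* Usage sketch (hypothetical driver, not part of irq.c). */
#define MYDEV_IRQ 17		/* hypothetical IRQ number */

static int mydev_count;

/* Handler signature matches the request_irq() prototype above:
 * void (*)(int irq, void *dev_id, struct pt_regs *regs). */
static void mydev_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	int *count = dev_id;	/* dev_id is whatever we registered */
	(*count)++;		/* acknowledge/handle the device here */
}

static int __init mydev_init(void)
{
	/* dev_id (&mydev_count) also identifies this handler to
	 * free_irq() later, which matters on shared lines */
	if (request_irq(MYDEV_IRQ, mydev_interrupt, 0, "mydev", &mydev_count))
		return -EBUSY;
	return 0;
}

static void __exit mydev_exit(void)
{
	free_irq(MYDEV_IRQ, &mydev_count);
}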
/*
 * IRQ autodetection code..
 *
 * This depends on the fact that any interrupt that
 * comes in on to an unassigned handler will get stuck
 * with "IRQ_WAITING" cleared and the interrupt
 * disabled.
 */

static DECLARE_MUTEX(probe_sem);

/**
 *	probe_irq_on	- begin an interrupt autodetect
 *
 *	Commence probing for an interrupt. The interrupts are scanned
 *	and a mask of potential interrupt lines is returned.
 */

/* TODO: spin_lock_irq(desc->lock -> irq_lock) */
unsigned long probe_irq_on(void)
{
	unsigned int i;
	unsigned long val;
	unsigned long delay;
	struct irq_region *region;

	/* support for irq autoprobing is limited to EISA (irq region 0) */
	region = irq_region[EISA_IRQ_REGION];
	if (!EISA_bus || !region)
		return 0;

	down(&probe_sem);

	/*
	 * enable any unassigned irqs
	 * (we must startup again here because if a longstanding irq
	 * happened in the previous stage, it may have masked itself)
	 */
	for (i = EISA_MAX_IRQS - 1; i > 0; i--) {
		struct irqaction *action;

		spin_lock_irq(&irq_lock);
		action = region->action + i;
		if (!action->handler) {
			region->data.status[i] |= IRQ_AUTODETECT | IRQ_WAITING;
			region->ops.enable_irq(region->data.dev, i);
		}
		spin_unlock_irq(&irq_lock);
	}

	/*
	 * Wait for spurious interrupts to trigger
	 */
	for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
		/* about 100ms delay */
		synchronize_irq();

	/*
	 * Now filter out any obviously spurious interrupts
	 */
	val = 0;
	for (i = 0; i < EISA_MAX_IRQS; i++) {
		unsigned int status;

		spin_lock_irq(&irq_lock);
		status = region->data.status[i];

		if (status & IRQ_AUTODETECT) {
			/* It triggered already - consider it spurious. */
			if (!(status & IRQ_WAITING)) {
				region->data.status[i] = status & ~IRQ_AUTODETECT;
				region->ops.disable_irq(region->data.dev, i);
			} else if (i < BITS_PER_LONG)
				val |= (1 << i);
		}
		spin_unlock_irq(&irq_lock);
	}

	return val;
}

/*
 * Return the one interrupt that triggered (this can
 * handle any interrupt source).
 */

/**
 *	probe_irq_off	- end an interrupt autodetect
 *	@val: mask of potential interrupts (unused)
 *
 *	Scans the unused interrupt lines and returns the line which
 *	appears to have triggered the interrupt. If no interrupt was
 *	found then zero is returned. If more than one interrupt is
 *	found then minus the first candidate is returned to indicate
 *	there is doubt.
 *
 *	The interrupt probe logic state is returned to its previous
 *	value.
 *
 *	BUGS: When used in a module (which arguably shouldn't happen)
 *	nothing prevents two IRQ probe callers from overlapping. The
 *	results of this are non-optimal.
 */
int probe_irq_off(unsigned long val)
{
	struct irq_region *region;
	int i, irq_found, nr_irqs;

	/* support for irq autoprobing is limited to EISA (irq region 0) */
	region = irq_region[EISA_IRQ_REGION];
	if (!EISA_bus || !region)
		return 0;

	nr_irqs = 0;
	irq_found = 0;
	for (i = 0; i < EISA_MAX_IRQS; i++) {
		unsigned int status;

		spin_lock_irq(&irq_lock);
		status = region->data.status[i];

		if (status & IRQ_AUTODETECT) {
			if (!(status & IRQ_WAITING)) {
				if (!nr_irqs)
					irq_found = i;
				nr_irqs++;
			}
			region->ops.disable_irq(region->data.dev, i);
			region->data.status[i] = status & ~IRQ_AUTODETECT;
		}
		spin_unlock_irq(&irq_lock);
	}
	up(&probe_sem);

	if (nr_irqs > 1)
		irq_found = -irq_found;
	return irq_found;
}

void __init init_IRQ(void)
{
	local_irq_disable();	/* PARANOID - should already be disabled */
	mtctl(-1L, 23);		/* EIRR : clear all pending external intr */

#ifdef CONFIG_SMP
	if (!cpu_eiem)
		cpu_eiem = EIEM_MASK(IPI_IRQ) | EIEM_MASK(TIMER_IRQ);
#else
	cpu_eiem = EIEM_MASK(TIMER_IRQ);
#endif
	set_eiem(cpu_eiem);	/* EIEM : enable all external intr */
}

#ifdef CONFIG_PROC_FS
/* called from kernel/sysctl.c:sysctl_init() */
void __init init_irq_proc(void)
{
}
#endif
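Finally, a sketch of the classic autoprobe handshake that the probe_irq_on()/probe_irq_off() pair above implements; the device-specific trigger call is hypothetical.

/* Autoprobe sketch (hypothetical caller, not part of irq.c).
 * mydev_force_interrupt() stands in for whatever device-specific
 * register poke makes the hardware raise its line. */
static int mydev_find_irq(void)
{
	unsigned long mask;
	int irq;

	mask = probe_irq_on();		/* arm all unassigned EISA lines */
	mydev_force_interrupt();	/* hypothetical: make the device fire */
	irq = probe_irq_off(mask);	/* >0: the line; 0: none; <0: ambiguous */

	return irq;
}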