📄 irq.c
{
        irq_desc_t *desc = irq_desc + irq;
        int cpu = smp_processor_id();

        kstat.irqs[cpu][irq]++;
        desc->handler->ack(irq);
        handle_IRQ_event(irq, regs, desc->action);
        desc->handler->end(irq);
}

/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
unsigned int do_IRQ(unsigned long irq, struct pt_regs *regs)
{
        /*
         * We ack quickly, we don't want the irq controller
         * thinking we're snobs just because some other CPU has
         * disabled global interrupts (we have already done the
         * INT_ACK cycles, it's too late to try to pretend to the
         * controller that we aren't taking the interrupt).
         *
         * 0 return value means that this irq is already being
         * handled by some other CPU. (or is disabled)
         */
        int cpu = smp_processor_id();
        irq_desc_t *desc = irq_desc + irq;
        struct irqaction * action;
        unsigned int status;

        kstat.irqs[cpu][irq]++;
        spin_lock(&desc->lock);
        desc->handler->ack(irq);
        /*
         * REPLAY is when Linux resends an IRQ that was dropped earlier
         * WAITING is used by probe to mark irqs that are being tested
         */
        status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
        status |= IRQ_PENDING; /* we _want_ to handle it */

        /*
         * If the IRQ is disabled for whatever reason, we cannot
         * use the action we have.
         */
        action = NULL;
        if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
                action = desc->action;
                status &= ~IRQ_PENDING; /* we commit to handling */
                status |= IRQ_INPROGRESS; /* we are handling it */
        }
        desc->status = status;

        /*
         * If there is no IRQ handler or it was disabled, exit early.
         * Since we set PENDING, if another processor is handling
         * a different instance of this same irq, the other processor
         * will take care of it.
         */
        if (!action)
                goto out;

        /*
         * Edge triggered interrupts need to remember
         * pending events.
         * This applies to any hw interrupts that allow a second
         * instance of the same irq to arrive while we are in do_IRQ
         * or in the handler. But the code here only handles the _second_
         * instance of the irq, not the third or fourth. So it is mostly
         * useful for irq hardware that does not mask cleanly in an
         * SMP environment.
         */
        for (;;) {
                spin_unlock(&desc->lock);
                handle_IRQ_event(irq, regs, action);
                spin_lock(&desc->lock);

                if (!(desc->status & IRQ_PENDING))
                        break;
                desc->status &= ~IRQ_PENDING;
        }
        desc->status &= ~IRQ_INPROGRESS;
out:
        /*
         * The ->end() handler has to deal with interrupts which got
         * disabled while the handler was running.
         */
        desc->handler->end(irq);
        spin_unlock(&desc->lock);

        return 1;
}
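do_IRQ() above hands the actual driver callbacks to handle_IRQ_event(), which is defined earlier in this file and not included in this excerpt. Conceptually it walks the irqaction chain for the line and calls every registered handler; the sketch below shows that shape only. It is based on the general 2.4 interrupt code, so details such as the SA_INTERRUPT fast-handler test and the entropy-pool call are assumptions about the surrounding kernel, not a verbatim copy of this file.

/* Sketch of handle_IRQ_event() -- illustrative, not the file's own copy. */
int handle_IRQ_event(unsigned int irq, struct pt_regs *regs,
                     struct irqaction *action)
{
        int status = 1;                 /* force bottom-half processing */

        /* Slow handlers run with interrupts enabled, fast ones do not. */
        if (!(action->flags & SA_INTERRUPT))
                __sti();

        /* Call every handler registered on this (possibly shared) line. */
        do {
                status |= action->flags;
                action->handler(irq, action->dev_id, regs);
                action = action->next;
        } while (action);

        /* Feed the entropy pool if any handler asked for it. */
        if (status & SA_SAMPLE_RANDOM)
                add_interrupt_randomness(irq);

        __cli();
        return status;
}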
int request_irq(unsigned int irq,
                void (*handler)(int, void *, struct pt_regs *),
                unsigned long irqflags,
                const char * devname,
                void *dev_id)
{
        int retval;
        struct irqaction * action;

#if 1
        /*
         * Sanity-check: shared interrupts should REALLY pass in
         * a real dev-ID, otherwise we'll have trouble later trying
         * to figure out which interrupt is which (messes up the
         * interrupt freeing logic etc).
         */
        if (irqflags & SA_SHIRQ) {
                if (!dev_id)
                        printk("Bad boy: %s (at 0x%x) called us without a dev_id!\n",
                               devname, (&irq)[-1]);
        }
#endif

        if (irq >= NR_IRQS)
                return -EINVAL;
        if (!handler)
                return -EINVAL;

        action = (struct irqaction *)
                        kmalloc(sizeof(struct irqaction), GFP_KERNEL);
        if (!action)
                return -ENOMEM;

        action->handler = handler;
        action->flags = irqflags;
        action->mask = 0;
        action->name = devname;
        action->next = NULL;
        action->dev_id = dev_id;

        retval = setup_irq(irq, action);
        if (retval)
                kfree(action);
        return retval;
}

void free_irq(unsigned int irq, void *dev_id)
{
        irq_desc_t *desc;
        struct irqaction **p;
        unsigned long flags;

        if (irq >= NR_IRQS)
                return;

        desc = irq_desc + irq;
        spin_lock_irqsave(&desc->lock,flags);
        p = &desc->action;
        for (;;) {
                struct irqaction * action = *p;
                if (action) {
                        struct irqaction **pp = p;

                        p = &action->next;
                        if (action->dev_id != dev_id)
                                continue;

                        /* Found it - now remove it from the list of entries */
                        *pp = action->next;
                        if (!desc->action) {
                                desc->status |= IRQ_DISABLED;
                                desc->handler->shutdown(irq);
                        }
                        spin_unlock_irqrestore(&desc->lock,flags);

#ifdef CONFIG_SMP
                        /* Wait to make sure it's not being used on another CPU */
                        while (desc->status & IRQ_INPROGRESS)
                                barrier();
#endif
                        kfree(action);
                        return;
                }
                printk("Trying to free free IRQ%d\n",irq);
                spin_unlock_irqrestore(&desc->lock,flags);
                return;
        }
}
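For context, here is how a driver typically consumes the request_irq()/free_irq() API above. The device name "mydev", the my_dev structure, and the attach/detach helpers are made up for illustration; only the handler prototype and the SA_SHIRQ/dev_id convention come from the code above.

/* Hypothetical driver-side usage sketch of request_irq()/free_irq(). */
struct my_dev {
        int irq;
        /* ... device state ... */
};

static void my_dev_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
        struct my_dev *dev = dev_id;

        /* acknowledge the device here, then do (or defer) the real work */
}

static int my_dev_attach(struct my_dev *dev)
{
        /* SA_SHIRQ: share the line; dev_id is handed back on every interrupt */
        return request_irq(dev->irq, my_dev_interrupt, SA_SHIRQ, "mydev", dev);
}

static void my_dev_detach(struct my_dev *dev)
{
        free_irq(dev->irq, dev);        /* must match the registered dev_id */
}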
/*
 * IRQ autodetection code..
 *
 * This depends on the fact that any interrupt that
 * comes in on to an unassigned handler will get stuck
 * with "IRQ_WAITING" cleared and the interrupt
 * disabled.
 */
unsigned long probe_irq_on(void)
{
        unsigned int i;
        irq_desc_t *desc;
        unsigned long val;
        unsigned long delay;

        /*
         * something may have generated an irq long ago and we want to
         * flush such a longstanding irq before considering it as spurious.
         */
        for (i = NR_IRQS-1; i > 0; i--) {
                desc = irq_desc + i;

                spin_lock_irq(&desc->lock);
                if (!irq_desc[i].action)
                        irq_desc[i].handler->startup(i);
                spin_unlock_irq(&desc->lock);
        }

        /* Wait for longstanding interrupts to trigger. */
        for (delay = jiffies + HZ/50; time_after(delay, jiffies); )
                /* about 20ms delay */ synchronize_irq();

        /*
         * enable any unassigned irqs
         * (we must startup again here because if a longstanding irq
         * happened in the previous stage, it may have masked itself)
         */
        for (i = NR_IRQS-1; i > 0; i--) {
                desc = irq_desc + i;

                spin_lock_irq(&desc->lock);
                if (!desc->action) {
                        desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
                        if (desc->handler->startup(i))
                                desc->status |= IRQ_PENDING;
                }
                spin_unlock_irq(&desc->lock);
        }

        /*
         * Wait for spurious interrupts to trigger
         */
        for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
                /* about 100ms delay */ synchronize_irq();

        /*
         * Now filter out any obviously spurious interrupts
         */
        val = 0;
        for (i = 0; i < NR_IRQS; i++) {
                irq_desc_t *desc = irq_desc + i;
                unsigned int status;

                spin_lock_irq(&desc->lock);
                status = desc->status;

                if (status & IRQ_AUTODETECT) {
                        /* It triggered already - consider it spurious. */
                        if (!(status & IRQ_WAITING)) {
                                desc->status = status & ~IRQ_AUTODETECT;
                                desc->handler->shutdown(i);
                        } else if (i < 32)
                                val |= 1 << i;
                }
                spin_unlock_irq(&desc->lock);
        }

        return val;
}

/*
 * Return a mask of triggered interrupts (this
 * can handle only legacy ISA interrupts).
 */
unsigned int probe_irq_mask(unsigned long val)
{
        int i;
        unsigned int mask;

        mask = 0;
        for (i = 0; i < 16; i++) {
                irq_desc_t *desc = irq_desc + i;
                unsigned int status;

                spin_lock_irq(&desc->lock);
                status = desc->status;

                if (status & IRQ_AUTODETECT) {
                        if (!(status & IRQ_WAITING))
                                mask |= 1 << i;

                        desc->status = status & ~IRQ_AUTODETECT;
                        desc->handler->shutdown(i);
                }
                spin_unlock_irq(&desc->lock);
        }

        return mask & val;
}

/*
 * Return the one interrupt that triggered (this can
 * handle any interrupt source)
 */
int probe_irq_off(unsigned long val)
{
        int i, irq_found, nr_irqs;

        nr_irqs = 0;
        irq_found = 0;
        for (i = 0; i < NR_IRQS; i++) {
                irq_desc_t *desc = irq_desc + i;
                unsigned int status;

                spin_lock_irq(&desc->lock);
                status = desc->status;

                if (status & IRQ_AUTODETECT) {
                        if (!(status & IRQ_WAITING)) {
                                if (!nr_irqs)
                                        irq_found = i;
                                nr_irqs++;
                        }
                        desc->status = status & ~IRQ_AUTODETECT;
                        desc->handler->shutdown(i);
                }
                spin_unlock_irq(&desc->lock);
        }

        if (nr_irqs > 1)
                irq_found = -irq_found;
        return irq_found;
}
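The probe_irq_on()/probe_irq_off() pair above is intended for legacy drivers that have to discover their interrupt line. The sketch below shows the usual calling sequence and reuses the my_dev sketch from the earlier example; my_dev_trigger_test_irq() is a hypothetical stand-in for whatever device-specific poke makes the hardware raise exactly one interrupt.

/* Illustrative autoprobe sequence built on the functions above. */
static int my_dev_find_irq(struct my_dev *dev)
{
        unsigned long mask;
        int irq;

        mask = probe_irq_on();          /* arm all unassigned lines */
        my_dev_trigger_test_irq(dev);   /* hypothetical: make the device fire */
        mdelay(10);                     /* give the interrupt time to arrive */
        irq = probe_irq_off(mask);      /* 0: none seen, <0: more than one seen */

        if (irq <= 0)
                return -ENODEV;         /* ambiguous or missing interrupt */
        dev->irq = irq;
        return 0;
}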
/* this was setup_x86_irq but it seems pretty generic */
int setup_irq(unsigned int irq, struct irqaction * new)
{
        int shared = 0;
        unsigned long flags;
        struct irqaction *old, **p;
        irq_desc_t *desc = irq_desc + irq;

        /*
         * Some drivers like serial.c use request_irq() heavily,
         * so we have to be careful not to interfere with a
         * running system.
         */
        if (new->flags & SA_SAMPLE_RANDOM) {
                /*
                 * This function might sleep, we want to call it first,
                 * outside of the atomic block.
                 * Yes, this might clear the entropy pool if the wrong
                 * driver is attempted to be loaded, without actually
                 * installing a new handler, but is this really a problem,
                 * only the sysadmin is able to do this.
                 */
                rand_initialize_irq(irq);
        }

        /*
         * The following block of code has to be executed atomically
         */
        spin_lock_irqsave(&desc->lock,flags);
        p = &desc->action;
        if ((old = *p) != NULL) {
                /* Can't share interrupts unless both agree to */
                if (!(old->flags & new->flags & SA_SHIRQ)) {
                        spin_unlock_irqrestore(&desc->lock,flags);
                        return -EBUSY;
                }

                /* add new interrupt at end of irq queue */
                do {
                        p = &old->next;
                        old = *p;
                } while (old);
                shared = 1;
        }

        *p = new;

        if (!shared) {
                desc->depth = 0;
                desc->status &= ~IRQ_DISABLED;
                desc->handler->startup(irq);
        }
        spin_unlock_irqrestore(&desc->lock,flags);

        register_irq_proc(irq);
        return 0;
}

static struct proc_dir_entry * root_irq_dir;
static struct proc_dir_entry * irq_dir [NR_IRQS];
static struct proc_dir_entry * smp_affinity_entry [NR_IRQS];

static unsigned long irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = ~0UL };

#define HEX_DIGITS 8

static int irq_affinity_read_proc (char *page, char **start, off_t off,
                        int count, int *eof, void *data)
{
        if (count < HEX_DIGITS+1)
                return -EINVAL;
        return sprintf (page, "%08lx\n", irq_affinity[(long)data]);
}

static unsigned int parse_hex_value (const char *buffer,
                unsigned long count, unsigned long *ret)
{
        unsigned char hexnum [HEX_DIGITS];
        unsigned long value;
        int i;

        if (!count)
                return -EINVAL;
        if (count > HEX_DIGITS)
                count = HEX_DIGITS;
        if (copy_from_user(hexnum, buffer, count))
                return -EFAULT;

        /*
         * Parse the first 8 characters as a hex string, any non-hex char
         * is end-of-string. '00e1', 'e1', '00E1', 'E1' are all the same.
         */
        value = 0;

        for (i = 0; i < count; i++) {
                unsigned int c = hexnum[i];

                switch (c) {
                        case '0' ... '9': c -= '0'; break;
                        case 'a' ... 'f': c -= 'a'-10; break;
                        case 'A' ... 'F': c -= 'A'-10; break;
                        default:
                                goto out;
                }
                value = (value << 4) | c;
        }
out:
        *ret = value;
        return 0;
}

static int irq_affinity_write_proc (struct file *file, const char *buffer,
                                        unsigned long count, void *data)
{
        int irq = (long) data, full_count = count, err;
        unsigned long new_value;

        if (!irq_desc[irq].handler->set_affinity)
                return -EIO;

        err = parse_hex_value(buffer, count, &new_value);

#if CONFIG_SMP
        /*
         * Do not allow disabling IRQs completely - it's a too easy
         * way to make the system unusable accidentally :-) At least
         * one online CPU still has to be targeted.
         */
        if (!(new_value & cpu_online_map))
                return -EINVAL;
#endif

        irq_affinity[irq] = new_value;
        irq_desc[irq].handler->set_affinity(irq, new_value);

        return full_count;
}

static int prof_cpu_mask_read_proc (char *page, char **start, off_t off,
                        int count, int *eof, void *data)
{
        unsigned long *mask = (unsigned long *) data;

        if (count < HEX_DIGITS+1)
                return -EINVAL;
        return sprintf (page, "%08lx\n", *mask);
}

static int prof_cpu_mask_write_proc (struct file *file, const char *buffer,
                                        unsigned long count, void *data)
{
        unsigned long *mask = (unsigned long *) data, full_count = count, err;
        unsigned long new_value;

        err = parse_hex_value(buffer, count, &new_value);
        if (err)
                return err;

        *mask = new_value;
        return full_count;
}

#define MAX_NAMELEN 10

static void register_irq_proc (unsigned int irq)
{
        struct proc_dir_entry *entry;
        char name [MAX_NAMELEN];

        if (!root_irq_dir || (irq_desc[irq].handler == &no_irq_type))
                return;

        memset(name, 0, MAX_NAMELEN);
        sprintf(name, "%d", irq);

        /* create /proc/irq/1234 */
        irq_dir[irq] = proc_mkdir(name, root_irq_dir);

        /* create /proc/irq/1234/smp_affinity */
        entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);

        entry->nlink = 1;
        entry->data = (void *)(long)irq;
        entry->read_proc = irq_affinity_read_proc;
        entry->write_proc = irq_affinity_write_proc;

        smp_affinity_entry[irq] = entry;
}

unsigned long prof_cpu_mask = -1;

void init_irq_proc (void)
{
        struct proc_dir_entry *entry;
        int i;

        /* create /proc/irq */
        root_irq_dir = proc_mkdir("irq", 0);

        /* create /proc/irq/prof_cpu_mask */
        entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);

        entry->nlink = 1;
        entry->data = (void *)&prof_cpu_mask;
        entry->read_proc = prof_cpu_mask_read_proc;
        entry->write_proc = prof_cpu_mask_write_proc;

        /*
         * Create entries for all existing IRQs.
         */
        for (i = 0; i < NR_IRQS; i++) {
                if (irq_desc[i].handler == &no_irq_type)
                        continue;
                register_irq_proc(i);
        }
}
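Finally, register_irq_proc() and the *_proc handlers above expose each line's CPU mask as /proc/irq/<n>/smp_affinity, where parse_hex_value() accepts up to eight hex digits. A small userspace sketch of writing such a mask follows; the IRQ number 9 and the mask 00000002 (CPU 1 only) are arbitrary examples.

/* Userspace sketch: pin a hypothetical IRQ 9 to CPU 1 via /proc. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/proc/irq/9/smp_affinity", O_WRONLY);

        if (fd < 0) {
                perror("open /proc/irq/9/smp_affinity");
                return 1;
        }
        /* irq_affinity_write_proc() parses this as a hex CPU mask. */
        if (write(fd, "00000002", 8) != 8)
                perror("write");
        close(fd);
        return 0;
}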