irq.c
/*
 * arch/ppc/kernel/irq.c
 *
 * Derived from arch/i386/kernel/irq.c
 * Copyright (C) 1992 Linus Torvalds
 * Adapted from arch/i386 by Gary Thomas
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Updated and modified by Cort Dougan (cort@cs.nmt.edu)
 * Copyright (C) 1996 Cort Dougan
 * Adapted for Power Macintosh by Paul Mackerras
 * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQ's should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 */

#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/config.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/random.h>
#include <asm/uaccess.h>
#include <asm/bitops.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/iSeries/LparData.h>
#include <asm/machdep.h>
#include <asm/paca.h>
#include <asm/perfmon.h>

#include "local_irq.h"

atomic_t ipi_recv;
atomic_t ipi_sent;

void enable_irq(unsigned int irq_nr);
void disable_irq(unsigned int irq_nr);

#ifdef CONFIG_SMP
extern void iSeries_smp_message_recv( struct pt_regs * );
#endif

volatile unsigned char *chrp_int_ack_special;
static void register_irq_proc (unsigned int irq);

irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned =
	{ [0 ... NR_IRQS-1] = { 0, NULL, NULL, 0, SPIN_LOCK_UNLOCKED}};

int ppc_spurious_interrupts = 0;
struct irqaction *ppc_irq_action[NR_IRQS];
unsigned long lpEvent_count = 0;

#ifdef CONFIG_XMON
extern void xmon(struct pt_regs *regs);
extern int xmon_bpt(struct pt_regs *regs);
extern int xmon_sstep(struct pt_regs *regs);
extern int xmon_iabr_match(struct pt_regs *regs);
extern int xmon_dabr_match(struct pt_regs *regs);
extern void (*xmon_fault_handler)(struct pt_regs *regs);
#endif
#ifdef CONFIG_XMON
extern void (*debugger)(struct pt_regs *regs);
extern int (*debugger_bpt)(struct pt_regs *regs);
extern int (*debugger_sstep)(struct pt_regs *regs);
extern int (*debugger_iabr_match)(struct pt_regs *regs);
extern int (*debugger_dabr_match)(struct pt_regs *regs);
extern void (*debugger_fault_handler)(struct pt_regs *regs);
#endif

/* nasty hack for shared irq's since we need to do kmalloc calls but
 * can't very early in the boot when we need to do a request irq.
 * this needs to be removed.
 * -- Cort
 */
#define IRQ_KMALLOC_ENTRIES 16
static int cache_bitmask = 0;
static struct irqaction malloc_cache[IRQ_KMALLOC_ENTRIES];
extern int mem_init_done;

void *irq_kmalloc(size_t size, int pri)
{
	unsigned int i;

	if ( mem_init_done )
		return kmalloc(size,pri);
	for ( i = 0; i < IRQ_KMALLOC_ENTRIES ; i++ )
		if ( ! ( cache_bitmask & (1<<i) ) ) {
			cache_bitmask |= (1<<i);
			return (void *)(&malloc_cache[i]);
		}
	return 0;
}

void irq_kfree(void *ptr)
{
	unsigned int i;

	for ( i = 0 ; i < IRQ_KMALLOC_ENTRIES ; i++ )
		if ( ptr == &malloc_cache[i] ) {
			cache_bitmask &= ~(1<<i);
			return;
		}
	kfree(ptr);
}

int
setup_irq(unsigned int irq, struct irqaction * new)
{
	int shared = 0;
	unsigned long flags;
	struct irqaction *old, **p;
	irq_desc_t *desc = irq_desc + irq;

	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & SA_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, we want to call it first,
		 * outside of the atomic block.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded, without actually
		 * installing a new handler, but is this really a problem,
		 * only the sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	/*
	 * The following block of code has to be executed atomically
	 */
	spin_lock_irqsave(&desc->lock,flags);
	p = &desc->action;
	if ((old = *p) != NULL) {
		/* Can't share interrupts unless both agree to */
		if (!(old->flags & new->flags & SA_SHIRQ)) {
			spin_unlock_irqrestore(&desc->lock,flags);
			return -EBUSY;
		}

		/* add new interrupt at end of irq queue */
		do {
			p = &old->next;
			old = *p;
		} while (old);
		shared = 1;
	}

	*p = new;

	if (!shared) {
		desc->depth = 0;
		desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT | IRQ_WAITING);
		unmask_irq(irq);
	}
	spin_unlock_irqrestore(&desc->lock,flags);

	register_irq_proc(irq);
	return 0;
}

/* This could be promoted to a real free_irq() ... */
static int
do_free_irq(int irq, void* dev_id)
{
	irq_desc_t *desc;
	struct irqaction **p;
	unsigned long flags;

	desc = irq_desc + irq;
	spin_lock_irqsave(&desc->lock,flags);
	p = &desc->action;
	for (;;) {
		struct irqaction * action = *p;
		if (action) {
			struct irqaction **pp = p;
			p = &action->next;
			if (action->dev_id != dev_id)
				continue;

			/* Found it - now remove it from the list of entries */
			*pp = action->next;
			if (!desc->action) {
				desc->status |= IRQ_DISABLED;
				mask_irq(irq);
			}
			spin_unlock_irqrestore(&desc->lock,flags);

#ifdef CONFIG_SMP
			/* Wait to make sure it's not being used on another CPU */
			while (desc->status & IRQ_INPROGRESS)
				barrier();
#endif
			irq_kfree(action);
			return 0;
		}
		printk("Trying to free free IRQ%d\n",irq);
		spin_unlock_irqrestore(&desc->lock,flags);
		break;
	}
	return -ENOENT;
}

int request_irq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *),
	unsigned long irqflags, const char * devname, void *dev_id)
{
	struct irqaction *action;
	int retval;

	if (irq >= NR_IRQS)
		return -EINVAL;
	if (!handler)
		/* We could implement really free_irq() instead of that... */
		return do_free_irq(irq, dev_id);

	action = (struct irqaction *)
		irq_kmalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action) {
		printk(KERN_ERR "irq_kmalloc() failed for irq %d !\n", irq);
		return -ENOMEM;
	}

	action->handler = handler;
	action->flags = irqflags;
	action->mask = 0;
	action->name = devname;
	action->dev_id = dev_id;
	action->next = NULL;

	retval = setup_irq(irq, action);
	if (retval)
		kfree(action);

	/* propagate setup_irq() failures (e.g. -EBUSY) to the caller */
	return retval;
}

void free_irq(unsigned int irq, void *dev_id)
{
	request_irq(irq, NULL, 0, NULL, dev_id);
}

/*
 * Generic enable/disable code: this just calls
 * down into the PIC-specific version for the actual
 * hardware disable after having gotten the irq
 * controller lock.
 */

/**
 * disable_irq_nosync - disable an irq without waiting
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Disables of an interrupt
 * stack.
 * Unlike disable_irq(), this function does not ensure existing
 * instances of the IRQ handler have completed before returning.
 *
 * This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
	irq_desc_t *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);
	if (!desc->depth++) {
		if (!(desc->status & IRQ_PER_CPU))
			desc->status |= IRQ_DISABLED;
		mask_irq(irq);
	}
	spin_unlock_irqrestore(&desc->lock, flags);
}

/**
 * disable_irq - disable an irq and wait for completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Disables of an interrupt
 * stack. That is for two disables you need two enables. This
 * function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	disable_irq_nosync(irq);

	if (!local_irq_count(smp_processor_id())) {
		do {
			barrier();
		} while (irq_desc[irq].status & IRQ_INPROGRESS);
	}
}

/**
 * enable_irq - enable interrupt handling on an irq
 * @irq: Interrupt to enable
 *
 * Re-enables the processing of interrupts on this IRQ line
 * providing no disable_irq calls are now in effect.
 *
 * This function may be called from IRQ context.
 */
void enable_irq(unsigned int irq)
{
	irq_desc_t *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);
	switch (desc->depth) {
	case 1: {
		unsigned int status = desc->status & ~IRQ_DISABLED;
		desc->status = status;
		if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
			desc->status = status | IRQ_REPLAY;
			hw_resend_irq(desc->handler,irq);
		}
		unmask_irq(irq);
		/* fall-through */
	}
	default:
		desc->depth--;
		break;
	case 0:
		printk("enable_irq(%u) unbalanced\n", irq);
	}
	spin_unlock_irqrestore(&desc->lock, flags);
}

/* one would think this function has one foot in the grave */
int get_irq_list(char *buf)
{
	int i, len = 0, j;
	struct irqaction * action;

	len += sprintf(buf+len, " ");
	for (j=0; j<smp_num_cpus; j++)
		len += sprintf(buf+len, "CPU%d ",j);
	*(char *)(buf+len++) = '\n';

	for (i = 0 ; i < NR_IRQS ; i++) {
		action = irq_desc[i].action;
		if ( !action || !action->handler )
			continue;
		len += sprintf(buf+len, "%3d: ", i);
#ifdef CONFIG_SMP
		for (j = 0; j < smp_num_cpus; j++)
			len += sprintf(buf+len, "%10u ",
				kstat.irqs[cpu_logical_map(j)][i]);
#else
		len += sprintf(buf+len, "%10u ", kstat_irqs(i));
#endif /* CONFIG_SMP */
		if ( irq_desc[i].handler )
			len += sprintf(buf+len, " %s ", irq_desc[i].handler->typename );
		else
			len += sprintf(buf+len, " None ");
		len += sprintf(buf+len, "%s", (irq_desc[i].status & IRQ_LEVEL) ? "Level " : "Edge ");
		len += sprintf(buf+len, " %s",action->name);
		for (action=action->next; action; action = action->next) {
			len += sprintf(buf+len, ", %s", action->name);
		}
		len += sprintf(buf+len, "\n");
	}
#ifdef CONFIG_SMP
	/* should this be per processor send/receive? */
	len += sprintf(buf+len, "IPI (recv/sent): %10u/%u\n",
		atomic_read(&ipi_recv), atomic_read(&ipi_sent));
#endif
	len += sprintf(buf+len, "BAD: %10u\n", ppc_spurious_interrupts);
	return len;
}

int show_interrupts(struct seq_file *p, void *v)
{
	int i, j;
	struct irqaction * action;

	seq_printf(p, " ");
	for (j=0; j<smp_num_cpus; j++)
		seq_printf(p, "CPU%d ",j);
	seq_putc(p, '\n');

	for (i = 0 ; i < NR_IRQS ; i++) {
		action = irq_desc[i].action;
		if (!action || !action->handler)
			continue;
		seq_printf(p, "%3d: ", i);
#ifdef CONFIG_SMP
		for (j = 0; j < smp_num_cpus; j++)
			seq_printf(p, "%10u ",
				kstat.irqs[cpu_logical_map(j)][i]);
#else
		seq_printf(p, "%10u ", kstat_irqs(i));
#endif /* CONFIG_SMP */
		if (irq_desc[i].handler)
			seq_printf(p, " %s ", irq_desc[i].handler->typename );
		else
			seq_printf(p, " None ");
		seq_printf(p, "%s", (irq_desc[i].status & IRQ_LEVEL) ? "Level " : "Edge ");
		seq_printf(p, " %s",action->name);
		for (action=action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);
		seq_putc(p, '\n');
	}
#ifdef CONFIG_SMP
	/* should this be per processor send/receive? */
	seq_printf(p, "IPI (recv/sent): %10u/%u\n",
		atomic_read(&ipi_recv), atomic_read(&ipi_sent));
#endif
	seq_printf(p, "BAD: %10u\n", ppc_spurious_interrupts);
	return 0;
}

static inline void
handle_irq_event(int irq, struct pt_regs *regs, struct irqaction *action)
{
	int status = 0;

	if (!(action->flags & SA_INTERRUPT))
		__sti();

	do {
		status |= action->flags;
		action->handler(irq, action->dev_id, regs);
		action = action->next;
	} while (action);
	if (status & SA_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);
	__cli();
}

/*
 * Eventually, this should take an array of interrupts and an array size
 * so it can dispatch multiple interrupts.
 */
void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq)
{
	int status;
	struct irqaction *action;
	int cpu = smp_processor_id();
	irq_desc_t *desc = irq_desc + irq;

	kstat.irqs[cpu][irq]++;
	spin_lock(&desc->lock);
	ack_irq(irq);
	/*
	 * REPLAY is when Linux resends an IRQ that was dropped earlier
	 * WAITING is used by probe to mark irqs that are being tested
	 */
	status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
	if (!(status & IRQ_PER_CPU))
		status |= IRQ_PENDING; /* we _want_ to handle it */

	/*
	 * If the IRQ is disabled for whatever reason, we cannot
	 * use the action we have.
	 */
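
/*
 * Illustrative usage sketch -- not part of irq.c.  The header comment at the
 * top of this file says drivers should ask for IRQs through these routines
 * rather than grabbing them directly; this is roughly what that looks like
 * with the 2.4-era request_irq()/free_irq() interface defined above.  The
 * device name "mydev", struct mydev, MYDEV_IRQ and the mydev_* functions are
 * hypothetical placeholders, not anything present in this file.
 */
#define MYDEV_IRQ	9		/* assumed example line, not a real assignment */

struct mydev {
	int state;			/* stand-in for real per-device state */
};

static void mydev_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct mydev *dev = dev_id;	/* dev_id identifies this handler on a shared line */
	(void)dev;
	/* acknowledge and service the device here */
}

static int mydev_attach(struct mydev *dev)
{
	/* SA_SHIRQ: willing to share the line; SA_SAMPLE_RANDOM: feed the entropy pool */
	int err = request_irq(MYDEV_IRQ, mydev_interrupt,
			      SA_SHIRQ | SA_SAMPLE_RANDOM, "mydev", dev);
	if (err)
		return err;		/* e.g. -EBUSY if a non-shared handler already owns the line */
	return 0;
}

static void mydev_detach(struct mydev *dev)
{
	/* dev must match the dev_id passed to request_irq() so the right action is removed */
	free_irq(MYDEV_IRQ, dev);
}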