irq.c
来自「优龙2410linux2.6.8内核源代码」· C语言 代码 · 共 882 行 · 第 1/2 页
C
882 行
/*
 * Code to handle x86 style IRQs plus some generic interrupt stuff.
 *
 * Copyright (C) 1992 Linus Torvalds
 * Copyright (C) 1994, 1995, 1996, 1997, 1998 Ralf Baechle
 * Copyright (C) 1999 SuSE GmbH (Philipp Rumpf, prumpf@tux.org)
 * Copyright (C) 1999-2000 Grant Grundler
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/bitops.h>
#include <linux/config.h>
#include <linux/eisa.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/types.h>
#include <linux/ioport.h>
#include <linux/timex.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>

#include <asm/cache.h>
#include <asm/pdc.h>

#undef DEBUG_IRQ
#undef PARISC_IRQ_CR16_COUNTS

extern irqreturn_t timer_interrupt(int, void *, struct pt_regs *);
extern irqreturn_t ipi_interrupt(int, void *, struct pt_regs *);

#ifdef DEBUG_IRQ
#define DBG_IRQ(irq, x)	if ((irq) != TIMER_IRQ) printk x
#else /* DEBUG_IRQ */
#define DBG_IRQ(irq, x)	do { } while (0)
#endif /* DEBUG_IRQ */

/* EIRR/EIEM bit for a CPU-region IRQ offset.
** Bits in EIEM correlate with cpu_irq_action[].
** Numbered *Big Endian*! (ie bit 0 is MSB)
*/
#define EIEM_MASK(irq)	(1UL<<(MAX_CPU_IRQ-IRQ_OFFSET(irq)))

/* Mask of currently-enabled external interrupts, mirrored into each
** CPU's EIEM control register by cpu_set_eiem().
*/
static volatile unsigned long cpu_eiem = 0;

static spinlock_t irq_lock = SPIN_LOCK_UNLOCKED;  /* protect IRQ regions */

/* Cross-CPU callback: load the given value into this CPU's EIEM. */
static void cpu_set_eiem(void *info)
{
	set_eiem((unsigned long) info);
}

/* Clear the IRQ's bit in cpu_eiem and push the new mask to every CPU. */
static inline void disable_cpu_irq(void *unused, int irq)
{
	unsigned long eirr_bit = EIEM_MASK(irq);

	cpu_eiem &= ~eirr_bit;
	on_each_cpu(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
}

/* Enable an IRQ: discard any interrupt latched while it was disabled,
** then set its bit in cpu_eiem on every CPU.
*/
static void enable_cpu_irq(void *unused, int irq)
{
	unsigned long eirr_bit = EIEM_MASK(irq);

	mtctl(eirr_bit, 23);	/* clear EIRR bit before unmasking */
	cpu_eiem |= eirr_bit;
	on_each_cpu(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
}

/* mask and disable are the same at the CPU level
** Difference is enable clears pending interrupts
*/
#define mask_cpu_irq	disable_cpu_irq

/* Re-enable an IRQ without clearing EIRR, so a pending interrupt
** still fires once unmasked.
*/
static inline void unmask_cpu_irq(void *unused, int irq)
{
	unsigned long eirr_bit = EIEM_MASK(irq);

	cpu_eiem |= eirr_bit;
	/* NOTE: sending an IPI will cause do_cpu_irq_mask() to
	** handle *any* unmasked pending interrupts.
	** ie We don't need to check for pending interrupts here.
	*/
	on_each_cpu(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
}

/*
 * XXX cpu_irq_actions[] will become 2 dimensional for per CPU EIR support.
 * correspond changes needed in:
 *	processor_probe()	initialize additional action arrays
 *	request_irq()		handle CPU IRQ region specially
 *	do_cpu_irq_mask()	index into the matching irq_action array.
 */
struct irqaction cpu_irq_actions[IRQ_PER_REGION] = {
	[IRQ_OFFSET(TIMER_IRQ)] = { .handler = timer_interrupt, .name = "timer", },
#ifdef CONFIG_SMP
	[IRQ_OFFSET(IPI_IRQ)]	= { .handler = ipi_interrupt, .name = "IPI", },
#endif
};

/* The CPU's own interrupt region (EIRR-based IRQs on CPU 0). */
struct irq_region cpu0_irq_region = {
	.ops	= {
			.disable_irq	= disable_cpu_irq,
			.enable_irq	= enable_cpu_irq,
			/* NOTE(review): mask_irq points at unmask_cpu_irq
			** even though mask_cpu_irq is defined above and
			** never used -- looks suspicious, confirm against
			** upstream before changing. */
			.mask_irq	= unmask_cpu_irq,
			.unmask_irq	= unmask_cpu_irq
	},
	.data	= {
			.dev		= &cpu_data[0],
			.name		= "PARISC-CPU",
			.irqbase	= IRQ_FROM_REGION(CPU_IRQ_REGION),
	},
	.action	= cpu_irq_actions,
};

struct irq_region *irq_region[NR_IRQ_REGS] = {
	[ 0 ]              = NULL, /* reserved for EISA, else causes data page fault (aka code 15) */
	[ CPU_IRQ_REGION ] = &cpu0_irq_region,
};

/*
** Generic interfaces that device drivers can use:
**    mask_irq()	block IRQ
**    unmask_irq()	re-enable IRQ and trigger if IRQ is pending
**    disable_irq()	block IRQ
**    enable_irq()	clear pending and re-enable IRQ
*/

/* Block an IRQ via its region's mask_irq op (no-op if the region has none). */
void mask_irq(int irq)
{
	struct irq_region *region;

	DBG_IRQ(irq, ("mask_irq(%d) %d+%d eiem 0x%lx\n", irq,
				IRQ_REGION(irq), IRQ_OFFSET(irq), cpu_eiem));
	irq = irq_canonicalize(irq);
	region = irq_region[IRQ_REGION(irq)];
	if (region->ops.mask_irq)
		region->ops.mask_irq(region->data.dev, IRQ_OFFSET(irq));
}

/* Re-enable an IRQ; a pending interrupt may fire (see region's op). */
void unmask_irq(int irq)
{
	struct irq_region *region;

	DBG_IRQ(irq, ("unmask_irq(%d) %d+%d eiem 0x%lx\n", irq,
				IRQ_REGION(irq), IRQ_OFFSET(irq), cpu_eiem));
	irq = irq_canonicalize(irq);
	region = irq_region[IRQ_REGION(irq)];
	if (region->ops.unmask_irq)
		region->ops.unmask_irq(region->data.dev, IRQ_OFFSET(irq));
}

/* Block an IRQ; the region MUST provide a disable op (BUG otherwise). */
void disable_irq(int irq)
{
	struct irq_region *region;

	DBG_IRQ(irq, ("disable_irq(%d) %d+%d eiem 0x%lx\n", irq,
				IRQ_REGION(irq), IRQ_OFFSET(irq), cpu_eiem));
	irq = irq_canonicalize(irq);
	region = irq_region[IRQ_REGION(irq)];
	if (region->ops.disable_irq)
		region->ops.disable_irq(region->data.dev, IRQ_OFFSET(irq));
	else
		BUG();
}
EXPORT_SYMBOL(disable_irq);

/* Clear any pending interrupt and re-enable the IRQ; the region MUST
** provide an enable op (BUG otherwise).
*/
void enable_irq(int irq)
{
	struct irq_region *region;

	DBG_IRQ(irq, ("enable_irq(%d) %d+%d EIRR 0x%lx EIEM 0x%lx\n", irq,
				IRQ_REGION(irq), IRQ_OFFSET(irq), mfctl(23), mfctl(15)));
	irq = irq_canonicalize(irq);
	region = irq_region[IRQ_REGION(irq)];
	if (region->ops.enable_irq)
		region->ops.enable_irq(region->data.dev, IRQ_OFFSET(irq));
	else
		BUG();
}
EXPORT_SYMBOL(enable_irq);

/* /proc/interrupts: one region per seq_file iteration step (*v). */
int show_interrupts(struct seq_file *p, void *v)
{
#ifdef CONFIG_PROC_FS
	unsigned int regnr = *(loff_t *) v, i = 0;

	if (regnr == 0) {
		seq_puts(p, " ");
#ifdef CONFIG_SMP
		for (i = 0; i < NR_CPUS; i++)
			if (cpu_online(i))
#endif
				seq_printf(p, " CPU%02d ", i);

#ifdef PARISC_IRQ_CR16_COUNTS
		seq_printf(p, "[min/avg/max] (CPU cycle counts)");
#endif
		seq_putc(p, '\n');
	}

	/* We don't need *irqsave lock variants since this is
	** only allowed to change while in the base context.
	*/
	spin_lock(&irq_lock);
	if (regnr < NR_IRQ_REGS) {
		struct irq_region *region = irq_region[regnr];

		if (!region || !region->action)
			goto skip;

		for (i = 0; i <= MAX_CPU_IRQ; i++) {
			/* BUGFIX(review): source copy had the mis-encoded
			** "®ion" (HTML &reg; corruption) here; restored
			** to the address-of expression. */
			struct irqaction *action = &region->action[i];
			unsigned int irq_no = IRQ_FROM_REGION(regnr) + i;
			int j = 0;

			if (!action->handler)
				continue;

			seq_printf(p, "%3d: ", irq_no);
#ifdef CONFIG_SMP
			for (; j < NR_CPUS; j++)
				if (cpu_online(j))
#endif
					seq_printf(p, "%10u ", kstat_cpu(j).irqs[irq_no]);

			seq_printf(p, " %14s",
				    region->data.name ? region->data.name : "N/A");
#ifndef PARISC_IRQ_CR16_COUNTS
			seq_printf(p, " %s", action->name);
			while ((action = action->next))
				seq_printf(p, ", %s", action->name);
#else
			for ( ; action; action = action->next) {
				unsigned int k, avg, min, max;

				min = max = action->cr16_hist[0];
				for (avg = k = 0; k < PARISC_CR16_HIST_SIZE; k++) {
					int hist = action->cr16_hist[k];

					if (hist) {
						avg += hist;
					} else
						break;

					if (hist > max)
						max = hist;
					if (hist < min)
						min = hist;
				}
				avg /= k;	/* k > 0 here: hist[0] nonzero or loop broke at k=0 only if hist[0]==0, which the continue above prevents for active actions -- TODO confirm */
				seq_printf(p, " %s[%d/%d/%d]", action->name,
						min,avg,max);
			}
#endif
			seq_putc(p, '\n');
		}
	}
skip:
	spin_unlock(&irq_lock);
#endif /* CONFIG_PROC_FS */
	return 0;
}

/*
** The following form a "set": Virtual IRQ, Transaction Address, Trans Data.
** Respectively, these map to IRQ region+EIRR, Processor HPA, EIRR bit.
**
** To use txn_XXX() interfaces, get a Virtual IRQ first.
** Then use that to get the Transaction address and data.
*/

/* Find a free CPU-region IRQ and return its virtual IRQ number,
** or -1 if none is free.  IRQ 0 (interval timer) is never handed out.
*/
int
txn_alloc_irq(void)
{
	int irq;

	/* never return irq 0 cause that's the interval timer */
	for (irq = 1; irq <= MAX_CPU_IRQ; irq++) {
		if (cpu_irq_actions[irq].handler == NULL) {
			return (IRQ_FROM_REGION(CPU_IRQ_REGION) + irq);
		}
	}

	/* unlikely, but be prepared */
	return -1;
}

/* Claim a specific virtual IRQ; returns the irq if unowned, else -1. */
int
txn_claim_irq(int irq)
{
	if (irq_region[IRQ_REGION(irq)]->action[IRQ_OFFSET(irq)].handler == NULL)
		return irq;

	/* unlikely, but be prepared */
	return -1;
}

/* Return the transaction address (processor HPA) for the next CPU in a
** simple round-robin over online CPUs with a valid txn_addr.
** NOTE(review): static next_cpu is updated without a lock -- presumably
** only called from base context; confirm against callers.
*/
unsigned long
txn_alloc_addr(int virt_irq)
{
	static int next_cpu = -1;

	next_cpu++; /* assign to "next" CPU we want this bugger on */

	/* validate entry */
	while ((next_cpu < NR_CPUS) && (!cpu_data[next_cpu].txn_addr ||
		!cpu_online(next_cpu)))
		next_cpu++;

	if (next_cpu >= NR_CPUS)
		next_cpu = 0;	/* nothing else, assign monarch */

	return cpu_data[next_cpu].txn_addr;
}

/*
** The alloc process needs to accept a parameter to accommodate limitations
** of the HW/SW which use these bits:
** Legacy PA I/O (GSC/NIO): 5 bits (architected EIM register)
** V-class (EPIC):          6 bits
** N/L-class/A500:          8 bits (iosapic)
** PCI 2.2 MSI:             16 bits (I think)
** Existing PCI devices:    32-bits (all Symbios SCSI/ATM/HyperFabric)
**
** On the service provider side:
** o PA 1.1 (and PA2.0 narrow mode)     5-bits (width of EIR register)
** o PA 2.0 wide mode                   6-bits (per processor)
** o IA64                               8-bits (0-256 total)
**
** So a Legacy PA I/O device on a PA 2.0 box can't use all
** the bits supported by the processor...and the N/L-class
** I/O subsystem supports more bits than PA2.0 has.  The first
** case is the problem.
*/
unsigned int
txn_alloc_data(int virt_irq, unsigned int bits_wide)
{
	/* XXX FIXME : bits_wide indicates how wide the transaction
	** data is allowed to be...we may need a different virt_irq
	** if this one won't work. Another reason to index virtual
	** irq's into a table which can manage CPU/IRQ bit separately.
	*/
	if (IRQ_OFFSET(virt_irq) > (1 << (bits_wide -1)))
	{
		panic("Sorry -- didn't allocate valid IRQ for this device\n");
	}

	return (IRQ_OFFSET(virt_irq));
}

/* Walk an irqaction chain for one IRQ, accounting it and calling each
** handler; tolerates missing handlers (EISA autodetect probing).
*/
void do_irq(struct irqaction *action, int irq, struct pt_regs * regs)
{
	int cpu = smp_processor_id();

	irq_enter();
	++kstat_cpu(cpu).irqs[irq];

	DBG_IRQ(irq, ("do_irq(%d) %d+%d eiem 0x%lx\n", irq,
			IRQ_REGION(irq), IRQ_OFFSET(irq), cpu_eiem));

	for (; action; action = action->next) {
#ifdef PARISC_IRQ_CR16_COUNTS
		unsigned long cr_start = mfctl(16);
#endif

		if (action->handler == NULL) {
			if (IRQ_REGION(irq) == EISA_IRQ_REGION && irq_region[EISA_IRQ_REGION]) {
				/* were we called due to autodetecting (E)ISA irqs ? */
				unsigned int *status;
				status = &irq_region[EISA_IRQ_REGION]->data.status[IRQ_OFFSET(irq)];
				if (*status & IRQ_AUTODETECT) {
					*status &= ~IRQ_WAITING;
					continue;
				}
			}
			printk(KERN_ERR "IRQ: CPU:%d No handler for IRQ %d !\n",
				cpu, irq);
			continue;
		}

		action->handler(irq, action->dev_id, regs);

#ifdef PARISC_IRQ_CR16_COUNTS
		{
			unsigned long cr_end = mfctl(16);
			unsigned long tmp = cr_end - cr_start;
			/* check for roll over */
			cr_start = (cr_end < cr_start) ?  -(tmp) : (tmp);
		}
		action->cr16_hist[action->cr16_idx++] = (int) cr_start;
		action->cr16_idx &= PARISC_CR16_HIST_SIZE - 1;
#endif
	}

	irq_exit();
}


/* ONLY called from entry.S:intr_extint() */
void do_cpu_irq_mask(struct pt_regs *regs)
{
	unsigned long eirr_val;
	unsigned int i=3;	/* limit time in interrupt context */

	/*
	 * PSW_I or EIEM bits cannot be enabled until after the
	 * interrupts are processed.
	 * timer_interrupt() assumes it won't get interrupted when it
	 * holds the xtime_lock...an unmasked interrupt source could
	 * interrupt and deadlock by trying to grab xtime_lock too.
	 * Keeping PSW_I and EIEM disabled avoids this.
⌨️ 快捷键说明
复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?