irq.c
/*
 * arch/ppc/kernel/irq.c
 *
 * Derived from arch/i386/kernel/irq.c
 * Copyright (C) 1992 Linus Torvalds
 * Adapted from arch/i386 by Gary Thomas
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Updated and modified by Cort Dougan (cort@cs.nmt.edu)
 * Copyright (C) 1996 Cort Dougan
 * Adapted for Power Macintosh by Paul Mackerras
 * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQ's should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 */

#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/config.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/random.h>
#include <linux/bootmem.h>

#include <asm/uaccess.h>
#include <asm/bitops.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/iSeries/LparData.h>
#include <asm/machdep.h>
#include <asm/paca.h>
#include <asm/perfmon.h>

/*
 * Because the name space for interrupts is so large on ppc64 systems we
 * avoid declaring a single array of "NR_IRQ" interrupts and instead build
 * a three level tree leading to the irq_desc_t (similar to page tables).
 *
 * Currently we cover 24-bit irq values:
 *    10-bits:  the "base" dir (2-pages)
 *     9-bits:  the "middle" dir (1-page)
 *     5-bits:  the "bottom" page (1-page) holding 128byte irq_desc's.
 *
 * We pack a hw_irq_stat struct directly after the irq_desc in the otherwise
 * wasted space of the cacheline.
 *
 * MAX_IRQS is the max this implementation will support.
 * It is much larger than NR_IRQS which is bogus on this arch and often used
 * to declare arrays.
 *
 * Note that all "undefined" mid table and bottom table pointers will point
 * to dummy tables.  Therefore, we don't need to check for NULL on spurious
 * interrupts.
 */
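/*
 * Illustrative arithmetic for the table sizes quoted above (a sketch,
 * assuming 4KB pages and 8-byte pointers; not part of the original file):
 *
 *	base dir:  2^10 pointers  * 8 bytes = 8KB -> 2 pages
 *	mid dir:   2^9  pointers  * 8 bytes = 4KB -> 1 page
 *	bot page:  2^5  irq_desc's * 128B   = 4KB -> 1 page
 *
 * 10 + 9 + 5 = 24 index bits in total, so the tree can address 2^24
 * irq numbers while only the base dir is allocated up front.
 */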
#define IRQ_BASE_INDEX_SIZE	10
#define IRQ_MID_INDEX_SIZE	9
#define IRQ_BOT_DESC_SIZE	5

#define IRQ_BASE_PTRS	(1 << IRQ_BASE_INDEX_SIZE)
#define IRQ_MID_PTRS	(1 << IRQ_MID_INDEX_SIZE)
#define IRQ_BOT_DESCS	(1 << IRQ_BOT_DESC_SIZE)

#define IRQ_BASE_IDX_SHIFT	(IRQ_MID_INDEX_SIZE + IRQ_BOT_DESC_SIZE)
#define IRQ_MID_IDX_SHIFT	(IRQ_BOT_DESC_SIZE)

#define IRQ_MID_IDX_MASK	((1 << IRQ_MID_INDEX_SIZE) - 1)
#define IRQ_BOT_IDX_MASK	((1 << IRQ_BOT_DESC_SIZE) - 1)

irq_desc_t **irq_desc_base_dir[IRQ_BASE_PTRS] __page_aligned = {0};
irq_desc_t **irq_desc_mid_null;
irq_desc_t *irq_desc_bot_null;

unsigned int _next_irq(unsigned int irq);
atomic_t ipi_recv;
atomic_t ipi_sent;
void enable_irq(unsigned int irq_nr);
void disable_irq(unsigned int irq_nr);

#ifdef CONFIG_SMP
extern void iSeries_smp_message_recv( struct pt_regs * );
#endif

volatile unsigned char *chrp_int_ack_special;
static void register_irq_proc (unsigned int irq);

irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned =
	{ [0 ... NR_IRQS-1] = { 0, NULL, NULL, 0, SPIN_LOCK_UNLOCKED}};

static irq_desc_t *add_irq_desc(unsigned int irq);

int ppc_spurious_interrupts = 0;
unsigned long lpEvent_count = 0;

#ifdef CONFIG_XMON
extern void xmon(struct pt_regs *regs);
extern int xmon_bpt(struct pt_regs *regs);
extern int xmon_sstep(struct pt_regs *regs);
extern int xmon_iabr_match(struct pt_regs *regs);
extern int xmon_dabr_match(struct pt_regs *regs);
extern void (*xmon_fault_handler)(struct pt_regs *regs);
#endif
#ifdef CONFIG_XMON
extern void (*debugger)(struct pt_regs *regs);
extern int (*debugger_bpt)(struct pt_regs *regs);
extern int (*debugger_sstep)(struct pt_regs *regs);
extern int (*debugger_iabr_match)(struct pt_regs *regs);
extern int (*debugger_dabr_match)(struct pt_regs *regs);
extern void (*debugger_fault_handler)(struct pt_regs *regs);
#endif

#define IRQ_KMALLOC_ENTRIES 16
static int cache_bitmask = 0;
static struct irqaction malloc_cache[IRQ_KMALLOC_ENTRIES];
extern int mem_init_done;

/* The hw_irq_stat struct is stored directly after the irq_desc_t
 * in the same cacheline.  We need to use care to make sure we don't
 * overrun the size of the cacheline.
 *
 * Currently sizeof(irq_desc_t) is 40 bytes or less and this hw_irq_stat
 * fills the rest of the cache line.
 */
struct hw_irq_stat {
	unsigned long irqs;		/* statistic per irq */
	unsigned long *per_cpu_stats;
	struct proc_dir_entry *irq_dir, *smp_affinity;
	unsigned long irq_affinity;	/* ToDo: cpu bitmask */
};

static inline struct hw_irq_stat *get_irq_stat(irq_desc_t *desc)
{
	/* WARNING: this assumes lock is the last field! */
	return (struct hw_irq_stat *)(&desc->lock+1);
}

static inline unsigned long *get_irq_per_cpu(struct hw_irq_stat *hw)
{
	return hw->per_cpu_stats;
}

static inline irq_desc_t **get_irq_mid_table(unsigned int irq)
{
	/* Assume irq < MAX_IRQS so we won't index off the end. */
	return irq_desc_base_dir[irq >> IRQ_BASE_IDX_SHIFT];
}

static inline irq_desc_t *get_irq_bot_table(unsigned int irq,
					    irq_desc_t **mid_ptr)
{
	return mid_ptr[(irq >> IRQ_MID_IDX_SHIFT) & IRQ_MID_IDX_MASK];
}

/* This should be inline. */
void *_irqdesc(unsigned int irq)
{
	irq_desc_t **mid_table, *bot_table, *desc;

	mid_table = get_irq_mid_table(irq);
	bot_table = get_irq_bot_table(irq, mid_table);

	desc = bot_table + (irq & IRQ_BOT_IDX_MASK);
	return desc;
}
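/*
 * Worked example of the decomposition _irqdesc() performs (illustrative
 * only; 0x123456 is an arbitrary 24-bit irq number):
 *
 *	base index = 0x123456 >> 14          = 0x048  (slot in irq_desc_base_dir)
 *	mid  index = (0x123456 >> 5) & 0x1ff = 0x1a2  (slot in the mid table)
 *	bot  index = 0x123456 & 0x1f         = 0x16   (irq_desc_t in the bottom page)
 *
 * i.e. slot 0x048 of the base dir points at a mid table, slot 0x1a2 of
 * that mid table points at a bottom page, and the descriptor is the
 * 0x16'th irq_desc_t in that page.
 */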
/*
 * This is used by the for_each_irq(i) macro to iterate quickly over
 * all interrupts.  It optimizes by skipping over ptrs to the null tables
 * when possible, but it may produce false positives.
 */
unsigned int _next_irq(unsigned int irq)
{
	irq_desc_t **mid_table, *bot_table;

	irq++;
	/* Easy case first...staying on the current bot_table. */
	if (irq & IRQ_BOT_IDX_MASK)
		return irq;
	/* Now skip empty mid tables */
	while (irq < MAX_IRQS &&
	       (mid_table = get_irq_mid_table(irq)) == irq_desc_mid_null) {
		/* index to the next base index (i.e. the next mid table) */
		irq = (irq & ~(IRQ_BASE_IDX_SHIFT-1)) + IRQ_BASE_IDX_SHIFT;
	}
	/* And skip empty bot tables */
	while (irq < MAX_IRQS &&
	       (bot_table = get_irq_bot_table(irq, mid_table)) == irq_desc_bot_null) {
		/* index to the next mid index (i.e. the next bot table) */
		irq = (irq & ~(IRQ_MID_IDX_SHIFT-1)) + IRQ_MID_IDX_SHIFT;
	}
	return irq;
}

/* Same as irqdesc(irq) except it will "fault in" a real desc as needed
 * rather than return the null entry.
 * This is used by code that is actually defining the irq.
 *
 * NULL may be returned on memory allocation failure.  In general, init code
 * doesn't look for this, but setup_irq does.  In this failure case the desc
 * is left pointing at the null pages so callers of irqdesc() should
 * always return something.
 */
void *_real_irqdesc(unsigned int irq)
{
	irq_desc_t *desc = irqdesc(irq);

	if (((unsigned long)desc & PAGE_MASK) == (unsigned long)irq_desc_bot_null) {
		desc = add_irq_desc(irq);
	}
	return desc;
}

/* Allocate an irq middle page and init entries to null page. */
static irq_desc_t **alloc_irq_mid_page(void)
{
	irq_desc_t **m, **ent;

	if (mem_init_done)
		m = (irq_desc_t **)__get_free_page(GFP_KERNEL);
	else
		m = (irq_desc_t **)alloc_bootmem_pages(PAGE_SIZE);
	if (m) {
		for (ent = m; ent < m + IRQ_MID_PTRS; ent++) {
			*ent = irq_desc_bot_null;
		}
	}
	return m;
}

/* Allocate an irq bottom page and init the entries. */
static irq_desc_t *alloc_irq_bot_page(void)
{
	irq_desc_t *b, *ent;

	if (mem_init_done)
		b = (irq_desc_t *)get_zeroed_page(GFP_KERNEL);
	else
		b = (irq_desc_t *)alloc_bootmem_pages(PAGE_SIZE);
	if (b) {
		for (ent = b; ent < b + IRQ_BOT_DESCS; ent++) {
			ent->lock = SPIN_LOCK_UNLOCKED;
		}
	}
	return b;
}

/*
 * The universe of interrupt numbers ranges from 0 to 2^24.
 * Use a sparsely populated tree to map from the irq to the handler.
 * Top level is 2 contiguous pages, covering the 10 most significant
 * bits.  Mid level is 1 page, covering 9 bits.  Last page covering
 * 5 bits is the irq_desc, each of which is 128B.
 */
static void irq_desc_init(void)
{
	irq_desc_t ***entry_p;

	/*
	 * Now initialize the tables to point through the NULL tables for
	 * the default case of no interrupt handler (spurious).
	 */
	irq_desc_bot_null = alloc_irq_bot_page();
	irq_desc_mid_null = alloc_irq_mid_page();
	if (!irq_desc_bot_null || !irq_desc_mid_null)
		panic("irq_desc_init: could not allocate pages\n");
	for (entry_p = irq_desc_base_dir;
	     entry_p < irq_desc_base_dir + IRQ_BASE_PTRS;
	     entry_p++) {
		*entry_p = irq_desc_mid_null;
	}
}
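/*
 * A minimal sketch (not part of the original file) of how the
 * for_each_irq(i) macro mentioned above might be used, assuming it is
 * built on _next_irq() in an arch header:
 *
 *	unsigned int i;
 *
 *	for (i = 0; i < MAX_IRQS; i = _next_irq(i)) {
 *		irq_desc_t *desc = irqdesc(i);
 *		if (desc->action) {
 *			... act on a genuinely populated descriptor ...
 *		}
 *	}
 *
 * Because _next_irq() only skips whole null mid/bottom tables, the loop
 * may still visit null descriptors, so callers check desc->action.
 */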
/*
 * Add a new irq desc for the given irq if needed.
 * This breaks any ptr to the "null" middle or "bottom" irq desc page.
 * Note that we don't ever coalesce pages as the interrupts are released.
 * This isn't worth the effort.  We add the cpu stats info when the
 * interrupt is actually requested.
 *
 * May return NULL if memory could not be allocated.
 */
static irq_desc_t *add_irq_desc(unsigned int irq)
{
	irq_desc_t **mid_table_p, *bot_table_p;

	mid_table_p = get_irq_mid_table(irq);
	if (mid_table_p == irq_desc_mid_null) {
		/* No mid table for this IRQ - create it */
		mid_table_p = alloc_irq_mid_page();
		if (!mid_table_p)
			return NULL;
		irq_desc_base_dir[irq >> IRQ_BASE_IDX_SHIFT] = mid_table_p;
	}

	bot_table_p = (irq_desc_t *)(*(mid_table_p + ((irq >> 5) & 0x1ff)));
	if (bot_table_p == irq_desc_bot_null) {
		/* No bot table for this IRQ - create it */
		bot_table_p = alloc_irq_bot_page();
		if (!bot_table_p)
			return NULL;
		mid_table_p[(irq >> IRQ_MID_IDX_SHIFT) & IRQ_MID_IDX_MASK] = bot_table_p;
	}

	return bot_table_p + (irq & IRQ_BOT_IDX_MASK);
}

void *irq_kmalloc(size_t size, int pri)
{
	unsigned int i;

	if ( mem_init_done )
		return kmalloc(size, pri);
	for ( i = 0; i < IRQ_KMALLOC_ENTRIES; i++ )
		if ( !(cache_bitmask & (1<<i)) ) {
			cache_bitmask |= (1<<i);
			return (void *)(&malloc_cache[i]);
		}
	return 0;
}

void irq_kfree(void *ptr)
{
	unsigned int i;

	for ( i = 0; i < IRQ_KMALLOC_ENTRIES; i++ )
		if ( ptr == &malloc_cache[i] ) {
			cache_bitmask &= ~(1<<i);
			return;
		}
	kfree(ptr);
}

void allocate_per_cpu_stats(struct hw_irq_stat *hwstat)
{
	unsigned long *p;

	if (mem_init_done) {
		p = (unsigned long *)kmalloc(sizeof(long)*NR_CPUS, GFP_KERNEL);
		if (p)
			memset(p, 0, sizeof(long)*NR_CPUS);
	} else
		p = (unsigned long *)alloc_bootmem(sizeof(long)*NR_CPUS);
	hwstat->per_cpu_stats = p;
}

int
setup_irq(unsigned int irq, struct irqaction * new)
{
	int shared = 0;
	unsigned long flags;
	struct irqaction *old, **p;
	irq_desc_t *desc = real_irqdesc(irq);
	struct hw_irq_stat *hwstat;

	if (!desc)
		return -ENOMEM;

	ppc_md.init_irq_desc(desc);

	hwstat = get_irq_stat(desc);
#ifdef CONFIG_IRQ_ALL_CPUS
	hwstat->irq_affinity = ~0;
#else
	hwstat->irq_affinity = 0;
#endif

	/* Now is the time to add per-cpu kstat data to the desc
	 * since it appears we are actually going to use the irq.
	 */
	allocate_per_cpu_stats(hwstat);

	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & SA_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, we want to call it first,
		 * outside of the atomic block.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded, without actually
		 * installing a new handler, but is this really a problem,
		 * only the sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	/*
	 * The following block of code has to be executed atomically
	 */
	spin_lock_irqsave(&desc->lock, flags);
	p = &desc->action;
	if ((old = *p) != NULL) {
		/* Can't share interrupts unless both agree to */
		if (!(old->flags & new->flags & SA_SHIRQ)) {