irq.c
/*
 *	linux/arch/ia64/kernel/irq.c
 *
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQ's should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 */

/*
 * (mostly architecture independent, will move to kernel/irq.c in 2.5.)
 *
 * IRQs are in fact implemented a bit like signal handlers for the kernel.
 * Naturally it's not a 1:1 relation, but there are similarities.
 */

#include <linux/config.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/irq.h>
#include <linux/proc_fs.h>

#include <asm/atomic.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/delay.h>
#include <asm/irq.h>

/*
 * Linux has a controller-independent x86 interrupt architecture.
 * Every controller has a 'controller-template', that is used
 * by the main code to do the right thing. Each driver-visible
 * interrupt source is transparently wired to the appropriate
 * controller. Thus drivers need not be aware of the
 * interrupt-controller.
 *
 * Various interrupt controllers we handle: 8259 PIC, SMP IO-APIC,
 * PIIX4's internal 8259 PIC and SGI's Visual Workstation Cobalt (IO-)APIC.
 * (IO-APICs assumed to be messaging to Pentium local-APICs)
 *
 * The code is designed to be easily extended with new/different
 * interrupt controllers, without having to do assembly magic.
 */

/*
 * Controller mappings for all interrupt sources:
 */
irq_desc_t _irq_desc[NR_IRQS] __cacheline_aligned =
	{ [0 ... NR_IRQS-1] = { IRQ_DISABLED, &no_irq_type, NULL, 0, SPIN_LOCK_UNLOCKED}};

#ifdef CONFIG_IA64_GENERIC
struct irq_desc *
__ia64_irq_desc (unsigned int irq)
{
	return _irq_desc + irq;
}

ia64_vector
__ia64_irq_to_vector (unsigned int irq)
{
	return (ia64_vector) irq;
}

unsigned int
__ia64_local_vector_to_irq (ia64_vector vec)
{
	return (unsigned int) vec;
}
#endif

static void register_irq_proc (unsigned int irq);

/*
 * Special irq handlers.
 */

void no_action(int cpl, void *dev_id, struct pt_regs *regs) { }

/*
 * Generic no controller code
 */

static void enable_none(unsigned int irq) { }
static unsigned int startup_none(unsigned int irq) { return 0; }
static void disable_none(unsigned int irq) { }
static void ack_none(unsigned int irq)
{
/*
 * 'what should we do if we get a hw irq event on an illegal vector'.
 * Each architecture has to answer this themselves, it doesn't deserve
 * a generic callback I think.
 */
#if CONFIG_X86
	printk("unexpected IRQ trap at vector %02x\n", irq);
#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * Currently unexpected vectors happen only on SMP and APIC.
	 * We _must_ ack these because every local APIC has only N
	 * irq slots per priority level, and a 'hanging, unacked' IRQ
	 * holds up an irq slot - in excessive cases (when multiple
	 * unexpected vectors occur) that might lock up the APIC
	 * completely.
	 */
	ack_APIC_irq();
#endif
#endif
#if CONFIG_IA64
	printk("Unexpected irq vector 0x%x on CPU %u!\n", irq, smp_processor_id());
#endif
}

/* startup is the same as "enable", shutdown is same as "disable" */
#define shutdown_none	disable_none
#define end_none	enable_none

struct hw_interrupt_type no_irq_type = {
	"none",
	startup_none,
	shutdown_none,
	enable_none,
	disable_none,
	ack_none,
	end_none
};

atomic_t irq_err_count;
#if defined(CONFIG_X86) && defined(CONFIG_X86_IO_APIC) && defined(APIC_MISMATCH_DEBUG)
atomic_t irq_mis_count;
#endif
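/*
 * Editorial sketch, not part of the original source: a real controller
 * driver fills in the same hw_interrupt_type template that no_irq_type
 * stubs out above.  Everything named foo_* below is hypothetical (an
 * imaginary controller with one memory-mapped mask register and an EOI
 * register); real templates include i8259A_irq_type and the IO-APIC
 * edge/level types.
 */
#ifdef EDITORIAL_EXAMPLE	/* illustration only, never compiled */

extern unsigned long foo_read_mask (void);
extern void foo_write_mask (unsigned long mask);
extern void foo_write_eoi (unsigned int irq);

static void enable_foo_irq (unsigned int irq)
{
	/* unmask: clear this source's bit in the (hypothetical) mask register */
	foo_write_mask(foo_read_mask() & ~(1UL << irq));
}

static void disable_foo_irq (unsigned int irq)
{
	/* mask: set this source's bit */
	foo_write_mask(foo_read_mask() | (1UL << irq));
}

static unsigned int startup_foo_irq (unsigned int irq)
{
	enable_foo_irq(irq);
	return 0;	/* startup() reports a pending interrupt; none here */
}

static void ack_foo_irq (unsigned int irq)
{
	/* keep the source masked while its handlers run, then EOI it */
	disable_foo_irq(irq);
	foo_write_eoi(irq);
}

static void end_foo_irq (unsigned int irq)
{
	/* re-enable unless a handler disabled the irq or it is being replayed */
	if (!(irq_desc(irq)->status & (IRQ_DISABLED | IRQ_INPROGRESS)))
		enable_foo_irq(irq);
}

#define shutdown_foo_irq	disable_foo_irq

struct hw_interrupt_type foo_irq_type = {
	"FOO",
	startup_foo_irq,
	shutdown_foo_irq,
	enable_foo_irq,
	disable_foo_irq,
	ack_foo_irq,
	end_foo_irq
};

#endif /* EDITORIAL_EXAMPLE */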
/*
 * Generic, controller-independent functions:
 */

int get_irq_list(char *buf)
{
	int i, j;
	struct irqaction * action;
	irq_desc_t *idesc;
	char *p = buf;

	p += sprintf(p, "           ");
	for (j=0; j<smp_num_cpus; j++)
		p += sprintf(p, "CPU%d       ",j);
	*p++ = '\n';

	for (i = 0 ; i < NR_IRQS ; i++) {
		idesc = irq_desc(i);
		action = idesc->action;
		if (!action)
			continue;
		p += sprintf(p, "%3d: ",i);
#ifndef CONFIG_SMP
		p += sprintf(p, "%10u ", kstat_irqs(i));
#else
		for (j = 0; j < smp_num_cpus; j++)
			p += sprintf(p, "%10u ",
				     kstat.irqs[cpu_logical_map(j)][i]);
#endif
		p += sprintf(p, " %14s", idesc->handler->typename);
		p += sprintf(p, "  %s", action->name);

		for (action=action->next; action; action = action->next)
			p += sprintf(p, ", %s", action->name);
		*p++ = '\n';
	}
	p += sprintf(p, "NMI: ");
	for (j = 0; j < smp_num_cpus; j++)
		p += sprintf(p, "%10u ", nmi_count(cpu_logical_map(j)));
	p += sprintf(p, "\n");
#if defined(CONFIG_SMP) && defined(CONFIG_X86)
	p += sprintf(p, "LOC: ");
	for (j = 0; j < smp_num_cpus; j++)
		p += sprintf(p, "%10u ", apic_timer_irqs[cpu_logical_map(j)]);
	p += sprintf(p, "\n");
#endif
	p += sprintf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
#if defined(CONFIG_X86) && defined(CONFIG_X86_IO_APIC) && defined(APIC_MISMATCH_DEBUG)
	p += sprintf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
#endif
	return p - buf;
}
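/*
 * Editorial note, not part of the original source: get_irq_list() is
 * what backs the /proc/interrupts listing.  On a two-CPU box the output
 * has roughly this shape (the counts and device names are made up for
 * illustration):
 *
 *	           CPU0       CPU1
 *	 31:     104323      98231   IO-SAPIC-level  eth0
 *	 39:      15326      14489   IO-SAPIC-level  ide0
 *	NMI:          0          0
 *	ERR:          0
 *
 * One line per vector with at least one irqaction: the per-CPU kstat
 * counters (%10u each), the controller's typename (%14s), then the
 * comma-separated action names of all handlers sharing the line.
 */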
/*
 * Global interrupt locks for SMP. Allow interrupts to come in on any
 * CPU, yet make cli/sti act globally to protect critical regions..
 */

#ifdef CONFIG_SMP
unsigned int global_irq_holder = NO_PROC_ID;
unsigned volatile long global_irq_lock; /* pedantic: long for set_bit --RR */

extern void show_stack(unsigned long* esp);

static void show(char * str)
{
	int i;
	int cpu = smp_processor_id();

	printk("\n%s, CPU %d:\n", str, cpu);
	printk("irq:  %d [",irqs_running());
	for(i=0;i < smp_num_cpus;i++)
		printk(" %d",irq_count(i));

	printk(" ]\nbh:   %d [",spin_is_locked(&global_bh_lock) ? 1 : 0);
	for(i=0;i < smp_num_cpus;i++)
		printk(" %d",bh_count(i));

	printk(" ]\nStack dumps:");
#if defined(CONFIG_IA64)
	/*
	 * We can't unwind the stack of another CPU without access to
	 * the registers of that CPU. And sending an IPI when we're
	 * in a potentially wedged state doesn't sound like a smart
	 * idea.
	 */
#elif defined(CONFIG_X86)
	for(i=0;i< smp_num_cpus;i++) {
		unsigned long esp;
		if(i==cpu)
			continue;
		printk("\nCPU %d:",i);
		esp = init_tss[i].esp0;
		if(esp==NULL) {
			/* tss->esp0 is set to NULL in cpu_init(),
			 * it's initialized when the cpu returns to user
			 * space. -- manfreds
			 */
			printk(" <unknown> ");
			continue;
		}
		esp &= ~(THREAD_SIZE-1);
		esp += sizeof(struct task_struct);
		show_stack((void*)esp);
	}
#else
	You lose...
#endif
	printk("\nCPU %d:",cpu);
	show_stack(NULL);
	printk("\n");
}

#define MAXCOUNT 100000000

/*
 * I had a lockup scenario where a tight loop doing
 * spin_unlock()/spin_lock() on CPU#1 was racing with
 * spin_lock() on CPU#0. CPU#0 should have noticed spin_unlock(), but
 * apparently the spin_unlock() information did not make it
 * through to CPU#0 ... nasty, is this by design, do we have to limit
 * 'memory update oscillation frequency' artificially like here?
 *
 * Such 'high frequency update' races can be avoided by careful design, but
 * some of our major constructs like spinlocks use similar techniques,
 * it would be nice to clarify this issue. Set this define to 0 if you
 * want to check whether your system freezes. I suspect the delay done
 * by SYNC_OTHER_CORES() is in correlation with 'snooping latency', but
 * I thought that such things are guaranteed by design, since we use
 * the 'LOCK' prefix.
 */
#define SUSPECTED_CPU_OR_CHIPSET_BUG_WORKAROUND 0

#if SUSPECTED_CPU_OR_CHIPSET_BUG_WORKAROUND
# define SYNC_OTHER_CORES(x) udelay(x+1)
#else
/*
 * We have to allow irqs to arrive between __sti and __cli
 */
# ifdef CONFIG_IA64
#  define SYNC_OTHER_CORES(x) __asm__ __volatile__ ("nop 0")
# else
#  define SYNC_OTHER_CORES(x) __asm__ __volatile__ ("nop")
# endif
#endif

static inline void wait_on_irq(void)
{
	int count = MAXCOUNT;

	for (;;) {

		/*
		 * Wait until all interrupts are gone. Wait
		 * for bottom half handlers unless we're
		 * already executing in one..
		 */
		if (!irqs_running())
			if (really_local_bh_count() || !spin_is_locked(&global_bh_lock))
				break;

		/* Duh, we have to loop. Release the lock to avoid deadlocks */
		smp_mb__before_clear_bit();	/* need barrier before releasing lock... */
		clear_bit(0,&global_irq_lock);

		for (;;) {
			if (!--count) {
				show("wait_on_irq");
				count = ~0;
			}
			__sti();
			SYNC_OTHER_CORES(smp_processor_id());
			__cli();
			if (irqs_running())
				continue;
			if (global_irq_lock)
				continue;
			if (!really_local_bh_count() && spin_is_locked(&global_bh_lock))
				continue;
			if (!test_and_set_bit(0,&global_irq_lock))
				break;
		}
	}
}

/*
 * This is called when we want to synchronize with
 * interrupts. We may for example tell a device to
 * stop sending interrupts: but to make sure there
 * are no interrupts that are executing on another
 * CPU we need to call this function.
 */
void synchronize_irq(void)
{
	if (irqs_running()) {
		/* Stupid approach */
		cli();
		sti();
	}
}

static inline void get_irqlock(void)
{
	if (test_and_set_bit(0,&global_irq_lock)) {
		/* do we already hold the lock? */
		if (smp_processor_id() == global_irq_holder)
			return;
		/* Uhhuh.. Somebody else got it. Wait.. */
		do {
			do {
#ifdef CONFIG_X86
				rep_nop();
#endif
			} while (test_bit(0,&global_irq_lock));
		} while (test_and_set_bit(0,&global_irq_lock));
	}
	/*
	 * We also need to make sure that nobody else is running
	 * in an interrupt context.
	 */
	wait_on_irq();

	/*
	 * Ok, finally..
	 */
	global_irq_holder = smp_processor_id();
}

#define EFLAGS_IF_SHIFT	9

/*
 * A global "cli()" while in an interrupt context
 * turns into just a local cli(). Interrupts
 * should use spinlocks for the (very unlikely)
 * case that they ever want to protect against
 * each other.
 *
 * If we already have local interrupts disabled,
 * this will not turn a local disable into a
 * global one (problems with spinlocks: this makes
 * save_flags+cli+sti usable inside a spinlock).
 */
void __global_cli(void)
{
	unsigned int flags;

#ifdef CONFIG_IA64
	__save_flags(flags);
	if (flags & IA64_PSR_I) {
		__cli();
		if (!really_local_irq_count())
			get_irqlock();
	}
#else
	__save_flags(flags);
	if (flags & (1 << EFLAGS_IF_SHIFT)) {
		__cli();
		if (!really_local_irq_count())
			get_irqlock();
	}
#endif
}

void __global_sti(void)
{
	if (!really_local_irq_count())
		release_irqlock(smp_processor_id());
	__sti();
}
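/*
 * Editorial sketch, not part of the original source: on SMP kernels the
 * cli(), sti(), save_flags() and restore_flags() macros expand to the
 * __global_*() routines above, so old-style driver code like this
 * excludes interrupt handlers on *all* CPUs, not just the local one.
 * The device state below is hypothetical; new code should prefer a
 * spinlock (spin_lock_irqsave) over the global cli.
 */
#ifdef EDITORIAL_EXAMPLE	/* illustration only, never compiled */

static unsigned long foo_pending_events;

static void foo_flush_events (void)
{
	unsigned long flags;

	save_flags(flags);	/* global flags save on SMP */
	cli();			/* __global_cli(): takes the global irq lock
				 * and waits (wait_on_irq) until no handler
				 * is running on any other CPU */
	foo_pending_events = 0;
	restore_flags(flags);	/* releases the global irq lock if we took it */
}

#endif /* EDITORIAL_EXAMPLE */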