/*
* linux/arch/i386/kernel/irq.c
*
* Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
*
* This file contains the code used by various IRQ handling routines:
* asking for different IRQ's should be done through these routines
* instead of just grabbing them. Thus setups with different IRQ numbers
* shouldn't result in any weird surprises, and installing new handlers
* should be easier.
*/
/*
* (mostly architecture independent, will move to kernel/irq.c in 2.5.)
*
* IRQs are in fact implemented a bit like signal handlers for the kernel.
* Naturally it's not a 1:1 relation, but there are similarities.
*/
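/*
 * Driver-side sketch of "asking through these routines" (the device
 * and handler names here are hypothetical; request_irq()/free_irq()
 * are the real entry points):
 *
 *	err = request_irq(dev->irq, my_isr, SA_SHIRQ, "mydev", dev);
 *	if (err)
 *		return err;
 *	...
 *	free_irq(dev->irq, dev);
 */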
#include <linux/config.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/malloc.h>
#include <linux/random.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/irq.h>
#include <linux/proc_fs.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/delay.h>
#include <asm/desc.h>
#include <asm/irq.h>
/*
* Linux has a controller-independent x86 interrupt architecture.
* Every controller has a 'controller-template' that is used
* by the main code to do the right thing. Each driver-visible
* interrupt source is transparently wired to the appropriate
* controller. Thus drivers need not be aware of the
* interrupt-controller.
*
* Various interrupt controllers we handle: 8259 PIC, SMP IO-APIC,
* PIIX4's internal 8259 PIC and SGI's Visual Workstation Cobalt (IO-)APIC.
* (IO-APICs assumed to be messaging to Pentium local-APICs)
*
* the code is designed to be easily extended with new/different
* interrupt controllers, without having to do assembly magic.
*/
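/*
 * Illustrative sketch of such a 'controller-template' (all names
 * below are hypothetical; see the 8259 and IO-APIC code for real
 * ones). A controller driver fills in a struct hw_interrupt_type,
 * in the same field order as no_irq_type further down:
 *
 *	static struct hw_interrupt_type mychip_irq_type = {
 *		"MYCHIP",
 *		startup_mychip_irq,
 *		shutdown_mychip_irq,
 *		enable_mychip_irq,
 *		disable_mychip_irq,
 *		ack_mychip_irq,
 *		end_mychip_irq
 *	};
 *
 * and wires it up with irq_desc[irq].handler = &mychip_irq_type.
 */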
/*
* Controller mappings for all interrupt sources:
*/
irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned =
{ [0 ... NR_IRQS-1] = { 0, &no_irq_type, NULL, 0, SPIN_LOCK_UNLOCKED}};
static void register_irq_proc (unsigned int irq);
/*
* Special irq handlers.
*/
void no_action(int cpl, void *dev_id, struct pt_regs *regs) { }
/*
* Generic no controller code
*/
static void enable_none(unsigned int irq) { }
static unsigned int startup_none(unsigned int irq) { return 0; }
static void disable_none(unsigned int irq) { }
static void ack_none(unsigned int irq)
{
/*
* 'what should we do if we get a hw irq event on an illegal vector'.
* Each architecture has to answer this itself; it doesn't deserve
* a generic callback, I think.
*/
#if CONFIG_X86
printk("unexpected IRQ trap at vector %02x\n", irq);
#ifdef CONFIG_X86_LOCAL_APIC
/*
* Currently unexpected vectors happen only on SMP and APIC.
* We _must_ ack these because every local APIC has only N
* irq slots per priority level, and a 'hanging, unacked' IRQ
* holds up an irq slot - in excessive cases (when multiple
* unexpected vectors occur) that might lock up the APIC
* completely.
*/
ack_APIC_irq();
#endif
#endif
}
/* startup is the same as "enable", shutdown is same as "disable" */
#define shutdown_none disable_none
#define end_none enable_none
struct hw_interrupt_type no_irq_type = {
"none",
startup_none,
shutdown_none,
enable_none,
disable_none,
ack_none,
end_none
};
volatile unsigned long irq_err_count;
/*
* Generic, controller-independent functions:
*/
int get_irq_list(char *buf)
{
int i, j;
struct irqaction * action;
char *p = buf;
p += sprintf(p, " ");
for (j=0; j<smp_num_cpus; j++)
p += sprintf(p, "CPU%d ",j);
*p++ = '\n';
for (i = 0 ; i < NR_IRQS ; i++) {
action = irq_desc[i].action;
if (!action)
continue;
p += sprintf(p, "%3d: ",i);
#ifndef CONFIG_SMP
p += sprintf(p, "%10u ", kstat_irqs(i));
#else
for (j = 0; j < smp_num_cpus; j++)
p += sprintf(p, "%10u ",
kstat.irqs[cpu_logical_map(j)][i]);
#endif
p += sprintf(p, " %14s", irq_desc[i].handler->typename);
p += sprintf(p, " %s", action->name);
for (action=action->next; action; action = action->next)
p += sprintf(p, ", %s", action->name);
*p++ = '\n';
}
p += sprintf(p, "NMI: ");
for (j = 0; j < smp_num_cpus; j++)
p += sprintf(p, "%10u ",
nmi_count(cpu_logical_map(j)));
p += sprintf(p, "\n");
#if CONFIG_SMP
p += sprintf(p, "LOC: ");
for (j = 0; j < smp_num_cpus; j++)
p += sprintf(p, "%10u ",
apic_timer_irqs[cpu_logical_map(j)]);
p += sprintf(p, "\n");
#endif
p += sprintf(p, "ERR: %10lu\n", irq_err_count);
return p - buf;
}
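/*
 * The buffer built above is what the user reads as /proc/interrupts.
 * Illustrative shape of the output on a two-CPU box (the counts are
 * made-up values):
 *
 *	           CPU0       CPU1
 *	  0:     895797     897380    IO-APIC-edge  timer
 *	  1:       1424       1963    IO-APIC-edge  keyboard
 *	NMI:          0          0
 *	LOC:     897300     897389
 *	ERR:          0
 */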
/*
* Global interrupt locks for SMP. Allow interrupts to come in on any
* CPU, yet make cli/sti act globally to protect critical regions..
*/
#ifdef CONFIG_SMP
unsigned char global_irq_holder = NO_PROC_ID;
unsigned volatile long global_irq_lock; /* pedantic: long for set_bit --RR */
extern void show_stack(unsigned long* esp);
static void show(char * str)
{
int i;
int cpu = smp_processor_id();
printk("\n%s, CPU %d:\n", str, cpu);
printk("irq: %d [",irqs_running());
for(i=0;i < smp_num_cpus;i++)
printk(" %d",local_irq_count(i));
printk(" ]\nbh: %d [",spin_is_locked(&global_bh_lock) ? 1 : 0);
for(i=0;i < smp_num_cpus;i++)
printk(" %d",local_bh_count(i));
printk(" ]\nStack dumps:");
for(i = 0; i < smp_num_cpus; i++) {
unsigned long esp;
if (i == cpu)
continue;
printk("\nCPU %d:",i);
esp = init_tss[i].esp0;
if (!esp) {
/* tss->esp0 is set to NULL in cpu_init(),
* it's initialized when the cpu returns to user
* space. -- manfreds
*/
printk(" <unknown> ");
continue;
}
esp &= ~(THREAD_SIZE-1);
esp += sizeof(struct task_struct);
show_stack((void*)esp);
}
printk("\nCPU %d:",cpu);
show_stack(NULL);
printk("\n");
}
#define MAXCOUNT 100000000
/*
* I had a lockup scenario where a tight loop doing
* spin_unlock()/spin_lock() on CPU#1 was racing with
* spin_lock() on CPU#0. CPU#0 should have noticed spin_unlock(), but
* apparently the spin_unlock() information did not make it
* through to CPU#0 ... nasty, is this by design, do we have to limit
* 'memory update oscillation frequency' artificially like here?
*
* Such 'high frequency update' races can be avoided by careful design, but
* some of our major constructs, like spinlocks, use similar techniques;
* it would be nice to clarify this issue. Set this define to 0 if you
* want to check whether your system freezes. I suspect the delay done
* by SYNC_OTHER_CORES() is correlated with 'snooping latency', but
* I thought that such things were guaranteed by design, since we use
* the 'LOCK' prefix.
*/
#define SUSPECTED_CPU_OR_CHIPSET_BUG_WORKAROUND 0
#if SUSPECTED_CPU_OR_CHIPSET_BUG_WORKAROUND
# define SYNC_OTHER_CORES(x) udelay(x+1)
#else
/*
* We have to allow irqs to arrive between __sti and __cli
*/
# define SYNC_OTHER_CORES(x) __asm__ __volatile__ ("nop")
#endif
static inline void wait_on_irq(int cpu)
{
int count = MAXCOUNT;
for (;;) {
/*
* Wait until all interrupts are gone. Wait
* for bottom half handlers unless we're
* already executing in one..
*/
if (!irqs_running())
if (local_bh_count(cpu) || !spin_is_locked(&global_bh_lock))
break;
/* Duh, we have to loop. Release the lock to avoid deadlocks */
clear_bit(0,&global_irq_lock);
for (;;) {
if (!--count) {
show("wait_on_irq");
count = ~0;
}
__sti();
SYNC_OTHER_CORES(cpu);
__cli();
if (irqs_running())
continue;
if (global_irq_lock)
continue;
if (!local_bh_count(cpu) && spin_is_locked(&global_bh_lock))
continue;
if (!test_and_set_bit(0,&global_irq_lock))
break;
}
}
}
/*
* This is called when we want to synchronize with
* interrupts. We may for example tell a device to
* stop sending interrupts: but to make sure there
* are no interrupts that are executing on another
* CPU we need to call this function.
*/
void synchronize_irq(void)
{
if (irqs_running()) {
/* Stupid approach */
cli();
sti();
}
}
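/*
 * Hypothetical driver teardown sketch using synchronize_irq()
 * (the device and register names are made up):
 *
 *	writel(0, dev->base + MYDEV_INTR_ENABLE);
 *	synchronize_irq();
 *	free_irq(dev->irq, dev);
 *
 * Once synchronize_irq() returns, no CPU is still executing an
 * interrupt handler, so the device state can be torn down safely.
 */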
static inline void get_irqlock(int cpu)
{
if (test_and_set_bit(0,&global_irq_lock)) {
/* do we already hold the lock? */
if ((unsigned char) cpu == global_irq_holder)
return;
/* Uhhuh.. Somebody else got it. Wait.. */
do {
do {
} while (test_bit(0,&global_irq_lock));
} while (test_and_set_bit(0,&global_irq_lock));
}
/*
* We also need to make sure that nobody else is running
* in an interrupt context.
*/
wait_on_irq(cpu);
/*
* Ok, finally..
*/
global_irq_holder = cpu;
}
#define EFLAGS_IF_SHIFT 9
/*
* A global "cli()" while in an interrupt context
* turns into just a local cli(). Interrupts
* should use spinlocks for the (very unlikely)
* case that they ever want to protect against
* each other.
*
* If we already have local interrupts disabled,
* this will not turn a local disable into a
* global one (problems with spinlocks: this makes
* save_flags+cli+sti usable inside a spinlock).
*/
void __global_cli(void)
{
unsigned int flags;
__save_flags(flags);
if (flags & (1 << EFLAGS_IF_SHIFT)) {
int cpu = smp_processor_id();
__cli();
if (!local_irq_count(cpu))
get_irqlock(cpu);
}
}
void __global_sti(void)
{
int cpu = smp_processor_id();
if (!local_irq_count(cpu))
release_irqlock(cpu);
__sti();
}
/*
* SMP flags value to restore to:
* 0 - global cli
* 1 - global sti
* 2 - local cli
* 3 - local sti
*/
unsigned long __global_save_flags(void)
{
int retval;
int local_enabled;
unsigned long flags;
int cpu = smp_processor_id();
__save_flags(flags);
local_enabled = (flags >> EFLAGS_IF_SHIFT) & 1;
/* default to local */
retval = 2 + local_enabled;
/* check for global flags if we're not in an interrupt */
if (!local_irq_count(cpu)) {
if (local_enabled)
retval = 1;
if (global_irq_holder == cpu)
retval = 0;
}
return retval;
}
void __global_restore_flags(unsigned long flags)
{
switch (flags) {
case 0:
__global_cli();
break;
case 1:
__global_sti();
break;
case 2:
__cli();
break;
case 3:
__sti();
break;
default:
printk("global_restore_flags: %08lx (%08lx)\n",
flags, (&flags)[-1]);
}
}
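/*
 * Illustrative round trip through the encoding above. On SMP the
 * usual save_flags()/cli()/restore_flags() macros expand to the
 * __global_* functions in this file:
 *
 *	unsigned long flags;
 *
 *	save_flags(flags);	(yields 0..3 as listed above)
 *	cli();			(global, unless in irq context)
 *	... critical section ...
 *	restore_flags(flags);	(dispatches through the switch above)
 */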
#endif
/*
* This should really return information about whether
* we should do bottom half handling etc. Right now we
* end up _always_ checking the bottom half, which is a
* waste of time and is not what some drivers would
* prefer.
*/
int handle_IRQ_event(unsigned int irq, struct pt_regs * regs, struct irqaction * action)
{
int status;
int cpu = smp_processor_id();
irq_enter(cpu, irq);
status = 1; /* Force the "do bottom halves" bit */
if (!(action->flags & SA_INTERRUPT))
__sti();
do {
status |= action->flags;
action->handler(irq, action->dev_id, regs);
action = action->next;
} while (action);
if (status & SA_SAMPLE_RANDOM)
add_interrupt_randomness(irq);
__cli();
irq_exit(cpu, irq);
return status;
}
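/*
 * The action->next chain walked above is how shared interrupt lines
 * work. Hypothetical registration that builds such a chain:
 *
 *	request_irq(irq, drv_a_isr, SA_SHIRQ, "drv_a", drv_a);
 *	request_irq(irq, drv_b_isr, SA_SHIRQ, "drv_b", drv_b);
 *
 * Both handlers then run for every interrupt on the line, and
 * 'status' accumulates the flags (e.g. SA_SAMPLE_RANDOM) of every
 * action on the chain.
 */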
/*
* Generic enable/disable code: this just calls
* down into the PIC-specific version for the actual
* hardware disable after having gotten the irq
* controller lock.
*/
/**
* disable_irq_nosync - disable an irq without waiting
* @irq: Interrupt to disable
*
* Disable the selected interrupt line. Disables nest: the line
* stays disabled until every disable has been matched by an
* enable. Unlike disable_irq(), this function does not ensure
* existing instances of the IRQ handler have completed before
* returning.
*
* This function may be called from IRQ context.
*/
inline void disable_irq_nosync(unsigned int irq)
{
irq_desc_t *desc = irq_desc + irq;
unsigned long flags;
spin_lock_irqsave(&desc->lock, flags);
if (!desc->depth++) {
desc->status |= IRQ_DISABLED;
desc->handler->disable(irq);
}
spin_unlock_irqrestore(&desc->lock, flags);
}
/**
* disable_irq - disable an irq and wait for completion
* @irq: Interrupt to disable
*
* Disable the selected interrupt line. Disables nest: for two
* disables you need two enables. This function waits for any
* pending IRQ handlers for this interrupt to complete before
* returning. If you use this function while holding a resource
* the IRQ handler may need, you will deadlock.
*
* This function may be called - with care - from IRQ context.
*/
void disable_irq(unsigned int irq)
{
disable_irq_nosync(irq);
if (!local_irq_count(smp_processor_id())) {
do {
barrier();
} while (irq_desc[irq].status & IRQ_INPROGRESS);
}
}
/**
* enable_irq - enable interrupt handling on an irq
* @irq: Interrupt to enable
*
* Re-enables the processing of interrupts on this IRQ line,
* provided no disable_irq() calls are still in effect.
*
* This function may be called from IRQ context.
*/
void enable_irq(unsigned int irq)
{
irq_desc_t *desc = irq_desc + irq;
unsigned long flags;
spin_lock_irqsave(&desc->lock, flags);
switch (desc->depth) {
case 1: {
unsigned int status = desc->status & ~IRQ_DISABLED;
desc->status = status;
if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
desc->status = status | IRQ_REPLAY;
hw_resend_irq(desc->handler,irq);
}
desc->handler->enable(irq);
/* fall-through */
}
default:
desc->depth--;
break;
case 0:
printk("enable_irq(%u) unbalanced from %p\n", irq,
__builtin_return_address(0));
}
spin_unlock_irqrestore(&desc->lock, flags);
}
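/*
 * The depth counter above makes disables nest. Sketch:
 *
 *	disable_irq(irq);	(depth 0 -> 1, line masked)
 *	disable_irq(irq);	(depth 1 -> 2, stays masked)
 *	enable_irq(irq);	(depth 2 -> 1, stays masked)
 *	enable_irq(irq);	(depth 1 -> 0, unmasked; a pending
 *				 IRQ is replayed via hw_resend_irq())
 */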
/*
* do_IRQ handles all normal device IRQ's (the special
* SMP cross-CPU interrupts have their own specific
* handlers).
*/
asmlinkage unsigned int do_IRQ(struct pt_regs regs)
{
/*
* We ack quickly, we don't want the irq controller
* thinking we're snobs just because some other CPU has
* disabled global interrupts (we have already done the
* INT_ACK cycles, it's too late to try to pretend to the
* controller that we aren't taking the interrupt).
*
* 0 return value means that this irq is already being
* handled by some other CPU. (or is disabled)
*/
int irq = regs.orig_eax & 0xff; /* high bits used in ret_from_ code */
int cpu = smp_processor_id();
irq_desc_t *desc = irq_desc + irq;
struct irqaction * action;
unsigned int status;
kstat.irqs[cpu][irq]++;
spin_lock(&desc->lock);
desc->handler->ack(irq);
/*
REPLAY is when Linux resends an IRQ that was dropped earlier
WAITING is used by probe to mark irqs that are being tested
*/
status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
status |= IRQ_PENDING; /* we _want_ to handle it */
/*
* If the IRQ is disabled for whatever reason, we cannot
* use the action we have.
*/
action = NULL;
if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
action = desc->action;
status &= ~IRQ_PENDING; /* we commit to handling */
status |= IRQ_INPROGRESS; /* we are handling it */
}
desc->status = status;
/*
 * If there is no IRQ handler or it was disabled, exit early.
 * Since we set PENDING, if another processor is handling
 * a different instance of this same irq, the other processor
 * will take care of it.
 */
if (!action)
goto out;
/*
 * Edge triggered interrupts need to remember pending events.
 * This applies to any hw interrupts that allow a second
 * instance of the same irq to arrive while we are in do_IRQ
 * or in the handler. But the code here only handles the _second_
 * instance of the irq, not the third or fourth. So it is mostly
 * useful for irq hardware that does not mask cleanly in an
 * SMP environment.
 */
for (;;) {
spin_unlock(&desc->lock);
handle_IRQ_event(irq, &regs, action);
spin_lock(&desc->lock);
if (!(desc->status & IRQ_PENDING))
break;
desc->status &= ~IRQ_PENDING;
}
desc->status &= ~IRQ_INPROGRESS;
out:
/*
 * The ->end() handler has to deal with interrupts which got
 * disabled while the handler was running.
 */
desc->handler->end(irq);
spin_unlock(&desc->lock);
if (softirq_pending(cpu))
do_softirq();
return 1;
}