
irq.c

Linux kernel source code (compressed archive): the source code from the book <<Linux内核>>.

Language: C
Page 1 of 2
/*
 *      linux/arch/ia64/kernel/irq.c
 *
 *      Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQ's should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 */

/*
 * (mostly architecture independent, will move to kernel/irq.c in 2.5.)
 *
 * IRQs are in fact implemented a bit like signal handlers for the kernel.
 * Naturally it's not a 1:1 relation, but there are similarities.
 */

#include <linux/config.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/malloc.h>
#include <linux/random.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/irq.h>
#include <linux/proc_fs.h>

#include <asm/io.h>
#include <asm/smp.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/delay.h>
#include <asm/irq.h>

/*
 * Linux has a controller-independent x86 interrupt architecture.
 * Every controller has a 'controller-template', that is used
 * by the main code to do the right thing. Each driver-visible
 * interrupt source is transparently wired to the appropriate
 * controller. Thus drivers need not be aware of the
 * interrupt-controller.
 *
 * Various interrupt controllers we handle: 8259 PIC, SMP IO-APIC,
 * PIIX4's internal 8259 PIC and SGI's Visual Workstation Cobalt (IO-)APIC.
 * (IO-APICs assumed to be messaging to Pentium local-APICs)
 *
 * The code is designed to be easily extended with new/different
 * interrupt controllers, without having to do assembly magic.
 */

/*
 * Controller mappings for all interrupt sources:
 */
irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned =
        { [0 ... NR_IRQS-1] = { IRQ_DISABLED, &no_irq_type, NULL, 0, SPIN_LOCK_UNLOCKED}};

static void register_irq_proc (unsigned int irq);

/*
 * Special irq handlers.
 */

void no_action(int cpl, void *dev_id, struct pt_regs *regs) { }

/*
 * Generic no controller code
 */

static void enable_none(unsigned int irq) { }
static unsigned int startup_none(unsigned int irq) { return 0; }
static void disable_none(unsigned int irq) { }
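/*
 * Illustrative note (not part of the original listing): the three empty
 * stubs above, together with ack_none() below, make up the no_irq_type
 * fallback controller that every irq_desc[] entry points at until a real
 * interrupt controller claims the line.
 */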
static void ack_none(unsigned int irq)
{
        /*
         * 'what should we do if we get a hw irq event on an illegal vector'.
         * Each architecture has to answer this themselves, it doesn't deserve
         * a generic callback, I think.
         */
#if CONFIG_X86
        printk("unexpected IRQ trap at vector %02x\n", irq);
#ifdef CONFIG_X86_LOCAL_APIC
        /*
         * Currently unexpected vectors happen only on SMP and APIC.
         * We _must_ ack these because every local APIC has only N
         * irq slots per priority level, and a 'hanging, unacked' IRQ
         * holds up an irq slot - in excessive cases (when multiple
         * unexpected vectors occur) that might lock up the APIC
         * completely.
         */
        ack_APIC_irq();
#endif
#endif
#if CONFIG_IA64
        printk("Unexpected irq vector 0x%x on CPU %u!\n", irq, smp_processor_id());
#endif
}

/* startup is the same as "enable", shutdown is same as "disable" */
#define shutdown_none   disable_none
#define end_none        enable_none

struct hw_interrupt_type no_irq_type = {
        "none",
        startup_none,
        shutdown_none,
        enable_none,
        disable_none,
        ack_none,
        end_none
};

volatile unsigned long irq_err_count;

/*
 * Generic, controller-independent functions:
 */

int get_irq_list(char *buf)
{
        int i, j;
        struct irqaction * action;
        char *p = buf;

        p += sprintf(p, "           ");
        for (j=0; j<smp_num_cpus; j++)
                p += sprintf(p, "CPU%d       ",j);
        *p++ = '\n';

        for (i = 0 ; i < NR_IRQS ; i++) {
                action = irq_desc[i].action;
                if (!action)
                        continue;
                p += sprintf(p, "%3d: ",i);
#ifndef CONFIG_SMP
                p += sprintf(p, "%10u ", kstat_irqs(i));
#else
                for (j = 0; j < smp_num_cpus; j++)
                        p += sprintf(p, "%10u ",
                                kstat.irqs[cpu_logical_map(j)][i]);
#endif
                p += sprintf(p, " %14s", irq_desc[i].handler->typename);
                p += sprintf(p, "  %s", action->name);

                for (action=action->next; action; action = action->next)
                        p += sprintf(p, ", %s", action->name);
                *p++ = '\n';
        }
        p += sprintf(p, "NMI: ");
        for (j = 0; j < smp_num_cpus; j++)
                p += sprintf(p, "%10u ",
                        nmi_count(cpu_logical_map(j)));
        p += sprintf(p, "\n");
#if defined(CONFIG_SMP) && defined(__i386__)
        p += sprintf(p, "LOC: ");
        for (j = 0; j < smp_num_cpus; j++)
                p += sprintf(p, "%10u ",
                        apic_timer_irqs[cpu_logical_map(j)]);
        p += sprintf(p, "\n");
#endif
        p += sprintf(p, "ERR: %10lu\n", irq_err_count);
        return p - buf;
}
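/*
 * Illustrative note (not part of the original listing): the buffer that
 * get_irq_list() fills is what the kernel exposes as /proc/interrupts.
 * With the format strings above, a hypothetical two-CPU machine would
 * print something like this (all counts and names here are made up):
 *
 *                 CPU0       CPU1
 *        4:      21045       2366   IO-SAPIC-edge  serial
 *      NMI:          0          0
 *      ERR:          0
 */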
/*
 * Global interrupt locks for SMP. Allow interrupts to come in on any
 * CPU, yet make cli/sti act globally to protect critical regions..
 */

#ifdef CONFIG_SMP
unsigned int global_irq_holder = NO_PROC_ID;
volatile unsigned long global_irq_lock; /* long for set_bit --RR */

extern void show_stack(unsigned long* esp);

static void show(char * str)
{
        int i;
        int cpu = smp_processor_id();

        printk("\n%s, CPU %d:\n", str, cpu);
        printk("irq:  %d [",irqs_running());
        for(i=0;i < smp_num_cpus;i++)
                printk(" %d",local_irq_count(i));

        printk(" ]\nbh:   %d [",spin_is_locked(&global_bh_lock) ? 1 : 0);
        for(i=0;i < smp_num_cpus;i++)
                printk(" %d",local_bh_count(i));

        printk(" ]\nStack dumps:");
#if defined(__ia64__)
        /*
         * We can't unwind the stack of another CPU without access to
         * the registers of that CPU.  And sending an IPI when we're
         * in a potentially wedged state doesn't sound like a smart
         * idea.
         */
#elif defined(__i386__)
        for(i=0;i< smp_num_cpus;i++) {
                unsigned long esp;
                if(i==cpu)
                        continue;
                printk("\nCPU %d:",i);
                esp = init_tss[i].esp0;
                if(esp==NULL) {
                        /* tss->esp0 is set to NULL in cpu_init(),
                         * it's initialized when the cpu returns to user
                         * space. -- manfreds
                         */
                        printk(" <unknown> ");
                        continue;
                }
                esp &= ~(THREAD_SIZE-1);
                esp += sizeof(struct task_struct);
                show_stack((void*)esp);
        }
#else
        You lose...
#endif
        printk("\nCPU %d:",cpu);
        show_stack(NULL);
        printk("\n");
}

#define MAXCOUNT 100000000

/*
 * I had a lockup scenario where a tight loop doing
 * spin_unlock()/spin_lock() on CPU#1 was racing with
 * spin_lock() on CPU#0. CPU#0 should have noticed spin_unlock(), but
 * apparently the spin_unlock() information did not make it
 * through to CPU#0 ... nasty, is this by design, do we have to limit
 * 'memory update oscillation frequency' artificially like here?
 *
 * Such 'high frequency update' races can be avoided by careful design, but
 * some of our major constructs like spinlocks use similar techniques,
 * it would be nice to clarify this issue. Set this define to 0 if you
 * want to check whether your system freezes.  I suspect the delay done
 * by SYNC_OTHER_CORES() is in correlation with 'snooping latency', but
 * I thought that such things are guaranteed by design, since we use
 * the 'LOCK' prefix.
 */
#define SUSPECTED_CPU_OR_CHIPSET_BUG_WORKAROUND 0

#if SUSPECTED_CPU_OR_CHIPSET_BUG_WORKAROUND
# define SYNC_OTHER_CORES(x) udelay(x+1)
#else
/*
 * We have to allow irqs to arrive between __sti and __cli
 */
# ifdef __ia64__
#  define SYNC_OTHER_CORES(x) __asm__ __volatile__ ("nop 0")
# else
#  define SYNC_OTHER_CORES(x) __asm__ __volatile__ ("nop")
# endif
#endif

static inline void wait_on_irq(int cpu)
{
        int count = MAXCOUNT;

        for (;;) {

                /*
                 * Wait until all interrupts are gone. Wait
                 * for bottom half handlers unless we're
                 * already executing in one..
                 */
                if (!irqs_running())
                        if (local_bh_count(cpu) || !spin_is_locked(&global_bh_lock))
                                break;

                /* Duh, we have to loop. Release the lock to avoid deadlocks */
                clear_bit(0,&global_irq_lock);

                for (;;) {
                        if (!--count) {
                                show("wait_on_irq");
                                count = ~0;
                        }
                        __sti();
                        SYNC_OTHER_CORES(cpu);
                        __cli();
                        if (irqs_running())
                                continue;
                        if (global_irq_lock)
                                continue;
                        if (!local_bh_count(cpu) && spin_is_locked(&global_bh_lock))
                                continue;
                        if (!test_and_set_bit(0,&global_irq_lock))
                                break;
                }
        }
}

/*
 * This is called when we want to synchronize with
 * interrupts. We may for example tell a device to
 * stop sending interrupts: but to make sure there
 * are no interrupts that are executing on another
 * CPU we need to call this function.
 */
void synchronize_irq(void)
{
        if (irqs_running()) {
                /* Stupid approach */
                cli();
                sti();
        }
}
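/*
 * Illustrative note (not part of the original listing): the pattern the
 * comment above describes. A driver first tells its hardware to stop
 * raising interrupts, then calls synchronize_irq() so that a handler
 * still running on another CPU has finished before state is torn down.
 * Both names in the sketch below are hypothetical:
 *
 *      writel(0, dev->irq_enable_reg);   disable interrupt generation
 *      synchronize_irq();                wait out in-flight handlers
 *      release_dev_state(dev);           now safe to free things
 */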
static inline void get_irqlock(int cpu)
{
        if (test_and_set_bit(0,&global_irq_lock)) {
                /* do we already hold the lock? */
                if (cpu == global_irq_holder)
                        return;
                /* Uhhuh.. Somebody else got it. Wait.. */
                do {
                        do {
                        } while (test_bit(0,&global_irq_lock));
                } while (test_and_set_bit(0,&global_irq_lock));
        }
        /*
         * We also need to make sure that nobody else is running
         * in an interrupt context.
         */
        wait_on_irq(cpu);

        /*
         * Ok, finally..
         */
        global_irq_holder = cpu;
}

#define EFLAGS_IF_SHIFT 9

/*
 * A global "cli()" while in an interrupt context
 * turns into just a local cli(). Interrupts
 * should use spinlocks for the (very unlikely)
 * case that they ever want to protect against
 * each other.
 *
 * If we already have local interrupts disabled,
 * this will not turn a local disable into a
 * global one (problems with spinlocks: this makes
 * save_flags+cli+sti usable inside a spinlock).
 */
void __global_cli(void)
{
        unsigned int flags;

#ifdef __ia64__
        __save_flags(flags);
        if (flags & IA64_PSR_I) {
                int cpu = smp_processor_id();
                __cli();
                if (!local_irq_count(cpu))
                        get_irqlock(cpu);
        }
#else
        __save_flags(flags);
        if (flags & (1 << EFLAGS_IF_SHIFT)) {
                int cpu = smp_processor_id();
                __cli();
                if (!local_irq_count(cpu))
                        get_irqlock(cpu);
        }
#endif
}

void __global_sti(void)
{
        int cpu = smp_processor_id();

        if (!local_irq_count(cpu))
                release_irqlock(cpu);
        __sti();
}

/*
 * SMP flags value to restore to:
 * 0 - global cli
 * 1 - global sti
 * 2 - local cli
 * 3 - local sti
 */
unsigned long __global_save_flags(void)
{
        int retval;
        int local_enabled;
        unsigned long flags;
        int cpu = smp_processor_id();

        __save_flags(flags);
#ifdef __ia64__
        local_enabled = (flags & IA64_PSR_I) != 0;
#else
        local_enabled = (flags >> EFLAGS_IF_SHIFT) & 1;
#endif
        /* default to local */
        retval = 2 + local_enabled;

        /* check for global flags if we're not in an interrupt */
        if (!local_irq_count(cpu)) {
                if (local_enabled)
                        retval = 1;
                if (global_irq_holder == cpu)
                        retval = 0;
        }
        return retval;
}

void __global_restore_flags(unsigned long flags)
{
        switch (flags) {
        case 0:
                __global_cli();
                break;
        case 1:
                __global_sti();
                break;
        case 2:
                __cli();
                break;
        case 3:
                __sti();
                break;
        default:
                printk("global_restore_flags: %08lx (%08lx)\n",
                        flags, (&flags)[-1]);
        }
}

#endif

/*
 * This should really return information about whether
 * we should do bottom half handling etc. Right now we
 * end up _always_ checking the bottom half, which is a
 * waste of time and is not what some drivers would
 * prefer.
 */
int handle_IRQ_event(unsigned int irq, struct pt_regs * regs, struct irqaction * action)
{
        int status;
        int cpu = smp_processor_id();

        irq_enter(cpu, irq);

        status = 1;     /* Force the "do bottom halves" bit */

        if (!(action->flags & SA_INTERRUPT))
                __sti();

        do {
                status |= action->flags;
                action->handler(irq, action->dev_id, regs);
                action = action->next;
        } while (action);
        if (status & SA_SAMPLE_RANDOM)
                add_interrupt_randomness(irq);
        __cli();

        irq_exit(cpu, irq);

        return status;
}

/*
 * Generic enable/disable code: this just calls
 * down into the PIC-specific version for the actual
 * hardware disable after having gotten the irq
 * controller lock.
 */
void inline disable_irq_nosync(unsigned int irq)
{
        irq_desc_t *desc = irq_desc + irq;
        unsigned long flags;

        spin_lock_irqsave(&desc->lock, flags);
        if (!desc->depth++) {
                desc->status |= IRQ_DISABLED;
                desc->handler->disable(irq);
        }
        spin_unlock_irqrestore(&desc->lock, flags);
}

/*
 * Synchronous version of the above, making sure the IRQ is
 * no longer running on any other CPU..
 */
void disable_irq(unsigned int irq)
{
        disable_irq_nosync(irq);

#ifdef CONFIG_SMP
        if (!local_irq_count(smp_processor_id())) {
                do {
                        barrier();
                } while (irq_desc[irq].status & IRQ_INPROGRESS);
        }
#endif
}

void enable_irq(unsigned int irq)
{
        irq_desc_t *desc = irq_desc + irq;
        unsigned long flags;

        spin_lock_irqsave(&desc->lock, flags);
        switch (desc->depth) {
        case 1: {
                unsigned int status = desc->status & ~IRQ_DISABLED;
                desc->status = status;
                if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
                        desc->status = status | IRQ_REPLAY;
                        hw_resend_irq(desc->handler,irq);
                }
                desc->handler->enable(irq);
                /* fall-through */
        }
        default:
                desc->depth--;
                break;
        case 0:
                printk("enable_irq() unbalanced from %p\n", (void *) __builtin_return_address(0));
        }
        spin_unlock_irqrestore(&desc->lock, flags);
}

void do_IRQ_per_cpu(unsigned long irq, struct pt_regs *regs)
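
The listing breaks off here at the declaration of do_IRQ_per_cpu(); the function body continues on page 2.

One usage note on the code above: disable_irq() and enable_irq() are depth-counted, so nested disables must be matched one-for-one before the line is actually unmasked again. A minimal hypothetical driver fragment (not from this file; dev->irq and reprogram_device() are made-up names):

        disable_irq(dev->irq);          /* depth 0 -> 1: masks the line and
                                           spins until any running handler
                                           has finished */
        disable_irq_nosync(dev->irq);   /* depth 1 -> 2: bookkeeping only */
        reprogram_device(dev);
        enable_irq(dev->irq);           /* depth 2 -> 1: line stays masked */
        enable_irq(dev->irq);           /* depth 1 -> 0: unmasked; a pending
                                           edge interrupt is replayed via
                                           hw_resend_irq() */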
