⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 irq.c

📁 Linux内核源代码 为压缩文件 是<<Linux内核>>一书中的源代码
💻 C
📖 第 1 页 / 共 2 页
字号:
/* $Id: irq.c,v 1.94 2000/09/21 06:27:10 anton Exp $
 * irq.c: UltraSparc IRQ handling/init/registry.
 *
 * Copyright (C) 1997  David S. Miller  (davem@caip.rutgers.edu)
 * Copyright (C) 1998  Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1998  Jakub Jelinek    (jj@ultra.linux.cz)
 */

#include <linux/config.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/malloc.h>
#include <linux/random.h> /* XXX ADD add_foo_randomness() calls... -DaveM */
#include <linux/init.h>
#include <linux/delay.h>

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/irq.h>
#include <asm/sbus.h>
#include <asm/iommu.h>
#include <asm/upa.h>
#include <asm/oplib.h>
#include <asm/timer.h>
#include <asm/smp.h>
#include <asm/hardirq.h>
#include <asm/softirq.h>
#include <asm/starfire.h>

/* Internal flag, should not be visible elsewhere at all. */
#define SA_IMAP_MASKED		0x100
#define SA_DMA_SYNC		0x200

#ifdef CONFIG_SMP
static void distribute_irqs(void);
#endif

/* UPA nodes send interrupt packet to UltraSparc with first data reg
 * value low 5 (7 on Starfire) bits holding the IRQ identifier being
 * delivered.  We must translate this into a non-vector IRQ so we can
 * set the softint on this cpu.
 *
 * To make processing these packets efficient and race free we use
 * an array of irq buckets below.  The interrupt vector handler in
 * entry.S feeds incoming packets into per-cpu pil-indexed lists.
 * The IVEC handler does not need to act atomically, the PIL dispatch
 * code uses CAS to get an atomic snapshot of the list and clear it
 * at the same time.
 */
struct ino_bucket ivector_table[NUM_IVECS] __attribute__ ((aligned (64)));

#ifndef CONFIG_SMP
unsigned int __up_workvec[16] __attribute__ ((aligned (64)));
#define irq_work(__cpu, __pil)	&(__up_workvec[(void)(__cpu), (__pil)])
#else
#define irq_work(__cpu, __pil)	&(cpu_data[(__cpu)].irq_worklists[(__pil)])
#endif

#ifdef CONFIG_PCI
/* This is a table of physical addresses used to deal with SA_DMA_SYNC.
 * It is used for PCI only to synchronize DMA transfers with IRQ delivery
 * for devices behind busses other than APB on Sabre systems.
 *
 * Currently these physical addresses are just config space accesses
 * to the command register for that device.
 */
unsigned long pci_dma_wsync;
unsigned long dma_sync_reg_table[256];
unsigned char dma_sync_reg_table_entry = 0;
#endif

/* This is based upon code in the 32-bit Sparc kernel written mostly by
 * David Redman (djhr@tadpole.co.uk).
 */
#define MAX_STATIC_ALLOC	4
static struct irqaction static_irqaction[MAX_STATIC_ALLOC];
static int static_irq_count = 0;

/* This is exported so that fast IRQ handlers can get at it... -DaveM */
struct irqaction *irq_action[NR_IRQS+1] = {
	  NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL,
	  NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL
};

/* Format the registered-interrupt listing (for /proc/interrupts) into
 * buf, one line per PIL that has at least one handler.  Returns the
 * number of bytes written.  '+' marks SA_INTERRUPT (fast) handlers.
 */
int get_irq_list(char *buf)
{
	int i, len = 0;
	struct irqaction *action;
#ifdef CONFIG_SMP
	int j;
#endif

	for(i = 0; i < (NR_IRQS + 1); i++) {
		if(!(action = *(i + irq_action)))
			continue;
		len += sprintf(buf + len, "%3d: ", i);
#ifndef CONFIG_SMP
		len += sprintf(buf + len, "%10u ", kstat_irqs(i));
#else
		for (j = 0; j < smp_num_cpus; j++)
			len += sprintf(buf + len, "%10u ",
				       kstat.irqs[cpu_logical_map(j)][i]);
#endif
		len += sprintf(buf + len, "%c %s",
			       (action->flags & SA_INTERRUPT) ? '+' : ' ',
			       action->name);
		for(action = action->next; action; action = action->next) {
			len += sprintf(buf+len, ",%s %s",
				       (action->flags & SA_INTERRUPT) ? " +" : "",
				       action->name);
		}
		len += sprintf(buf + len, "\n");
	}
	return len;
}

/* Now these are always passed a true fully specified sun4u INO. */
void enable_irq(unsigned int irq)
{
	struct ino_bucket *bucket = __bucket(irq);
	unsigned long imap;
	unsigned long tid;

	imap = bucket->imap;
	if (imap == 0UL)
		return;

	if(this_is_starfire == 0) {
		/* We set it to our UPA MID. */
		__asm__ __volatile__("ldxa [%%g0] %1, %0"
				     : "=r" (tid)
				     : "i" (ASI_UPA_CONFIG));
		tid = ((tid & UPA_CONFIG_MID) << 9);
	} else {
		tid = (starfire_translate(imap, current->processor) << 26);
	}

	/* NOTE NOTE NOTE, IGN and INO are read-only, IGN is a product
	 * of this SYSIO's preconfigured IGN in the SYSIO Control
	 * Register, the hardware just mirrors that value here.
	 * However for Graphics and UPA Slave devices the full
	 * IMAP_INR field can be set by the programmer here.
	 *
	 * Things like FFB can now be handled via the new IRQ mechanism.
	 */
	upa_writel(IMAP_VALID | (tid & IMAP_TID), imap);
}

/* This now gets passed true ino's as well. */
void disable_irq(unsigned int irq)
{
	struct ino_bucket *bucket = __bucket(irq);
	unsigned long imap;

	imap = bucket->imap;
	if (imap != 0UL) {
		u32 tmp;

		/* NOTE: We do not want to futz with the IRQ clear registers
		 *       and move the state to IDLE, the SCSI code does call
		 *       disable_irq() to assure atomicity in the queue cmd
		 *       SCSI adapter driver code.  Thus we'd lose interrupts.
		 */
		tmp = upa_readl(imap);
		tmp &= ~IMAP_VALID;
		upa_writel(tmp, imap);
	}
}

/* The timer is the one "weird" interrupt which is generated by
 * the CPU %tick register and not by some normal vectored interrupt
 * source.  To handle this special case, we use this dummy INO bucket.
 */
static struct ino_bucket pil0_dummy_bucket = {
	0,	/* irq_chain */
	0,	/* pil */
	0,	/* pending */
	0,	/* flags */
	0,	/* __unused */
	NULL,	/* irq_info */
	0UL,	/* iclr */
	0UL,	/* imap */
};

/* Translate (pil, ino-fixup, ICLR/IMAP register addresses) into the
 * opaque IRQ cookie handed to request_irq()/free_irq().  PIL 0 is the
 * %tick timer and must pass zero iclr/imap; all other PILs must pass
 * both.  Any inconsistency is fatal (prom_halt).
 */
unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long imap)
{
	struct ino_bucket *bucket;
	int ino;

	if(pil == 0) {
		if(iclr != 0UL || imap != 0UL) {
			prom_printf("Invalid dummy bucket for PIL0 (%lx:%lx)\n",
				    iclr, imap);
			prom_halt();
		}
		return __irq(&pil0_dummy_bucket);
	}

	/* RULE: Both must be specified in all other cases. */
	if (iclr == 0UL || imap == 0UL) {
		prom_printf("Invalid build_irq %d %d %016lx %016lx\n",
			    pil, inofixup, iclr, imap);
		prom_halt();
	}

	ino = (upa_readl(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
	/* ivector_table[] has NUM_IVECS entries, so the last valid INO is
	 * NUM_IVECS - 1; the original '>' test let ino == NUM_IVECS slip
	 * through and index one past the end of the table.
	 */
	if(ino >= NUM_IVECS) {
		prom_printf("Invalid INO %04x (%d:%d:%016lx:%016lx)\n",
			    ino, pil, inofixup, iclr, imap);
		prom_halt();
	}

	/* Ok, looks good, set it up.  Don't touch the irq_chain or
	 * the pending flag.
	 */
	bucket = &ivector_table[ino];
	if ((bucket->flags & IBF_ACTIVE) ||
	    (bucket->irq_info != NULL)) {
		/* This is a gross fatal error if it happens here. */
		prom_printf("IRQ: Trying to reinit INO bucket, fatal error.\n");
		prom_printf("IRQ: Request INO %04x (%d:%d:%016lx:%016lx)\n",
			    ino, pil, inofixup, iclr, imap);
		prom_printf("IRQ: Existing (%d:%016lx:%016lx)\n",
			    bucket->pil, bucket->iclr, bucket->imap);
		prom_printf("IRQ: Cannot continue, halting...\n");
		prom_halt();
	}
	bucket->imap  = imap;
	bucket->iclr  = iclr;
	bucket->pil   = pil;
	bucket->flags = 0;

	bucket->irq_info = NULL;

	return __irq(bucket);
}

/* Push bucket onto this cpu's pending-work list for its PIL, with
 * interrupts disabled (PSTATE_IE cleared) so an incoming IVEC cannot
 * race the list update.
 */
static void atomic_bucket_insert(struct ino_bucket *bucket)
{
	unsigned long pstate;
	unsigned int *ent;

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
	__asm__ __volatile__("wrpr %0, %1, %%pstate"
			     : : "r" (pstate), "i" (PSTATE_IE));
	ent = irq_work(smp_processor_id(), bucket->pil);
	bucket->irq_chain = *ent;
	*ent = __irq(bucket);
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
}

/* Register handler for the IRQ cookie produced by build_irq().
 * Sharing (SA_SHIRQ) is supported; for IMAP-backed PCI buckets up to
 * four handlers may share one bucket via an IBF_MULTI vector.
 * Returns 0 or a -errno value; on success the IMAP is enabled and any
 * interrupt that arrived before registration is re-queued.
 */
int request_irq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *),
		unsigned long irqflags, const char *name, void *dev_id)
{
	struct irqaction *action, *tmp = NULL;
	struct ino_bucket *bucket = __bucket(irq);
	unsigned long flags;
	int pending = 0;

	if ((bucket != &pil0_dummy_bucket) &&
	    (bucket < &ivector_table[0] ||
	     bucket >= &ivector_table[NUM_IVECS])) {
		unsigned int *caller;

		__asm__ __volatile__("mov %%i7, %0" : "=r" (caller));
		printk(KERN_CRIT "request_irq: Old style IRQ registry attempt "
		       "from %p, irq %08x.\n", caller, irq);
		return -EINVAL;
	}

	if(!handler)
	    return -EINVAL;

	if (!bucket->pil)
		irqflags &= ~SA_IMAP_MASKED;
	else {
		irqflags |= SA_IMAP_MASKED;
		if (bucket->flags & IBF_PCI) {
			/*
			 * PCI IRQs should never use SA_INTERRUPT.
			 */
			irqflags &= ~(SA_INTERRUPT);

			/*
			 * Check wether we _should_ use DMA Write Sync
			 * (for devices behind bridges behind APB).
			 */
			if (bucket->flags & IBF_DMA_SYNC)
				irqflags |= SA_DMA_SYNC;
		}
	}

	save_and_cli(flags);

	action = *(bucket->pil + irq_action);
	if(action) {
		if((action->flags & SA_SHIRQ) && (irqflags & SA_SHIRQ))
			for (tmp = action; tmp->next; tmp = tmp->next)
				;
		else {
			restore_flags(flags);
			return -EBUSY;
		}
		if((action->flags & SA_INTERRUPT) ^ (irqflags & SA_INTERRUPT)) {
			printk("Attempt to mix fast and slow interrupts on IRQ%d "
			       "denied\n", bucket->pil);
			restore_flags(flags);
			return -EBUSY;
		}
		action = NULL;		/* Or else! */
	}

	/* If this is flagged as statically allocated then we use our
	 * private struct which is never freed.
	 */
	if(irqflags & SA_STATIC_ALLOC) {
	    if(static_irq_count < MAX_STATIC_ALLOC)
		action = &static_irqaction[static_irq_count++];
	    else
		printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed "
		       "using kmalloc\n", irq, name);
	}
	if(action == NULL)
	    action = (struct irqaction *)kmalloc(sizeof(struct irqaction),
						 GFP_KERNEL);

	if(!action) {
		restore_flags(flags);
		return -ENOMEM;
	}

	if ((irqflags & SA_IMAP_MASKED) == 0) {
		bucket->irq_info = action;
		bucket->flags |= IBF_ACTIVE;
	} else {
		if((bucket->flags & IBF_ACTIVE) != 0) {
			void *orig = bucket->irq_info;
			void **vector = NULL;

			if((bucket->flags & IBF_PCI) == 0) {
				printk("IRQ: Trying to share non-PCI bucket.\n");
				goto free_and_ebusy;
			}
			if((bucket->flags & IBF_MULTI) == 0) {
				vector = kmalloc(sizeof(void *) * 4, GFP_KERNEL);
				if(vector == NULL)
					goto free_and_enomem;

				/* We might have slept.  GFP_KERNEL may block,
				 * so another sharer could have converted the
				 * bucket to IBF_MULTI meanwhile -- recheck.
				 */
				if ((bucket->flags & IBF_MULTI) != 0) {
					int ent;

					kfree(vector);
					vector = (void **)bucket->irq_info;
					for(ent = 0; ent < 4; ent++) {
						if (vector[ent] == NULL) {
							vector[ent] = action;
							break;
						}
					}
					if (ent == 4)
						goto free_and_ebusy;
				} else {
					vector[0] = orig;
					vector[1] = action;
					vector[2] = NULL;
					vector[3] = NULL;
					bucket->irq_info = vector;
					bucket->flags |= IBF_MULTI;
				}
			} else {
				int ent;

				vector = (void **)orig;
				for(ent = 0; ent < 4; ent++) {
					if(vector[ent] == NULL) {
						vector[ent] = action;
						break;
					}
				}
				if (ent == 4)
					goto free_and_ebusy;
			}
		} else {
			bucket->irq_info = action;
			bucket->flags |= IBF_ACTIVE;
		}
		pending = bucket->pending;
		if(pending)
			bucket->pending = 0;
	}

	action->mask = (unsigned long) bucket;
	action->handler = handler;
	action->flags = irqflags;
	action->name = name;
	action->next = NULL;
	action->dev_id = dev_id;

	if(tmp)
		tmp->next = action;
	else
		*(bucket->pil + irq_action) = action;

	enable_irq(irq);

	/* We ate the IVEC already, this makes sure it does not get lost. */
	if(pending) {
		atomic_bucket_insert(bucket);
		set_softint(1 << bucket->pil);
	}
	restore_flags(flags);

#ifdef CONFIG_SMP
	distribute_irqs();
#endif
	return 0;

free_and_ebusy:
	kfree(action);
	restore_flags(flags);
	return -EBUSY;

free_and_enomem:
	kfree(action);
	restore_flags(flags);
	return -ENOMEM;
}

/* Unregister the handler matching dev_id on this IRQ cookie, undoing
 * request_irq(): unlinks the action, collapses IBF_MULTI vectors back
 * to single-handler form, and disables the IMAP when no other active
 * bucket still shares it.
 */
void free_irq(unsigned int irq, void *dev_id)
{
	struct irqaction *action;
	struct irqaction *tmp = NULL;
	unsigned long flags;
	struct ino_bucket *bucket = __bucket(irq), *bp;

	if ((bucket != &pil0_dummy_bucket) &&
	    (bucket < &ivector_table[0] ||
	     bucket >= &ivector_table[NUM_IVECS])) {
		unsigned int *caller;

		__asm__ __volatile__("mov %%i7, %0" : "=r" (caller));
		printk(KERN_CRIT "free_irq: Old style IRQ removal attempt "
		       "from %p, irq %08x.\n", caller, irq);
		return;
	}

	action = *(bucket->pil + irq_action);
	/* Guard against a completely empty action list too -- the original
	 * dereferenced action->handler before checking for NULL.
	 */
	if(!action || !action->handler) {
		printk("Freeing free IRQ %d\n", bucket->pil);
		return;
	}
	if(dev_id) {
		for( ; action; action = action->next) {
			if(action->dev_id == dev_id)
				break;
			tmp = action;
		}
		if(!action) {
			printk("Trying to free free shared IRQ %d\n", bucket->pil);
			return;
		}
	} else if(action->flags & SA_SHIRQ) {
		printk("Trying to free shared IRQ %d with NULL device ID\n", bucket->pil);
		return;
	}
	if(action->flags & SA_STATIC_ALLOC) {
		printk("Attempt to free statically allocated IRQ %d (%s)\n",
		       bucket->pil, action->name);
		return;
	}

	save_and_cli(flags);
	if(action && tmp)
		tmp->next = action->next;
	else
		*(bucket->pil + irq_action) = action->next;

	if(action->flags & SA_IMAP_MASKED) {
		unsigned long imap = bucket->imap;
		void **vector, *orig;
		int ent;

		orig = bucket->irq_info;
		vector = (void **)orig;

		if ((bucket->flags & IBF_MULTI) != 0) {
			int other = 0;
			void *orphan = NULL;
			for(ent = 0; ent < 4; ent++) {
				if(vector[ent] == action)
					vector[ent] = NULL;
				else if(vector[ent] != NULL) {
					orphan = vector[ent];
					other++;
				}
			}

			/* Only free when no other shared irq
			 * uses this bucket.
			 */
			if(other) {
				if (other == 1) {
					/* Convert back to non-shared bucket. */
					bucket->irq_info = orphan;
					bucket->flags &= ~(IBF_MULTI);
					kfree(vector);
				}
				goto out;
			}
		} else {
			bucket->irq_info = NULL;
		}

		/* This unique interrupt source is now inactive. */
		bucket->flags &= ~IBF_ACTIVE;

		/* See if any other buckets share this bucket's IMAP
		 * and are still active.
		 */
		for(ent = 0; ent < NUM_IVECS; ent++) {
			bp = &ivector_table[ent];
			if(bp != bucket		&&
			   bp->imap == imap	&&
			   (bp->flags & IBF_ACTIVE) != 0)
				break;
		}

		/* Only disable when no other sub-irq levels of
		 * the same IMAP are active.
		 */
		if (ent == NUM_IVECS)
			disable_irq(irq);
	}

out:
	kfree(action);
	restore_flags(flags);
}

#ifdef CONFIG_SMP

/* Who has the global irq brlock */
unsigned char global_irq_holder = NO_PROC_ID;

/* Debug dump of the global-irq brlock and bottom-half lock state,
 * one entry per cpu.
 */
static void show(char * str)
{
	int cpu = smp_processor_id();
	int i;

	printk("\n%s, CPU %d:\n", str, cpu);
	printk("irq:  %d [ ", irqs_running());
	for (i = 0; i < smp_num_cpus; i++)
		printk("%u ", __brlock_array[i][BR_GLOBALIRQ_LOCK]);
	printk("]\nbh:   %d [ ",
	       (spin_is_locked(&global_bh_lock) ? 1 : 0));
	for (i = 0; i < smp_num_cpus; i++)
		printk("%u ", local_bh_count(i));
	printk("]\n");
}

#define MAXCOUNT 100000000

#if 0
#define SYNC_OTHER_ULTRAS(x)	udelay(x+1)
#else
#define SYNC_OTHER_ULTRAS(x)	membar("#Sync");
#endif

void synchronize_irq(void)
{
	if (irqs_running()) {
		cli();
		sti();
	}
}

static inline void get_irqlock(int cpu)
{
	int count;

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -