
📄 irq.c

📁 Part of the uploaded linux-jx2410 source code
💻 C
📖 Page 1 of 3
/* $Id: irq.c,v 1.1.1.1 2004/02/04 12:55:51 laputa Exp $
 * irq.c: UltraSparc IRQ handling/init/registry.
 *
 * Copyright (C) 1997  David S. Miller  (davem@caip.rutgers.edu)
 * Copyright (C) 1998  Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1998  Jakub Jelinek    (jj@ultra.linux.cz)
 */

#include <linux/config.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/kbd_ll.h>

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/irq.h>
#include <asm/sbus.h>
#include <asm/iommu.h>
#include <asm/upa.h>
#include <asm/oplib.h>
#include <asm/timer.h>
#include <asm/smp.h>
#include <asm/hardirq.h>
#include <asm/softirq.h>
#include <asm/starfire.h>
#include <asm/uaccess.h>
#include <asm/cache.h>

#ifdef CONFIG_SMP
static void distribute_irqs(void);
#endif

/* UPA nodes send interrupt packet to UltraSparc with first data reg
 * value low 5 (7 on Starfire) bits holding the IRQ identifier being
 * delivered.  We must translate this into a non-vector IRQ so we can
 * set the softint on this cpu.
 *
 * To make processing these packets efficient and race free we use
 * an array of irq buckets below.  The interrupt vector handler in
 * entry.S feeds incoming packets into per-cpu pil-indexed lists.
 * The IVEC handler does not need to act atomically, the PIL dispatch
 * code uses CAS to get an atomic snapshot of the list and clear it
 * at the same time.
 */
struct ino_bucket ivector_table[NUM_IVECS] __attribute__ ((aligned (SMP_CACHE_BYTES)));

#ifndef CONFIG_SMP
unsigned int __up_workvec[16] __attribute__ ((aligned (SMP_CACHE_BYTES)));
#define irq_work(__cpu, __pil)	&(__up_workvec[(void)(__cpu), (__pil)])
#else
#define irq_work(__cpu, __pil)	&(cpu_data[(__cpu)].irq_worklists[(__pil)])
#endif

#ifdef CONFIG_PCI
/* This is a table of physical addresses used to deal with IBF_DMA_SYNC.
 * It is used for PCI only to synchronize DMA transfers with IRQ delivery
 * for devices behind busses other than APB on Sabre systems.
 *
 * Currently these physical addresses are just config space accesses
 * to the command register for that device.
 */
unsigned long pci_dma_wsync;
unsigned long dma_sync_reg_table[256];
unsigned char dma_sync_reg_table_entry = 0;
#endif

/* This is based upon code in the 32-bit Sparc kernel written mostly by
 * David Redman (djhr@tadpole.co.uk).
 */
#define MAX_STATIC_ALLOC	4
static struct irqaction static_irqaction[MAX_STATIC_ALLOC];
static int static_irq_count = 0;

/* This is exported so that fast IRQ handlers can get at it... -DaveM */
struct irqaction *irq_action[NR_IRQS+1] = {
	  NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL,
	  NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL
};

static void register_irq_proc (unsigned int irq);

/*
 * Upper 2b of irqaction->flags holds the ino.
 * irqaction->mask holds the smp affinity information.
 */
#define put_ino_in_irqaction(action, irq) \
	action->flags &= 0xffffffffffffUL; \
	if (__bucket(irq) == &pil0_dummy_bucket) \
		action->flags |= 0xdeadUL << 48;  \
	else \
		action->flags |= __irq_ino(irq) << 48;
#define get_ino_in_irqaction(action)	(action->flags >> 48)

#define put_smpaff_in_irqaction(action, smpaff)	(action)->mask = (smpaff)
#define get_smpaff_in_irqaction(action) 	((action)->mask)

int get_irq_list(char *buf)
{
	int i, len = 0;
	struct irqaction *action;
#ifdef CONFIG_SMP
	int j;
#endif

	for(i = 0; i < (NR_IRQS + 1); i++) {
		if(!(action = *(i + irq_action)))
			continue;
		len += sprintf(buf + len, "%3d: ", i);
#ifndef CONFIG_SMP
		len += sprintf(buf + len, "%10u ", kstat_irqs(i));
#else
		for (j = 0; j < smp_num_cpus; j++)
			len += sprintf(buf + len, "%10u ",
				       kstat.irqs[cpu_logical_map(j)][i]);
#endif
		len += sprintf(buf + len, " %s:%lx", action->name, \
						get_ino_in_irqaction(action));
		for(action = action->next; action; action = action->next) {
			len += sprintf(buf+len, ", %s:%lx", action->name, \
						get_ino_in_irqaction(action));
		}
		len += sprintf(buf + len, "\n");
	}
	return len;
}

/* Now these are always passed a true fully specified sun4u INO. */
void enable_irq(unsigned int irq)
{
	struct ino_bucket *bucket = __bucket(irq);
	unsigned long imap;
	unsigned long tid;

	imap = bucket->imap;
	if (imap == 0UL)
		return;

	if (tlb_type == cheetah) {
		/* We set it to our Safari AID. */
		__asm__ __volatile__("ldxa [%%g0] %1, %0"
				     : "=r" (tid)
				     : "i" (ASI_SAFARI_CONFIG));
		tid = ((tid & (0x3ffUL<<17)) << 9);
		tid &= IMAP_AID_SAFARI;
	} else if (this_is_starfire == 0) {
		/* We set it to our UPA MID. */
		__asm__ __volatile__("ldxa [%%g0] %1, %0"
				     : "=r" (tid)
				     : "i" (ASI_UPA_CONFIG));
		tid = ((tid & UPA_CONFIG_MID) << 9);
		tid &= IMAP_TID_UPA;
	} else {
		tid = (starfire_translate(imap, current->processor) << 26);
		tid &= IMAP_TID_UPA;
	}

	/* NOTE NOTE NOTE, IGN and INO are read-only, IGN is a product
	 * of this SYSIO's preconfigured IGN in the SYSIO Control
	 * Register, the hardware just mirrors that value here.
	 * However for Graphics and UPA Slave devices the full
	 * IMAP_INR field can be set by the programmer here.
	 *
	 * Things like FFB can now be handled via the new IRQ mechanism.
	 */
	upa_writel(tid | IMAP_VALID, imap);
}

/* This now gets passed true ino's as well. */
void disable_irq(unsigned int irq)
{
	struct ino_bucket *bucket = __bucket(irq);
	unsigned long imap;

	imap = bucket->imap;
	if (imap != 0UL) {
		u32 tmp;

		/* NOTE: We do not want to futz with the IRQ clear registers
		 *       and move the state to IDLE, the SCSI code does call
		 *       disable_irq() to assure atomicity in the queue cmd
		 *       SCSI adapter driver code.  Thus we'd lose interrupts.
		 */
		tmp = upa_readl(imap);
		tmp &= ~IMAP_VALID;
		upa_writel(tmp, imap);
	}
}

/* The timer is the one "weird" interrupt which is generated by
 * the CPU %tick register and not by some normal vectored interrupt
 * source.  To handle this special case, we use this dummy INO bucket.
 */
static struct ino_bucket pil0_dummy_bucket = {
	0,	/* irq_chain */
	0,	/* pil */
	0,	/* pending */
	0,	/* flags */
	0,	/* __unused */
	NULL,	/* irq_info */
	0UL,	/* iclr */
	0UL,	/* imap */
};

unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long imap)
{
	struct ino_bucket *bucket;
	int ino;

	if(pil == 0) {
		if(iclr != 0UL || imap != 0UL) {
			prom_printf("Invalid dummy bucket for PIL0 (%lx:%lx)\n",
				    iclr, imap);
			prom_halt();
		}
		return __irq(&pil0_dummy_bucket);
	}

	/* RULE: Both must be specified in all other cases. */
	if (iclr == 0UL || imap == 0UL) {
		prom_printf("Invalid build_irq %d %d %016lx %016lx\n",
			    pil, inofixup, iclr, imap);
		prom_halt();
	}

	ino = (upa_readl(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
	if(ino > NUM_IVECS) {
		prom_printf("Invalid INO %04x (%d:%d:%016lx:%016lx)\n",
			    ino, pil, inofixup, iclr, imap);
		prom_halt();
	}

	/* Ok, looks good, set it up.  Don't touch the irq_chain or
	 * the pending flag.
	 */
	bucket = &ivector_table[ino];
	if ((bucket->flags & IBF_ACTIVE) ||
	    (bucket->irq_info != NULL)) {
		/* This is a gross fatal error if it happens here. */
		prom_printf("IRQ: Trying to reinit INO bucket, fatal error.\n");
		prom_printf("IRQ: Request INO %04x (%d:%d:%016lx:%016lx)\n",
			    ino, pil, inofixup, iclr, imap);
		prom_printf("IRQ: Existing (%d:%016lx:%016lx)\n",
			    bucket->pil, bucket->iclr, bucket->imap);
		prom_printf("IRQ: Cannot continue, halting...\n");
		prom_halt();
	}
	bucket->imap  = imap;
	bucket->iclr  = iclr;
	bucket->pil   = pil;
	bucket->flags = 0;

	bucket->irq_info = NULL;

	return __irq(bucket);
}

static void atomic_bucket_insert(struct ino_bucket *bucket)
{
	unsigned long pstate;
	unsigned int *ent;

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
	__asm__ __volatile__("wrpr %0, %1, %%pstate"
			     : : "r" (pstate), "i" (PSTATE_IE));
	ent = irq_work(smp_processor_id(), bucket->pil);
	bucket->irq_chain = *ent;
	*ent = __irq(bucket);
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
}

int request_irq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *),
		unsigned long irqflags, const char *name, void *dev_id)
{
	struct irqaction *action, *tmp = NULL;
	struct ino_bucket *bucket = __bucket(irq);
	unsigned long flags;
	int pending = 0;

	if ((bucket != &pil0_dummy_bucket) &&
	    (bucket < &ivector_table[0] ||
	     bucket >= &ivector_table[NUM_IVECS])) {
		unsigned int *caller;

		__asm__ __volatile__("mov %%i7, %0" : "=r" (caller));
		printk(KERN_CRIT "request_irq: Old style IRQ registry attempt "
		       "from %p, irq %08x.\n", caller, irq);
		return -EINVAL;
	}

	if(!handler)
	    return -EINVAL;

	if ((bucket != &pil0_dummy_bucket) && (irqflags & SA_SAMPLE_RANDOM)) {
		/*
		 * This function might sleep, we want to call it first,
		 * outside of the atomic block. In SA_STATIC_ALLOC case,
		 * random driver's kmalloc will fail, but it is safe.
		 * If already initialized, random driver will not reinit.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded, without actually
		 * installing a new handler, but is this really a problem,
		 * only the sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	save_and_cli(flags);

	action = *(bucket->pil + irq_action);
	if(action) {
		if((action->flags & SA_SHIRQ) && (irqflags & SA_SHIRQ))
			for (tmp = action; tmp->next; tmp = tmp->next)
				;
		else {
			restore_flags(flags);
			return -EBUSY;
		}
		action = NULL;		/* Or else! */
	}

	/* If this is flagged as statically allocated then we use our
	 * private struct which is never freed.
	 */
	if(irqflags & SA_STATIC_ALLOC) {
	    if(static_irq_count < MAX_STATIC_ALLOC)
		action = &static_irqaction[static_irq_count++];
	    else
		printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed "
		       "using kmalloc\n", irq, name);
	}
	if(action == NULL)
	    action = (struct irqaction *)kmalloc(sizeof(struct irqaction),
						 GFP_KERNEL);

	if(!action) {
		restore_flags(flags);
		return -ENOMEM;
	}

	if (bucket == &pil0_dummy_bucket) {
		bucket->irq_info = action;
		bucket->flags |= IBF_ACTIVE;
	} else {
		if((bucket->flags & IBF_ACTIVE) != 0) {
			void *orig = bucket->irq_info;
			void **vector = NULL;

			if((bucket->flags & IBF_PCI) == 0) {
				printk("IRQ: Trying to share non-PCI bucket.\n");
				goto free_and_ebusy;
			}
			if((bucket->flags & IBF_MULTI) == 0) {
				vector = kmalloc(sizeof(void *) * 4, GFP_KERNEL);
				if(vector == NULL)
					goto free_and_enomem;

				/* We might have slept. */
				if ((bucket->flags & IBF_MULTI) != 0) {
					int ent;

					kfree(vector);
					vector = (void **)bucket->irq_info;
					for(ent = 0; ent < 4; ent++) {
						if (vector[ent] == NULL) {
							vector[ent] = action;
							break;
						}
					}
					if (ent == 4)
						goto free_and_ebusy;
				} else {
					vector[0] = orig;
					vector[1] = action;
					vector[2] = NULL;
					vector[3] = NULL;
					bucket->irq_info = vector;
					bucket->flags |= IBF_MULTI;
				}
			} else {
				int ent;

				vector = (void **)orig;
				for(ent = 0; ent < 4; ent++) {
					if(vector[ent] == NULL) {
						vector[ent] = action;
						break;
					}
				}
				if (ent == 4)
					goto free_and_ebusy;
			}
		} else {
			bucket->irq_info = action;
			bucket->flags |= IBF_ACTIVE;
		}
		pending = bucket->pending;
		if(pending)
			bucket->pending = 0;
	}

	action->handler = handler;
	action->flags = irqflags;
	action->name = name;
	action->next = NULL;
	action->dev_id = dev_id;
	put_ino_in_irqaction(action, irq);
	put_smpaff_in_irqaction(action, 0);

	if(tmp)
		tmp->next = action;
	else
		*(bucket->pil + irq_action) = action;

	enable_irq(irq);

	/* We ate the IVEC already, this makes sure it does not get lost. */
	if(pending) {
		atomic_bucket_insert(bucket);
		set_softint(1 << bucket->pil);
	}
	restore_flags(flags);
	if ((bucket != &pil0_dummy_bucket) && (!(irqflags & SA_STATIC_ALLOC)))
		register_irq_proc(__irq_ino(irq));

#ifdef CONFIG_SMP
	distribute_irqs();
#endif
	return 0;

free_and_ebusy:
	kfree(action);
	restore_flags(flags);
	return -EBUSY;

free_and_enomem:
	kfree(action);
	restore_flags(flags);
	return -ENOMEM;
}

void free_irq(unsigned int irq, void *dev_id)
{
	struct irqaction *action;
	struct irqaction *tmp = NULL;
	unsigned long flags;
	struct ino_bucket *bucket = __bucket(irq), *bp;

	if ((bucket != &pil0_dummy_bucket) &&
	    (bucket < &ivector_table[0] ||
	     bucket >= &ivector_table[NUM_IVECS])) {
		unsigned int *caller;

		__asm__ __volatile__("mov %%i7, %0" : "=r" (caller));
		printk(KERN_CRIT "free_irq: Old style IRQ removal attempt "
		       "from %p, irq %08x.\n", caller, irq);
		return;
	}

	action = *(bucket->pil + irq_action);
	if(!action->handler) {
		printk("Freeing free IRQ %d\n", bucket->pil);
		return;
	}
	if(dev_id) {
		for( ; action; action = action->next) {
			if(action->dev_id == dev_id)
				break;
			tmp = action;
		}
		if(!action) {
			printk("Trying to free free shared IRQ %d\n", bucket->pil);
			return;
		}
	} else if(action->flags & SA_SHIRQ) {
		printk("Trying to free shared IRQ %d with NULL device ID\n", bucket->pil);
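A minimal standalone sketch (not part of irq.c) of the bit-packing trick that the put_ino_in_irqaction()/get_ino_in_irqaction() macros above rely on: on sparc64 (LP64, 64-bit unsigned long) the low 48 bits of irqaction->flags keep the ordinary flag bits while the upper 16 bits carry the INO. The struct name and the constants below are made up purely for illustration.

/* Illustrative only: demonstrates packing an ino into the top 16 bits
 * of a 64-bit flags word and reading it back, mirroring the
 * put_ino_in_irqaction()/get_ino_in_irqaction() macros.
 * Assumes unsigned long is 64 bits, as on sparc64.
 */
#include <stdio.h>

struct fake_action {
	unsigned long flags;	/* low 48 bits: flag bits, high 16 bits: ino */
};

int main(void)
{
	struct fake_action a = { .flags = 0x4000UL };	/* pretend flag bits */
	unsigned long ino = 0x7e3UL;			/* pretend sun4u INO */

	a.flags &= 0xffffffffffffUL;	/* clear the upper 16 bits */
	a.flags |= ino << 48;		/* stash the ino there */

	/* get_ino_in_irqaction() equivalent: shift the ino back down. */
	printf("flags=%#lx ino=%#lx\n", a.flags, a.flags >> 48);
	return 0;
}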
