irq.c (linux-2.6.15.6, C, page 1 of 2)

/* $Id: irq.c,v 1.114 2002/01/11 08:45:38 davem Exp $
 * irq.c: UltraSparc IRQ handling/init/registry.
 *
 * Copyright (C) 1997  David S. Miller  (davem@caip.rutgers.edu)
 * Copyright (C) 1998  Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1998  Jakub Jelinek    (jj@ultra.linux.cz)
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/sbus.h>
#include <asm/iommu.h>
#include <asm/upa.h>
#include <asm/oplib.h>
#include <asm/timer.h>
#include <asm/smp.h>
#include <asm/starfire.h>
#include <asm/uaccess.h>
#include <asm/cache.h>
#include <asm/cpudata.h>
#include <asm/auxio.h>

#ifdef CONFIG_SMP
static void distribute_irqs(void);
#endif

/* UPA nodes send interrupt packet to UltraSparc with first data reg
 * value low 5 (7 on Starfire) bits holding the IRQ identifier being
 * delivered.  We must translate this into a non-vector IRQ so we can
 * set the softint on this cpu.
 *
 * To make processing these packets efficient and race free we use
 * an array of irq buckets below.  The interrupt vector handler in
 * entry.S feeds incoming packets into per-cpu pil-indexed lists.
 * The IVEC handler does not need to act atomically, the PIL dispatch
 * code uses CAS to get an atomic snapshot of the list and clear it
 * at the same time.
 */

struct ino_bucket ivector_table[NUM_IVECS] __attribute__ ((aligned (SMP_CACHE_BYTES)));

/* This has to be in the main kernel image, it cannot be
 * turned into per-cpu data.  The reason is that the main
 * kernel image is locked into the TLB and this structure
 * is accessed from the vectored interrupt trap handler.  If
 * access to this structure takes a TLB miss it could cause
 * the 5-level sparc v9 trap stack to overflow.
 */
struct irq_work_struct {
	unsigned int	irq_worklists[16];
};
struct irq_work_struct __irq_work[NR_CPUS];
#define irq_work(__cpu, __pil)	&(__irq_work[(__cpu)].irq_worklists[(__pil)])

static struct irqaction *irq_action[NR_IRQS+1];

/* This only synchronizes entities which modify IRQ handler
 * state and some selected user-level spots that want to
 * read things in the table.  IRQ handler processing orders
 * its accesses such that no locking is needed.
 */
static DEFINE_SPINLOCK(irq_action_lock);

static void register_irq_proc (unsigned int irq);

/*
 * Upper 2b of irqaction->flags holds the ino.
 * irqaction->mask holds the smp affinity information.
 */
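
/* A worked illustration of the packing done by the macros below (the
 * ino value 0x2a is hypothetical, chosen only for this sketch):
 *
 *	action->flags &= 0xffffffffffffUL;	keep the low 48 bits
 *	action->flags |= 0x2aUL << 48;		ino lives in bits 63..48
 *	ino = action->flags >> 48;		what the getter recovers
 *
 * The PIL0 dummy bucket gets the sentinel 0xdead in those bits instead,
 * since the %tick timer "interrupt" has no real INO behind it.
 */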
#define put_ino_in_irqaction(action, irq) \
	action->flags &= 0xffffffffffffUL; \
	if (__bucket(irq) == &pil0_dummy_bucket) \
		action->flags |= 0xdeadUL << 48;  \
	else \
		action->flags |= __irq_ino(irq) << 48;
#define get_ino_in_irqaction(action)	(action->flags >> 48)

#define put_smpaff_in_irqaction(action, smpaff)	(action)->mask = (smpaff)
#define get_smpaff_in_irqaction(action)	((action)->mask)

int show_interrupts(struct seq_file *p, void *v)
{
	unsigned long flags;
	int i = *(loff_t *) v;
	struct irqaction *action;
#ifdef CONFIG_SMP
	int j;
#endif

	spin_lock_irqsave(&irq_action_lock, flags);
	if (i <= NR_IRQS) {
		if (!(action = *(i + irq_action)))
			goto out_unlock;
		seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for (j = 0; j < NR_CPUS; j++) {
			if (!cpu_online(j))
				continue;
			seq_printf(p, "%10u ",
				   kstat_cpu(j).irqs[i]);
		}
#endif
		seq_printf(p, " %s:%lx", action->name,
			   get_ino_in_irqaction(action));
		for (action = action->next; action; action = action->next) {
			seq_printf(p, ", %s:%lx", action->name,
				   get_ino_in_irqaction(action));
		}
		seq_putc(p, '\n');
	}
out_unlock:
	spin_unlock_irqrestore(&irq_action_lock, flags);

	return 0;
}

/* Now these are always passed a true fully specified sun4u INO. */
void enable_irq(unsigned int irq)
{
	struct ino_bucket *bucket = __bucket(irq);
	unsigned long imap;
	unsigned long tid;

	imap = bucket->imap;
	if (imap == 0UL)
		return;

	preempt_disable();

	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		unsigned long ver;

		__asm__ ("rdpr %%ver, %0" : "=r" (ver));
		if ((ver >> 32) == 0x003e0016) {
			/* We set it to our JBUS ID. */
			__asm__ __volatile__("ldxa [%%g0] %1, %0"
					     : "=r" (tid)
					     : "i" (ASI_JBUS_CONFIG));
			tid = ((tid & (0x1fUL<<17)) << 9);
			tid &= IMAP_TID_JBUS;
		} else {
			/* We set it to our Safari AID. */
			__asm__ __volatile__("ldxa [%%g0] %1, %0"
					     : "=r" (tid)
					     : "i" (ASI_SAFARI_CONFIG));
			tid = ((tid & (0x3ffUL<<17)) << 9);
			tid &= IMAP_AID_SAFARI;
		}
	} else if (this_is_starfire == 0) {
		/* We set it to our UPA MID. */
		__asm__ __volatile__("ldxa [%%g0] %1, %0"
				     : "=r" (tid)
				     : "i" (ASI_UPA_CONFIG));
		tid = ((tid & UPA_CONFIG_MID) << 9);
		tid &= IMAP_TID_UPA;
	} else {
		tid = (starfire_translate(imap, smp_processor_id()) << 26);
		tid &= IMAP_TID_UPA;
	}

	/* NOTE NOTE NOTE, IGN and INO are read-only, IGN is a product
	 * of this SYSIO's preconfigured IGN in the SYSIO Control
	 * Register, the hardware just mirrors that value here.
	 * However for Graphics and UPA Slave devices the full
	 * IMAP_INR field can be set by the programmer here.
	 *
	 * Things like FFB can now be handled via the new IRQ mechanism.
	 */
	upa_writel(tid | IMAP_VALID, imap);

	preempt_enable();
}

/* This now gets passed true ino's as well. */
void disable_irq(unsigned int irq)
{
	struct ino_bucket *bucket = __bucket(irq);
	unsigned long imap;

	imap = bucket->imap;
	if (imap != 0UL) {
		u32 tmp;

		/* NOTE: We do not want to futz with the IRQ clear registers
		 *       and move the state to IDLE, the SCSI code does call
		 *       disable_irq() to assure atomicity in the queue cmd
		 *       SCSI adapter driver code.  Thus we'd lose interrupts.
		 */
		tmp = upa_readl(imap);
		tmp &= ~IMAP_VALID;
		upa_writel(tmp, imap);
	}
}

/* The timer is the one "weird" interrupt which is generated by
 * the CPU %tick register and not by some normal vectored interrupt
 * source.  To handle this special case, we use this dummy INO bucket.
 */
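
/* Because the dummy bucket below is never given an imap, enable_irq()
 * and disable_irq() above return early for it (imap == 0UL), and
 * build_irq() hands it out for pil == 0 only when the caller passes no
 * iclr/imap at all; there is no interrupt map register to program for
 * the %tick source.
 */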
static struct irq_desc pil0_dummy_desc;
static struct ino_bucket pil0_dummy_bucket = {
	.irq_info	=	&pil0_dummy_desc,
};

static void build_irq_error(const char *msg, unsigned int ino, int pil, int inofixup,
			    unsigned long iclr, unsigned long imap,
			    struct ino_bucket *bucket)
{
	prom_printf("IRQ: INO %04x (%d:%016lx:%016lx) --> "
		    "(%d:%d:%016lx:%016lx), halting...\n",
		    ino, bucket->pil, bucket->iclr, bucket->imap,
		    pil, inofixup, iclr, imap);
	prom_halt();
}

unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long imap)
{
	struct ino_bucket *bucket;
	int ino;

	if (pil == 0) {
		if (iclr != 0UL || imap != 0UL) {
			prom_printf("Invalid dummy bucket for PIL0 (%lx:%lx)\n",
				    iclr, imap);
			prom_halt();
		}
		return __irq(&pil0_dummy_bucket);
	}

	/* RULE: Both must be specified in all other cases. */
	if (iclr == 0UL || imap == 0UL) {
		prom_printf("Invalid build_irq %d %d %016lx %016lx\n",
			    pil, inofixup, iclr, imap);
		prom_halt();
	}

	ino = (upa_readl(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
	if (ino >= NUM_IVECS) {
		prom_printf("Invalid INO %04x (%d:%d:%016lx:%016lx)\n",
			    ino, pil, inofixup, iclr, imap);
		prom_halt();
	}

	bucket = &ivector_table[ino];
	if (bucket->flags & IBF_ACTIVE)
		build_irq_error("IRQ: Trying to build active INO bucket.\n",
				ino, pil, inofixup, iclr, imap, bucket);

	if (bucket->irq_info) {
		if (bucket->imap != imap || bucket->iclr != iclr)
			build_irq_error("IRQ: Trying to reinit INO bucket.\n",
					ino, pil, inofixup, iclr, imap, bucket);

		goto out;
	}

	bucket->irq_info = kmalloc(sizeof(struct irq_desc), GFP_ATOMIC);
	if (!bucket->irq_info) {
		prom_printf("IRQ: Error, kmalloc(irq_desc) failed.\n");
		prom_halt();
	}
	memset(bucket->irq_info, 0, sizeof(struct irq_desc));

	/* Ok, looks good, set it up.  Don't touch the irq_chain or
	 * the pending flag.
	 */
	bucket->imap  = imap;
	bucket->iclr  = iclr;
	bucket->pil   = pil;
	bucket->flags = 0;

out:
	return __irq(bucket);
}

static void atomic_bucket_insert(struct ino_bucket *bucket)
{
	unsigned long pstate;
	unsigned int *ent;

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
	__asm__ __volatile__("wrpr %0, %1, %%pstate"
			     : : "r" (pstate), "i" (PSTATE_IE));
	ent = irq_work(smp_processor_id(), bucket->pil);
	bucket->irq_chain = *ent;
	*ent = __irq(bucket);
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
}

static int check_irq_sharing(int pil, unsigned long irqflags)
{
	struct irqaction *action, *tmp;

	action = *(irq_action + pil);
	if (action) {
		if ((action->flags & SA_SHIRQ) && (irqflags & SA_SHIRQ)) {
			for (tmp = action; tmp->next; tmp = tmp->next)
				;
		} else {
			return -EBUSY;
		}
	}

	return 0;
}

static void append_irq_action(int pil, struct irqaction *action)
{
	struct irqaction **pp = irq_action + pil;

	while (*pp)
		pp = &((*pp)->next);
	*pp = action;
}

static struct irqaction *get_action_slot(struct ino_bucket *bucket)
{
	struct irq_desc *desc = bucket->irq_info;
	int max_irq, i;

	max_irq = 1;
	if (bucket->flags & IBF_PCI)
		max_irq = MAX_IRQ_DESC_ACTION;
	for (i = 0; i < max_irq; i++) {
		struct irqaction *p = &desc->action[i];
		u32 mask = (1 << i);

		if (desc->action_active_mask & mask)
			continue;

		desc->action_active_mask |= mask;
		return p;
	}
	return NULL;
}

int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *),
		unsigned long irqflags, const char *name, void *dev_id)
{
	struct irqaction *action;
	struct ino_bucket *bucket = __bucket(irq);
	unsigned long flags;
	int pending = 0;

	if (unlikely(!handler))
		return -EINVAL;

	if (unlikely(!bucket->irq_info))
		return -ENODEV;

	if ((bucket != &pil0_dummy_bucket) && (irqflags & SA_SAMPLE_RANDOM)) {
		/*
		 * This function might sleep, we want to call it first,
		 * outside of the atomic block. In SA_STATIC_ALLOC case,
		 * random driver's kmalloc will fail, but it is safe.
		 * If already initialized, random driver will not reinit.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded, without actually
		 * installing a new handler, but is this really a problem,
		 * only the sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	spin_lock_irqsave(&irq_action_lock, flags);

	if (check_irq_sharing(bucket->pil, irqflags)) {
		spin_unlock_irqrestore(&irq_action_lock, flags);
		return -EBUSY;
	}

	action = get_action_slot(bucket);
	if (!action) {
		spin_unlock_irqrestore(&irq_action_lock, flags);
		return -ENOMEM;
	}

	bucket->flags |= IBF_ACTIVE;
	pending = 0;
	if (bucket != &pil0_dummy_bucket) {
		pending = bucket->pending;
		if (pending)
			bucket->pending = 0;
	}

	action->handler = handler;
	action->flags = irqflags;
	action->name = name;
	action->next = NULL;
	action->dev_id = dev_id;
	put_ino_in_irqaction(action, irq);
	put_smpaff_in_irqaction(action, CPU_MASK_NONE);

	append_irq_action(bucket->pil, action);

	enable_irq(irq);

	/* We ate the IVEC already, this makes sure it does not get lost. */
	if (pending) {
		atomic_bucket_insert(bucket);
		set_softint(1 << bucket->pil);
	}

	spin_unlock_irqrestore(&irq_action_lock, flags);

	if (bucket != &pil0_dummy_bucket)
		register_irq_proc(__irq_ino(irq));

#ifdef CONFIG_SMP
	distribute_irqs();
#endif
	return 0;
}

EXPORT_SYMBOL(request_irq);

static struct irqaction *unlink_irq_action(unsigned int irq, void *dev_id)
{
	struct ino_bucket *bucket = __bucket(irq);
	struct irqaction *action, **pp;

	pp = irq_action + bucket->pil;
	action = *pp;
	if (unlikely(!action))
		return NULL;

	if (unlikely(!action->handler)) {
		printk("Freeing free IRQ %d\n", bucket->pil);
		return NULL;
	}

	while (action && action->dev_id != dev_id) {
		pp = &action->next;
		action = *pp;
	}

	if (likely(action))
		*pp = action->next;

	return action;
}

void free_irq(unsigned int irq, void *dev_id)
{
	struct irqaction *action;
	struct ino_bucket *bucket;
	unsigned long flags;

	spin_lock_irqsave(&irq_action_lock, flags);

	action = unlink_irq_action(irq, dev_id);

	spin_unlock_irqrestore(&irq_action_lock, flags);

	if (unlikely(!action))
		return;

	synchronize_irq(irq);

	spin_lock_irqsave(&irq_action_lock, flags);

	bucket = __bucket(irq);
	if (bucket != &pil0_dummy_bucket) {
		struct irq_desc *desc = bucket->irq_info;
		unsigned long imap = bucket->imap;
		int ent, i;

		for (i = 0; i < MAX_IRQ_DESC_ACTION; i++) {
			struct irqaction *p = &desc->action[i];

			if (p == action) {
				desc->action_active_mask &= ~(1 << i);
				break;
			}
		}

		if (!desc->action_active_mask) {
			/* This unique interrupt source is now inactive. */
			bucket->flags &= ~IBF_ACTIVE;

			/* See if any other buckets share this bucket's IMAP
			 * and are still active.
			 */
			for (ent = 0; ent < NUM_IVECS; ent++) {
				struct ino_bucket *bp = &ivector_table[ent];
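
/* A minimal usage sketch of the request_irq()/free_irq() API above
 * (illustrative only; the mydev names are hypothetical and not part of
 * this file).  Note the 2.6-era three-argument handler signature, and
 * that SA_SHIRQ sharing is granted by check_irq_sharing() only when the
 * installed handler and the new one both set it:
 *
 *	static irqreturn_t mydev_intr(int irq, void *dev_id,
 *				      struct pt_regs *regs)
 *	{
 *		struct mydev *dp = dev_id;
 *		// ... quiesce the device ...
 *		return IRQ_HANDLED;
 *	}
 *
 *	// irq cookie obtained earlier from build_irq() by bus probe code
 *	int err = request_irq(dp->irq, mydev_intr, SA_SHIRQ, "mydev", dp);
 *	if (err)
 *		return err;
 *	// ... and on detach:
 *	free_irq(dp->irq, dp);
 */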
