irq.c

来自「linux 内核源代码」· C语言 代码 · 共 980 行 · 第 1/2 页

C
980
字号
/* irq.c: UltraSparc IRQ handling/init/registry.
 *
 * Copyright (C) 1997, 2007  David S. Miller  (davem@davemloft.net)
 * Copyright (C) 1998  Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1998  Jakub Jelinek    (jj@ultra.linux.cz)
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/bootmem.h>
#include <linux/irq.h>

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/sbus.h>
#include <asm/iommu.h>
#include <asm/upa.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/timer.h>
#include <asm/smp.h>
#include <asm/starfire.h>
#include <asm/uaccess.h>
#include <asm/cache.h>
#include <asm/cpudata.h>
#include <asm/auxio.h>
#include <asm/head.h>
#include <asm/hypervisor.h>
#include <asm/cacheflush.h>

/* UPA nodes send interrupt packet to UltraSparc with first data reg
 * value low 5 (7 on Starfire) bits holding the IRQ identifier being
 * delivered.  We must translate this into a non-vector IRQ so we can
 * set the softint on this cpu.
 *
 * To make processing these packets efficient and race free we use
 * an array of irq buckets below.  The interrupt vector handler in
 * entry.S feeds incoming packets into per-cpu pil-indexed lists.
 *
 * If you make changes to ino_bucket, please update hand coded assembler
 * of the vectored interrupt trap handler(s) in entry.S and sun4v_ivec.S
 */
struct ino_bucket {
/* Physical address of the next bucket in a per-cpu work list; the
 * offsets below are hard-coded in the entry.S/sun4v_ivec.S assembler,
 * hence the /*0xNN*-style layout annotations. */
/*0x00*/unsigned long __irq_chain_pa;

	/* Virtual interrupt number assigned to this INO.  */
/*0x08*/unsigned int __virt_irq;
/*0x0c*/unsigned int __pad;
};

#define NUM_IVECS	(IMAP_INR + 1)
struct ino_bucket *ivector_table;
unsigned long ivector_table_pa;

/* On several sun4u processors, it is illegal to mix bypass and
 * non-bypass accesses.  Therefore we access all INO buckets
 * using bypass accesses only.
 */

/* Read bucket->__irq_chain_pa via a physical (MMU-bypass) load from
 * the bucket at physical address bucket_pa.
 */
static unsigned long bucket_get_chain_pa(unsigned long bucket_pa)
{
	unsigned long ret;

	__asm__ __volatile__("ldxa	[%1] %2, %0"
			     : "=&r" (ret)
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq_chain_pa)),
			       "i" (ASI_PHYS_USE_EC));

	return ret;
}

/* Zero bucket->__irq_chain_pa (unlink the bucket) via a bypass store. */
static void bucket_clear_chain_pa(unsigned long bucket_pa)
{
	__asm__ __volatile__("stxa	%%g0, [%0] %1"
			     : /* no outputs */
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __irq_chain_pa)),
			       "i" (ASI_PHYS_USE_EC));
}

/* Read bucket->__virt_irq (32-bit) via a bypass load. */
static unsigned int bucket_get_virt_irq(unsigned long bucket_pa)
{
	unsigned int ret;

	__asm__ __volatile__("lduwa	[%1] %2, %0"
			     : "=&r" (ret)
			     : "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __virt_irq)),
			       "i" (ASI_PHYS_USE_EC));

	return ret;
}

/* Store virt_irq into bucket->__virt_irq via a bypass store. */
static void bucket_set_virt_irq(unsigned long bucket_pa,
				unsigned int virt_irq)
{
	__asm__ __volatile__("stwa	%0, [%1] %2"
			     : /* no outputs */
			     : "r" (virt_irq),
			       "r" (bucket_pa +
				    offsetof(struct ino_bucket,
					     __virt_irq)),
			       "i" (ASI_PHYS_USE_EC));
}

#define irq_work_pa(__cpu)	&(trap_block[(__cpu)].irq_worklist_pa)

/* Map from virtual IRQ number to the (dev_handle, dev_ino) pair that
 * identifies the interrupt source; entry 0 is intentionally never used
 * so 0 can act as a "no irq"/failure value.  Protected by
 * virt_irq_alloc_lock.
 */
static struct {
	unsigned int dev_handle;
	unsigned int dev_ino;
	unsigned int in_use;
} virt_irq_table[NR_IRQS];
static DEFINE_SPINLOCK(virt_irq_alloc_lock);

/* Allocate an unused virtual IRQ and bind it to (dev_handle, dev_ino).
 * Returns the new virtual IRQ number, or 0 when the table is exhausted.
 * The BUILD_BUG_ON guarantees every valid entry index fits the
 * unsigned char return type.
 */
unsigned char virt_irq_alloc(unsigned int dev_handle,
			     unsigned int dev_ino)
{
	unsigned long flags;
	unsigned char ent;

	BUILD_BUG_ON(NR_IRQS >= 256);

	spin_lock_irqsave(&virt_irq_alloc_lock, flags);

	/* Linear scan for a free slot, starting at 1 (0 is reserved). */
	for (ent = 1; ent < NR_IRQS; ent++) {
		if (!virt_irq_table[ent].in_use)
			break;
	}
	if (ent >= NR_IRQS) {
		printk(KERN_ERR "IRQ: Out of virtual IRQs.\n");
		ent = 0;
	} else {
		virt_irq_table[ent].dev_handle = dev_handle;
		virt_irq_table[ent].dev_ino = dev_ino;
		virt_irq_table[ent].in_use = 1;
	}

	spin_unlock_irqrestore(&virt_irq_alloc_lock, flags);

	return ent;
}

#ifdef CONFIG_PCI_MSI
/* Release a virtual IRQ previously handed out by virt_irq_alloc().
 * Only compiled for PCI MSI, currently its only caller.
 */
void virt_irq_free(unsigned int virt_irq)
{
	unsigned long flags;

	if (virt_irq >= NR_IRQS)
		return;

	spin_lock_irqsave(&virt_irq_alloc_lock, flags);

	virt_irq_table[virt_irq].in_use = 0;

	spin_unlock_irqrestore(&virt_irq_alloc_lock, flags);
}
#endif

/*
 * /proc/interrupts printing:
 */
int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	struct irqaction * action;
	unsigned long flags;

	/* Row 0 doubles as the header line listing the online CPUs. */
	if (i == 0) {
		seq_printf(p, "           ");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%d       ",j);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto skip;	/* unused irq: print nothing */
		seq_printf(p, "%3d: ",i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
		seq_printf(p, " %9s", irq_desc[i].chip->typename);
		seq_printf(p, "  %s", action->name);

		/* Any shared handlers on the same line, comma separated. */
		for (action=action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);

		seq_putc(p, '\n');
skip:
		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	}
	return 0;
}

/* Compute the IMAP target-id field routing interrupts to 'cpuid' on
 * sun4u.  The encoding depends on the bus: Starfire uses its own
 * translation, JBUS (Jalapeno/Serrano, identified via %ver) packs the
 * cpuid directly, Safari (other cheetah) splits it into agent/node
 * fields, and plain UPA shifts the cpuid into the TID field.
 */
static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
{
	unsigned int tid;

	if (this_is_starfire) {
		tid = starfire_translate(imap, cpuid);
		tid <<= IMAP_TID_SHIFT;
		tid &= IMAP_TID_UPA;
	} else {
		if (tlb_type == cheetah || tlb_type == cheetah_plus) {
			unsigned long ver;

			__asm__ ("rdpr %%ver, %0" : "=r" (ver));
			if ((ver >> 32UL) == __JALAPENO_ID ||
			    (ver >> 32UL) == __SERRANO_ID) {
				tid = cpuid << IMAP_TID_SHIFT;
				tid &= IMAP_TID_JBUS;
			} else {
				unsigned int a = cpuid & 0x1f;
				unsigned int n = (cpuid >> 5) & 0x1f;

				tid = ((a << IMAP_AID_SHIFT) |
				       (n << IMAP_NID_SHIFT));
				/* NOTE(review): stray second ';' below,
				 * harmless, kept verbatim. */
				tid &= (IMAP_AID_SAFARI |
					IMAP_NID_SAFARI);;
			}
		} else {
			tid = cpuid << IMAP_TID_SHIFT;
			tid &= IMAP_TID_UPA;
		}
	}

	return tid;
}

/* Per-irq chip data: the ICLR/IMAP register addresses for sun4u, plus
 * an optional pre-handler hook with its two opaque arguments.
 */
struct irq_handler_data {
	unsigned long	iclr;
	unsigned long	imap;

	void		(*pre_handler)(unsigned int, void *, void *);
	void		*arg1;
	void		*arg2;
};

#ifdef CONFIG_SMP
/* Pick the target CPU for virt_irq.  With the default all-CPUs
 * affinity, distribute irqs round-robin over the online CPUs using a
 * static rover; with an explicit affinity mask, take the first online
 * CPU in the mask, falling back to round-robin if none is online.
 */
static int irq_choose_cpu(unsigned int virt_irq)
{
	cpumask_t mask = irq_desc[virt_irq].affinity;
	int cpuid;

	if (cpus_equal(mask, CPU_MASK_ALL)) {
		static int irq_rover;
		static DEFINE_SPINLOCK(irq_rover_lock);
		unsigned long flags;

		/* Round-robin distribution... */
	do_round_robin:
		spin_lock_irqsave(&irq_rover_lock, flags);

		/* Skip to an online CPU, take it, then advance the
		 * rover to the next online CPU for the next caller. */
		while (!cpu_online(irq_rover)) {
			if (++irq_rover >= NR_CPUS)
				irq_rover = 0;
		}
		cpuid = irq_rover;
		do {
			if (++irq_rover >= NR_CPUS)
				irq_rover = 0;
		} while (!cpu_online(irq_rover));

		spin_unlock_irqrestore(&irq_rover_lock, flags);
	} else {
		cpumask_t tmp;

		cpus_and(tmp, cpu_online_map, mask);

		if (cpus_empty(tmp))
			goto do_round_robin;

		cpuid = first_cpu(tmp);
	}

	return cpuid;
}
#else
/* UP build: everything targets the (single) current CPU. */
static int irq_choose_cpu(unsigned int virt_irq)
{
	return real_hard_smp_processor_id();
}
#endif

/* sun4u irq_chip enable: program the IMAP register with the target id
 * for the chosen CPU and set the valid bit.
 */
static void sun4u_irq_enable(unsigned int virt_irq)
{
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);

	if (likely(data)) {
		unsigned long cpuid, imap, val;
		unsigned int tid;

		cpuid = irq_choose_cpu(virt_irq);
		imap = data->imap;

		tid = sun4u_compute_tid(imap, cpuid);

		val = upa_readq(imap);
		/* Clear every bus flavor's TID field before inserting
		 * the freshly computed one. */
		val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
			 IMAP_AID_SAFARI | IMAP_NID_SAFARI);
		val |= tid | IMAP_VALID;
		upa_writeq(val, imap);
	}
}

/* sun4u set_affinity: re-running enable re-evaluates irq_choose_cpu()
 * against the new affinity mask and reprograms IMAP accordingly.
 */
static void sun4u_set_affinity(unsigned int virt_irq, cpumask_t mask)
{
	sun4u_irq_enable(virt_irq);
}

/* sun4u irq_chip disable: clear the IMAP valid bit. */
static void sun4u_irq_disable(unsigned int virt_irq)
{
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);

	if (likely(data)) {
		unsigned long imap = data->imap;
		unsigned long tmp = upa_readq(imap);

		tmp &= ~IMAP_VALID;
		upa_writeq(tmp, imap);
	}
}

/* sun4u irq_chip eoi: idle the ICLR register so the source can send
 * again, unless the irq was disabled or is still marked in-progress.
 */
static void sun4u_irq_eoi(unsigned int virt_irq)
{
	struct irq_handler_data *data = get_irq_chip_data(virt_irq);
	struct irq_desc *desc = irq_desc + virt_irq;

	if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		return;

	if (likely(data))
		upa_writeq(ICLR_IDLE, data->iclr);
}

/* sun4v irq_chip enable: via hypervisor calls, target the INO at the
 * chosen CPU, reset its state to idle, then enable it.  Failures are
 * only reported, not propagated (irq_chip enable returns void).
 */
static void sun4v_irq_enable(unsigned int virt_irq)
{
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;
	unsigned long cpuid = irq_choose_cpu(virt_irq);
	int err;

	err = sun4v_intr_settarget(ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
		       "err(%d)\n", ino, cpuid, err);
	err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setstate(%x): "
		       "err(%d)\n", ino, err);
	err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setenabled(%x): err(%d)\n",
		       ino, err);
}

/* sun4v set_affinity: retarget the INO at the newly chosen CPU. */
static void sun4v_set_affinity(unsigned int virt_irq, cpumask_t mask)
{
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;
	unsigned long cpuid = irq_choose_cpu(virt_irq);
	int err;

	err = sun4v_intr_settarget(ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
		       "err(%d)\n", ino, cpuid, err);
}

/* sun4v irq_chip disable: ask the hypervisor to disable the INO. */
static void sun4v_irq_disable(unsigned int virt_irq)
{
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;
	int err;

	err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setenabled(%x): "
		       "err(%d)\n", ino, err);
}

/* sun4v irq_chip eoi: return the INO to the idle state, with the same
 * disabled/in-progress short-circuit as sun4u_irq_eoi().
 */
static void sun4v_irq_eoi(unsigned int virt_irq)
{
	unsigned int ino = virt_irq_table[virt_irq].dev_ino;
	struct irq_desc *desc = irq_desc + virt_irq;
	int err;

	if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		return;

	err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_intr_setstate(%x): "
		       "err(%d)\n", ino, err);
}

/* sun4v virtual-interrupt (vintr) enable: same target/idle/enable
 * sequence as sun4v_irq_enable(), but addressed by the
 * (dev_handle, dev_ino) pair instead of a bare INO.
 */
static void sun4v_virq_enable(unsigned int virt_irq)
{
	unsigned long cpuid, dev_handle, dev_ino;
	int err;

	cpuid = irq_choose_cpu(virt_irq);

	dev_handle = virt_irq_table[virt_irq].dev_handle;
	dev_ino = virt_irq_table[virt_irq].dev_ino;

	err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
		       "err(%d)\n",
		       dev_handle, dev_ino, cpuid, err);
	err = sun4v_vintr_set_state(dev_handle, dev_ino,
				    HV_INTR_STATE_IDLE);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_STATE_IDLE): err(%d)\n",
		       dev_handle, dev_ino, err);
	err = sun4v_vintr_set_valid(dev_handle, dev_ino,
				    HV_INTR_ENABLED);
	/* NOTE(review): the failing call here is sun4v_vintr_set_valid,
	 * but the message below says "set_state" -- misleading log text
	 * kept verbatim (changing it is a behavior change). */
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_ENABLED): err(%d)\n",
		       dev_handle, dev_ino, err);
}

/* sun4v vintr set_affinity: retarget the virtual interrupt at the
 * newly chosen CPU via the hypervisor.
 */
static void sun4v_virt_set_affinity(unsigned int virt_irq, cpumask_t mask)
{
	unsigned long cpuid, dev_handle, dev_ino;
	int err;

	cpuid = irq_choose_cpu(virt_irq);

	dev_handle = virt_irq_table[virt_irq].dev_handle;
	dev_ino = virt_irq_table[virt_irq].dev_ino;

	err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
		       "err(%d)\n",
		       dev_handle, dev_ino, cpuid, err);
}

/* sun4v vintr disable: mark the virtual interrupt invalid. */
static void sun4v_virq_disable(unsigned int virt_irq)
{
	unsigned long dev_handle, dev_ino;
	int err;

	dev_handle = virt_irq_table[virt_irq].dev_handle;
	dev_ino = virt_irq_table[virt_irq].dev_ino;

	err = sun4v_vintr_set_valid(dev_handle, dev_ino,
				    HV_INTR_DISABLED);
	/* NOTE(review): message names "set_state" although the call is
	 * sun4v_vintr_set_valid -- kept verbatim, see note above. */
	if (err != HV_EOK)
		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
		       "HV_INTR_DISABLED): err(%d)\n",
		       dev_handle, dev_ino, err);
}

/* sun4v vintr eoi -- NOTE(review): this function is truncated in the
 * visible chunk (page 1 of 2); the remainder lives past this view.
 */
static void sun4v_virq_eoi(unsigned int virt_irq)
{
	struct irq_desc *desc = irq_desc + virt_irq;
	unsigned long dev_handle, dev_ino;
	int err;

	if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
		return;

	dev_handle = virt_irq_table[virt_irq].dev_handle;

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?