irq.c

From the "Youlong 2410 Linux 2.6.8 kernel source" collection · C code · 1,262 lines total · page 1 of 3

C
1,262
Font size
	/* NOTE(review): tail of an interrupt wrapper (floppy) whose opening
	 * lines fall outside this chunk -- kept as-is.  It marks the bucket
	 * busy around the handler call, idles the interrupt clear register,
	 * and leaves the softirq/irq context.
	 */
	bucket->flags |= IBF_INPROGRESS;
	floppy_interrupt(irq, dev_cookie, regs);
	upa_writel(ICLR_IDLE, bucket->iclr);
	bucket->flags &= ~IBF_INPROGRESS;
	irq_exit();
}
#endif

/* The following assumes that the branch lies before the place we
 * are branching to.  This is the case for a trap vector...
 * You have been warned.
 */
#define SPARC_BRANCH(dest_addr, inst_addr) \
          (0x10800000 | ((((dest_addr)-(inst_addr))>>2)&0x3fffff))

#define SPARC_NOP (0x01000000)

/* Patch the TL0 trap table entry for PIL 'cpu_irq' so the vector
 * branches directly into 'handler', bypassing the generic IRQ
 * dispatch path.  Two instructions are written: a branch and a
 * delay-slot nop.
 * NOTE(review): 0x820 / the 32-byte (1 << 5) entry stride must match
 * the sparc64 trap table layout -- confirm against ttable.S.
 */
static void install_fast_irq(unsigned int cpu_irq,
			     irqreturn_t (*handler)(int, void *, struct pt_regs *))
{
	extern unsigned long sparc64_ttable_tl0;
	unsigned long ttent = (unsigned long) &sparc64_ttable_tl0;
	unsigned int *insns;

	ttent += 0x820;
	ttent += (cpu_irq - 1) << 5;
	insns = (unsigned int *) ttent;
	insns[0] = SPARC_BRANCH(((unsigned long) handler),
				((unsigned long)&insns[0]));
	insns[1] = SPARC_NOP;
	/* Order the stores, then flush the patched words so instruction
	 * fetch sees the new code.
	 */
	__asm__ __volatile__("membar #StoreStore; flush %0" : : "r" (ttent));
}

/* Register a "fast" interrupt handler entered straight from the trap
 * table (see install_fast_irq).  Fast IRQs may not be shared: SA_SHIRQ
 * in either the existing or requested flags panics.
 *
 * Returns 0 on success, -EINVAL for an old-style irq cookie or NULL
 * handler, -EBUSY when the PIL is reserved (0 or 14) or already owned,
 * -ENOMEM when the irqaction cannot be allocated.
 * Note: dev_id is accepted for signature symmetry but not stored
 * (action->dev_id is set to NULL below).
 */
int request_fast_irq(unsigned int irq,
		     irqreturn_t (*handler)(int, void *, struct pt_regs *),
		     unsigned long irqflags, const char *name, void *dev_id)
{
	struct irqaction *action;
	struct ino_bucket *bucket = __bucket(irq);
	unsigned long flags;

	/* No pil0 dummy buckets allowed here. 
 */
	if (bucket < &ivector_table[0] ||
	    bucket >= &ivector_table[NUM_IVECS]) {
		unsigned int *caller;

		/* %i7 is our caller's return address; report it so the
		 * old-style registration site can be tracked down.
		 */
		__asm__ __volatile__("mov %%i7, %0" : "=r" (caller));
		printk(KERN_CRIT "request_fast_irq: Old style IRQ registry attempt "
		       "from %p, irq %08x.\n", caller, irq);
		return -EINVAL;
	}

	if (!handler)
		return -EINVAL;

	if ((bucket->pil == 0) || (bucket->pil == 14)) {
		printk("request_fast_irq: Trying to register shared IRQ 0 or 14.\n");
		return -EBUSY;
	}

	spin_lock_irqsave(&irq_action_lock, flags);

	action = *(bucket->pil + irq_action);
	if (action) {
		if (action->flags & SA_SHIRQ)
			panic("Trying to register fast irq when already shared.\n");
		if (irqflags & SA_SHIRQ)
			panic("Trying to register fast irq as shared.\n");

		printk("request_fast_irq: Trying to register yet already owned.\n");
		spin_unlock_irqrestore(&irq_action_lock, flags);
		return -EBUSY;
	}

	/*
	 * We do not check for SA_SAMPLE_RANDOM in this path. Neither do we
	 * support smp intr affinity in this path.
	 */
	if (irqflags & SA_STATIC_ALLOC) {
		if (static_irq_count < MAX_STATIC_ALLOC)
			action = &static_irqaction[static_irq_count++];
		else
			printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed "
			       "using kmalloc\n", bucket->pil, name);
	}
	/* Fall back to the heap when no static slot was taken. */
	if (action == NULL)
		action = (struct irqaction *)kmalloc(sizeof(struct irqaction),
						     GFP_ATOMIC);
	if (!action) {
		spin_unlock_irqrestore(&irq_action_lock, flags);
		return -ENOMEM;
	}
	install_fast_irq(bucket->pil, handler);

	bucket->irq_info = action;
	bucket->flags |= IBF_ACTIVE;

	action->handler = handler;
	action->flags = irqflags;
	action->dev_id = NULL;
	action->name = name;
	action->next = NULL;
	put_ino_in_irqaction(action, irq);
	put_smpaff_in_irqaction(action, CPU_MASK_NONE);

	*(bucket->pil + irq_action) = action;

	enable_irq(irq);

	spin_unlock_irqrestore(&irq_action_lock, flags);

#ifdef CONFIG_SMP
	distribute_irqs();
#endif
	return 0;
}

/* We really don't need these at all on the Sparc.  We only have
 * stubs here because they are exported to modules.
*/unsigned long probe_irq_on(void){	return 0;}EXPORT_SYMBOL(probe_irq_on);int probe_irq_off(unsigned long mask){	return 0;}EXPORT_SYMBOL(probe_irq_off);#ifdef CONFIG_SMPstatic int retarget_one_irq(struct irqaction *p, int goal_cpu){	struct ino_bucket *bucket = get_ino_in_irqaction(p) + ivector_table;	unsigned long imap = bucket->imap;	unsigned int tid;	while (!cpu_online(goal_cpu)) {		if (++goal_cpu >= NR_CPUS)			goal_cpu = 0;	}	if (tlb_type == cheetah || tlb_type == cheetah_plus) {		tid = goal_cpu << 26;		tid &= IMAP_AID_SAFARI;	} else if (this_is_starfire == 0) {		tid = goal_cpu << 26;		tid &= IMAP_TID_UPA;	} else {		tid = (starfire_translate(imap, goal_cpu) << 26);		tid &= IMAP_TID_UPA;	}	upa_writel(tid | IMAP_VALID, imap);	while (!cpu_online(goal_cpu)) {		if (++goal_cpu >= NR_CPUS)			goal_cpu = 0;	}	return goal_cpu;}/* Called from request_irq. */static void distribute_irqs(void){	unsigned long flags;	int cpu, level;	spin_lock_irqsave(&irq_action_lock, flags);	cpu = 0;	/*	 * Skip the timer at [0], and very rare error/power intrs at [15].	 * Also level [12], it causes problems on Ex000 systems.	 */	for (level = 1; level < NR_IRQS; level++) {		struct irqaction *p = irq_action[level];		if (level == 12) continue;		while(p) {			cpu = retarget_one_irq(p, cpu);			p = p->next;		}	}	spin_unlock_irqrestore(&irq_action_lock, flags);}#endifstruct sun5_timer *prom_timers;static u64 prom_limit0, prom_limit1;static void map_prom_timers(void){	unsigned int addr[3];	int tnode, err;	/* PROM timer node hangs out in the top level of device siblings... */	tnode = prom_finddevice("/counter-timer");	/* Assume if node is not present, PROM uses different tick mechanism	 * which we should not care about.	 */	if (tnode == 0 || tnode == -1) {		prom_timers = (struct sun5_timer *) 0;		return;	}	/* If PROM is really using this, it must be mapped by him. 
*/	err = prom_getproperty(tnode, "address", (char *)addr, sizeof(addr));	if (err == -1) {		prom_printf("PROM does not have timer mapped, trying to continue.\n");		prom_timers = (struct sun5_timer *) 0;		return;	}	prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);}static void kill_prom_timer(void){	if (!prom_timers)		return;	/* Save them away for later. */	prom_limit0 = prom_timers->limit0;	prom_limit1 = prom_timers->limit1;	/* Just as in sun4c/sun4m PROM uses timer which ticks at IRQ 14.	 * We turn both off here just to be paranoid.	 */	prom_timers->limit0 = 0;	prom_timers->limit1 = 0;	/* Wheee, eat the interrupt packet too... */	__asm__ __volatile__("	mov	0x40, %%g2\n""	ldxa	[%%g0] %0, %%g1\n""	ldxa	[%%g2] %1, %%g1\n""	stxa	%%g0, [%%g0] %0\n""	membar	#Sync\n"	: /* no outputs */	: "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R)	: "g1", "g2");}void enable_prom_timer(void){	if (!prom_timers)		return;	/* Set it to whatever was there before. */	prom_timers->limit1 = prom_limit1;	prom_timers->count1 = 0;	prom_timers->limit0 = prom_limit0;	prom_timers->count0 = 0;}void init_irqwork_curcpu(void){	register struct irq_work_struct *workp asm("o2");	unsigned long tmp;	int cpu = hard_smp_processor_id();	memset(__irq_work + cpu, 0, sizeof(*workp));	/* Make sure we are called with PSTATE_IE disabled.  */	__asm__ __volatile__("rdpr	%%pstate, %0\n\t"			     : "=r" (tmp));	if (tmp & PSTATE_IE) {		prom_printf("BUG: init_irqwork_curcpu() called with "			    "PSTATE_IE enabled, bailing.\n");		__asm__ __volatile__("mov	%%i7, %0\n\t"				     : "=r" (tmp));		prom_printf("BUG: Called from %lx\n", tmp);		prom_halt();	}	/* Set interrupt globals.  */	workp = &__irq_work[cpu];	__asm__ __volatile__(	"rdpr	%%pstate, %0\n\t"	"wrpr	%0, %1, %%pstate\n\t"	"mov	%2, %%g6\n\t"	"wrpr	%0, 0x0, %%pstate\n\t"	: "=&r" (tmp)	: "i" (PSTATE_IG), "r" (workp));}/* Only invoked on boot processor. 
*/void __init init_IRQ(void){	map_prom_timers();	kill_prom_timer();	memset(&ivector_table[0], 0, sizeof(ivector_table));	/* We need to clear any IRQ's pending in the soft interrupt	 * registers, a spurious one could be left around from the	 * PROM timer which we just disabled.	 */	clear_softint(get_softint());	/* Now that ivector table is initialized, it is safe	 * to receive IRQ vector traps.  We will normally take	 * one or two right now, in case some device PROM used	 * to boot us wants to speak to us.  We just ignore them.	 */	__asm__ __volatile__("rdpr	%%pstate, %%g1\n\t"			     "or	%%g1, %0, %%g1\n\t"			     "wrpr	%%g1, 0x0, %%pstate"			     : /* No outputs */			     : "i" (PSTATE_IE)			     : "g1");}static struct proc_dir_entry * root_irq_dir;static struct proc_dir_entry * irq_dir [NUM_IVECS];#ifdef CONFIG_SMPstatic int irq_affinity_read_proc (char *page, char **start, off_t off,			int count, int *eof, void *data){	struct ino_bucket *bp = ivector_table + (long)data;	struct irqaction *ap = bp->irq_info;	cpumask_t mask;	int len;	mask = get_smpaff_in_irqaction(ap);	if (cpus_empty(mask))		mask = cpu_online_map;	len = cpumask_scnprintf(page, count, mask);	if (count - len < 2)		return -EINVAL;	len += sprintf(page + len, "\n");	return len;}static inline void set_intr_affinity(int irq, cpumask_t hw_aff){	struct ino_bucket *bp = ivector_table + irq;	/* Users specify affinity in terms of hw cpu ids.	 * As soon as we do this, handler_irq() might see and take action.	 */	put_smpaff_in_irqaction((struct irqaction *)bp->irq_info, hw_aff);	/* Migration is simply done by the next cpu to service this	 * interrupt.	 
*/}static int irq_affinity_write_proc (struct file *file, const char __user *buffer,					unsigned long count, void *data){	int irq = (long) data, full_count = count, err;	cpumask_t new_value;	err = cpumask_parse(buffer, count, new_value);	/*	 * Do not allow disabling IRQs completely - it's a too easy	 * way to make the system unusable accidentally :-) At least	 * one online CPU still has to be targeted.	 */	cpus_and(new_value, new_value, cpu_online_map);	if (cpus_empty(new_value))		return -EINVAL;	set_intr_affinity(irq, new_value);	return full_count;}#endif#define MAX_NAMELEN 10static void register_irq_proc (unsigned int irq){	char name [MAX_NAMELEN];	if (!root_irq_dir || irq_dir[irq])		return;	memset(name, 0, MAX_NAMELEN);	sprintf(name, "%x", irq);	/* create /proc/irq/1234 */	irq_dir[irq] = proc_mkdir(name, root_irq_dir);#ifdef CONFIG_SMP	/* XXX SMP affinity not supported on starfire yet. */	if (this_is_starfire == 0) {		struct proc_dir_entry *entry;		/* create /proc/irq/1234/smp_affinity */		entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);		if (entry) {			entry->nlink = 1;			entry->data = (void *)(long)irq;			entry->read_proc = irq_affinity_read_proc;			entry->write_proc = irq_affinity_write_proc;		}	}#endif}void init_irq_proc (void){	/* create /proc/irq */	root_irq_dir = proc_mkdir("irq", NULL);}

⌨️ Keyboard shortcuts

Copy code: Ctrl + C
Search code: Ctrl + F
Full-screen mode: F11
Increase font size: Ctrl + =
Decrease font size: Ctrl + -
Show shortcuts: ?