irq.c

来自「优龙2410linux2.6.8内核源代码」· C语言 代码 · 共 1,017 行 · 第 1/2 页

C
1,017
字号
			/*
			 * This is not a real spurious interrupt, we
			 * have to EOI it, so we jump to out
			 */
			mask_irq(irq);
			goto out;
		}
		status &= ~IRQ_PENDING; /* we commit to handling */
		status |= IRQ_INPROGRESS; /* we are handling it */
	}
	desc->status = status;

	/*
	 * If there is no IRQ handler or it was disabled, exit early.
	 * Since we set PENDING, if another processor is handling
	 * a different instance of this same irq, the other processor
	 * will take care of it.
	 */
	if (unlikely(!action))
		goto out;

	/*
	 * Edge triggered interrupts need to remember
	 * pending events.
	 * This applies to any hw interrupts that allow a second
	 * instance of the same irq to arrive while we are in do_IRQ
	 * or in the handler. But the code here only handles the _second_
	 * instance of the irq, not the third or fourth. So it is mostly
	 * useful for irq hardware that does not mask cleanly in an
	 * SMP environment.
	 */
	for (;;) {
		/* drop the desc lock while the handler runs */
		spin_unlock(&desc->lock);
#ifdef CONFIG_IRQSTACKS
		/* Switch to the per-cpu irq stack to handle this */
		curtp = current_thread_info();
		irqtp = hardirq_ctx[smp_processor_id()];
		if (curtp != irqtp) {
			irqtp->task = curtp->task;
			irqtp->flags = 0;
			action_ret = call_handle_irq_event(irq, regs, action, irqtp);
			irqtp->task = NULL;
			/* propagate thread_info flags set while on the irq stack */
			if (irqtp->flags)
				set_bits(irqtp->flags, &curtp->flags);
		} else
#endif
			action_ret = handle_irq_event(irq, regs, action);
		spin_lock(&desc->lock);
		if (!noirqdebug)
			note_interrupt(irq, desc, action_ret);
		/* replay the irq if another instance arrived while we ran */
		if (likely(!(desc->status & IRQ_PENDING)))
			break;
		desc->status &= ~IRQ_PENDING;
	}
out:
	desc->status &= ~IRQ_INPROGRESS;
	/*
	 * The ->end() handler has to deal with interrupts which got
	 * disabled while the handler was running.
*/
	/* finish the flow: prefer ->end(), fall back to ->enable() */
	if (desc->handler) {
		if (desc->handler->end)
			desc->handler->end(irq);
		else if (desc->handler->enable)
			desc->handler->enable(irq);
	}
	spin_unlock(&desc->lock);
}

#ifdef CONFIG_PPC_ISERIES
/*
 * Top-level interrupt entry for iSeries: drains the LPAR event queue
 * (and pending IPIs under SMP) instead of asking a PIC for an irq number.
 */
void do_IRQ(struct pt_regs *regs)
{
	struct paca_struct *lpaca;
	struct ItLpQueue *lpq;

	irq_enter();

#ifdef CONFIG_DEBUG_STACKOVERFLOW
	/* Debugging check for stack overflow: is there less than 4KB free? */
	{
		long sp;

		sp = __get_SP() & (THREAD_SIZE-1);

		if (unlikely(sp < (sizeof(struct thread_info) + 4096))) {
			printk("do_IRQ: stack overflow: %ld\n",
				sp - sizeof(struct thread_info));
			dump_stack();
		}
	}
#endif

	lpaca = get_paca();
#ifdef CONFIG_SMP
	/* a nonzero IPI count means another cpu sent us a message */
	if (lpaca->lppaca.xIntDword.xFields.xIpiCnt) {
		lpaca->lppaca.xIntDword.xFields.xIpiCnt = 0;
		iSeries_smp_message_recv(regs);
	}
#endif /* CONFIG_SMP */
	lpq = lpaca->lpqueue_ptr;
	if (lpq && ItLpQueue_isLpIntPending(lpq))
		lpevent_count += ItLpQueue_process(lpq, regs);

	irq_exit();

	if (lpaca->lppaca.xIntDword.xFields.xDecrInt) {
		lpaca->lppaca.xIntDword.xFields.xDecrInt = 0;
		/* Signal a fake decrementer interrupt */
		timer_interrupt(regs);
	}
}

#else	/* CONFIG_PPC_ISERIES */

/*
 * Top-level interrupt entry for non-iSeries: ask the platform PIC for
 * the irq number and dispatch it, counting spurious interrupts.
 */
void do_IRQ(struct pt_regs *regs)
{
	int irq;

	irq_enter();

#ifdef CONFIG_DEBUG_STACKOVERFLOW
	/* Debugging check for stack overflow: is there less than 4KB free? */
	{
		long sp;

		sp = __get_SP() & (THREAD_SIZE-1);

		if (unlikely(sp < (sizeof(struct thread_info) + 4096))) {
			printk("do_IRQ: stack overflow: %ld\n",
				sp - sizeof(struct thread_info));
			dump_stack();
		}
	}
#endif

	irq = ppc_md.get_irq(regs);

	if (irq >= 0)
		ppc_irq_dispatch_handler(regs, irq);
	else
		/* That's not SMP safe ... but who cares ?
*/
		ppc_spurious_interrupts++;

	irq_exit();
}
#endif	/* CONFIG_PPC_ISERIES */

/* IRQ autoprobing is not supported on this platform: all three
 * probe_irq_* entry points are stubs that report "nothing found". */
unsigned long probe_irq_on (void)
{
	return 0;
}

EXPORT_SYMBOL(probe_irq_on);

int probe_irq_off (unsigned long irqs)
{
	return 0;
}

EXPORT_SYMBOL(probe_irq_off);

unsigned int probe_irq_mask(unsigned long irqs)
{
	return 0;
}

/*
 * One-time interrupt initialization: hand off to the platform's
 * init_IRQ hook and set up the per-cpu irq stacks.  The static
 * 'once' guard makes repeat calls harmless.
 */
void __init init_IRQ(void)
{
	static int once = 0;

	if (once)
		return;

	once++;

	ppc_md.init_IRQ();
	irq_ctx_init();
}

/* /proc/irq directory and per-irq subdirectory/entry handles */
static struct proc_dir_entry * root_irq_dir;
static struct proc_dir_entry * irq_dir [NR_IRQS];
static struct proc_dir_entry * smp_affinity_entry [NR_IRQS];

/* Protected by get_irq_desc(irq)->lock. */
#ifdef CONFIG_IRQ_ALL_CPUS
cpumask_t irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_ALL };
#else  /* CONFIG_IRQ_ALL_CPUS */
cpumask_t irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_NONE };
#endif /* CONFIG_IRQ_ALL_CPUS */

/*
 * /proc/irq/N/smp_affinity read handler: print the irq's affinity mask
 * (data carries the irq number) followed by a newline.
 */
static int irq_affinity_read_proc (char *page, char **start, off_t off,
			int count, int *eof, void *data)
{
	int len = cpumask_scnprintf(page, count, irq_affinity[(long)data]);
	/* need room for the trailing "\n" + NUL */
	if (count - len < 2)
		return -EINVAL;
	len += sprintf(page + len, "\n");
	return len;
}

/*
 * /proc/irq/N/smp_affinity write handler: parse a cpumask from userspace
 * and retarget the irq via the controller's set_affinity hook.
 */
static int irq_affinity_write_proc (struct file *file, const char __user *buffer,
					unsigned long count, void *data)
{
	unsigned int irq = (long)data;
	irq_desc_t *desc = get_irq_desc(irq);
	int ret;
	cpumask_t new_value, tmp;

	if (!desc->handler->set_affinity)
		return -EIO;

	ret = cpumask_parse(buffer, count, new_value);
	if (ret != 0)
		return ret;

	/*
	 * We check for CPU_MASK_ALL in xics to send irqs to all cpus.
	 * In some cases CPU_MASK_ALL is smaller than the cpumask (eg
	 * NR_CPUS == 32 and cpumask is a long), so we mask it here to
	 * be consistent.
	 */
	cpus_and(new_value, new_value, CPU_MASK_ALL);

	/*
	 * Grab lock here so cpu_online_map can't change, and also
	 * protect irq_affinity[].
	 */
	spin_lock(&desc->lock);

	/*
	 * Do not allow disabling IRQs completely - it's a too easy
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
*/
	cpus_and(tmp, new_value, cpu_online_map);
	if (cpus_empty(tmp)) {
		ret = -EINVAL;
		goto out;
	}

	irq_affinity[irq] = new_value;
	desc->handler->set_affinity(irq, new_value);
	ret = count;

out:
	spin_unlock(&desc->lock);
	return ret;
}

/*
 * /proc/irq/prof_cpu_mask read handler: print the cpumask pointed to
 * by 'data', newline-terminated.
 */
static int prof_cpu_mask_read_proc (char *page, char **start, off_t off,
			int count, int *eof, void *data)
{
	int len = cpumask_scnprintf(page, count, *(cpumask_t *)data);
	if (count - len < 2)
		return -EINVAL;
	len += sprintf(page + len, "\n");
	return len;
}

/*
 * /proc/irq/prof_cpu_mask write handler: parse a cpumask and store it;
 * on iSeries also flip per-cpu profiling on/off to match the new mask.
 */
static int prof_cpu_mask_write_proc (struct file *file, const char __user *buffer,
					unsigned long count, void *data)
{
	cpumask_t *mask = (cpumask_t *)data;
	unsigned long full_count = count, err;
	cpumask_t new_value;

	err = cpumask_parse(buffer, count, new_value);
	if (err)
		return err;

	*mask = new_value;

#ifdef CONFIG_PPC_ISERIES
	{
		unsigned i;
		for (i=0; i<NR_CPUS; ++i) {
			if ( paca[i].prof_buffer && cpu_isset(i, new_value) )
				paca[i].prof_enabled = 1;
			else
				paca[i].prof_enabled = 0;
		}
	}
#endif

	return full_count;
}

#define MAX_NAMELEN 10

/*
 * Create /proc/irq/<irq>/ and its smp_affinity entry for one irq.
 * No-op if the proc root is missing, the irq has no controller, or the
 * directory already exists.
 */
static void register_irq_proc (unsigned int irq)
{
	struct proc_dir_entry *entry;
	char name [MAX_NAMELEN];

	if (!root_irq_dir || (irq_desc[irq].handler == NULL) || irq_dir[irq])
		return;

	/* NOTE(review): "%d" of irq must fit in MAX_NAMELEN-1 chars —
	 * holds as long as NR_IRQS stays below 9 digits; verify. */
	memset(name, 0, MAX_NAMELEN);
	sprintf(name, "%d", irq);

	/* create /proc/irq/1234 */
	irq_dir[irq] = proc_mkdir(name, root_irq_dir);

	/* create /proc/irq/1234/smp_affinity */
	entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);

	if (entry) {
		entry->nlink = 1;
		entry->data = (void *)(long)irq;
		entry->read_proc = irq_affinity_read_proc;
		entry->write_proc = irq_affinity_write_proc;
	}

	smp_affinity_entry[irq] = entry;
}

/* NOTE(review): this unsigned long is read/written through a
 * (cpumask_t *) cast by the prof_cpu_mask handlers above; if cpumask_t
 * is wider than unsigned long (NR_CPUS > BITS_PER_LONG) that access
 * overruns this object — confirm NR_CPUS configuration. */
unsigned long prof_cpu_mask = -1;

/*
 * Build the /proc/irq tree: the root directory, the prof_cpu_mask
 * entry, and one subdirectory per irq that has a controller.
 */
void init_irq_proc (void)
{
	struct proc_dir_entry *entry;
	int i;

	/* create /proc/irq */
	root_irq_dir = proc_mkdir("irq", NULL);

	/* create /proc/irq/prof_cpu_mask */
	entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);

	if (!entry)
		return;

	entry->nlink = 1;
	entry->data = (void *)&prof_cpu_mask;
	entry->read_proc =
prof_cpu_mask_read_proc;
	entry->write_proc = prof_cpu_mask_write_proc;

	/*
	 * Create entries for all existing IRQs.
	 */
	for_each_irq(i) {
		if (get_irq_desc(i)->handler == NULL)
			continue;
		register_irq_proc(i);
	}
}

/* Placeholder handler for shared-irq registration: claims nothing. */
irqreturn_t no_action(int irq, void *dev, struct pt_regs *regs)
{
	return IRQ_NONE;
}

#ifndef CONFIG_PPC_ISERIES
/*
 * Virtual IRQ mapping code, used on systems with XICS interrupt controllers.
 */

/* sentinel: slot in virt_irq_to_real_map is unused */
#define UNDEFINED_IRQ 0xffffffff
unsigned int virt_irq_to_real_map[NR_IRQS];

/*
 * Don't use virtual irqs 0, 1, 2 for devices.
 * The pcnet32 driver considers interrupt numbers < 2 to be invalid,
 * and 2 is the XICS IPI interrupt.
 * We limit virtual irqs to 17 less than NR_IRQS so that when we
 * offset them by 16 (to reserve the first 16 for ISA interrupts)
 * we don't end up with an interrupt number >= NR_IRQS.
 */
#define MIN_VIRT_IRQ	3
#define MAX_VIRT_IRQ	(NR_IRQS - NUM_ISA_INTERRUPTS - 1)
#define NR_VIRT_IRQS	(MAX_VIRT_IRQ - MIN_VIRT_IRQ + 1)

/* Mark every virtual-irq slot as unused. */
void
virt_irq_init(void)
{
	int i;
	for (i = 0; i < NR_IRQS; i++)
		virt_irq_to_real_map[i] = UNDEFINED_IRQ;
}

/* Create a mapping for a real_irq if it doesn't already exist.
 * Return the virtual irq as a convenience.
*/
int virt_irq_create_mapping(unsigned int real_irq)
{
	unsigned int virq, first_virq;
	static int warned;

	if (naca->interrupt_controller == IC_OPEN_PIC)
		return real_irq;	/* no mapping for openpic (for now) */

	/* don't map interrupts < MIN_VIRT_IRQ */
	if (real_irq < MIN_VIRT_IRQ) {
		/* identity-map low irqs at their own index */
		virt_irq_to_real_map[real_irq] = real_irq;
		return real_irq;
	}

	/* map to a number between MIN_VIRT_IRQ and MAX_VIRT_IRQ */
	virq = real_irq;
	if (virq > MAX_VIRT_IRQ)
		virq = (virq % NR_VIRT_IRQS) + MIN_VIRT_IRQ;

	/* search for this number or a free slot (linear probing with
	 * wrap-around; first_virq detects a full cycle) */
	first_virq = virq;
	while (virt_irq_to_real_map[virq] != UNDEFINED_IRQ) {
		if (virt_irq_to_real_map[virq] == real_irq)
			return virq;
		if (++virq > MAX_VIRT_IRQ)
			virq = MIN_VIRT_IRQ;
		if (virq == first_virq)
			goto nospace;	/* oops, no free slots */
	}

	virt_irq_to_real_map[virq] = real_irq;
	return virq;

 nospace:
	if (!warned) {
		printk(KERN_CRIT "Interrupt table is full\n");
		printk(KERN_CRIT "Increase NR_IRQS (currently %d) "
		       "in your kernel sources and rebuild.\n", NR_IRQS);
		warned = 1;
	}
	return NO_IRQ;
}

/*
 * In most cases will get a hit on the very first slot checked in the
 * virt_irq_to_real_map.  Only when there are a large number of
 * IRQs will this be expensive.
*/unsigned int real_irq_to_virt_slowpath(unsigned int real_irq){	unsigned int virq;	unsigned int first_virq;	virq = real_irq;	if (virq > MAX_VIRT_IRQ)		virq = (virq % NR_VIRT_IRQS) + MIN_VIRT_IRQ;	first_virq = virq;	do {		if (virt_irq_to_real_map[virq] == real_irq)			return virq;		virq++;		if (virq >= MAX_VIRT_IRQ)			virq = 0;	} while (first_virq != virq);	return NO_IRQ;}#endif /* CONFIG_PPC_ISERIES */#ifdef CONFIG_IRQSTACKSstruct thread_info *softirq_ctx[NR_CPUS];struct thread_info *hardirq_ctx[NR_CPUS];void irq_ctx_init(void){	struct thread_info *tp;	int i;	for (i = 0; i < NR_CPUS; i++) {		memset((void *)softirq_ctx[i], 0, THREAD_SIZE);		tp = softirq_ctx[i];		tp->cpu = i;		tp->preempt_count = SOFTIRQ_OFFSET;		memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);		tp = hardirq_ctx[i];		tp->cpu = i;		tp->preempt_count = HARDIRQ_OFFSET;	}}void do_softirq(void){	unsigned long flags;	struct thread_info *curtp, *irqtp;	if (in_interrupt())		return;	local_irq_save(flags);	if (local_softirq_pending()) {		curtp = current_thread_info();		irqtp = softirq_ctx[smp_processor_id()];		irqtp->task = curtp->task;		call_do_softirq(irqtp);		irqtp->task = NULL;	}	local_irq_restore(flags);}EXPORT_SYMBOL(do_softirq);#endif /* CONFIG_IRQSTACKS */

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?