irq.c

From the linux-2.4.29 operating system source code
Language: C
Page 1 of 3
			spin_unlock_irqrestore(&desc->lock,flags);
			return -EBUSY;
		}

		/* add new interrupt at end of irq queue */
		do {
			p = &old->next;
			old = *p;
		} while (old);
		shared = 1;
	}

	*p = new;

	if (!shared) {
		desc->depth = 0;
		desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT | IRQ_WAITING);
		unmask_irq(irq);
	}
	spin_unlock_irqrestore(&desc->lock,flags);

	register_irq_proc(irq);
	return 0;
}

/* This could be promoted to a real free_irq() ... */
static int
do_free_irq(int irq, void* dev_id)
{
	irq_desc_t *desc = irqdesc(irq);
	struct irqaction **p;
	unsigned long flags;

	spin_lock_irqsave(&desc->lock,flags);
	p = &desc->action;
	for (;;) {
		struct irqaction * action = *p;
		if (action) {
			struct irqaction **pp = p;
			p = &action->next;
			if (action->dev_id != dev_id)
				continue;

			/* Found it - now remove it from the list of entries */
			*pp = action->next;
			if (!desc->action) {
				desc->status |= IRQ_DISABLED;
				mask_irq(irq);
			}
			spin_unlock_irqrestore(&desc->lock,flags);

#ifdef CONFIG_SMP
			/* Wait to make sure it's not being used on another CPU */
			while (desc->status & IRQ_INPROGRESS)
				barrier();
#endif
			irq_kfree(action);
			return 0;
		}
		printk("Trying to free free IRQ%d\n",irq);
		spin_unlock_irqrestore(&desc->lock,flags);
		break;
	}
	return -ENOENT;
}

int request_irq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *),
	unsigned long irqflags, const char * devname, void *dev_id)
{
	struct irqaction *action;
	int retval;

	if (irq >= MAX_IRQS)
		return -EINVAL;
	if (!handler)
		/* We could implement really free_irq() instead of that... */
		return do_free_irq(irq, dev_id);

	action = (struct irqaction *)
		irq_kmalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action) {
		printk(KERN_ERR "irq_kmalloc() failed for irq %d !\n", irq);
		return -ENOMEM;
	}

	action->handler = handler;
	action->flags = irqflags;
	action->mask = 0;
	action->name = devname;
	action->dev_id = dev_id;
	action->next = NULL;

	retval = setup_irq(irq, action);
	if (retval)
		kfree(action);

	return 0;
}

void free_irq(unsigned int irq, void *dev_id)
{
	request_irq(irq, NULL, 0, NULL, dev_id);
}

/*
 * Generic enable/disable code: this just calls
 * down into the PIC-specific version for the actual
 * hardware disable after having gotten the irq
 * controller lock.
 */

/**
 *	disable_irq_nosync - disable an irq without waiting
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line. Disables of an interrupt
 *	stack. Unlike disable_irq(), this function does not ensure existing
 *	instances of the IRQ handler have completed before returning.
 *
 *	This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
	irq_desc_t *desc = irqdesc(irq);
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);
	if (!desc->depth++) {
		if (!(desc->status & IRQ_PER_CPU))
			desc->status |= IRQ_DISABLED;
		mask_irq(irq);
	}
	spin_unlock_irqrestore(&desc->lock, flags);
}

/**
 *	disable_irq - disable an irq and wait for completion
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line. Disables of an interrupt
 *	stack. That is for two disables you need two enables. This
 *	function waits for any pending IRQ handlers for this interrupt
 *	to complete before returning. If you use this function while
 *	holding a resource the IRQ handler may need you will deadlock.
 *
 *	This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	disable_irq_nosync(irq);

	if (!local_irq_count(smp_processor_id())) {
		do {
			barrier();
		} while (irqdesc(irq)->status & IRQ_INPROGRESS);
	}
}

/**
 *	enable_irq - enable interrupt handling on an irq
 *	@irq: Interrupt to enable
 *
 *	Re-enables the processing of interrupts on this IRQ line
 *	providing no disable_irq calls are now in effect.
 *
 *	This function may be called from IRQ context.
 */
void enable_irq(unsigned int irq)
{
	irq_desc_t *desc = irqdesc(irq);
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);
	switch (desc->depth) {
	case 1: {
		unsigned int status = desc->status & ~IRQ_DISABLED;
		desc->status = status;
		if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
			desc->status = status | IRQ_REPLAY;
			hw_resend_irq(desc->handler,irq);
		}
		unmask_irq(irq);
		/* fall-through */
	}
	default:
		desc->depth--;
		break;
	case 0:
		printk("enable_irq(%u) unbalanced\n", irq);
	}
	spin_unlock_irqrestore(&desc->lock, flags);
}

/* This function as implemented was a potential source of data
 * corruption.  I pulled it for now, until it can be properly
 * implemented. DRENG */
int get_irq_list(char *buf)
{
	return(0);
}

int show_interrupts(struct seq_file *p, void *v)
{
	int i, j;
	struct irqaction * action;
	irq_desc_t *desc;
	struct hw_irq_stat *hwstat;
	unsigned long *per_cpus;
	unsigned long flags;

	seq_printf(p, "           ");
	for (j=0; j<smp_num_cpus; j++)
		seq_printf(p, "CPU%d       ",j);
	seq_putc(p, '\n');

	for_each_irq(i) {
		desc = irqdesc(i);
		spin_lock_irqsave(&desc->lock, flags);
		action = desc->action;
		if (!action || !action->handler)
			goto skip;
		seq_printf(p, "%3d: ", i);
		hwstat = get_irq_stat(desc);
		per_cpus = get_irq_per_cpu(hwstat);
		if (per_cpus) {
			for (j = 0; j < smp_num_cpus; j++)
				seq_printf(p, "%10lu ", per_cpus[j]);
		} else {
			seq_printf(p, "%10lu ", hwstat->irqs);
		}
		if (irqdesc(i)->handler)
			seq_printf(p, " %s ", irqdesc(i)->handler->typename);
		else
			seq_printf(p, "  None      ");
		seq_printf(p, "%s", (irqdesc(i)->status & IRQ_LEVEL) ? "Level " : "Edge  ");
		seq_printf(p, "    %s", action->name);
		for (action=action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);
		seq_putc(p, '\n');
skip:
		spin_unlock_irqrestore(&desc->lock, flags);
	}
#ifdef CONFIG_SMP
	/* should this be per processor send/receive? */
	seq_printf(p, "IPI (recv/sent): %10u/%u\n",
		       atomic_read(&ipi_recv), atomic_read(&ipi_sent));
#endif
	seq_printf(p, "BAD: %10u\n", ppc_spurious_interrupts);
	return 0;
}

static inline void
handle_irq_event(int irq, struct pt_regs *regs, struct irqaction *action)
{
	int status = 0;

	if (!(action->flags & SA_INTERRUPT))
		__sti();

	do {
		status |= action->flags;
		action->handler(irq, action->dev_id, regs);
		action = action->next;
	} while (action);
	if (status & SA_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);
	__cli();
}

/*
 * Eventually, this should take an array of interrupts and an array size
 * so it can dispatch multiple interrupts.
 */
void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq)
{
	int status;
	struct irqaction *action;
	int cpu = smp_processor_id();
	irq_desc_t *desc = irqdesc(irq);
	struct hw_irq_stat *hwstat;
	unsigned long *per_cpus;

	/* Statistics. */
	hwstat = get_irq_stat(desc);	/* same cache line as desc */
	hwstat->irqs++;
	per_cpus = get_irq_per_cpu(hwstat); /* same cache line for < 8 cpus */
	if (per_cpus)
		per_cpus[cpu]++;

	if (irq < NR_IRQS) {
		kstat.irqs[cpu][irq]++;
	} else {
		kstat.irqs[cpu][NR_IRQS-1]++;
	}

	spin_lock(&desc->lock);
	ack_irq(irq);
	/*
	 * REPLAY is when Linux resends an IRQ that was dropped earlier
	 * WAITING is used by probe to mark irqs that are being tested
	 */
	status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
	if (!(status & IRQ_PER_CPU))
		status |= IRQ_PENDING; /* we _want_ to handle it */

	/*
	 * If the IRQ is disabled for whatever reason, we cannot
	 * use the action we have.
	 */
	action = NULL;
	if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
		action = desc->action;
		if (!action || !action->handler) {
			ppc_spurious_interrupts++;
			printk(KERN_DEBUG "Unhandled interrupt %x, disabled\n", irq);
			/* We can't call disable_irq here, it would deadlock */
			if (!desc->depth)
				desc->depth = 1;
			desc->status |= IRQ_DISABLED;
			/* This is not a real spurious interrupt, we
			 * have to eoi it, so we jump to out
			 */
			mask_irq(irq);
			goto out;
		}
		status &= ~IRQ_PENDING; /* we commit to handling */
		if (!(status & IRQ_PER_CPU))
			status |= IRQ_INPROGRESS; /* we are handling it */
	}
	desc->status = status;

	/*
	 * If there is no IRQ handler or it was disabled, exit early.
	 * Since we set PENDING, if another processor is handling
	 * a different instance of this same irq, the other processor
	 * will take care of it.
	 */
	if (!action)
		goto out;

	/*
	 * Edge triggered interrupts need to remember
	 * pending events.
	 * This applies to any hw interrupts that allow a second
	 * instance of the same irq to arrive while we are in do_IRQ
	 * or in the handler. But the code here only handles the _second_
	 * instance of the irq, not the third or fourth. So it is mostly
	 * useful for irq hardware that does not mask cleanly in an
	 * SMP environment.
	 */
	for (;;) {
		spin_unlock(&desc->lock);
		handle_irq_event(irq, regs, action);
		spin_lock(&desc->lock);

		if (!(desc->status & IRQ_PENDING))
			break;
		desc->status &= ~IRQ_PENDING;
	}
	desc->status &= ~IRQ_INPROGRESS;
out:
	/*
	 * The ->end() handler has to deal with interrupts which got
	 * disabled while the handler was running.
	 */
	if (desc->handler) {
		if (desc->handler->end)
			desc->handler->end(irq);
		else if (desc->handler->enable)
			desc->handler->enable(irq);
	}
	spin_unlock(&desc->lock);
}

int do_IRQ(struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	int irq, first = 1;
#ifdef CONFIG_PPC_ISERIES
	struct paca_struct *lpaca;
	struct ItLpQueue *lpq;
#endif

	irq_enter(cpu);

#ifdef CONFIG_PPC_ISERIES
	lpaca = get_paca();
#ifdef CONFIG_SMP
	if (lpaca->xLpPaca.xIntDword.xFields.xIpiCnt) {
		lpaca->xLpPaca.xIntDword.xFields.xIpiCnt = 0;
		iSeries_smp_message_recv(regs);
	}
#endif /* CONFIG_SMP */
	lpq = lpaca->lpQueuePtr;
	if (lpq && ItLpQueue_isLpIntPending(lpq))
		lpEvent_count += ItLpQueue_process(lpq, regs);
#else
	/*
	 * Every arch is required to implement ppc_md.get_irq.
	 * This function will either return an irq number or -1 to
	 * indicate there are no more pending.  But the first time
	 * through the loop this means there wasn't an IRQ pending.
	 * The value -2 is for buggy hardware and means that this IRQ
	 * has already been handled. -- Tom
	 */
	while ((irq = ppc_md.get_irq(regs)) >= 0) {
		ppc_irq_dispatch_handler(regs, irq);
		first = 0;
	}
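
Usage note (not part of irq.c): the functions above make up the driver-facing interrupt API on this kernel: request_irq()/free_irq() register and release a handler, and disable_irq()/enable_irq() nest so that a line stays masked until every disable has been matched by an enable. The listing breaks off inside do_IRQ() at the page boundary and continues on the following pages. Below is a minimal, hypothetical sketch of how a 2.4-era driver would typically call these functions; the interrupt number EXAMPLE_IRQ, the device name "example", and the dev_id cookie are made up for illustration and do not appear anywhere in this file.

/* Hypothetical driver-side usage sketch; not part of irq.c. */
#include <linux/sched.h>	/* assumed to declare request_irq()/free_irq() on 2.4 */
#include <linux/irq.h>		/* assumed to declare disable_irq()/enable_irq() */
#include <linux/errno.h>

#define EXAMPLE_IRQ	9		/* made-up interrupt line number */

static unsigned long example_events;	/* state shared with the handler */

/* 2.4-style handler: the same signature request_irq() expects above. */
static void example_handler(int irq, void *dev_id, struct pt_regs *regs)
{
	unsigned long *events = dev_id;

	(*events)++;			/* keep interrupt-time work short */
}

int example_attach(void)
{
	int err;

	/* dev_id is the cookie do_free_irq() later matches against, so it
	 * must be unique per registration; here we reuse our counter's
	 * address. */
	err = request_irq(EXAMPLE_IRQ, example_handler, 0,
			  "example", &example_events);
	if (err)
		return err;

	/* disable_irq()/enable_irq() nest: two disables need two enables.
	 * disable_irq() also waits for a handler already running on
	 * another CPU before returning. */
	disable_irq(EXAMPLE_IRQ);
	example_events = 0;		/* safely touch handler-shared state */
	enable_irq(EXAMPLE_IRQ);

	return 0;
}

void example_detach(void)
{
	/* On this kernel free_irq() is just request_irq(irq, NULL, 0,
	 * NULL, dev_id), which ends up in do_free_irq(). */
	free_irq(EXAMPLE_IRQ, &example_events);
}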
