⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 irq.c

📁 上传linux-jx2410的源代码
💻 C
📖 第 1 页 / 共 3 页
字号:
		/* NOTE(review): this excerpt begins in the middle of free_irq();
		 * the function header and the code that sets up 'action', 'tmp'
		 * and 'bucket' lie before this chunk.
		 */
		return;
	}
	/* Statically allocated irqaction structs must never be kfree'd. */
	if(action->flags & SA_STATIC_ALLOC) {
		printk("Attempt to free statically allocated IRQ %d (%s)\n",
		       bucket->pil, action->name);
		return;
	}
	save_and_cli(flags);
	/* Unlink 'action' from this PIL's action list. */
	if(action && tmp)
		tmp->next = action->next;
	else
		*(bucket->pil + irq_action) = action->next;
	if (bucket != &pil0_dummy_bucket) {
		unsigned long imap = bucket->imap;
		void **vector, *orig;
		int ent;

		orig = bucket->irq_info;
		vector = (void **)orig;

		if ((bucket->flags & IBF_MULTI) != 0) {
			/* Shared bucket: irq_info is a 4-slot vector of
			 * irqaction pointers.  Clear our slot and count the
			 * remaining users.
			 */
			int other = 0;
			void *orphan = NULL;
			for(ent = 0; ent < 4; ent++) {
				if(vector[ent] == action)
					vector[ent] = NULL;
				else if(vector[ent] != NULL) {
					orphan = vector[ent];
					other++;
				}
			}

			/* Only free when no other shared irq
			 * uses this bucket.
			 */
			if(other) {
				if (other == 1) {
					/* Convert back to non-shared bucket. */
					bucket->irq_info = orphan;
					bucket->flags &= ~(IBF_MULTI);
					kfree(vector);
				}
				goto out;
			}
		} else {
			bucket->irq_info = NULL;
		}

		/* This unique interrupt source is now inactive. */
		bucket->flags &= ~IBF_ACTIVE;

		/* See if any other buckets share this bucket's IMAP
		 * and are still active.
		 */
		for(ent = 0; ent < NUM_IVECS; ent++) {
			bp = &ivector_table[ent];
			if(bp != bucket		&&
			   bp->imap == imap	&&
			   (bp->flags & IBF_ACTIVE) != 0)
				break;
		}

		/* Only disable when no other sub-irq levels of
		 * the same IMAP are active.
		 */
		if (ent == NUM_IVECS)
			disable_irq(irq);
	}
out:
	kfree(action);
	restore_flags(flags);
}

#ifdef CONFIG_SMP
/* Who has the global irq brlock */
unsigned char global_irq_holder = NO_PROC_ID;

/* Dump the per-CPU global-irq brlock state and bottom-half lock state;
 * called from get_irqlock() below when it has spun for too long.
 */
static void show(char * str)
{
	int cpu = smp_processor_id();
	int i;

	printk("\n%s, CPU %d:\n", str, cpu);
	printk("irq:  %d [ ", irqs_running());
	for (i = 0; i < smp_num_cpus; i++)
		printk("%u ", __brlock_array[i][BR_GLOBALIRQ_LOCK]);
	printk("]\nbh:   %d [ ",
	       (spin_is_locked(&global_bh_lock) ?
1 : 0));
	for (i = 0; i < smp_num_cpus; i++)
		printk("%u ", local_bh_count(i));
	printk("]\n");
}

#define MAXCOUNT 100000000

#if 0
#define SYNC_OTHER_ULTRAS(x)	udelay(x+1)
#else
#define SYNC_OTHER_ULTRAS(x)	membar("#Sync");
#endif

/* Wait until no interrupt handler is running on any CPU: if any are,
 * briefly take and release the global irq lock via cli()/sti().
 */
void synchronize_irq(void)
{
	if (irqs_running()) {
		cli();
		sti();
	}
}

/* Acquire the global irq brlock for 'cpu'.  Spins until no CPU is in
 * an interrupt handler and the global bh lock is not held elsewhere;
 * local interrupts are re-enabled inside the inner wait loop so this
 * CPU can still service its own interrupts while waiting.  On success
 * the winner is recorded in global_irq_holder.
 */
static inline void get_irqlock(int cpu)
{
	int count;

	/* This CPU already holds the lock -- nothing to do. */
	if ((unsigned char)cpu == global_irq_holder)
		return;

	count = MAXCOUNT;
again:
	br_write_lock(BR_GLOBALIRQ_LOCK);
	for (;;) {
		spinlock_t *lock;

		if (!irqs_running() &&
		    (local_bh_count(smp_processor_id()) || !spin_is_locked(&global_bh_lock)))
			break;

		/* Contended: drop the write lock while waiting so other
		 * CPUs can make progress, then retry from scratch.
		 */
		br_write_unlock(BR_GLOBALIRQ_LOCK);
		lock = &__br_write_locks[BR_GLOBALIRQ_LOCK].lock;
		while (irqs_running() ||
		       spin_is_locked(lock) ||
		       (!local_bh_count(smp_processor_id()) && spin_is_locked(&global_bh_lock))) {
			if (!--count) {
				/* Spun too long -- dump lock state and
				 * restart the countdown.
				 */
				show("get_irqlock");
				count = (~0 >> 1);
			}
			__sti();
			SYNC_OTHER_ULTRAS(cpu);
			__cli();
		}
		goto again;
	}
	global_irq_holder = cpu;
}

/* Globally disable interrupts: if local interrupts were enabled
 * (saved flags == 0), disable them and -- when not already inside an
 * interrupt handler -- take the global irq lock too.
 */
void __global_cli(void)
{
	unsigned long flags;

	__save_flags(flags);
	if(flags == 0) {
		int cpu = smp_processor_id();
		__cli();
		if (! local_irq_count(cpu))
			get_irqlock(cpu);
	}
}

/* Undo __global_cli(): release the global irq lock (when not inside
 * an interrupt handler) and re-enable local interrupts.
 */
void __global_sti(void)
{
	int cpu = smp_processor_id();

	if (! local_irq_count(cpu))
		release_irqlock(cpu);
	__sti();
}

/* Encode the current irq state for __global_restore_flags():
 *   0 = this cpu holds the global irq lock,
 *   1 = local irqs enabled (global path),
 *   2 = local irqs disabled, 3 = local irqs enabled (local path).
 */
unsigned long __global_save_flags(void)
{
	unsigned long flags, local_enabled, retval;

	__save_flags(flags);
	local_enabled = ((flags == 0) ? 1 : 0);
	retval = 2 + local_enabled;
	if (!
local_irq_count(smp_processor_id())) {
		if (local_enabled)
			retval = 1;
		if (global_irq_holder == (unsigned char) smp_processor_id())
			retval = 0;
	}
	return retval;
}

/* Re-establish the state encoded by __global_save_flags().  Any value
 * other than 0-3 is a caller bug and is logged together with the
 * caller's return address (%i7).
 */
void __global_restore_flags(unsigned long flags)
{
	switch (flags) {
	case 0:
		__global_cli();
		break;
	case 1:
		__global_sti();
		break;
	case 2:
		__cli();
		break;
	case 3:
		__sti();
		break;
	default:
	{
		unsigned long pc;
		__asm__ __volatile__("mov %%i7, %0" : "=r" (pc));
		printk("global_restore_flags: Bogon flags(%016lx) caller %016lx\n",
		       flags, pc);
	}
	}
}
#endif /* CONFIG_SMP */

/* An interrupt vector arrived for a bucket that is not active yet:
 * acknowledge the work word and mark the bucket pending so a later
 * request_irq() can pick it up.
 */
void catch_disabled_ivec(struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	struct ino_bucket *bucket = __bucket(*irq_work(cpu, 0));

	/* We can actually see this on Ultra/PCI PCI cards, which are bridges
	 * to other devices.  Here a single IMAP enabled potentially multiple
	 * unique interrupt sources (which each do have a unique ICLR register).
	 *
	 * So what we do is just register that the IVEC arrived, when registered
	 * for real the request_irq() code will check the bit and signal
	 * a local CPU interrupt for it.
	 */
#if 0
	printk("IVEC: Spurious interrupt vector (%x) received at (%016lx)\n",
	       bucket - &ivector_table[0], regs->tpc);
#endif
	*irq_work(cpu, 0) = 0;
	bucket->pending = 1;
}

/* Tune this... */
#define FORWARD_VOLUME		12

#ifdef CONFIG_SMP
static inline void redirect_intr(int cpu, struct ino_bucket *bp)
{
	/* Ok, here is what is going on:
	 * 1) Retargeting IRQs on Starfire is very
	 *    expensive so just forget about it on them.
	 * 2) Moving around very high priority interrupts
	 *    is a losing game.
	 * 3) If the current cpu is idle, interrupts are
	 *    useful work, so keep them here.  But do not
	 *    pass to our neighbour if he is not very idle.
	 * 4) If sysadmin explicitly asks for directed intrs,
	 *    Just Do It.
	 */
	struct irqaction *ap = bp->irq_info;
	unsigned long cpu_mask = get_smpaff_in_irqaction(ap);
	unsigned int buddy, ticks;

	/* No explicit affinity recorded: treat every CPU as allowed. */
	if (cpu_mask == 0)
		cpu_mask = ~0UL;

	if (this_is_starfire != 0 ||
	    bp->pil >= 10 || current->pid == 0)
		goto out;

	/* 'cpu' is the MID (ie. UPAID), calculate the MID
	 * of our buddy.
	 */
	buddy = cpu_number_map(cpu) + 1;
	if (buddy >= NR_CPUS ||
	    cpu_logical_map(buddy) == -1)
		buddy = 0;

	/* Advance to the next CPU permitted by the affinity mask; if we
	 * scan more than NR_CPUS entries without a hit, drop the stale
	 * affinity setting and give up.
	 */
	ticks = 0;
	while ((cpu_mask & (1UL << buddy)) == 0) {
		buddy++;
		if (buddy >= NR_CPUS ||
		    cpu_logical_map(buddy) == -1)
			buddy = cpu_logical_map(0);
		if (++ticks > NR_CPUS) {
			put_smpaff_in_irqaction(ap, 0);
			goto out;
		}
	}

	/* The chosen buddy is ourselves -- leave the IRQ where it is. */
	if (buddy == cpu_number_map(cpu))
		goto out;

	buddy = cpu_logical_map(buddy);

	/* Voo-doo programming. */
	if (cpu_data[buddy].idle_volume < FORWARD_VOLUME)
		goto out;

	/* This just so happens to be correct on Cheetah
	 * at the moment.
	 */
	buddy <<= 26;

	/* Push it to our buddy. */
	upa_writel(buddy | IMAP_VALID, bp->imap);

out:
	return;
}
#endif

/* Top-level dispatcher for a PIL interrupt: drain the chain of
 * pending ino_buckets for this (cpu, irq) work slot and invoke the
 * registered handler(s) of each active bucket.
 */
void handler_irq(int irq, struct pt_regs *regs)
{
	struct ino_bucket *bp, *nbp;
	int cpu = smp_processor_id();

#ifndef CONFIG_SMP
	/*
	 * Check for TICK_INT on level 14 softint.
	 */
	{
		unsigned long clr_mask = 1 << irq;
		unsigned long tick_mask;

		if (SPARC64_USE_STICK)
			tick_mask = (1UL << 16);
		else
			tick_mask = (1UL << 0);
		if ((irq == 14) && (get_softint() & tick_mask)) {
			irq = 0;
			clr_mask = tick_mask;
		}
		clear_softint(clr_mask);
	}
#else
	int should_forward = 1;

	clear_softint(1 << irq);
#endif

	irq_enter(cpu, irq);
	kstat.irqs[cpu][irq]++;

#ifdef CONFIG_PCI
	if (irq == 9)
		kbd_pt_regs = regs;
#endif

	/* Sliiiick... */
#ifndef CONFIG_SMP
	bp = ((irq != 0) ?
__bucket(xchg32(irq_work(cpu, irq), 0)) :
	      &pil0_dummy_bucket);
#else
	bp = __bucket(xchg32(irq_work(cpu, irq), 0));
#endif
	/* Walk the bucket chain; each bucket is unlinked before its
	 * handler(s) run.
	 */
	for ( ; bp != NULL; bp = nbp) {
		unsigned char flags = bp->flags;
		unsigned char random = 0;

		nbp = __bucket(bp->irq_chain);
		bp->irq_chain = 0;

		if ((flags & IBF_ACTIVE) != 0) {
#ifdef CONFIG_PCI
			if ((flags & IBF_DMA_SYNC) != 0) {
				upa_readl(dma_sync_reg_table[bp->synctab_ent]);
				upa_readq(pci_dma_wsync);
			}
#endif
			if ((flags & IBF_MULTI) == 0) {
				/* Single handler attached to this bucket. */
				struct irqaction *ap = bp->irq_info;
				ap->handler(__irq(bp), ap->dev_id, regs);
				random |= ap->flags & SA_SAMPLE_RANDOM;
			} else {
				/* Shared bucket: up to 4 handlers in the
				 * irq_info vector.
				 */
				void **vector = (void **)bp->irq_info;
				int ent;
				for (ent = 0; ent < 4; ent++) {
					struct irqaction *ap = vector[ent];
					if (ap != NULL) {
						ap->handler(__irq(bp), ap->dev_id, regs);
						random |= ap->flags & SA_SAMPLE_RANDOM;
					}
				}
			}

			/* Only the dummy bucket lacks IMAP/ICLR. */
			if (bp->pil != 0) {
#ifdef CONFIG_SMP
				if (should_forward) {
					redirect_intr(cpu, bp);
					should_forward = 0;
				}
#endif
				upa_writel(ICLR_IDLE, bp->iclr);

				/* Test and add entropy */
				if (random)
					add_interrupt_randomness(irq);
			}
		} else
			bp->pending = 1;
	}
	irq_exit(cpu, irq);
}

#ifdef CONFIG_BLK_DEV_FD
extern void floppy_interrupt(int irq, void *dev_cookie, struct pt_regs *regs);

/* Fast-path trampoline for the floppy driver: acknowledge the work
 * slot, call floppy_interrupt(), then idle the bucket's ICLR.
 */
void sparc_floppy_irq(int irq, void *dev_cookie, struct pt_regs *regs)
{
	struct irqaction *action = *(irq + irq_action);
	struct ino_bucket *bucket;
	int cpu = smp_processor_id();

	irq_enter(cpu, irq);
	kstat.irqs[cpu][irq]++;

	*(irq_work(cpu, irq)) = 0;
	bucket = get_ino_in_irqaction(action) + ivector_table;

	floppy_interrupt(irq, dev_cookie, regs);
	upa_writel(ICLR_IDLE, bucket->iclr);

	irq_exit(cpu, irq);
}
#endif

/* The following assumes that the branch lies before the place we
 * are branching to.  This is the case for a trap vector...
 * You have been warned.
 */
#define SPARC_BRANCH(dest_addr, inst_addr) \
          (0x10800000 | ((((dest_addr)-(inst_addr))>>2)&0x3fffff))
#define SPARC_NOP (0x01000000)

/* Patch the TL0 trap table so vector 'cpu_irq' branches directly to
 * 'handler' (branch plus delay-slot nop), then membar/flush the
 * patched entry so the new instructions are visible.
 * NOTE(review): the 0x820 base and <<5 stride encode the trap-table
 * layout for interrupt levels -- confirm against the ttable source.
 */
static void install_fast_irq(unsigned int cpu_irq,
			     void (*handler)(int, void *, struct pt_regs *))
{
	extern unsigned long sparc64_ttable_tl0;
	unsigned long ttent = (unsigned long) &sparc64_ttable_tl0;
	unsigned int *insns;

	ttent += 0x820;
	ttent += (cpu_irq - 1) << 5;
	insns = (unsigned int *) ttent;
	insns[0] = SPARC_BRANCH(((unsigned long) handler),
				((unsigned long)&insns[0]));
	insns[1] = SPARC_NOP;
	__asm__ __volatile__("membar #StoreStore; flush %0" : : "r" (ttent));
}

/* NOTE(review): request_fast_irq() continues past the end of this
 * excerpt (page 1 of 3); only its argument validation is visible here.
 */
int request_fast_irq(unsigned int irq,
		     void (*handler)(int, void *, struct pt_regs *),
		     unsigned long irqflags, const char *name, void *dev_id)
{
	struct irqaction *action;
	struct ino_bucket *bucket = __bucket(irq);
	unsigned long flags;

	/* No pil0 dummy buckets allowed here. */
	if (bucket < &ivector_table[0] ||
	    bucket >= &ivector_table[NUM_IVECS]) {
		unsigned int *caller;

		__asm__ __volatile__("mov %%i7, %0" : "=r" (caller));
		printk(KERN_CRIT "request_fast_irq: Old style IRQ registry attempt "
		       "from %p, irq %08x.\n", caller, irq);
		return -EINVAL;
	}

	if(!handler)
		return -EINVAL;

	/* PIL 0 (dummy) and PIL 14 (timer) cannot take a fast handler. */
	if ((bucket->pil == 0) || (bucket->pil == 14)) {
		printk("request_fast_irq: Trying to register shared IRQ 0 or 14.\n");
		return -EBUSY;
	}

	/* Fast IRQs cannot coexist with, or be, shared handlers. */
	action = *(bucket->pil + irq_action);
	if(action) {
		if(action->flags & SA_SHIRQ)
			panic("Trying to register fast irq when already shared.\n");
		if(irqflags & SA_SHIRQ)
			panic("Trying to register fast irq as shared.\n");
		printk("request_fast_irq: Trying to register yet already owned.\n");
		return -EBUSY;
	}

	/*

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -