⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 ip27-irq.c

📁 内核linux2.4.20,可跟rtlinux3.2打补丁 组成实时linux系统,编译内核
💻 C
📖 第 1 页 / 共 2 页
字号:
		/*
		 * NOTE(review): the visible region begins mid-way through
		 * request_irq(); the function's opening (parameter checks and
		 * the allocation whose failure returns -ENOMEM here) lies
		 * before this chunk.
		 */
		return -ENOMEM;

	/* Tail of request_irq(): fill in the freshly allocated descriptor. */
	action->handler = handler;
	action->flags = irqflags;
	action->mask = 0;
	action->name = devname;
	action->next = NULL;
	action->dev_id = dev_id;

	DBG("request_irq(): %s  devid= 0x%x\n", devname, dev_id);
	retval = setup_irq(irq, action);
	DBG("request_irq(): retval= %d\n", retval);
	if (retval)
		kfree(action);	/* setup_irq failed -- don't leak the action */
	return retval;
}

/*
 * Detach the handler registered with dev_id from the given IRQ's action
 * list and free its irqaction.  For IRQs at or above BASE_PCI_IRQ the
 * bridge interrupt is shut down as well.  Complains via printk if the
 * IRQ number is out of range or no matching handler is found.
 */
void free_irq(unsigned int irq, void *dev_id)
{
	struct irqaction * action, **p;
	unsigned long flags;

	if (irq >= NR_IRQS) {
		printk("Trying to free IRQ%d\n", irq);
		return;
	}
	/* Walk the singly linked list of actions for this IRQ. */
	for (p = irq + irq_action; (action = *p) != NULL; p = &action->next) {
		if (action->dev_id != dev_id)
			continue;

		/* Found it - now free it */
		save_and_cli(flags);	/* unlink with interrupts disabled */
		*p = action->next;
		if (irq >= BASE_PCI_IRQ)
			bridge_shutdown(irq);
		restore_flags(flags);
		kfree(action);
		return;
	}
	printk("Trying to free free IRQ%d\n",irq);
}

/* Useless ISA nonsense.  */
unsigned long probe_irq_on (void)
{
	/* ISA IRQ autoprobing is meaningless on IP27 -- calling this is a bug. */
	panic("probe_irq_on called!\n");
	return 0;
}

int probe_irq_off (unsigned long irqs)
{
	return 0;
}

/* Install the low-level interrupt dispatcher as exception vector 0. */
void __init init_IRQ(void)
{
	set_except_vector(0, ip27_irq);
}

#ifdef CONFIG_SMP

/*
 * This following are the global intr on off routines, copied almost
 * entirely from i386 code.
 */

int global_irq_holder = NO_PROC_ID;	/* CPU currently holding global_irq_lock */
spinlock_t global_irq_lock = SPIN_LOCK_UNLOCKED;

extern void show_stack(unsigned long* esp);

/*
 * Debug dump used when wait_on_irq() spins too long: prints per-CPU irq
 * and bh counts.  Stack dumps are stubbed out ("Code not developed yet").
 */
static void show(char * str)
{
	int i;
	int cpu = smp_processor_id();

	printk("\n%s, CPU %d:\n", str, cpu);
	printk("irq:  %d [",irqs_running());
	for(i=0;i < smp_num_cpus;i++)
		printk(" %d",local_irq_count(i));
	printk(" ]\nbh:   %d [",spin_is_locked(&global_bh_lock) ? 1 : 0);
	for(i=0;i < smp_num_cpus;i++)
		printk(" %d",local_bh_count(i));
	printk(" ]\nStack dumps:");
	for(i = 0; i < smp_num_cpus; i++) {
		if (i == cpu)
			continue;
		printk("\nCPU %d:",i);
		printk("Code not developed yet\n");
		/* show_stack(0); */
	}
	printk("\nCPU %d:",cpu);
	printk("Code not developed yet\n");
	/* show_stack(NULL); */
	printk("\n");
}

/* Iterations before show() is called to report an apparent lockup. */
#define MAXCOUNT 		100000000
/* Per-CPU staggered delay so cores don't retry in lockstep. */
#define SYNC_OTHER_CORES(x)	udelay(x+1)

/*
 * Spin (with global_irq_lock held on entry) until no CPU is executing in
 * interrupt context.  The lock is dropped while waiting and re-taken via
 * spin_trylock() before re-checking, to avoid deadlock with a CPU that
 * needs the lock to finish its interrupt.
 */
static inline void wait_on_irq(int cpu)
{
	int count = MAXCOUNT;

	for (;;) {
		/*
		 * Wait until all interrupts are gone. Wait
		 * for bottom half handlers unless we're
		 * already executing in one..
		 */
		if (!irqs_running())
			if (local_bh_count(cpu) || !spin_is_locked(&global_bh_lock))
				break;

		/* Duh, we have to loop. Release the lock to avoid deadlocks */
		spin_unlock(&global_irq_lock);

		for (;;) {
			if (!--count) {
				/* Took too long -- dump diagnostics, keep spinning. */
				show("wait_on_irq");
				count = ~0;
			}
			/* Briefly re-enable local interrupts so pending work drains. */
			__sti();
			SYNC_OTHER_CORES(cpu);
			__cli();
			if (irqs_running())
				continue;
			if (spin_is_locked(&global_irq_lock))
				continue;
			if (!local_bh_count(cpu) && spin_is_locked(&global_bh_lock))
				continue;
			if (spin_trylock(&global_irq_lock))
				break;
		}
	}
}

/*
 * Wait for all in-flight interrupt handlers on all CPUs to complete, by
 * briefly taking and releasing the global IRQ lock via cli()/sti().
 */
void synchronize_irq(void)
{
	if (irqs_running()) {
		/* Stupid approach */
		cli();
		sti();
	}
}

/*
 * Acquire the global IRQ lock for this CPU (no-op if we already hold it)
 * and then wait until no other CPU is in interrupt context.
 */
static inline void get_irqlock(int cpu)
{
	if (!spin_trylock(&global_irq_lock)) {
		/* do we already hold the lock? */
		if ((unsigned char) cpu == global_irq_holder)
			return;
		/* Uhhuh.. Somebody else got it. Wait.. */
		spin_lock(&global_irq_lock);
	}
	/*
	 * We also need to make sure that nobody else is running
	 * in an interrupt context.
	 */
	wait_on_irq(cpu);

	/*
	 * Ok, finally..
	 */
	global_irq_holder = cpu;
}

/*
 * Global cli: if interrupts are enabled locally (ST0_IE set in the saved
 * status word), disable them and -- unless we are inside an interrupt
 * handler -- take the global IRQ lock.
 *
 * NOTE(review): `flags` is unsigned int here but unsigned long in
 * __global_save_flags(); presumably __save_flags() tolerates both --
 * TODO confirm.
 */
void __global_cli(void)
{
	unsigned int flags;

	__save_flags(flags);
	if (flags & ST0_IE) {
		int cpu = smp_processor_id();
		__cli();
		if (!local_irq_count(cpu))
			get_irqlock(cpu);
	}
}

/*
 * Global sti: release the global IRQ lock (if not in interrupt context)
 * and re-enable local interrupts.
 */
void __global_sti(void)
{
	int cpu = smp_processor_id();

	if (!local_irq_count(cpu))
		release_irqlock(cpu);
	__sti();
}

/*
 * SMP flags value to restore to:
 * 0 - global cli
 * 1 - global sti
 * 2 - local cli
 * 3 - local sti
 */
unsigned long __global_save_flags(void)
{
	int retval;
	int local_enabled;
	unsigned long flags;
	int cpu = smp_processor_id();

	__save_flags(flags);
	local_enabled = (flags & ST0_IE);
	/* default to local; assumes ST0_IE is bit 0 so this yields 2 or 3 --
	   TODO confirm against mipsregs definitions */
	retval = 2 + local_enabled;

	/* check for global flags if we're not in an interrupt */
	if (!local_irq_count(cpu)) {
		if (local_enabled)
			retval = 1;
		if (global_irq_holder == cpu)
			retval = 0;
	}
	return retval;
}

/* Inverse of __global_save_flags(): restore one of the four states above. */
void __global_restore_flags(unsigned long flags)
{
	switch (flags) {
		case 0:
			__global_cli();
			break;
		case 1:
			__global_sti();
			break;
		case 2:
			__cli();
			break;
		case 3:
			__sti();
			break;
		default:
			printk("global_restore_flags: %08lx\n", flags);
	}
}

#endif /* CONFIG_SMP */

/*
 * Get values that vary depending on which CPU and bit we're operating on.
 * Maps a level `bit` onto the INT_PEND0 or INT_PEND1 register set:
 * *ip selects the register (0 or 1), *new_bit is the bit within it, and
 * *intpend_masks points at the matching per-CPU mask array.
 */
static hub_intmasks_t *intr_get_ptrs(cpuid_t cpu, int bit, int *new_bit,
				hubreg_t **intpend_masks, int *ip)
{
	hub_intmasks_t *hub_intmasks;

	hub_intmasks = &cpu_data[cpu].p_intmasks;
	if (bit < N_INTPEND_BITS) {
		*intpend_masks = hub_intmasks->intpend0_masks;
		*ip = 0;
		*new_bit = bit;
	} else {
		*intpend_masks = hub_intmasks->intpend1_masks;
		*ip = 1;
		*new_bit = bit - N_INTPEND_BITS;
	}
	return hub_intmasks;
}

/*
 * Enable interrupt level `bit` for `cpu`: clear any stale pending state,
 * set the bit in the cached mask, and write the mask to the hub's
 * PI_INT_MASK0/1 register for this CPU slice.  Always returns 0.
 */
int intr_connect_level(int cpu, int bit)
{
	int ip;
	int slice = cputoslice(cpu);
	volatile hubreg_t *mask_reg;
	hubreg_t *intpend_masks;
	nasid_t nasid = COMPACT_TO_NASID_NODEID(cputocnode(cpu));

	(void)intr_get_ptrs(cpu, bit, &bit, &intpend_masks, &ip);

	/* Make sure it's not already pending when we connect it. */
	REMOTE_HUB_CLR_INTR(nasid, bit + ip * N_INTPEND_BITS);

	intpend_masks[0] |= (1ULL << (u64)bit);

	if (ip == 0) {
		mask_reg = REMOTE_HUB_ADDR(nasid, PI_INT_MASK0_A +
				PI_INT_MASK_OFFSET * slice);
	} else {
		mask_reg = REMOTE_HUB_ADDR(nasid, PI_INT_MASK1_A +
				PI_INT_MASK_OFFSET * slice);
	}
	HUB_S(mask_reg, intpend_masks[0]);

	return(0);
}

/*
 * Disable interrupt level `bit` for `cpu`: clear the bit in the cached
 * mask and write the mask back to the hub register.  Always returns 0.
 */
int intr_disconnect_level(int cpu, int bit)
{
	int ip;
	int slice = cputoslice(cpu);
	volatile hubreg_t *mask_reg;
	hubreg_t *intpend_masks;
	nasid_t nasid = COMPACT_TO_NASID_NODEID(cputocnode(cpu));

	(void)intr_get_ptrs(cpu, bit, &bit, &intpend_masks, &ip);

	intpend_masks[0] &= ~(1ULL << (u64)bit);

	if (ip == 0) {
		mask_reg = REMOTE_HUB_ADDR(nasid, PI_INT_MASK0_A +
				PI_INT_MASK_OFFSET * slice);
	} else {
		mask_reg = REMOTE_HUB_ADDR(nasid, PI_INT_MASK1_A +
				PI_INT_MASK_OFFSET * slice);
	}
	HUB_S(mask_reg, intpend_masks[0]);

	return(0);
}

/* Handler for the inter-CPU reschedule interrupt. */
void handle_resched_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	/* Nothing, the return from intr will work for us */
}

extern void smp_call_function_interrupt(void);

/*
 * Register the per-CPU inter-processor interrupts (reschedule and
 * call-function, A/B slices) and connect this CPU's levels.  The
 * request_irq/level-table setup runs once, on the first caller.
 */
void install_cpuintr(int cpu)
{
#ifdef CONFIG_SMP

#if (CPUS_PER_NODE == 2)
	/* NOTE(review): `done` is not protected by a lock; presumably only
	   the boot CPU reaches this first -- confirm against caller. */
	static int done = 0;

	/*
	 * This is a hack till we have a pernode irqlist. Currently,
	 * just have the master cpu set up the handlers for the per
	 * cpu irqs.
	 */
	if (done == 0) {
		int j;

		if (request_irq(CPU_RESCHED_A_IRQ, handle_resched_intr,
							0, "resched", 0))
			panic("intercpu intr unconnectible\n");
		if (request_irq(CPU_RESCHED_B_IRQ, handle_resched_intr,
							0, "resched", 0))
			panic("intercpu intr unconnectible\n");
		if (request_irq(CPU_CALL_A_IRQ, smp_call_function_interrupt,
							0, "callfunc", 0))
			panic("intercpu intr unconnectible\n");
		if (request_irq(CPU_CALL_B_IRQ, smp_call_function_interrupt,
							0, "callfunc", 0))
			panic("intercpu intr unconnectible\n");

		/* Invalidate node 0's level table, then record our levels. */
		for (j = 0; j < PERNODE_LEVELS; j++)
			LEVEL_TO_IRQ(0, j) = -1;
		LEVEL_TO_IRQ(0, FAST_IRQ_TO_LEVEL(CPU_RESCHED_A_IRQ)) =
							CPU_RESCHED_A_IRQ;
		LEVEL_TO_IRQ(0, FAST_IRQ_TO_LEVEL(CPU_RESCHED_B_IRQ)) =
							CPU_RESCHED_B_IRQ;
		LEVEL_TO_IRQ(0, FAST_IRQ_TO_LEVEL(CPU_CALL_A_IRQ)) =
							CPU_CALL_A_IRQ;
		LEVEL_TO_IRQ(0, FAST_IRQ_TO_LEVEL(CPU_CALL_B_IRQ)) =
							CPU_CALL_B_IRQ;

		/* Replicate node 0's table to every other compact node. */
		for (j = 1; j < MAX_COMPACT_NODES; j++)
			memcpy(&node_level_to_irq[j][0],
			&node_level_to_irq[0][0],
			sizeof(node_level_to_irq[0][0])*PERNODE_LEVELS);

		done = 1;
	}

	/* The +cputoslice(cpu) picks the A or B variant for this slice. */
	intr_connect_level(cpu, FAST_IRQ_TO_LEVEL(CPU_RESCHED_A_IRQ +
							cputoslice(cpu)));
	intr_connect_level(cpu, FAST_IRQ_TO_LEVEL(CPU_CALL_A_IRQ +
							cputoslice(cpu)));

#else /* CPUS_PER_NODE */
#error Must redefine this for more than 2 CPUS.
#endif /* CPUS_PER_NODE */

#endif /* CONFIG_SMP */
}

/* TLB interrupt hookup -- currently compiled out. */
void install_tlbintr(int cpu)
{
#if 0
	int intr_bit = N_INTPEND_BITS + TLB_INTR_A + cputoslice(cpu);

	intr_connect_level(cpu, intr_bit);
#endif
}

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -