
📄 irq.c

📁 Source code for porting Microwindows to the S3C44B0
💻 C
📖 Page 1 of 2
	}
	desc->status &= ~IRQ_INPROGRESS;
out:
	/*
	 * The ->end() handler has to deal with interrupts which got
	 * disabled while the handler was running.
	 */
	if (irq_desc[irq].handler) {
		if (irq_desc[irq].handler->end)
			irq_desc[irq].handler->end(irq);
		else if (irq_desc[irq].handler->enable)
			irq_desc[irq].handler->enable(irq);
	}
	spin_unlock(&desc->lock);
}

int do_IRQ(struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	int irq, first = 1;

	hardirq_enter( cpu );

	for (;;) {
		/*
		 * Every arch is required to implement ppc_md.get_irq.
		 * This function will either return an irq number or -1 to
		 * indicate there are no more pending.  But the first time
		 * through the loop this means there wasn't an IRQ pending.
		 * The value -2 is for buggy hardware and means that this IRQ
		 * has already been handled. -- Tom
		 */
		irq = ppc_md.get_irq( regs );

		if (irq >= 0)
			ppc_irq_dispatch_handler( regs, irq );
		else {
			if (irq != -2 && first)
				/* That's not SMP safe ... but who cares ? */
				ppc_spurious_interrupts++;
			break;
		}
		first = 0;
	}

	hardirq_exit( cpu );

	if (softirq_pending(cpu))
		do_softirq();

	return 1; /* lets ret_from_int know we can do checks */
}

unsigned long probe_irq_on (void)
{
	return 0;
}

int probe_irq_off (unsigned long irqs)
{
	return 0;
}

unsigned int probe_irq_mask(unsigned long irqs)
{
	return 0;
}

void __init init_IRQ(void)
{
	static int once = 0;

	if ( once )
		return;
	else
		once++;

	ppc_md.init_IRQ();
}

#ifdef CONFIG_SMP
unsigned char global_irq_holder = NO_PROC_ID;
unsigned volatile long global_irq_lock; /* pedantic: long for set_bit --RR */
atomic_t global_irq_count;

atomic_t global_bh_count;

static void show(char * str)
{
	int i;
	unsigned long *stack;
	int cpu = smp_processor_id();

	printk("\n%s, CPU %d:\n", str, cpu);
	printk("irq:  %d [%d %d]\n",
	       atomic_read(&global_irq_count),
	       local_irq_count(0),
	       local_irq_count(1));
	printk("bh:   %d [%d %d]\n",
	       atomic_read(&global_bh_count),
	       local_bh_count(0),
	       local_bh_count(1));
	stack = (unsigned long *) &str;
	for (i = 40; i ; i--) {
		unsigned long x = *++stack;
		if (x > (unsigned long) &init_task_union && x < (unsigned long) &vsprintf) {
			printk("<[%08lx]> ", x);
		}
	}
}

static inline void wait_on_bh(void)
{
	int count = MAXCOUNT;

	do {
		if (!--count) {
			show("wait_on_bh");
			count = ~0;
		}
		/* nothing .. wait for the other bh's to go away */
	} while (atomic_read(&global_bh_count) != 0);
}

static inline void wait_on_irq(int cpu)
{
	int count = MAXCOUNT;

	for (;;) {
		/*
		 * Wait until all interrupts are gone. Wait
		 * for bottom half handlers unless we're
		 * already executing in one..
		 */
		if (!atomic_read(&global_irq_count)) {
			if (local_bh_count(cpu)
			    || !atomic_read(&global_bh_count))
				break;
		}

		/* Duh, we have to loop. Release the lock to avoid deadlocks */
		clear_bit(0,&global_irq_lock);

		for (;;) {
			if (!--count) {
				show("wait_on_irq");
				count = ~0;
			}
			__sti();
			/* don't worry about the lock race Linus found
			 * on intel here. -- Cort
			 */
			__cli();
			if (atomic_read(&global_irq_count))
				continue;
			if (global_irq_lock)
				continue;
			if (!local_bh_count(cpu)
			    && atomic_read(&global_bh_count))
				continue;
			if (!test_and_set_bit(0,&global_irq_lock))
				break;
		}
	}
}

/*
 * This is called when we want to synchronize with
 * bottom half handlers. We need to wait until
 * no other CPU is executing any bottom half handler.
 *
 * Don't wait if we're already running in an interrupt
 * context or are inside a bh handler.
 */
void synchronize_bh(void)
{
	if (atomic_read(&global_bh_count) && !in_interrupt())
		wait_on_bh();
}

/*
 * This is called when we want to synchronize with
 * interrupts. We may for example tell a device to
 * stop sending interrupts: but to make sure there
 * are no interrupts that are executing on another
 * CPU we need to call this function.
 */
void synchronize_irq(void)
{
	if (atomic_read(&global_irq_count)) {
		/* Stupid approach */
		cli();
		sti();
	}
}

static inline void get_irqlock(int cpu)
{
	unsigned int loops = MAXCOUNT;

	if (test_and_set_bit(0,&global_irq_lock)) {
		/* do we already hold the lock? */
		if ((unsigned char) cpu == global_irq_holder)
			return;
		/* Uhhuh.. Somebody else got it. Wait.. */
		do {
			do {
				if (loops-- == 0) {
					printk("get_irqlock(%d) waiting, global_irq_holder=%d\n", cpu, global_irq_holder);
#ifdef CONFIG_XMON
					xmon(0);
#endif
				}
			} while (test_bit(0,&global_irq_lock));
		} while (test_and_set_bit(0,&global_irq_lock));
	}
	/*
	 * We also need to make sure that nobody else is running
	 * in an interrupt context.
	 */
	wait_on_irq(cpu);

	/*
	 * Ok, finally..
	 */
	global_irq_holder = cpu;
}

/*
 * A global "cli()" while in an interrupt context
 * turns into just a local cli(). Interrupts
 * should use spinlocks for the (very unlikely)
 * case that they ever want to protect against
 * each other.
 *
 * If we already have local interrupts disabled,
 * this will not turn a local disable into a
 * global one (problems with spinlocks: this makes
 * save_flags+cli+sti usable inside a spinlock).
 */
void __global_cli(void)
{
	unsigned long flags;

	__save_flags(flags);
	if (flags & (1 << 15)) {
		int cpu = smp_processor_id();
		__cli();
		if (!local_irq_count(cpu))
			get_irqlock(cpu);
	}
}

void __global_sti(void)
{
	int cpu = smp_processor_id();

	if (!local_irq_count(cpu))
		release_irqlock(cpu);
	__sti();
}

/*
 * SMP flags value to restore to:
 * 0 - global cli
 * 1 - global sti
 * 2 - local cli
 * 3 - local sti
 */
unsigned long __global_save_flags(void)
{
	int retval;
	int local_enabled;
	unsigned long flags;

	__save_flags(flags);
	local_enabled = (flags >> 15) & 1;
	/* default to local */
	retval = 2 + local_enabled;

	/* check for global flags if we're not in an interrupt */
	if (!local_irq_count(smp_processor_id())) {
		if (local_enabled)
			retval = 1;
		if (global_irq_holder == (unsigned char) smp_processor_id())
			retval = 0;
	}
	return retval;
}

int
tb(long vals[],
   int  max_size)
{
   register unsigned long *orig_sp __asm__ ("r1");
   register unsigned long lr __asm__ ("r3");
   unsigned long *sp;
   int i;

   asm volatile ("mflr 3");
   vals[0] = lr;
   sp = (unsigned long *) *orig_sp;
   sp = (unsigned long *) *sp;
   for (i=1; i<max_size; i++) {
      if (sp == 0) {
         break;
      }
      vals[i] = *(sp+1);
      sp = (unsigned long *) *sp;
   }
   return i;
}

void __global_restore_flags(unsigned long flags)
{
	switch (flags) {
	case 0:
		__global_cli();
		break;
	case 1:
		__global_sti();
		break;
	case 2:
		__cli();
		break;
	case 3:
		__sti();
		break;
	default:
	{
		unsigned long trace[5];
		int           count;
		int           i;

		printk("global_restore_flags: %08lx (%08lx)\n",
			flags, (&flags)[-1]);
		count = tb(trace, 5);
		printk("tb:");
		for(i=0; i<count; i++) {
			printk(" %8.8lx", trace[i]);
		}
		printk("\n");
	}
	}
}
#endif /* CONFIG_SMP */

static struct proc_dir_entry *root_irq_dir;
static struct proc_dir_entry *irq_dir[NR_IRQS];
static struct proc_dir_entry *smp_affinity_entry[NR_IRQS];

#ifdef CONFIG_IRQ_ALL_CPUS
#define DEFAULT_CPU_AFFINITY 0xffffffff
#else
#define DEFAULT_CPU_AFFINITY 0x00000001
#endif

unsigned int irq_affinity [NR_IRQS] =
	{ [0 ... NR_IRQS-1] = DEFAULT_CPU_AFFINITY };

#define HEX_DIGITS 8

static int irq_affinity_read_proc (char *page, char **start, off_t off,
			int count, int *eof, void *data)
{
	if (count < HEX_DIGITS+1)
		return -EINVAL;
	return sprintf (page, "%08x\n", irq_affinity[(int)data]);
}

static unsigned int parse_hex_value (const char *buffer,
		unsigned long count, unsigned long *ret)
{
	unsigned char hexnum [HEX_DIGITS];
	unsigned long value;
	int i;

	if (!count)
		return -EINVAL;
	if (count > HEX_DIGITS)
		count = HEX_DIGITS;
	if (copy_from_user(hexnum, buffer, count))
		return -EFAULT;

	/*
	 * Parse the first 8 characters as a hex string, any non-hex char
	 * is end-of-string. '00e1', 'e1', '00E1', 'E1' are all the same.
	 */
	value = 0;

	for (i = 0; i < count; i++) {
		unsigned int c = hexnum[i];

		switch (c) {
			case '0' ... '9': c -= '0'; break;
			case 'a' ... 'f': c -= 'a'-10; break;
			case 'A' ... 'F': c -= 'A'-10; break;
		default:
			goto out;
		}
		value = (value << 4) | c;
	}
out:
	*ret = value;
	return 0;
}

static int irq_affinity_write_proc (struct file *file, const char *buffer,
					unsigned long count, void *data)
{
	int irq = (int) data, full_count = count, err;
	unsigned long new_value;

	if (!irq_desc[irq].handler->set_affinity)
		return -EIO;

	err = parse_hex_value(buffer, count, &new_value);

	/*
	 * Do not allow disabling IRQs completely - it's a too easy
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 *
	 * We assume a 1-1 logical<->physical cpu mapping here.  If
	 * we assume that the cpu indices in /proc/irq/../smp_affinity
	 * are actually logical cpu #'s then we have no problem.
	 *  -- Cort <cort@fsmlabs.com>
	 */
	if (!(new_value & cpu_online_map))
		return -EINVAL;

	irq_affinity[irq] = new_value;
	irq_desc[irq].handler->set_affinity(irq, new_value);
	return full_count;
}

static int prof_cpu_mask_read_proc (char *page, char **start, off_t off,
			int count, int *eof, void *data)
{
	unsigned long *mask = (unsigned long *) data;
	if (count < HEX_DIGITS+1)
		return -EINVAL;
	return sprintf (page, "%08lx\n", *mask);
}

static int prof_cpu_mask_write_proc (struct file *file, const char *buffer,
					unsigned long count, void *data)
{
	unsigned long *mask = (unsigned long *) data, full_count = count, err;
	unsigned long new_value;

	err = parse_hex_value(buffer, count, &new_value);
	if (err)
		return err;

	*mask = new_value;
	return full_count;
}

#define MAX_NAMELEN 10

static void register_irq_proc (unsigned int irq)
{
	struct proc_dir_entry *entry;
	char name [MAX_NAMELEN];

	if (!root_irq_dir || (irq_desc[irq].handler == NULL) || irq_dir[irq])
		return;

	memset(name, 0, MAX_NAMELEN);
	sprintf(name, "%d", irq);

	/* create /proc/irq/1234 */
	irq_dir[irq] = proc_mkdir(name, root_irq_dir);

	/* create /proc/irq/1234/smp_affinity */
	entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);

	entry->nlink = 1;
	entry->data = (void *)irq;
	entry->read_proc = irq_affinity_read_proc;
	entry->write_proc = irq_affinity_write_proc;

	smp_affinity_entry[irq] = entry;
}

unsigned long prof_cpu_mask = -1;

void init_irq_proc (void)
{
	struct proc_dir_entry *entry;
	int i;

	/* create /proc/irq */
	root_irq_dir = proc_mkdir("irq", 0);

	/* create /proc/irq/prof_cpu_mask */
	entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);

	entry->nlink = 1;
	entry->data = (void *)&prof_cpu_mask;
	entry->read_proc = prof_cpu_mask_read_proc;
	entry->write_proc = prof_cpu_mask_write_proc;

	/*
	 * Create entries for all existing IRQs.
	 */
	for (i = 0; i < NR_IRQS; i++) {
		if (irq_desc[i].handler == NULL)
			continue;
		register_irq_proc(i);
	}
}

void no_action(int irq, void *dev, struct pt_regs *regs)
{
}
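
For orientation, the handlers that ppc_irq_dispatch_handler() ends up calling are the ones a driver installs with request_irq(). Below is a minimal, hypothetical sketch of that registration path on a 2.4-era kernel; the IRQ number, device name, and handler body are illustrative assumptions and are not taken from this file.

#include <linux/init.h>		/* __init / __exit */
#include <linux/interrupt.h>	/* request_irq(), free_irq() */
#include <asm/ptrace.h>		/* struct pt_regs */

#define MY_IRQ 5		/* hypothetical interrupt line for this example */

/* 2.4-style handler prototype, the same shape as no_action() above. */
static void my_irq_handler(int irq, void *dev_id, struct pt_regs *regs)
{
	/* Acknowledge the device and do the minimum work here;
	 * anything lengthy belongs in a bottom half or tasklet. */
}

static int __init my_driver_init(void)
{
	/* Pass SA_SHIRQ in the flags argument if the line may be shared. */
	if (request_irq(MY_IRQ, my_irq_handler, 0, "mydev", NULL))
		return -EBUSY;
	return 0;
}

static void __exit my_driver_cleanup(void)
{
	free_irq(MY_IRQ, NULL);
}

Once a handler is registered this way, do_IRQ() above fetches the pending line number from ppc_md.get_irq() and dispatches to it through the irq_desc[] table.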
