📄 rtl_core.c

📁 Kernel source from FSMLabs' Real-Time Linux (RTLinux)
💻 C
📖 Page 1 of 2
	rtl_spin_lock_irqsave(&rtl_global.hard_irq_controller_lock, flags);
	for (i = 0; i < IRQ_ARRAY_SIZE; i++) {
		irqs = rtl_global.pending[i] & rtl_global.soft_enabled[i];
		if (!irqs)
			continue;
		j = ffz(~irqs);
		clear_bit(j, &rtl_global.pending[i]);
		rtl_spin_unlock_irqrestore(&rtl_global.hard_irq_controller_lock, flags);
		return pi_toirq(j, i);
	}
	rtl_spin_unlock_irqrestore(&rtl_global.hard_irq_controller_lock, flags);
	return IRQ_NOT_VALID;
}

void rtl_soft_cli(void)
{
	DeclareAndInit(cpu_id);
	if (L_TEST(l_ienable))
		last_cli[cpu_id] = (unsigned long)__builtin_return_address(0);
	L_CLEAR(l_ienable);
}

void rtl_soft_sti_no_emulation(void)
{
	DeclareAndInit(cpu_id);
	if (!L_TEST(l_ienable))
		last_cli[cpu_id] = 0;
	L_SET(l_ienable);
}

void rtl_process_pending(void)
{
	int irq = 0;
	int last_irq = 0;
	DeclareAndInit(cpu_id);

	rtl_soft_cli(); /* disable soft interrupts! */
	do {
		irq = IRQ_NOT_VALID;
		G_CLEAR(g_pend_since_sti);
		L_CLEAR(l_pend_since_sti);
#ifdef __LOCAL_IRQS__
		while ((irq = get_lpended_irq()) != IRQ_NOT_VALID) {
#ifdef DEBUG_PENDING
			local_pending = 0;
#endif /* DEBUG_PENDING */
			soft_dispatch_local(irq);
		}
#endif
#ifdef __RTL_LOCALIRQS__
		if (!test_bit(cpu_id, &rtl_reserved_cpumask))
#endif
		{
			while ((irq = get_gpended_irq()) != IRQ_NOT_VALID) {
				last_irq = irq;
#ifdef DEBUG_PENDING
				global_pending = 0;
#endif /* DEBUG_PENDING */
				soft_dispatch_global(irq);
			}
		}
#ifdef __RTL_LOCALIRQS__
	} while (irq != IRQ_NOT_VALID ||
		 (!test_bit(cpu_id, &rtl_reserved_cpumask) && G_TEST(g_pend_since_sti)) ||
		 L_TEST(l_pend_since_sti));
#else
	} while (irq != IRQ_NOT_VALID || G_TEST(g_pend_since_sti) || L_TEST(l_pend_since_sti));
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,0)
	/* process any bottom halves */
	if (softirq_active(cpu_id) & softirq_mask(cpu_id))
		do_softirq();
#endif
}

void rtl_soft_sti(void)
{
	DeclareAndInit(cpu_id);
	/* debug_test_enabled("rtl_soft_sti"); */
	if (L_TEST(l_pend_since_sti) || G_TEST(g_pend_since_sti)
#if LINUX_VERSION_CODE >= 0x020300
	    || (softirq_active(cpu_id) & softirq_mask(cpu_id))
#endif
	   )
		rtl_process_pending();
	rtl_soft_sti_no_emulation();
}

void rtl_soft_save_flags(unsigned long *x)
{
	DeclareAndInit(cpu_id);
	*x = (L_TEST(l_ienable) ? ARCH_DEFINED_ENABLE : ARCH_DEFINED_DISABLE);
}

void rtl_soft_restore_flags(unsigned long x)
{
	if (x == ARCH_DEFINED_ENABLE)
		rtl_soft_sti();
	else
		rtl_soft_cli();
}

void rtl_soft_save_and_cli(unsigned long *x)
{
	extern unsigned long last_cli[NR_CPUS];
	DeclareAndInit(cpu_id);

	rtl_soft_save_flags(x);
	rtl_soft_cli();
	/*
	 * If we make a sti->cli transition record the
	 * caller so we have some useful data instead of the
	 * address of this function.
	 *   -- Cort
	 */
	if ((ARCH_DEFINED_ENABLE && (*x & ARCH_DEFINED_ENABLE))
	    || (!ARCH_DEFINED_ENABLE && !(*x & ARCH_DEFINED_DISABLE)))
		last_cli[cpu_id] = (ulong)__builtin_return_address(0);
}

void rtl_soft_local_irq_save(unsigned long *x)
{
	rtl_soft_save_flags(x);
	rtl_soft_cli();
}

void rtl_soft_local_irq_restore(unsigned long x)
{
	rtl_soft_restore_flags(x);
}

void rtl_virt_disable(unsigned int irq)
{
	G_DISABLE(irq);
}

void rtl_virt_enable(unsigned int irq)
{
	rtl_irqstate_t flags;

	rtl_no_interrupts(flags);
	G_ENABLED(irq);
	if (!G_ISPEND(irq)) {
		rtl_hard_enable_irq(irq);
		rtl_restore_interrupts(flags);
	} else {
		HardDeclareAndInit(cpu_id);
		rtl_restore_interrupts(flags);
		if (L_TEST(l_ienable))
			__sti(); /* emulate the bastard */
	}
}

/* these are exported so that they can be called by rt drivers */
void rtl_global_pend_irq(int ix) { G_PEND(ix); G_SET(g_pend_since_sti); }
int rtl_global_ispending_irq(int ix) { return G_ISPEND(ix); }

void rtl_hard_enable_irq(unsigned int ix)
{
	rtl_irqstate_t flags;

	rtl_no_interrupts(flags);
	rtl_spin_lock(&rtl_global.hard_irq_controller_lock);
	rtl_irq_controller_enable(ix);
	rtl_spin_unlock(&rtl_global.hard_irq_controller_lock);
	rtl_restore_interrupts(flags);
}

void rtl_hard_disable_irq(unsigned int ix)
{
	rtl_irqstate_t flags;

	rtl_no_interrupts(flags);
	rtl_spin_lock(&rtl_global.hard_irq_controller_lock);
	rtl_irq_controller_disable(ix);
	rtl_spin_unlock(&rtl_global.hard_irq_controller_lock);
	rtl_restore_interrupts(flags);
}

/* these are used by schedulers to make sure that Linux interrupts
   do not advance and delay RT tasks.
   Both need to be called with irqs disabled */
void rtl_make_rt_system_active(void)
{
	HardDeclareAndInit(cpu_id);
	L_SET(l_busy);
}

void rtl_make_rt_system_idle(void)
{
	HardDeclareAndInit(cpu_id);
	L_CLEAR(l_busy);
}

unsigned int rtl_rt_system_is_idle(void)
{
	HardDeclareAndInit(cpu_id);
	return !L_TEST(l_busy);
}

void rtl_make_psc_active(void)
{
	HardDeclareAndInit(cpu_id);
	L_SET(l_psc_active);
}

void rtl_make_psc_inactive(void)
{
	HardDeclareAndInit(cpu_id);
	L_CLEAR(l_psc_active);
}

int rtl_is_psc_active(void)
{
	HardDeclareAndInit(cpu_id);
	return L_TEST(l_psc_active);
}

/* requesting and freeing rt interrupts */
/* TODO resolve the smp synchronization problem here */
int rtl_request_global_irq(unsigned int irq,
			   unsigned int (*handler)(unsigned int, struct pt_regs *))
{
	if (!G_TEST_RTH(irq)) {
		rtl_global_handlers[irq].handler = handler;
		G_SET_RTH(irq);
		mb();
		if (rtl_global_handlers[irq].handler == handler) {
			rtl_hard_enable_irq(irq);
			return 0;
		}
	}
	return -EBUSY;
}

int rtl_free_global_irq(unsigned int irq)
{
	if (!G_TEST_AND_CLEAR_RTH(irq))
		return -EINVAL;
	return 0;
	/* don't need to clear the handler, because it will never
	   be invoked -- see rtl_intercept. If we wanted to clear the handler
	   we would have a problem with synchronization in the smp case */
}

MODULE_AUTHOR("FSMLabs <support@fsmlabs.com>");
MODULE_DESCRIPTION("RTLinux Main Module");

int quiet;
MODULE_PARM(quiet, "i");

int init_module(void)
{
	int ret;

	if (arch_takeover()) {
		printk("arch_takeover failed\n");
		return -1;
	}
	if (!quiet)
		printk("RTLinux Extensions Loaded (http://www.fsmlabs.com/)\n");
	ret = rtl_printf_init();
	if (ret < 0)
		return ret;
	rtl_soft_sti();
	rtlinux_suspend_linux_init();
	return 0;
}

void cleanup_module(void)
{
	HardDeclareAndInit(cpu_id);

	rtl_printf_cleanup();
	/*
	 * Process any pending interrupts, _hard_ disable
	 * then go on.  This way, we don't get any interrupts
	 * while we're vulnerable and giving up the architecture.
	 *   -- Cort
	 *
	 *   This works for the current processor only -- Michael
	 */
	rtl_hard_cli();
	rtl_soft_sti_no_emulation();
	do {
		rtl_hard_sti();
		rtl_process_pending();
		rtl_hard_cli();
	} while (G_TEST(g_pend_since_sti) || L_TEST(l_pend_since_sti));
	arch_giveup();
	rtlinux_suspend_linux_cleanup();
	rtl_hard_sti();
}

spinlock_t debug_lock = SPIN_LOCK_UNLOCKED;

void rtl_debug(void)
{
	int i;
	unsigned long flags, xxx_last_cli[NR_CPUS];

	memcpy((void *)xxx_last_cli, (void *)last_cli, sizeof(last_cli));
#define WIDTH(x) ((int)(sizeof(x)*2))

	rtl_spin_lock(&debug_lock);
	rtl_hard_save_flags(flags);
	printk("RTL: cpu %d\n", rtl_getcpuid());
	if (ARCH_DEFINED_ENABLE == 0)
		printk("RTL: hard flags %0*lx %s\n", WIDTH(flags), flags,
		       (flags & ARCH_DEFINED_DISABLE) ? "disabled" : "enabled");
	else
		printk("RTL: hard flags %0*lx %s\n", WIDTH(flags), flags,
		       (flags & ARCH_DEFINED_ENABLE) ? "enabled" : "disabled");

	printk("RTL: global flags %0*lx %s%s\n",
	       WIDTH(rtl_global.flags), rtl_global.flags,
	       ((rtl_global.flags >> g_pend_since_sti) & 1) ? "pend_since_sti " : "",
	       ((rtl_global.flags >> g_initializing) & 1) ? "initializing " : "");
	printk("RTL: global pending ");
	for (i = 0; i < IRQ_ARRAY_SIZE; i++)
		printk("%0*lx ", WIDTH(rtl_global.pending[i]),
		       rtl_global.pending[i]);
	printk("\n");
	printk("RTL: global enabled ");
	for (i = 0; i < IRQ_ARRAY_SIZE; i++)
		printk("%0*lx ", WIDTH(rtl_global.soft_enabled[i]),
		       rtl_global.soft_enabled[i]);
	printk("\n");
	for (i = 0; i < rtl_num_cpus(); i++) {
		int cpu = cpu_logical_map(i);

		printk("RTL: cpu%d "
#ifdef __LOCAL_IRQS__
		       "local pending %08x "
#endif
		       "flags: %08x %s%s%s last_cli: %0*lx\n", cpu,
		       rtl_local[cpu].flags,
#ifdef __LOCAL_IRQS__
		       rtl_local[cpu].pending,
#endif
		       ((rtl_local[cpu].flags >> l_ienable) & 1) ? "ienabled " : "disabled ",
		       ((rtl_local[cpu].flags >> l_pend_since_sti) & 1) ? "pend_since_sti " : "",
		       ((rtl_local[cpu].flags >> l_busy) & 1) ? "busy" : "",
		       WIDTH(xxx_last_cli[i]), xxx_last_cli[i]);
	}
	rtl_spin_unlock(&debug_lock);
#undef WIDTH
}

void rtl_soft_irq_type(int unused, void *junk, struct pt_regs *garbage)
{
	printk("rtl_soft_irq_type(): shouldn't have been called!\n");
}

/* TODO VY: needs some synchronization here. Doesn't request_irq also
   have a problem? */
int rtl_get_soft_irq(void (*handler)(int, void *, struct pt_regs *),
		     const char *devname)
{
	int i;
	int debug = 0;

	for (i = RTL_NR_IRQS - 1; i > 15; i--) {
		if (!(debug = request_irq(i, handler, 0, devname, 0))) {
			rtl_virt_enable(i);
			/* This needs to be done this way since having the 'continue'
			 * below causes egcs-2.90.29 with target MIPS (host PPC)
			 * to emit code that will execute both the true _and_ the
			 * false case of the above if.
			 *  -- Cort
			 */
			goto bad_mips_gcc;
		}
	}
	printk("RTL_GET_SOFT_IRQ %d: request=%d\n", i, debug);
	return -1;
bad_mips_gcc:
	return i;
}

/* compatibility irq handler table */
#include <asm/rt_irq.h>

typedef void (*RTL_V1_HANDLER)(void);
RTL_V1_HANDLER rtl_v1_irq[NR_IRQS];

extern unsigned int rtl_compat_irq_handler(unsigned int irq, struct pt_regs *regs)
{
	rtl_v1_irq[irq]();
	rtl_hard_enable_irq(irq);
	return 0;
}

int request_RTirq(unsigned int irq, void (*handler)(void))
{
	int ret;

	rtl_v1_irq[irq] = handler;
	ret = rtl_request_global_irq(irq, rtl_compat_irq_handler);
	if (ret)
		return ret;
	rtl_hard_enable_irq(irq);
	return 0;
}
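For context, a minimal usage sketch (not part of rtl_core.c): how an RT driver module might register a hard real-time handler through the exported rtl_request_global_irq()/rtl_free_global_irq() interface shown above. The IRQ number, the handler body, and the <rtl.h> include are illustrative assumptions; only the function signatures visible in the listing are relied on.

/* Hypothetical example module -- not part of rtl_core.c.
 * Assumptions: IRQ 7 is free for RT use, and <rtl.h> declares the
 * rtl_request_global_irq()/rtl_free_global_irq()/rtl_hard_enable_irq()
 * functions defined in the listing above. */
#include <linux/module.h>
#include <rtl.h>

static unsigned int my_rt_handler(unsigned int irq, struct pt_regs *regs)
{
	/* hard real-time work goes here */
	rtl_hard_enable_irq(irq);	/* re-enable the line, as rtl_compat_irq_handler does */
	return 0;
}

int init_module(void)
{
	/* returns 0 on success, -EBUSY if a handler is already installed */
	return rtl_request_global_irq(7, my_rt_handler);
}

void cleanup_module(void)
{
	rtl_free_global_irq(7);
}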
