⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 rtl_core.c

📁 fsmlabs的real time linux的内核
💻 C
📖 第 1 页 / 共 2 页
字号:
/*
 * (C) Finite State Machine Labs Inc. 1999-2000 <business@fsmlabs.com>
 *
 * Released under the terms of GPL 2.
 * Open RTLinux makes use of a patented process described in
 * US Patent 5,995,745. Use of this process is governed
 * by the Open RTLinux Patent License which can be obtained from
 * www.fsmlabs.com/PATENT or by sending email to
 * licensequestions@fsmlabs.com
 */
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/timex.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/console.h>
#include <linux/irq.h>
#include <linux/config.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/irq.h>
#include <asm/segment.h>
#include <asm/ptrace.h>
#include <arch/constants.h>
#include <rtl_conf.h>
#include <rtl_printf.h>
#include <rtl_core.h>
#include <rtl_sync.h>
#include <rtl.h>
#include <reserve_cpu.h>
#include <rtl_debug.h>
#include <linux/irq.h>
MODULE_LICENSE("GPL v2");
/*
 * Core RTLinux interrupt-interception layer: hardware interrupts are
 * caught here first, dispatched immediately to registered real-time
 * handlers, or pended in soft-interrupt bitmaps for later delivery to
 * the (soft-masked) Linux kernel.
 */
/*
 * Thanks to the wonders of EXTRAVERSION in the kernel, poor naming,
 * bad Linux planning and plagues of frogs we have to have this and can't
 * trigger off KERNEL_VERSION() or the like.
 *    -- Cort
 */
/*
 * Compatibility shims for the kernel's softirq bookkeeping.  Only the
 * #else branch (the softirq_pending() interface) is compiled; the #if 0
 * branch kept the older softirq_state[] form for reference.
 */
#if 0
#ifndef softirq_active
#define softirq_active(x) (softirq_state[x].active)
#endif
#ifndef softirq_mask
#define softirq_mask(x) (softirq_state[x].mask)
#endif
#else
#ifndef softirq_active
#define softirq_active(x) (softirq_pending(x))
#endif
#ifndef softirq_mask
#define softirq_mask(x) (1)
#endif
#endif
/* do checks for and print errors when we stop handling interrupts */
#undef DEBUG_PENDING
#ifdef DEBUG_PENDING
/* count of consecutive intercepts that left a Linux irq pended unhandled */
static unsigned long global_pending = 0;
#ifdef __LOCAL_IRQS__
static unsigned long local_pending = 0;
#endif /* __LOCAL_IRQS__ */
#endif /* DEBUG_PENDING */
/*
 * Release a soft (Linux-side) interrupt previously claimed with
 * request_irq(); thin wrapper passing a NULL dev_id.
 */
void rtl_free_soft_irq(unsigned int irq)
{
	free_irq(irq, 0);
}
void rtl_hard_disable_irq(unsigned int ix);
void rtl_debug(void);
static inline void debug_test_enabled(char *s);
/* per-CPU record of the last caller that did a (soft) cli */
unsigned long last_cli[NR_CPUS];
#if __GNUC__ < 2 || (__GNUC__ == 2 && __GNUC_MINOR__ < 96)
/*
 * Workaround a bug that causes bad accesses when doing
 * __builtin_return_address() from leaf functions so we make
 * those functions non-leaf.
 *   -- Cort
 */
void non_leaf(void) {}
#define rtl_builtin_return_address(X) ({		\
	void *i = __builtin_return_address(X);	\
	non_leaf();					\
	i;						\
})
#define __builtin_return_address(x) rtl_builtin_return_address(x)
#endif /* gcc bug */
#ifdef CONFIG_RTL_TRACER
#ifndef rtl_trace
/* default no-op trace hook; a tracer module repoints rtl_trace */
void rtl_trace_default(int event_id, long event_data, void * eip) { }
void (*rtl_trace)(int event_id, long event_data, void * eip) = rtl_trace_default;
/* convenience wrapper that supplies the caller's address as the eip */
void rtl_trace2(int event_id, long event_data) { rtl_trace(event_id, event_data, __builtin_return_address(0)); }
#endif
#endif
/* unsigned rtl_reserved_cpumask = 0; */
/*
 * Emergency console print: write s directly to every enabled console
 * driver's write hook, with hard interrupts disabled and a private
 * spinlock held, bypassing printk buffering entirely.
 */
void conpr(const char *s)
{
	long flags;
	static spinlock_t rtl_conpr_lock = SPIN_LOCK_UNLOCKED;
	struct console *c;
	int len = strlen(s);
	rtl_hard_savef_and_cli(flags);
	rtl_spin_lock(&rtl_conpr_lock);
	c = console_drivers;
	while(c) {
		if ((c->flags & CON_ENABLED) && c->write)
			c->write(c, s, len);
		c = c->next;
	}
	rtl_spin_unlock(&rtl_conpr_lock);
	rtl_hard_restore_flags(flags);
}
/*
 * Print hexnum as exactly eight lowercase hex digits followed by a
 * space, via conpr().  Digits are built least-significant first into a
 * fixed 10-byte buffer (8 digits + ' ' + NUL).
 */
void conprn(const unsigned int hexnum)
{
	int i;
	unsigned int d;
	unsigned int n = hexnum;
	char s[10];
	s[9] = 0;
	s[8] = ' ';
	for (i=7; i>=0; i--) {
		d = n % 16;
		if (d < 10) {
			d += '0';
		} else {
			d += 'a' - 10;
		}
		s[i] = d;
		n = n / 16;
	}
	conpr(s);
}
/* assuming 255 global irqs and 31 max local vectors
   On the x86 we only have local irqs when we are smp.
   But in PowerPC and other we may have local irqs from
   on chip timers and other advanced technologies
   */
/* bit positions for flags, constants, and macros for global structure */
#define IRQ_NOT_VALID -1
#define IRQ_ARRAY_SIZE ((256/8)/sizeof(ulong)) /*256 global irqs */
#define IRQ_ZINIT {0}
/* all-ones initializer sized to IRQ_ARRAY_SIZE words for each word width */
#if BITS_PER_LONG == 32
#define IRQ_NZINIT {~0x0,~0x0,~0x0,~0x0,~0x0,~0x0,~0x0,~0x0}
#else
#define IRQ_NZINIT {~0x0,~0x0,~0x0,~0x0}
#endif
/* irq_toi: word index into a 256-bit bitmap; irq_top: bit within that word */
#if BITS_PER_LONG == 32
#define irq_toi(x)  ((x>>5)&7)
#define irq_top(x)  ((x)& 0x1fUL)
#else
#define irq_toi(x)  ((x>>6)&15)
#define irq_top(x)  ((x)& 0x3fUL)
#endif
/* reverse mapping: (bit position, word index) back to an irq number */
#define pi_toirq(p,i) ( (p) + ((i)*BITS_PER_LONG))
/* pend/unpend/test an irq in the global "waiting for Linux" bitmap */
#define G_PEND(f) set_bit(irq_top(f),&rtl_global.pending[irq_toi(f)])
#define G_UNPEND(f) clear_bit(irq_top(f),&rtl_global.pending[irq_toi(f)])
#define G_ISPEND(f) test_bit(irq_top(f),&rtl_global.pending[irq_toi(f)])
/* clear and set global enabled irq bits */
#define G_ENABLED(f) set_bit(irq_top(f),&rtl_global.soft_enabled[irq_toi(f)])
#define G_DISABLE(f) clear_bit(irq_top(f),&rtl_global.soft_enabled[irq_toi(f)])
#define G_ISENABLED(f) test_bit(irq_top(f),&rtl_global.soft_enabled[irq_toi(f)])
/* clear and set real time handlers (RealTimeHandlers) */
#define G_SET_RTH(f) set_bit(irq_top(f),&rtl_global.rtirq[irq_toi(f)])
#define G_CLEAR_RTH(f) clear_bit(irq_top(f),&rtl_global.rtirq[irq_toi(f)])
#define G_TEST_RTH(f) test_bit(irq_top(f),&rtl_global.rtirq[irq_toi(f)])
#define G_TEST_AND_SET_RTH(f) test_and_set_bit(irq_top(f),&rtl_global.rtirq[irq_toi(f)])
#define G_TEST_AND_CLEAR_RTH(f) test_and_clear_bit(irq_top(f),&rtl_global.rtirq[irq_toi(f)])
/* global flags: bit positions within rtl_global.flags */
#define g_rtl_started 0
#define g_pend_since_sti 1
#define g_initializing 2
#define g_initialized 3
#define G_SET(f) set_bit(f,&rtl_global.flags)
#define G_CLEAR(f) clear_bit(f,&rtl_global.flags)
#define G_TEST(f) test_bit(f,&rtl_global.flags)
#define G_TEST_AND_SET(f) test_and_set_bit(f,&rtl_global.flags)
#define G_TEST_AND_CLEAR(f) test_and_clear_bit(f,&rtl_global.flags)
/* Bit positions of flags for local structure and macros
   for operating on them */
#define l_idle 0
#define l_ienable 1
#define l_pend_since_sti 2
#define l_busy 3
#define l_psc_active 4
/* NOTE: the L_* macros expand a variable named cpu_id that must be in
   scope at the point of use (see DeclareAndInit below) */
#define L_SET(f) set_bit(f,&rtl_local[cpu_id].flags)
#define L_CLEAR(f) clear_bit(f,&rtl_local[cpu_id].flags)
#define L_TEST(f) test_bit(f,&rtl_local[cpu_id].flags)
#define L_TEST_AND_SET(f) test_and_set_bit(f,&rtl_local[cpu_id].flags)
#define L_PEND(f) set_bit(f,&rtl_local[cpu_id].pending)
#define L_UNPEND(f) clear_bit(f,&rtl_local[cpu_id].pending)
#define L_ISPEND(f) test_bit(f,&rtl_local[cpu_id].pending)
#define L_SET_RTH(f) set_bit(f,&rtl_local[cpu_id].rtirq)
#define L_CLEAR_RTH(f) clear_bit(f,&rtl_local[cpu_id].rtirq)
#define L_TEST_RTH(f) test_bit(f,&rtl_local[cpu_id].rtirq)
#define L_TEST_AND_SET_RTH(f) test_and_set_bit(f,&rtl_local[cpu_id].rtirq)
#define L_TEST_AND_CLEAR_RTH(f) test_and_clear_bit(f,&rtl_local[cpu_id].rtirq)
/* invoke the registered real-time handler for a global irq */
#define dispatch_rtl_handler(irq,r) rtl_global_handlers[irq].handler(irq,r)
/* TODO soft smp_processor_id doesn't work here????
   -- Michael */
/* both declare-and-bind the current CPU id for the L_* macros above */
#define DeclareAndInit(cpu_id)  unsigned int cpu_id = rtl_getcpuid()
#define HardDeclareAndInit(cpu_id)  unsigned int cpu_id = rtl_getcpuid()
/* The basic control data structures local and global*/
struct rtl_local rtl_local[NR_CPUS];
struct rtl_global
{
	spinlock_t hard_irq_controller_lock;	/* serializes irq-controller access in the intercept path */
	unsigned long flags;			/* g_* bits defined above */
	unsigned long pending[IRQ_ARRAY_SIZE];	/* irqs pended for Linux */
	unsigned long soft_enabled[IRQ_ARRAY_SIZE]; /* irqs Linux has soft-enabled */
	unsigned long rtirq[IRQ_ARRAY_SIZE];	/* irqs owned by real-time handlers */
};
struct rtl_global rtl_global ={ SPIN_LOCK_UNLOCKED,0,IRQ_ZINIT,IRQ_NZINIT,IRQ_ZINIT} ;
/* RTLinux interrupts */
struct rtl_global_handlers
{
        unsigned int (*handler)(unsigned int irq, struct pt_regs *r);
}rtl_global_handlers[IRQ_MAX_COUNT];
#ifdef __LOCAL_IRQS__
/*
 * Pend a per-CPU (local) vector for later soft delivery on the given
 * CPU: set its bit in rtl_local[cpu_id].pending and note that something
 * has been pended since the last soft-sti.
 */
void rtl_local_pend_vec(int vector,int cpu_id)
{
	int i = VECTOR_TO_LOCAL_PND(vector);
	L_PEND(i);
	L_SET(l_pend_since_sti);
}
/* test whether local pending bit ix is set on the current CPU */
int rtl_local_ispending_irq(int ix)
{
	HardDeclareAndInit(cpu_id);
	return L_ISPEND(ix);
}
#endif
extern void * rtl_code[];
#include "arch/arch.h"
/*
 * Debug aid: complain (rate-limited via do_first) if hard interrupts
 * are disabled at a point where the caller expects them enabled.
 */
static inline void debug_test_enabled(char *s)
{
	unsigned long flags;
	rtl_hard_save_flags(flags);
	if ( (ARCH_DEFINED_ENABLE && !(flags & ARCH_DEFINED_ENABLE))
	     || (!ARCH_DEFINED_ENABLE && (flags & ARCH_DEFINED_DISABLE)) )
	{
		do_first(10) {
			rtl_printf("%s: intrs hard disabled! called from %p\n",\
			   s, __builtin_return_address(0));
		}
	}
}
/* rtl_intercept intercepts global interrupts */
/* a pended irq may go straight to Linux only if this CPU is not busy,
   has soft interrupts enabled, and the irq itself is soft-enabled */
#define RUN_LINUX_HANDLER(irq) (G_ISPEND(irq) && !L_TEST(l_busy)\
       	&& L_TEST(l_ienable) && G_ISENABLED(irq))
/*
 * First-level handler for all intercepted global hardware interrupts.
 * Runs with hard interrupts off.  Real-time irqs (G_TEST_RTH) are
 * dispatched immediately (the controller lock is dropped around the RT
 * handler); everything else is pended for Linux.  If Linux can take the
 * irq right now (RUN_LINUX_HANDLER) it is unpended, soft-masked, and
 * dispatched via dispatch_linux_irq with hard interrupts re-enabled.
 */
intercept_t rtl_intercept(MACHDEPREGS regs)
{
	int irq;
	HardDeclareAndInit(cpu_id);

	rtl_spin_lock(&rtl_global.hard_irq_controller_lock);
	if ((irq = rtl_irq_controller_get_irq(regs)) != -1)
	{
		rtl_trace (RTL_TRACE_INTERCEPT, irq, (void *) instruction_pointer(MACHDEPREGS_PTR(regs)));
		rtl_irq_controller_ack(irq); /* may also mask, if needed */

		if(G_TEST_RTH(irq)){ /* this is a RT irq */
			/* if RT wants to share it pends */
			rtl_spin_unlock(&rtl_global.hard_irq_controller_lock);
			dispatch_rtl_handler(irq,MACHDEPREGS_PTR(regs));
			rtl_spin_lock(&rtl_global.hard_irq_controller_lock);
		} else {
			G_PEND(irq);
			G_SET(g_pend_since_sti);
		}
		if(RUN_LINUX_HANDLER(irq))
		{
			/* unpend so dispatch doesn't dispatch 2 times*/
			G_UNPEND(irq);
			rtl_soft_cli(); /* disable local soft interrupts */
			G_DISABLE(irq); /* disable this irq */
			rtl_spin_unlock(&rtl_global.hard_irq_controller_lock);
			rtl_hard_sti();
#ifdef DEBUG_PENDING
			global_pending = 0;
#endif /* DEBUG_PENDING */
			dispatch_linux_irq(MACHDEPREGS_PTR(regs),irq);
			rtl_trace2 (RTL_TRACE_INTERCEPT_EXIT, irq);
			RETURN_FROM_INTERRUPT_LINUX; /* goes via ret_from_intr */
		}
#ifdef DEBUG_PENDING
		/*
		 * If a Linux interrupt has been pended, and we haven't
		 * handled it, increment the global pending count.
		 *  -- Cort Dougan <cort@fsmlabs.com>
		 */
		if ( G_ISPEND(irq) && (global_pending++ > 20) )
		{
			rtl_hard_cli();
			printk("Too many global intrs pended irq %d from %08lx\n",
			       irq, instruction_pointer(regs));
			printk("didn't run handler because:\n"
			       "ispend %d !l_busy %d ienable %d genable %d\n",
			       G_ISPEND(irq), !L_TEST(l_busy),
			       L_TEST(l_ienable), G_ISENABLED(irq) );
			rtl_debug();
			while(1);
		}
#endif /* DEBUG_PENDING */

		/* get here if irq==-1 or if otherwise can't run linux handler */
	}
	rtl_spin_unlock(&rtl_global.hard_irq_controller_lock);
	rtl_trace2 (RTL_TRACE_INTERCEPT_EXIT, irq);
	RETURN_FROM_INTERRUPT;
}
#ifdef __LOCAL_IRQS__
/*
 * Pop the lowest-numbered pended local irq for this CPU (clearing its
 * pending bit) and translate it to a vector number, or return
 * IRQ_NOT_VALID if nothing is pended.  Only called in Linux context.
 */
static inline unsigned int get_lpended_irq(void)
{
	int i;
	DeclareAndInit(cpu_id); /* only called in Linux context */
	if(rtl_local[cpu_id].pending){
		i = ffz(~rtl_local[cpu_id].pending);
		clear_bit(i,&rtl_local[cpu_id].pending);
		i = LOCAL_PND_TO_VECTOR(i);
	}
	else i = IRQ_NOT_VALID;
	return i ;
}
/*
 * First-level handler for per-CPU (local) interrupts; the local analog
 * of rtl_intercept.  Runs hard-cli.  No lock needed because we are
 * already hard cli and only use local per-cpu structures.  The
 * rtl_irq_controller operations MUST vector to local only hardware or
 * must use spinlocks.
 */
intercept_t rtl_local_intercept(MACHDEPREGS regs)
{
	int pnd;
	HardDeclareAndInit(cpu_id);
	pnd = MACHDEPREGS_TO_PND(regs);
	rtl_trace (RTL_TRACE_LOCAL_INTERCEPT, LOCAL_PND_TO_VECTOR(pnd), (void *) instruction_pointer(MACHDEPREGS_PTR(regs)));
	rtl_local_irq_controller_ack();
	if(L_TEST_RTH(pnd)){ /* this is a RT irq */
		dispatch_rtl_local_handler(pnd,MACHDEPREGS_PTR(regs));/* if RT wants to share it pends */
	}
	else{
		L_PEND(pnd);
		L_SET(l_pend_since_sti);
	}

	/* can't hand it to Linux now: leave it pended and return */
	if(!L_ISPEND(pnd) || L_TEST(l_busy) || !L_TEST(l_ienable) )
	{
		rtl_trace2 (RTL_TRACE_LOCAL_INTERCEPT_EXIT, LOCAL_PND_TO_VECTOR(pnd));
#ifdef DEBUG_PENDING
		if ( L_ISPEND(pnd) && (local_pending++ > 20) )
		{
			printk("Too many local intrs pended pnd %d\n", pnd);
			rtl_debug();
			rtl_soft_sti();
			while(1);
		}
#endif /* DEBUG_PENDING */

		RETURN_FROM_LOCAL;
	}
	else
	{
		L_UNPEND(pnd); /* yes it is stupid, see above */
		rtl_soft_cli(); /* disable local soft interrupts */
		rtl_hard_sti();
#ifdef DEBUG_PENDING
		local_pending = 0;
#endif /* DEBUG_PENDING */
		dispatch_local_linux_irq(MACHDEPREGS_PTR(regs),pnd);
	}
	rtl_trace2 (RTL_TRACE_LOCAL_INTERCEPT_EXIT, LOCAL_PND_TO_VECTOR(pnd));
	RETURN_FROM_LOCAL_LINUX;
}
#endif
/* tools for soft_sti */
/* presumably selects and clears a pended global irq for soft-sti
   delivery -- body continues past this point; TODO confirm against the
   full source */
static inline unsigned int get_gpended_irq(void)
{
	unsigned int i, j;
	rtl_irqstate_t flags;
	unsigned long irqs;

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -