⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 sched_lxrt.c

📁 rtai-3.1-test3的源代码(Real-Time Application Interface)
💻 C
📖 第 1 页 / 共 5 页
字号:
/*
 * RTAI LXRT scheduler support (chunk of sched_lxrt.c).
 *
 * NOTE(review): this chunk was recovered from a listing that collapsed the
 * file onto a few very long lines; it has been re-wrapped here with every
 * code token left byte-identical.  Symbols such as rt_smp_preempt_always,
 * rt_linux_task, LOCK_LINUX, llimd, rdtsc, oneshot_timer, rt_times, etc.
 * are defined elsewhere in this file or in RTAI headers.
 */

/* Tail of a definition whose header lies above this chunk; it reports the
 * scheduler flavour identifier RT_SCHED_MUP. */
{	return RT_SCHED_MUP;}

/*
 * rt_preempt_always - set the "preempt always" flag on every possible RT
 * CPU.  Any nonzero yes_no is normalized to 1.
 */
void rt_preempt_always(int yes_no)
{
	int cpuid;
	for (cpuid = 0; cpuid < NR_RT_CPUS; cpuid++) {
		rt_smp_preempt_always[cpuid] = yes_no ? 1 : 0;
	}
}

/* Per-CPU variant of rt_preempt_always.  No bounds check on cpuid. */
void rt_preempt_always_cpuid(int yes_no, unsigned int cpuid)
{
	rt_smp_preempt_always[cpuid] = yes_no ? 1 : 0;
}

/*
 * rt_set_task_trap_handler - install a per-task handler for trap vector
 * vec and return the previously installed one.  Returns
 * (RT_TRAP_HANDLER)-EINVAL when task is NULL or vec is out of range.
 */
RT_TRAP_HANDLER rt_set_task_trap_handler( RT_TASK *task, unsigned int vec, RT_TRAP_HANDLER handler)
{
	RT_TRAP_HANDLER old_handler;
	if (!task || (vec >= RTAI_NR_TRAPS)) {
		return (RT_TRAP_HANDLER) -EINVAL;
	}
	old_handler = task->task_trap_handler[vec];
	task->task_trap_handler[vec] = handler;
	return old_handler;
}

/* Module parameters: timer mode, preemption policy, timer tunings
 * (MODULE_PARM is the Linux 2.4 module-parameter macro). */
static int OneShot = ONE_SHOT;
MODULE_PARM(OneShot, "i");

static int PreemptAlways = PREEMPT_ALWAYS;
MODULE_PARM(PreemptAlways, "i");

static int Latency = TIMER_LATENCY;
MODULE_PARM(Latency, "i");

static int SetupTimeTIMER = TIMER_SETUP_TIME;
MODULE_PARM(SetupTimeTIMER, "i");

extern void krtai_objects_release(void);

/*
 * frstk_srq_handler - drain the "free stack" service-request ring,
 * releasing each queued buffer with sched_free().  The mask arithmetic
 * assumes MAX_FRESTK_SRQ is a power of two.
 */
static void frstk_srq_handler(void)
{
	while (frstk_srq.out != frstk_srq.in) {
		sched_free(frstk_srq.mp[frstk_srq.out]);
		frstk_srq.out = (frstk_srq.out + 1) & (MAX_FRESTK_SRQ - 1);
	}
}

/* Do-nothing placeholder installed in unused rt_fun_lxrt slots. */
static void nihil(void) { };

/* Dispatch table for services callable from user space through LXRT. */
struct rt_fun_entry rt_fun_lxrt[MAX_LXRT_FUN];

/*
 * reset_rt_fun_entries - return the rt_fun_lxrt slots named by the
 * NULL-terminated entry array to the { 1, nihil } placeholder state.
 * Out-of-range indices are reported and skipped.
 */
void reset_rt_fun_entries(struct rt_native_fun_entry *entry)
{
	while (entry->fun.fun) {
		if (entry->index >= MAX_LXRT_FUN) {
			/* NOTE(review): "ALLOWD" typo is in the original message text. */
			rt_printk("*** RESET ENTRY %d FOR USER SPACE CALLS EXCEEDS ALLOWD TABLE SIZE %d, NOT USED ***\n", entry->index, MAX_LXRT_FUN);
		} else {
			rt_fun_lxrt[entry->index] = (struct rt_fun_entry){ 1, nihil };
		}
		entry++;
	}
}

/*
 * set_rt_fun_entries - install a NULL-terminated array of native entries
 * into the rt_fun_lxrt dispatch table, reporting duplicate or
 * out-of-range indices.
 *
 * NOTE(review): rt_fun_lxrt[entry->index] is read BEFORE the index is
 * range-checked, so an out-of-range index causes an out-of-bounds read;
 * the bounds check should come first.
 * NOTE(review): on error, reset_rt_fun_entries() receives the
 * already-advanced pointer (now at the terminator), so it resets
 * nothing; the original head of the array was probably intended.
 * NOTE(review): returns 0 even when error was set to -1.
 */
int set_rt_fun_entries(struct rt_native_fun_entry *entry)
{
	int error;
	error = 0;
	while (entry->fun.fun) {
		if (rt_fun_lxrt[entry->index].fun != nihil) {
			rt_printk("*** SUSPICIOUS ENTRY ASSIGNEMENT FOR USER SPACE CALL AT %d, DUPLICATED INDEX OR REPEATED INITIALIZATION ***\n", entry->index);
			error = -1;
		} else if (entry->index >= MAX_LXRT_FUN) {
			rt_printk("*** ASSIGNEMENT ENTRY %d FOR USER SPACE CALLS EXCEEDS ALLOWED TABLE SIZE %d, NOT USED ***\n", entry->index, MAX_LXRT_FUN);
			error = -1;
		} else {
			rt_fun_lxrt[entry->index] = entry->fun;
		}
		entry++;
	}
	if (error) {
		reset_rt_fun_entries(entry);
	}
	return 0;
}

/* Return the handler currently installed at the given LXRT table index.
 * No bounds check on index. */
void *rt_get_lxrt_fun_entry(int index) {
	return rt_fun_lxrt[index].fun;
}

/*
 * lxrt_killall - stop the RT timer and delete every task chained off
 * rt_linux_task.  Used from the reboot notifier below.
 */
static void lxrt_killall (void)
{
	int cpuid;
	stop_rt_timer();
	for (cpuid = 0; cpuid < NR_RT_CPUS; cpuid++)
		while (rt_linux_task.next)
			rt_task_delete(rt_linux_task.next);
}

/*
 * lxrt_notify_reboot - reboot-notifier callback: on shutdown, halt or
 * power-off kill all real-time tasks.  nb and p are unused.
 */
static int lxrt_notify_reboot (struct notifier_block *nb, unsigned long event, void *p)
{
	switch (event)
	{
	case SYS_DOWN:
	case SYS_HALT:
	case SYS_POWER_OFF:
		/* FIXME: this is far too late. */
		printk("LXRT: REBOOT NOTIFIED -- KILLING TASKS\n");
		lxrt_killall();
	}
	return NOTIFY_DONE;
}

/* ++++++++++++++++++++++++++ TIME CONVERSIONS +++++++++++++++++++++++++++++ */

/*
 * count2nano - convert timer counts to nanoseconds, preserving sign.
 * The scale depends on the timer mode: CPU clock frequency when the
 * oneshot timer is active, TIMER_FREQ in periodic mode.
 * NOTE(review): oneshot_timer_cpuid is presumably a per-CPU macro
 * defined elsewhere in this file -- confirm against its definition.
 */
RTIME count2nano(RTIME counts)
{
	int sign;

	if (counts > 0) {
		sign = 1;
	} else {
		sign = 0;
		counts = - counts;
	}
	counts = oneshot_timer_cpuid ?
		 llimd(counts, 1000000000, tuned.cpu_freq):
		 llimd(counts, 1000000000, TIMER_FREQ);
	return sign ? counts : - counts;
}

/* nano2count - inverse of count2nano: nanoseconds to timer counts,
 * preserving sign, with the same mode-dependent scale. */
RTIME nano2count(RTIME ns)
{
	int sign;

	if (ns > 0) {
		sign = 1;
	} else {
		sign = 0;
		ns = - ns;
	}
	ns =  oneshot_timer_cpuid ?
	      llimd(ns, tuned.cpu_freq, 1000000000) :
	      llimd(ns, TIMER_FREQ, 1000000000);
	return sign ? ns : - ns;
}

/*
 * count2nano_cpuid - per-CPU counts-to-nanoseconds conversion.
 * NOTE(review): oneshot_timer is presumably a macro expanding through
 * the cpuid argument; cpuid appears otherwise unused here -- confirm.
 */
RTIME count2nano_cpuid(RTIME counts, unsigned int cpuid)
{
	int sign;

	if (counts > 0) {
		sign = 1;
	} else {
		sign = 0;
		counts = - counts;
	}
	counts = oneshot_timer ?
		 llimd(counts, 1000000000, tuned.cpu_freq):
		 llimd(counts, 1000000000, TIMER_FREQ);
	return sign ? counts : - counts;
}

/* nano2count_cpuid - per-CPU nanoseconds-to-counts conversion (see the
 * cpuid/macro note on count2nano_cpuid above). */
RTIME nano2count_cpuid(RTIME ns, unsigned int cpuid)
{
	int sign;

	if (ns > 0) {
		sign = 1;
	} else {
		sign = 0;
		ns = - ns;
	}
	ns =  oneshot_timer ?
	      llimd(ns, tuned.cpu_freq, 1000000000) :
	      llimd(ns, TIMER_FREQ, 1000000000);
	return sign ? ns : - ns;
}

/* +++++++++++++++++++++++++++++++ TIMINGS ++++++++++++++++++++++++++++++++++ */

/*
 * rt_get_time - current time on the executing CPU, in timer counts:
 * the TSC in oneshot mode, the periodic tick count otherwise.
 */
RTIME rt_get_time(void)
{
	int cpuid;
	return rt_smp_oneshot_timer[cpuid = hard_cpu_id()] ? rdtsc() : rt_smp_times[cpuid].tick_time;
}

/* rt_get_time_cpuid - as rt_get_time but for an explicit CPU; relies on
 * the oneshot_timer / rt_times macros resolving through cpuid. */
RTIME rt_get_time_cpuid(unsigned int cpuid)
{
	return oneshot_timer ? rdtsc(): rt_times.tick_time;
}

/* rt_get_time_ns - current time on the executing CPU in nanoseconds. */
RTIME rt_get_time_ns(void)
{
	int cpuid = hard_cpu_id();
	return oneshot_timer ? llimd(rdtsc(), 1000000000, tuned.cpu_freq) :
	    		       llimd(rt_times.tick_time, 1000000000, TIMER_FREQ);
}

/* rt_get_time_ns_cpuid - per-CPU variant of rt_get_time_ns. */
RTIME rt_get_time_ns_cpuid(unsigned int cpuid)
{
	return oneshot_timer ? llimd(rdtsc(), 1000000000, tuned.cpu_freq) :
			       llimd(rt_times.tick_time, 1000000000, TIMER_FREQ);
}

/* rt_get_cpu_time_ns - TSC converted to nanoseconds, regardless of
 * timer mode. */
RTIME rt_get_cpu_time_ns(void)
{
	return llimd(rdtsc(), 1000000000, tuned.cpu_freq);
}

/* +++++++++++++++++++++++++++ SECRET BACK DOORS ++++++++++++++++++++++++++++ */

/*
 * rt_get_base_linux_task - fill the caller's array with the per-CPU
 * Linux placeholder tasks and return the base of that table.  The
 * caller must provide room for num_online_cpus() entries.
 */
RT_TASK *rt_get_base_linux_task(RT_TASK **base_linux_tasks)
{
	int cpuid;
	for (cpuid = 0; cpuid < num_online_cpus(); cpuid++) {
		base_linux_tasks[cpuid] = rt_smp_linux_task + cpuid;
	}
	return rt_smp_linux_task;
}

/* rt_alloc_dynamic_task - allocate an RT_TASK from the RTAI heap, or
 * NULL when RTAI's allocator is not configured in. */
RT_TASK *rt_alloc_dynamic_task(void)
{
#ifdef CONFIG_RTAI_MALLOC
	return rt_malloc(sizeof(RT_TASK)); // For VC's, proxies and C++ support.
#else
	return NULL;
#endif
}

/* +++++++++++++++++++++++++++ WATCHDOG SUPPORT ++++++++++++++++++++++++++++ */

/*
 * rt_register_watchdog - register wd as the watchdog for cpuid.  Fails
 * with (RT_TASK**)-EBUSY if a watchdog is already installed or any
 * other task is running at RT_SCHED_HIGHEST_PRIORITY; returns 0-cast on
 * success (callers test the value, not a pointed-to object).
 */
RT_TASK **rt_register_watchdog(RT_TASK *wd, int cpuid)
{
	RT_TASK *task;
	if (lxrt_wdog_task[cpuid]) return (RT_TASK**) -EBUSY;
	task = &rt_linux_task;
	while ((task = task->next)) {
		if (task != wd && task->priority == RT_SCHED_HIGHEST_PRIORITY) {
			return (RT_TASK**) -EBUSY;
		}
	}
	lxrt_wdog_task[cpuid] = wd;
	return (RT_TASK**) 0;
}

/* rt_deregister_watchdog - clear the slot only if wd is the currently
 * registered watchdog for cpuid. */
void rt_deregister_watchdog(RT_TASK *wd, int cpuid)
{
	if (lxrt_wdog_task[cpuid] != wd) return;
	lxrt_wdog_task[cpuid] = NULL;
}

/* +++++++++++++++ SUPPORT FOR LINUX TASKS AND KERNEL THREADS +++++++++++++++ */

//#define ECHO_SYSW
#ifdef ECHO_SYSW
#define SYSW_DIAG_MSG(x) x
#else
#define SYSW_DIAG_MSG(x)
#endif

static RT_TRAP_HANDLER lxrt_old_trap_handler;

#ifdef CONFIG_RTAI_FPU_SUPPORT
/* init_fpu - give tsk a clean FPU context and mark it as using math. */
static void init_fpu(struct task_struct *tsk)
{
	init_xfpu();
	tsk->used_math = 1;
	set_tsk_used_fpu(tsk);
}
#else
static void init_fpu(struct task_struct *tsk) { }
#endif

/* Marshalling area for up to 10 int arguments plus the target function;
 * rt_schedule_soft() unpacks it from rt_task->fun_args. */
struct fun_args { int a0; int a1; int a2; int a3; int a4; int a5; int a6; int a7; int a8; int a9; long long (*fun)(int, ...); };

/*
 * rt_schedule_soft - run a soft real-time service call in the context of
 * rt_task: lift its priority into the soft band, wait (hard-sleeping)
 * until READY is its only state bit, enqueue it as the current task,
 * invoke the marshalled function, then dequeue, restore priority and
 * yield back to Linux.  Ordering of the cli/sti and LOCK/UNLOCK_LINUX
 * pairs is load-bearing -- do not reorder.
 */
void rt_schedule_soft(RT_TASK *rt_task)
{
	struct fun_args *funarg;
	int cpuid, priority;

	if ((priority = rt_task->priority) < BASE_SOFT_PRIORITY) {
		rt_task->priority += BASE_SOFT_PRIORITY;
	}
	rt_global_cli();
	rt_task->state |= RT_SCHED_READY;
	while (rt_task->state != RT_SCHED_READY) {
		current->state = TASK_HARDREALTIME;
		rt_global_sti();
		schedule();
		rt_global_cli();
	}
	LOCK_LINUX(cpuid = hard_cpu_id());
	enq_soft_ready_task(rt_task);
	rt_smp_current[cpuid] = rt_task;
	rt_global_sti();
	funarg = (void *)rt_task->fun_args;
	rt_task->retval = funarg->fun(funarg->a0, funarg->a1, funarg->a2, funarg->a3, funarg->a4, funarg->a5, funarg->a6, funarg->a7, funarg->a8, funarg->a9);
	rt_global_cli();
	rt_task->priority = priority;
	rt_task->state = 0;
	/* Unlink from the ready list. */
	(rt_task->rprev)->rnext = rt_task->rnext;
	(rt_task->rnext)->rprev = rt_task->rprev;
	rt_smp_current[cpuid] = &rt_linux_task;
	rt_schedule();
	UNLOCK_LINUX(cpuid);
	rt_global_sti();
	schedule();
}

/*
 * fast_schedule - hard-switch from the per-CPU booster kthread to
 * new_task's kernel thread, but only when READY becomes its sole state
 * bit.  Restores FPU state for the previous thread if it used math.
 * Caller holds the global lock; it is released here before LOCK_LINUX.
 */
static inline void fast_schedule(RT_TASK *new_task)
{
	struct task_struct *prev;
	int cpuid;

	if (((new_task)->state |= RT_SCHED_READY) == RT_SCHED_READY) {
		enq_ready_task(new_task);
		rt_release_global_lock();
		LOCK_LINUX(cpuid = hard_cpu_id());
		rt_linux_task.lnxtsk = prev = kthreadb[cpuid];
#define rt_current (rt_smp_current[cpuid])
		UEXECTIME();
#undef rt_current
		rt_smp_current[cpuid] = new_task;
		prev = lxrt_context_switch(prev, new_task->lnxtsk,cpuid);
		if (prev->used_math) {
			restore_fpu(prev);
		}
		UNLOCK_LINUX(cpuid);
	}
}

/*
 * lxrt_migration_handler - RTAI-domain virq handler: drain this CPU's
 * klistb ring and hard-schedule each queued task under the global lock.
 */
static void lxrt_migration_handler (unsigned virq)
{
	struct klist_t *klistp = &klistb[hard_cpu_id()];
	RT_TASK *rt_task;

	while (klistp->out != klistp->in)
	{
		rt_task = klistp->task[klistp->out];
		klistp->out = (klistp->out + 1) & (MAX_WAKEUP_SRQ - 1);
		rt_global_cli();
		fast_schedule(rt_task);
		rt_global_sti();
	}
}

/*
 * kthread_b - per-CPU "booster" kernel thread.  After pinning itself to
 * cpuid and raising to SCHED_FIFO, it sleeps uninterruptibly; each
 * wakeup escalates the pending migration work into the RTAI domain via
 * lxrt_migration_virq.  Exits when endkthread is set.
 */
static void kthread_b(int cpuid)
{
	sprintf(current->comm, "RTAI_KTHRD_B:%d", cpuid);
	put_current_on_cpu(cpuid);
	kthreadb[cpuid] = current;
	rtai_set_linux_task_priority(current, SCHED_FIFO, KTHREAD_B_PRIO);
	sigfillset(&current->blocked);
	up(&resem[cpuid]);
	while (!endkthread) {
		current->state = TASK_UNINTERRUPTIBLE;
		schedule();
		/* Escalate the request to the RTAI domain */
		adeos_trigger_irq(lxrt_migration_virq);
	}
	kthreadb[cpuid] = 0;
}

/* Per-CPU reservoir of pre-spawned hard kernel threads. */
static RT_TASK thread_task[NR_RT_CPUS];
static int rsvr_cnt[NR_RT_CPUS];

#define RESERVOIR 4
static int Reservoir = RESERVOIR;
MODULE_PARM(Reservoir, "i");

static int taskidx[NR_RT_CPUS];
static struct task_struct **taskav[NR_RT_CPUS];

/*
 * __get_kthread - pop a cached kernel thread from cpuid's reservoir
 * under the global lock; returns 0 (NULL) when the reservoir is empty.
 */
static struct task_struct *__get_kthread(int cpuid)
{
	unsigned long flags;
	struct task_struct *p;

	flags = rt_global_save_flags_and_cli();
	if (taskidx[cpuid] > 0) {
		p = taskav[cpuid][--taskidx[cpuid]];
		rt_global_restore_flags(flags);
		return p;
	}
	rt_global_restore_flags(flags);
	return 0;
}

/*
 * thread_fun - body of a reservoir hard kernel thread: pins itself to
 * cpuid, registers as the per-CPU RT_TASK, steals itself from Linux and
 * suspends.  When later resumed for use it renames itself ('U' prefix),
 * timestamps exectime[1] and invokes the user function/argument stashed
 * in max_msg_size[0]/[1], then suspends again.
 */
static void thread_fun(int cpuid)
{
	void steal_from_linux(RT_TASK *);
	void give_back_to_linux(RT_TASK *);
	RT_TASK *task;

	rtai_set_linux_task_priority(current, SCHED_FIFO, KTHREAD_F_PRIO);
	sprintf(current->comm, "F:HARD:%d:%d", cpuid, ++rsvr_cnt[cpuid]);
	current->this_rt_task[0] = task = &thread_task[cpuid];
	current->this_rt_task[1] = task->lnxtsk = current;
	sigfillset(&current->blocked);
	put_current_on_cpu(cpuid);
	steal_from_linux(task);
	init_fpu(current);
	rt_task_suspend(task);
	current->comm[0] = 'U';
	task = (RT_TASK *)current->this_rt_task[0];
	task->exectime[1] = rdtsc();
	((void (*)(int))task->max_msg_size[0])(task->max_msg_size[1]);
	rt_task_suspend(task);
}

/*
 * kthread_m - per-CPU manager kernel thread.  Drains klistm[cpuid]:
 * entries > 1 are dead threads to bury (TASK_ZOMBIE + waitpid); the 0/1
 * sentinel entries ask for the reservoir to be refilled with freshly
 * spawned thread_fun threads, then either resume a hard task or release
 * the requester's semaphore.
 * NOTE(review): in the refill branch klistp->out is advanced, the NEXT
 * ring slot is read for rt_task_resume, and out is advanced again after
 * the if/else -- this looks like a two-slot (flag, task) ring protocol;
 * confirm against the enqueuing side before touching any of it.
 */
static void kthread_m(int cpuid)
{
	struct task_struct *lnxtsk;
	struct klist_t *klistp;
	RT_TASK *task;

	(task = &thread_task[cpuid])->magic = RT_TASK_MAGIC;
	task->runnable_on_cpus = cpuid;
	sprintf(current->comm, "RTAI_KTHRD_M:%d", cpuid);
	put_current_on_cpu(cpuid);
	kthreadm[cpuid] = current;
	klistp = &klistm[cpuid];
	rtai_set_linux_task_priority(current, SCHED_FIFO, KTHREAD_M_PRIO);
	sigfillset(&current->blocked);
	up(&resem[cpuid]);
	while (!endkthread) {
		current->state = TASK_UNINTERRUPTIBLE;
		schedule();
		while (klistp->out != klistp->in) {
			unsigned long hard;
			/* NOTE(review): empty cli/sti pair -- presumably a
			 * synchronization barrier with the RT domain; confirm. */
			rt_global_cli();
			rt_global_sti();
			hard = (unsigned long)(lnxtsk = klistp->task[klistp->out]);
			if (hard > 1) {
				/* A real task_struct pointer: bury the dead thread. */
				lnxtsk->state = TASK_ZOMBIE;
				lnxtsk->exit_signal = SIGCHLD;
				waitpid(lnxtsk->pid, 0, 0);
			} else {
				/* Sentinel (0 or 1): refill the reservoir if below target. */
				rt_global_cli();
				if (taskidx[cpuid] < Reservoir) {
					rt_global_sti();
					task->suspdepth = task->state = 0;
					kernel_thread((void *)thread_fun, (void *)cpuid, 0);
					while (task->state != (RT_SCHED_READY | RT_SCHED_SUSPENDED)) {
						current->state = TASK_INTERRUPTIBLE;
						schedule_timeout(1);
					}
					rt_global_cli();
					taskav[cpuid][taskidx[cpuid]++] = (void *)task->lnxtsk;
				}
				rt_global_sti();
				klistp->out = (klistp->out + 1) & (MAX_WAKEUP_SRQ - 1);
				if (hard) {
					rt_task_resume((void *)klistp->task[klistp->out]);
				} else {
					up(&resem[cpuid]);
				}
			}
			klistp->out = (klistp->out + 1) & (MAX_WAKEUP_SRQ - 1);
		}
	}
	kthreadm[cpuid] = 0;
}

/* steal_from_linux - definition continues beyond this chunk. */
void steal_from_linux(RT_TASK *rt_task)
{

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -