sched_lxrt.c
    return rt_kthread_init_cpuid(task, rt_thread, data, stack_size, priority, uses_fpu, signal, cpuid);
}

int rt_task_init(RT_TASK *task, void (*rt_thread)(int), int data, int stack_size, int priority, int uses_fpu, void (*signal)(void))
{
    return rt_kthread_init(task, rt_thread, data, stack_size, priority, uses_fpu, signal);
}

#endif /* USE_RTAI_TASKS */

void rt_set_runnable_on_cpuid(RT_TASK *task, unsigned int cpuid)
{
    unsigned long flags;
    RT_TASK *linux_task;

    return; /* early return: this call is a no-op in this scheduler build; the migration code below is unreachable */
    if (cpuid >= NR_RT_CPUS) {
        cpuid = get_min_tasks_cpuid();
    }
    flags = rt_global_save_flags_and_cli();
    /* rescale timing fields when source and destination CPUs run different timer modes */
    switch (rt_smp_oneshot_timer[task->runnable_on_cpus] | (rt_smp_oneshot_timer[cpuid] << 1)) {
        case 1:
            task->period = llimd(task->period, TIMER_FREQ, tuned.cpu_freq);
            task->resume_time = llimd(task->resume_time, TIMER_FREQ, tuned.cpu_freq);
            break;
        case 2:
            task->period = llimd(task->period, tuned.cpu_freq, TIMER_FREQ);
            task->resume_time = llimd(task->resume_time, tuned.cpu_freq, TIMER_FREQ);
            break;
    }
    /* unlink the task from the old CPU's task list */
    if (!((task->prev)->next = task->next)) {
        rt_smp_linux_task[task->runnable_on_cpus].prev = task->prev;
    } else {
        (task->next)->prev = task->prev;
    }
    task->runnable_on_cpus = cpuid;
    if ((task->state & RT_SCHED_DELAYED)) {
        (task->tprev)->tnext = task->tnext;
        (task->tnext)->tprev = task->tprev;
        enq_timed_task(task);
    }
    /* append the task to the new CPU's task list */
    task->next = 0;
    (linux_task = rt_smp_linux_task + cpuid)->prev->next = task;
    task->prev = linux_task->prev;
    linux_task->prev = task;
    rt_global_restore_flags(flags);
}

void rt_set_runnable_on_cpus(RT_TASK *task, unsigned long run_on_cpus)
{
    int cpuid;

    return; /* early return: a no-op in this scheduler build, like rt_set_runnable_on_cpuid() */
    run_on_cpus &= cpu_online_map;
    cpuid = get_min_tasks_cpuid();
    if (!test_bit(cpuid, &run_on_cpus)) {
        cpuid = ffnz(run_on_cpus);
    }
    rt_set_runnable_on_cpuid(task, cpuid);
}

int rt_check_current_stack(void)
{
    DECLARE_RT_CURRENT;
    char *sp;

    ASSIGN_RT_CURRENT;
    if (rt_current != &rt_linux_task) {
        sp = get_stack_pointer();
        return (sp - (char *)(rt_current->stack_bottom));
    } else {
        return -0x7FFFFFFF;
    }
}

#define TASK_TO_SCHEDULE() \
    do { prio = (new_task = rt_linux_task.rnext)->priority; } while (0)

#if ALLOW_RR
/* round-robin: when the quantum expires, refill it and rotate the task behind its last equal-priority peer */
#define RR_YIELD() \
    if (rt_current->policy > 0) { \
        rt_current->rr_remaining = rt_current->yield_time - rt_times.tick_time; \
        if (rt_current->rr_remaining <= 0) { \
            rt_current->rr_remaining = rt_current->rr_quantum; \
            if (rt_current->state == RT_SCHED_READY) { \
                RT_TASK *task; \
                task = rt_current->rnext; \
                while (rt_current->priority == task->priority) { \
                    task = task->rnext; \
                } \
                if (task != rt_current->rnext) { \
                    (rt_current->rprev)->rnext = rt_current->rnext; \
                    (rt_current->rnext)->rprev = rt_current->rprev; \
                    task->rprev = (rt_current->rprev = task->rprev)->rnext = rt_current; \
                    rt_current->rnext = task; \
                } \
            } \
        } \
    }

#define RR_SETYT() \
    if (new_task->policy > 0) { \
        new_task->yield_time = rt_time_h + new_task->rr_remaining; \
    }

#define RR_SPREMP() \
    if (new_task->policy > 0) { \
        preempt = 1; \
        if (new_task->yield_time < intr_time) { \
            intr_time = new_task->yield_time; \
        } \
    } else { \
        preempt = 0; \
    }

#define RR_TPREMP() \
    if (new_task->policy > 0) { \
        preempt = 1; \
        if (new_task->yield_time < rt_times.intr_time) { \
            rt_times.intr_time = new_task->yield_time; \
        } \
    } else { \
        preempt = (preempt_always || prio == RT_SCHED_LINUX_PRIORITY); \
    }
#else
#define RR_YIELD()
#define RR_SETYT()
#define RR_SPREMP() \
    do { preempt = 0; } while (0)
#define RR_TPREMP() \
    do { preempt = (preempt_always || prio == RT_SCHED_LINUX_PRIORITY); } while (0)
#endif

#define restore_fpu(tsk) \
    do { restore_fpenv_lxrt((tsk)); set_tsk_used_fpu(tsk); } while (0)

static volatile int to_linux_depth[NR_RT_CPUS];
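/*
 * Illustrative sketch, not part of the original file: how a kernel module
 * might use the task-creation and CPU-affinity calls above. The names
 * my_task, my_thread, my_example_init and the mask 0x3 (CPUs 0 and 1) are
 * hypothetical; note that in this build rt_set_runnable_on_cpus() returns
 * immediately, as flagged in its body.
 */
#if 0
static RT_TASK my_task;

static void my_thread(int arg)
{
    for (;;) {
        /* periodic hard real-time work would go here */
    }
}

static int my_example_init(void)
{
    /* priority 0 (highest), 4 KiB stack, FPU enabled, no signal handler */
    if (rt_task_init(&my_task, my_thread, 0, 4096, 0, 1, 0)) {
        return -1;
    }
    rt_set_runnable_on_cpus(&my_task, 0x3);
    return 0;
}
#endif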
#define LOCK_LINUX(cpuid) \
    do { \
        if (!to_linux_depth[cpuid]++) { \
            set_bit(cpuid, &rtai_cpu_lxrt); \
            rt_switch_to_real_time(cpuid); \
        } \
    } while (0)

#define UNLOCK_LINUX(cpuid) \
    do { \
        if (to_linux_depth[cpuid]) { \
            if (!--to_linux_depth[cpuid]) { \
                rt_switch_to_linux(cpuid); \
                clear_bit(cpuid, &rtai_cpu_lxrt); \
            } \
        } else { \
            rt_printk("*** ERROR: EXCESS LINUX_UNLOCK ***\n"); \
        } \
    } while (0)

#define ANTICIPATE
#define EXECTIME

#ifdef EXECTIME
static RTIME switch_time[NR_RT_CPUS];

/* charge the elapsed TSC interval to the outgoing task when it is a kernel-space RT task (no Linux shadow) */
#define KEXECTIME() \
    do { \
        RTIME now; \
        now = rdtsc(); \
        if (!rt_current->lnxtsk) { \
            rt_current->exectime[0] += (now - switch_time[cpuid]); \
        } \
        switch_time[cpuid] = now; \
    } while (0)

/* charge the elapsed TSC interval to the outgoing task when it is a hard real-time LXRT task */
#define UEXECTIME() \
    do { \
        RTIME now; \
        now = rdtsc(); \
        if (rt_current->is_hard) { \
            rt_current->exectime[0] += (now - switch_time[cpuid]); \
        } \
        switch_time[cpuid] = now; \
    } while (0)
#else
#define KEXECTIME()
#define UEXECTIME()
#endif

/* switch the memory context and processor state from prev to next, returning prev */
static inline struct task_struct *lxrt_context_switch(struct task_struct *prev, struct task_struct *next, int cpuid)
{
    struct task_struct *svprev = prev;
    struct mm_struct *oldmm = prev->active_mm;

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
    switch_mm(oldmm, next->active_mm, next, cpuid);
#else /* >= 2.6.0 */
    switch_mm(oldmm, next->active_mm, next);
#endif /* < 2.6.0 */
    if (!next->mm)
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
        enter_lazy_tlb(oldmm, next, cpuid);
#else /* >= 2.6.0 */
        enter_lazy_tlb(oldmm, next);
#endif /* < 2.6.0 */
    lxrt_switch_to(prev, next, prev);
    return svprev;
}

/* demote the current hard real-time task to soft mode: drop it from the RT
 * ready list, queue its Linux shadow on the wake-up SRQ and reschedule */
static inline void make_current_soft(RT_TASK *rt_current)
{
    void rt_schedule(void);

    rt_current->state &= ~RT_SCHED_READY;
    rt_current->force_soft = 0;
    wake_up_srq.task[wake_up_srq.in] = rt_current->lnxtsk;
    wake_up_srq.in = (wake_up_srq.in + 1) & (MAX_WAKEUP_SRQ - 1);
    rt_pend_linux_srq(wake_up_srq.srq);
    (rt_current->rprev)->rnext = rt_current->rnext;
    (rt_current->rnext)->rprev = rt_current->rprev;
    rt_schedule();
    rt_current->is_hard = 0;
    if ((rt_current->state |= RT_SCHED_READY) != RT_SCHED_READY) {
        current->state = TASK_HARDREALTIME;
        rt_schedule();
    }
}
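/*
 * Illustrative note, not part of the original file: the LOCK_LINUX()/
 * UNLOCK_LINUX() macros above keep a per-CPU nesting count in
 * to_linux_depth[], so only the outermost pair actually switches the CPU
 * between Linux and real-time mode. A minimal sketch, assuming a valid
 * cpuid:
 */
#if 0
LOCK_LINUX(cpuid);   /* depth 0 -> 1: sets the rtai_cpu_lxrt bit, enters real-time mode */
LOCK_LINUX(cpuid);   /* depth 1 -> 2: counted only, no mode switch */
UNLOCK_LINUX(cpuid); /* depth 2 -> 1: counted only, no mode switch */
UNLOCK_LINUX(cpuid); /* depth 1 -> 0: back to Linux mode, bit cleared */
UNLOCK_LINUX(cpuid); /* unbalanced: prints "*** ERROR: EXCESS LINUX_UNLOCK ***" */
#endif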
#ifdef CONFIG_SMP
void rt_schedule_on_schedule_ipi(void)
{
    DECLARE_RT_CURRENT;
    RTIME intr_time, now;
    RT_TASK *task, *new_task;
    int prio, delay, preempt;

    ASSIGN_RT_CURRENT;
    sched_rqsted[cpuid] = 1;
    prio = RT_SCHED_LINUX_PRIORITY;
    task = new_task = &rt_linux_task;
    sched_get_global_lock(cpuid);
    RR_YIELD();
    if (oneshot_running) {
#ifdef ANTICIPATE
        rt_time_h = rdtsc() + rt_half_tick;
        wake_up_timed_tasks(cpuid);
#endif
        TASK_TO_SCHEDULE();
        RR_SETYT();

        intr_time = shot_fired ? rt_times.intr_time : rt_times.intr_time + ONESHOT_SPAN;
        RR_SPREMP();
        task = &rt_linux_task;
        while ((task = task->tnext) != &rt_linux_task) {
            if (task->priority <= prio && task->resume_time < intr_time) {
                rt_times.intr_time = task->resume_time;
                goto fire;
            }
        }
        if (preempt || (!shot_fired && prio == RT_SCHED_LINUX_PRIORITY)) {
            if (preempt) {
                rt_times.intr_time = intr_time;
            }
fire:
            shot_fired = 1;
            /* program the one-shot timer, compensating for latency and setup time */
            delay = (int)(rt_times.intr_time - (now = rdtsc())) - tuned.latency;
            if (delay >= tuned.setup_time_TIMER_CPUNIT) {
                delay = imuldiv(delay, TIMER_FREQ, tuned.cpu_freq);
            } else {
                delay = tuned.setup_time_TIMER_UNIT;
                rt_times.intr_time = now + (tuned.setup_time_TIMER_CPUNIT);
            }
            rt_set_timer_delay(delay);
        }
    } else {
        TASK_TO_SCHEDULE();
        RR_SETYT();
    }
    sched_release_global_lock(cpuid);
    if (new_task != rt_current) {
        if (!new_task->lnxtsk || !rt_current->lnxtsk) {
            if (rt_current->lnxtsk) {
                LOCK_LINUX(cpuid);
                save_cr0_and_clts(linux_cr0);
                rt_linux_task.nextp = (void *)rt_current;
            } else if (new_task->lnxtsk) {
                rt_linux_task.prevp = (void *)new_task;
                new_task = (void *)rt_linux_task.nextp;
            }
            KEXECTIME();
            rt_exchange_tasks(rt_smp_current[cpuid], new_task);
            if (rt_current->lnxtsk) {
                UNLOCK_LINUX(cpuid);
                restore_cr0(linux_cr0);
                if (rt_current != (void *)rt_linux_task.prevp) {
                    new_task = (void *)rt_linux_task.prevp;
                    goto schedlnxtsk;
                }
            } else if (rt_current->uses_fpu) {
                enable_fpu();
                if (rt_current != fpu_task) {
                    save_fpenv(fpu_task->fpu_reg);
                    fpu_task = rt_current;
                    restore_fpenv(fpu_task->fpu_reg);
                }
            }
            if (rt_current->signal) {
                (*rt_current->signal)();
            }
            hard_cli();
            return;
        }
schedlnxtsk:
        rt_smp_current[cpuid] = new_task;
        if (new_task->is_hard || rt_current->is_hard) {
            struct task_struct *prev = rtai_get_current(cpuid);
            DECL_CPUS_ALLOWED;

            SAVE_CPUS_ALLOWED;
            if (!rt_current->is_hard) {
                LOCK_LINUX(cpuid);
                rt_linux_task.lnxtsk = prev;
                SET_CPUS_ALLOWED;
            }
            UEXECTIME();
            prev = lxrt_context_switch(prev, new_task->lnxtsk, cpuid);
            if (prev->used_math) {
                restore_fpu(prev);
            }
            if (rt_current->signal) {
                rt_current->signal();
            }
            if (!rt_current->is_hard) {
                UNLOCK_LINUX(cpuid);
                RST_CPUS_ALLOWED;
            } else if (rt_current->force_soft) {
                make_current_soft(rt_current);
            }
        }
    }
    hard_cli();
    return;
}
#endif

/* insert ready_task into the soft ready list, keeping it sorted by priority */
#define enq_soft_ready_task(ready_task) \
    do { \
        RT_TASK *task = rt_smp_linux_task[cpuid].rnext; \
        while (ready_task->priority >= task->priority) { \
            if ((task = task->rnext)->priority < 0) break; \
        } \
        task->rprev = (ready_task->rprev = task->rprev)->rnext = ready_task; \
        ready_task->rnext = task; \
    } while (0)
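/*
 * Illustrative sketch, not part of the original file: the splice performed
 * by enq_soft_ready_task() above, restated as a plain function for
 * readability. The ready list is doubly linked via rnext/rprev and kept
 * sorted by ascending priority value (a lower value means a higher
 * priority); the walk stops at the first lower-priority task or at the
 * negative-priority sentinel.
 */
#if 0
static void enq_ready_example(RT_TASK *head, RT_TASK *ready_task)
{
    RT_TASK *task = head->rnext;

    while (ready_task->priority >= task->priority) {
        if ((task = task->rnext)->priority < 0) {
            break; /* reached the list sentinel */
        }
    }
    /* insert ready_task immediately before task */
    ready_task->rprev = task->rprev;
    ready_task->rnext = task;
    task->rprev->rnext = ready_task;
    task->rprev = ready_task;
}
#endif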
/* main scheduling entry point; mirrors rt_schedule_on_schedule_ipi() above,
 * with extra handling for tasks returning to soft mode */
void rt_schedule(void)
{
    DECLARE_RT_CURRENT;
    RTIME intr_time, now;
    RT_TASK *task, *new_task;
    int prio, delay, preempt;

    ASSIGN_RT_CURRENT;
    sched_rqsted[cpuid] = 1;
    prio = RT_SCHED_LINUX_PRIORITY;
    task = new_task = &rt_linux_task;
    sched_get_global_lock(cpuid);
    RR_YIELD();
    if (oneshot_running) {
#ifdef ANTICIPATE
        rt_time_h = rdtsc() + rt_half_tick;
        wake_up_timed_tasks(cpuid);
#endif
        TASK_TO_SCHEDULE();
        RR_SETYT();

        intr_time = shot_fired ? rt_times.intr_time : rt_times.intr_time + ONESHOT_SPAN;
        RR_SPREMP();
        task = &rt_linux_task;
        while ((task = task->tnext) != &rt_linux_task) {
            if (task->priority <= prio && task->resume_time < intr_time) {
                rt_times.intr_time = task->resume_time;
                goto fire;
            }
        }
        if (preempt || (!shot_fired && prio == RT_SCHED_LINUX_PRIORITY)) {
            if (preempt) {
                rt_times.intr_time = intr_time;
            }
fire:
            shot_fired = 1;
            delay = (int)(rt_times.intr_time - (now = rdtsc())) - tuned.latency;
            if (delay >= tuned.setup_time_TIMER_CPUNIT) {
                delay = imuldiv(delay, TIMER_FREQ, tuned.cpu_freq);
            } else {
                delay = tuned.setup_time_TIMER_UNIT;
                rt_times.intr_time = now + (tuned.setup_time_TIMER_CPUNIT);
            }
            rt_set_timer_delay(delay);
        }
    } else {
        TASK_TO_SCHEDULE();
        RR_SETYT();
    }
    sched_release_global_lock(cpuid);
    if (new_task != rt_current) {
        if (!new_task->lnxtsk || !rt_current->lnxtsk) {
            if (rt_current->lnxtsk) {
                LOCK_LINUX(cpuid);
                save_cr0_and_clts(linux_cr0);
                rt_linux_task.nextp = (void *)rt_current;
            } else if (new_task->lnxtsk) {
                rt_linux_task.prevp = (void *)new_task;
                new_task = (void *)rt_linux_task.nextp;
            }
            KEXECTIME();
            rt_exchange_tasks(rt_smp_current[cpuid], new_task);
            if (rt_current->lnxtsk) {
                UNLOCK_LINUX(cpuid);
                restore_cr0(linux_cr0);
                if (rt_current != (void *)rt_linux_task.prevp) {
                    new_task = (void *)rt_linux_task.prevp;
                    goto schedlnxtsk;
                }
            } else if (rt_current->uses_fpu) {
                enable_fpu();
                if (rt_current != fpu_task) {
                    save_fpenv(fpu_task->fpu_reg);
                    fpu_task = rt_current;
                    restore_fpenv(fpu_task->fpu_reg);
                }
            }
            if (rt_current->signal) {
                (*rt_current->signal)();
            }
            hard_cli();
            return;
        }
schedlnxtsk:
        rt_smp_current[cpuid] = new_task;
        if (new_task->is_hard || rt_current->is_hard) {
            struct task_struct *prev = rtai_get_current(cpuid);
            DECL_CPUS_ALLOWED;

            SAVE_CPUS_ALLOWED;
            if (!rt_current->is_hard) {
                LOCK_LINUX(cpuid);
                rt_linux_task.lnxtsk = prev;
                SET_CPUS_ALLOWED;
            }
            UEXECTIME();
            prev = lxrt_context_switch(prev, new_task->lnxtsk, cpuid);
            if (prev->used_math) {
                restore_fpu(prev);
            }
            if (rt_current->signal) {
                rt_current->signal();
            }
            if (!rt_current->is_hard) {
                UNLOCK_LINUX(cpuid);
                RST_CPUS_ALLOWED;
                if (rt_current->state != RT_SCHED_READY || (rt_current != &rt_linux_task && prev->state == TASK_HARDREALTIME)) {
                    goto sched_soft;
                }
            } else if (rt_current->force_soft) {
                make_current_soft(rt_current);
            }
        } else {