rtai_sched.c
               imuldiv(tuned.setup_time_TIMER_CPUNIT, 1000000000, tuned.cpu_freq));
    PROC_PRINT("Number of RT CPUs in system: %d\n\n", NR_RT_CPUS);
    PROC_PRINT("Priority Period(ns) FPU Sig State Task RT_TASK *\n");
    PROC_PRINT("------------------------------------------------------\n");
/*
 * Display all the active RT tasks and their state.
 *
 * Note: As a temporary hack the tasks are given an id which is
 *       the order they appear in the task list, needs fixing!
 */
    while ((task = task->next)) {
/*
 * The display for the task period is set to an integer (%d) as 64 bit
 * numbers are not currently handled correctly by the kernel routines.
 * Hence the period display will be wrong for time periods > ~4 secs.
 */
        PROC_PRINT("%-9d %-11lu %-4s %-4s 0x%-4x %-4d %p\n",
                   task->priority,
                   (unsigned long)count2nano(task->period),
                   task->uses_fpu ? "Yes" : "No",
                   task->signal ? "Yes" : "No",
                   task->state, i, task);
        i++;
    }
    PROC_PRINT("TIMED\n");
    task = &rt_linux_task;
    while ((task = task->tnext) != &rt_linux_task) {
        PROC_PRINT("> %p ", task);
    }
    PROC_PRINT("\nREADY\n");
    task = &rt_linux_task;
    while ((task = task->rnext) != &rt_linux_task) {
        PROC_PRINT("> %p ", task);
    }
    PROC_PRINT("\n");
    PROC_PRINT_DONE;
}  /* End function - rtai_read_sched */

static int rtai_proc_sched_register(void)
{
    struct proc_dir_entry *proc_sched_ent;

    proc_sched_ent = create_proc_entry("scheduler", S_IFREG|S_IRUGO|S_IWUSR, rtai_proc_root);
    if (!proc_sched_ent) {
        printk("Unable to initialize /proc/rtai/scheduler\n");
        return(-1);
    }
    proc_sched_ent->read_proc = rtai_read_sched;
    return(0);
}  /* End function - rtai_proc_sched_register */

static void rtai_proc_sched_unregister(void)
{
    remove_proc_entry("scheduler", rtai_proc_root);
}  /* End function - rtai_proc_sched_unregister */

/* ------------------< end of proc filesystem section >------------------ */
#endif /* CONFIG_PROC_FS */

/* ++++++++++++++++++++++++++ TIME CONVERSIONS +++++++++++++++++++++++++++++ */

RTIME count2nano(RTIME counts)
{
    int sign;

    if (counts > 0) {
        sign = 1;
    } else {
        sign = 0;
        counts = - counts;
    }
    counts = oneshot_timer ?
             llimd(counts, 1000000000, tuned.cpu_freq) :
             llimd(counts, 1000000000, TIMER_FREQ);
    return sign ? counts : - counts;
}

RTIME nano2count(RTIME ns)
{
    int sign;

    if (ns > 0) {
        sign = 1;
    } else {
        sign = 0;
        ns = - ns;
    }
    ns = oneshot_timer ?
         llimd(ns, tuned.cpu_freq, 1000000000) :
         llimd(ns, TIMER_FREQ, 1000000000);
    return sign ? ns : - ns;
}

RTIME count2nano_cpuid(RTIME counts, unsigned int cpuid)
{
    return count2nano(counts);
}

RTIME nano2count_cpuid(RTIME ns, unsigned int cpuid)
{
    return nano2count(ns);
}

/* +++++++++++++++++++++++++++++++ TIMINGS ++++++++++++++++++++++++++++++++++ */

RTIME rt_get_time(void)
{
    return oneshot_timer ? rdtsc() : rt_times.tick_time;
}

RTIME rt_get_time_cpuid(unsigned int cpuid)
{
    return oneshot_timer ? rdtsc() : rt_times.tick_time;
}

RTIME rt_get_time_ns(void)
{
    return oneshot_timer ?
           llimd(rdtsc(), 1000000000, tuned.cpu_freq) :
           llimd(rt_times.tick_time, 1000000000, TIMER_FREQ);
}

RTIME rt_get_time_ns_cpuid(unsigned int cpuid)
{
    return oneshot_timer ?
           llimd(rdtsc(), 1000000000, tuned.cpu_freq) :
           llimd(rt_times.tick_time, 1000000000, TIMER_FREQ);
}

RTIME rt_get_cpu_time_ns(void)
{
    return llimd(rdtsc(), 1000000000, tuned.cpu_freq);
}
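/*
 * Illustrative usage sketch (not part of the original file): the conversion
 * helpers above let callers keep user-visible timing in nanoseconds and
 * convert to internal timer counts only at the scheduler boundary.  Only
 * nano2count(), count2nano(), rt_get_time() and rt_get_time_ns() come from
 * this file; the 1 ms constant is arbitrary and rt_printk() is assumed to be
 * available from the base RTAI module.
 */
#if 0
#define SAMPLE_PERIOD_NS 1000000               /* hypothetical 1 ms period */

static void timing_example(void)
{
    RTIME period_counts = nano2count(SAMPLE_PERIOD_NS);   /* ns -> timer counts */
    RTIME now_counts    = rt_get_time();                   /* current time, counts */
    RTIME now_ns        = rt_get_time_ns();                /* current time, ns */

    /* Round-trip back to nanoseconds for diagnostics. */
    rt_printk("period = %lu ns, now = %lu ns\n",
              (unsigned long)count2nano(period_counts),
              (unsigned long)now_ns);
    (void)now_counts;
}
#endif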
void rt_task_yield(void)
{
    unsigned long flags;

    TRACE_RTAI_TASK(TRACE_RTAI_EV_TASK_YIELD, 0, 0, 0);

    hard_save_flags_and_cli(flags);
    {
        RT_TASK *task;
        task = rt_current->rnext;
        while (rt_current->priority == task->priority) {
            task = task->rnext;
        }
        if (task != rt_current->rnext) {
            (rt_current->rprev)->rnext = rt_current->rnext;
            (rt_current->rnext)->rprev = rt_current->rprev;
            task->rprev = (rt_current->rprev = task->rprev)->rnext = rt_current;
            rt_current->rnext = task;
            rt_schedule();
        }
    }
    hard_restore_flags(flags);
}

int rt_task_suspend(RT_TASK *task)
{
    unsigned long flags;

    if (!task) {
        task = rt_current;
    } else if (task->magic != RT_TASK_MAGIC) {
        return -EINVAL;
    }

    TRACE_RTAI_TASK(TRACE_RTAI_EV_TASK_SUSPEND, task->tid, 0, 0);

    hard_save_flags_and_cli(flags);
    if (!task->suspdepth++ && !task->owndres) {
        rem_ready_task(task);
        task->state |= SUSPENDED;
        if (task == rt_current) {
            rt_schedule();
        }
    }
    hard_restore_flags(flags);
    return 0;
}

int rt_task_resume(RT_TASK *task)
{
    unsigned long flags;

    if (task->magic != RT_TASK_MAGIC) {
        return -EINVAL;
    }

    TRACE_RTAI_TASK(TRACE_RTAI_EV_TASK_RESUME, task->tid, 0, 0);

    hard_save_flags_and_cli(flags);
    if (task->suspdepth > 0 && !(--task->suspdepth)) {
        rem_timed_task(task);
        if (((task->state &= ~SUSPENDED) & ~DELAYED) == READY) {
            enq_ready_task(task);
            rt_schedule();
        }
    } else {
        task->suspdepth--;
    }
    hard_restore_flags(flags);
    return 0;
}

int rt_task_make_periodic_relative_ns(RT_TASK *task, RTIME start_delay, RTIME period)
{
    unsigned long flags;

    if (task->magic != RT_TASK_MAGIC) {
        return -EINVAL;
    }

    TRACE_RTAI_TASK(TRACE_RTAI_EV_TASK_MAKE_PERIOD_RELATIVE, task->tid, start_delay, period);

    start_delay = nano2count(start_delay);
    period = nano2count(period);
    hard_save_flags_and_cli(flags);
    task->resume_time = rt_get_time() + start_delay;
    task->period = period;
    task->suspdepth = 0;
    if (!(task->state & DELAYED)) {
        rem_ready_task(task);
        task->state = (task->state & ~SUSPENDED) | DELAYED;
        enq_timed_task(task);
    }
    rt_schedule();
    hard_restore_flags(flags);
    return 0;
}

int rt_task_make_periodic(RT_TASK *task, RTIME start_time, RTIME period)
{
    long flags;

    if (task->magic != RT_TASK_MAGIC) {
        return -EINVAL;
    }

    TRACE_RTAI_TASK(TRACE_RTAI_EV_TASK_MAKE_PERIOD, task->tid, start_time, period);

    hard_save_flags_and_cli(flags);
    task->resume_time = start_time;
    task->period = period;
    task->suspdepth = 0;
    if (!(task->state & DELAYED)) {
        rem_ready_task(task);
        task->state = (task->state & ~SUSPENDED) | DELAYED;
        enq_timed_task(task);
    }
    rt_schedule();
    hard_restore_flags(flags);
    return 0;
}

void rt_task_wait_period(void)
{
    unsigned long flags;

    TRACE_RTAI_TASK(TRACE_RTAI_EV_TASK_WAIT_PERIOD, 0, 0, 0);

    hard_save_flags_and_cli(flags);
    if (rt_current->resync_frame) { // Request from watchdog
        rt_current->resync_frame = 0;
        rt_current->resume_time = rt_get_time();
    } else if ((rt_current->resume_time += rt_current->period) > rt_time_h) {
        rt_current->state |= DELAYED;
        rem_ready_current();
        enq_timed_task(rt_current);
        rt_schedule();
    }
    hard_restore_flags(flags);
}
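/*
 * Illustrative usage sketch (not part of the original file): the canonical
 * periodic-task pattern built on rt_task_make_periodic() and
 * rt_task_wait_period() defined above.  rt_task_init() is assumed to be
 * declared elsewhere in the RTAI headers, and the stack size, priority and
 * 1 ms period are arbitrary example values.
 */
#if 0
static RT_TASK demo_task;

static void demo_loop(int arg)
{
    while (1) {
        /* one cycle of periodic work goes here */
        rt_task_wait_period();          /* block until the next period boundary */
    }
}

static void start_demo(void)
{
    RTIME period = nano2count(1000000);   /* hypothetical 1 ms period, in counts */

    rt_task_init(&demo_task, demo_loop, 0, 4096, 10, 0, 0);
    rt_task_make_periodic(&demo_task, rt_get_time() + period, period);
}
#endif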
void rt_task_set_resume_end_times(RTIME resume, RTIME end)
{
    long flags;

    hard_save_flags_and_cli(flags);
    rt_current->policy = -1;
    rt_current->priority = 0;
    if (resume > 0) {
        rt_current->resume_time = resume;
    } else {
        rt_current->resume_time -= resume;
    }
    if (end > 0) {
        rt_current->period = end;
    } else {
        rt_current->period = rt_current->resume_time - end;
    }
    rt_current->state |= DELAYED;
    rem_ready_current();
    enq_timed_task(rt_current);
    rt_schedule();
    hard_restore_flags(flags);
}

RTIME next_period(void)
{
    return rt_current->resume_time + rt_current->period;
}

void rt_busy_sleep(int ns)
{
    RTIME end_time;

    TRACE_RTAI_TASK(TRACE_RTAI_EV_TASK_BUSY_SLEEP, ns, 0, 0);

    end_time = rdtsc() + llimd(ns, tuned.cpu_freq, 1000000000);
    while (rdtsc() < end_time);
}

void rt_sleep(RTIME delay)
{
    unsigned long flags;

    TRACE_RTAI_TASK(TRACE_RTAI_EV_TASK_SLEEP, 0, delay, 0);

    hard_save_flags_and_cli(flags);
    if ((rt_current->resume_time = (oneshot_timer ? rdtsc() : rt_times.tick_time) + delay) > rt_time_h) {
        rt_current->state |= DELAYED;
        rem_ready_current();
        enq_timed_task(rt_current);
        rt_schedule();
    }
    hard_restore_flags(flags);
}

void rt_sleep_until(RTIME time)
{
    unsigned long flags;

    TRACE_RTAI_TASK(TRACE_RTAI_EV_TASK_SLEEP_UNTIL, 0, time, 0);

    hard_save_flags_and_cli(flags);
    if ((rt_current->resume_time = time) > rt_time_h) {
        rt_current->state |= DELAYED;
        rem_ready_current();
        enq_timed_task(rt_current);
        rt_schedule();
    }
    hard_restore_flags(flags);
}

int rt_task_wakeup_sleeping(RT_TASK *task)
{
    unsigned long flags;

    if (task->magic != RT_TASK_MAGIC) {
        return -EINVAL;
    }

    hard_save_flags_and_cli(flags);
    rem_timed_task(task);
    if (task->state != READY && (task->state &= ~DELAYED) == READY) {
        enq_ready_task(task);
        rt_schedule();
    }
    hard_restore_flags(flags);
    return 0;
}
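/*
 * Illustrative usage sketch (not part of the original file): rt_sleep() and
 * rt_sleep_until() block the caller and let the scheduler run other tasks,
 * while rt_busy_sleep() spins on the TSC and is only appropriate for delays
 * well below the timer resolution.  All calls are defined above; the 5 us
 * and 2 ms figures are arbitrary.
 */
#if 0
static void delay_example(void)
{
    rt_busy_sleep(5000);                   /* spin for ~5 us, task stays running */
    rt_sleep(nano2count(2000000));         /* block for 2 ms, argument in counts */
    rt_sleep_until(rt_get_time() + nano2count(2000000));   /* absolute variant */
}
#endif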
/* +++++++++++++++++++++++++++++ SEMAPHORES ++++++++++++++++++++++++++++++++ */

/*
 * Insert 'task' into a wait queue: priority ordered by default, strict FIFO
 * when qtype is non-zero.
 */
static inline void enqueue_blocked(RT_TASK *task, QUEUE *queue, int qtype)
{
    QUEUE *q;

    task->blocked_on = (q = queue);
    if (!qtype) {
        while ((q = q->next) != queue && (q->task)->priority <= task->priority);
    }
    q->prev = (task->queue.prev = q->prev)->next = &(task->queue);
    task->queue.next = q;
}

static inline void dequeue_blocked(RT_TASK *task)
{
    task->prio_passed_to = NOTHING;
    (task->queue.prev)->next = task->queue.next;
    (task->queue.next)->prev = task->queue.prev;
    task->blocked_on = NOTHING;
}

/*
 * Priority inheritance: propagate the priority of 'from' along the chain of
 * tasks it is (transitively) blocked on, requeueing each boosted task.
 */
static __volatile__ inline void pass_prio(RT_TASK *to, RT_TASK *from)
{
    QUEUE *q;

    from->prio_passed_to = to;
    while (to && to->priority > from->priority) {
        to->priority = from->priority;
        if (to->state == READY) {
            (to->rprev)->rnext = to->rnext;
            (to->rnext)->rprev = to->rprev;
            enq_ready_task(to);
        } else if ((q = to->blocked_on) && !((to->state & SEMAPHORE) && ((SEM *)q)->qtype)) {
            (to->queue.prev)->next = to->queue.next;
            (to->queue.next)->prev = to->queue.prev;
            while ((q = q->next) != to->blocked_on && (q->task)->priority <= to->priority);
            q->prev = (to->queue.prev = q->prev)->next = &(to->queue);
            to->queue.next = q;
        }
        to = to->prio_passed_to;
    }
}

void rt_typed_sem_init(SEM *sem, int value, int type)
{
    sem->magic = RT_SEM_MAGIC;
    sem->count = value;
    sem->qtype = type != RES_SEM && (type & FIFO_Q) ? 1 : 0;
    type = (type & 3) - 2;
    if ((sem->type = type) < 0 && value > 1) {
        sem->count = 1;
    } else if (type > 0) {
        sem->type = sem->count = 1;
    }
    sem->queue.prev = &(sem->queue);
    sem->queue.next = &(sem->queue);
    sem->queue.task = sem->owndby = 0;
}

void rt_sem_init(SEM *sem, int value)
{
    TRACE_RTAI_SEM(TRACE_RTAI_EV_SEM_INIT, sem, value);
    rt_typed_sem_init(sem, value, CNT_SEM);
}

int rt_sem_delete(SEM *sem)
{
    unsigned long flags;
    RT_TASK *task;
    int sched;
    QUEUE *q;

    if (sem->magic != RT_SEM_MAGIC) {
        return SEM_ERR;
    }

    TRACE_RTAI_SEM(TRACE_RTAI_EV_SEM_DELETE, sem, 0);

    sched = 0;
    q = &(sem->queue);
    hard_save_flags_and_cli(flags);
    sem->magic = 0;
    while ((q = q->next) != &(sem->queue) && (task = q->task)) {
        rem_timed_task(task);
        if (task->state != READY && (task->state &= ~(SEMAPHORE | DELAYED)) == READY) {
            enq_ready_task(task);
            sched = 1;
        }
    }
    if ((task = sem->owndby) && sem->type > 0) {
        if (task->owndres & SEMHLF) {
            --task->owndres;
        }
        if (!task->owndres) {
            sched = renq_ready_task(task, task->base_priority);
        } else if (!(task->owndres & SEMHLF)) {
            int priority;
            sched = renq_ready_task(task, task->base_priority > (priority = ((task->msg_queue.next)->task)->priority) ? priority : task->base_priority);
        }
        if (task->suspdepth) {
            if (task->suspdepth > 0) {
                task->state |= SUSPENDED;
                rem_ready_task(task);
                sched = 1;
            } else {
                rt_task_delete(task);
            }
        }
    }
    if (sched) {
        rt_schedule();
    }
    hard_restore_flags(flags);
    return 0;
}

int rt_sem_signal(SEM *sem)
{
    unsigned long flags;
    RT_TASK *task;

    if (sem->magic != RT_SEM_MAGIC) {
        return SEM_ERR;
    }

    TRACE_RTAI_SEM(TRACE_RTAI_EV_SEM_SIGNAL, sem, 0);

    hard_save_flags_and_cli(flags);
    if (sem->type) {
        if (sem->type > 1) {
            sem->type--;
            hard_restore_flags(flags);
            return 0;
        }
        if (++sem->count > 1) {
            sem->count = 1;
        }
    } else {
        sem->count++;
    }
    if ((task = (sem->queue.next)->task)) {
        dequeue_blocked(task);
        rem_timed_task(task);
        if (task->state != READY && (task->state &= ~(SEMAPHORE | DELAYED)) == READY) {
            enq_ready_task(task);
            if (sem->type <= 0) {