/*
 * common.c -- RTAI scheduler services (excerpt): periodic task setup,
 * sleep/delay primitives, ready/timed queue wrappers, named-task registry.
 */
* created with @ref rt_task_init(), as suitable for a periodic * execution, with period @e period, when @ref rt_task_wait_period() * is called. * * The time of first execution is defined through @e start_time or @e * start_delay. @e start_time is an absolute value measured in clock * ticks. @e start_delay is relative to the current time and measured * in nanoseconds. * * @param task is a pointer to the task you want to make periodic. * * @param start_delay is the time, to wait before the task start * running, in nanoseconds. * * @param period corresponds to the period of the task, in nanoseconds. * * @retval 0 on success. A negative value on failure as described below: * - @b EINVAL: task does not refer to a valid task. * * Recall that the term clock ticks depends on the mode in which the hard * timer runs. So if the hard timer was set as periodic a clock tick will * last as the period set in start_rt_timer, while if oneshot mode is used * a clock tick will last as the inverse of the running frequency of the * hard timer in use and irrespective of any period used in the call to * start_rt_timer. 
*/int rt_task_make_periodic_relative_ns(RT_TASK *task, RTIME start_delay, RTIME period){ long flags; if (task->magic != RT_TASK_MAGIC) { return -EINVAL; } start_delay = nano2count_cpuid(start_delay, task->runnable_on_cpus); period = nano2count_cpuid(period, task->runnable_on_cpus); flags = rt_global_save_flags_and_cli(); task->resume_time = rt_get_time_cpuid(task->runnable_on_cpus) + start_delay; task->period = period; task->suspdepth = 0; if (!(task->state & RT_SCHED_DELAYED)) { rem_ready_task(task); task->state = (task->state & ~RT_SCHED_SUSPENDED) | RT_SCHED_DELAYED; enq_timed_task(task);} RT_SCHEDULE(task, hard_cpu_id()); rt_global_restore_flags(flags); return 0;}/** * @anchor rt_task_make_periodic * Make a task run periodically * * rt_task_make_periodic mark the task @e task, previously created * with @ref rt_task_init(), as suitable for a periodic execution, with * period @e period, when @ref rt_task_wait_period() is called. * * The time of first execution is defined through @e start_time or @e * start_delay. @e start_time is an absolute value measured in clock * ticks. @e start_delay is relative to the current time and measured * in nanoseconds. * * @param task is a pointer to the task you want to make periodic. * * @param start_time is the absolute time to wait before the task start * running, in clock ticks. * * @param period corresponds to the period of the task, in clock ticks. * * @retval 0 on success. A negative value on failure as described * below: * - @b EINVAL: task does not refer to a valid task. * * See also: @ref rt_task_make_periodic_relative_ns(). * Recall that the term clock ticks depends on the mode in which the hard * timer runs. So if the hard timer was set as periodic a clock tick will * last as the period set in start_rt_timer, while if oneshot mode is used * a clock tick will last as the inverse of the running frequency of the * hard timer in use and irrespective of any period used in the call to * start_rt_timer. 
* */int rt_task_make_periodic(RT_TASK *task, RTIME start_time, RTIME period){ long flags; if (task->magic != RT_TASK_MAGIC) { return -EINVAL; } flags = rt_global_save_flags_and_cli(); task->resume_time = start_time; task->period = period; task->suspdepth = 0; if (!(task->state & RT_SCHED_DELAYED)) { rem_ready_task(task); task->state = (task->state & ~RT_SCHED_SUSPENDED) | RT_SCHED_DELAYED; enq_timed_task(task); } RT_SCHEDULE(task, hard_cpu_id()); rt_global_restore_flags(flags); return 0;}/** * @anchor rt_task_wait_period * Wait till next period. * * rt_task_wait_period suspends the execution of the currently running * real time task until the next period is reached. * The task must have * been previously marked for a periodic execution by calling * @ref rt_task_make_periodic() or @ref rt_task_make_periodic_relative_ns(). * * @note The task is suspended only temporarily, i.e. it simply gives * up control until the next time period. */void rt_task_wait_period(void){ DECLARE_RT_CURRENT; long flags; flags = rt_global_save_flags_and_cli(); ASSIGN_RT_CURRENT; if (rt_current->resync_frame) { // Request from watchdog rt_current->resync_frame = 0;#ifdef CONFIG_SMP rt_current->resume_time = oneshot_timer ? rdtsc() : sqilter ? rt_smp_times[cpuid].tick_time : rt_times.tick_time;#else rt_current->resume_time = oneshot_timer ? 
rdtsc() : rt_times.tick_time;#endif } else if ((rt_current->resume_time += rt_current->period) > rt_time_h) { rt_current->state |= RT_SCHED_DELAYED; rem_ready_current(rt_current); enq_timed_task(rt_current); rt_schedule(); } rt_global_restore_flags(flags);}void rt_task_set_resume_end_times(RTIME resume, RTIME end){ RT_TASK *rt_current; long flags; flags = rt_global_save_flags_and_cli(); rt_current = RT_CURRENT; rt_current->policy = -1; rt_current->priority = 0; if (resume > 0) { rt_current->resume_time = resume; } else { rt_current->resume_time -= resume; } if (end > 0) { rt_current->period = end; } else { rt_current->period = rt_current->resume_time - end; } rt_current->state |= RT_SCHED_DELAYED; rem_ready_current(rt_current); enq_timed_task(rt_current); rt_schedule(); rt_global_restore_flags(flags);}int rt_set_resume_time(RT_TASK *task, RTIME new_resume_time){ long flags; if (task->magic != RT_TASK_MAGIC) { return -EINVAL; } flags = rt_global_save_flags_and_cli(); if (task->state & RT_SCHED_DELAYED) { if (((task->resume_time = new_resume_time) - (task->tnext)->resume_time) > 0) { rem_timed_task(task); enq_timed_task(task); rt_global_restore_flags(flags); return 0; } } rt_global_restore_flags(flags); return -ETIME;}int rt_set_period(RT_TASK *task, RTIME new_period){ long flags; if (task->magic != RT_TASK_MAGIC) { return -EINVAL; } hard_save_flags_and_cli(flags); task->period = new_period; hard_restore_flags(flags); return 0;}/** * @anchor next_period * @brief Get the time a periodic task will be resumed after calling * rt_task_wait_period. * * this function returns the time when the caller task will run * next. Combined with the appropriate @ref rt_get_time function() it * can be used for checking the fraction of period used or any period * overrun. * * @return Next period time in internal count units. 
*/RTIME next_period(void){ RT_TASK *rt_current; unsigned long flags; flags = rt_global_save_flags_and_cli(); rt_current = RT_CURRENT; rt_global_restore_flags(flags); return rt_current->resume_time + rt_current->period;}/** * @anchor rt_busy_sleep * @brief Delay/suspend execution for a while. * * rt_busy_sleep delays the execution of the caller task without * giving back the control to the scheduler. This function burns away * CPU cycles in a busy wait loop so it should be used only for very * short synchronization delays. On machine not having a TSC clock it * can lead to many microseconds uncertain busy sleeps because of the * need of reading the 8254 timer. * * @param ns is the number of nanoseconds to wait. * * See also: @ref rt_sleep(), @ref rt_sleep_until(). * * @note A higher priority task or interrupt handler can run before * the task goes to sleep, so the actual time spent in these * functions may be longer than that specified. */void rt_busy_sleep(int ns){ RTIME end_time; end_time = rdtsc() + llimd(ns, tuned.cpu_freq, 1000000000); while (rdtsc() < end_time);}/** * @anchor rt_sleep * @brief Delay/suspend execution for a while. * * rt_sleep suspends execution of the caller task for a time of delay * internal count units. During this time the CPU is used by other * tasks. * * @param delay Corresponds to the time the task is going to be suspended. * * See also: @ref rt_busy_sleep(), @ref rt_sleep_until(). * * @note A higher priority task or interrupt handler can run before * the task goes to sleep, so the actual time spent in these * functions may be longer than the the one specified. 
*/void rt_sleep(RTIME delay){ DECLARE_RT_CURRENT; unsigned long flags; flags = rt_global_save_flags_and_cli(); ASSIGN_RT_CURRENT; if ((rt_current->resume_time = get_time() + delay) > rt_time_h) { rt_current->state |= RT_SCHED_DELAYED; rem_ready_current(rt_current); enq_timed_task(rt_current); rt_schedule(); } rt_global_restore_flags(flags);}/** * @anchor rt_sleep_until * @brief Delay/suspend execution for a while. * * rt_sleep_until is similar to @ref rt_sleep() but the parameter time * is the absolute time till the task have to be suspended. If the * given time is already passed this call has no effect. * * @param time Absolute time till the task have to be suspended * * See also: @ref rt_busy_sleep(), @ref rt_sleep_until(). * * @note A higher priority task or interrupt handler can run before * the task goes to sleep, so the actual time spent in these * functions may be longer than the the one specified. */void rt_sleep_until(RTIME time){ DECLARE_RT_CURRENT; unsigned long flags; flags = rt_global_save_flags_and_cli(); ASSIGN_RT_CURRENT; if ((rt_current->resume_time = time) > rt_time_h) { rt_current->state |= RT_SCHED_DELAYED; rem_ready_current(rt_current); enq_timed_task(rt_current); rt_schedule(); } rt_global_restore_flags(flags);}int rt_task_wakeup_sleeping(RT_TASK *task){ unsigned long flags; if (task->magic != RT_TASK_MAGIC) { return -EINVAL; } flags = rt_global_save_flags_and_cli(); rem_timed_task(task); if (task->state != RT_SCHED_READY && (task->state &= ~RT_SCHED_DELAYED) == RT_SCHED_READY) { enq_ready_task(task); RT_SCHEDULE(task, hard_cpu_id()); } rt_global_restore_flags(flags); return 0;}int rt_nanosleep(struct timespec *rqtp, struct timespec *rmtp){ RTIME expire; if (rqtp->tv_nsec >= 1000000000L || rqtp->tv_nsec < 0 || rqtp->tv_sec < 0) { return -EINVAL; } rt_sleep_until(expire = rt_get_time() + timespec2count(rqtp)); if ((expire -= rt_get_time()) > 0) { if (rmtp) { count2timespec(expire, rmtp); } return -EINTR; } return 0;}/* +++++++++++++++++++ READY 
AND TIMED QUEUE MANIPULATION +++++++++++++++++++ */void rt_enq_ready_edf_task(RT_TASK *ready_task){ enq_ready_edf_task(ready_task);}void rt_enq_ready_task(RT_TASK *ready_task){ enq_ready_task(ready_task);}int rt_renq_ready_task(RT_TASK *ready_task, int priority){ return renq_ready_task(ready_task, priority);}void rt_rem_ready_task(RT_TASK *task){ rem_ready_task(task);}void rt_rem_ready_current(RT_TASK *rt_current){ rem_ready_current(rt_current);}void rt_enq_timed_task(RT_TASK *timed_task){ enq_timed_task(timed_task);}void rt_wake_up_timed_tasks(int cpuid){#ifdef CONFIG_SMP wake_up_timed_tasks(cpuid & sqilter);#else wake_up_timed_tasks(0);#endif}void rt_rem_timed_task(RT_TASK *task){ rem_timed_task(task);}void rt_enqueue_blocked(RT_TASK *task, QUEUE *queue, int qtype){ enqueue_blocked(task, queue, qtype);}void rt_dequeue_blocked(RT_TASK *task){ dequeue_blocked(task);}int rt_renq_current(RT_TASK *rt_current, int priority){ return renq_current(rt_current, priority);}/* ++++++++++++++++++++++++ NAMED TASK INIT/DELETE ++++++++++++++++++++++++++ */RT_TASK *rt_named_task_init(const char *task_name, void (*thread)(int), int data, int stack_size, int prio, int uses_fpu, void(*signal)(void)){ RT_TASK *task; unsigned long name; if ((task = rt_get_adr(name = nam2num(task_name)))) { return task; } if ((task = rt_malloc(sizeof(RT_TASK))) && !rt_task_init(task, thread, data, stack_size, prio, uses_fpu, signal)) { if (rt_register(name, task, IS_TASK, 0)) { return task; } rt_task_delete(task); } rt_free(task); return (RT_TASK *)0;}RT_TASK *rt_named_task_init_cpuid(const char *task_name, void (*thread)(int), int data, int stack_size, int prio, int uses_fpu, void(*signal)(void), unsigned int run_on_cpu){ RT_TASK *task; unsigned long name; if ((task = rt_get_adr(name = nam2num(task_name)))) { return task; } if ((task = rt_malloc(sizeof(RT_TASK))) && !rt_task_init_cpuid(task, thread, data, stack_size, prio, uses_fpu, signal, run_on_cpu)) { if (rt_register(name, task, IS_TASK, 0)) { 
return task; } rt_task_delete(task); } rt_free(task); return (RT_TASK *)0;}int rt_named_task_delete(RT_TASK *task){ if (!rt_task_delete(task)) { rt_free(task); } return rt_drg_on_adr(task);}/* +++++++++++++++++++++++++++++++ REGISTRY +++++++++++++++++++++++++++++++++ */static volatile int max_slots;static struct rt_registry_entry_struct lxrt_list[MAX_SLOTS + 1] = { { 0, 0, 0, 0, 0 }, };static spinlock_t list_lock = SPIN_LOCK_UNLOCKED;static inline int registr(unsigned long name, void *adr, int type, struct task_struct *tsk){ unsigned long flags; int i, slot;/* * Register a resource. This allows other programs (RTAI and/or user space) * to use the same resource because they can find the address from the name.*/ // index 0 is reserved for the null slot. while ((slot = max_slots) < MAX_SLOTS) { for (i = 1; i <= max_slots; i++) { if (lxrt_list[i].name == name) { return 0; } } flags = rt_spin_lock_irqsave(&list_lock); if (slot == max_slots && max_slots < MAX_SLOTS) { slot = ++max_slots; lxrt_list[slot].name = name; lxrt_list[slot].adr = adr; lxrt_list[slot].tsk = tsk; lxrt_list[slot].pid = tsk ? tsk->pid : 0 ; lxrt_list[slot].type = type; lxrt_list[slot].count = 1; rt_spin_unlock_irqrestore(flags, &list_lock); return slot; } rt_spin_unlock_irqrestore(flags, &list_lock); }
/* (code-viewer page chrome removed: keyboard-shortcut help text) */