task.c
    xnlock_get_irqsave(&nklock,s);

    task = xeno_h2obj_validate(task,XENO_TASK_MAGIC,RT_TASK);

    if (!task) {
        err = xeno_handle_error(task,XENO_TASK_MAGIC,RT_TASK);
        goto unlock_and_exit;
    }

    if (xnpod_unblockable_p()) {
        err = -EPERM;
        goto unlock_and_exit;
    }

    if (task->suspend_depth++ == 0)
        xnpod_suspend_thread(&task->thread_base, XNSUSP, XN_INFINITE, NULL);

 unlock_and_exit:

    xnlock_put_irqrestore(&nklock,s);

    return err;
}

/**
 * @fn int rt_task_resume(RT_TASK *task)
 * @brief Resume a real-time task.
 *
 * Forcibly resume the execution of a task which has been previously
 * suspended by a call to rt_task_suspend().
 *
 * The suspension nesting count is decremented so that
 * rt_task_resume() will only resume the task if this count falls down
 * to zero as a result of the current invocation.
 *
 * @param task The descriptor address of the affected task.
 *
 * @return 0 is returned upon success. Otherwise:
 *
 * - -EINVAL is returned if @a task is not a task descriptor.
 *
 * - -EIDRM is returned if @a task is a deleted task descriptor.
 *
 * Environments:
 *
 * This service can be called from:
 *
 * - Kernel module initialization/cleanup code
 * - Interrupt service routine
 * - Kernel-based task
 * - User-space task
 *
 * Rescheduling: possible if the suspension nesting level falls down
 * to zero as a result of the current invocation.
 */

int rt_task_resume (RT_TASK *task)
{
    int err = 0;
    spl_t s;

    xnlock_get_irqsave(&nklock,s);

    task = xeno_h2obj_validate(task,XENO_TASK_MAGIC,RT_TASK);

    if (!task) {
        err = xeno_handle_error(task,XENO_TASK_MAGIC,RT_TASK);
        goto unlock_and_exit;
    }

    if (task->suspend_depth > 0 && --task->suspend_depth == 0) {
        xnpod_resume_thread(&task->thread_base,XNSUSP);
        xnpod_schedule();
    }

 unlock_and_exit:

    xnlock_put_irqrestore(&nklock,s);

    return err;
}
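/*
 * Usage sketch (illustrative only, not part of the original source):
 * a controller task freezes a worker task around some exclusive work,
 * relying on the suspension nesting count so that paired calls to
 * rt_task_suspend()/rt_task_resume() balance out. The demo_* names
 * are placeholders and error checking is omitted for brevity.
 */
#if 0
static RT_TASK demo_worker;

static void demo_controller(void *cookie)
{
    /* The first call raises the nesting count from 0 to 1 and
       actually suspends the worker. */
    rt_task_suspend(&demo_worker);
    /* A nested call only increments the count further. */
    rt_task_suspend(&demo_worker);

    /* ... exclusive work while the worker is known to be idle ... */

    /* Each resume decrements the count; only the call that brings it
       back to zero readies the worker again and may reschedule. */
    rt_task_resume(&demo_worker);
    rt_task_resume(&demo_worker);
}
#endif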
/**
 * @fn int rt_task_delete(RT_TASK *task)
 * @brief Delete a real-time task.
 *
 * Terminate a task and release all the real-time kernel resources it
 * currently holds. A task exists in the system since rt_task_create()
 * has been called to create it, so this service must be called in
 * order to destroy it afterwards.
 *
 * Native tasks implement a mechanism by which they are immune from
 * deletion by other tasks while they run inside a deemed safe section
 * of code. This feature is used internally by the native skin in
 * order to prevent tasks from being deleted in the middle of a
 * critical section, without resorting to interrupt masking when the
 * latter is not an option. For this reason, the caller of
 * rt_task_delete() might block and a rescheduling might take place,
 * waiting for the target task to exit such a critical section.
 *
 * The DELETE hooks are called on behalf of the calling context (if
 * any). The information stored in the task control block remains
 * valid until all hooks have been called.
 *
 * @param task The descriptor address of the affected task. If @a task
 * is NULL, the current task is deleted.
 *
 * @return 0 is returned upon success. Otherwise:
 *
 * - -EINVAL is returned if @a task is not a task descriptor.
 *
 * - -EPERM is returned if @a task is NULL but not called from a task
 * context, or this service was called from an asynchronous context.
 *
 * - -EINTR is returned if rt_task_unblock() has been invoked for the
 * caller while it was waiting for @a task to exit a safe section. In
 * such a case, the deletion process has been aborted and @a task
 * remains unaffected.
 *
 * - -EDEADLK is returned if the caller is self-deleting while running
 * in the middle of a safe section.
 *
 * - -EIDRM is returned if @a task is a deleted task descriptor.
 *
 * Environments:
 *
 * This service can be called from:
 *
 * - Kernel module initialization/cleanup code only if @a task is
 * non-NULL.
 *
 * - Kernel-based task
 * - Any user-space context (conforming call)
 *
 * Rescheduling: always if @a task is NULL, and possible if the
 * deleted task is currently running inside a safe section.
 */

int rt_task_delete (RT_TASK *task)
{
    int err = 0;
    spl_t s;

    if (!task) {
        if (!xnpod_primary_p())
            return -EPERM;

        task = xeno_current_task();
    } else if (xnpod_asynch_p())
        return -EPERM;

    xnlock_get_irqsave(&nklock,s);

    task = xeno_h2obj_validate(task,XENO_TASK_MAGIC,RT_TASK);

    if (!task) {
        err = xeno_handle_error(task,XENO_TASK_MAGIC,RT_TASK);
        goto unlock_and_exit;
    }

    /* Make sure the target task is out of any safe section. */
    err = __native_task_safewait(task);

    if (err)
        goto unlock_and_exit;

    /* Does not return if task is current. */
    xnpod_delete_thread(&task->thread_base);

 unlock_and_exit:

    xnlock_put_irqrestore(&nklock,s);

    return err;
}

/**
 * @fn int rt_task_yield(void)
 * @brief Manual round-robin.
 *
 * Move the current task to the end of its priority group, so that the
 * next equal-priority task in ready state is switched in.
 *
 * @return 0 is returned upon success. Otherwise:
 *
 * - -EPERM is returned if this service was called from a context
 * which cannot sleep (e.g. interrupt, non-realtime or scheduler
 * locked).
 *
 * Environments:
 *
 * This service can be called from:
 *
 * - Kernel-based task
 * - User-space task
 *
 * Rescheduling: always if a next equal-priority task is ready to run,
 * otherwise this service leads to a no-op.
 */

int rt_task_yield (void)
{
    if (xnpod_unblockable_p())
        return -EPERM;

    xnpod_yield();

    return 0;
}
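/*
 * Usage sketch (illustrative only, not part of the original source):
 * deleting a worker task from module cleanup code, and a cooperative
 * task body that yields the CPU to equal-priority peers after each
 * unit of work. demo_worker, demo_cleanup() and demo_coop_body() are
 * placeholder names.
 */
#if 0
static RT_TASK demo_worker;

static void demo_cleanup(void)
{
    /* Passing a non-NULL descriptor is allowed from cleanup code; the
       call may block until the worker leaves a safe section. */
    rt_task_delete(&demo_worker);
}

static void demo_coop_body(void *cookie)
{
    for (;;) {
        /* ... perform one unit of work ... */

        /* Move to the end of our priority group so the next ready,
           equal-priority task gets the CPU. */
        rt_task_yield();
    }
}
#endif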
/**
 * @fn int rt_task_set_periodic(RT_TASK *task,RTIME idate,RTIME period)
 * @brief Make a real-time task periodic.
 *
 * Make a task periodic by programming its first release point and its
 * period in the processor timeline. Subsequent calls to
 * rt_task_wait_period() will delay the task until the next periodic
 * release point in the processor timeline is reached.
 *
 * @param task The descriptor address of the affected task. This task
 * is immediately delayed until the first periodic release point is
 * reached. If @a task is NULL, the current task is set periodic.
 *
 * @param idate The initial (absolute) date of the first release
 * point, expressed in clock ticks (see note). The affected task will
 * be delayed until this point is reached. If @a idate is equal to
 * TM_NOW, the current system date is used, and no initial delay takes
 * place.
 *
 * @param period The period of the task, expressed in clock ticks (see
 * note). Passing TM_INFINITE attempts to stop the task's periodic
 * timer; in the latter case, the routine always exits successfully,
 * regardless of the previous state of this timer.
 *
 * @return 0 is returned upon success. Otherwise:
 *
 * - -EINVAL is returned if @a task is not a task descriptor.
 *
 * - -EIDRM is returned if @a task is a deleted task descriptor.
 *
 * - -ETIMEDOUT is returned if @a idate is different from TM_INFINITE
 * and represents a date in the past.
 *
 * - -EWOULDBLOCK is returned if the system timer has not been started
 * using rt_timer_start().
 *
 * - -EPERM is returned if @a task is NULL but not called from a task
 * context.
 *
 * Environments:
 *
 * This service can be called from:
 *
 * - Kernel module initialization/cleanup code or interrupt only if
 * @a task is non-NULL.
 *
 * - Kernel-based task
 * - User-space task (switches to primary mode)
 *
 * Rescheduling: always if the operation affects the current task and
 * @a idate has not elapsed yet.
 *
 * @note This service is sensitive to the current operation mode of
 * the system timer, as defined by the rt_timer_start() service. In
 * periodic mode, clock ticks are interpreted as periodic jiffies. In
 * oneshot mode, clock ticks are interpreted as nanoseconds.
 */

int rt_task_set_periodic (RT_TASK *task, RTIME idate, RTIME period)
{
    int err;
    spl_t s;

    if (!task) {
        if (!xnpod_primary_p())
            return -EPERM;

        task = xeno_current_task();
    }

    xnlock_get_irqsave(&nklock,s);

    task = xeno_h2obj_validate(task,XENO_TASK_MAGIC,RT_TASK);

    if (!task) {
        err = xeno_handle_error(task,XENO_TASK_MAGIC,RT_TASK);
        goto unlock_and_exit;
    }

    err = xnpod_set_thread_periodic(&task->thread_base,idate,period);

 unlock_and_exit:

    xnlock_put_irqrestore(&nklock,s);

    return err;
}

/**
 * @fn int rt_task_wait_period(unsigned long *overruns_r)
 * @brief Wait for the next periodic release point.
 *
 * Make the current task wait for the next periodic release point in
 * the processor timeline.
 *
 * @param overruns_r If non-NULL, @a overruns_r must be a pointer to a
 * memory location which will be written with the count of pending
 * overruns. This value is copied only when rt_task_wait_period()
 * returns -ETIMEDOUT or success; the memory location remains
 * unmodified otherwise. If NULL, this count will never be copied
 * back.
 *
 * @return 0 is returned upon success; if @a overruns_r is valid, zero
 * is copied to the pointed memory location. Otherwise:
 *
 * - -EWOULDBLOCK is returned if rt_task_set_periodic() has not
 * previously been called for the calling task.
 *
 * - -EINTR is returned if rt_task_unblock() has been called for the
 * waiting task before the next periodic release point has been
 * reached. In this case, the overrun counter is reset too.
 *
 * - -ETIMEDOUT is returned if a timer overrun occurred, which
 * indicates that a previous release point has been missed by the
 * calling task. If @a overruns_r is valid, the count of pending
 * overruns is copied to the pointed memory location.
 *
 * - -EPERM is returned if this service was called from a context
 * which cannot sleep (e.g. interrupt, non-realtime or scheduler
 * locked).
 *
 * Environments:
 *
 * This service can be called from:
 *
 * - Kernel-based task
 * - User-space task (switches to primary mode)
 *
 * Rescheduling: always, unless the current release point has already
 * been reached. In the latter case, the current task immediately
 * returns from this service without being delayed.
 */

int rt_task_wait_period (unsigned long *overruns_r)
{
    if (xnpod_unblockable_p())
        return -EPERM;

    return xnpod_wait_thread_period(overruns_r);
}
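/*
 * Usage sketch (illustrative only, not part of the original source):
 * a task body running a 1 ms periodic loop. It assumes the system
 * timer was started beforehand with rt_timer_start(); the period is
 * given in nanoseconds and converted to clock ticks with
 * rt_timer_ns2ticks(). demo_periodic_body is a placeholder entry
 * point.
 */
#if 0
static void demo_periodic_body(void *cookie)
{
    unsigned long overruns;
    int err;

    /* First release point is now, then one release every 1 ms. */
    if (rt_task_set_periodic(NULL, TM_NOW, rt_timer_ns2ticks(1000000)))
        return;

    for (;;) {
        err = rt_task_wait_period(&overruns);

        if (err == -ETIMEDOUT)
            /* `overruns' release points were missed; keep running,
               although a real driver might want to log this. */
            err = 0;

        if (err)
            break;  /* e.g. -EINTR after rt_task_unblock(). */

        /* ... periodic work goes here ... */
    }
}
#endif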
/**
 * @fn int rt_task_set_priority(RT_TASK *task,int prio)
 * @brief Change the base priority of a real-time task.
 *
 * Changing the base priority of a task does not affect the priority
 * boost the target task might have obtained as a consequence of a
 * previous priority inheritance.
 *
 * @param task The descriptor address of the affected task.
 *
 * @param prio The new task priority. This value must range from [1
 * .. 99] (inclusive), where 1 is the lowest effective priority.
 *
 * @return Upon success, the previously set base priority is
 * returned. Otherwise:
 *
 * - -EINVAL is returned if @a task is not a task descriptor, or if
 * @a prio is invalid.
 *
 * - -EPERM is returned if @a task is NULL but not called from a task
 * context.
 *
 * - -EIDRM is returned if @a task is a deleted task descriptor.
 *
 * Side-effects:
 *
 * - This service calls the rescheduling procedure.
 *
 * - Assigning the same priority to a running or ready task moves it
 * to the end of its priority group, thus causing a manual
 * round-robin.
 *
 * Environments:
 *
 * This service can be called from:
 *
 * - Kernel module initialization/cleanup code
 * - Interrupt service routine
 * only if @a task is non-NULL.
 *
 * - Kernel-based task
 * - User-space task
 *
 * Rescheduling: possible if @a task is the current one.
 */

int rt_task_set_priority (RT_TASK *task, int prio)
{
    int oldprio;
    spl_t s;

    if (prio < T_LOPRIO || prio > T_HIPRIO)
        return -EINVAL;

    if (!task) {
        if (!xnpod_primary_p())
            return -EPERM;

        task = xeno_current_task();
    }

    xnlock_get_irqsave(&nklock,s);

    task = xeno_h2obj_validate(task,XENO_TASK_MAGIC,RT_TASK);

    if (!task) {
        oldprio = xeno_handle_error(task,XENO_TASK_MAGIC,RT_TASK);
        goto unlock_and_exit;
    }

    oldprio = xnthread_base_priority(&task->thread_base);

    xnpod_renice_thread(&task->thread_base,prio);

    xnpod_schedule();

 unlock_and_exit:

    xnlock_put_irqrestore(&nklock,s);

    return oldprio;
}
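/*
 * Usage sketch (illustrative only, not part of the original source):
 * temporarily raising a worker task's base priority around an urgent
 * phase, then restoring it from the value returned by
 * rt_task_set_priority(). demo_worker and the priority value 80 are
 * placeholders.
 */
#if 0
static RT_TASK demo_worker;

static void demo_boost_worker(void)
{
    int oldprio;

    /* Returns the previous base priority on success, or a negative
       error code such as -EINVAL or -EIDRM. */
    oldprio = rt_task_set_priority(&demo_worker, 80);
    if (oldprio < 0)
        return;

    /* ... urgent phase runs with the worker boosted ... */

    rt_task_set_priority(&demo_worker, oldprio);
}
#endif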