📄 sched_lxrt.c
sched_soft:
            UNLOCK_LINUX(cpuid);
            rt_global_sti();
            schedule();
            rt_global_cli();
            rt_current->state |= RT_SCHED_READY;
            while (rt_current->state != RT_SCHED_READY) {
                (rt_current->lnxtsk)->state = TASK_HARDREALTIME;
                rt_global_sti();
                schedule();
                rt_global_cli();
            }
            LOCK_LINUX(cpuid);
            enq_soft_ready_task(rt_current);
            rt_smp_current[cpuid] = rt_current;
        }
    }
    hard_cli();
    return;
}

/* Assign rate-monotonic (RMS) priorities: on each pass the task with the
 * shortest period that is still unassigned gets the next highest priority. */
void rt_spv_RMS(int cpuid)
{
    RT_TASK *task;
    int prio;

    if (cpuid < 0 || cpuid >= num_online_cpus()) {
        cpuid = hard_cpu_id();
    }
    prio = 0;
    task = &rt_linux_task;
    while ((task = task->next)) {
        RT_TASK *task, *htask;
        RTIME period;
        htask = 0;
        task = &rt_linux_task;
        period = RT_TIME_END;
        while ((task = task->next)) {
            if (task->priority >= 0 && task->policy >= 0 && task->period && task->period < period) {
                period = (htask = task)->period;
            }
        }
        if (htask) {
            htask->priority = -1;
            htask->base_priority = prio++;
        } else {
            goto ret;
        }
    }
ret:
    task = &rt_linux_task;
    while ((task = task->next)) {
        if (task->priority < 0) {
            task->priority = task->base_priority;
        }
    }
    return;
}

/* Lock the scheduler for the current task; nested calls are counted in the
 * (negative) priority and undone by rt_sched_unlock(). */
void rt_sched_lock(void)
{
    DECLARE_RT_CURRENT;
    unsigned long flags;

    flags = rt_global_save_flags_and_cli();
    ASSIGN_RT_CURRENT;
    if (rt_current->priority >= 0) {
        rt_current->sched_lock_priority = rt_current->priority;
        sched_rqsted[cpuid] = rt_current->priority = -1;
    } else {
        rt_current->priority--;
    }
    rt_global_restore_flags(flags);
}

void rt_sched_unlock(void)
{
    DECLARE_RT_CURRENT;
    unsigned long flags;

    flags = rt_global_save_flags_and_cli();
    ASSIGN_RT_CURRENT;
    if (rt_current->priority < 0 && !(++rt_current->priority)) {
        if ((rt_current->priority = rt_current->sched_lock_priority) != RT_SCHED_LINUX_PRIORITY) {
            (rt_current->rprev)->rnext = rt_current->rnext;
            (rt_current->rnext)->rprev = rt_current->rprev;
            enq_ready_task(rt_current);
        }
        if (sched_rqsted[cpuid] > 0) {
            rt_schedule();
        }
    }
    rt_global_restore_flags(flags);
}

/* Detach a task from the scheduler: run its exit handlers, release anything
 * blocked on it and unlink it from all queues. */
int clr_rtext(RT_TASK *task)
{
    DECLARE_RT_CURRENT;
    unsigned long flags;
    QUEUE *q;

    if (task->magic != RT_TASK_MAGIC || task->priority == RT_SCHED_LINUX_PRIORITY) {
        return -EINVAL;
    }
    flags = rt_global_save_flags_and_cli();
    ASSIGN_RT_CURRENT;
    if (!(task->owndres & SEMHLF) || task == rt_current || rt_current->priority == RT_SCHED_LINUX_PRIORITY) {
        call_exit_handlers(task);
        rem_timed_task(task);
        if (task->blocked_on) {
            (task->queue.prev)->next = task->queue.next;
            (task->queue.next)->prev = task->queue.prev;
            if (task->state & RT_SCHED_SEMAPHORE) {
                ((SEM *)(task->blocked_on))->count++;
                if (((SEM *)(task->blocked_on))->type && ((SEM *)(task->blocked_on))->count > 1) {
                    ((SEM *)(task->blocked_on))->count = 1;
                }
            }
        }
        q = &(task->msg_queue);
        while ((q = q->next) != &(task->msg_queue)) {
            rem_timed_task(q->task);
            if ((q->task)->state != RT_SCHED_READY && ((q->task)->state &= ~(RT_SCHED_SEND | RT_SCHED_RPC | RT_SCHED_DELAYED)) == RT_SCHED_READY) {
                enq_ready_task(q->task);
            }
            (q->task)->blocked_on = 0;
        }
        q = &(task->ret_queue);
        while ((q = q->next) != &(task->ret_queue)) {
            rem_timed_task(q->task);
            if ((q->task)->state != RT_SCHED_READY && ((q->task)->state &= ~(RT_SCHED_RETURN | RT_SCHED_DELAYED)) == RT_SCHED_READY) {
                enq_ready_task(q->task);
            }
            (q->task)->blocked_on = 0;
        }
        if (!((task->prev)->next = task->next)) {
            rt_smp_linux_task[task->runnable_on_cpus].prev = task->prev;
        } else {
            (task->next)->prev = task->prev;
        }
        if (rt_smp_fpu_task[task->runnable_on_cpus] == task) {
            rt_smp_fpu_task[task->runnable_on_cpus] = rt_smp_linux_task + task->runnable_on_cpus;
        }
        if (!task->lnxtsk) {
            frstk_srq.mp[frstk_srq.in] = task->stack_bottom;
            frstk_srq.in = (frstk_srq.in + 1) & (MAX_FRESTK_SRQ - 1);
            rt_pend_linux_srq(frstk_srq.srq);
        }
        task->magic = 0;
        rem_ready_task(task);
        task->state = 0;
        atomic_dec((atomic_t *)(tasks_per_cpu + task->runnable_on_cpus));
        if (task == rt_current) {
            rt_schedule();
        }
    } else {
        task->suspdepth = -0x7FFFFFFF;
    }
    rt_global_restore_flags(flags);
    return 0;
}

int rt_task_delete(RT_TASK *task)
{
    if (!clr_rtext(task)) {
        if (task->lnxtsk) {
            start_stop_kthread(task, 0, 0, 0, 0, 0, 0);
        }
    }
    return 0;
}

int rt_get_timer_cpu(void)
{
    return 1;
}

/* Timer interrupt handler: advance the scheduler clock, wake timed tasks,
 * reprogram the hardware timer (oneshot or periodic) and switch tasks. */
static void rt_timer_handler(void)
{
    DECLARE_RT_CURRENT;
    RTIME now;
    RT_TASK *task, *new_task;
    int prio, delay, preempt;

    ASSIGN_RT_CURRENT;
    sched_rqsted[cpuid] = 1;
    prio = RT_SCHED_LINUX_PRIORITY;
    task = new_task = &rt_linux_task;
#ifdef CONFIG_X86_REMOTE_DEBUG
    if (oneshot_timer) {    // Resync after possibly hitting a breakpoint
        rt_times.intr_time = rdtsc();
    }
#endif
    rt_times.tick_time = rt_times.intr_time;
    rt_time_h = rt_times.tick_time + rt_half_tick;
    if (rt_times.tick_time >= rt_times.linux_time) {
        rt_times.linux_time += rt_times.linux_tick;
        update_linux_timer();
    }
    sched_get_global_lock(cpuid);
    wake_up_timed_tasks(cpuid);
    RR_YIELD();
    TASK_TO_SCHEDULE();
    RR_SETYT();
    if (oneshot_timer) {
        rt_times.intr_time = rt_times.tick_time + ONESHOT_SPAN;
        RR_TPREMP();
        task = &rt_linux_task;
        while ((task = task->tnext) != &rt_linux_task) {
            if (task->priority <= prio && task->resume_time < rt_times.intr_time) {
                rt_times.intr_time = task->resume_time;
                shot_fired = 1;
                goto fire;
            }
        }
        if ((shot_fired = preempt)) {
            rt_times.intr_time = rt_times.linux_time > rt_times.tick_time ? rt_times.linux_time : rt_times.tick_time + (rt_times.linux_tick >> 1);
fire:
            delay = (int)(rt_times.intr_time - (now = rdtsc())) - tuned.latency;
            if (delay >= tuned.setup_time_TIMER_CPUNIT) {
                delay = imuldiv(delay, TIMER_FREQ, tuned.cpu_freq);
            } else {
                delay = tuned.setup_time_TIMER_UNIT;
                rt_times.intr_time = now + (tuned.setup_time_TIMER_CPUNIT);
            }
            rt_set_timer_delay(delay);
        }
    } else {
        rt_times.intr_time += rt_times.periodic_tick;
        rt_set_timer_delay(0);
    }
    sched_release_global_lock(cpuid);
    if (new_task != rt_current) {
        if (!new_task->lnxtsk || !rt_current->lnxtsk) {
            if (rt_current->lnxtsk) {
                LOCK_LINUX(cpuid);
                save_cr0_and_clts(linux_cr0);
                rt_linux_task.nextp = (void *)rt_current;
            } else if (new_task->lnxtsk) {
                rt_linux_task.prevp = (void *)new_task;
                new_task = (void *)rt_linux_task.nextp;
            }
            KEXECTIME();
            rt_exchange_tasks(rt_smp_current[cpuid], new_task);
            if (rt_current->lnxtsk) {
                UNLOCK_LINUX(cpuid);
                restore_cr0(linux_cr0);
                if (rt_current != (void *)rt_linux_task.prevp) {
                    new_task = (void *)rt_linux_task.prevp;
                    goto schedlnxtsk;
                }
            } else if (rt_current->uses_fpu) {
                enable_fpu();
                if (rt_current != fpu_task) {
                    save_fpenv(fpu_task->fpu_reg);
                    fpu_task = rt_current;
                    restore_fpenv(fpu_task->fpu_reg);
                }
            }
            if (rt_current->signal) {
                (*rt_current->signal)();
            }
            hard_cli();
            return;
        }
schedlnxtsk:
        if (new_task->is_hard || rt_current->is_hard) {
            struct task_struct *prev = rtai_get_current(cpuid);
            DECL_CPUS_ALLOWED;
            SAVE_CPUS_ALLOWED;
            if (!rt_current->is_hard) {
                LOCK_LINUX(cpuid);
                rt_linux_task.lnxtsk = prev;
                SET_CPUS_ALLOWED;
            }
            rt_smp_current[cpuid] = new_task;
            UEXECTIME();
            prev = lxrt_context_switch(prev, new_task->lnxtsk, cpuid);
            if (prev->used_math) {
                restore_fpu(prev);
            }
            if (rt_current->signal) {
                rt_current->signal();
            }
            if (!rt_current->is_hard) {
                UNLOCK_LINUX(cpuid);
                RST_CPUS_ALLOWED;
            } else if (rt_current->force_soft) {
                make_current_soft(rt_current);
            }
        }
    }
    hard_cli();
    return;
}

/* Re-inject the 8254 timer interrupt to Linux whenever jiffies lag behind
 * the real-time clock. */
static irqreturn_t recover_jiffies(int irq, void *dev_id, struct pt_regs *regs)
{
    rt_global_cli();
    if (linux_times->tick_time >= linux_times->linux_time) {
        linux_times->linux_time += linux_times->linux_tick;
        rt_pend_linux_irq(TIMER_8254_IRQ);
    }
    rt_global_sti();
    BROADCAST_TO_LOCAL_TIMERS();
    return RTAI_LINUX_IRQ_HANDLED;
}

int rt_is_hard_timer_running(void)
{
    int cpuid, running;

    for (running = cpuid = 0; cpuid < num_online_cpus(); cpuid++) {
        if (rt_time_h) {
            running |= (1 << cpuid);
        }
    }
    return running;
}

void rt_set_periodic_mode(void)
{
    int cpuid;

    stop_rt_timer();
    for (cpuid = 0; cpuid < NR_RT_CPUS; cpuid++) {
        oneshot_timer = oneshot_running = 0;
    }
}

void rt_set_oneshot_mode(void)
{
    int cpuid;

    stop_rt_timer();
    for (cpuid = 0; cpuid < NR_RT_CPUS; cpuid++) {
        oneshot_timer = 1;
    }
}

#ifdef __USE_APIC__

void start_rt_apic_timers(struct apic_timer_setup_data *setup_data, unsigned int rcvr_jiffies_cpuid)
{
    unsigned long flags, cpuid;

    rt_request_apic_timers(rt_timer_handler, setup_data);
    flags = rt_global_save_flags_and_cli();
    for (cpuid = 0; cpuid < NR_RT_CPUS; cpuid++) {
        if (setup_data[cpuid].mode > 0) {
            oneshot_timer = oneshot_running = 0;
            tuned.timers_tol[cpuid] = rt_half_tick = (rt_times.periodic_tick + 1) >> 1;
        } else {
            oneshot_timer = oneshot_running = 1;
            tuned.timers_tol[cpuid] = rt_half_tick = (tuned.latency + 1) >> 1;
        }
        rt_time_h = rt_times.tick_time + rt_half_tick;
        shot_fired = 1;
    }
    linux_times = rt_smp_times + (rcvr_jiffies_cpuid < NR_RT_CPUS ? rcvr_jiffies_cpuid : 0);
    rt_global_restore_flags(flags);
    rt_free_linux_irq(TIMER_8254_IRQ, &rtai_broadcast_to_local_timers);
    rt_request_linux_irq(TIMER_8254_IRQ, recover_jiffies, "rtai_jif_chk", recover_jiffies);
}

RTIME start_rt_timer(int period)
{
    int cpuid;
    struct apic_timer_setup_data setup_data[NR_RT_CPUS];

    for (cpuid = 0; cpuid < NR_RT_CPUS; cpuid++) {
        setup_data[cpuid].mode = oneshot_timer ? 0 : 1;
        setup_data[cpuid].count = count2nano(period);
    }
    start_rt_apic_timers(setup_data, hard_cpu_id());
    return period;
}

void stop_rt_timer(void)
{
    unsigned long flags;
    int cpuid;

    rt_free_linux_irq(TIMER_8254_IRQ, recover_jiffies);
    rt_free_apic_timers();
    flags = rt_global_save_flags_and_cli();
    for (cpuid = 0; cpuid < NR_RT_CPUS; cpuid++) {
        oneshot_timer = oneshot_running = 0;
    }
    rt_global_restore_flags(flags);
    rt_busy_sleep(10000000);
}

#else

RTIME start_rt_timer(int period)
{
#define cpuid 0
#undef rt_times
    unsigned long flags;

    flags = rt_global_save_flags_and_cli();
    if (oneshot_timer) {
        rt_request_timer(rt_timer_handler, 0, 0);
        tuned.timers_tol[0] = rt_half_tick = (tuned.latency + 1) >> 1;
        oneshot_running = shot_fired = 1;
    } else {
        rt_request_timer(rt_timer_handler, period > LATCH ? LATCH : period, 0);
        tuned.timers_tol[0] = rt_half_tick = (rt_times.periodic_tick + 1) >> 1;
    }
    rt_smp_times[cpuid].linux_tick = rt_times.linux_tick;
    rt_smp_times[cpuid].tick_time = rt_times.tick_time;
    rt_smp_times[cpuid].intr_time = rt_times.intr_time;
    rt_smp_times[cpuid].linux_time = rt_times.linux_time;
    rt_smp_times[cpuid].periodic_tick = rt_times.periodic_tick;
    rt_time_h = rt_times.tick_time + rt_half_tick;
    linux_times = rt_smp_times;
    rt_global_restore_flags(flags);
    rt_request_linux_irq(TIMER_8254_IRQ, recover_jiffies, "rtai_jif_chk", recover_jiffies);
    return period;
#undef cpuid
#define rt_times (rt_smp_times[cpuid])
}

void start_rt_apic_timers(struct apic_timer_setup_data *setup_mode, unsigned int rcvr_jiffies_cpuid)
{
    int cpuid, period;

    period = 0;
    for (cpuid = 0; cpuid < NR_RT_CPUS; cpuid++) {
        period += setup_mode[cpuid].mode;
    }
    if (period == NR_RT_CPUS) {
        period = 2000000000;
        for (cpuid = 0; cpuid < NR_RT_CPUS; cpuid++) {
            if (setup_mode[cpuid].count < period) {
                period = setup_mode[cpuid].count;
            }
        }
        start_rt_timer(nano2count(period));
    } else {
        rt_set_oneshot_mode();
        start_rt_timer(0);
    }
}

void stop_rt_timer(void)
{
    unsigned long flags;

    rt_free_linux_irq(TIMER_8254_IRQ, recover_jiffies);
    rt_free_timer();
    flags = rt_global_save_flags_and_cli();
    rt_smp_oneshot_timer[0] = rt_smp_oneshot_running[0] = 0;
    rt_global_restore_flags(flags);
    rt_busy_sleep(10000000);
}

#endif

RTIME start_rt_timer_cpuid(int period, int cpuid)
{
    return start_rt_timer(period);
}

int rt_sched_type(void)
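
For context, here is a minimal sketch of how a kernel module might drive the timer and scheduling API implemented above. The module name, task body, stack size, priority and the 1 ms period are illustrative assumptions, not taken from this file; the calls themselves (rt_set_oneshot_mode, start_rt_timer, nano2count, rt_task_init, rt_task_make_periodic, rt_task_wait_period, rt_task_delete, stop_rt_timer) are the standard RTAI kernel-space API declared in rtai_sched.h, only some of which are defined in this file.

/* Sketch only: illustrative RTAI module using the scheduler/timer API above. */
#include <linux/module.h>
#include <rtai.h>
#include <rtai_sched.h>

#define STACK_SIZE 4096          /* illustrative */
#define PERIOD_NS  1000000       /* 1 ms, illustrative */

static RT_TASK thread;

static void thread_fun(long arg)
{
    while (1) {
        /* real-time work would go here */
        rt_task_wait_period();   /* sleep until the next period */
    }
}

static int __init example_init(void)
{
    RTIME period;

    rt_set_oneshot_mode();                           /* reprogram the timer shot by shot */
    period = start_rt_timer(nano2count(PERIOD_NS));  /* returns the period in counts */
    rt_task_init(&thread, thread_fun, 0, STACK_SIZE, 0, 0, 0);
    rt_task_make_periodic(&thread, rt_get_time() + period, period);
    return 0;
}

static void __exit example_exit(void)
{
    rt_task_delete(&thread);     /* ends up in clr_rtext() above */
    stop_rt_timer();
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");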