📄 sched_smp.c
字号:
/* Tail of a function whose beginning lies outside this chunk (it appears to be
 * the non-#else variant of the schedule-IPI handler); left exactly as found. */
sched_release_global_lock(hard_cpu_id());
	if (rt_current->signal) {
		(*rt_current->signal)();
	}
	return;
	} else {
		sched_release_global_lock(cpuid);
	}
	return;
}
#else
/*
 * rt_schedule_on_schedule_ipi - reschedule this CPU in response to a
 * cross-CPU scheduling IPI (inter-processor interrupt).
 *
 * Loops until either the currently running task still has the best
 * priority (early return) or a ready, not-yet-running task is found and
 * switched to.  DECLARE_RT_CURRENT/ASSIGN_RT_CURRENT are project macros
 * that presumably bind `cpuid` and `rt_current` for this CPU — confirm in
 * the scheduler headers.
 */
static void rt_schedule_on_schedule_ipi(void)
{
	DECLARE_RT_CURRENT;
	RT_TASK *task, *new_task;
	int prio;
	ASSIGN_RT_CURRENT;
	sched_rqsted[cpuid] = 1;	/* mark that a reschedule was requested on this CPU */
	do {
		prio = RT_SCHED_LINUX_PRIORITY;
		new_task = &rt_linux_task;
		task = &rt_base_linux_task;
		/* Task list is only read while choosing a candidate; the global
		 * scheduler lock is taken afterwards for the actual switch. */
		read_lock(&task_list_lock);
		RR_YIELD();
		TASK_TO_SCHEDULE_ON_IPI();	/* macro: selects new_task/prio — see scheduler headers */
		RR_SETYT();
		read_unlock(&task_list_lock);
		/* Current task still running at a priority at least as good as the
		 * best candidate: nothing to do.  NOTE: lower number = higher
		 * priority, per the `<= prio` comparisons throughout this file. */
		if (rt_current->running && rt_current->priority <= prio) {
			return;
		}
		sched_get_global_lock(cpuid);
		if (new_task->state == RT_SCHED_READY && !new_task->running) {
			rt_current->running = 0;
			new_task->running = new_task->state = RT_SCHED_READY;
			/* Leaving the Linux task: enter real-time mode and save CR0
			 * (clearing TS so FPU ops below don't trap). */
			if (rt_current == &rt_linux_task) {
				rt_switch_to_real_time(cpuid);
				save_cr0_and_clts(linux_cr0);
			}
			/* Lazy FPU context handling: save outgoing context if the
			 * outgoing task used the FPU, restore incoming context if the
			 * incoming task uses it. */
			if (rt_current->uses_fpu) {
				enable_fpu();
				save_fpenv(rt_current->fpu_reg);
				if (new_task->uses_fpu) {
					restore_fpenv(new_task->fpu_reg);
				}
			} else if (new_task->uses_fpu) {
				enable_fpu();
				restore_fpenv(new_task->fpu_reg);
			}
			KEXECTIME();
			rt_exchange_tasks(rt_smp_current[cpuid], new_task);
			/* After the task exchange we may resume on a different CPU,
			 * hence hard_cpu_id() is re-read instead of reusing `cpuid`. */
			sched_release_global_lock(hard_cpu_id());
			if (rt_current->signal) {
				(*rt_current->signal)();
			}
			return;
		}
		sched_release_global_lock(cpuid);
	} while (1);
}
#endif
#endif

#define ANTICIPATE

/*
 * rt_schedule - the main (voluntary) rescheduling point.
 *
 * Picks the highest-priority ready task, reprograms the one-shot timer if
 * an earlier preemption point was found, broadcasts a scheduling IPI to
 * other CPUs with ready tasks, and performs the context switch (including
 * CR0/FPU state) when the chosen task differs from the current one.
 *
 * NOTE(review): `preempt` is never explicitly initialized here — it is
 * presumably set by the RR_SPREMP() macro before it is first read; verify
 * against the macro definition in the scheduler headers.
 */
void rt_schedule(void)
{
	DECLARE_RT_CURRENT;
	RT_TASK *task, *new_task;
	RTIME intr_time, now;
	unsigned int cpus_with_ready_tasks;
	int prio, delay, preempt;
	task = &rt_base_linux_task;
	cpus_with_ready_tasks = 0;
	prio = RT_SCHED_LINUX_PRIORITY;
	ASSIGN_RT_CURRENT;
	new_task = &rt_linux_task;
	rt_current->running = 0;
	RR_YIELD();
	if (oneshot_running) {
#ifdef ANTICIPATE
		/* Anticipate the tick: advance the high-resolution "now" horizon
		 * and wake tasks whose resume time has been reached. */
		rt_time_h = rdtsc() + rt_half_tick;
		wake_up_timed_tasks(0);
#endif
		TASK_TO_SCHEDULE();	/* macro: selects new_task/prio/cpus_with_ready_tasks */
		RR_SETYT();
		/* Next interrupt time: if no shot is pending, look one Linux tick
		 * further ahead. */
		intr_time = shot_fired ? rt_times.intr_time : rt_times.intr_time + rt_times.linux_tick;
		RR_SPREMP();
		task = &rt_base_linux_task;
		/* Scan the timed-task list (tnext chain) for a higher-priority task
		 * that must resume before the currently programmed interrupt. */
		while ((task = task->tnext) != &rt_base_linux_task) {
			if (task->priority <= prio && task->resume_time < intr_time) {
				intr_time = task->resume_time;
				preempt = 1;
				break;
			}
		}
		if (preempt || (!shot_fired && (prio == RT_SCHED_LINUX_PRIORITY))) {
			shot_fired = 1;
			if (preempt) {
				rt_times.intr_time = intr_time;
			}
			/* Convert the CPU-clock delay to timer units; if it is too
			 * short to program, use the minimum setup time instead. */
			delay = (int)(rt_times.intr_time - (now = rdtsc())) - tuned.latency;
			if (delay >= tuned.setup_time_TIMER_CPUNIT) {
				delay = imuldiv(delay, TIMER_FREQ, tuned.cpu_freq);
			} else {
				delay = tuned.setup_time_TIMER_UNIT;
				rt_times.intr_time = now + (tuned.setup_time_TIMER_CPUNIT);
			}
			set_timer_cpu(1 << cpuid);
			rt_set_timer_delay(delay);
		}
	} else {
		TASK_TO_SCHEDULE();
		RR_SETYT();
	}
	new_task->running = new_task->state = RT_SCHED_READY;
	/* Kick every other CPU that has ready tasks (mask out our own). */
	smp_send_sched_ipi(cpus_with_ready_tasks & ~(1 << cpuid));
	if (new_task != rt_current) {
		if (rt_current == &rt_linux_task) {
			rt_switch_to_real_time(cpuid);
			save_cr0_and_clts(linux_cr0);
		}
		/* Lazy FPU save/restore, as in the IPI path above. */
		if (rt_current->uses_fpu) {
			enable_fpu();
			save_fpenv(rt_current->fpu_reg);
			if (new_task->uses_fpu) {
				restore_fpenv(new_task->fpu_reg);
			}
		} else if (new_task->uses_fpu) {
			enable_fpu();
			restore_fpenv(new_task->fpu_reg);
		}
		KEXECTIME();
		if (new_task == &rt_linux_task) {
			restore_cr0(linux_cr0);
			rt_switch_to_linux(cpuid);
			/* From now on, the Linux stage is re-enabled, but not sync'ed
			   until we have actually switched to the Linux task, so that we
			   don't end up running the Linux IRQ handlers on behalf of a
			   non-Linux stack context... */
		}
		rt_exchange_tasks(rt_smp_current[cpuid], new_task);
		/* Signal handlers run with the global lock dropped; `cpuid` is
		 * refreshed because we may have migrated during the exchange. */
		if (rt_current->signal) {
			sched_release_global_lock(cpuid = hard_cpu_id());
			(*rt_current->signal)();
			sched_get_global_lock(cpuid);
		}
	}
}

/*
 * rt_spv_RMS - assign Rate-Monotonic priorities supervisor-style.
 *
 * Repeatedly finds the periodic task with the shortest period among those
 * not yet processed (priority >= 0, policy >= 0, nonzero period), marks it
 * with priority -1 and records the next ascending base_priority; once no
 * candidate remains, commits base_priority back into priority for every
 * marked task.  The `cpuid` parameter is unused in the visible body.
 * NOTE(review): the inner `task` declaration shadows the outer one — this
 * matches the upstream code but is worth a second look.
 */
void rt_spv_RMS(int cpuid)
{
	RT_TASK *task;
	int prio;
	prio = 0;
	task = &rt_base_linux_task;
	while ((task = task->next)) {
		RT_TASK *task, *htask;
		RTIME period;
		htask = 0;
		task = &rt_base_linux_task;
		period = RT_TIME_END;
		/* Shortest-period candidate not yet marked. */
		while ((task = task->next)) {
			if (task->priority >= 0 && task->policy >= 0 && task->period && task->period < period) {
				period = (htask = task)->period;
			}
		}
		if (htask) {
			htask->priority = -1;	/* mark as processed */
			htask->base_priority = prio++;
		} else {
			goto ret;
		}
	}
ret:
	/* Commit the computed priorities. */
	task = &rt_base_linux_task;
	while ((task = task->next)) {
		if (task->priority < 0) {
			task->priority = task->base_priority;
		}
	}
	return;
}

/*
 * rt_sched_lock - disable rescheduling for the current task (nestable).
 *
 * First call saves the real priority and flags the CPU with -1; nested
 * calls just decrement the (negative) priority as a nesting counter.
 */
void rt_sched_lock(void)
{
	DECLARE_RT_CURRENT;
	unsigned long flags;
	flags = rt_global_save_flags_and_cli();
	ASSIGN_RT_CURRENT;
	if (rt_current->priority >= 0) {
		rt_current->sched_lock_priority = rt_current->priority;
		sched_rqsted[cpuid] = rt_current->priority = -1;
	} else {
		rt_current->priority--;
	}
	rt_global_restore_flags(flags);
}

/*
 * rt_sched_unlock - undo one level of rt_sched_lock().
 *
 * When the outermost lock is released (nesting counter reaches 0), the
 * saved priority is restored, the task is re-queued in the ready list at
 * its proper position, and a deferred reschedule is honored if one was
 * requested while locked.
 */
void rt_sched_unlock(void)
{
	DECLARE_RT_CURRENT;
	unsigned long flags;
	flags = rt_global_save_flags_and_cli();
	ASSIGN_RT_CURRENT;
	if (rt_current->priority < 0 && !(++rt_current->priority)) {
		if ((rt_current->priority = rt_current->sched_lock_priority) != RT_SCHED_LINUX_PRIORITY) {
			/* Unlink from the ready list and re-enqueue at the restored
			 * priority. */
			(rt_current->rprev)->rnext = rt_current->rnext;
			(rt_current->rnext)->rprev = rt_current->rprev;
			enq_ready_task(rt_current);
		}
		if (sched_rqsted[cpuid] > 0) {
			rt_schedule();
		}
	}
	rt_global_restore_flags(flags);
}

/*
 * rt_task_delete - destroy a real-time task.
 *
 * Returns -EINVAL for a non-task pointer (bad magic) or for the Linux
 * placeholder task.  A task still owning semaphore-half resources is only
 * marked for delayed deletion (suspdepth = -0x7FFFFFFF) unless it is
 * deleting itself or the caller is the Linux task.  Otherwise: exit
 * handlers run, the task is removed from the timed list, unblocked from
 * any semaphore (with the count repaired for binary semaphores), all
 * tasks blocked on its message/return queues are released, it is unlinked
 * from the global task list, and its stack is queued to a service request
 * (frstk_srq) for deferred freeing in Linux context.
 */
int rt_task_delete(RT_TASK *task)
{
	RT_TASK *rt_current;
	unsigned long flags;
	QUEUE *q;
	if (task->magic != RT_TASK_MAGIC || task->priority == RT_SCHED_LINUX_PRIORITY) {
		return -EINVAL;
	}
	flags = rt_global_save_flags_and_cli();
	rt_current = RT_CURRENT;
	if (!(task->owndres & SEMHLF) || task == rt_current || rt_current->priority == RT_SCHED_LINUX_PRIORITY) {
		call_exit_handlers(task);
		rem_timed_task(task);
		if (task->blocked_on) {
			/* Unlink from whatever wait queue the task sits on. */
			(task->queue.prev)->next = task->queue.next;
			(task->queue.next)->prev = task->queue.prev;
			if (task->state & RT_SCHED_SEMAPHORE) {
				/* Give back the semaphore unit; clamp binary (typed)
				 * semaphores to a count of 1. */
				((SEM *)(task->blocked_on))->count++;
				if (((SEM *)(task->blocked_on))->type && ((SEM *)(task->blocked_on))->count > 1) {
					((SEM *)(task->blocked_on))->count = 1;
				}
			}
		}
		/* Release every task blocked sending/RPC-ing to us... */
		q = &(task->msg_queue);
		while ((q = q->next) != &(task->msg_queue)) {
			rem_timed_task(q->task);
			if ((q->task)->state != RT_SCHED_READY && ((q->task)->state &= ~(RT_SCHED_SEND | RT_SCHED_RPC | RT_SCHED_DELAYED)) == RT_SCHED_READY) {
				enq_ready_task(q->task);
			}
			(q->task)->blocked_on = 0;
		}
		/* ...and every task waiting for a reply from us. */
		q = &(task->ret_queue);
		while ((q = q->next) != &(task->ret_queue)) {
			rem_timed_task(q->task);
			if ((q->task)->state != RT_SCHED_READY && ((q->task)->state &= ~(RT_SCHED_RETURN | RT_SCHED_DELAYED)) == RT_SCHED_READY) {
				enq_ready_task(q->task);
			}
			(q->task)->blocked_on = 0;
		}
		/* Unlink from the global task list; maintain the list tail pointer
		 * stored in rt_base_linux_task.prev. */
		write_lock(&task_list_lock);
		if (!((task->prev)->next = task->next)) {
			rt_base_linux_task.prev = task->prev;
		} else {
			(task->next)->prev = task->prev;
		}
		write_unlock(&task_list_lock);
		/* Defer stack freeing to Linux context through the frstk SRQ ring
		 * (MAX_FRESTK_SRQ is presumably a power of two, given the mask). */
		frstk_srq.mp[frstk_srq.in] = task->stack_bottom;
		frstk_srq.in = (frstk_srq.in + 1) & (MAX_FRESTK_SRQ - 1);
		task->magic = 0;
		rt_pend_linux_srq(frstk_srq.srq);
		rem_ready_task(task);
		task->state = 0;
		if (task == rt_current) {
			rt_schedule();	/* never returns to the deleted task */
		}
	} else {
		task->suspdepth = -0x7FFFFFFF;	/* mark for deletion once resources are released */
	}
	rt_global_restore_flags(flags);
	return 0;
}

/*
 * rt_timer_handler - hard real-time timer interrupt handler.
 *
 * Advances the tick bookkeeping, propagates ticks to the Linux timer when
 * due, wakes timed tasks, selects the next task, reprograms the timer
 * (one-shot: next earlier of Linux time / next tick / an earlier
 * preempting task; periodic: fixed increment), then context-switches if
 * needed.  Runs with the global scheduler lock held from
 * sched_get_global_lock() until the release near the end.
 * NOTE(review): as in rt_schedule(), `preempt` appears to be initialized
 * inside the RR_TPREMP() macro — verify.
 */
static void rt_timer_handler(void)
{
	DECLARE_RT_CURRENT;
	RTIME now;
	RT_TASK *task, *new_task;
	unsigned int cpus_with_ready_tasks;
	int prio, delay, preempt;
	ASSIGN_RT_CURRENT;
	sched_rqsted[cpuid] = 1;
	task = &rt_base_linux_task;
	prio = RT_SCHED_LINUX_PRIORITY;
	cpus_with_ready_tasks = 0;
	new_task = &rt_linux_task;
	sched_get_global_lock(cpuid);
	is_timer_cpu(cpuid);
#ifdef CONFIG_X86_REMOTE_DEBUG
	if (oneshot_timer) {	// Resync after possibly hitting a breakpoint
		rt_times.intr_time = rdtsc();
	}
#endif
	rt_times.tick_time = rt_times.intr_time;
	rt_time_h = rt_times.tick_time + rt_half_tick;
	/* Feed Linux its timer tick whenever real time has caught up. */
	if (rt_times.tick_time >= rt_times.linux_time) {
		rt_times.linux_time += rt_times.linux_tick;
		update_linux_timer();
	}
	rt_current->running = 0;
	wake_up_timed_tasks(0);
	RR_YIELD();
	TASK_TO_SCHEDULE();
	RR_SETYT();
	if (oneshot_timer) {
		/* Default next shot: pending Linux time if it is ahead of the tick
		 * just served, otherwise one Linux tick after it. */
		rt_times.intr_time = rt_times.linux_time > rt_times.tick_time ? rt_times.linux_time : rt_times.tick_time + rt_times.linux_tick;
		RR_TPREMP();
		task = &rt_base_linux_task;
		/* An earlier, sufficiently-privileged timed task pulls the shot in. */
		while ((task = task->tnext) != &rt_base_linux_task) {
			if (task->priority <= prio && task->resume_time < rt_times.intr_time) {
				rt_times.intr_time = task->resume_time;
				preempt = 1;
				break;
			}
		}
		if ((shot_fired = preempt)) {
			delay = (int)(rt_times.intr_time - (now = rdtsc())) - tuned.latency;
			if (delay >= tuned.setup_time_TIMER_CPUNIT) {
				delay = imuldiv(delay, TIMER_FREQ, tuned.cpu_freq);
			} else {
				delay = tuned.setup_time_TIMER_UNIT;
				rt_times.intr_time = now + (tuned.setup_time_TIMER_CPUNIT);
			}
			set_timer_cpu(1 << cpuid);
			rt_set_timer_delay(delay);
		}
	} else {
		/* Periodic mode: fixed advance; rt_set_timer_delay(0) presumably
		 * re-arms with the pre-programmed period. */
		rt_times.intr_time += rt_times.periodic_tick;
		rt_set_timer_delay(0);
	}
	new_task->running = new_task->state = RT_SCHED_READY;
	smp_send_sched_ipi(cpus_with_ready_tasks & ~(1 << cpuid));
	if (new_task != rt_current) {
		if (rt_current == &rt_linux_task) {
			rt_switch_to_real_time(cpuid);
			save_cr0_and_clts(linux_cr0);
		}
		/* Lazy FPU save/restore, identical to rt_schedule(). */
		if (rt_current->uses_fpu) {
			enable_fpu();
			save_fpenv(rt_current->fpu_reg);
			if (new_task->uses_fpu) {
				restore_fpenv(new_task->fpu_reg);
			}
		} else if (new_task->uses_fpu) {
			enable_fpu();
			restore_fpenv(new_task->fpu_reg);
		}
		KEXECTIME();
		rt_exchange_tasks(rt_smp_current[cpuid], new_task);
		/* May resume on a different CPU: re-read the hard CPU id. */
		sched_release_global_lock(hard_cpu_id());
		if (rt_current->signal) {
			(*rt_current->signal)();
		}
		return;
	} else {
		sched_release_global_lock(cpuid);
	}
	return;
}

/*
 * recover_jiffies - Linux-side IRQ handler that re-injects timer ticks.
 *
 * While the RT timer owns the hardware timer, Linux jiffies would stall;
 * this handler pends the 8254 timer IRQ back to Linux whenever real time
 * has passed the next expected Linux tick, and rebroadcasts to local
 * (APIC) timers.
 */
static irqreturn_t recover_jiffies(int irq, void *dev_id, struct pt_regs *regs)
{
	rt_global_cli();
	if (rt_times.tick_time >= rt_times.linux_time) {
		rt_times.linux_time += rt_times.linux_tick;
		rt_pend_linux_irq(TIMER_8254_IRQ);
	}
	rt_global_sti();
	BROADCAST_TO_LOCAL_TIMERS();
	return RTAI_LINUX_IRQ_HANDLED;
}

/* Nonzero while the hard timer has been started (rt_time_h is set by
 * start_rt_timer and the tick handler). */
int rt_is_hard_timer_running(void)
{
	return (rt_time_h > 0);
}

/* Switch to periodic timer mode; stops any running timer first. */
void rt_set_periodic_mode(void)
{
	stop_rt_timer();
	oneshot_timer = oneshot_running = 0;
}

/* Switch to one-shot timer mode; stops any running timer first. */
void rt_set_oneshot_mode(void)
{
	stop_rt_timer();
	oneshot_timer = 1;
}

/*
 * start_rt_timer - start the hard timer in the currently selected mode.
 *
 * One-shot: request the timer with period 0 and set the tolerance/half
 * tick from the tuned latency.  Periodic: clamp the period to LATCH and
 * derive the half tick from the programmed periodic tick.  Also installs
 * recover_jiffies on the 8254 IRQ (the handler itself doubles as the
 * unique dev_id, as required for shared-IRQ bookkeeping).  Returns the
 * period as passed in.
 *
 * NOTE(review): `TIMER_CHIP == "APIC"` compares string-literal POINTERS,
 * not contents.  It only works if TIMER_CHIP is a macro expanding to a
 * literal that the compiler merges with "APIC" in this translation unit —
 * fragile; strcmp() would be the robust form.  Flagged, not changed.
 */
RTIME start_rt_timer(int period)
{
	unsigned long flags;
	flags = rt_global_save_flags_and_cli();
	if (oneshot_timer) {
		rt_request_timer(rt_timer_handler, 0, TIMER_CHIP == "APIC");
		tuned.timers_tol[0] = rt_half_tick = (tuned.latency + 1)>>1;
		oneshot_running = shot_fired = 1;
	} else {
		rt_request_timer(rt_timer_handler, period > LATCH? LATCH: period, TIMER_CHIP == "APIC");
		tuned.timers_tol[0] = rt_half_tick = (rt_times.periodic_tick + 1)>>1;
	}
	set_timer_cpu(1 << hard_cpu_id());
	rt_time_h = rt_times.tick_time + rt_half_tick;
	rt_global_restore_flags(flags);
	FREE_LOCAL_TIMERS();
	rt_request_linux_irq(TIMER_8254_IRQ, recover_jiffies, "rtai_jif_chk", recover_jiffies);
	return period;
}

#ifdef __USE_APIC__
/*
 * start_rt_timer_cpuid - APIC build: start the hard timer bound to a
 * specific CPU.  Mirrors start_rt_timer() but uses the per-CPU request
 * call and the given cpuid for the timer-CPU mask.
 */
RTIME start_rt_timer_cpuid(int period, int cpuid)
{
	unsigned long flags;
	flags = rt_global_save_flags_and_cli();
	if (oneshot_timer) {
		rt_request_timer_cpuid(rt_timer_handler, 0, cpuid);
		tuned.timers_tol[0] = rt_half_tick = (tuned.latency + 1)>>1;
		oneshot_running = shot_fired = 1;
	} else {
		rt_request_timer_cpuid(rt_timer_handler, period, cpuid);
		tuned.timers_tol[0] = rt_half_tick = (rt_times.periodic_tick + 1)>>1;
	}
	set_timer_cpu(1 << cpuid);
	rt_time_h = rt_times.tick_time + rt_half_tick;
	FREE_LOCAL_TIMERS();
	rt_global_restore_flags(flags);
	rt_request_linux_irq(TIMER_8254_IRQ, recover_jiffies, "rtai_jif_chk", recover_jiffies);
	return period;
}
#else
/* Non-APIC build: cpuid is meaningless, fall back to the plain timer. */
RTIME start_rt_timer_cpuid(int period, int cpuid)
{
	return start_rt_timer(period);
}
#endif

/*
 * start_rt_apic_timers - compatibility front end for per-CPU APIC timer
 * setup data.
 *
 * If every CPU requests periodic mode (sum of .mode flags equals
 * NR_RT_CPUS), the shortest requested count (nanoseconds, converted via
 * nano2count) is used as a common period; otherwise one-shot mode is
 * started.  `rcvr_jiffies_cpuid` is unused in this visible body.
 */
void start_rt_apic_timers(struct apic_timer_setup_data *setup_mode, unsigned int rcvr_jiffies_cpuid)
{
	int cpuid, period;
	period = 0;
	for (cpuid = 0; cpuid < NR_RT_CPUS; cpuid++) {
		period += setup_mode[cpuid].mode;
	}
	if (period == NR_RT_CPUS) {
		period = 2000000000;	/* sentinel: larger than any sane count */
		for (cpuid = 0; cpuid < NR_RT_CPUS; cpuid++) {
			if (setup_mode[cpuid].count < period) {
				period = setup_mode[cpuid].count;
			}
		}
		start_rt_timer(nano2count(period));
	} else {
		rt_set_oneshot_mode();
		start_rt_timer(0);
	}
}

/*
 * stop_rt_timer - release the hard timer and the jiffies-recovery IRQ,
 * restore timer ownership to all present CPUs, and busy-wait ~10 ms to
 * let in-flight timer activity drain.
 */
void stop_rt_timer(void)
{
	unsigned long flags;
	rt_free_linux_irq(TIMER_8254_IRQ, recover_jiffies);
	rt_free_timer();
	flags = rt_global_save_flags_and_cli();
	set_timer_cpu(cpu_present_map);
	oneshot_timer = oneshot_running = 0;
	rt_global_restore_flags(flags);
	rt_busy_sleep(10000000);
}

/* Return the current RT task with hard interrupts disabled around the
 * read, so the answer is coherent even if a migration could intervene. */
static inline RT_TASK *__whoami(void)
{
	RT_TASK *rt_current;
	unsigned long flags;
	hard_save_flags_and_cli(flags);
	rt_current = RT_CURRENT;
	hard_restore_flags(flags);
	return rt_current;
}

/* Definition continues beyond this chunk; left exactly as found. */
int rt_sched_type(void)
{
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -