📄 sched_mup.c
			/* Tail of the scheduler's one-shot path: reprogram the timer for the next expiry. */
			if (preempt) {
				rt_times.intr_time = intr_time;
			}
			delay = (int)(rt_times.intr_time - (now = rdtsc())) - tuned.latency;
			if (delay >= tuned.setup_time_TIMER_CPUNIT) {
				delay = imuldiv(delay, TIMER_FREQ, tuned.cpu_freq);
			} else {
				delay = tuned.setup_time_TIMER_UNIT;
				rt_times.intr_time = now + (tuned.setup_time_TIMER_CPUNIT);
			}
			rt_set_timer_delay(delay);
		}
	} else {
		TASK_TO_SCHEDULE();
		RR_SETYT();
	}
	sched_release_global_lock(cpuid);
	if (new_task != rt_current) {
		if (rt_current == &rt_linux_task) {
			rt_switch_to_real_time(cpuid);
			save_cr0_and_clts(linux_cr0);
		}
		if (new_task->uses_fpu) {
			enable_fpu();
			if (new_task != fpu_task) {
				save_fpenv(fpu_task->fpu_reg);
				fpu_task = new_task;
				restore_fpenv(fpu_task->fpu_reg);
			}
		}
		KEXECTIME();
		if (new_task == &rt_linux_task) {
			restore_cr0(linux_cr0);
			rt_switch_to_linux(cpuid);
			/*
			 * From now on, the Linux stage is re-enabled, but not
			 * sync'ed until we have actually switched to the Linux
			 * task, so that we don't end up running the Linux IRQ
			 * handlers on behalf of a non-Linux stack context...
			 */
		}
		rt_exchange_tasks(rt_smp_current[cpuid], new_task);
		if (rt_current->signal) {
			(*rt_current->signal)();
		}
	}
}

/* Assign rate-monotonic priorities: the shorter a task's period, the higher
 * its priority. */
void rt_spv_RMS(int cpuid)
{
	RT_TASK *task;
	int prio;

	if (cpuid < 0 || cpuid >= smp_num_cpus) {
		cpuid = hard_cpu_id();
	}
	prio = 0;
	task = &rt_linux_task;
	while ((task = task->next)) {
		/* The inner 'task' intentionally shadows the outer one: each
		 * pass rescans the whole list for the shortest period not yet
		 * assigned a priority. */
		RT_TASK *task, *htask;
		RTIME period;

		htask = 0;
		task = &rt_linux_task;
		period = RT_TIME_END;
		while ((task = task->next)) {
			if (task->priority >= 0 && task->policy >= 0 && task->period && task->period < period) {
				period = (htask = task)->period;
			}
		}
		if (htask) {
			htask->priority = -1;
			htask->base_priority = prio++;
		} else {
			goto ret;
		}
	}
ret:
	task = &rt_linux_task;
	while ((task = task->next)) {
		if (task->priority < 0) {
			task->priority = task->base_priority;
		}
	}
	return;
}

void rt_sched_lock(void)
{
	DECLARE_RT_CURRENT;
	unsigned long flags;

	flags = rt_global_save_flags_and_cli();
	ASSIGN_RT_CURRENT;
	if (rt_current->priority >= 0) {
		rt_current->sched_lock_priority = rt_current->priority;
		sched_rqsted[cpuid] = rt_current->priority = -1;
	} else {
		rt_current->priority--;
	}
	rt_global_restore_flags(flags);
}

void rt_sched_unlock(void)
{
	DECLARE_RT_CURRENT;
	unsigned long flags;

	flags = rt_global_save_flags_and_cli();
	ASSIGN_RT_CURRENT;
	if (rt_current->priority < 0 && !(++rt_current->priority)) {
		if ((rt_current->priority = rt_current->sched_lock_priority) != RT_SCHED_LINUX_PRIORITY) {
			(rt_current->rprev)->rnext = rt_current->rnext;
			(rt_current->rnext)->rprev = rt_current->rprev;
			enq_ready_task(rt_current);
		}
		if (sched_rqsted[cpuid] > 0) {
			rt_schedule();
		}
	}
	rt_global_restore_flags(flags);
}
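/*
 * Usage sketch (illustrative, not part of the original scheduler): the
 * rt_sched_lock()/rt_sched_unlock() pair above nests by pushing the caller's
 * priority below zero, so a task can bracket a critical section against
 * preemption by other real-time tasks on this CPU. The struct and function
 * names below are hypothetical.
 */
struct example_shared_state {
	RTIME stamp;
	int value;
};

static void example_update_state(struct example_shared_state *s, int value)
{
	rt_sched_lock();		/* nested calls just deepen the lock */
	s->value = value;		/* no RT task switch can occur in between */
	s->stamp = rt_get_time();
	rt_sched_unlock();		/* outermost unlock reschedules if one was requested */
}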
int rt_task_delete(RT_TASK *task)
{
	DECLARE_RT_CURRENT;
	unsigned long flags;
	QUEUE *q;

	if (task->magic != RT_TASK_MAGIC || task->priority == RT_SCHED_LINUX_PRIORITY) {
		return -EINVAL;
	}
	flags = rt_global_save_flags_and_cli();
	ASSIGN_RT_CURRENT;
	if (!(task->owndres & SEMHLF) || task == rt_current || rt_current->priority == RT_SCHED_LINUX_PRIORITY) {
		call_exit_handlers(task);
		rem_timed_task(task);
		if (task->blocked_on) {
			(task->queue.prev)->next = task->queue.next;
			(task->queue.next)->prev = task->queue.prev;
			if (task->state & RT_SCHED_SEMAPHORE) {
				((SEM *)(task->blocked_on))->count++;
				if (((SEM *)(task->blocked_on))->type && ((SEM *)(task->blocked_on))->count > 1) {
					((SEM *)(task->blocked_on))->count = 1;
				}
			}
		}
		/* Unblock any task waiting on a message to/from the dying task. */
		q = &(task->msg_queue);
		while ((q = q->next) != &(task->msg_queue)) {
			rem_timed_task(q->task);
			if ((q->task)->state != RT_SCHED_READY && ((q->task)->state &= ~(RT_SCHED_SEND | RT_SCHED_RPC | RT_SCHED_DELAYED)) == RT_SCHED_READY) {
				enq_ready_task(q->task);
			}
			(q->task)->blocked_on = 0;
		}
		q = &(task->ret_queue);
		while ((q = q->next) != &(task->ret_queue)) {
			rem_timed_task(q->task);
			if ((q->task)->state != RT_SCHED_READY && ((q->task)->state &= ~(RT_SCHED_RETURN | RT_SCHED_DELAYED)) == RT_SCHED_READY) {
				enq_ready_task(q->task);
			}
			(q->task)->blocked_on = 0;
		}
		if (!((task->prev)->next = task->next)) {
			rt_smp_linux_task[task->runnable_on_cpus].prev = task->prev;
		} else {
			(task->next)->prev = task->prev;
		}
		if (rt_smp_fpu_task[task->runnable_on_cpus] == task) {
			rt_smp_fpu_task[task->runnable_on_cpus] = rt_smp_linux_task + task->runnable_on_cpus;
		}
		/* Queue the dead task's stack for deferred freeing via a Linux SRQ. */
		frstk_srq.mp[frstk_srq.in] = task->stack_bottom;
		frstk_srq.in = (frstk_srq.in + 1) & (MAX_FRESTK_SRQ - 1);
		task->magic = 0;
		rt_pend_linux_srq(frstk_srq.srq);
		rem_ready_task(task);
		task->state = 0;
		if (task == rt_current) {
			rt_schedule();
		}
	} else {
		task->suspdepth = -0x7FFFFFFF;
	}
	rt_global_restore_flags(flags);
	return 0;
}

int rt_get_timer_cpu(void)
{
	return 1;
}

static void rt_timer_handler(void)
{
	DECLARE_RT_CURRENT;
	RTIME now;
	RT_TASK *task, *new_task;
	int prio, delay, preempt;

	ASSIGN_RT_CURRENT;
	sched_rqsted[cpuid] = 1;
	task = new_task = &rt_linux_task;
	prio = RT_SCHED_LINUX_PRIORITY;
#ifdef CONFIG_X86_REMOTE_DEBUG
	if (oneshot_timer) {	// Resync after possibly hitting a breakpoint
		rt_times.intr_time = rdtsc();
	}
#endif
	rt_times.tick_time = rt_times.intr_time;
	rt_time_h = rt_times.tick_time + rt_half_tick;
	if (rt_times.tick_time >= rt_times.linux_time) {
		rt_times.linux_time += rt_times.linux_tick;
		update_linux_timer();
	}
	sched_get_global_lock(cpuid);
	wake_up_timed_tasks(cpuid);
	RR_YIELD();
	TASK_TO_SCHEDULE();
	RR_SETYT();
	if (oneshot_timer) {
		rt_times.intr_time = rt_times.linux_time > rt_times.tick_time ? rt_times.linux_time : rt_times.tick_time + rt_times.linux_tick;
		RR_TPREMP();
		task = &rt_linux_task;
		while ((task = task->tnext) != &rt_linux_task) {
			if (task->priority <= prio && task->resume_time < rt_times.intr_time) {
				rt_times.intr_time = task->resume_time;
				preempt = 1;
				break;
			}
		}
		if ((shot_fired = preempt)) {
			delay = (int)(rt_times.intr_time - (now = rdtsc())) - tuned.latency;
			if (delay >= tuned.setup_time_TIMER_CPUNIT) {
				delay = imuldiv(delay, TIMER_FREQ, tuned.cpu_freq);
			} else {
				delay = tuned.setup_time_TIMER_UNIT;
				rt_times.intr_time = now + (tuned.setup_time_TIMER_CPUNIT);
			}
			rt_set_timer_delay(delay);
		}
	} else {
		rt_times.intr_time += rt_times.periodic_tick;
		rt_set_timer_delay(0);
	}
	sched_release_global_lock(cpuid);
	if (new_task != rt_current) {
		if (rt_current == &rt_linux_task) {
			rt_switch_to_real_time(cpuid);
			save_cr0_and_clts(linux_cr0);
		}
		if (new_task->uses_fpu) {
			enable_fpu();
			if (new_task != fpu_task) {
				save_fpenv(fpu_task->fpu_reg);
				fpu_task = new_task;
				restore_fpenv(fpu_task->fpu_reg);
			}
		}
		KEXECTIME();
		rt_exchange_tasks(rt_smp_current[cpuid], new_task);
		if (rt_current->signal) {
			(*rt_current->signal)();
		}
	}
	return;
}

static void recover_jiffies(int irq, void *dev_id, struct pt_regs *regs)
{
	rt_global_cli();
	if (linux_times->tick_time >= linux_times->linux_time) {
		linux_times->linux_time += linux_times->linux_tick;
		rt_pend_linux_irq(TIMER_8254_IRQ);
	}
	rt_global_sti();
	BROADCAST_TO_LOCAL_TIMERS();
}

int rt_is_hard_timer_running(void)
{
	int cpuid, running;

	for (running = cpuid = 0; cpuid < smp_num_cpus; cpuid++) {
		if (rt_time_h) {
			set_bit(cpuid, &running);
		}
	}
	return running;
}

void rt_set_periodic_mode(void)
{
	int cpuid;

	stop_rt_timer();
	for (cpuid = 0; cpuid < NR_RT_CPUS; cpuid++) {
		oneshot_timer = oneshot_running = 0;
	}
}

void rt_set_oneshot_mode(void)
{
	int cpuid;

	stop_rt_timer();
	for (cpuid = 0; cpuid < NR_RT_CPUS; cpuid++) {
		oneshot_timer = 1;
	}
}
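/*
 * Usage sketch (illustrative, not part of the original scheduler): typical
 * module init code selects a timer mode with rt_set_oneshot_mode() or
 * rt_set_periodic_mode() before calling start_rt_timer(). Everything named
 * example_* below is hypothetical.
 */
static RT_TASK example_task;

static void example_loop(int arg)
{
	while (1) {
		/* one cycle of periodic work goes here */
		rt_task_wait_period();
	}
}

static int example_start(void)
{
	RTIME tick;

	rt_set_oneshot_mode();	/* or rt_set_periodic_mode() for a fixed tick */
	tick = start_rt_timer(nano2count(1000000));	/* ~1 ms period, in counts */
	rt_task_init(&example_task, example_loop, 0, 4096, 0, 0, 0);
	return rt_task_make_periodic(&example_task, rt_get_time() + tick, tick);
}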
#ifdef __USE_APIC__

void start_rt_apic_timers(struct apic_timer_setup_data *setup_data, unsigned int rcvr_jiffies_cpuid)
{
	unsigned long flags, cpuid;

	rt_request_apic_timers(rt_timer_handler, setup_data);
	flags = rt_global_save_flags_and_cli();
	for (cpuid = 0; cpuid < NR_RT_CPUS; cpuid++) {
		if (setup_data[cpuid].mode > 0) {
			oneshot_timer = oneshot_running = 0;
			tuned.timers_tol[cpuid] = rt_half_tick = (rt_times.periodic_tick + 1) >> 1;
		} else {
			oneshot_timer = oneshot_running = 1;
			tuned.timers_tol[cpuid] = rt_half_tick = (tuned.latency + 1) >> 1;
		}
		rt_time_h = rt_times.tick_time + rt_half_tick;
		shot_fired = 1;
	}
	linux_times = rt_smp_times + (rcvr_jiffies_cpuid < NR_RT_CPUS ? rcvr_jiffies_cpuid : 0);
	rt_global_restore_flags(flags);
	rt_free_linux_irq(TIMER_8254_IRQ, &rtai_broadcast_to_local_timers);
	rt_request_linux_irq(TIMER_8254_IRQ, recover_jiffies, "rtai_jif_chk", recover_jiffies);
}

RTIME start_rt_timer(int period)
{
	int cpuid;
	struct apic_timer_setup_data setup_data[NR_RT_CPUS];

	for (cpuid = 0; cpuid < NR_RT_CPUS; cpuid++) {
		setup_data[cpuid].mode = oneshot_timer ? 0 : 1;
		setup_data[cpuid].count = count2nano(period);
	}
	start_rt_apic_timers(setup_data, hard_cpu_id());
	return period;
}

void stop_rt_timer(void)
{
	unsigned long flags;
	int cpuid;

	rt_free_linux_irq(TIMER_8254_IRQ, recover_jiffies);
	rt_free_apic_timers();
	flags = rt_global_save_flags_and_cli();
	for (cpuid = 0; cpuid < NR_RT_CPUS; cpuid++) {
		oneshot_timer = oneshot_running = 0;
	}
	rt_global_restore_flags(flags);
	rt_busy_sleep(10000000);
}

#else

RTIME start_rt_timer(int period)
{
#define cpuid 0
#undef rt_times
	unsigned long flags;

	flags = rt_global_save_flags_and_cli();
	if (oneshot_timer) {
		rt_request_timer(rt_timer_handler, 0, 0);
		tuned.timers_tol[0] = rt_half_tick = (tuned.latency + 1) >> 1;
		oneshot_running = shot_fired = 1;
	} else {
		rt_request_timer(rt_timer_handler, period > LATCH ? LATCH : period, 0);
		tuned.timers_tol[0] = rt_half_tick = (rt_times.periodic_tick + 1) >> 1;
	}
	rt_smp_times[cpuid].linux_tick = rt_times.linux_tick;
	rt_smp_times[cpuid].tick_time = rt_times.tick_time;
	rt_smp_times[cpuid].intr_time = rt_times.intr_time;
	rt_smp_times[cpuid].linux_time = rt_times.linux_time;
	rt_smp_times[cpuid].periodic_tick = rt_times.periodic_tick;
	rt_time_h = rt_times.tick_time + rt_half_tick;
	linux_times = rt_smp_times;
	rt_global_restore_flags(flags);
	rt_request_linux_irq(TIMER_8254_IRQ, recover_jiffies, "rtai_jif_chk", recover_jiffies);
	return period;
#undef cpuid
#define rt_times (rt_smp_times[cpuid])
}

void start_rt_apic_timers(struct apic_timer_setup_data *setup_mode, unsigned int rcvr_jiffies_cpuid)
{
	int cpuid, period;

	period = 0;
	for (cpuid = 0; cpuid < NR_RT_CPUS; cpuid++) {
		period += setup_mode[cpuid].mode;
	}
	if (period == NR_RT_CPUS) {
		period = 2000000000;
		for (cpuid = 0; cpuid < NR_RT_CPUS; cpuid++) {
			if (setup_mode[cpuid].count < period) {
				period = setup_mode[cpuid].count;
			}
		}
		start_rt_timer(nano2count(period));
	} else {
		rt_set_oneshot_mode();
		start_rt_timer(0);
	}
}

void stop_rt_timer(void)
{
	unsigned long flags;

	rt_free_linux_irq(TIMER_8254_IRQ, recover_jiffies);
	rt_free_timer();
	flags = rt_global_save_flags_and_cli();
	rt_smp_oneshot_timer[0] = rt_smp_oneshot_running[0] = 0;
	rt_global_restore_flags(flags);
	rt_busy_sleep(10000000);
}

#endif
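/*
 * Usage sketch (illustrative, not part of the original scheduler): in both
 * builds above, start_rt_apic_timers() takes one struct apic_timer_setup_data
 * per CPU; mode > 0 selects periodic operation with the given count (in
 * nanoseconds, per the count2nano() conversion above), mode 0 selects
 * oneshot. The function name and values below are hypothetical.
 */
static void example_start_apic_timers(void)
{
	struct apic_timer_setup_data setup[NR_RT_CPUS];
	int cpuid;

	for (cpuid = 0; cpuid < NR_RT_CPUS; cpuid++) {
		setup[cpuid].mode = 1;		/* periodic */
		setup[cpuid].count = 100000;	/* 100 us period, in nanoseconds */
	}
	/* recover Linux jiffies on the CPU running this code */
	start_rt_apic_timers(setup, hard_cpu_id());
}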
RTIME start_rt_timer_cpuid(int period, int cpuid)
{
	return start_rt_timer(period);
}

int rt_sched_type(void)
{
	return RT_SCHED_MUP;
}

void rt_preempt_always(int yes_no)
{
	int cpuid;

	for (cpuid = 0; cpuid < NR_RT_CPUS; cpuid++) {
		rt_smp_preempt_always[cpuid] = yes_no ? 1 : 0;
	}
}

void rt_preempt_always_cpuid(int yes_no, unsigned int cpuid)
{
	rt_smp_preempt_always[cpuid] = yes_no ? 1 : 0;
}

RT_TRAP_HANDLER rt_set_task_trap_handler(RT_TASK *task, unsigned int vec, RT_TRAP_HANDLER handler)
{
	RT_TRAP_HANDLER old_handler;

	if (!task || (vec >= RTAI_NR_TRAPS)) {
		return (RT_TRAP_HANDLER) -EINVAL;
	}
	old_handler = task->task_trap_handler[vec];
	task->task_trap_handler[vec] = handler;
	return old_handler;
}

int rt_trap_handler(int vec, int signo, struct pt_regs *regs, void *dummy_data)
{
	DECLARE_RT_CURRENT;

	ASSIGN_RT_CURRENT;
	if (!rt_current) {
		return 0;
	}
	if (rt_current->task_trap_handler[vec]) {
		return rt_current->task_trap_handler[vec](vec, signo, regs, rt_current);
	}
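/*
 * Usage sketch (illustrative, not part of the original scheduler): a per-task
 * trap handler installed with rt_set_task_trap_handler() above is invoked by
 * rt_trap_handler() with the faulting task as its fourth argument, and its
 * return value is propagated back. The handler below is hypothetical.
 */
static int example_trap_handler(int vec, int signo, struct pt_regs *regs, void *task)
{
	rt_task_suspend((RT_TASK *)task);	/* park the offending task */
	return 1;				/* value returned by rt_trap_handler() */
}

/* install for trap vector 0 on a previously created task, e.g.:
 *	rt_set_task_trap_handler(&example_task, 0, example_trap_handler);
 */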