📄 sched_mup.c
/*
 * Copyright (C) 1999-2003 Paolo Mantegazza <mantegazza@aero.polimi.it>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
 * ACKNOWLEDGMENTS:
 * - Steve Papacharalambous (stevep@zentropix.com) has contributed a very
 *   informative proc filesystem procedure.
 * - Stefano Picerno (stefanopp@libero.it) for suggesting a simple fix to
 *   distinguish a timeout from an abnormal return in timed sem waits.
 * - Geoffrey Martin (gmartin@altersys.com) for a fix to functions with
 *   timeouts.
 */

#ifdef CONFIG_RTAI_MAINTAINER_PMA
#define ALLOW_RR        1
#define ONE_SHOT        0
#define PREEMPT_ALWAYS  0
#define LINUX_FPU       1
#else /* STANDARD SETTINGS */
#define ALLOW_RR        1
#define ONE_SHOT        0
#define PREEMPT_ALWAYS  0
#define LINUX_FPU       1
#endif

#define cpu_present_map cpu_online_map

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/timex.h>
#include <linux/sched.h>

#include <asm/param.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/segment.h>

#ifdef CONFIG_PROC_FS
#include <linux/stat.h>
#include <linux/proc_fs.h>
#include <rtai_proc_fs.h>
#endif

#ifdef CONFIG_PROC_FS
/* proc filesystem additions. */
static int rtai_proc_sched_register(void);
static void rtai_proc_sched_unregister(void);
/* End of proc filesystem additions. */
#endif
#include <rtai.h>
#include <asm/rtai_sched.h>
#include <rtai_sched.h>
#include <rtai_schedcore.h>

MODULE_LICENSE("GPL");

/* +++++++++++++++++ WHAT MUST BE AVAILABLE EVERYWHERE ++++++++++++++++++++++ */

RT_TASK rt_smp_linux_task[NR_RT_CPUS];

RT_TASK *rt_smp_current[NR_RT_CPUS];

RTIME rt_smp_time_h[NR_RT_CPUS];

int rt_smp_oneshot_timer[NR_RT_CPUS];

struct klist_t wake_up_srq;

/* +++++++++++++++ END OF WHAT MUST BE AVAILABLE EVERYWHERE +++++++++++++++++ */

static int sched_rqsted[NR_RT_CPUS];

static int rt_smp_linux_cr0[NR_RT_CPUS];

static RT_TASK *rt_smp_fpu_task[NR_RT_CPUS];

static int rt_smp_half_tick[NR_RT_CPUS];

static int rt_smp_oneshot_running[NR_RT_CPUS];

static int rt_smp_shot_fired[NR_RT_CPUS];

static int rt_smp_preempt_always[NR_RT_CPUS];

static struct rt_times *linux_times;

static RT_TASK *wdog_task[NR_RT_CPUS];

#define fpu_task            (rt_smp_fpu_task[cpuid])
//#define rt_linux_task     (rt_smp_linux_task[cpuid])
#define rt_half_tick        (rt_smp_half_tick[cpuid])
#define oneshot_running     (rt_smp_oneshot_running[cpuid])
#define oneshot_timer_cpuid (rt_smp_oneshot_timer[hard_cpu_id()])
#define shot_fired          (rt_smp_shot_fired[cpuid])
#define preempt_always      (rt_smp_preempt_always[cpuid])
#define rt_times            (rt_smp_times[cpuid])
#define linux_cr0           (rt_smp_linux_cr0[cpuid])

#define MAX_FRESTK_SRQ 64
static struct { int srq, in, out; void *mp[MAX_FRESTK_SRQ]; } frstk_srq;

#ifdef CONFIG_SMP
unsigned long sqilter = 0xFFFFFFFF;
#endif

#ifdef __USE_APIC__

#define TIMER_FREQ       RTAI_FREQ_APIC
#define TIMER_LATENCY    RTAI_LATENCY_APIC
#define TIMER_SETUP_TIME RTAI_SETUP_TIME_APIC

#define update_linux_timer()

irqreturn_t rtai_broadcast_to_local_timers(int irq, void *dev_id, struct pt_regs *regs);
#define BROADCAST_TO_LOCAL_TIMERS() rtai_broadcast_to_local_timers(-1, NULL, NULL)

#define rt_request_sched_ipi() rt_request_cpu_own_irq(SCHED_IPI, rt_schedule)

#define rt_free_sched_ipi() rt_free_cpu_own_irq(SCHED_IPI)

static atomic_t scheduling_cpus = ATOMIC_INIT(0);

static inline void sched_get_global_lock(int cpuid)
{
	if (!test_and_set_bit(cpuid, locked_cpus)) {
		while (test_and_set_bit(31, locked_cpus) && !atomic_read(&scheduling_cpus)) {
#ifdef STAGGER
			STAGGER(cpuid);
#endif
		}
	}
	atomic_inc(&scheduling_cpus);
}

static inline void sched_release_global_lock(int cpuid)
{
	if (test_and_clear_bit(cpuid, locked_cpus) && atomic_dec_and_test(&scheduling_cpus)) {
		test_and_clear_bit(31, locked_cpus);
#ifdef STAGGER
		STAGGER(cpuid);
#endif
	}
}

#else

#define TIMER_FREQ       RTAI_FREQ_8254
#define TIMER_LATENCY    RTAI_LATENCY_8254
#define TIMER_SETUP_TIME RTAI_SETUP_TIME_8254

#define update_linux_timer() rt_pend_linux_irq(TIMER_8254_IRQ)

#define BROADCAST_TO_LOCAL_TIMERS()

#define rt_request_sched_ipi()

#define rt_free_sched_ipi()

#define sched_get_global_lock(cpuid)
#define sched_release_global_lock(cpuid)

#endif
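/*
 * Usage sketch: rt_schedule() below enters its critical section with
 * sched_get_global_lock(cpuid) and is expected to leave it through the
 * matching sched_release_global_lock(cpuid), e.g.
 *
 *	sched_get_global_lock(cpuid);
 *	...pick new_task and reprogram the one-shot timer...
 *	sched_release_global_lock(cpuid);
 *
 * On UP builds (no __USE_APIC__) both macros expand to nothing, so the
 * locking compiles away entirely.
 */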
/* ++++++++++++++++++++++++++++++++ TASKS ++++++++++++++++++++++++++++++++++ */

#define TASK_TO_SCHEDULE() \
	do { prio = (new_task = rt_linux_task.rnext)->priority; } while (0)

static int tasks_per_cpu[NR_RT_CPUS] = { 0, };

static void rt_startup(void (*rt_thread)(int), int data)
{
	extern int rt_task_delete(RT_TASK *);
	rt_global_sti();
	RT_CURRENT->exectime[1] = rdtsc();
	rt_thread(data);
	rt_task_delete(rt_smp_current[hard_cpu_id()]);
}

int rt_task_init_cpuid(RT_TASK *task, void (*rt_thread)(int), int data,
		       int stack_size, int priority, int uses_fpu,
		       void (*signal)(void), unsigned int cpuid)
{
	int *st, i;
	unsigned long flags;

	if (smp_num_cpus <= 1) {
		cpuid = 0;
	}
	if (task->magic == RT_TASK_MAGIC || cpuid >= NR_RT_CPUS || priority < 0) {
		return -EINVAL;
	}
	if (!(st = (int *)sched_malloc(stack_size))) {
		return -ENOMEM;
	}
	if (wdog_task[cpuid] && wdog_task[cpuid] != task
	    && priority == RT_SCHED_HIGHEST_PRIORITY) {
		rt_printk("Highest priority reserved for RTAI watchdog\n");
		return -EBUSY;
	}
	task->bstack = task->stack = (int *)(((unsigned long)st + stack_size - 0x10) & ~0xF);
	task->stack[0] = 0;
	task->uses_fpu = uses_fpu ? 1 : 0;
	task->runnable_on_cpus = cpuid;
	atomic_inc((atomic_t *)(tasks_per_cpu + cpuid));
	*(task->stack_bottom = st) = 0;
	task->lnxtsk = 0;
	task->magic = RT_TASK_MAGIC;
	task->policy = 0;
	task->is_hard = 1;
	task->suspdepth = 1;
	task->state = (RT_SCHED_SUSPENDED | RT_SCHED_READY);
	task->owndres = 0;
	task->priority = task->base_priority = priority;
	task->prio_passed_to = 0;
	task->period = 0;
	task->resume_time = RT_TIME_END;
	task->queue.prev = &(task->queue);
	task->queue.next = &(task->queue);
	task->queue.task = task;
	task->msg_queue.prev = &(task->msg_queue);
	task->msg_queue.next = &(task->msg_queue);
	task->msg_queue.task = task;
	task->msg = 0;
	task->ret_queue.prev = &(task->ret_queue);
	task->ret_queue.next = &(task->ret_queue);
	task->ret_queue.task = NOTHING;
	task->tprev = task->tnext = task->rprev = task->rnext = task;
	task->blocked_on = NOTHING;
	task->signal = signal;
	for (i = 0; i < RTAI_NR_TRAPS; i++) {
		task->task_trap_handler[i] = NULL;
	}
	task->tick_queue = NOTHING;
	task->trap_handler_data = NOTHING;
	task->resync_frame = 0;
	task->ExitHook = 0;
	task->exectime[0] = 0;
	task->system_data_ptr = 0;
	init_arch_stack();

	flags = rt_global_save_flags_and_cli();
	task->next = 0;
	rt_linux_task.prev->next = task;
	task->prev = rt_linux_task.prev;
	rt_linux_task.prev = task;
	cpuid = hard_cpu_id();
	init_fp_env();
	rt_global_restore_flags(flags);
	return 0;
}

static int get_min_tasks_cpuid(void)
{
	int i, cpuid, min;
	min = tasks_per_cpu[cpuid = 0];
	for (i = 1; i < NR_RT_CPUS; i++) {
		if (tasks_per_cpu[i] < min) {
			min = tasks_per_cpu[cpuid = i];
		}
	}
	return cpuid;
}

int rt_task_init(RT_TASK *task, void (*rt_thread)(int), int data,
		 int stack_size, int priority, int uses_fpu,
		 void (*signal)(void))
{
	return rt_task_init_cpuid(task, rt_thread, data, stack_size, priority,
				  uses_fpu, signal, get_min_tasks_cpuid());
}

int rt_kthread_init_cpuid(RT_TASK *task, void (*rt_thread)(int), int data,
			  int stack_size, int priority, int uses_fpu,
			  void (*signal)(void), unsigned int cpuid)
{
	return 0;
}

int rt_kthread_init(RT_TASK *task, void (*rt_thread)(int), int data,
		    int stack_size, int priority, int uses_fpu,
		    void (*signal)(void))
{
	return 0;
}

void rt_set_runnable_on_cpuid(RT_TASK *task, unsigned int cpuid)
{
	unsigned long flags;
	RT_TASK *linux_task;

	if (cpuid >= NR_RT_CPUS) {
		cpuid = get_min_tasks_cpuid();
	}
	flags = rt_global_save_flags_and_cli();
	switch (rt_smp_oneshot_timer[task->runnable_on_cpus] | (rt_smp_oneshot_timer[cpuid] << 1)) {
		case 1:
			task->period = llimd(task->period, TIMER_FREQ, tuned.cpu_freq);
			task->resume_time = llimd(task->resume_time, TIMER_FREQ, tuned.cpu_freq);
			break;
		case 2:
			task->period = llimd(task->period, tuned.cpu_freq, TIMER_FREQ);
			task->resume_time = llimd(task->resume_time, tuned.cpu_freq, TIMER_FREQ);
			break;
	}
	if (!((task->prev)->next = task->next)) {
		rt_smp_linux_task[task->runnable_on_cpus].prev = task->prev;
	} else {
		(task->next)->prev = task->prev;
	}
	task->runnable_on_cpus = cpuid;
	if ((task->state & RT_SCHED_DELAYED)) {
		(task->tprev)->tnext = task->tnext;
		(task->tnext)->tprev = task->tprev;
		enq_timed_task(task);
	}
	task->next = 0;
	(linux_task = rt_smp_linux_task + cpuid)->prev->next = task;
	task->prev = linux_task->prev;
	linux_task->prev = task;
	rt_global_restore_flags(flags);
}
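/*
 * Usage sketch, guarded out of the build: a hypothetical module creating a
 * task on the least loaded CPU with rt_task_init(), then pinning it with
 * rt_set_runnable_on_cpuid().  demo_task/demo_fun/demo_setup are
 * illustrative names, not part of this scheduler.
 */
#if 0
static RT_TASK demo_task;		/* hypothetical task descriptor */

static void demo_fun(int arg)		/* hypothetical thread body */
{
	/* hard real-time work goes here */
}

static int demo_setup(void)
{
	/* 2000-byte stack, priority 10, no FPU, no signal handler */
	if (rt_task_init(&demo_task, demo_fun, 0, 2000, 10, 0, 0)) {
		return -1;
	}
	/* pin to CPU 0; an out-of-range cpuid falls back to the least
	   loaded CPU (see rt_set_runnable_on_cpuid() above) */
	rt_set_runnable_on_cpuid(&demo_task, 0);
	return 0;
}
#endif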
void rt_set_runnable_on_cpus(RT_TASK *task, unsigned long run_on_cpus)
{
	int cpuid;

	run_on_cpus &= cpu_present_map;
	cpuid = get_min_tasks_cpuid();
	if (!test_bit(cpuid, &run_on_cpus)) {
		cpuid = ffnz(run_on_cpus);
	}
	rt_set_runnable_on_cpuid(task, cpuid);
}

int rt_check_current_stack(void)
{
	DECLARE_RT_CURRENT;
	char *sp;

	if ((rt_current = rt_smp_current[cpuid = hard_cpu_id()]) != &rt_linux_task) {
		sp = get_stack_pointer();
		return (sp - (char *)(rt_current->stack_bottom));
	} else {
		return -0x7FFFFFFF;
	}
}

#if ALLOW_RR
#define RR_YIELD() \
	if (rt_current->policy > 0) { \
		rt_current->rr_remaining = rt_current->yield_time - rt_times.tick_time; \
		if (rt_current->rr_remaining <= 0) { \
			rt_current->rr_remaining = rt_current->rr_quantum; \
			if (rt_current->state == RT_SCHED_READY) { \
				RT_TASK *task; \
				task = rt_current->rnext; \
				while (rt_current->priority == task->priority) { \
					task = task->rnext; \
				} \
				if (task != rt_current->rnext) { \
					(rt_current->rprev)->rnext = rt_current->rnext; \
					(rt_current->rnext)->rprev = rt_current->rprev; \
					task->rprev = (rt_current->rprev = task->rprev)->rnext = rt_current; \
					rt_current->rnext = task; \
				} \
			} \
		} \
	}

#define RR_SETYT() \
	if (new_task->policy > 0) { \
		new_task->yield_time = rt_time_h + new_task->rr_remaining; \
	}

#define RR_SPREMP() \
	if (new_task->policy > 0) { \
		preempt = 1; \
		if (new_task->yield_time < intr_time) { \
			intr_time = new_task->yield_time; \
		} \
	} else { \
		preempt = 0; \
	}

#define RR_TPREMP() \
	if (new_task->policy > 0) { \
		preempt = 1; \
		if (new_task->yield_time < rt_times.intr_time) { \
			rt_times.intr_time = new_task->yield_time; \
		} \
	} else { \
		preempt = (preempt_always || prio == RT_SCHED_LINUX_PRIORITY); \
	}
#else
#define RR_YIELD()
#define RR_SETYT()
#define RR_SPREMP() \
	do { preempt = 0; } while (0)
#define RR_TPREMP() \
	do { preempt = (preempt_always || prio == RT_SCHED_LINUX_PRIORITY); } while (0)
#endif

#define ANTICIPATE

#define EXECTIME
#ifdef EXECTIME
RTIME switch_time[NR_RT_CPUS];
#define KEXECTIME() \
	do { \
		RTIME now; \
		now = rdtsc(); \
		if (!rt_current->lnxtsk) { \
			rt_current->exectime[0] += (now - switch_time[cpuid]); \
		} \
		switch_time[cpuid] = now; \
	} while (0)
#else
#define KEXECTIME()
#endif

void rt_schedule(void)
{
	DECLARE_RT_CURRENT;
	RTIME intr_time, now;
	RT_TASK *task, *new_task;
	int prio, delay, preempt;

	prio = RT_SCHED_LINUX_PRIORITY;
	ASSIGN_RT_CURRENT;
	sched_rqsted[cpuid] = 1;
	task = new_task = &rt_linux_task;
	sched_get_global_lock(cpuid);
	RR_YIELD();
	if (oneshot_running) {
#ifdef ANTICIPATE
		rt_time_h = rdtsc() + rt_half_tick;
		wake_up_timed_tasks(cpuid);
#endif
		TASK_TO_SCHEDULE();
		RR_SETYT();

		intr_time = shot_fired ? rt_times.intr_time : rt_times.intr_time + rt_times.linux_tick;
		RR_SPREMP();
		task = &rt_linux_task;
		while ((task = task->tnext) != &rt_linux_task) {
			if (task->priority <= prio && task->resume_time < intr_time) {
				intr_time = task->resume_time;
				preempt = 1;
				break;
			}
		}
		if (preempt || (!shot_fired && prio == RT_SCHED_LINUX_PRIORITY)) {
			shot_fired = 1;
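			/*
			 * Added commentary: the loop above scanned the timing
			 * queue for the earliest resume_time able to preempt
			 * the chosen task (priority <= prio); when a closer
			 * shot is required, or none is pending while Linux
			 * would otherwise run, the one-shot is marked fired
			 * before the timer is reprogrammed.
			 */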