/* sched_smp.c — RTAI SMP real-time scheduler (web-viewer artifact lines replaced with this comment) */
/*
 * Copyright (C) 1999-2003 Paolo Mantegazza <mantegazza@aero.polimi.it>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
 * ACKNOWLEDGMENTS:
 * - Steve Papacharalambous (stevep@zentropix.com) has contributed a very
 *   informative proc filesystem procedure.
 * - Stefano Picerno (stefanopp@libero.it) for suggesting a simple fix to
 *   distinguish a timeout from an abnormal return in timed sem waits.
 * - Geoffrey Martin (gmartin@altersys.com) for a fix to functions with timeouts.
 */

/* Scheduler build-time policy knobs; both branches are currently identical,
 * the maintainer branch exists only as a hook for private settings. */
#ifdef CONFIG_RTAI_MAINTAINER_PMA
#define ALLOW_RR 1
#define ONE_SHOT 0
#define PREEMPT_ALWAYS 0
#define LINUX_FPU 1
#else /* STANDARD SETTINGS */
#define ALLOW_RR 1
#define ONE_SHOT 0
#define PREEMPT_ALWAYS 0
#define LINUX_FPU 1
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <asm/param.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/segment.h>

#ifdef CONFIG_PROC_FS
#include <linux/stat.h>
#include <linux/proc_fs.h>
#include <rtai_proc_fs.h>
#endif

#include <rtai.h>
#include <asm/rtai_sched.h>
#include <rtai_sched.h>
#include <rtai_schedcore.h>

MODULE_LICENSE("GPL");

/*
 * Timer-chip abstraction: the same scheduler body runs on either the
 * per-CPU local APIC timer or the global 8254 PIT, selected at compile
 * time.  Each branch provides the same macro/function surface.
 */
#ifdef __USE_APIC__

#define TIMER_CHIP "APIC"
#define TIMER_FREQ RTAI_FREQ_APIC
#define TIMER_LATENCY RTAI_LATENCY_APIC
#define TIMER_SETUP_TIME RTAI_SETUP_TIME_APIC

/* Bitmap of the CPU(s) owning the timer; read without locking, hence volatile. */
static volatile unsigned long timer_cpu;

#define set_timer_cpu(cpu_map) do { timer_cpu = cpu_map; } while (0)

/* Early-out for timer handlers running on a CPU that does not own the
 * timer: drops the global lock and returns from the enclosing function. */
#define is_timer_cpu(cpuid) \
	if (!test_bit((cpuid), &timer_cpu)) { \
		sched_release_global_lock((cpuid)); \
		return; \
	}

int rt_get_timer_cpu(void) { return timer_cpu; }

/* APIC mode: Linux keeps its own timer interrupt, nothing to re-pend. */
#define update_linux_timer()

irqreturn_t rtai_broadcast_to_local_timers(int irq, void *dev_id, struct pt_regs *regs);

#define BROADCAST_TO_LOCAL_TIMERS() rtai_broadcast_to_local_timers(-1, NULL, NULL)

#define FREE_LOCAL_TIMERS() rt_free_linux_irq(TIMER_8254_IRQ, &rtai_broadcast_to_local_timers)

#else

#define TIMER_CHIP "8254"
#define TIMER_FREQ RTAI_FREQ_8254
#define TIMER_LATENCY RTAI_LATENCY_8254
#define TIMER_SETUP_TIME RTAI_SETUP_TIME_8254

/* 8254 mode: a single global timer, so CPU-ownership tracking is a no-op. */
#define set_timer_cpu(cpu_map)
#define is_timer_cpu(cpuid)

int rt_get_timer_cpu(void) { return -EINVAL; }

/* 8254 mode: RTAI owns the PIT, so Linux's timer tick must be re-pended. */
#define update_linux_timer() rt_pend_linux_irq(TIMER_8254_IRQ)
#define BROADCAST_TO_LOCAL_TIMERS()
#define FREE_LOCAL_TIMERS()

#endif

#ifdef CONFIG_PROC_FS
// proc filesystem additions.
static int rtai_proc_sched_register(void);
static void rtai_proc_sched_unregister(void);
// End of proc filesystem additions.
#endif/* +++++++++++++++++ WHAT MUST BE AVAILABLE EVERYWHERE ++++++++++++++++++++++ */RT_TASK rt_smp_linux_task[NR_RT_CPUS];RT_TASK *rt_smp_current[NR_RT_CPUS];RTIME rt_smp_time_h[1];int rt_smp_oneshot_timer[1];struct klist_t wake_up_srq;/* +++++++++++++++ END OF WHAT MUST BE AVAILABLE EVERYWHERE +++++++++++++++++ */static int sched_rqsted[NR_RT_CPUS];DEFINE_LINUX_SMP_CR0;#define linux_cr0 (linux_smp_cr0[cpuid])//#define rt_linux_task (rt_smp_linux_task[cpuid])#define rt_base_linux_task (rt_smp_linux_task[0])//static RTIME rt_time_h;#undef rt_time_h#define rt_time_h (rt_smp_time_h[0])static int rt_half_tick;//static int oneshot_timer;#undef oneshot_timer#define oneshot_timer (rt_smp_oneshot_timer[0])static int oneshot_running;static int shot_fired;static int preempt_always;static rwlock_t task_list_lock = RW_LOCK_UNLOCKED;static RT_TASK *wdog_task[NR_RT_CPUS];#define MAX_FRESTK_SRQ 64static struct { int srq, in, out; void *mp[MAX_FRESTK_SRQ]; } frstk_srq;/* ++++++++++++++++++++++++++++++++ TASKS ++++++++++++++++++++++++++++++++++ */#define TASK_TO_SCHEDULE_ON_IPI() \ while ((task = task->rnext) != &rt_base_linux_task) { \ if (!task->running && test_bit(cpuid, &(task->runnable_on_cpus))) { \ prio = (new_task = task)->priority; \ break; \ } \ }#define TASK_TO_SCHEDULE() \ while ((task = task->rnext) != &rt_base_linux_task) { \ if (!task->running) { \ if (test_bit(cpuid, &(task->runnable_on_cpus))) { \ prio = (new_task = task)->priority; \ while ((task = task->rnext) != &rt_base_linux_task) { \ if (!task->running) { \ cpus_with_ready_tasks |= task->runnable_on_cpus; \ } \ } \ break; \ } else { \ cpus_with_ready_tasks |= task->runnable_on_cpus; \ } \ } \ }static void rt_startup(void(*rt_thread)(int), int data){ extern int rt_task_delete(RT_TASK *); rt_global_sti(); RT_CURRENT->exectime[1] = rdtsc(); rt_thread(data); rt_task_delete(rt_smp_current[hard_cpu_id()]);}#ifdef CONFIG_SMPunsigned long sqilter = 0;#define cpu_present_map cpu_online_mapstatic inline void 
sched_get_global_lock(int cpuid){ if (!test_and_set_bit(cpuid, locked_cpus)) { while (test_and_set_bit(31, locked_cpus)) {#ifdef STAGGER STAGGER(cpuid);#endif } }}static inline void sched_release_global_lock(int cpuid){ if (test_and_clear_bit(cpuid, locked_cpus)) { test_and_clear_bit(31, locked_cpus);#ifdef STAGGER STAGGER(cpuid);#endif }}static inline void smp_send_sched_ipi(unsigned long dest){ if (dest) { send_sched_ipi(dest); }}#define rt_request_sched_ipi() rt_request_cpu_own_irq(SCHED_IPI, rt_schedule_on_schedule_ipi)#define rt_free_sched_ipi() rt_free_cpu_own_irq(SCHED_IPI)#elsestatic unsigned long cpu_present_map = 1;#define sched_get_global_lock(cpuid)#define sched_release_global_lock(cpuid)#define smp_send_sched_ipi(dest)#define rt_request_sched_ipi()#define rt_free_sched_ipi()#endifint rt_task_init(RT_TASK *task, void (*rt_thread)(int), int data, int stack_size, int priority, int uses_fpu, void(*signal)(void)){ unsigned long flags; int cpuid, *st, i, ok; if (task->magic == RT_TASK_MAGIC || priority < 0) { return -EINVAL; } if (!(st = (int *)sched_malloc(stack_size))) { return -ENOMEM; } if (priority == RT_SCHED_HIGHEST_PRIORITY) { // watchdog reserves highest for (i = ok = 0; i < NR_RT_CPUS; i++) { // priority task on each CPU if (!wdog_task[i] || wdog_task[i] == task) { ok = 1; break; } } if (!ok) { rt_printk("Highest priority reserved for RTAI watchdog\n"); return -EBUSY; } } task->bstack = task->stack = (int *)(((unsigned long)st + stack_size - 0x10) & ~0xF); task->stack[0] = 0; task->uses_fpu = uses_fpu ? 
1 : 0; task->runnable_on_cpus = cpu_present_map; *(task->stack_bottom = st) = 0; task->lnxtsk = 0; task->magic = RT_TASK_MAGIC; task->policy = 0; task->is_hard = 1; task->suspdepth = 1; task->state = (RT_SCHED_SUSPENDED | RT_SCHED_READY); task->running = 0; task->owndres = 0; task->priority = task->base_priority = priority; task->prio_passed_to = 0; task->period = 0; task->resume_time = RT_TIME_END; task->queue.prev = &(task->queue); task->queue.next = &(task->queue); task->queue.task = task; task->msg_queue.prev = &(task->msg_queue); task->msg_queue.next = &(task->msg_queue); task->msg_queue.task = task; task->msg = 0; task->ret_queue.prev = &(task->ret_queue); task->ret_queue.next = &(task->ret_queue); task->ret_queue.task = NOTHING; task->tprev = task->tnext = task->rprev = task->rnext = task; task->blocked_on = NOTHING; task->signal = signal; for (i = 0; i < RTAI_NR_TRAPS; i++) { task->task_trap_handler[i] = NULL; } task->tick_queue = NOTHING; task->trap_handler_data = NOTHING; task->resync_frame = 0; task->ExitHook = 0; task->exectime[0] = 0; task->system_data_ptr = 0; init_arch_stack(); flags = rt_global_save_flags_and_cli(); cpuid = hard_cpu_id();#define fpu_task (&rt_linux_task) // needed just by the very next line of code init_fp_env(); task->next = 0; read_lock(&task_list_lock); rt_base_linux_task.prev->next = task; task->prev = rt_base_linux_task.prev; rt_base_linux_task.prev = task; read_unlock(&task_list_lock); rt_global_restore_flags(flags); return 0;}int rt_task_init_cpuid(RT_TASK *task, void (*rt_thread)(int), int data, int stack_size, int priority, int uses_fpu, void(*signal)(void), unsigned int cpuid){ int retval; if (num_online_cpus() <= 1) { cpuid = 0; } retval = rt_task_init(task, rt_thread, data, stack_size, priority, uses_fpu, signal); rt_set_runnable_on_cpus(task, 1L << cpuid); return retval;}int rt_kthread_init_cpuid(RT_TASK *task, void (*rt_thread)(int), int data, int stack_size, int priority, int uses_fpu, void(*signal)(void), unsigned 
int cpuid){ return 0;}int rt_kthread_init(RT_TASK *task, void (*rt_thread)(int), int data, int stack_size, int priority, int uses_fpu, void(*signal)(void)){ return 0;}void rt_set_runnable_on_cpus(RT_TASK *task, unsigned long runnable_on_cpus){ unsigned long flags; flags = rt_global_save_flags_and_cli(); if (!(task->runnable_on_cpus = (runnable_on_cpus & cpu_present_map))) { task->runnable_on_cpus = 1; } rt_global_restore_flags(flags);}void rt_set_runnable_on_cpuid(RT_TASK *task, unsigned int cpuid){ rt_set_runnable_on_cpus(task, 1L << cpuid);}int rt_check_current_stack(void){ DECLARE_RT_CURRENT; unsigned long flags; char *sp; hard_save_flags_and_cli(flags); if ((ASSIGN_RT_CURRENT) != &rt_linux_task) { sp = get_stack_pointer(); hard_restore_flags(flags); return (sp - (char *)(rt_current->stack_bottom)); } else { hard_restore_flags(flags); return -0x7FFFFFFF; }}#if ALLOW_RRstatic RTIME global_yield_time = RT_TIME_END;static int global_policy;#define RR_YIELD() \if (rt_current->policy > 0) { \ rt_current->rr_remaining = rt_current->yield_time - rt_times.tick_time; \ if (rt_current->rr_remaining <= 0) { \ rt_current->rr_remaining = rt_current->rr_quantum; \ if (rt_current->state == RT_SCHED_READY) { \ RT_TASK *task; \ task = rt_current->rnext; \ while (rt_current->priority == task->priority) { \ task = task->rnext; \ } \ if (task != rt_current->rnext) { \ rt_current->running = 0; \ (rt_current->rprev)->rnext = rt_current->rnext; \ (rt_current->rnext)->rprev = rt_current->rprev; \ task->rprev = (rt_current->rprev = task->rprev)->rnext = rt_current; \ rt_current->rnext = task; \ } \ } \ } \}#define RR_SETYT() \if (new_task->policy > 0) { \ new_task->yield_time = rt_time_h + new_task->rr_remaining; \ global_policy = 1; \ if (new_task->yield_time < global_yield_time) { \ global_yield_time = new_task->yield_time; \ } \}#define RR_SPREMP() \if (global_policy > 0) { \ preempt = 1; \ if (global_yield_time < intr_time) { \ RTIME t; \ t = intr_time; \ intr_time = 
global_yield_time; \ global_yield_time = t; \ } else { \ global_yield_time = intr_time; \ } \ global_policy = 0; \} else { \ preempt = 0; \}#define RR_TPREMP() \if (global_policy > 0) { \ preempt = 1; \ if (global_yield_time < rt_times.intr_time) { \ RTIME t; \ t = rt_times.intr_time; \ rt_times.intr_time = global_yield_time; \ global_yield_time = t; \ } else { \ global_yield_time = rt_times.intr_time; \ } \ global_policy = 0; \} else { \ preempt = (preempt_always || prio == RT_SCHED_LINUX_PRIORITY); \}#else#define RR_YIELD()#define RR_SETYT()#define RR_SPREMP() \do { preempt = 0; } while (0)#define RR_TPREMP() \ do { preempt = (preempt_always || prio == RT_SCHED_LINUX_PRIORITY); } while (0)#endif#define EXECTIME#ifdef EXECTIMEstatic RTIME switch_time[NR_RT_CPUS];#define KEXECTIME() \do { \ RTIME now; \ now = rdtsc(); \ if (!rt_current->lnxtsk) { \ rt_current->exectime[0] += (now - switch_time[cpuid]); \ } \ switch_time[cpuid] = now; \} while (0)#else#define KEXECTIME()#endif#ifdef CONFIG_SMP//#define CAUTIOUS#ifdef CAUTIOUSstatic void rt_schedule_on_schedule_ipi(void){ DECLARE_RT_CURRENT; RT_TASK *task, *new_task; int prio; prio = RT_SCHED_LINUX_PRIORITY; ASSIGN_RT_CURRENT; sched_rqsted[cpuid] = 1; new_task = &rt_linux_task; task = &rt_base_linux_task; sched_get_global_lock(cpuid); rt_current->running = 0; RR_YIELD(); TASK_TO_SCHEDULE_ON_IPI(); RR_SETYT(); new_task->running = new_task->state = RT_SCHED_READY; if (new_task != rt_current) { if (rt_current == &rt_linux_task) { rt_switch_to_real_time(cpuid); save_cr0_and_clts(linux_cr0); } if (rt_current->uses_fpu) { enable_fpu(); save_fpenv(rt_current->fpu_reg); if (new_task->uses_fpu) { restore_fpenv(new_task->fpu_reg); } } else if (new_task->uses_fpu) { enable_fpu(); restore_fpenv(new_task->fpu_reg); } KEXECTIME(); rt_exchange_tasks(rt_smp_current[cpuid], new_task);
/* NOTE(review): source chunk ends here, truncated inside rt_schedule_on_schedule_ipi();
 * web-viewer shortcut-help residue that followed has been replaced with this comment. */