📄 sched_lxrt.c
字号:
/*
 * sched_lxrt.c - RTAI LXRT scheduler module.
 *
 * Copyright (C) 1999-2003 Paolo Mantegazza <mantegazza@aero.polimi.it>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
 * ACKNOWLEDGMENTS:
 * - Steve Papacharalambous (stevep@zentropix.com) has contributed a very
 *   informative proc filesystem procedure.
 * - Stefano Picerno (stefanopp@libero.it) for suggesting a simple fix to
 *   distinguish a timeout from an abnormal return in timed sem waits.
 * - Geoffrey Martin (gmartin@altersys.com) for a fix to functions with
 *   timeouts.
 */

/* #define USE_RTAI_TASKS */

/* Compile-time scheduler policy switches; both branches currently carry
   the same values, the first branch is reserved for maintainer tuning. */
#ifdef CONFIG_RTAI_MAINTAINER_PMA
#define ALLOW_RR 1
#define ONE_SHOT 0
#define PREEMPT_ALWAYS 0
#define LINUX_FPU 1
#else /* STANDARD SETTINGS */
#define ALLOW_RR 1
#define ONE_SHOT 0
#define PREEMPT_ALWAYS 0
#define LINUX_FPU 1
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/irq.h>
#include <linux/reboot.h>

#include <asm/param.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/segment.h>
#include <asm/hw_irq.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>

/* Enable the in-kernel syscall stubs (used by _syscall3 further down);
   the generated stubs report failures through this file-local errno. */
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
static int errno;

#ifdef CONFIG_PROC_FS
#include <linux/stat.h>
#include <linux/proc_fs.h>
#include <rtai_proc_fs.h>
static int rtai_proc_sched_register(void);
static void rtai_proc_sched_unregister(void);
int rtai_proc_lxrt_register(void);
void rtai_proc_lxrt_unregister(void);
#endif

#include <rtai.h>
#include <asm/rtai_sched.h>
#include <rtai_lxrt.h>
#include <rtai_registry.h>
#include <rtai_nam2num.h>
#include <rtai_schedcore.h>

MODULE_LICENSE("GPL");

/* +++++++++++++++++ WHAT MUST BE AVAILABLE EVERYWHERE ++++++++++++++++++++++ */

/* Per-CPU scheduler state shared with the rest of RTAI. */
RT_TASK rt_smp_linux_task[NR_RT_CPUS];

RT_TASK *rt_smp_current[NR_RT_CPUS];

RTIME rt_smp_time_h[NR_RT_CPUS];

int rt_smp_oneshot_timer[NR_RT_CPUS];

struct klist_t wake_up_srq;

/* +++++++++++++++ END OF WHAT MUST BE AVAILABLE EVERYWHERE +++++++++++++++++ */

/* Private per-CPU scheduler state. */
static int sched_rqsted[NR_RT_CPUS];

static int rt_smp_linux_cr0[NR_RT_CPUS];

static RT_TASK *rt_smp_fpu_task[NR_RT_CPUS];

static int rt_smp_half_tick[NR_RT_CPUS];

static int rt_smp_oneshot_running[NR_RT_CPUS];

static int rt_smp_shot_fired[NR_RT_CPUS];

static int rt_smp_preempt_always[NR_RT_CPUS];

static struct rt_times *linux_times;

/* Optional per-CPU watchdog task; when set, RT_SCHED_HIGHEST_PRIORITY is
   reserved for it (enforced by the task creation paths below). */
static RT_TASK *lxrt_wdog_task[NR_RT_CPUS];

static int (*lxrt_signal_handler)(struct task_struct *task, int sig);

static unsigned lxrt_migration_virq;

/* Reboot notifier hook, registered with the default priority. */
static int lxrt_notify_reboot(struct notifier_block *nb, unsigned long event, void *ptr);

static struct notifier_block lxrt_notifier_reboot = { .notifier_call = &lxrt_notify_reboot, .next = NULL, .priority = 0};

/* Per-CPU kernel-thread machinery: the 'b' and 'm' thread pairs with their
   work lists, plus a per-CPU semaphore and the global shutdown flag. */
static struct klist_t klistb[NR_RT_CPUS];

static struct task_struct *kthreadb[NR_RT_CPUS];

static struct klist_t klistm[NR_RT_CPUS];

static struct task_struct *kthreadm[NR_RT_CPUS];

static struct semaphore resem[NR_RT_CPUS];

static int endkthread;

/* Shorthands valid only inside functions that define a local 'cpuid'. */
#define fpu_task (rt_smp_fpu_task[cpuid])
#define rt_half_tick (rt_smp_half_tick[cpuid])
#define oneshot_running (rt_smp_oneshot_running[cpuid])
#define oneshot_timer_cpuid (rt_smp_oneshot_timer[hard_cpu_id()])
#define shot_fired (rt_smp_shot_fired[cpuid])
#define preempt_always (rt_smp_preempt_always[cpuid])
#define rt_times (rt_smp_times[cpuid])
#define linux_cr0 (rt_smp_linux_cr0[cpuid])

/* Ring buffer used by the stack-freeing service request. */
#define MAX_FRESTK_SRQ 64
static struct { int srq, in, out; void *mp[MAX_FRESTK_SRQ]; } frstk_srq;

#define KTHREAD_B_PRIO MIN_LINUX_RTPRIO
#define KTHREAD_M_PRIO MAX_LINUX_RTPRIO
#define KTHREAD_F_PRIO MAX_LINUX_RTPRIO - 1

#ifdef CONFIG_SMP
unsigned long sqilter = 0xFFFFFFFF;
#endif

/* Timer selection: local APIC timer when available, 8254 PIT otherwise. */
#ifdef __USE_APIC__
#define TIMER_FREQ RTAI_FREQ_APIC
#define TIMER_LATENCY RTAI_LATENCY_APIC
#define TIMER_SETUP_TIME RTAI_SETUP_TIME_APIC
#define ONESHOT_SPAN (0x7FFFFFFFLL*(CPU_FREQ/TIMER_FREQ))
#define update_linux_timer()
#else /* !__USE_APIC__ */
#define TIMER_FREQ RTAI_FREQ_8254
#define TIMER_LATENCY RTAI_LATENCY_8254
#define TIMER_SETUP_TIME RTAI_SETUP_TIME_8254
#define ONESHOT_SPAN (0x7FFF*(CPU_FREQ/TIMER_FREQ))
/* The 8254 PIT is shared with Linux, so its interrupt must be re-pended. */
#define update_linux_timer() rt_pend_linux_irq(TIMER_8254_IRQ)
#endif /* __USE_APIC__ */

#ifdef CONFIG_SMP

#define BROADCAST_TO_LOCAL_TIMERS() rtai_broadcast_to_local_timers(0,NULL,NULL)

#define rt_request_sched_ipi() rt_request_cpu_own_irq(SCHED_IPI, rt_schedule_on_schedule_ipi)

#define rt_free_sched_ipi() rt_free_cpu_own_irq(SCHED_IPI)

/* Count of CPUs currently inside the scheduler under the global lock. */
static atomic_t scheduling_cpus = ATOMIC_INIT(0);

/*
 * Take the scheduler global lock for this CPU: claim the CPU's own bit in
 * locked_cpus, then contend for the global bit (bit 31) while no other CPU
 * is accounted as scheduling, and finally register this CPU as an entrant.
 */
static inline void sched_get_global_lock(int cpuid)
{
	if (!test_and_set_bit(cpuid, locked_cpus)) {
		while (test_and_set_bit(31, locked_cpus) && !atomic_read(&scheduling_cpus)) {
#ifdef STAGGER
			STAGGER(cpuid);
#endif
		}
	}
	atomic_inc(&scheduling_cpus);
}

/*
 * Release the scheduler global lock: drop this CPU's bit and, if this was
 * the last scheduling CPU, free the global bit (bit 31) as well.
 */
static inline void sched_release_global_lock(int cpuid)
{
	if (test_and_clear_bit(cpuid, locked_cpus) && atomic_dec_and_test(&scheduling_cpus)) {
		test_and_clear_bit(31, locked_cpus);
#ifdef STAGGER
		STAGGER(cpuid);
#endif
	}
}

#else /* !CONFIG_SMP */

/* Uniprocessor: the global lock and the scheduler IPI degenerate to no-ops. */
#define BROADCAST_TO_LOCAL_TIMERS()

#define rt_request_sched_ipi() 0

#define rt_free_sched_ipi()

#define sched_get_global_lock(cpuid)

#define sched_release_global_lock(cpuid)

#endif /* CONFIG_SMP */

/* Helpers to save/force/restore a Linux task's CPU affinity; they expect
   'prev' and 'cpuid' in the enclosing scope. */
#ifdef CONFIG_SMP
#define DECL_CPUS_ALLOWED unsigned long cpus_allowed
#define SAVE_CPUS_ALLOWED do { cpus_allowed = prev->cpus_allowed; } while (0)
#define SET_CPUS_ALLOWED do { prev->cpus_allowed = 1 << cpuid; } while (0)
#define RST_CPUS_ALLOWED do { prev->cpus_allowed = cpus_allowed; } while (0)
#else /*
!CONFIG_SMP */
#define DECL_CPUS_ALLOWED
#define SAVE_CPUS_ALLOWED
#define SET_CPUS_ALLOWED
#define RST_CPUS_ALLOWED
#endif /* CONFIG_SMP */

/* ++++++++++++++++++++++++++++++++ TASKS ++++++++++++++++++++++++++++++++++ */

/* Number of RTAI tasks currently assigned to each CPU. */
static int tasks_per_cpu[NR_RT_CPUS] = { 0, };

/*
 * Return the id of the CPU with the fewest RTAI tasks, used to spread
 * newly created tasks across the available CPUs.
 */
int get_min_tasks_cpuid(void)
{
	int i, cpuid, min;
	min = tasks_per_cpu[cpuid = 0];
	for (i = 1; i < NR_RT_CPUS; i++) {
		if (tasks_per_cpu[i] < min) {
			min = tasks_per_cpu[cpuid = i];
		}
	}
	return cpuid;
}

/* In-kernel stub for the sched_setaffinity syscall (2.6 path below). */
static inline _syscall3(int, sched_setaffinity, pid_t, pid, int, len, unsigned long *, mask)

/*
 * Pin the current Linux task onto 'cpuid'.  On pre-2.6 kernels this forces
 * cpus_allowed and yields until the task lands on the target CPU; on 2.6
 * kernels it goes through sched_setaffinity() instead.
 */
static void put_current_on_cpu(int cpuid)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
	current->cpus_allowed = 1 << cpuid;
	while (cpuid != hard_cpu_id()) {
		current->state = TASK_INTERRUPTIBLE;
		schedule_timeout(1);
	}
#else /* KERNEL_VERSION >= 2.6.0 */
	mm_segment_t old_fs;
	unsigned long mask;
	int retval;
	mask = 1 << cpuid;
	/* The mask lives on the kernel stack, so lift the user-space check
	   around the syscall stub. */
	old_fs = get_fs();
	set_fs(KERNEL_DS);
	retval = sched_setaffinity(current->pid, sizeof(mask), &mask);
	set_fs(old_fs);
	if (retval < 0) {
		/* NOTE(review): on failure, runnable_on_cpus and cpus_allowed
		   still receive the *requested* cpu/mask, while the message says
		   the task stays where it is - confirm this is intended. */
		((RT_TASK *)(current->this_rt_task[0]))->runnable_on_cpus = cpuid;
		current->cpus_allowed = mask;
		rt_printk("LXRT: Linux cannot move task %d (pid) to cpu %d (#), forced to stay only on the cpu (%d) where it is now.\n", current->pid, cpuid, hard_cpu_id());
	}
#endif /* KERNEL_VERSION < 2.6.0 */
}

/*
 * set_rtext - turn a Linux task into an RTAI LXRT task.
 *
 * Initializes every field of 'task', binds it to 'cpuid' (forced to 0 on
 * uniprocessor systems) and links it into the global task list.  When
 * 'relink' is non-NULL the RT task is attached to that Linux task in a
 * hard, suspended state; otherwise it is attached to the caller, which is
 * then migrated to the chosen CPU.
 *
 * Returns 0 on success, -EINVAL for an already initialized task, invalid
 * cpu or negative priority, -EBUSY when the requested priority is the
 * highest one and it is reserved by the watchdog.
 */
int set_rtext(RT_TASK *task, int priority, int uses_fpu, void(*signal)(void), unsigned int cpuid, struct task_struct *relink)
{
	unsigned long flags;

	if (num_online_cpus() <= 1) {
		cpuid = 0;
	}
	if (task->magic == RT_TASK_MAGIC || cpuid >= NR_RT_CPUS || priority < 0) {
		return -EINVAL;
	}
	if (lxrt_wdog_task[cpuid] && lxrt_wdog_task[cpuid] != task && priority == RT_SCHED_HIGHEST_PRIORITY) {
		rt_printk("Highest priority reserved for RTAI watchdog\n");
		return -EBUSY;
	}
	task->uses_fpu = uses_fpu ? 1 : 0;
	task->runnable_on_cpus = cpuid;
	(task->stack_bottom = (int *)&task->fpu_reg)[0] = 0;
	task->magic = RT_TASK_MAGIC;
	task->policy = 0;
	task->owndres = 0;
	task->priority = task->base_priority = priority;
	task->prio_passed_to = 0;
	task->period = 0;
	task->resume_time = RT_TIME_END;
	/* All queue heads start self-linked (i.e. empty). */
	task->queue.prev = task->queue.next = &(task->queue);
	task->queue.task = task;
	task->msg_queue.prev = task->msg_queue.next = &(task->msg_queue);
	task->msg_queue.task = task;
	task->msg = 0;
	task->ret_queue.prev = task->ret_queue.next = &(task->ret_queue);
	task->ret_queue.task = NOTHING;
	task->tprev = task->tnext = task->rprev = task->rnext = task;
	task->blocked_on = NOTHING;
	task->signal = signal;
	memset(task->task_trap_handler, 0, RTAI_NR_TRAPS*sizeof(void *));
	task->tick_queue = NOTHING;
	task->trap_handler_data = NOTHING;
	task->resync_frame = 0;
	task->ExitHook = 0;
	task->usp_flags = task->usp_flags_mask = task->force_soft = 0;
	task->msg_buf[0] = 0;
	task->exectime[0] = 0;
	task->system_data_ptr = 0;
	atomic_inc((atomic_t *)(tasks_per_cpu + cpuid));
	if (relink) {
		/* Attach to an existing Linux task: hard mode, suspended. */
		task->suspdepth = task->is_hard = 1;
		task->state = RT_SCHED_READY | RT_SCHED_SUSPENDED;
		relink->this_rt_task[0] = task;
		task->lnxtsk = relink;
	} else {
		/* Attach to the caller and move it onto the chosen CPU. */
		task->suspdepth = task->is_hard = 0;
		task->state = RT_SCHED_READY;
		current->this_rt_task[0] = task;
		current->this_rt_task[1] = task->lnxtsk = current;
		put_current_on_cpu(cpuid);
	}
	/* Append the task to the global task list under the global lock. */
	flags = rt_global_save_flags_and_cli();
	task->next = 0;
	rt_linux_task.prev->next = task;
	task->prev = rt_linux_task.prev;
	rt_linux_task.prev = task;
	rt_global_restore_flags(flags);
	return 0;
}

static void start_stop_kthread(RT_TASK *, void (*)(int), int, int, int, void(*)(void), int);

/*
 * Create a kernel-thread based RTAI task bound to 'cpuid'; the outcome is
 * reported back through task->retval.
 * NOTE(review): 'stack_size' is accepted but not forwarded to
 * start_stop_kthread() - presumably the kernel thread uses its own stack;
 * confirm against the helper's definition.
 */
int rt_kthread_init_cpuid(RT_TASK *task, void (*rt_thread)(int), int data, int stack_size, int priority, int uses_fpu, void(*signal)(void), unsigned int cpuid)
{
	start_stop_kthread(task, rt_thread, data, priority, uses_fpu, signal, cpuid);
	return (int)task->retval;
}

/* As rt_kthread_init_cpuid(), with the CPU picked by get_min_tasks_cpuid(). */
int rt_kthread_init(RT_TASK *task, void (*rt_thread)(int), int
data, int stack_size, int priority, int uses_fpu, void(*signal)(void))
{
	return rt_kthread_init_cpuid(task, rt_thread, data, stack_size, priority, uses_fpu, signal, get_min_tasks_cpuid());
}

#ifdef USE_RTAI_TASKS

/*
 * Entry trampoline for a native RTAI task: enable the global hard
 * interrupts, stamp the start time, run the task body, then self-delete
 * when the body returns.
 */
static void rt_startup(void(*rt_thread)(int), int data)
{
	extern int rt_task_delete(RT_TASK *);
	rt_global_sti();
	RT_CURRENT->exectime[1] = rdtsc();
	rt_thread(data);
	rt_task_delete(rt_smp_current[hard_cpu_id()]);
	/* Reached only when the self-deletion above failed ("delated" sic,
	   kept verbatim - the string is runtime output). */
	rt_printk("LXRT: task %p returned but could not be delated.\n", rt_smp_current[hard_cpu_id()]);
}

/*
 * Initialize a native RTAI task on 'cpuid' (forced to 0 on uniprocessor
 * systems): allocate its stack, fill in every field, and link it into the
 * global task list.  The task starts hard, suspended (suspdepth == 1).
 *
 * Returns 0 on success, -EINVAL for an already initialized task, invalid
 * cpu or negative priority, -ENOMEM when the stack allocation fails,
 * -EBUSY when the highest priority is reserved by the watchdog.
 */
int rt_task_init_cpuid(RT_TASK *task, void (*rt_thread)(int), int data, int stack_size, int priority, int uses_fpu, void(*signal)(void), unsigned int cpuid)
{
	int *st, i;
	unsigned long flags;

	if (num_online_cpus() <= 1) {
		cpuid = 0;
	}
	if (task->magic == RT_TASK_MAGIC || cpuid >= NR_RT_CPUS || priority < 0) {
		return -EINVAL;
	}
	if (!(st = (int *)sched_malloc(stack_size))) {
		return -ENOMEM;
	}
	if (lxrt_wdog_task[cpuid] && lxrt_wdog_task[cpuid] != task && priority == RT_SCHED_HIGHEST_PRIORITY) {
		/* NOTE(review): 'st' is not freed on this error path - the
		   stack allocated just above leaks. */
		rt_printk("Highest priority reserved for RTAI watchdog\n");
		return -EBUSY;
	}
	/* Stack grows downward: point at the top, aligned to 16 bytes. */
	task->bstack = task->stack = (int *)(((unsigned long)st + stack_size - 0x10) & ~0xF);
	task->stack[0] = 0;
	task->uses_fpu = uses_fpu ? 1 : 0;
	task->runnable_on_cpus = cpuid;
	atomic_inc((atomic_t *)(tasks_per_cpu + cpuid));
	*(task->stack_bottom = st) = 0;
	task->magic = RT_TASK_MAGIC;
	task->policy = 0;
	task->suspdepth = 1;
	task->state = (RT_SCHED_SUSPENDED | RT_SCHED_READY);
	task->owndres = 0;
	task->is_hard = 1;
	task->lnxtsk = 0;
	task->priority = task->base_priority = priority;
	task->prio_passed_to = 0;
	task->period = 0;
	task->resume_time = RT_TIME_END;
	/* All queue heads start self-linked (i.e. empty). */
	task->queue.prev = &(task->queue);
	task->queue.next = &(task->queue);
	task->queue.task = task;
	task->msg_queue.prev = &(task->msg_queue);
	task->msg_queue.next = &(task->msg_queue);
	task->msg_queue.task = task;
	task->msg = 0;
	task->ret_queue.prev = &(task->ret_queue);
	task->ret_queue.next = &(task->ret_queue);
	task->ret_queue.task = NOTHING;
	task->tprev = task->tnext = task->rprev = task->rnext = task;
	task->blocked_on = NOTHING;
	task->signal = signal;
	for (i = 0; i < RTAI_NR_TRAPS; i++) {
		task->task_trap_handler[i] = NULL;
	}
	task->tick_queue = SOMETHING;
	task->trap_handler_data = NOTHING;
	task->resync_frame = 0;
	task->ExitHook = 0;
	task->exectime[0] = 0;
	task->system_data_ptr = 0;
	init_arch_stack();
	/* Link into the global task list and set up the FPU environment
	   under the global lock. */
	flags = rt_global_save_flags_and_cli();
	task->next = 0;
	rt_linux_task.prev->next = task;
	task->prev = rt_linux_task.prev;
	rt_linux_task.prev = task;
	cpuid = hard_cpu_id();
	init_fp_env();
	rt_global_restore_flags(flags);
	return 0;
}

/* As rt_task_init_cpuid(), with the CPU picked by get_min_tasks_cpuid(). */
int rt_task_init(RT_TASK *task, void (*rt_thread)(int), int data, int stack_size, int priority, int uses_fpu, void(*signal)(void))
{
	return rt_task_init_cpuid(task, rt_thread, data, stack_size, priority, uses_fpu, signal, get_min_tasks_cpuid());
}

#else /* !USE_RTAI_TASKS */

/* NOTE(review): this definition continues beyond the visible excerpt. */
int rt_task_init_cpuid(RT_TASK *task, void (*rt_thread)(int), int data, int stack_size, int priority, int uses_fpu, void(*signal)(void), unsigned int cpuid)
{
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -