sched_lxrt.c
    int cpuid;
    struct klist_t *klistp;

    cpuid = rt_task->runnable_on_cpus;
    put_current_on_cpu(cpuid);
    klistp = &klistb[cpuid];
    hard_cli();
    klistp->task[klistp->in] = rt_task;
    klistp->in = (klistp->in + 1) & (MAX_WAKEUP_SRQ - 1);
    hard_sti();
    current->state = TASK_HARDREALTIME | 0x80000000;
    wake_up_process(kthreadb[cpuid]);
    schedule();
    rt_task->is_hard = 1;
    rt_task->exectime[1] = rdtsc();
    current->state = TASK_HARDREALTIME;
    hard_sti();
    if (current->used_math) {
        restore_fpu(current);
    }
}

void give_back_to_linux(RT_TASK *rt_task)
{
    rt_global_cli();
    wake_up_srq.task[wake_up_srq.in] = rt_task->lnxtsk;
    wake_up_srq.in = (wake_up_srq.in + 1) & (MAX_WAKEUP_SRQ - 1);
    rt_pend_linux_srq(wake_up_srq.srq);
    (rt_task->rprev)->rnext = rt_task->rnext;
    (rt_task->rnext)->rprev = rt_task->rprev;
    rt_task->state = 0;
    rt_schedule();
    /* Perform Linux's scheduling tail now since we woke up outside
       the regular schedule() point. */
    __adeos_schedule_back_root(rt_task->lnxtsk);
    rt_global_sti();
    rt_task->is_hard = 0;
}

static struct task_struct *get_kthread(int get, int cpuid, void *lnxtsk)
{
    struct task_struct *kthread;
    struct klist_t *klistp;
    RT_TASK *this_task;
    int hard;

    klistp = &klistm[cpuid];
    if (get) {
        while (!(kthread = __get_kthread(cpuid))) {
            this_task = rt_smp_current[hard_cpu_id()];
            rt_global_cli();
            klistp->task[klistp->in] = (void *)(hard = this_task->is_hard > 0 ? 1 : 0);
            klistp->in = (klistp->in + 1) & (MAX_WAKEUP_SRQ - 1);
            klistp->task[klistp->in] = (void *)this_task;
            klistp->in = (klistp->in + 1) & (MAX_WAKEUP_SRQ - 1);
            wake_up_srq.task[wake_up_srq.in] = kthreadm[cpuid];
            wake_up_srq.in = (wake_up_srq.in + 1) & (MAX_WAKEUP_SRQ - 1);
            rt_pend_linux_srq(wake_up_srq.srq);
            rt_global_sti();
            if (hard) {
                rt_task_suspend(this_task);
            } else {
                down(&resem[cpuid]);
            }
        }
        rt_global_cli();
        klistp->task[klistp->in] = 0;
        klistp->in = (klistp->in + 1) & (MAX_WAKEUP_SRQ - 1);
        klistp->task[klistp->in] = 0;
    } else {
        kthread = 0;
        rt_global_cli();
        klistp->task[klistp->in] = lnxtsk;
    }
    klistp->in = (klistp->in + 1) & (MAX_WAKEUP_SRQ - 1);
    wake_up_srq.task[wake_up_srq.in] = kthreadm[cpuid];
    wake_up_srq.in = (wake_up_srq.in + 1) & (MAX_WAKEUP_SRQ - 1);
    rt_pend_linux_srq(wake_up_srq.srq);
    rt_global_sti();
    return kthread;
}

static void start_stop_kthread(RT_TASK *task, void (*rt_thread)(int), int data,
                               int priority, int uses_fpu, void (*signal)(void),
                               int runnable_on_cpus)
{
    if (rt_thread) {
        task->retval = set_rtext(task, priority, uses_fpu, signal,
                                 runnable_on_cpus,
                                 get_kthread(1, runnable_on_cpus, 0));
        task->max_msg_size[0] = (int)rt_thread;
        task->max_msg_size[1] = data;
    } else {
        get_kthread(0, task->runnable_on_cpus, task->lnxtsk);
    }
}

static void wake_up_srq_handler(void)
{
    while (wake_up_srq.out != wake_up_srq.in) {
        wake_up_process(wake_up_srq.task[wake_up_srq.out]);
        wake_up_srq.out = (wake_up_srq.out + 1) & (MAX_WAKEUP_SRQ - 1);
    }
    set_need_resched();
}
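#if 0
/*
 * Illustrative sketch, not part of the original source: the wake_up_srq and
 * klist queues used above are plain power-of-two ring buffers whose "in" and
 * "out" indices are advanced under a mask of (MAX_WAKEUP_SRQ - 1), so the
 * wrap-around is implicit and no modulo is needed.  A minimal standalone
 * version of the same pattern (all names here are hypothetical):
 */
#define RING_SIZE 64                        /* must be a power of two */

struct ring {
    void *slot[RING_SIZE];
    int in, out;
};

static inline void ring_put(struct ring *r, void *p)
{
    r->slot[r->in] = p;                     /* producer fills the "in" slot */
    r->in = (r->in + 1) & (RING_SIZE - 1);
}

static inline void *ring_get(struct ring *r)
{
    void *p = r->slot[r->out];              /* consumer drains the "out" slot */
    r->out = (r->out + 1) & (RING_SIZE - 1);
    return p;
}
#endif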
static int lxrt_handle_trap(int vec, int signo, struct pt_regs *regs, void *dummy_data)
{
    struct task_struct *tsk;
    DECLARE_RT_CURRENT;
    RT_TASK *rt_task;

    ASSIGN_RT_CURRENT;
    if (!rt_current->lnxtsk) {
        if (rt_current->task_trap_handler[vec]) {
            return rt_current->task_trap_handler[vec](vec, signo, regs, rt_current);
        }
        rt_printk("Default Trap Handler: vector %d: Suspend RT task %p\n", vec, rt_current);
        rt_task_suspend(rt_current);
        return 1;
    }
    tsk = rtai_get_current(cpuid);
    if ((rt_task = tsk->this_rt_task[0]) && rt_task->is_hard > 0) {
        give_back_to_linux(rt_task);
        rt_task->is_hard = 2;
    }
    return 0;
}

static int lxrt_handle_signal(struct task_struct *lnxtsk, int sig)
{
    RT_TASK *task = (RT_TASK *)lnxtsk->this_rt_task[0];

    if ((task->force_soft = task->is_hard > 0)) {
        rt_global_cli();
        if (task->state != RT_SCHED_READY) {
            task->state &= ~RT_SCHED_READY;
            enq_ready_task(task);
            RT_SCHEDULE(task, hard_cpu_id());
        }
        rt_global_sti();
        return 0;
    }
    if (task->state) {
        lnxtsk->state = TASK_INTERRUPTIBLE;
    }
    return 1;
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)

static struct mmreq {
    int in, out, count;
#define MAX_MM 32  /* Should be more than enough (must be a power of 2). */
#define bump_mmreq(x) do { x = (x + 1) & (MAX_MM - 1); } while(0)
    struct mm_struct *mm[MAX_MM];
} lxrt_mmrqtab[NR_CPUS];

static void lxrt_intercept_schedule_head (adevinfo_t *evinfo)
{
    struct { struct task_struct *prev, *next; } *evdata = (__typeof(evdata))evinfo->evdata;
    struct task_struct *prev = evdata->prev;

    /* The SCHEDULE_HEAD event is sent by the (Adeosized) Linux kernel each
       time it's about to switch a process out. This hook is aimed at
       preventing the last active MM from being dropped during the LXRT
       real-time operations since it's a lengthy atomic operation. See
       kernel/sched.c (schedule()) for more. The MM dropping is simply
       postponed until the SCHEDULE_TAIL event is received, right after the
       incoming task has been switched in. */

    if (!prev->mm) {
        struct mmreq *p = lxrt_mmrqtab + task_cpu(prev);
        struct mm_struct *oldmm = prev->active_mm;
        BUG_ON(p->count >= MAX_MM);
        /* Prevent the MM from being dropped in schedule(), then pend a
           request to drop it later in lxrt_intercept_schedule_tail(). */
        atomic_inc(&oldmm->mm_count);
        p->mm[p->in] = oldmm;
        bump_mmreq(p->in);
        p->count++;
    }

    adeos_propagate_event(evinfo);
}

#endif /* KERNEL_VERSION < 2.6.0 */

static void lxrt_intercept_schedule_tail (adevinfo_t *evinfo)
{
    if (evinfo->domid == RTAI_DOMAIN_ID) {
        /* About to resume after the transition to hard LXRT mode. Do _not_
           propagate this event so that Linux's tail scheduling won't be
           performed. */
        return;
    }

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
    {
        struct mmreq *p;

#ifdef CONFIG_PREEMPT
        preempt_disable();
#endif /* CONFIG_PREEMPT */

        p = lxrt_mmrqtab + smp_processor_id();

        while (p->out != p->in) {
            struct mm_struct *oldmm = p->mm[p->out];
            mmdrop(oldmm);
            bump_mmreq(p->out);
            p->count--;
        }

#ifdef CONFIG_PREEMPT
        preempt_enable();
#endif /* CONFIG_PREEMPT */
    }
#endif /* KERNEL_VERSION < 2.6.0 */

    adeos_propagate_event(evinfo);
}
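#if 0
/*
 * Illustrative sketch, not part of the original source: the essence of the
 * SCHEDULE_HEAD/SCHEDULE_TAIL pair above.  An extra reference keeps the
 * outgoing task's active_mm alive so schedule() cannot run the lengthy,
 * atomic mmdrop() while LXRT is in a hard real-time section; the references
 * pended in lxrt_mmrqtab are dropped only after the next task has been
 * switched in.  The helper names below are hypothetical refactorings of the
 * code above.
 */
static void lxrt_hold_mm(struct mmreq *p, struct mm_struct *mm)
{
    atomic_inc(&mm->mm_count);          /* prevents mmdrop() inside schedule() */
    p->mm[p->in] = mm;
    bump_mmreq(p->in);
    p->count++;
}

static void lxrt_release_held_mms(struct mmreq *p)
{
    while (p->out != p->in) {           /* drop every MM pended on this CPU */
        mmdrop(p->mm[p->out]);
        bump_mmreq(p->out);
        p->count--;
    }
}
#endif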
static void lxrt_intercept_signal (adevinfo_t *evinfo)
{
    if (lxrt_signal_handler) {
        struct { struct task_struct *task; int sig; } *evdata = (__typeof(evdata))evinfo->evdata;
        struct task_struct *task = evdata->task;

        if (evdata->sig == SIGKILL &&
            (task->policy == SCHED_FIFO || task->policy == SCHED_RR) &&
            task->ptd[0]) {
            if (!lxrt_signal_handler(task, evdata->sig)) {
                /* Don't propagate so that Linux won't further process the
                   signal. */
                return;
            }
        }
    }

    adeos_propagate_event(evinfo);
}

static void lxrt_intercept_syscall (adevinfo_t *evinfo)
{
    adeos_declare_cpuid;
    unsigned long flags;

    adeos_get_cpu(flags);
    if (test_bit(cpuid, &rtai_cpu_lxrt)) {
        struct task_struct *t = evinfo->domid == adp_current->domid ?
            current : rtai_get_root_current(cpuid);
        RT_TASK *task = t->this_rt_task[0];
        give_back_to_linux(task);
        task->is_hard = 2;
        SYSW_DIAG_MSG(rt_printk("FORCING IT SOFT, PID = %d.\n", t->pid););
    }
    adeos_put_cpu(flags);
    adeos_propagate_event(evinfo);
}

/* ++++++++++++++++++++++++++ SCHEDULER PROC FILE +++++++++++++++++++++++++++ */

#ifdef CONFIG_PROC_FS
/* -----------------------< proc filesystem section >-------------------------*/

static int rtai_read_sched(char *page, char **start, off_t off, int count,
                           int *eof, void *data)
{
    PROC_PRINT_VARS;
    int cpuid, i = 1;
    unsigned long t;
    RT_TASK *task;

    PROC_PRINT("\nRTAI LXRT Real Time Task Scheduler.\n\n");
    PROC_PRINT(" Calibrated CPU Frequency: %lu Hz\n", tuned.cpu_freq);
    PROC_PRINT(" Calibrated 8254 interrupt to scheduler latency: %d ns\n",
               imuldiv(tuned.latency - tuned.setup_time_TIMER_CPUNIT, 1000000000, tuned.cpu_freq));
    PROC_PRINT(" Calibrated one shot setup time: %d ns\n\n",
               imuldiv(tuned.setup_time_TIMER_CPUNIT, 1000000000, tuned.cpu_freq));
    PROC_PRINT("Number of RT CPUs in system: %d\n\n", NR_RT_CPUS);

    PROC_PRINT("Priority Period(ns) FPU Sig State CPU Task HD/SF PID RT_TASK * TIME\n");
    PROC_PRINT("------------------------------------------------------------------------------\n");

    for (cpuid = 0; cpuid < NR_RT_CPUS; cpuid++) {
        task = &rt_linux_task;
        /*
         * Display all the active RT tasks and their state.
         *
         * Note: As a temporary hack the tasks are given an id which is
         * the order they appear in the task list, needs fixing!
         */
        while ((task = task->next)) {
            /*
             * The display for the task period is set to an integer (%d) as
             * 64 bit numbers are not currently handled correctly by the
             * kernel routines. Hence the period display will be wrong for
             * time periods > ~4 secs.
             */
            t = 0;
            if ((!task->lnxtsk || task->is_hard) && task->exectime[1]) {
                t = 1000UL * (unsigned long)llimd(task->exectime[0], 10, tuned.cpu_freq) /
                    (unsigned long)llimd(rdtsc() - task->exectime[1], 10, tuned.cpu_freq);
            }
            PROC_PRINT("%-10d %-11lu %-4s %-4s 0x%-4x %-4d %-4d %-4d %-4d %p %-lu\n",
                       task->priority,
                       (unsigned long)count2nano_cpuid(task->period, task->runnable_on_cpus),
                       task->uses_fpu ? "Yes" : "No",
                       task->signal ? "Yes" : "No",
                       task->state,
                       cpuid,
                       i,
                       task->is_hard,
                       task->lnxtsk ? task->lnxtsk->pid : 0,
                       task,
                       t);
            i++;
        } /* End while loop - display all RT tasks on a CPU. */

        PROC_PRINT("TIMED\n");
        task = &rt_linux_task;
        while ((task = task->tnext) != &rt_linux_task) {
            PROC_PRINT("> %p ", task);
        }

        PROC_PRINT("\nREADY\n");
        task = &rt_linux_task;
        while ((task = task->rnext) != &rt_linux_task) {
            PROC_PRINT("> %p ", task);
        }
    } /* End for loop - display RT tasks on all CPUs. */

    PROC_PRINT_DONE;
} /* End function - rtai_read_sched */
static int rtai_proc_sched_register(void)
{
    struct proc_dir_entry *proc_sched_ent;

    proc_sched_ent = create_proc_entry("scheduler", S_IFREG|S_IRUGO|S_IWUSR, rtai_proc_root);
    if (!proc_sched_ent) {
        printk("Unable to initialize /proc/rtai/scheduler\n");
        return(-1);
    }
    proc_sched_ent->read_proc = rtai_read_sched;
    return(0);
} /* End function - rtai_proc_sched_register */

static void rtai_proc_sched_unregister(void)
{
    remove_proc_entry("scheduler", rtai_proc_root);
} /* End function - rtai_proc_sched_unregister */

/* --------------------< end of proc filesystem section >---------------------*/
#endif /* CONFIG_PROC_FS */

/* ++++++++++++++ SCHEDULER ENTRIES AND RELATED INITIALISATION ++++++++++++++ */

static struct rt_native_fun_entry rt_sched_entries[] = {
    { { 0, rt_named_task_init },                NAMED_TASK_INIT },
    { { 0, rt_named_task_init_cpuid },          NAMED_TASK_INIT_CPUID },
    { { 0, rt_named_task_delete },              NAMED_TASK_DELETE },
    { { 1, rt_task_yield },                     YIELD },
    { { 1, rt_task_suspend },                   SUSPEND },
    { { 1, rt_task_resume },                    RESUME },
    { { 1, rt_task_make_periodic },             MAKE_PERIODIC },
    { { 1, rt_task_wait_period },               WAIT_PERIOD },
    { { 1, rt_sleep },                          SLEEP },
    { { 1, rt_sleep_until },                    SLEEP_UNTIL },
    { { 0, start_rt_timer },                    START_TIMER },
    { { 0, stop_rt_timer },                     STOP_TIMER },
    { { 0, rt_get_time },                       GET_TIME },
    { { 0, count2nano },                        COUNT2NANO },
    { { 0, nano2count },                        NANO2COUNT },
    { { 0, rt_busy_sleep },                     BUSY_SLEEP },
    { { 0, rt_set_periodic_mode },              SET_PERIODIC_MODE },
    { { 0, rt_set_oneshot_mode },               SET_ONESHOT_MODE },
    { { 0, rt_task_signal_handler },            SIGNAL_HANDLER },
    { { 0, rt_task_use_fpu },                   TASK_USE_FPU },
    { { 0, rt_linux_use_fpu },                  LINUX_USE_FPU },
    { { 0, rt_preempt_always },                 PREEMPT_ALWAYS_GEN },
    { { 0, rt_get_time_ns },                    GET_TIME_NS },
    { { 0, rt_get_cpu_time_ns },                GET_CPU_TIME_NS },
    { { 0, rt_set_runnable_on_cpus },           SET_RUNNABLE_ON_CPUS },
    { { 0, rt_set_runnable_on_cpuid },          SET_RUNNABLE_ON_CPUID },
    { { 0, rt_get_timer_cpu },                  GET_TIMER_CPU },
    { { 0, start_rt_apic_timers },              START_RT_APIC_TIMERS },
    { { 0, rt_preempt_always_cpuid },           PREEMPT_ALWAYS_CPUID },
    { { 0, count2nano_cpuid },                  COUNT2NANO_CPUID },
    { { 0, nano2count_cpuid },                  NANO2COUNT_CPUID },
    { { 0, rt_get_time_cpuid },                 GET_TIME_CPUID },
    { { 0, rt_get_time_ns_cpuid },              GET_TIME_NS_CPUID },
    { { 1, rt_task_make_periodic_relative_ns }, MAKE_PERIODIC_NS },
    { { 0, rt_set_sched_policy },               SET_SCHED_POLICY },
    { { 1, rt_task_set_resume_end_times },      SET_RESUME_END },
    { { 0, rt_spv_RMS },                        SPV_RMS },
    { { 0, rt_task_wakeup_sleeping },           WAKEUP_SLEEPING },
    { { 1, rt_change_prio },                    CHANGE_TASK_PRIO },
    { { 0, rt_set_resume_time },                SET_RESUME_TIME },
    { { 0, rt_set_period },                     SET_PERIOD },
    { { 0, rt_is_hard_timer_running },          HARD_TIMER_RUNNING },
    { { 0, rt_get_adr },                        GET_ADR },
    { { 0, rt_get_name },                       GET_NAME },
    { { 0, 0 },                                 000 }
};
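/*
 * lxrt_init(): module initialisation for the LXRT scheduler.  Summarising the
 * code below: run the arch-dependent setup, request the wake-up srq serviced
 * by wake_up_srq_handler(), install the task-migration virtual IRQ, spawn the
 * per-CPU kernel-thread reservoir (kthread_b/kthread_m), register the
 * rt_sched_entries[] table, and hook the trap, signal, scheduling and syscall
 * events that implement the hard/soft transitions above.
 */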
static int lxrt_init(void)
{
    int lxrt_init_archdep(void);
    int cpuid, err;

    err = lxrt_init_archdep();
    if (err) return err;

    /* 2865600023UL is nam2num("USPAPP") */
    if ((wake_up_srq.srq = rt_request_srq(2865600023UL, wake_up_srq_handler, 0)) < 0) {
        printk("LXRT: no wake_up_srq available.\n");
        return wake_up_srq.srq;
    }

    /* We will start stealing Linux tasks as soon as the reservoir is
       instantiated, so create the migration service now. */
    lxrt_migration_virq = adeos_alloc_irq();
    adeos_virtualize_irq_from(&rtai_domain,
                              lxrt_migration_virq,
                              lxrt_migration_handler,
                              NULL,
                              IPIPE_HANDLE_MASK);

    if (Reservoir <= 0) Reservoir = 1;
    Reservoir = (Reservoir + NR_RT_CPUS - 1)/NR_RT_CPUS;

    for (cpuid = 0; cpuid < num_online_cpus(); cpuid++) {
        taskav[cpuid] = (void *)kmalloc(Reservoir*sizeof(void *), GFP_KERNEL);
        init_MUTEX_LOCKED(&resem[cpuid]);
        kernel_thread((void *)kthread_b, (void *)cpuid, 0);
        kernel_thread((void *)kthread_m, (void *)cpuid, 0);
        down(&resem[cpuid]);
        down(&resem[cpuid]);
        klistm[cpuid].in = (2*Reservoir) & (MAX_WAKEUP_SRQ - 1);
        wake_up_process(kthreadm[cpuid]);
    }

    for (cpuid = 0; cpuid < MAX_LXRT_FUN; cpuid++) {
        rt_fun_lxrt[cpuid].type = 1;
        rt_fun_lxrt[cpuid].fun = nihil;
    }

    set_rt_fun_entries(rt_sched_entries);

    lxrt_old_trap_handler = rt_set_rtai_trap_handler(lxrt_handle_trap);
    lxrt_signal_handler = lxrt_handle_signal;

#ifdef CONFIG_PROC_FS
    rtai_proc_lxrt_register();
#endif

    /* Must be called on behalf of the Linux domain. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
    adeos_catch_event(ADEOS_SCHEDULE_HEAD, &lxrt_intercept_schedule_head);
#endif /* KERNEL_VERSION < 2.6.0 */
    adeos_catch_event(ADEOS_SCHEDULE_TAIL, &lxrt_intercept_schedule_tail);
    adeos_catch_event(ADEOS_SIGNAL_PROCESS, &lxrt_intercept_signal);
    adeos_catch_event_from(&rtai_domain, ADEOS_SYSCALL_PROLOGUE, &lxrt_intercept_syscall);
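#if 0
/*
 * Illustrative user-space sketch, not part of the original source: the usual
 * way an LXRT application exercises the machinery in this file.  The calls
 * are the standard LXRT user API (rtai_lxrt.h); exact prototypes may differ
 * between RTAI releases, so treat this as a sketch only.
 */
#include <sys/mman.h>
#include <rtai_lxrt.h>

int main(void)
{
    /* Register this Linux process as an LXRT task. */
    RT_TASK *task = rt_task_init(nam2num("EXMPL"), 0, 0, 0);

    mlockall(MCL_CURRENT | MCL_FUTURE);  /* avoid page faults in hard mode */

    rt_make_hard_real_time();            /* migrate onto a reservoir kthread */

    /* ... deterministic real-time work ... */

    rt_make_soft_real_time();            /* give_back_to_linux() path above */
    rt_task_delete(task);
    return 0;
}
#endif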