rtsched.h
move_rr_back:
	prev->need_resched = 0;

smp_label_a
	next = get_next_task(prev, this_cpu);
smp_label_b
	next->run_list.next = (struct list_head *)1;
	sched_data->curr = next;
	re_queue_cpu(next, sched_data);
	spin_unlock_irq(&runqueue_lock);

	if (unlikely(prev == next)) {
		goto same_process;
	}

#ifdef CONFIG_SMP
	/*
	 * maintain the per-process 'last schedule' value.
	 * (this has to be recalculated even if we reschedule to
	 * the same process) Currently this is only used on SMP,
	 * and it's approximate, so we do not have to maintain
	 * it while holding the runqueue spinlock.
	 */
	sched_data->last_schedule = get_cycles();

	/*
	 * We drop the scheduler lock early (it's a global spinlock),
	 * thus we have to lock the previous process from getting
	 * rescheduled during switch_to() (since we are still on its stack).
	 *
	 * Here is how we do it.  The cpus_runnable flag will be held until
	 * the task is truly available.  On the other hand, this task
	 * was put in the ready queue while the runqueue_lock above was held,
	 * so it may be picked up by another cpu.  Suppose that cpu is this
	 * one.  Now the prior cpu left the task in the ready queue and
	 * we have just plucked it from there.  No conflict so far, but if
	 * cpus_runnable is not clear, the other cpu is still in the switch
	 * code.  There are no locks there SAVE THIS ONE!!!  Oh woe is me!
	 * At the same time, under these conditions, i.e. a task is
	 * coming out of the ready queue before we actually switch, it
	 * would be good to not switch cpus.  So let's define a "wanted"
	 * bit in the cpus_runnable member.  Oops, it is now a cpu bit mask,
	 * so, since only a few folks look at it, we will fudge it a bit.
	 * Choose an addition that is more than one bit away from a single
	 * bit.
	 *
	 * We will spin here waiting for cpus_runnable to go to zero.  Until
	 * this happens, we must not change the processor value as
	 * interrupt code depends on this being right for "current".
	 */
#define WANTED	10
#define TAKEN	20
	{
		unsigned long cur_cpus_runnable = next->cpus_runnable;

		atomic_add(WANTED, (atomic_t *)&next->cpus_runnable);
		/*
		 * It is either "WANTED + cur_cpus_runnable", which means we
		 * need to wait, or it is:
		 * A. The old cpu_id + WANTED, or
		 * B. WANTED - 1, which means it cleared (or was clear), or
		 * C. TAKEN + cur_cpus_runnable.
		 */
		while ((cur_cpus_runnable != ~0UL) &&
		       (volatile int)next->cpus_runnable ==
						WANTED + cur_cpus_runnable) {
			unsigned long my_cpu = 1 << this_cpu;

			barrier();
			/*
			 * OK, so while we wait, let's look in on prev and see
			 * if it is wanted.
			 */
			if ((volatile int)prev->cpus_runnable != my_cpu) {
				/*
				 * Another cpu wants the task we have yet to
				 * switch away from.  Let's steal it back.
				 * Once WANTED is set on prev, we can clear it
				 * either here or in schedule_tail.  The other
				 * cpu can clear it by coming here, where it
				 * will be known to it as next...
				 * Here, we set it to (TAKEN + my_cpu); in
				 * schedule_tail it is set to my_cpu.
				 */
				spin_lock_irq(&runqueue_lock);
				if ((volatile int)prev->cpus_runnable != my_cpu) {
					spin_unlock_irq(&runqueue_lock);
					continue;
				}
				/*
				 * Three possibilities on the state of next:
				 * 1.) cpus_runnable has gone to ~0UL.  Means
				 *     the prior cpu has finished and is not
				 *     interested.  So put back in ready queue.
				 * 2.) The other cpu noticed our interest and
				 *     stole it back (cpus_runnable will be
				 *     TAKEN + its flag).  Do nothing.
				 * 3.) No change, put back in the ready queue.
				 * Note, case 3 presents a bit of a race on our
				 * clearing the WANTED bit.  So, we subtract and
				 * if the result is negative, set it back to
				 * ~0UL (no cpu).
				 */
				if ((volatile int)next->cpus_runnable !=
				    cur_cpus_runnable + TAKEN) {
					atomic_add(-WANTED,
					    (atomic_t *)&next->cpus_runnable);
					if ((volatile int)next->cpus_runnable < 0) {
						next->cpus_runnable = ~0UL;
					}
					add_to_runqueue(next, this_cpu);
				}
				/*
				 * So much for "next".  Now let's take prev.
				 * Setting cpus_runnable to TAKEN + old will pop
				 * the waiter out of the wait loop.
				 * We then wait for it to clear TAKEN to
				 * complete the handshake.  We handshake here
				 * to keep the other cpu from seeing some later
				 * state that may be wrong.
				 */
				prev->cpus_runnable = TAKEN + my_cpu;
				next = prev;
				spin_unlock_irq(&runqueue_lock);
				while ((volatile int)prev->cpus_runnable ==
							TAKEN + my_cpu) {
					barrier();
				}
				spin_lock_irq(&runqueue_lock);
				goto _smp_label_b;
			}
		}
		/*
		 * If we popped out of the while because cpus_runnable has
		 * TAKEN set, it means the prior owner stole back the task.
		 * Time to rescan the ready queue (after clearing the TAKEN
		 * bit to complete the handshake).  The other possibilities
		 * are:
		 * cpus_runnable = WANTED - 1 (was clear when we started)
		 * cpus_runnable = -1 (was his, but the other cpu finished,
		 *                     setting it to -1)
		 */
		if ((volatile int)next->cpus_runnable ==
					TAKEN + cur_cpus_runnable) {
			atomic_add(-TAKEN, (atomic_t *)&next->cpus_runnable);
			spin_lock_irq(&runqueue_lock);
			goto _smp_label_a;
		}
	}
	/*
	 * Gosh, wasn't that fun!
	 */
	task_set_cpu(next, this_cpu);
#endif /* CONFIG_SMP */

	/*
	 * An interesting problem here.  Since we turned on interrupts,
	 * we could now have a need_resched flag set in prev.  Actually
	 * this can only happen on interrupt, and then only be meaningful
	 * if it is done by a wakeup() call to reschedule_idle().  This
	 * is covered, as that code will set the need_resched flag in the
	 * task found by cpu_curr(), which comes from the cpu structs
	 * which we have already updated.
	 * The remaining problems come from left-over timeouts against
	 * prev, but he was the target and he is gone now... unless
	 * we did not really switch.  So we clear the need_resched flag
	 * in the switch path, not in the no-switch path.
	 */
	kstat.context_swtch++;
	/*
	 * there are 3 processes which are affected by a context switch:
	 *
	 * prev == .... ==> (last => next)
	 *
	 * It's the 'much more previous' 'prev' that is on next's stack,
	 * but prev is set to (the just run) 'last' process by switch_to().
	 * This might sound slightly confusing but makes tons of sense.
	 */
	prepare_to_switch();
	{
		struct mm_struct *mm = next->mm;
		struct mm_struct *oldmm = prev->active_mm;

		if (!mm) {
			if (next->active_mm) BUG();
			next->active_mm = oldmm;
			atomic_inc(&oldmm->mm_count);
			enter_lazy_tlb(oldmm, next, this_cpu);
		} else {
			if (next->active_mm != mm) BUG();
			switch_mm(oldmm, mm, next, this_cpu);
		}

		if (!prev->mm) {
			prev->active_mm = NULL;
			mmdrop(oldmm);
		}
	}

	TRACE_SCHEDCHANGE(prev, next);

	/*
	 * This just switches the register state and the
	 * stack.
	 */
	switch_to(prev, next, prev);
	__schedule_tail(prev);
	prev->need_resched = 0;

same_process:
	reacquire_kernel_lock(current);
	preempt_enable_no_resched();
	if (!current->need_resched) {
#ifdef CONFIG_PREEMPT_TIMES
		if (preempt_get_count()) {
			if (current->pid) {
				preempt_lock_force_start();
			} else {
				preempt_lock_force_stop();
			}
		}
#endif
		return;
	}

	/* The task managed to get its need_resched flag set already! */
	goto try_try_again;

move_rr_last:
	prev->counter = NICE_TO_TICKS(prev->nice);
move_yield_last:
	if (prev->effprio)	/* non-real-time tasks get cleared later */
		prev->policy &= ~SCHED_YIELD;
	add_last_runqueue(prev);
	goto move_rr_back;
}
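/*
 * The WANTED/TAKEN dance above is easier to see in isolation.  Below is a
 * minimal user-space sketch (guarded out with #if 0 -- it is not part of
 * rtsched.h) that plays the same handshake with C11 atomics and two
 * pthreads standing in for two cpus.  The names owner_word, claimer and
 * releaser are invented for the illustration; the kernel code above is the
 * reference, and the runqueue_lock interactions are deliberately left out.
 */
#if 0
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define DEMO_WANTED	10UL
#define DEMO_TAKEN	20UL

/* Stands in for task->cpus_runnable: the owner's cpu bit, or ~0UL if free. */
static atomic_ulong owner_word = 1;	/* the "task" last ran on cpu bit 1 */

/* The cpu that just dequeued the task: announce interest, then wait. */
static void *claimer(void *unused)
{
	/* Like atomic_add(WANTED, ...): sample the old value and add WANTED. */
	unsigned long seen = atomic_fetch_add(&owner_word, DEMO_WANTED);

	/* Spin while the previous owner is still in its switch code. */
	while (atomic_load(&owner_word) == seen + DEMO_WANTED)
		;

	if (atomic_load(&owner_word) == seen + DEMO_TAKEN) {
		/* The owner stole the task back: ack by clearing TAKEN. */
		atomic_fetch_sub(&owner_word, DEMO_TAKEN);
		printf("claimer: task taken back, would rescan the ready queue\n");
	} else {
		printf("claimer: task is ours now\n");
	}
	(void)unused;
	return NULL;
}

/* The previous owner, finishing its switch away from the task. */
static void *releaser(void *unused)
{
	/* Wait until another cpu announces interest by adding WANTED. */
	while (atomic_load(&owner_word) == 1)
		;
	/* Steal the task back: TAKEN + my cpu bit pops the waiter out. */
	atomic_store(&owner_word, 1 + DEMO_TAKEN);
	/* Handshake: wait for the claimer to clear TAKEN before moving on. */
	while (atomic_load(&owner_word) == 1 + DEMO_TAKEN)
		;
	(void)unused;
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, claimer, NULL);
	pthread_create(&b, NULL, releaser, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}
#endif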
static inline struct task_struct *find_process_by_pid(pid_t pid);

static int setscheduler(pid_t pid, int policy, struct sched_param *param)
{
	struct sched_param lp;
	struct task_struct *p;
	int retval;

	retval = -EINVAL;
	if (!param || pid < 0)
		goto out_nounlock;

	retval = -EFAULT;
	if (copy_from_user(&lp, param, sizeof(struct sched_param)))
		goto out_nounlock;

	/*
	 * We play safe to avoid deadlocks.
	 */
	read_lock_irq(&tasklist_lock);
	spin_lock(&runqueue_lock);

	p = find_process_by_pid(pid);

	retval = -ESRCH;
	if (!p)
		goto out_unlock;

	if (policy < 0)
		policy = p->policy;
	else {
		retval = -EINVAL;
		if (policy != SCHED_FIFO && policy != SCHED_RR &&
		    policy != SCHED_OTHER)
			goto out_unlock;
	}

	/*
	 * Valid priorities for SCHED_FIFO and SCHED_RR are 1..MAX_PRI; the
	 * only valid priority for SCHED_OTHER is 0.
	 */
	retval = -EINVAL;
	if (lp.sched_priority < 0 || lp.sched_priority > MAX_PRI)
		goto out_unlock;
	if ((policy == SCHED_OTHER) != (lp.sched_priority == 0))
		goto out_unlock;

	retval = -EPERM;
	if ((policy == SCHED_FIFO || policy == SCHED_RR) &&
	    !capable(CAP_SYS_NICE))
		goto out_unlock;
	if ((current->euid != p->euid) && (current->euid != p->uid) &&
	    !capable(CAP_SYS_NICE))
		goto out_unlock;

	retval = 0;
	p->policy = policy;
	if (policy == SCHED_FIFO) {
		p->counter = -100;	/* we don't count down negative counters */
	} else {
		p->counter = NICE_TO_TICKS(p->nice);
	}
	p->rt_priority = lp.sched_priority;
	spin_unlock_irq(&runqueue_lock);
	set_newprio(p, lp.sched_priority);
	goto out_readunlock;

out_unlock:
	spin_unlock_irq(&runqueue_lock);
out_readunlock:
	read_unlock(&tasklist_lock);
out_nounlock:
	return retval;
}

asmlinkage long sys_sched_yield(void)
{
	/*
	 * Trick.  sched_yield() first checks to see if it will be REALLY
	 * lonely in the ready queue, and just returns if it is the only
	 * game in town.  The multiple ready queues really help here.
	 * (This test does not have to be atomic.)  In threaded
	 * applications this optimization gets triggered quite often.
	 */
	if (!list_empty(Rdy_Q_Hed(current->effprio))) {
		/*
		 * I think this is safe, as only the current task can get
		 * here and only the current task will be clearing this bit.
		 */
		current->policy |= SCHED_YIELD;
		schedule();
	}
	return 0;
}

/*
 * Seems to be the first place we hear about a given cpu as it comes up.
 * A new (including the first) cpu is reporting for duty.  Since it is
 * already running we must patch it into the processor queue.
 * We get here the first time the processor enters the idle code and also
 * one more time for the boot cpu, so... be careful not to redo what is
 * already done.  Also note that the fork that created the task put it
 * in the ready queue, so we need to take it out, except that the initial
 * cpu's task was not created by a fork.  No matter, the removal code
 * works even then.
 * We give the idle task priority -1 to keep it out of the way of tasks
 * that have real work to do.
 */
extern unsigned long wait_init_idle;

void __init init_idle(void)
{
	struct schedule_data *sched_data;
	int cpu = smp_processor_id();

	sched_data = &aligned_data[cpu].schedule_data;

	if (task_on_rq(current)) {
		del_from_runqueue(current);
	}
	sched_data->curr = current;
	sched_data->last_schedule = get_cycles();
	current->effprio = current->rt_priority = 0;
	sched_data->effprio = -1;	/* idle flag */
	sched_data->cpu = cpu;
	clear_bit(current->processor, &wait_init_idle);
#ifdef CONFIG_SMP
	if (!sched_data->schedule_data_list.next) {
		list_add_tail(&sched_data->schedule_data_list, &hed_cpu_prio);
	}
#endif
#ifdef CONFIG_PREEMPT
	if (current->processor)
		current->preempt_count = 0;
#endif
}
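/*
 * The code above leans on one ready-queue list head per effective priority
 * (Rdy_Q_Hed(current->effprio)), and sched_init() below initializes
 * MAX_PRI + 1 of them.  The sketch below (guarded out with #if 0 -- not
 * part of rtsched.h) is only a rough user-space illustration of that idea:
 * one FIFO list per priority, highest non-empty priority runs first.  The
 * real Rdy_Q_Hed(), get_next_task() and add_last_runqueue() are defined
 * elsewhere in rtsched.h; every name prefixed demo_ here is invented.
 */
#if 0
#include <stddef.h>
#include <stdio.h>

#define DEMO_MAX_PRI	99	/* illustrative; the real limit is MAX_PRI */

struct demo_task {
	int prio;			/* 0 = SCHED_OTHER, 1.. = real time */
	struct demo_task *next;		/* singly linked for brevity */
};

static struct demo_task *demo_rdy_q[DEMO_MAX_PRI + 1];

/* Rough analogue of add_last_runqueue(): append at the tail of its list. */
static void demo_enqueue(struct demo_task *t)
{
	struct demo_task **pp = &demo_rdy_q[t->prio];

	while (*pp)
		pp = &(*pp)->next;
	t->next = NULL;
	*pp = t;
}

/* Rough analogue of get_next_task(): highest non-empty priority wins. */
static struct demo_task *demo_pick_next(void)
{
	int prio;

	for (prio = DEMO_MAX_PRI; prio >= 0; prio--) {
		if (demo_rdy_q[prio]) {
			struct demo_task *t = demo_rdy_q[prio];
			demo_rdy_q[prio] = t->next;
			return t;
		}
	}
	return NULL;			/* only the idle task is left */
}

int main(void)
{
	struct demo_task a = { .prio = 0 }, b = { .prio = 50 }, c = { .prio = 50 };
	struct demo_task *first, *second, *third;

	demo_enqueue(&a);
	demo_enqueue(&b);
	demo_enqueue(&c);

	first  = demo_pick_next();	/* b: highest priority, enqueued first */
	second = demo_pick_next();	/* c: same priority, FIFO behind b */
	third  = demo_pick_next();	/* a: SCHED_OTHER runs last */
	printf("%d %d %d\n", first->prio, second->prio, third->prio);
	return 0;
}
#endif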
extern void init_timervecs(void);

void __init sched_init(void)
{
	/*
	 * We have to do a little magic to get the first
	 * process right in SMP mode.
	 */
	int cpu = smp_processor_id();
	int nr;
	int i;

	init_task.processor = cpu;

	/* Init the ready queues */
	for (i = 0; i <= MAX_PRI; i++) {
		INIT_LIST_HEAD(Rdy_Q_Hed(i));
	}

	for (nr = 0; nr < PIDHASH_SZ; nr++)
		pidhash[nr] = NULL;

	printk("rtsched version " VERSION_DATE "\n");

	init_timervecs();
	init_bh(TIMER_BH, timer_bh);
	init_bh(TQUEUE_BH, tqueue_bh);
	init_bh(IMMEDIATE_BH, immediate_bh);

	/*
	 * The boot idle thread does lazy MMU switching as well:
	 */
	atomic_inc(&init_mm.mm_count);
	enter_lazy_tlb(&init_mm, current, cpu);
}
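/*
 * For reference, this is how user space reaches the two entry points above:
 * a small stand-alone program (guarded out with #if 0 -- not part of
 * rtsched.h) that asks for SCHED_FIFO via the sched_setscheduler() library
 * call, whose system call lands in setscheduler() above, and then calls
 * sched_yield(), which is sys_sched_yield().  It needs CAP_SYS_NICE
 * (typically root), as enforced by the capability checks in setscheduler().
 */
#if 0
#include <sched.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	struct sched_param sp;

	memset(&sp, 0, sizeof(sp));
	sp.sched_priority = 10;		/* 1..MAX_PRI for SCHED_FIFO/SCHED_RR */

	/* pid 0 means "the calling process". */
	if (sched_setscheduler(0, SCHED_FIFO, &sp) < 0) {
		fprintf(stderr, "sched_setscheduler: %s\n", strerror(errno));
		return 1;
	}

	/*
	 * Give equal-priority tasks a turn.  With the per-priority ready
	 * queues above, this returns immediately if nothing else is
	 * runnable at our effective priority.
	 */
	sched_yield();
	return 0;
}
#endif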