
📄 rtsched.h

📁 Linux 2.4.20 kernel modifications targeting the Samsung s3c2410 development board.
📖 Page 1 of 3
         * runq.ticks during all this.  Also, we don't give it all back
         * if the yielder has more than the next guy.
         */
        oldcounter = 0;
        if ( unlikely(prev->policy == (SCHED_YIELD | SCHED_OTHER)) ){
                if ( unlikely(prev->counter == runq.ticks)) {
                        prev->policy = SCHED_OTHER;
                        runq.ticks = 0;
                }else{
                        oldcounter = prev->counter;
                        prev->counter = 0;
                }
        }
        c = -1000;
        if (likely(runq.ticks > 0)) {
                do {
                        int weight;
                        struct task_struct *p =
                                list_entry(next, struct task_struct, run_list);
                        /* if (can_schedule(p, this_cpu)) */ {
                                weight = goodness(p, this_cpu, prev->active_mm);
                                if (weight > c)
                                        c = weight, it = p;
                        }
                        next = next->next;
                } while (next != rqptr);
                /*
                 * if we get out of sync with the runq.ticks counter
                 * force it to 0 and catch it next time around. Note we
                 * catch a negative counter on entry.
                 */
                if ( unlikely(c <= 0 )){
                        runq.ticks = 0;
                }
        }else{
#ifdef CONFIG_SMP
                /*
                 * Here we update the tasks that are current on other
                 * processors
                 */
                struct list_head *wkptr,
                        *cptr = &aligned_data[(this_cpu)].
                                schedule_data.
                                schedule_data_list;
                runq.ticks = 0;
                list_for_each ( wkptr, &hed_cpu_prio) {
                        struct task_struct *p;
                        if (cptr == wkptr ) continue;
                        p = list_entry(wkptr,
                                       struct schedule_data,
                                       schedule_data_list)->curr;
                        if ( p->effprio == 0){
                                p->counter = (p->counter >> 1) +
                                        NICE_TO_TICKS(p->nice);
                                p->counter_recalc++;
                        }
                }
#else
                runq.ticks = 0;
#endif
                runq.recalc++;
                do {
                        int weight;
                        struct task_struct *p =
                                list_entry(next, struct task_struct, run_list);
                        runq.ticks +=
                                p->counter = NICE_TO_TICKS(p->nice);
                        p->counter_recalc++;
                        /* if (can_schedule(p, this_cpu)) */
                        {
                                weight = goodness(p, this_cpu, prev->active_mm);
                                if (weight > c)
                                        c = weight, it = p;
                        }
                        next = next->next;
                } while (next != rqptr);
        }
        /* Undo the stuff we did for SCHED_YIELD.  We know we did something
         * if oldcounter != 0.
         */
        if (unlikely(oldcounter)){
                prev->counter = (it->counter < oldcounter) ?
                        it->counter : oldcounter;
                runq.ticks += prev->counter - oldcounter;
                prev->policy &= ~SCHED_YIELD;
        }
        goto back_from_figure_non_rt_next;
}

/* Add to the head of the run queue */
static inline void add_to_runqueue(struct task_struct * p, int cpu)
{
        struct list_head *next;
        int prio;

        /* idle tasks don't get put in the list */
        if (unlikely(p == idle_task(cpu))) return;

        prio = p->effprio;
        next = Rdy_Q_Hed(prio);
        if (list_empty(next)) { /* an empty queue */
                set_rq_bit(prio);
                if (high_prio < prio) {
                        high_prio = prio;
                }
        }
        list_add(&p->run_list, next);
        p->newprio = newprio_ready_q;
        if ( likely(!p->effprio )) {
                int diff, c;

                if ((diff = runq.recalc - p->counter_recalc) != 0) {
                        p->counter_recalc = runq.recalc;
                        c = NICE_TO_TICKS(p->nice) << 1;
                        p->counter = diff > 8 ? c - 1 :  /* max priority */
                                c + ((p->counter - c) >> diff);
                }
                runq.ticks += p->counter;
        }
        nr_running++;
}

/*
 * This function is only called from schedule() so it need not worry
 * about updating the counter as it should never be out of date.
 * If you change this, remember to do the update.
 */
static inline void add_last_runqueue(struct task_struct * p)
{
        struct list_head *next = Rdy_Q_Hed(p->effprio);

        if (list_empty(next)) {
                /* empty list, set the bit */
                set_rq_bit(p->effprio);
                if (p->effprio > high_prio){
                        high_prio = p->effprio;
                }
        }
        list_add_tail(&p->run_list, next);
        p->newprio = newprio_ready_q;
        if ( !p->effprio ) runq.ticks += p->counter;
        nr_running++;
}

static inline void move_first_runqueue(struct task_struct * p)
{
        list_del(&p->run_list);
        list_add_tail(&p->run_list, Rdy_Q_Hed(p->effprio));
}

/*
 * When we have a task in some queue by priority, we need
 * to provide a way to change that priority.  Depending on the
 * queue we must do different things.  We handle this by putting
 * a function address in the task_struct (newprio()).
 *
 * First a front end routine to take care of the case where the task
 * is not in any priority queues.  We take the runqueue_lock
 * here, so the caller must not.  Since we may be called
 * recursively, protect against a deadlock.
 */
static struct task_struct *newprio_inuse;
static int newprio_inuse_count;

void set_newprio(struct task_struct * tptr, int newprio)
{
        if ( newprio_inuse != current){
                spin_lock_irq(&runqueue_lock);
                newprio_inuse = current;
        }
        newprio_inuse_count++;
        if (! tptr->newprio ) {
                tptr->effprio = newprio;
        }else if ( tptr->effprio != newprio) {
                tptr->newprio(tptr, newprio);
        }
        if ( ! --newprio_inuse_count ){
                spin_unlock_irq(&runqueue_lock);
                newprio_inuse = 0;
        }
}

/*
 * Here are the routines we use for the ready queue and an executing
 * process.  Note that the executing process may fall out of favor
 * as a result of the change.  We do the right thing.  Note that newprio
 * is not cleared, so we test here to see if the task is still running.
 */
static void newprio_ready_q(struct task_struct * tptr, int newprio)
{
        _del_from_runqueue(tptr);
        tptr->effprio = newprio;
        add_to_runqueue(tptr, 0);
        reschedule_idle(tptr);
}

#ifdef CONFIG_SMP
static void newprio_executing(struct task_struct *tptr, int newprio)
{
        int cpu;
        struct schedule_data *sched_data;

        if (!newprio || newprio < tptr->effprio){
                tptr->need_resched = 1;
        }
        cpu = tptr->processor;
        sched_data = &aligned_data[cpu].schedule_data;
        tptr->effprio = newprio;
        if ( sched_data->curr != tptr) return; /* if not expected, out of here */
        re_queue_cpu(tptr, sched_data);
        if ((cpu != smp_processor_id()) && tptr->need_resched)
                smp_send_reschedule(cpu);
}
#endif

/*
 * Wake up a process.  Put it on the ready-queue if it's not
 * already there.  The "current" process is not on the
 * ready-queue (it makes it much easier to figure out if we
 * need to preempt, esp. the real time case).  It is possible
 * to wake the current process.  This happens when it is woken
 * before schedule() has had a chance to put it properly to
 * sleep.  If schedule() did not turn on interrupts in the middle of
 * things this would all be ok; however, it does, so we have the
 * possibility of being in that window.
 *
 * The "current" process is never on the
 * run-queue (except when the actual re-schedule is in
 * progress), and as such you're allowed to do the simpler
 * "current->state = TASK_RUNNING" to mark yourself runnable
 * without the overhead of this.
 */
static inline int try_to_wake_up(struct task_struct * p, int synchronous)
{
        unsigned long flags;
        int success = 0;

        TRACE_PROCESS(TRACE_EV_PROCESS_WAKEUP, p->pid, p->state);

        /*
         * We want the common case to fall straight through, thus the goto.
         */
        spin_lock_irqsave(&runqueue_lock, flags);
        p->state = TASK_RUNNING;
        if ( task_on_runqueue(p) )
                goto out;
        add_to_runqueue(p, 0);
        if (!synchronous /*|| !(p->cpus_allowed & (1 << smp_processor_id()))*/)
                reschedule_idle(p);
        success = 1;
out:
        spin_unlock_irqrestore(&runqueue_lock, flags);
        return success;
}

inline int wake_up_process(struct task_struct * p)
{
        return try_to_wake_up(p, 0);
}

/*
 * schedule_tail() is getting called from the fork return path.  This
 * cleans up all remaining scheduler things, without impacting the
 * common case.
 */
static inline void __schedule_tail(struct task_struct *prev)
{
#ifdef CONFIG_SMP
        /*
         * fast path falls through.  We have to clear cpus_runnable before
         * checking prev->state to avoid a wakeup race.  Protect against
         * the task exiting early.
         */
        task_lock(prev);
        task_release_cpu(prev);
        mb();
        if (task_on_rq(prev))
                goto needs_resched;

out_unlock:
        task_unlock(prev);      /* Synchronise here with release_task() if prev is TASK_ZOMBIE */
        return;

        /*
         * Slow path - we 'push' the previous process and
         * reschedule_idle() will attempt to find a new
         * processor for it.  (But it might preempt the
         * current process as well.)  We must take the runqueue
         * lock and re-check prev->state to be correct.  It might
         * still happen that this process has a preemption
         * 'in progress' already - but this is not a problem and
         * might happen in other circumstances as well.
         */
needs_resched:
        {
                unsigned long flags;

                /*
                 * Avoid taking the runqueue lock in cases where
                 * no preemption check is necessary:
                 * Note: the idle task is NEVER on the ready queue, so
                 *       there is no need to check if prev was idle.
                 */
                spin_lock_irqsave(&runqueue_lock, flags);
                if (task_on_rq(prev) /* && !task_has_cpu(prev)*/ )
                        reschedule_idle(prev);
                spin_unlock_irqrestore(&runqueue_lock, flags);
                goto out_unlock;
        }
#define smp_label_a _smp_label_a:
#define smp_label_b _smp_label_b:
#else
        prev->policy &= ~SCHED_YIELD;
#define smp_label_a
#define smp_label_b
#endif /* CONFIG_SMP */
}

asmlinkage void schedule_tail(struct task_struct *prev)
{
        __schedule_tail(prev);
        preempt_enable();
}

/*
 * 'schedule()' is the scheduler function.  It's a very simple and nice
 * scheduler: it's not perfect, but certainly works for most things.
 *
 * The goto is "interesting".
 *
 * NOTE!!  Task 0 is the 'idle' task, which gets called when no other
 * tasks can run.  It cannot be killed, and it cannot sleep.  The 'state'
 * information in task[0] is never used.
 */
asmlinkage void schedule(void)
{
        struct schedule_data * sched_data;
        struct task_struct *prev, *next;
        int this_cpu;

#ifdef CONFIG_PREEMPT_TIMES
        if (preempt_get_count()) {
                preempt_lock_force_stop();
        }
#endif
        spin_lock_prefetch(&runqueue_lock);

 try_try_again:
        preempt_disable();
        if (unlikely(!current->active_mm)) BUG();
        prev = current;
        this_cpu = prev->processor;

        if (unlikely(in_interrupt())) {
                printk("Scheduling in interrupt\n");
                BUG();
        }

        release_kernel_lock(prev, this_cpu);

        /*
         * 'sched_data' is protected by the fact that we can run
         * only one process per CPU.
         */
        sched_data = &aligned_data[this_cpu].schedule_data;

        spin_lock_irq(&runqueue_lock);

#ifdef CONFIG_PREEMPT
        /*
         * Note that this is an '&' NOT an '&&'...
         */
        if (preempt_get_count() & PREEMPT_ACTIVE) goto sw_TASK_RUNNING;
#endif

        if (prev->state == TASK_INTERRUPTIBLE) {
                //case TASK_INTERRUPTIBLE:
                if (likely( ! signal_pending(prev))) {
                        goto sw_default;
                }
                prev->state = TASK_RUNNING;
        }
        if (prev->state != TASK_RUNNING) {
                goto sw_default;
        }
        //case TASK_RUNNING:
#ifdef CONFIG_PREEMPT
 sw_TASK_RUNNING:
#endif
        /*
         * Move an exhausted RR process to be last.
         * Do the same for yields.
         */
        if (!prev->counter && (prev->policy & SCHED_RR))
                goto move_rr_last;
        if (prev->policy & SCHED_YIELD)
                goto move_yield_last;
        /*
         * There is a case where current is already
         * in the ready queue.  That is where it was
         * on the way out, but the wait already
         * expired, so wake_up_process has already
         * done it.  In this case, we don't!!
         */
        if (!task_on_rq(prev))
                add_to_runqueue(prev, this_cpu);
        goto move_rr_back;

        //default:
 sw_default:
        prev->sleep_time = jiffies;
        prev->run_list.next = 0;
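
The listing breaks off here; the rest of schedule() continues on the next page. The ready-queue helpers used above (Rdy_Q_Hed(), set_rq_bit() and the cached high_prio) are defined elsewhere in rtsched.h and do not appear on this page. The standalone sketch below is only an illustration of the idea they appear to implement: one ready list per priority level plus a bitmap of non-empty levels, so the highest runnable priority can be located without walking empty lists. NUM_PRIO, the bitmap layout and the find_high_prio() helper are assumptions made for this example, not code taken from the file.

/*
 * Illustrative sketch only -- not code from rtsched.h.  It models the
 * bookkeeping that set_rq_bit()/high_prio rely on in add_to_runqueue():
 * one bit per priority level, set when that level's ready list becomes
 * non-empty, so the highest runnable priority can be found quickly.
 */
#include <stdio.h>

#define NUM_PRIO      128                       /* assumed number of priority levels */
#define BITS_PER_LONG (8 * (int)sizeof(unsigned long))
#define MAP_WORDS     ((NUM_PRIO + BITS_PER_LONG - 1) / BITS_PER_LONG)

static unsigned long rq_bits[MAP_WORDS];        /* one bit per priority level */

static void set_rq_bit(int prio)                /* list at 'prio' became non-empty */
{
        rq_bits[prio / BITS_PER_LONG] |= 1UL << (prio % BITS_PER_LONG);
}

static void clear_rq_bit(int prio)              /* list at 'prio' became empty */
{
        rq_bits[prio / BITS_PER_LONG] &= ~(1UL << (prio % BITS_PER_LONG));
}

/* Hypothetical helper: recompute the highest priority that has runnable work. */
static int find_high_prio(void)
{
        int word, bit;

        for (word = MAP_WORDS - 1; word >= 0; word--) {
                if (!rq_bits[word])
                        continue;
                for (bit = BITS_PER_LONG - 1; bit >= 0; bit--)
                        if (rq_bits[word] & (1UL << bit))
                                return word * BITS_PER_LONG + bit;
        }
        return 0;       /* only effprio == 0 (SCHED_OTHER) work remains */
}

int main(void)
{
        set_rq_bit(0);                          /* a SCHED_OTHER task becomes runnable */
        set_rq_bit(57);                         /* a real-time task becomes runnable */
        printf("high_prio = %d\n", find_high_prio());   /* prints 57 */
        clear_rq_bit(57);
        printf("high_prio = %d\n", find_high_prio());   /* prints 0 */
        return 0;
}

In the file itself, high_prio is kept as a cached variable and updated incrementally (as in add_to_runqueue() and add_last_runqueue() above) rather than recomputed on every change; the sketch recomputes it only to keep the example short.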
