
📄 rtsched.h

📁 Linux 2.4.20 kernel modifications for Samsung's S3C2410 development board.
💻 C header
📖 Page 1 of 3
/*
 *  linux/kernel/rtsched.h
 *
 *  NOTE: This is a .h file that is mostly source, not the usual convention.
 *        It is coded this way to allow the depend rules to correctly set
 *        up the makefile dependencies.  This is an alternate scheduler
 *        that replaces the core scheduler in sched.c.  It does not, however,
 *        replace most of the static support functions that call schedule.
 *        By making this an include file for sched.c, all of those functions
 *        are retained without the need for duplicate code and its attendant
 *        support issues.  At the same time, keeping it a separate file allows
 *        diff and patch to work most cleanly and correctly.
 *
 *  Kernel scheduler and related syscalls
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001 MontaVista Software Inc.
 *
 *  1998-12-28  Implemented better SMP scheduling by Ingo Molnar
 *  2000-03-15  Added the Real Time run queue support by George Anzinger
 *  2000-8-29   Added code to do lazy recalculation of counters
 *              by George Anzinger
 */
/*
 * 'sched.c' is the main kernel file. It contains scheduling primitives
 * (sleep_on, wakeup, schedule etc) as well as a number of simple system
 * call functions (type getpid()), which just extract a field from
 * current-task
 */
#ifndef preempt_disable
#define preempt_disable()
#define preempt_enable()
#define preempt_get_count() 0
#define preempt_enable_no_resched()
#endif

/*
 * scheduler variables
 */
#define VERSION_DATE "<20011203.1609.50>"

/*
 * We align per-CPU scheduling data on cacheline boundaries,
 * to prevent cacheline ping-pong.
 */
static union {
	struct schedule_data {
		struct task_struct * curr;
		cycles_t last_schedule;
		struct list_head schedule_data_list;
		int cpu, effprio;
	} schedule_data;
	char __pad [SMP_CACHE_BYTES];
} aligned_data [NR_CPUS] __cacheline_aligned = { {{&init_task, 0, {0, 0}, 0, 0}}};

#define cpu_curr(cpu) aligned_data[(cpu)].schedule_data.curr

static void newprio_ready_q(struct task_struct * tptr, int newprio);
#ifdef CONFIG_SMP
static void newprio_executing(struct task_struct *tptr, int newprio);
static struct list_head hed_cpu_prio __cacheline_aligned =
                                                 LIST_HEAD_INIT(hed_cpu_prio);
#endif

/*
 * task_on_rq tests for a task actually in the ready queue.
 * task_on_runque tests for a task either on the ready queue or being executed
 * (by virtue of our setting a running task's run_list.next to 1)
 */
#define task_on_rq(p) ((unsigned)p->run_list.next > 1)

static struct list_head rq[MAX_PRI+1]  ____cacheline_aligned;

static struct ready_queue {
        int recalc;            /* # of counter recalculations on SCHED_OTHER */
        int ticks;             /* # of ticks for all in SCHED_OTHER ready Q */
} runq ____cacheline_aligned;

/* Set the bit map up with guard bits below.  This will result in
 * priority -1 if there are no tasks in the ready queue, which will
 * happen as we are not putting the idle tasks in the ready queue.
 */
static struct {
        int guard;
        int rq_bit_ary[(MAX_PRI/32) + 1];
} rq_bits = {-1, {0, 0, 0, 0}};

#define rq_bit_map rq_bits.rq_bit_ary

static int high_prio = 0;

#define Rdy_Q_Hed(pri) &rq[pri]
#define PREEMPTION_THRESHOLD 1
#define NOT_RT 0   /* Use priority zero for non-RT processes */
#define last_schedule(cpu) aligned_data[(cpu)].schedule_data.last_schedule

struct kernel_stat kstat;

#ifdef CONFIG_SMP
/*
 * At the moment, we will ignore cpus_allowed, primarily because if it were
 * used, we would have a conflict in the runq.ticks count (i.e. since we
 * are not scheduling some tasks, the count would not reflect what
 * is really on the list).  Oh, and also, nowhere is there code in the
 * kernel to set cpus_allowed to anything but -1.  In the long run, we
 * would like to try separate lists for each cpu, at which point
 * cpus_allowed could be used to direct the task to the proper list.
 * Well, darn, now there is code that messes with cpus_allowed.  We will
 * change sometime soon....
 */
#define idle_task(cpu) (init_tasks[cpu_number_map(cpu)])
#define can_schedule(p,cpu) \
	((p)->cpus_runnable & (p)->cpus_allowed & (1 << cpu))
#else
#define idle_task(cpu) (&init_task)
#define can_schedule(p,cpu) (1)
#endif

void scheduling_functions_start_here(void) { }

/*
 * This is the function that decides how desirable a process is..
 * You can weigh different processes against each other depending
 * on what CPU they've run on lately etc to try to handle cache
 * and TLB miss penalties.
 *
 * Return values:
 *	 -1000: never select this
 *	     0: out of time, recalculate counters (but it might still be
 *		selected)
 *	   +ve: "goodness" value (the larger, the better)
 */
static inline int goodness(struct task_struct * p, int this_cpu, struct mm_struct *this_mm)
{
	int weight;

	/*
	 * goodness is NEVER called for Realtime processes!
	 * Realtime process, select the first one on the
	 * runqueue (taking priorities within processes
	 * into account).
	 */

	/*
	 * Give the process a first-approximation goodness value
	 * according to the number of clock-ticks it has left.
	 *
	 * Don't do any other calculations if the time slice is
	 * over or if this is an idle task.
	 */
	weight = p->counter;
	if (weight <= 0)
		goto out;

#ifdef CONFIG_SMP
	/* Give a largish advantage to the same processor...   */
	/* (this is equivalent to penalizing other processors) */
	if (p->processor == this_cpu)
		weight += PROC_CHANGE_PENALTY;
#endif

	/* .. and a slight advantage to the current MM */
	if (p->mm == this_mm || !p->mm)
		weight += 1;
	weight += 20 - p->nice;
out:
	return weight;
}

/*
 * the 'goodness value' of replacing a process on a given CPU.
 * positive value means 'replace', zero or negative means 'dont'.
 */
static inline int preemption_goodness(struct task_struct * prev, struct task_struct * p, int cpu)
{
	return goodness(p, cpu, prev->active_mm) - goodness(prev, cpu, prev->active_mm);
}

/*
 * This is ugly, but reschedule_idle() is very timing-critical.
 * We are called with the runqueue spinlock held and we must
 * not claim the tasklist_lock.
 */
static FASTCALL(void reschedule_idle(struct task_struct * p));

static void reschedule_idle(struct task_struct * p)
{
#ifdef CONFIG_SMP
	int this_cpu = smp_processor_id(), target_cpu;
	struct task_struct *target_tsk;
	struct list_head *cptr;
	struct schedule_data *sch;
	int best_cpu;

	/*
	 * shortcut if the woken up task's last CPU is
	 * idle now.
	 */
	best_cpu = p->processor;
	target_tsk = idle_task(best_cpu);
	if (cpu_curr(best_cpu) == target_tsk)
		goto preempt_now;

        /*
         * For real time, the choice is simple.  We just check
         * if the most available processor is working on a lower
         * priority task.  If so we bounce it; if not, there is
         * nothing more important than what we are doing.
         * Note that this will pick up any idle cpu(s) we may
         * have, as they will have effprio of -1.
         */
        cptr = hed_cpu_prio.prev;
        sch = list_entry(cptr,
                         struct schedule_data,
                         schedule_data_list);
        target_tsk = sch->curr;
        if (p->effprio > sch->effprio) {
                goto preempt_now;
        }
        /*
         * If all cpus are doing real time and we failed
         * above, then there is no help for this task.
         */
        if (sch->effprio)
                goto out_no_target;

	/*
         * Non-real time contender and one or more processors
         * doing non-real time things.
         * So we have a non-real time task contending among
         * other non-real time tasks on one or more processors.
         * We know we have no idle cpus.
         */
	/*
	 * No CPU is idle, but maybe this process has enough priority
	 * to preempt its preferred CPU.
	 */
	target_tsk = cpu_curr(best_cpu);
	if (target_tsk->effprio == 0 &&
            preemption_goodness(target_tsk, p, best_cpu) > 0)
		goto preempt_now;

        for (; cptr != &hed_cpu_prio; cptr = cptr->prev) {
                sch = list_entry(cptr,
                                 struct schedule_data,
                                 schedule_data_list);
                if (sch->effprio != 0)
                        break;
                if (sch->cpu != best_cpu) {
                        target_tsk = sch->curr;
                        if (preemption_goodness(target_tsk, p, sch->cpu) >
                            PREEMPTION_THRESHOLD)
                                goto preempt_now;
		}
	}

out_no_target:
	return;

preempt_now:
	target_cpu = target_tsk->processor;
	target_tsk->need_resched = 1;
	/*
	 * the APIC stuff can go outside of the lock because
	 * it uses no task information, only CPU#.
	 */
	if ((target_cpu != this_cpu)
            && (target_tsk != idle_task(target_cpu)))
		smp_send_reschedule(target_cpu);
	return;
#else /* UP */
	struct task_struct *tsk;

	tsk = cpu_curr(0);
	if ((high_prio > tsk->effprio) ||
            (!tsk->effprio && preemption_goodness(tsk, p, 0) >
             PREEMPTION_THRESHOLD)) {
		tsk->need_resched = 1;
	}
#endif
}

/*
 * This routine maintains the list of smp processors.  This is
 * a bidirectional list maintained in priority order.  The above
 * code uses this list to find a processor to use for a new task.
 * The search will be backward through the list, as we want to take
 * the lowest priority cpu first.  We put equal priorities such that
 * the new one will be ahead of the old, so the new should stay
 * around a bit longer.
 */
#ifdef CONFIG_SMP
static inline void re_queue_cpu(struct task_struct *next,
                                struct schedule_data *sch)
{
        struct list_head *cpuptr;

        list_del(&sch->schedule_data_list);
        sch->effprio = next->effprio;
        cpuptr = hed_cpu_prio.next;
        while (cpuptr != &hed_cpu_prio &&
               sch->effprio < list_entry(cpuptr,
                                         struct schedule_data,
                                         schedule_data_list)->effprio
                )
                cpuptr = cpuptr->next;
        list_add_tail(&sch->schedule_data_list, cpuptr);
        next->newprio = &newprio_executing;
}
#else
#define re_queue_cpu(a,b)
#endif

/*
 * Careful!
 *
 * This has to add the process to the _beginning_ of the
 * run-queue, not the end. See the comment about "This is
 * subtle" in the scheduler proper..
 *
 * For real time tasks we do this a bit differently.  We
 * keep a priority list of ready tasks.  We remove tasks
 * from this list when they are running, so a running real
 * time task will not be in either the ready list or the run
 * queue.  Also, in the name of speed and real time, only
 * priority is important, so we spend a few bytes on the queue.
 * We have a doubly linked list for each priority.  This makes
 * insertion and removal very fast.  We also keep a bit map of
 * the priority queues where a bit says if the queue is empty
 * or not.  We also keep loose track of the highest priority
 * queue that is currently occupied.  This high_prio mark
 * is updated when a higher priority task enters the ready
 * queue and only goes down when we look for a task in the
 * ready queue at high_prio and find none.  Then, and only
 * then, we examine the bit map to find the true high_prio.
 */
#define BF 31  /* bit flip constant */
#define   set_rq_bit(bit)    set_bit(BF-((bit)&0x1f), &rq_bit_map[(bit) >> 5])
#define clear_rq_bit(bit)  clear_bit(BF-((bit)&0x1f), &rq_bit_map[(bit) >> 5])

static inline void _del_from_runqueue(struct task_struct * p)
{
	nr_running--;
        list_del(&p->run_list);
        if (list_empty(Rdy_Q_Hed(p->effprio))) {
                clear_rq_bit(p->effprio);
        }
	/* p->run_list.next = NULL; != 0 prevents requeue */
	p->run_list.next = NULL;
        p->newprio = NULL;
        if (!p->effprio) runq.ticks -= p->counter;
}

/* Exported for main.c, also used in init code here */
void __del_from_runqueue(struct task_struct * p)
{
        _del_from_runqueue(p);
}

static inline struct task_struct * get_next_task(struct task_struct * prev,
                                                 int this_cpu)
{
        struct list_head *next, *rqptr;
        struct task_struct *it = 0;
        int *i, c, oldcounter;

 repeat_schedule:
        rqptr = Rdy_Q_Hed(high_prio);
        next = rqptr->next;
        if (unlikely(next == rqptr)) {
                for (i = &rq_bit_map[MAX_PRI/32], high_prio = BF + ((MAX_PRI/32)*32);
                     (*i == 0); high_prio -= 32, i--);
                high_prio -= ffz(~*i);
                if (unlikely(high_prio < 0)) {
                        /*
                         * No tasks to run, return this cpu's idle task.
                         * It is not in the ready queue, so no need to remove it.
                         * But first make sure its priority keeps it out of
                         * the way.
                         */
                        high_prio = 0;
                        it = idle_task(this_cpu);
                        it->effprio = -1;
                        return it;
                }
                goto repeat_schedule;
        }
        /*
         * If there is only one task on the list, it is a no brainer.
         * But really, this also prevents us from looping on recalculation
         * if the one and only task is trying to yield.  These sorts of
         * loops are NOT_FUN.  Note: we use likely() to tilt toward
         * real-time tasks, even though they are, usually, unlikely.  We
         * are, after all, a real time scheduler.
         */
        if (likely(high_prio || next->next == rqptr)) {
                it = list_entry(next, struct task_struct, run_list);
 back_from_figure_non_rt_next:
                _del_from_runqueue(it);
                return it;
        }
        /*
         * Here we set up a SCHED_OTHER yield.  Note that for other policies
         * yield is handled elsewhere.  This means we can use == and =
         * instead of & and &= to test and clear the flag.  If the prev
         * task has all the runq.ticks, then we just do the recalculation
         * version and let the winner take all (yield fails).  Otherwise
         * we force the counter to zero for the loop and put it back
         * after we found some other task.  We must remember to update
