
📄 sched_rt.c

📁 Real-time scheduling class code from the Linux kernel
💻 C
📖 Page 1 of 3
		/* if the prio of this runqueue changed, try again */
		if (double_lock_balance(rq, lowest_rq)) {
			/*
			 * We had to unlock the run queue. In
			 * the meantime, task could have
			 * migrated already or had its affinity changed.
			 * Also make sure that it wasn't scheduled on its rq.
			 */
			if (unlikely(task_rq(task) != rq ||
				     !cpu_isset(lowest_rq->cpu,
						task->cpus_allowed) ||
				     task_running(rq, task) ||
				     !task->se.on_rq)) {
				spin_unlock(&lowest_rq->lock);
				lowest_rq = NULL;
				break;
			}
		}

		/* If this rq is still suitable use it. */
		if (lowest_rq->rt.highest_prio > task->prio)
			break;

		/* try again */
		double_unlock_balance(rq, lowest_rq);
		lowest_rq = NULL;
	}

	return lowest_rq;
}

/*
 * If the current CPU has more than one RT task, see if the non
 * running task can migrate over to a CPU that is running a task
 * of lesser priority.
 */
static int push_rt_task(struct rq *rq)
{
	struct task_struct *next_task;
	struct rq *lowest_rq;
	int ret = 0;
	int paranoid = RT_MAX_TRIES;

	if (!rq->rt.overloaded)
		return 0;

	next_task = pick_next_highest_task_rt(rq, -1);
	if (!next_task)
		return 0;

 retry:
	if (unlikely(next_task == rq->curr)) {
		WARN_ON(1);
		return 0;
	}

	/*
	 * It's possible that the next_task slipped in at a
	 * higher priority than current. If that's the case
	 * just reschedule current.
	 */
	if (unlikely(next_task->prio < rq->curr->prio)) {
		resched_task(rq->curr);
		return 0;
	}

	/* We might release rq lock */
	get_task_struct(next_task);

	/* find_lock_lowest_rq locks the rq if found */
	lowest_rq = find_lock_lowest_rq(next_task, rq);
	if (!lowest_rq) {
		struct task_struct *task;
		/*
		 * find_lock_lowest_rq releases rq->lock
		 * so it is possible that next_task has changed.
		 * If it has, then try again.
		 */
		task = pick_next_highest_task_rt(rq, -1);
		if (unlikely(task != next_task) && task && paranoid--) {
			put_task_struct(next_task);
			next_task = task;
			goto retry;
		}
		goto out;
	}

	deactivate_task(rq, next_task, 0);
	set_task_cpu(next_task, lowest_rq->cpu);
	activate_task(lowest_rq, next_task, 0);

	resched_task(lowest_rq->curr);

	double_unlock_balance(rq, lowest_rq);

	ret = 1;
out:
	put_task_struct(next_task);

	return ret;
}

/*
 * TODO: Currently we just use the second highest prio task on
 *       the queue, and stop when it can't migrate (or there's
 *       no more RT tasks).  There may be a case where a lower
 *       priority RT task has a different affinity than the
 *       higher RT task. In this case the lower RT task could
 *       possibly be able to migrate whereas the higher priority
 *       RT task could not.  We currently ignore this issue.
 *       Enhancements are welcome!
 */
static void push_rt_tasks(struct rq *rq)
{
	/* push_rt_task will return true if it moved an RT */
	while (push_rt_task(rq))
		;
}

static int pull_rt_task(struct rq *this_rq)
{
	int this_cpu = this_rq->cpu, ret = 0, cpu;
	struct task_struct *p, *next;
	struct rq *src_rq;

	if (likely(!rt_overloaded(this_rq)))
		return 0;

	next = pick_next_task_rt(this_rq);

	for_each_cpu_mask_nr(cpu, this_rq->rd->rto_mask) {
		if (this_cpu == cpu)
			continue;

		src_rq = cpu_rq(cpu);
		/*
		 * We can potentially drop this_rq's lock in
		 * double_lock_balance, and another CPU could
		 * steal our next task - hence we must cause
		 * the caller to recalculate the next task
		 * in that case:
		 */
		if (double_lock_balance(this_rq, src_rq)) {
			struct task_struct *old_next = next;

			next = pick_next_task_rt(this_rq);
			if (next != old_next)
				ret = 1;
		}

		/*
		 * Are there still pullable RT tasks?
		 */
		if (src_rq->rt.rt_nr_running <= 1)
			goto skip;

		p = pick_next_highest_task_rt(src_rq, this_cpu);

		/*
		 * Do we have an RT task that preempts
		 * the to-be-scheduled task?
		 */
		if (p && (!next || (p->prio < next->prio))) {
			WARN_ON(p == src_rq->curr);
			WARN_ON(!p->se.on_rq);

			/*
			 * There's a chance that p is higher in priority
			 * than what's currently running on its cpu.
			 * This is just that p is waking up and hasn't
			 * had a chance to schedule. We only pull
			 * p if it is lower in priority than the
			 * current task on the run queue or
			 * this_rq next task is lower in prio than
			 * the current task on that rq.
			 */
			if (p->prio < src_rq->curr->prio ||
			    (next && next->prio < src_rq->curr->prio))
				goto skip;

			ret = 1;

			deactivate_task(src_rq, p, 0);
			set_task_cpu(p, this_cpu);
			activate_task(this_rq, p, 0);
			/*
			 * We continue with the search, just in
			 * case there's an even higher prio task
			 * in another runqueue. (low likelihood
			 * but possible)
			 *
			 * Update next so that we won't pick a task
			 * on another cpu with a priority lower (or equal)
			 * than the one we just picked.
			 */
			next = p;
		}
 skip:
		double_unlock_balance(this_rq, src_rq);
	}

	return ret;
}

static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
{
	/* Try to pull RT tasks here if we lower this rq's prio */
	if (unlikely(rt_task(prev)) && rq->rt.highest_prio > prev->prio)
		pull_rt_task(rq);
}

static void post_schedule_rt(struct rq *rq)
{
	/*
	 * If we have more than one rt_task queued, then
	 * see if we can push the other rt_tasks off to other CPUs.
	 * Note we may release the rq lock, and since
	 * the lock was owned by prev, we need to release it
	 * first via finish_lock_switch and then reacquire it here.
	 */
	if (unlikely(rq->rt.overloaded)) {
		spin_lock_irq(&rq->lock);
		push_rt_tasks(rq);
		spin_unlock_irq(&rq->lock);
	}
}

/*
 * If we are not running and we are not going to reschedule soon, we should
 * try to push tasks away now
 */
static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
{
	if (!task_running(rq, p) &&
	    !test_tsk_need_resched(rq->curr) &&
	    rq->rt.overloaded)
		push_rt_tasks(rq);
}

static unsigned long
load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
		unsigned long max_load_move,
		struct sched_domain *sd, enum cpu_idle_type idle,
		int *all_pinned, int *this_best_prio)
{
	/* don't touch RT tasks */
	return 0;
}

static int
move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
		 struct sched_domain *sd, enum cpu_idle_type idle)
{
	/* don't touch RT tasks */
	return 0;
}

static void set_cpus_allowed_rt(struct task_struct *p,
				const cpumask_t *new_mask)
{
	int weight = cpus_weight(*new_mask);

	BUG_ON(!rt_task(p));

	/*
	 * Update the migration status of the RQ if we have an RT task
	 * which is running AND changing its weight value.
	 */
	if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
		struct rq *rq = task_rq(p);

		if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
			rq->rt.rt_nr_migratory++;
		} else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
			BUG_ON(!rq->rt.rt_nr_migratory);
			rq->rt.rt_nr_migratory--;
		}

		update_rt_migration(rq);
	}

	p->cpus_allowed    = *new_mask;
	p->rt.nr_cpus_allowed = weight;
}

/* Assumes rq->lock is held */
static void rq_online_rt(struct rq *rq)
{
	if (rq->rt.overloaded)
		rt_set_overload(rq);

	__enable_runtime(rq);

	cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio);
}

/* Assumes rq->lock is held */
static void rq_offline_rt(struct rq *rq)
{
	if (rq->rt.overloaded)
		rt_clear_overload(rq);

	__disable_runtime(rq);

	cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
}

/*
 * When switching from the rt queue, we bring ourselves to a position
 * that we might want to pull RT tasks from other runqueues.
 */
static void switched_from_rt(struct rq *rq, struct task_struct *p,
			   int running)
{
	/*
	 * If there are other RT tasks then we will reschedule
	 * and the scheduling of the other RT tasks will handle
	 * the balancing. But if we are the last RT task
	 * we may need to handle the pulling of RT tasks
	 * now.
	 */
	if (!rq->rt.rt_nr_running)
		pull_rt_task(rq);
}
#endif /* CONFIG_SMP */

/*
 * When switching a task to RT, we may overload the runqueue
 * with RT tasks. In this case we try to push them off to
 * other runqueues.
 */
static void switched_to_rt(struct rq *rq, struct task_struct *p,
			   int running)
{
	int check_resched = 1;

	/*
	 * If we are already running, then there's nothing
	 * that needs to be done. But if we are not running
	 * we may need to preempt the current running task.
	 * If that current running task is also an RT task
	 * then see if we can move to another run queue.
	 */
	if (!running) {
#ifdef CONFIG_SMP
		if (rq->rt.overloaded && push_rt_task(rq) &&
		    /* Don't resched if we changed runqueues */
		    rq != task_rq(p))
			check_resched = 0;
#endif /* CONFIG_SMP */
		if (check_resched && p->prio < rq->curr->prio)
			resched_task(rq->curr);
	}
}

/*
 * Priority of the task has changed. This may cause
 * us to initiate a push or pull.
 */
static void prio_changed_rt(struct rq *rq, struct task_struct *p,
			    int oldprio, int running)
{
	if (running) {
#ifdef CONFIG_SMP
		/*
		 * If our priority decreases while running, we
		 * may need to pull tasks to this runqueue.
		 */
		if (oldprio < p->prio)
			pull_rt_task(rq);
		/*
		 * If there's a higher priority task waiting to run
		 * then reschedule. Note, the above pull_rt_task
		 * can release the rq lock and p could migrate.
		 * Only reschedule if p is still on the same runqueue.
		 */
		if (p->prio > rq->rt.highest_prio && rq->curr == p)
			resched_task(p);
#else
		/* For UP simply resched on drop of prio */
		if (oldprio < p->prio)
			resched_task(p);
#endif /* CONFIG_SMP */
	} else {
		/*
		 * This task is not running, but if it is
		 * higher in priority than the current running task
		 * then reschedule.
		 */
		if (p->prio < rq->curr->prio)
			resched_task(rq->curr);
	}
}

static void watchdog(struct rq *rq, struct task_struct *p)
{
	unsigned long soft, hard;

	if (!p->signal)
		return;

	soft = p->signal->rlim[RLIMIT_RTTIME].rlim_cur;
	hard = p->signal->rlim[RLIMIT_RTTIME].rlim_max;

	if (soft != RLIM_INFINITY) {
		unsigned long next;

		p->rt.timeout++;
		next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
		if (p->rt.timeout > next)
			p->it_sched_expires = p->se.sum_exec_runtime;
	}
}

static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
{
	update_curr_rt(rq);

	watchdog(rq, p);

	/*
	 * RR tasks need a special form of timeslice management.
	 * FIFO tasks have no timeslices.
	 */
	if (p->policy != SCHED_RR)
		return;

	if (--p->rt.time_slice)
		return;

	p->rt.time_slice = DEF_TIMESLICE;

	/*
	 * Requeue to the end of queue if we are not the only element
	 * on the queue:
	 */
	if (p->rt.run_list.prev != p->rt.run_list.next) {
		requeue_task_rt(rq, p, 0);
		set_tsk_need_resched(p);
	}
}

static void set_curr_task_rt(struct rq *rq)
{
	struct task_struct *p = rq->curr;

	p->se.exec_start = rq->clock;
}

static const struct sched_class rt_sched_class = {
	.next			= &fair_sched_class,
	.enqueue_task		= enqueue_task_rt,
	.dequeue_task		= dequeue_task_rt,
	.yield_task		= yield_task_rt,
#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_rt,
#endif /* CONFIG_SMP */

	.check_preempt_curr	= check_preempt_curr_rt,

	.pick_next_task		= pick_next_task_rt,
	.put_prev_task		= put_prev_task_rt,

#ifdef CONFIG_SMP
	.load_balance		= load_balance_rt,
	.move_one_task		= move_one_task_rt,
	.set_cpus_allowed	= set_cpus_allowed_rt,
	.rq_online		= rq_online_rt,
	.rq_offline		= rq_offline_rt,
	.pre_schedule		= pre_schedule_rt,
	.post_schedule		= post_schedule_rt,
	.task_wake_up		= task_wake_up_rt,
	.switched_from		= switched_from_rt,
#endif

	.set_curr_task		= set_curr_task_rt,
	.task_tick		= task_tick_rt,

	.prio_changed		= prio_changed_rt,
	.switched_to		= switched_to_rt,
};

#ifdef CONFIG_SCHED_DEBUG
extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);

static void print_rt_stats(struct seq_file *m, int cpu)
{
	struct rt_rq *rt_rq;

	rcu_read_lock();
	for_each_leaf_rt_rq(rt_rq, cpu_rq(cpu))
		print_rt_rq(m, cpu, rt_rq);
	rcu_read_unlock();
}
#endif /* CONFIG_SCHED_DEBUG */
