
sched_rt.c

Real-time scheduling class code from the Linux kernel
Language: C
Page 1 of 3
static inline
void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
        WARN_ON(!rt_prio(rt_se_prio(rt_se)));
        rt_rq->rt_nr_running++;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
        if (rt_se_prio(rt_se) < rt_rq->highest_prio) {
#ifdef CONFIG_SMP
                struct rq *rq = rq_of_rt_rq(rt_rq);
#endif

                rt_rq->highest_prio = rt_se_prio(rt_se);
#ifdef CONFIG_SMP
                if (rq->online)
                        cpupri_set(&rq->rd->cpupri, rq->cpu,
                                   rt_se_prio(rt_se));
#endif
        }
#endif
#ifdef CONFIG_SMP
        if (rt_se->nr_cpus_allowed > 1) {
                struct rq *rq = rq_of_rt_rq(rt_rq);

                rq->rt.rt_nr_migratory++;
        }

        update_rt_migration(rq_of_rt_rq(rt_rq));
#endif
#ifdef CONFIG_RT_GROUP_SCHED
        if (rt_se_boosted(rt_se))
                rt_rq->rt_nr_boosted++;

        if (rt_rq->tg)
                start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
#else
        start_rt_bandwidth(&def_rt_bandwidth);
#endif
}

static inline
void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
#ifdef CONFIG_SMP
        int highest_prio = rt_rq->highest_prio;
#endif

        WARN_ON(!rt_prio(rt_se_prio(rt_se)));
        WARN_ON(!rt_rq->rt_nr_running);
        rt_rq->rt_nr_running--;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
        if (rt_rq->rt_nr_running) {
                struct rt_prio_array *array;

                WARN_ON(rt_se_prio(rt_se) < rt_rq->highest_prio);
                if (rt_se_prio(rt_se) == rt_rq->highest_prio) {
                        /* recalculate */
                        array = &rt_rq->active;
                        rt_rq->highest_prio =
                                sched_find_first_bit(array->bitmap);
                } /* otherwise leave rt_rq->highest_prio alone */
        } else
                rt_rq->highest_prio = MAX_RT_PRIO;
#endif
#ifdef CONFIG_SMP
        if (rt_se->nr_cpus_allowed > 1) {
                struct rq *rq = rq_of_rt_rq(rt_rq);

                rq->rt.rt_nr_migratory--;
        }

        if (rt_rq->highest_prio != highest_prio) {
                struct rq *rq = rq_of_rt_rq(rt_rq);

                if (rq->online)
                        cpupri_set(&rq->rd->cpupri, rq->cpu,
                                   rt_rq->highest_prio);
        }

        update_rt_migration(rq_of_rt_rq(rt_rq));
#endif /* CONFIG_SMP */
#ifdef CONFIG_RT_GROUP_SCHED
        if (rt_se_boosted(rt_se))
                rt_rq->rt_nr_boosted--;

        WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
#endif
}

static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
{
        struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
        struct rt_prio_array *array = &rt_rq->active;
        struct rt_rq *group_rq = group_rt_rq(rt_se);
        struct list_head *queue = array->queue + rt_se_prio(rt_se);

        /*
         * Don't enqueue the group if it's throttled, or when empty.
         * The latter is a consequence of the former when a child group
         * gets throttled and the current group doesn't have any other
         * active members.
         */
        if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
                return;

        list_add_tail(&rt_se->run_list, queue);
        __set_bit(rt_se_prio(rt_se), array->bitmap);

        inc_rt_tasks(rt_se, rt_rq);
}

static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
        struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
        struct rt_prio_array *array = &rt_rq->active;

        list_del_init(&rt_se->run_list);
        if (list_empty(array->queue + rt_se_prio(rt_se)))
                __clear_bit(rt_se_prio(rt_se), array->bitmap);

        dec_rt_tasks(rt_se, rt_rq);
}

/*
 * Because the prio of an upper entry depends on the lower
 * entries, we must remove entries top-down.
 */
static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
{
        struct sched_rt_entity *back = NULL;

        for_each_sched_rt_entity(rt_se) {
                rt_se->back = back;
                back = rt_se;
        }

        for (rt_se = back; rt_se; rt_se = rt_se->back) {
                if (on_rt_rq(rt_se))
                        __dequeue_rt_entity(rt_se);
        }
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
{
        dequeue_rt_stack(rt_se);
        for_each_sched_rt_entity(rt_se)
                __enqueue_rt_entity(rt_se);
}

static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
        dequeue_rt_stack(rt_se);

        for_each_sched_rt_entity(rt_se) {
                struct rt_rq *rt_rq = group_rt_rq(rt_se);

                if (rt_rq && rt_rq->rt_nr_running)
                        __enqueue_rt_entity(rt_se);
        }
}

/*
 * Adding/removing a task to/from a priority array:
 */
static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
{
        struct sched_rt_entity *rt_se = &p->rt;

        if (wakeup)
                rt_se->timeout = 0;

        enqueue_rt_entity(rt_se);

        inc_cpu_load(rq, p->se.load.weight);
}

static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
{
        struct sched_rt_entity *rt_se = &p->rt;

        update_curr_rt(rq);
        dequeue_rt_entity(rt_se);

        dec_cpu_load(rq, p->se.load.weight);
}

/*
 * Put task to the end of the run list without the overhead of dequeue
 * followed by enqueue.
 */
static void
requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
{
        if (on_rt_rq(rt_se)) {
                struct rt_prio_array *array = &rt_rq->active;
                struct list_head *queue = array->queue + rt_se_prio(rt_se);

                if (head)
                        list_move(&rt_se->run_list, queue);
                else
                        list_move_tail(&rt_se->run_list, queue);
        }
}

static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
{
        struct sched_rt_entity *rt_se = &p->rt;
        struct rt_rq *rt_rq;

        for_each_sched_rt_entity(rt_se) {
                rt_rq = rt_rq_of_se(rt_se);
                requeue_rt_entity(rt_rq, rt_se, head);
        }
}

static void yield_task_rt(struct rq *rq)
{
        requeue_task_rt(rq, rq->curr, 0);
}

#ifdef CONFIG_SMP
static int find_lowest_rq(struct task_struct *task);

static int select_task_rq_rt(struct task_struct *p, int sync)
{
        struct rq *rq = task_rq(p);

        /*
         * If the current task is an RT task, then
         * try to see if we can wake this RT task up on another
         * runqueue. Otherwise simply start this RT task
         * on its current runqueue.
         *
         * We want to avoid overloading runqueues, even if
         * the RT task is of higher priority than the current RT task.
         * RT tasks behave differently than other tasks: if
         * one gets preempted, we try to push it off to another queue.
         * So trying to keep a preempting RT task on the same
         * cache hot CPU will force the running RT task to
         * a cold CPU. So we waste all the cache for the lower
         * RT task in hopes of saving some of an RT task
         * that is just being woken and probably will have
         * cold cache anyway.
         */
        if (unlikely(rt_task(rq->curr)) &&
            (p->rt.nr_cpus_allowed > 1)) {
                int cpu = find_lowest_rq(p);

                return (cpu == -1) ? task_cpu(p) : cpu;
        }

        /*
         * Otherwise, just let it ride on the affined RQ and the
         * post-schedule router will push the preempted task away
         */
        return task_cpu(p);
}

static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
{
        cpumask_t mask;

        if (rq->curr->rt.nr_cpus_allowed == 1)
                return;

        if (p->rt.nr_cpus_allowed != 1
            && cpupri_find(&rq->rd->cpupri, p, &mask))
                return;

        if (!cpupri_find(&rq->rd->cpupri, rq->curr, &mask))
                return;

        /*
         * There appear to be other cpus that can accept
         * current and none to run 'p', so let's reschedule
         * to try and push current away:
         */
        requeue_task_rt(rq, p, 1);
        resched_task(rq->curr);
}

#endif /* CONFIG_SMP */

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
{
        if (p->prio < rq->curr->prio) {
                resched_task(rq->curr);
                return;
        }

#ifdef CONFIG_SMP
        /*
         * If:
         *
         * - the newly woken task is of equal priority to the current task
         * - the newly woken task is non-migratable while current is migratable
         * - current will be preempted on the next reschedule
         *
         * we should check to see if current can readily move to a different
         * cpu.  If so, we will reschedule to allow the push logic to try
         * to move current somewhere else, making room for our non-migratable
         * task.
         */
        if (p->prio == rq->curr->prio && !need_resched())
                check_preempt_equal_prio(rq, p);
#endif
}

static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
                                                   struct rt_rq *rt_rq)
{
        struct rt_prio_array *array = &rt_rq->active;
        struct sched_rt_entity *next = NULL;
        struct list_head *queue;
        int idx;

        idx = sched_find_first_bit(array->bitmap);
        BUG_ON(idx >= MAX_RT_PRIO);

        queue = array->queue + idx;
        next = list_entry(queue->next, struct sched_rt_entity, run_list);

        return next;
}

static struct task_struct *pick_next_task_rt(struct rq *rq)
{
        struct sched_rt_entity *rt_se;
        struct task_struct *p;
        struct rt_rq *rt_rq;

        rt_rq = &rq->rt;

        if (unlikely(!rt_rq->rt_nr_running))
                return NULL;

        if (rt_rq_throttled(rt_rq))
                return NULL;

        do {
                rt_se = pick_next_rt_entity(rq, rt_rq);
                BUG_ON(!rt_se);
                rt_rq = group_rt_rq(rt_se);
        } while (rt_rq);

        p = rt_task_of(rt_se);
        p->se.exec_start = rq->clock;

        return p;
}

static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
        update_curr_rt(rq);
        p->se.exec_start = 0;
}

#ifdef CONFIG_SMP

/* Only try algorithms three times */
#define RT_MAX_TRIES 3

static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
static void double_unlock_balance(struct rq *this_rq, struct rq *busiest);

static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);

static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
        if (!task_running(rq, p) &&
            (cpu < 0 || cpu_isset(cpu, p->cpus_allowed)) &&
            (p->rt.nr_cpus_allowed > 1))
                return 1;
        return 0;
}

/* Return the second highest RT task, NULL otherwise */
static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
{
        struct task_struct *next = NULL;
        struct sched_rt_entity *rt_se;
        struct rt_prio_array *array;
        struct rt_rq *rt_rq;
        int idx;

        for_each_leaf_rt_rq(rt_rq, rq) {
                array = &rt_rq->active;
                idx = sched_find_first_bit(array->bitmap);
 next_idx:
                if (idx >= MAX_RT_PRIO)
                        continue;
                if (next && next->prio < idx)
                        continue;
                list_for_each_entry(rt_se, array->queue + idx, run_list) {
                        struct task_struct *p = rt_task_of(rt_se);
                        if (pick_rt_task(rq, p, cpu)) {
                                next = p;
                                break;
                        }
                }
                if (!next) {
                        idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
                        goto next_idx;
                }
        }

        return next;
}

static DEFINE_PER_CPU(cpumask_t, local_cpu_mask);

static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
{
        int first;

        /* "this_cpu" is cheaper to preempt than a remote processor */
        if ((this_cpu != -1) && cpu_isset(this_cpu, *mask))
                return this_cpu;

        first = first_cpu(*mask);
        if (first != NR_CPUS)
                return first;

        return -1;
}

static int find_lowest_rq(struct task_struct *task)
{
        struct sched_domain *sd;
        cpumask_t *lowest_mask = &__get_cpu_var(local_cpu_mask);
        int this_cpu = smp_processor_id();
        int cpu      = task_cpu(task);

        if (task->rt.nr_cpus_allowed == 1)
                return -1; /* No other targets possible */

        if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
                return -1; /* No targets found */

        /*
         * Only consider CPUs that are usable for migration.
         * I guess we might want to change cpupri_find() to ignore those
         * in the first place.
         */
        cpus_and(*lowest_mask, *lowest_mask, cpu_active_map);

        /*
         * At this point we have built a mask of cpus representing the
         * lowest priority tasks in the system.  Now we want to elect
         * the best one based on our affinity and topology.
         *
         * We prioritize the last cpu that the task executed on since
         * it is most likely cache-hot in that location.
         */
        if (cpu_isset(cpu, *lowest_mask))
                return cpu;

        /*
         * Otherwise, we consult the sched_domains span maps to figure
         * out which cpu is logically closest to our hot cache data.
         */
        if (this_cpu == cpu)
                this_cpu = -1; /* Skip this_cpu opt if the same */

        for_each_domain(cpu, sd) {
                if (sd->flags & SD_WAKE_AFFINE) {
                        cpumask_t domain_mask;
                        int       best_cpu;

                        cpus_and(domain_mask, sd->span, *lowest_mask);

                        best_cpu = pick_optimal_cpu(this_cpu,
                                                    &domain_mask);
                        if (best_cpu != -1)
                                return best_cpu;
                }
        }

        /*
         * And finally, if there were no matches within the domains
         * just give the caller *something* to work with from the compatible
         * locations.
         */
        return pick_optimal_cpu(this_cpu, lowest_mask);
}

/* Will lock the rq it finds */
static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
{
        struct rq *lowest_rq = NULL;
        int tries;
        int cpu;

        for (tries = 0; tries < RT_MAX_TRIES; tries++) {
                cpu = find_lowest_rq(task);

                if ((cpu == -1) || (cpu == rq->cpu))
                        break;

                lowest_rq = cpu_rq(cpu);
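
The listing above stops at the page break inside find_lock_lowest_rq(); pages 2 and 3 continue the file. As a reading aid, here is a standalone user-space sketch (not kernel code) of the data structure these functions manipulate: struct rt_prio_array, a bitmap of non-empty priority levels plus one FIFO queue per level, which is what makes the sched_find_first_bit() lookup in pick_next_rt_entity() an O(1) pick. Every name prefixed with toy_ is invented for this illustration, and __builtin_ctzl() stands in for the kernel's sched_find_first_bit().

/*
 * Simplified, self-contained model of the rt_prio_array idea.
 * Compile with: cc -std=c99 toy_rt_array.c
 */
#include <stdio.h>
#include <string.h>

#define TOY_MAX_RT_PRIO 100
#define TOY_BITS_PER_LONG (8 * (int)sizeof(unsigned long))
#define TOY_BITMAP_LONGS ((TOY_MAX_RT_PRIO + TOY_BITS_PER_LONG - 1) / TOY_BITS_PER_LONG)

struct toy_task {
        int prio;                /* lower value = higher priority, as in the RT class */
        const char *name;
        struct toy_task *next;   /* a singly linked FIFO is enough for the sketch */
};

struct toy_prio_array {
        unsigned long bitmap[TOY_BITMAP_LONGS];    /* one bit per non-empty priority */
        struct toy_task *head[TOY_MAX_RT_PRIO];    /* FIFO per priority level */
        struct toy_task *tail[TOY_MAX_RT_PRIO];
};

/* Mirrors __enqueue_rt_entity(): append to the per-priority queue, set the bit. */
static void toy_enqueue(struct toy_prio_array *a, struct toy_task *t)
{
        t->next = NULL;
        if (a->tail[t->prio])
                a->tail[t->prio]->next = t;
        else
                a->head[t->prio] = t;
        a->tail[t->prio] = t;
        a->bitmap[t->prio / TOY_BITS_PER_LONG] |= 1UL << (t->prio % TOY_BITS_PER_LONG);
}

/* Mirrors pick_next_rt_entity(): first set bit, then the head of that queue. */
static struct toy_task *toy_pick_next(struct toy_prio_array *a)
{
        int i;

        for (i = 0; i < TOY_BITMAP_LONGS; i++) {
                if (a->bitmap[i]) {
                        int prio = i * TOY_BITS_PER_LONG + __builtin_ctzl(a->bitmap[i]);
                        return a->head[prio];
                }
        }
        return NULL;    /* nothing runnable */
}

int main(void)
{
        struct toy_prio_array array;
        struct toy_task a = { .prio = 40, .name = "worker" };
        struct toy_task b = { .prio = 10, .name = "irq-thread" };

        memset(&array, 0, sizeof(array));
        toy_enqueue(&array, &a);
        toy_enqueue(&array, &b);

        /* Prints "next: irq-thread": priority 10 beats 40. */
        printf("next: %s\n", toy_pick_next(&array)->name);
        return 0;
}

Enqueueing appends to the tail of the per-priority queue and sets the corresponding bit, much as __enqueue_rt_entity() does with list_add_tail() and __set_bit(); picking the next task finds the first set bit and takes the head of that queue, as pick_next_rt_entity() does. The kernel version additionally walks the group-scheduling hierarchy and keeps bookkeeping (highest_prio, rt_nr_migratory, cpupri) that this sketch omits.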
