
📄 sched.c

📁 Scheduler code from the Linux kernel
💻 C
📖 Page 1 of 5
	 */
	if (p->policy == SCHED_IDLE) {
		p->se.load.weight = WEIGHT_IDLEPRIO;
		p->se.load.inv_weight = WMULT_IDLEPRIO;
		return;
	}

	p->se.load.weight = prio_to_weight[p->static_prio - MAX_RT_PRIO];
	p->se.load.inv_weight = prio_to_wmult[p->static_prio - MAX_RT_PRIO];
}

static void update_avg(u64 *avg, u64 sample)
{
	s64 diff = sample - *avg;
	*avg += diff >> 3;
}

static void enqueue_task(struct rq *rq, struct task_struct *p, int wakeup)
{
	sched_info_queued(p);
	p->sched_class->enqueue_task(rq, p, wakeup);
	p->se.on_rq = 1;
}

static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep)
{
	if (sleep && p->se.last_wakeup) {
		update_avg(&p->se.avg_overlap,
			   p->se.sum_exec_runtime - p->se.last_wakeup);
		p->se.last_wakeup = 0;
	}

	sched_info_dequeued(p);
	p->sched_class->dequeue_task(rq, p, sleep);
	p->se.on_rq = 0;
}

/*
 * __normal_prio - return the priority that is based on the static prio
 */
static inline int __normal_prio(struct task_struct *p)
{
	return p->static_prio;
}

/*
 * Calculate the expected normal priority: i.e. priority
 * without taking RT-inheritance into account. Might be
 * boosted by interactivity modifiers. Changes upon fork,
 * setprio syscalls, and whenever the interactivity
 * estimator recalculates.
 */
static inline int normal_prio(struct task_struct *p)
{
	int prio;

	if (task_has_rt_policy(p))
		prio = MAX_RT_PRIO-1 - p->rt_priority;
	else
		prio = __normal_prio(p);
	return prio;
}

/*
 * Calculate the current priority, i.e. the priority
 * taken into account by the scheduler. This value might
 * be boosted by RT tasks, or might be boosted by
 * interactivity modifiers. Will be RT if the task got
 * RT-boosted. If not then it returns p->normal_prio.
 */
static int effective_prio(struct task_struct *p)
{
	p->normal_prio = normal_prio(p);
	/*
	 * If we are RT tasks or we were boosted to RT priority,
	 * keep the priority unchanged. Otherwise, update priority
	 * to the normal priority:
	 */
	if (!rt_prio(p->prio))
		return p->normal_prio;
	return p->prio;
}

/*
 * activate_task - move a task to the runqueue.
 */
static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
{
	if (task_contributes_to_load(p))
		rq->nr_uninterruptible--;

	enqueue_task(rq, p, wakeup);
	inc_nr_running(rq);
}

/*
 * deactivate_task - remove a task from the runqueue.
 */
static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
{
	if (task_contributes_to_load(p))
		rq->nr_uninterruptible++;

	dequeue_task(rq, p, sleep);
	dec_nr_running(rq);
}

/**
 * task_curr - is this task currently executing on a CPU?
 * @p: the task in question.
 */
inline int task_curr(const struct task_struct *p)
{
	return cpu_curr(task_cpu(p)) == p;
}

static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
{
	set_task_rq(p, cpu);
#ifdef CONFIG_SMP
	/*
	 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
	 * successfully executed on another CPU. We must ensure that updates of
	 * per-task data have been completed by this moment.
	 */
	smp_wmb();
	task_thread_info(p)->cpu = cpu;
#endif
}
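/*
 * [Illustrative sketch -- not part of sched.c]
 *
 * update_avg() above nudges the stored average 1/8 of the way toward
 * each new sample (diff >> 3), i.e. it keeps an exponential moving
 * average with weight 1/8.  The stand-alone user-space sketch below
 * reproduces the same arithmetic so the smoothing behaviour can be
 * observed directly; the demo_* names and the sample values are
 * invented for illustration and are not part of the kernel.
 */
#if 0	/* compile separately as a stand-alone user-space program */
#include <stdio.h>
#include <stdint.h>

/* same arithmetic as the kernel's update_avg(), on user-space types */
static void demo_update_avg(uint64_t *avg, uint64_t sample)
{
	int64_t diff = (int64_t)(sample - *avg);

	*avg += diff >> 3;	/* move 1/8 of the way toward the sample */
}

int main(void)
{
	uint64_t avg = 0;
	uint64_t samples[] = { 8000, 8000, 8000, 1000, 1000, 1000 };
	unsigned int i;

	/* the average rises quickly toward 8000, then decays slowly */
	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		demo_update_avg(&avg, samples[i]);
		printf("sample=%llu avg=%llu\n",
		       (unsigned long long)samples[i],
		       (unsigned long long)avg);
	}
	return 0;
}
#endif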
static inline void check_class_changed(struct rq *rq, struct task_struct *p,
				       const struct sched_class *prev_class,
				       int oldprio, int running)
{
	if (prev_class != p->sched_class) {
		if (prev_class->switched_from)
			prev_class->switched_from(rq, p, running);
		p->sched_class->switched_to(rq, p, running);
	} else
		p->sched_class->prio_changed(rq, p, oldprio, running);
}

#ifdef CONFIG_SMP

/* Used instead of source_load when we know the type == 0 */
static unsigned long weighted_cpuload(const int cpu)
{
	return cpu_rq(cpu)->load.weight;
}

/*
 * Is this task likely cache-hot:
 */
static int
task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
{
	s64 delta;

	/*
	 * Buddy candidates are cache hot:
	 */
	if (sched_feat(CACHE_HOT_BUDDY) && (&p->se == cfs_rq_of(&p->se)->next))
		return 1;

	if (p->sched_class != &fair_sched_class)
		return 0;

	if (sysctl_sched_migration_cost == -1)
		return 1;
	if (sysctl_sched_migration_cost == 0)
		return 0;

	delta = now - p->se.exec_start;

	return delta < (s64)sysctl_sched_migration_cost;
}

void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
{
	int old_cpu = task_cpu(p);
	struct rq *old_rq = cpu_rq(old_cpu), *new_rq = cpu_rq(new_cpu);
	struct cfs_rq *old_cfsrq = task_cfs_rq(p),
		      *new_cfsrq = cpu_cfs_rq(old_cfsrq, new_cpu);
	u64 clock_offset;

	clock_offset = old_rq->clock - new_rq->clock;

#ifdef CONFIG_SCHEDSTATS
	if (p->se.wait_start)
		p->se.wait_start -= clock_offset;
	if (p->se.sleep_start)
		p->se.sleep_start -= clock_offset;
	if (p->se.block_start)
		p->se.block_start -= clock_offset;
	if (old_cpu != new_cpu) {
		schedstat_inc(p, se.nr_migrations);
		if (task_hot(p, old_rq->clock, NULL))
			schedstat_inc(p, se.nr_forced2_migrations);
	}
#endif
	p->se.vruntime -= old_cfsrq->min_vruntime -
					 new_cfsrq->min_vruntime;

	__set_task_cpu(p, new_cpu);
}

struct migration_req {
	struct list_head list;

	struct task_struct *task;
	int dest_cpu;

	struct completion done;
};

/*
 * The task's runqueue lock must be held.
 * Returns true if you have to wait for migration thread.
 */
static int
migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
{
	struct rq *rq = task_rq(p);

	/*
	 * If the task is not on a runqueue (and not running), then
	 * it is sufficient to simply update the task's cpu field.
	 */
	if (!p->se.on_rq && !task_running(rq, p)) {
		set_task_cpu(p, dest_cpu);
		return 0;
	}

	init_completion(&req->done);
	req->task = p;
	req->dest_cpu = dest_cpu;
	list_add(&req->list, &rq->migration_queue);

	return 1;
}
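/*
 * [Illustrative sketch -- not part of sched.c]
 *
 * task_hot() above treats a task as cache-hot when the time since it
 * last started executing (now - se.exec_start) is still below the
 * sysctl_sched_migration_cost tunable, with -1 meaning "always hot"
 * and 0 meaning "never hot".  The stand-alone sketch below applies
 * the same comparison to made-up nanosecond values; the demo_* names
 * are invented, and the 500000 ns default is an assumption about the
 * tunable's value in kernels of this era.
 */
#if 0	/* compile separately as a stand-alone user-space program */
#include <stdio.h>
#include <stdint.h>

/* hypothetical stand-in for sysctl_sched_migration_cost (assumed 0.5 ms) */
static int64_t demo_migration_cost_ns = 500000;

static int demo_task_hot(uint64_t now_ns, uint64_t exec_start_ns)
{
	int64_t delta;

	if (demo_migration_cost_ns == -1)	/* tuned to "always cache-hot" */
		return 1;
	if (demo_migration_cost_ns == 0)	/* tuned to "never cache-hot" */
		return 0;

	delta = (int64_t)(now_ns - exec_start_ns);
	return delta < demo_migration_cost_ns;
}

int main(void)
{
	/* ran 100us ago: still hot; ran 2ms ago: considered cold */
	printf("100us ago -> %s\n", demo_task_hot(2000000, 1900000) ? "hot" : "cold");
	printf("2ms ago   -> %s\n", demo_task_hot(3000000, 1000000) ? "hot" : "cold");
	return 0;
}
#endif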
/*
 * wait_task_inactive - wait for a thread to unschedule.
 *
 * If @match_state is nonzero, it's the @p->state value just checked and
 * not expected to change.  If it changes, i.e. @p might have woken up,
 * then return zero.  When we succeed in waiting for @p to be off its CPU,
 * we return a positive number (its total switch count).  If a second call
 * a short while later returns the same number, the caller can be sure that
 * @p has remained unscheduled the whole time.
 *
 * The caller must ensure that the task *will* unschedule sometime soon,
 * else this function might spin for a *long* time. This function can't
 * be called with interrupts off, or it may introduce deadlock with
 * smp_call_function() if an IPI is sent by the same process we are
 * waiting to become inactive.
 */
unsigned long wait_task_inactive(struct task_struct *p, long match_state)
{
	unsigned long flags;
	int running, on_rq;
	unsigned long ncsw;
	struct rq *rq;

	for (;;) {
		/*
		 * We do the initial early heuristics without holding
		 * any task-queue locks at all. We'll only try to get
		 * the runqueue lock when things look like they will
		 * work out!
		 */
		rq = task_rq(p);

		/*
		 * If the task is actively running on another CPU
		 * still, just relax and busy-wait without holding
		 * any locks.
		 *
		 * NOTE! Since we don't hold any locks, it's not
		 * even sure that "rq" stays as the right runqueue!
		 * But we don't care, since "task_running()" will
		 * return false if the runqueue has changed and p
		 * is actually now running somewhere else!
		 */
		while (task_running(rq, p)) {
			if (match_state && unlikely(p->state != match_state))
				return 0;
			cpu_relax();
		}

		/*
		 * Ok, time to look more closely! We need the rq
		 * lock now, to be *sure*. If we're wrong, we'll
		 * just go back and repeat.
		 */
		rq = task_rq_lock(p, &flags);
		running = task_running(rq, p);
		on_rq = p->se.on_rq;
		ncsw = 0;
		if (!match_state || p->state == match_state) {
			ncsw = p->nivcsw + p->nvcsw;
			if (unlikely(!ncsw))
				ncsw = 1;
		}
		task_rq_unlock(rq, &flags);

		/*
		 * If it changed from the expected state, bail out now.
		 */
		if (unlikely(!ncsw))
			break;

		/*
		 * Was it really running after all now that we
		 * checked with the proper locks actually held?
		 *
		 * Oops. Go back and try again..
		 */
		if (unlikely(running)) {
			cpu_relax();
			continue;
		}

		/*
		 * It's not enough that it's not actively running,
		 * it must be off the runqueue _entirely_, and not
		 * preempted!
		 *
		 * So if it was still runnable (but just not actively
		 * running right now), it's preempted, and we should
		 * yield - it could be a while.
		 */
		if (unlikely(on_rq)) {
			schedule_timeout_uninterruptible(1);
			continue;
		}

		/*
		 * Ahh, all good. It wasn't running, and it wasn't
		 * runnable, which means that it will never become
		 * running in the future either. We're all done!
		 */
		break;
	}

	return ncsw;
}

/***
 * kick_process - kick a running thread to enter/exit the kernel
 * @p: the to-be-kicked thread
 *
 * Cause a process which is running on another CPU to enter
 * kernel-mode, without any delay. (to get signals handled.)
 *
 * NOTE: this function doesn't have to take the runqueue lock,
 * because all it wants to ensure is that the remote task enters
 * the kernel. If the IPI races and the task has been migrated
 * to another CPU then no harm is done and the purpose has been
 * achieved as well.
 */
void kick_process(struct task_struct *p)
{
	int cpu;

	preempt_disable();
	cpu = task_cpu(p);
	if ((cpu != smp_processor_id()) && task_curr(p))
		smp_send_reschedule(cpu);
	preempt_enable();
}

/*
 * Return a low guess at the load of a migration-source cpu weighted
 * according to the scheduling class and "nice" value.
 *
 * We want to under-estimate the load of migration sources, to
 * balance conservatively.
 */
static unsigned long source_load(int cpu, int type)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long total = weighted_cpuload(cpu);

	if (type == 0 || !sched_feat(LB_BIAS))
		return total;

	return min(rq->cpu_load[type-1], total);
}
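/*
 * [Illustrative sketch -- not part of sched.c]
 *
 * source_load() above under-estimates a migration source by taking the
 * minimum of the decayed cpu_load[] history and the instantaneous
 * weighted load, while target_load() just below over-estimates a
 * migration target by taking the maximum, so load balancing errs on
 * the side of doing nothing.  The stand-alone sketch below shows the
 * effect of that bias on made-up load figures; the demo_* names and
 * the numbers are invented for illustration.
 */
#if 0	/* compile separately as a stand-alone user-space program */
#include <stdio.h>

static unsigned long demo_source_load(unsigned long hist, unsigned long inst)
{
	return hist < inst ? hist : inst;	/* low guess: give work away reluctantly */
}

static unsigned long demo_target_load(unsigned long hist, unsigned long inst)
{
	return hist > inst ? hist : inst;	/* high guess: pull work in reluctantly */
}

int main(void)
{
	unsigned long hist = 2048, inst = 1024;	/* a load spike that just decayed */

	/* the same CPU looks lighter as a source than as a target */
	printf("as source: %lu, as target: %lu\n",
	       demo_source_load(hist, inst), demo_target_load(hist, inst));
	return 0;
}
#endif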
/*
 * Return a high guess at the load of a migration-target cpu weighted
 * according to the scheduling class and "nice" value.
 */
static unsigned long target_load(int cpu, int type)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long total = weighted_cpuload(cpu);

	if (type == 0 || !sched_feat(LB_BIAS))
		return total;

	return max(rq->cpu_load[type-1], total);
}

/*
 * find_idlest_group finds and returns the least busy CPU group within the
 * domain.
 */
static struct sched_group *
find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
{
	struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups;
	unsigned long min_load = ULONG_MAX, this_load = 0;
	int load_idx = sd->forkexec_idx;
	int imbalance = 100 + (sd->imbalance_pct-100)/2;

	do {
		unsigned long load, avg_load;
		int local_group;
		int i;

		/* Skip over this group if it has no CPUs allowed */
		if (!cpus_intersects(group->cpumask, p->cpus_allowed))
			continue;

		local_group = cpu_isset(this_cpu, group->cpumask);

		/* Tally up the load of all CPUs in the group */
		avg_load = 0;

		for_each_cpu_mask_nr(i, group->cpumask) {
			/* Bias balancing toward cpus of our domain */
			if (local_group)
				load = source_load(i, load_idx);
			else
				load = target_load(i, load_idx);

			avg_load += load;
		}

		/* Adjust by relative CPU power of the group */
		avg_load = sg_div_cpu_power(group,
				avg_load * SCHED_LOAD_SCALE);

		if (local_group) {
			this_load = avg_load;
			this = group;
		} else if (avg_load < min_load) {
			min_load = avg_load;
			idlest = group;
		}
	} while (group = group->next, group != sd->groups);

	if (!idlest || 100*this_load < imbalance*min_load)
		return NULL;
	return idlest;
}

/*
 * find_idlest_cpu - find the idlest cpu among the cpus in group.
 */
static int
find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu,
		cpumask_t *tmp)
{
	unsigned long load, min_load = ULONG_MAX;
	int idlest = -1;
	int i;

	/* Traverse only the allowed CPUs */
	cpus_and(*tmp, group->cpumask, p->cpus_allowed);

	for_each_cpu_mask_nr(i, *tmp) {
		load = weighted_cpuload(i);

		if (load < min_load || (load == min_load && i == this_cpu)) {
			min_load = load;
			idlest = i;
		}
	}

	return idlest;
}

/*
 * sched_balance_self: balance the current task (running on cpu) in domains
 * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and
 * SD_BALANCE_EXEC.
 *
 * Balance, ie. select the least loaded group.
 *
 * Returns the target CPU number, or the same CPU if no balancing is needed.
 *
 * preempt must be disabled.
 */
static int sched_balance_self(int cpu, int flag)
{
	struct task_struct *t = current;
	struct sched_domain *tmp, *sd = NULL;

	for_each_domain(cpu, tmp) {
		/*
		 * If power savings logic is enabled for a domain, stop there.
		 */
		if (tmp->flags & SD_POWERSAVINGS_BALANCE)
			break;
		if (tmp->flags & flag)
			sd = tmp;
	}

	if (sd)
		update_shares(sd);

	while (sd) {
		cpumask_t span, tmpmask;
		struct sched_group *group;
		int new_cpu, weight;

		if (!(sd->flags & flag)) {
			sd = sd->child;
			continue;
		}

		span = sd->span;
		group = find_idlest_group(sd, t, cpu);
		if (!group) {
			sd = sd->child;
			continue;
		}

		new_cpu = find_idlest_cpu(group, t, cpu, &tmpmask);
		if (new_cpu == -1 || new_cpu == cpu) {
			/* Now try balancing at a lower domain level of cpu */
			sd = sd->child;
			continue;
		}

		/* Now try balancing at a lower domain level of new_cpu */
		cpu = new_cpu;
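/*
 * [Illustrative sketch -- not part of sched.c; the listing above is cut
 * at the page boundary inside sched_balance_self()]
 *
 * find_idlest_group() above sums each group's per-CPU load, normalises
 * the sum by the group's CPU power, and only reports a remote group as
 * "idlest" when the local group is busier by at least the imbalance
 * margin derived from imbalance_pct.  The stand-alone sketch below
 * replays that comparison for two hypothetical groups; the values,
 * imbalance_pct = 125, and the DEMO_* names are assumptions made for
 * illustration only.
 */
#if 0	/* compile separately as a stand-alone user-space program */
#include <stdio.h>

#define DEMO_SCHED_LOAD_SCALE 1024UL

int main(void)
{
	/* made-up summed loads and CPU "power" for a local and a remote group */
	unsigned long this_load = 3000, this_power = 2048;
	unsigned long that_load = 1000, that_power = 2048;
	unsigned long imbalance = 100 + (125 - 100) / 2;	/* imbalance_pct = 125 */

	/* normalise each group's load by its relative CPU power */
	unsigned long this_avg = this_load * DEMO_SCHED_LOAD_SCALE / this_power;
	unsigned long that_avg = that_load * DEMO_SCHED_LOAD_SCALE / that_power;

	/* same test as find_idlest_group(): keep the local group unless it is
	 * busier than the remote minimum by the imbalance margin */
	if (100 * this_avg < imbalance * that_avg)
		printf("stay local (this_avg=%lu that_avg=%lu)\n", this_avg, that_avg);
	else
		printf("move to idlest group (this_avg=%lu that_avg=%lu)\n",
		       this_avg, that_avg);
	return 0;
}
#endif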
