
📄 sched.c

📁 Linux 2.6.19 kernel source code, before patching
💻 C
📖 Page 1 of 5
        }
#endif
        /*
         * Sleep time is in units of nanosecs, so shift by 20 to get a
         * milliseconds-range estimation of the amount of time that the task
         * spent sleeping:
         */
        if (unlikely(prof_on == SLEEP_PROFILING)) {
                if (p->state == TASK_UNINTERRUPTIBLE)
                        profile_hits(SLEEP_PROFILING, (void *)get_wchan(p),
                                     (now - p->timestamp) >> 20);
        }

        p->prio = recalc_task_prio(p, now);

        /*
         * This checks to make sure it's not an uninterruptible task
         * that is now waking up.
         */
        if (p->sleep_type == SLEEP_NORMAL) {
                /*
                 * Tasks which were woken up by interrupts (ie. hw events)
                 * are most likely of interactive nature. So we give them
                 * the credit of extending their sleep time to the period
                 * of time they spend on the runqueue, waiting for execution
                 * on a CPU, first time around:
                 */
                if (in_interrupt())
                        p->sleep_type = SLEEP_INTERRUPTED;
                else {
                        /*
                         * Normal first-time wakeups get a credit too for
                         * on-runqueue time, but it will be weighted down:
                         */
                        p->sleep_type = SLEEP_INTERACTIVE;
                }
        }
        p->timestamp = now;
out:
        __activate_task(p, rq);
}

/*
 * deactivate_task - remove a task from the runqueue.
 */
static void deactivate_task(struct task_struct *p, struct rq *rq)
{
        dec_nr_running(p, rq);
        dequeue_task(p, p->array);
        p->array = NULL;
}

/*
 * resched_task - mark a task 'to be rescheduled now'.
 *
 * On UP this means the setting of the need_resched flag, on SMP it
 * might also involve a cross-CPU call to trigger the scheduler on
 * the target CPU.
 */
#ifdef CONFIG_SMP

#ifndef tsk_is_polling
#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
#endif

static void resched_task(struct task_struct *p)
{
        int cpu;

        assert_spin_locked(&task_rq(p)->lock);

        if (unlikely(test_tsk_thread_flag(p, TIF_NEED_RESCHED)))
                return;

        set_tsk_thread_flag(p, TIF_NEED_RESCHED);

        cpu = task_cpu(p);
        if (cpu == smp_processor_id())
                return;

        /* NEED_RESCHED must be visible before we test polling */
        smp_mb();
        if (!tsk_is_polling(p))
                smp_send_reschedule(cpu);
}

static void resched_cpu(int cpu)
{
        struct rq *rq = cpu_rq(cpu);
        unsigned long flags;

        if (!spin_trylock_irqsave(&rq->lock, flags))
                return;
        resched_task(cpu_curr(cpu));
        spin_unlock_irqrestore(&rq->lock, flags);
}
#else
static inline void resched_task(struct task_struct *p)
{
        assert_spin_locked(&task_rq(p)->lock);
        set_tsk_need_resched(p);
}
#endif

/**
 * task_curr - is this task currently executing on a CPU?
 * @p: the task in question.
 */
inline int task_curr(const struct task_struct *p)
{
        return cpu_curr(task_cpu(p)) == p;
}

/* Used instead of source_load when we know the type == 0 */
unsigned long weighted_cpuload(const int cpu)
{
        return cpu_rq(cpu)->raw_weighted_load;
}

#ifdef CONFIG_SMP
struct migration_req {
        struct list_head list;

        struct task_struct *task;
        int dest_cpu;

        struct completion done;
};

/*
 * The task's runqueue lock must be held.
 * Returns true if you have to wait for migration thread.
 */
static int
migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
{
        struct rq *rq = task_rq(p);

        /*
         * If the task is not on a runqueue (and not running), then
         * it is sufficient to simply update the task's cpu field.
         */
        if (!p->array && !task_running(rq, p)) {
                set_task_cpu(p, dest_cpu);
                return 0;
        }

        init_completion(&req->done);
        req->task = p;
        req->dest_cpu = dest_cpu;
        list_add(&req->list, &rq->migration_queue);

        return 1;
}

/*
 * wait_task_inactive - wait for a thread to unschedule.
 *
 * The caller must ensure that the task *will* unschedule sometime soon,
 * else this function might spin for a *long* time. This function can't
 * be called with interrupts off, or it may introduce deadlock with
 * smp_call_function() if an IPI is sent by the same process we are
 * waiting to become inactive.
 */
void wait_task_inactive(struct task_struct *p)
{
        unsigned long flags;
        struct rq *rq;
        struct prio_array *array;
        int running;

repeat:
        /*
         * We do the initial early heuristics without holding
         * any task-queue locks at all. We'll only try to get
         * the runqueue lock when things look like they will
         * work out!
         */
        rq = task_rq(p);

        /*
         * If the task is actively running on another CPU
         * still, just relax and busy-wait without holding
         * any locks.
         *
         * NOTE! Since we don't hold any locks, it's not
         * even sure that "rq" stays as the right runqueue!
         * But we don't care, since "task_running()" will
         * return false if the runqueue has changed and p
         * is actually now running somewhere else!
         */
        while (task_running(rq, p))
                cpu_relax();

        /*
         * Ok, time to look more closely! We need the rq
         * lock now, to be *sure*. If we're wrong, we'll
         * just go back and repeat.
         */
        rq = task_rq_lock(p, &flags);
        running = task_running(rq, p);
        array = p->array;
        task_rq_unlock(rq, &flags);

        /*
         * Was it really running after all now that we
         * checked with the proper locks actually held?
         *
         * Oops. Go back and try again..
         */
        if (unlikely(running)) {
                cpu_relax();
                goto repeat;
        }

        /*
         * It's not enough that it's not actively running,
         * it must be off the runqueue _entirely_, and not
         * preempted!
         *
         * So if it was still runnable (but just not actively
         * running right now), it's preempted, and we should
         * yield - it could be a while.
         */
        if (unlikely(array)) {
                yield();
                goto repeat;
        }

        /*
         * Ahh, all good. It wasn't running, and it wasn't
         * runnable, which means that it will never become
         * running in the future either. We're all done!
         */
}

/***
 * kick_process - kick a running thread to enter/exit the kernel
 * @p: the to-be-kicked thread
 *
 * Cause a process which is running on another CPU to enter
 * kernel-mode, without any delay. (to get signals handled.)
 *
 * NOTE: this function doesn't have to take the runqueue lock,
 * because all it wants to ensure is that the remote task enters
 * the kernel. If the IPI races and the task has been migrated
 * to another CPU then no harm is done and the purpose has been
 * achieved as well.
 */
void kick_process(struct task_struct *p)
{
        int cpu;

        preempt_disable();
        cpu = task_cpu(p);
        if ((cpu != smp_processor_id()) && task_curr(p))
                smp_send_reschedule(cpu);
        preempt_enable();
}

/*
 * Return a low guess at the load of a migration-source cpu weighted
 * according to the scheduling class and "nice" value.
 *
 * We want to under-estimate the load of migration sources, to
 * balance conservatively.
 */
static inline unsigned long source_load(int cpu, int type)
{
        struct rq *rq = cpu_rq(cpu);

        if (type == 0)
                return rq->raw_weighted_load;

        return min(rq->cpu_load[type-1], rq->raw_weighted_load);
}

/*
 * Return a high guess at the load of a migration-target cpu weighted
 * according to the scheduling class and "nice" value.
 */
static inline unsigned long target_load(int cpu, int type)
{
        struct rq *rq = cpu_rq(cpu);

        if (type == 0)
                return rq->raw_weighted_load;

        return max(rq->cpu_load[type-1], rq->raw_weighted_load);
}

/*
 * Return the average load per task on the cpu's run queue
 */
static inline unsigned long cpu_avg_load_per_task(int cpu)
{
        struct rq *rq = cpu_rq(cpu);
        unsigned long n = rq->nr_running;

        return n ? rq->raw_weighted_load / n : SCHED_LOAD_SCALE;
}

/*
 * find_idlest_group finds and returns the least busy CPU group within the
 * domain.
 */
static struct sched_group *
find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
{
        struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups;
        unsigned long min_load = ULONG_MAX, this_load = 0;
        int load_idx = sd->forkexec_idx;
        int imbalance = 100 + (sd->imbalance_pct-100)/2;

        do {
                unsigned long load, avg_load;
                int local_group;
                int i;

                /* Skip over this group if it has no CPUs allowed */
                if (!cpus_intersects(group->cpumask, p->cpus_allowed))
                        goto nextgroup;

                local_group = cpu_isset(this_cpu, group->cpumask);

                /* Tally up the load of all CPUs in the group */
                avg_load = 0;

                for_each_cpu_mask(i, group->cpumask) {
                        /* Bias balancing toward cpus of our domain */
                        if (local_group)
                                load = source_load(i, load_idx);
                        else
                                load = target_load(i, load_idx);

                        avg_load += load;
                }

                /* Adjust by relative CPU power of the group */
                avg_load = sg_div_cpu_power(group,
                                avg_load * SCHED_LOAD_SCALE);

                if (local_group) {
                        this_load = avg_load;
                        this = group;
                } else if (avg_load < min_load) {
                        min_load = avg_load;
                        idlest = group;
                }
nextgroup:
                group = group->next;
        } while (group != sd->groups);

        if (!idlest || 100*this_load < imbalance*min_load)
                return NULL;
        return idlest;
}

/*
 * find_idlest_cpu - find the idlest cpu among the cpus in group.
 */
static int
find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
{
        cpumask_t tmp;
        unsigned long load, min_load = ULONG_MAX;
        int idlest = -1;
        int i;

        /* Traverse only the allowed CPUs */
        cpus_and(tmp, group->cpumask, p->cpus_allowed);

        for_each_cpu_mask(i, tmp) {
                load = weighted_cpuload(i);

                if (load < min_load || (load == min_load && i == this_cpu)) {
                        min_load = load;
                        idlest = i;
                }
        }

        return idlest;
}

/*
 * sched_balance_self: balance the current task (running on cpu) in domains
 * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and
 * SD_BALANCE_EXEC.
 *
 * Balance, ie. select the least loaded group.
 *
 * Returns the target CPU number, or the same CPU if no balancing is needed.
 *
 * preempt must be disabled.
 */
static int sched_balance_self(int cpu, int flag)
{
        struct task_struct *t = current;
        struct sched_domain *tmp, *sd = NULL;

        for_each_domain(cpu, tmp) {
                /*
                 * If power savings logic is enabled for a domain, stop there.
                 */
                if (tmp->flags & SD_POWERSAVINGS_BALANCE)
                        break;
                if (tmp->flags & flag)
                        sd = tmp;
        }

        while (sd) {
                cpumask_t span;
                struct sched_group *group;
                int new_cpu, weight;

                if (!(sd->flags & flag)) {
                        sd = sd->child;
                        continue;
                }

                span = sd->span;
                group = find_idlest_group(sd, t, cpu);
                if (!group) {
                        sd = sd->child;
                        continue;
                }

                new_cpu = find_idlest_cpu(group, t, cpu);
                if (new_cpu == -1 || new_cpu == cpu) {
                        /* Now try balancing at a lower domain level of cpu */
                        sd = sd->child;
                        continue;
                }

                /* Now try balancing at a lower domain level of new_cpu */
                cpu = new_cpu;
                sd = NULL;
                weight = cpus_weight(span);
                for_each_domain(cpu, tmp) {
                        if (weight <= cpus_weight(tmp->span))
                                break;
                        if (tmp->flags & flag)
                                sd = tmp;
                }
                /* while loop will break here if sd == NULL */
        }

        return cpu;
}

#endif /* CONFIG_SMP */

/*
 * wake_idle() will wake a task on an idle cpu if task->cpu is
 * not idle and an idle cpu is available.  The span of cpus to
 * search starts with cpus closest then further out as needed,
 * so we always favor a closer, idle cpu.
 *
 * Returns the CPU we should wake onto.
 */
#if defined(ARCH_HAS_SCHED_WAKE_IDLE)
static int wake_idle(int cpu, struct task_struct *p)
{
        cpumask_t tmp;
        struct sched_domain *sd;
        int i;

        /*
         * If it is idle, then it is the best cpu to run this task.
         *
         * This cpu is also the best, if it has more than one task already.
         * Siblings must also be busy (in most cases) as they didn't already
         * pick up the extra load from this cpu and hence we need not check
         * sibling runqueue info. This will avoid the checks and cache miss
         * penalties associated with that.
         */
        if (idle_cpu(cpu) || cpu_rq(cpu)->nr_running > 1)
                return cpu;

        for_each_domain(cpu, sd) {
                if (sd->flags & SD_WAKE_IDLE) {
                        cpus_and(tmp, sd->span, p->cpus_allowed);
                        for_each_cpu_mask(i, tmp) {
                                if (idle_cpu(i))
                                        return i;
                        }
                }
                else
                        break;
        }
        return cpu;
}
#else
static inline int wake_idle(int cpu, struct task_struct *p)
{
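
To make the per-CPU selection rule in find_idlest_cpu() above concrete: it picks the allowed CPU with the smallest weighted load and, on a tie, prefers the CPU the task is already on. The stand-alone user-space sketch below mirrors that rule only for illustration; the loads[] table and the pick_idlest_cpu() name are invented here (in the kernel the per-CPU figure comes from weighted_cpuload()), and none of this code is part of sched.c.

/* Illustrative user-space sketch, NOT kernel code: mimics the tie-breaking
 * rule of find_idlest_cpu() over a made-up table of per-CPU weighted loads. */
#include <stdio.h>
#include <limits.h>

/* Hypothetical per-CPU weighted loads; the kernel reads weighted_cpuload(i). */
static const unsigned long loads[] = { 2048, 1024, 1024, 3072 };
#define NR_FAKE_CPUS (int)(sizeof(loads) / sizeof(loads[0]))

/* Pick the CPU with minimum load; on a tie, prefer this_cpu (likely cache-warm). */
static int pick_idlest_cpu(int this_cpu)
{
        unsigned long min_load = ULONG_MAX;
        int idlest = -1;
        int i;

        for (i = 0; i < NR_FAKE_CPUS; i++) {
                unsigned long load = loads[i];

                if (load < min_load || (load == min_load && i == this_cpu)) {
                        min_load = load;
                        idlest = i;
                }
        }
        return idlest;
}

int main(void)
{
        /* CPUs 1 and 2 tie at 1024; starting from CPU 2, the tie goes to CPU 2. */
        printf("idlest seen from cpu 2: %d\n", pick_idlest_cpu(2)); /* prints 2 */
        /* Starting from CPU 0, the first minimum encountered (CPU 1) wins. */
        printf("idlest seen from cpu 0: %d\n", pick_idlest_cpu(0)); /* prints 1 */
        return 0;
}

At the group level, find_idlest_group() only reports a remote group when 100*this_load >= imbalance*min_load; with a typical sd->imbalance_pct of 125 the computed imbalance is 112, so the local group is abandoned only when it is roughly 12% or more busier than the least loaded remote group.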
