⭐ 虫虫下载站

📄 sched.c

📁 Linux 2.6.19 kernel source code (before patching)
💻 C
📖 Page 1 of 5
	return cpu;
}
#endif

/***
 * try_to_wake_up - wake up a thread
 * @p: the to-be-woken-up thread
 * @state: the mask of task states that can be woken
 * @sync: do a synchronous wakeup?
 *
 * Put it on the run-queue if it's not already there. The "current"
 * thread is always on the run-queue (except when the actual
 * re-schedule is in progress), and as such you're allowed to do
 * the simpler "current->state = TASK_RUNNING" to mark yourself
 * runnable without the overhead of this.
 *
 * returns failure only if the task is already active.
 */
static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
{
	int cpu, this_cpu, success = 0;
	unsigned long flags;
	long old_state;
	struct rq *rq;
#ifdef CONFIG_SMP
	struct sched_domain *sd, *this_sd = NULL;
	unsigned long load, this_load;
	int new_cpu;
#endif

	rq = task_rq_lock(p, &flags);
	old_state = p->state;
	if (!(old_state & state))
		goto out;

	if (p->array)
		goto out_running;

	cpu = task_cpu(p);
	this_cpu = smp_processor_id();

#ifdef CONFIG_SMP
	if (unlikely(task_running(rq, p)))
		goto out_activate;

	new_cpu = cpu;

	schedstat_inc(rq, ttwu_cnt);
	if (cpu == this_cpu) {
		schedstat_inc(rq, ttwu_local);
		goto out_set_cpu;
	}

	for_each_domain(this_cpu, sd) {
		if (cpu_isset(cpu, sd->span)) {
			schedstat_inc(sd, ttwu_wake_remote);
			this_sd = sd;
			break;
		}
	}

	if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed)))
		goto out_set_cpu;

	/*
	 * Check for affine wakeup and passive balancing possibilities.
	 */
	if (this_sd) {
		int idx = this_sd->wake_idx;
		unsigned int imbalance;

		imbalance = 100 + (this_sd->imbalance_pct - 100) / 2;

		load = source_load(cpu, idx);
		this_load = target_load(this_cpu, idx);

		new_cpu = this_cpu; /* Wake to this CPU if we can */

		if (this_sd->flags & SD_WAKE_AFFINE) {
			unsigned long tl = this_load;
			unsigned long tl_per_task;

			tl_per_task = cpu_avg_load_per_task(this_cpu);

			/*
			 * If sync wakeup then subtract the (maximum possible)
			 * effect of the currently running task from the load
			 * of the current CPU:
			 */
			if (sync)
				tl -= current->load_weight;

			if ((tl <= load &&
				tl + target_load(cpu, idx) <= tl_per_task) ||
				100*(tl + p->load_weight) <= imbalance*load) {
				/*
				 * This domain has SD_WAKE_AFFINE and
				 * p is cache cold in this domain, and
				 * there is no bad imbalance.
				 */
				schedstat_inc(this_sd, ttwu_move_affine);
				goto out_set_cpu;
			}
		}

		/*
		 * Start passive balancing when half the imbalance_pct
		 * limit is reached.
		 */
		if (this_sd->flags & SD_WAKE_BALANCE) {
			if (imbalance*this_load <= 100*load) {
				schedstat_inc(this_sd, ttwu_move_balance);
				goto out_set_cpu;
			}
		}
	}

	new_cpu = cpu; /* Could not wake to this_cpu. Wake to cpu instead */
out_set_cpu:
	new_cpu = wake_idle(new_cpu, p);
	if (new_cpu != cpu) {
		set_task_cpu(p, new_cpu);
		task_rq_unlock(rq, &flags);
		/* might preempt at this point */
		rq = task_rq_lock(p, &flags);
		old_state = p->state;
		if (!(old_state & state))
			goto out;
		if (p->array)
			goto out_running;

		this_cpu = smp_processor_id();
		cpu = task_cpu(p);
	}

out_activate:
#endif /* CONFIG_SMP */
	if (old_state == TASK_UNINTERRUPTIBLE) {
		rq->nr_uninterruptible--;
		/*
		 * Tasks on involuntary sleep don't earn
		 * sleep_avg beyond just interactive state.
		 */
		p->sleep_type = SLEEP_NONINTERACTIVE;
	} else

	/*
	 * Tasks that have marked their sleep as noninteractive get
	 * woken up with their sleep average not weighted in an
	 * interactive way.
	 */
		if (old_state & TASK_NONINTERACTIVE)
			p->sleep_type = SLEEP_NONINTERACTIVE;

	activate_task(p, rq, cpu == this_cpu);
	/*
	 * Sync wakeups (i.e. those types of wakeups where the waker
	 * has indicated that it will leave the CPU in short order)
	 * don't trigger a preemption, if the woken up task will run on
	 * this cpu. (in this case the 'I will reschedule' promise of
	 * the waker guarantees that the freshly woken up task is going
	 * to be considered on this CPU.)
	 */
	if (!sync || cpu != this_cpu) {
		if (TASK_PREEMPTS_CURR(p, rq))
			resched_task(rq->curr);
	}
	success = 1;

out_running:
	p->state = TASK_RUNNING;
out:
	task_rq_unlock(rq, &flags);

	return success;
}

int fastcall wake_up_process(struct task_struct *p)
{
	return try_to_wake_up(p, TASK_STOPPED | TASK_TRACED |
				 TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0);
}
EXPORT_SYMBOL(wake_up_process);

int fastcall wake_up_state(struct task_struct *p, unsigned int state)
{
	return try_to_wake_up(p, state, 0);
}

static void task_running_tick(struct rq *rq, struct task_struct *p);
/*
 * Perform scheduler related setup for a newly forked process p.
 * p is forked by current.
 */
void fastcall sched_fork(struct task_struct *p, int clone_flags)
{
	int cpu = get_cpu();

#ifdef CONFIG_SMP
	cpu = sched_balance_self(cpu, SD_BALANCE_FORK);
#endif
	set_task_cpu(p, cpu);

	/*
	 * We mark the process as running here, but have not actually
	 * inserted it onto the runqueue yet. This guarantees that
	 * nobody will actually run it, and a signal or other external
	 * event cannot wake it up and insert it on the runqueue either.
	 */
	p->state = TASK_RUNNING;

	/*
	 * Make sure we do not leak PI boosting priority to the child:
	 */
	p->prio = current->normal_prio;

	INIT_LIST_HEAD(&p->run_list);
	p->array = NULL;
#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
	if (unlikely(sched_info_on()))
		memset(&p->sched_info, 0, sizeof(p->sched_info));
#endif
#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
	p->oncpu = 0;
#endif
#ifdef CONFIG_PREEMPT
	/* Want to start with kernel preemption disabled. */
	task_thread_info(p)->preempt_count = 1;
#endif
	/*
	 * Share the timeslice between parent and child, thus the
	 * total amount of pending timeslices in the system doesn't change,
	 * resulting in more scheduling fairness.
	 */
	local_irq_disable();
	p->time_slice = (current->time_slice + 1) >> 1;
	/*
	 * The remainder of the first timeslice might be recovered by
	 * the parent if the child exits early enough.
	 */
	p->first_time_slice = 1;
	current->time_slice >>= 1;
	p->timestamp = sched_clock();
	if (unlikely(!current->time_slice)) {
		/*
		 * This case is rare, it happens when the parent has only
		 * a single jiffy left from its timeslice. Taking the
		 * runqueue lock is not a problem.
		 */
		current->time_slice = 1;
		task_running_tick(cpu_rq(cpu), current);
	}
	local_irq_enable();
	put_cpu();
}

/*
 * wake_up_new_task - wake up a newly created task for the first time.
 *
 * This function will do some initial scheduler statistics housekeeping
 * that must be done for every newly created context, then puts the task
 * on the runqueue and wakes it.
 */
void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
{
	struct rq *rq, *this_rq;
	unsigned long flags;
	int this_cpu, cpu;

	rq = task_rq_lock(p, &flags);
	BUG_ON(p->state != TASK_RUNNING);
	this_cpu = smp_processor_id();
	cpu = task_cpu(p);

	/*
	 * We decrease the sleep average of forking parents
	 * and children as well, to keep max-interactive tasks
	 * from forking tasks that are max-interactive. The parent
	 * (current) is done further down, under its lock.
	 */
	p->sleep_avg = JIFFIES_TO_NS(CURRENT_BONUS(p) *
		CHILD_PENALTY / 100 * MAX_SLEEP_AVG / MAX_BONUS);

	p->prio = effective_prio(p);

	if (likely(cpu == this_cpu)) {
		if (!(clone_flags & CLONE_VM)) {
			/*
			 * The VM isn't cloned, so we're in a good position to
			 * do child-runs-first in anticipation of an exec. This
			 * usually avoids a lot of COW overhead.
			 */
			if (unlikely(!current->array))
				__activate_task(p, rq);
			else {
				p->prio = current->prio;
				p->normal_prio = current->normal_prio;
				list_add_tail(&p->run_list, &current->run_list);
				p->array = current->array;
				p->array->nr_active++;
				inc_nr_running(p, rq);
			}
			set_need_resched();
		} else
			/* Run child last */
			__activate_task(p, rq);
		/*
		 * We skip the following code due to cpu == this_cpu
	 	 *
		 *   task_rq_unlock(rq, &flags);
		 *   this_rq = task_rq_lock(current, &flags);
		 */
		this_rq = rq;
	} else {
		this_rq = cpu_rq(this_cpu);

		/*
		 * Not the local CPU - must adjust timestamp. This should
		 * get optimised away in the !CONFIG_SMP case.
		 */
		p->timestamp = (p->timestamp - this_rq->most_recent_timestamp)
					+ rq->most_recent_timestamp;
		__activate_task(p, rq);
		if (TASK_PREEMPTS_CURR(p, rq))
			resched_task(rq->curr);

		/*
		 * Parent and child are on different CPUs, now get the
		 * parent runqueue to update the parent's ->sleep_avg:
		 */
		task_rq_unlock(rq, &flags);
		this_rq = task_rq_lock(current, &flags);
	}
	current->sleep_avg = JIFFIES_TO_NS(CURRENT_BONUS(current) *
		PARENT_PENALTY / 100 * MAX_SLEEP_AVG / MAX_BONUS);
	task_rq_unlock(this_rq, &flags);
}

/*
 * Potentially available exiting-child timeslices are
 * retrieved here - this way the parent does not get
 * penalized for creating too many threads.
 *
 * (this cannot be used to 'generate' timeslices
 * artificially, because any timeslice recovered here
 * was given away by the parent in the first place.)
 */
void fastcall sched_exit(struct task_struct *p)
{
	unsigned long flags;
	struct rq *rq;

	/*
	 * If the child was a (relative-) CPU hog then decrease
	 * the sleep_avg of the parent as well.
	 */
	rq = task_rq_lock(p->parent, &flags);
	if (p->first_time_slice && task_cpu(p) == task_cpu(p->parent)) {
		p->parent->time_slice += p->time_slice;
		if (unlikely(p->parent->time_slice > task_timeslice(p)))
			p->parent->time_slice = task_timeslice(p);
	}
	if (p->sleep_avg < p->parent->sleep_avg)
		p->parent->sleep_avg = p->parent->sleep_avg /
		(EXIT_WEIGHT + 1) * EXIT_WEIGHT + p->sleep_avg /
		(EXIT_WEIGHT + 1);
	task_rq_unlock(rq, &flags);
}

/**
 * prepare_task_switch - prepare to switch tasks
 * @rq: the runqueue preparing to switch
 * @next: the task we are going to switch to.
 *
 * This is called with the rq lock held and interrupts off. It must
 * be paired with a subsequent finish_task_switch after the context
 * switch.
 *
 * prepare_task_switch sets up locking and calls architecture specific
 * hooks.
 */
static inline void prepare_task_switch(struct rq *rq, struct task_struct *next)
{
	prepare_lock_switch(rq, next);
	prepare_arch_switch(next);
}

/**
 * finish_task_switch - clean up after a task-switch
 * @rq: runqueue associated with task-switch
 * @prev: the thread we just switched away from.
 *
 * finish_task_switch must be called after the context switch, paired
 * with a prepare_task_switch call before the context switch.
 * finish_task_switch will reconcile locking set up by prepare_task_switch,
 * and do any other architecture-specific cleanup actions.
 *
 * Note that we may have delayed dropping an mm in context_switch(). If
 * so, we finish that here outside of the runqueue lock.  (Doing it
 * with the lock held can cause deadlocks; see schedule() for
 * details.)
 */
static inline void finish_task_switch(struct rq *rq, struct task_struct *prev)
	__releases(rq->lock)
{
	struct mm_struct *mm = rq->prev_mm;
	long prev_state;

	rq->prev_mm = NULL;

	/*
	 * A task struct has one reference for the use as "current".
	 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
	 * schedule one last time. The schedule call will never return, and
	 * the scheduled task must drop that reference.
	 * The test for TASK_DEAD must occur while the runqueue locks are
	 * still held, otherwise prev could be scheduled on another cpu, die
	 * there before we look at prev->state, and then the reference would
	 * be dropped twice.
	 *		Manfred Spraul <manfred@colorfullife.com>
	 */
	prev_state = prev->state;
	finish_arch_switch(prev);
	finish_lock_switch(rq, prev);
	if (mm)
		mmdrop(mm);
	if (unlikely(prev_state == TASK_DEAD)) {
		/*
		 * Remove function-return probe instances associated with this
		 * task and put them back on the free list.
		 */
		kprobe_flush_task(prev);
		put_task_struct(prev);
	}
}

/**
 * schedule_tail - first thing a freshly forked thread must call.
 * @prev: the thread we just switched away from.
 */
asmlinkage void schedule_tail(struct task_struct *prev)
	__releases(rq->lock)
{
	struct rq *rq = this_rq();

	finish_task_switch(rq, prev);
#ifdef __ARCH_WANT_UNLOCKED_CTXSW
	/* In this case, finish_task_switch does not reenable preemption */
	preempt_enable();
#endif
	if (current->set_child_tid)
		put_user(current->pid, current->set_child_tid);
}

/*
 * context_switch - switch to the new MM and the new
 * thread's register state.
 */
static inline struct task_struct *
context_switch(struct rq *rq, struct task_struct *prev,
	       struct task_struct *next)
{
	struct mm_struct *mm = next->mm;
	struct mm_struct *oldmm = prev->active_mm;

	/*
	 * For paravirt, this is coupled with an exit in switch_to to
	 * combine the page table reload and the switch backend into
	 * one hypercall.
	 */
	arch_enter_lazy_cpu_mode();

	if (!mm) {
		next->active_mm = oldmm;
		atomic_inc(&oldmm->mm_count);
		enter_lazy_tlb(oldmm, next);
	} else
		switch_mm(oldmm, mm, next);

	if (!prev->mm) {
		prev->active_mm = NULL;
		WARN_ON(rq->prev_mm);
		rq->prev_mm = oldmm;
	}
	/*
	 * Since the runqueue lock will be released by the next
	 * task (which is an invalid locking op but in the case
	 * of the scheduler it's an obvious special-case), so we
	 * do an early lockdep release here:
	 */
#ifndef __ARCH_WANT_UNLOCKED_CTXSW
	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
#endif

	/* Here we just switch the register state and the stack. */
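Editorial note (not part of sched.c): the fork-time timeslice split in sched_fork() above, p->time_slice = (current->time_slice + 1) >> 1 followed by current->time_slice >>= 1, hands the child the rounded-up half of the parent's remaining timeslice and leaves the parent the rounded-down half, so the split never creates timeslice out of thin air. The following stand-alone user-space sketch only illustrates that arithmetic; the variable names and the starting value of 7 jiffies are hypothetical.

#include <stdio.h>

/*
 * Hypothetical illustration of the sched_fork() timeslice split:
 * child gets (parent + 1) >> 1, parent keeps parent >> 1.
 */
int main(void)
{
	unsigned int parent = 7;                /* assumed remaining jiffies */
	unsigned int before = parent;

	unsigned int child = (parent + 1) >> 1; /* rounded-up half: 4 */
	parent >>= 1;                           /* rounded-down half: 3 */

	/* The two halves always add back up to the original timeslice. */
	printf("child=%u parent=%u total=%u (was %u)\n",
	       child, parent, child + parent, before);
	return 0;
}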
