
📄 sched.c

📁 Linux kernel scheduler source (kernel/sched.c)
💻 C
📖 Page 1 of 5
		sd = NULL;
		weight = cpus_weight(span);
		for_each_domain(cpu, tmp) {
			if (weight <= cpus_weight(tmp->span))
				break;
			if (tmp->flags & flag)
				sd = tmp;
		}
		/* while loop will break here if sd == NULL */
	}

	return cpu;
}

#endif /* CONFIG_SMP */

/***
 * try_to_wake_up - wake up a thread
 * @p: the to-be-woken-up thread
 * @state: the mask of task states that can be woken
 * @sync: do a synchronous wakeup?
 *
 * Put it on the run-queue if it's not already there. The "current"
 * thread is always on the run-queue (except when the actual
 * re-schedule is in progress), and as such you're allowed to do
 * the simpler "current->state = TASK_RUNNING" to mark yourself
 * runnable without the overhead of this.
 *
 * returns failure only if the task is already active.
 */
static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
{
	int cpu, orig_cpu, this_cpu, success = 0;
	unsigned long flags;
	long old_state;
	struct rq *rq;

	if (!sched_feat(SYNC_WAKEUPS))
		sync = 0;

#ifdef CONFIG_SMP
	if (sched_feat(LB_WAKEUP_UPDATE)) {
		struct sched_domain *sd;

		this_cpu = raw_smp_processor_id();
		cpu = task_cpu(p);

		for_each_domain(this_cpu, sd) {
			if (cpu_isset(cpu, sd->span)) {
				update_shares(sd);
				break;
			}
		}
	}
#endif

	smp_wmb();
	rq = task_rq_lock(p, &flags);
	old_state = p->state;
	if (!(old_state & state))
		goto out;

	if (p->se.on_rq)
		goto out_running;

	cpu = task_cpu(p);
	orig_cpu = cpu;
	this_cpu = smp_processor_id();

#ifdef CONFIG_SMP
	if (unlikely(task_running(rq, p)))
		goto out_activate;

	cpu = p->sched_class->select_task_rq(p, sync);
	if (cpu != orig_cpu) {
		set_task_cpu(p, cpu);
		task_rq_unlock(rq, &flags);

		/* might preempt at this point */
		rq = task_rq_lock(p, &flags);
		old_state = p->state;
		if (!(old_state & state))
			goto out;
		if (p->se.on_rq)
			goto out_running;

		this_cpu = smp_processor_id();
		cpu = task_cpu(p);
	}

#ifdef CONFIG_SCHEDSTATS
	schedstat_inc(rq, ttwu_count);
	if (cpu == this_cpu)
		schedstat_inc(rq, ttwu_local);
	else {
		struct sched_domain *sd;
		for_each_domain(this_cpu, sd) {
			if (cpu_isset(cpu, sd->span)) {
				schedstat_inc(sd, ttwu_wake_remote);
				break;
			}
		}
	}
#endif /* CONFIG_SCHEDSTATS */

out_activate:
#endif /* CONFIG_SMP */
	schedstat_inc(p, se.nr_wakeups);
	if (sync)
		schedstat_inc(p, se.nr_wakeups_sync);
	if (orig_cpu != cpu)
		schedstat_inc(p, se.nr_wakeups_migrate);
	if (cpu == this_cpu)
		schedstat_inc(p, se.nr_wakeups_local);
	else
		schedstat_inc(p, se.nr_wakeups_remote);
	update_rq_clock(rq);
	activate_task(rq, p, 1);
	success = 1;

out_running:
	trace_mark(kernel_sched_wakeup,
		"pid %d state %ld ## rq %p task %p rq->curr %p",
		p->pid, p->state, rq, p, rq->curr);
	check_preempt_curr(rq, p);

	p->state = TASK_RUNNING;
#ifdef CONFIG_SMP
	if (p->sched_class->task_wake_up)
		p->sched_class->task_wake_up(rq, p);
#endif
out:
	current->se.last_wakeup = current->se.sum_exec_runtime;

	task_rq_unlock(rq, &flags);

	return success;
}

int wake_up_process(struct task_struct *p)
{
	return try_to_wake_up(p, TASK_ALL, 0);
}
EXPORT_SYMBOL(wake_up_process);

int wake_up_state(struct task_struct *p, unsigned int state)
{
	return try_to_wake_up(p, state, 0);
}
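/*
 * Illustrative sketch, not part of sched.c: the sleep/wake pairing that
 * the try_to_wake_up() comment above describes. The waiter marks itself
 * non-runnable *before* checking its condition, so a wakeup that arrives
 * between the check and schedule() is not lost; the waker publishes the
 * condition and then calls wake_up_process(), which is
 * try_to_wake_up(p, TASK_ALL, 0). waiter_task, data_ready and the two
 * functions are hypothetical names used only for this sketch.
 */
#if 0	/* example only, never compiled */
static struct task_struct *waiter_task;
static int data_ready;

static int waiter_fn(void *unused)
{
	set_current_state(TASK_INTERRUPTIBLE);
	while (!data_ready) {
		schedule();				/* sleep until woken */
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

static void producer(void)
{
	data_ready = 1;
	smp_wmb();					/* publish before waking */
	wake_up_process(waiter_task);			/* -> try_to_wake_up() */
}
#endif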
/*
 * Perform scheduler related setup for a newly forked process p.
 * p is forked by current.
 *
 * __sched_fork() is basic setup used by init_idle() too:
 */
static void __sched_fork(struct task_struct *p)
{
	p->se.exec_start		= 0;
	p->se.sum_exec_runtime		= 0;
	p->se.prev_sum_exec_runtime	= 0;
	p->se.last_wakeup		= 0;
	p->se.avg_overlap		= 0;

#ifdef CONFIG_SCHEDSTATS
	p->se.wait_start		= 0;
	p->se.sum_sleep_runtime		= 0;
	p->se.sleep_start		= 0;
	p->se.block_start		= 0;
	p->se.sleep_max			= 0;
	p->se.block_max			= 0;
	p->se.exec_max			= 0;
	p->se.slice_max			= 0;
	p->se.wait_max			= 0;
#endif

	INIT_LIST_HEAD(&p->rt.run_list);
	p->se.on_rq = 0;
	INIT_LIST_HEAD(&p->se.group_node);

#ifdef CONFIG_PREEMPT_NOTIFIERS
	INIT_HLIST_HEAD(&p->preempt_notifiers);
#endif

	/*
	 * We mark the process as running here, but have not actually
	 * inserted it onto the runqueue yet. This guarantees that
	 * nobody will actually run it, and a signal or other external
	 * event cannot wake it up and insert it on the runqueue either.
	 */
	p->state = TASK_RUNNING;
}

/*
 * fork()/clone()-time setup:
 */
void sched_fork(struct task_struct *p, int clone_flags)
{
	int cpu = get_cpu();

	__sched_fork(p);

#ifdef CONFIG_SMP
	cpu = sched_balance_self(cpu, SD_BALANCE_FORK);
#endif
	set_task_cpu(p, cpu);

	/*
	 * Make sure we do not leak PI boosting priority to the child:
	 */
	p->prio = current->normal_prio;
	if (!rt_prio(p->prio))
		p->sched_class = &fair_sched_class;

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
	if (likely(sched_info_on()))
		memset(&p->sched_info, 0, sizeof(p->sched_info));
#endif
#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
	p->oncpu = 0;
#endif
#ifdef CONFIG_PREEMPT
	/* Want to start with kernel preemption disabled. */
	task_thread_info(p)->preempt_count = 1;
#endif
	put_cpu();
}

/*
 * wake_up_new_task - wake up a newly created task for the first time.
 *
 * This function will do some initial scheduler statistics housekeeping
 * that must be done for every newly created context, then puts the task
 * on the runqueue and wakes it.
 */
void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
{
	unsigned long flags;
	struct rq *rq;

	rq = task_rq_lock(p, &flags);
	BUG_ON(p->state != TASK_RUNNING);
	update_rq_clock(rq);

	p->prio = effective_prio(p);

	if (!p->sched_class->task_new || !current->se.on_rq) {
		activate_task(rq, p, 0);
	} else {
		/*
		 * Let the scheduling class do new task startup
		 * management (if any):
		 */
		p->sched_class->task_new(rq, p);
		inc_nr_running(rq);
	}
	trace_mark(kernel_sched_wakeup_new,
		"pid %d state %ld ## rq %p task %p rq->curr %p",
		p->pid, p->state, rq, p, rq->curr);
	check_preempt_curr(rq, p);
#ifdef CONFIG_SMP
	if (p->sched_class->task_wake_up)
		p->sched_class->task_wake_up(rq, p);
#endif
	task_rq_unlock(rq, &flags);
}
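/*
 * Illustrative sketch, not part of sched.c: the expected calling order of
 * the two hooks above on the fork path (kernel/fork.c). The child is
 * prepared with sched_fork() while it is still invisible to the rest of
 * the system, and is enqueued for the first time via wake_up_new_task()
 * only after copy_process() has fully constructed it.
 */
#if 0	/* example only, never compiled */
	/* inside copy_process(): per-task scheduler state, CPU placement */
	sched_fork(p, clone_flags);

	/* inside do_fork(), after copy_process() has succeeded */
	if (!(clone_flags & CLONE_STOPPED))
		wake_up_new_task(p, clone_flags);	/* first enqueue + preemption check */
#endif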
#ifdef CONFIG_PREEMPT_NOTIFIERS

/**
 * preempt_notifier_register - tell me when current is being preempted & rescheduled
 * @notifier: notifier struct to register
 */
void preempt_notifier_register(struct preempt_notifier *notifier)
{
	hlist_add_head(&notifier->link, &current->preempt_notifiers);
}
EXPORT_SYMBOL_GPL(preempt_notifier_register);

/**
 * preempt_notifier_unregister - no longer interested in preemption notifications
 * @notifier: notifier struct to unregister
 *
 * This is safe to call from within a preemption notifier.
 */
void preempt_notifier_unregister(struct preempt_notifier *notifier)
{
	hlist_del(&notifier->link);
}
EXPORT_SYMBOL_GPL(preempt_notifier_unregister);

static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
{
	struct preempt_notifier *notifier;
	struct hlist_node *node;

	hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
		notifier->ops->sched_in(notifier, raw_smp_processor_id());
}

static void
fire_sched_out_preempt_notifiers(struct task_struct *curr,
				 struct task_struct *next)
{
	struct preempt_notifier *notifier;
	struct hlist_node *node;

	hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
		notifier->ops->sched_out(notifier, next);
}

#else /* !CONFIG_PREEMPT_NOTIFIERS */

static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
{
}

static void
fire_sched_out_preempt_notifiers(struct task_struct *curr,
				 struct task_struct *next)
{
}

#endif /* CONFIG_PREEMPT_NOTIFIERS */
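/*
 * Illustrative sketch, not part of sched.c: a client of the preempt-notifier
 * API above (KVM is the in-tree user). struct preempt_ops and
 * preempt_notifier_init() come from <linux/preempt.h>; the my_* names are
 * hypothetical. Registration affects only the calling task, and the
 * callbacks fire from prepare_task_switch()/finish_task_switch() below.
 */
#if 0	/* example only, never compiled */
static void my_sched_in(struct preempt_notifier *pn, int cpu)
{
	/* current was just switched back in on 'cpu' */
}

static void my_sched_out(struct preempt_notifier *pn, struct task_struct *next)
{
	/* current is being switched out in favour of 'next' */
}

static struct preempt_ops my_preempt_ops = {
	.sched_in	= my_sched_in,
	.sched_out	= my_sched_out,
};

static void my_track_current(struct preempt_notifier *pn)
{
	preempt_notifier_init(pn, &my_preempt_ops);
	preempt_notifier_register(pn);
	/* ... and later, from the same task: preempt_notifier_unregister(pn); */
}
#endif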
/**
 * prepare_task_switch - prepare to switch tasks
 * @rq: the runqueue preparing to switch
 * @prev: the current task that is being switched out
 * @next: the task we are going to switch to.
 *
 * This is called with the rq lock held and interrupts off. It must
 * be paired with a subsequent finish_task_switch after the context
 * switch.
 *
 * prepare_task_switch sets up locking and calls architecture specific
 * hooks.
 */
static inline void
prepare_task_switch(struct rq *rq, struct task_struct *prev,
		    struct task_struct *next)
{
	fire_sched_out_preempt_notifiers(prev, next);
	prepare_lock_switch(rq, next);
	prepare_arch_switch(next);
}

/**
 * finish_task_switch - clean up after a task-switch
 * @rq: runqueue associated with task-switch
 * @prev: the thread we just switched away from.
 *
 * finish_task_switch must be called after the context switch, paired
 * with a prepare_task_switch call before the context switch.
 * finish_task_switch will reconcile locking set up by prepare_task_switch,
 * and do any other architecture-specific cleanup actions.
 *
 * Note that we may have delayed dropping an mm in context_switch(). If
 * so, we finish that here outside of the runqueue lock. (Doing it
 * with the lock held can cause deadlocks; see schedule() for
 * details.)
 */
static void finish_task_switch(struct rq *rq, struct task_struct *prev)
	__releases(rq->lock)
{
	struct mm_struct *mm = rq->prev_mm;
	long prev_state;

	rq->prev_mm = NULL;

	/*
	 * A task struct has one reference for the use as "current".
	 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
	 * schedule one last time. The schedule call will never return, and
	 * the scheduled task must drop that reference.
	 * The test for TASK_DEAD must occur while the runqueue locks are
	 * still held, otherwise prev could be scheduled on another cpu, die
	 * there before we look at prev->state, and then the reference would
	 * be dropped twice.
	 *		Manfred Spraul <manfred@colorfullife.com>
	 */
	prev_state = prev->state;
	finish_arch_switch(prev);
	finish_lock_switch(rq, prev);
#ifdef CONFIG_SMP
	if (current->sched_class->post_schedule)
		current->sched_class->post_schedule(rq);
#endif

	fire_sched_in_preempt_notifiers(current);
	if (mm)
		mmdrop(mm);
	if (unlikely(prev_state == TASK_DEAD)) {
		/*
		 * Remove function-return probe instances associated with this
		 * task and put them back on the free list.
		 */
		kprobe_flush_task(prev);
		put_task_struct(prev);
	}
}

/**
 * schedule_tail - first thing a freshly forked thread must call.
 * @prev: the thread we just switched away from.
 */
asmlinkage void schedule_tail(struct task_struct *prev)
	__releases(rq->lock)
{
	struct rq *rq = this_rq();

	finish_task_switch(rq, prev);
#ifdef __ARCH_WANT_UNLOCKED_CTXSW
	/* In this case, finish_task_switch does not reenable preemption */
	preempt_enable();
#endif
	if (current->set_child_tid)
		put_user(task_pid_vnr(current), current->set_child_tid);
}

/*
 * context_switch - switch to the new MM and the new
 * thread's register state.
 */
static inline void
context_switch(struct rq *rq, struct task_struct *prev,
	       struct task_struct *next)
{
	struct mm_struct *mm, *oldmm;

	prepare_task_switch(rq, prev, next);
	trace_mark(kernel_sched_schedule,
		"prev_pid %d next_pid %d prev_state %ld "
		"## rq %p prev %p next %p",
		prev->pid, next->pid, prev->state,
		rq, prev, next);
	mm = next->mm;
	oldmm = prev->active_mm;
	/*
	 * For paravirt, this is coupled with an exit in switch_to to
	 * combine the page table reload and the switch backend into
	 * one hypercall.
	 */
	arch_enter_lazy_cpu_mode();

	if (unlikely(!mm)) {
		next->active_mm = oldmm;
		atomic_inc(&oldmm->mm_count);
		enter_lazy_tlb(oldmm, next);
	} else
		switch_mm(oldmm, mm, next);

	if (unlikely(!prev->mm)) {
		prev->active_mm = NULL;
		rq->prev_mm = oldmm;
	}
	/*
	 * Since the runqueue lock will be released by the next
	 * task (which is an invalid locking op but in the case
	 * of the scheduler it's an obvious special-case), so we
	 * do an early lockdep release here:
	 */
#ifndef __ARCH_WANT_UNLOCKED_CTXSW
	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
#endif

	/* Here we just switch the register state and the stack. */
	switch_to(prev, next, prev);

	barrier();
	/*
	 * this_rq must be evaluated again because prev may have moved
	 * CPUs since it called schedule(), thus the 'rq' on its stack
	 * frame will be invalid.
	 */
	finish_task_switch(this_rq(), prev);
}

/*
 * nr_running, nr_uninterruptible and nr_context_switches:
 *
 * externally visible scheduler statistics: current number of runnable
 * threads, current number of uninterruptible-sleeping threads, total
 * number of context switches performed since bootup.
 */
unsigned long nr_running(void)
{
	unsigned long i, sum = 0;

	for_each_online_cpu(i)
		sum += cpu_rq(i)->nr_running;

	return sum;
}

unsigned long nr_uninterruptible(void)
{
	unsigned long i, sum = 0;

	for_each_possible_cpu(i)
		sum += cpu_rq(i)->nr_uninterruptible;

	/*
	 * Since we read the counters lockless, it might be slightly
	 * inaccurate. Do not allow it to go below zero though:
	 */
	if (unlikely((long)sum < 0))
		sum = 0;

	return sum;
}

unsigned long long nr_context_switches(void)
{
	int i;
	unsigned long long sum = 0;

	for_each_possible_cpu(i)
		sum += cpu_rq(i)->nr_switches;

	return sum;
}

unsigned long nr_iowait(void)
{
	unsigned long i, sum = 0;

	for_each_possible_cpu(i)
		sum += atomic_read(&cpu_rq(i)->nr_iowait);

	return sum;
}

unsigned long nr_active(void)
{
	unsigned long i, running = 0, uninterruptible = 0;

	for_each_online_cpu(i) {
		running += cpu_rq(i)->nr_running;
		uninterruptible +=
