
sched.c

Collection: source code for the "Embedded System Design and Example Development" lab textbook II (multithreaded application programming, serial port programming, A/D interface experiment, CAN bus communication experiment, GPS communication experiment, Linux kernel porting and compilation experiment, IC card read/write experiment, SD driver...)
Language: C
Page 1 of 3
	timer.function = process_timeout;

	add_timer(&timer);
	schedule();
	del_timer_sync(&timer);

	timeout = expire - jiffies;

 out:
	return timeout < 0 ? 0 : timeout;
}

/*
 * schedule_tail() is getting called from the fork return path. This
 * cleans up all remaining scheduler things, without impacting the
 * common case.
 */
static inline void __schedule_tail(struct task_struct *prev)
{
#ifdef CONFIG_SMP
	int policy;

	/*
	 * prev->policy can be written from here only before `prev'
	 * can be scheduled (before setting prev->cpus_runnable to ~0UL).
	 * Of course it must also be read before allowing prev
	 * to be rescheduled, but since the write depends on the read
	 * to complete, wmb() is enough. (the spin_lock() acquired
	 * before setting cpus_runnable is not enough because the spin_lock()
	 * common code semantics allows code outside the critical section
	 * to enter inside the critical section)
	 */
	policy = prev->policy;
	prev->policy = policy & ~SCHED_YIELD;
	wmb();

	/*
	 * fast path falls through. We have to clear cpus_runnable before
	 * checking prev->state to avoid a wakeup race. Protect against
	 * the task exiting early.
	 */
	task_lock(prev);
	task_release_cpu(prev);
	mb();
	if (prev->state == TASK_RUNNING)
		goto needs_resched;

out_unlock:
	task_unlock(prev);	/* Synchronise here with release_task() if prev is TASK_ZOMBIE */
	return;

	/*
	 * Slow path - we 'push' the previous process and
	 * reschedule_idle() will attempt to find a new
	 * processor for it. (but it might preempt the
	 * current process as well.) We must take the runqueue
	 * lock and re-check prev->state to be correct. It might
	 * still happen that this process has a preemption
	 * 'in progress' already - but this is not a problem and
	 * might happen in other circumstances as well.
	 */
needs_resched:
	{
		unsigned long flags;

		/*
		 * Avoid taking the runqueue lock in cases where
		 * no preemption-check is necessary:
		 */
		if ((prev == idle_task(smp_processor_id())) ||
						(policy & SCHED_YIELD))
			goto out_unlock;

		spin_lock_irqsave(&runqueue_lock, flags);
		if ((prev->state == TASK_RUNNING) && !task_has_cpu(prev))
			reschedule_idle(prev);
		spin_unlock_irqrestore(&runqueue_lock, flags);
		goto out_unlock;
	}
#else
	prev->policy &= ~SCHED_YIELD;
#endif /* CONFIG_SMP */
}

asmlinkage void schedule_tail(struct task_struct *prev)
{
	__schedule_tail(prev);
}

/*
 *  'schedule()' is the scheduler function. It's a very simple and nice
 * scheduler: it's not perfect, but certainly works for most things.
 *
 * The goto is "interesting".
 *
 *   NOTE!!  Task 0 is the 'idle' task, which gets called when no other
 * tasks can run. It can not be killed, and it cannot sleep. The 'state'
 * information in task[0] is never used.
 */
asmlinkage void schedule(void)
{
	struct schedule_data * sched_data;
	struct task_struct *prev, *next, *p;
	struct list_head *tmp;
	int this_cpu, c;

	spin_lock_prefetch(&runqueue_lock);

	if (!current->active_mm) BUG();
need_resched_back:
	prev = current;
	this_cpu = prev->processor;

	if (unlikely(in_interrupt())) {
		printk("Scheduling in interrupt\n");
		BUG();
	}

	release_kernel_lock(prev, this_cpu);

	/*
	 * 'sched_data' is protected by the fact that we can run
	 * only one process per CPU.
	 */
	sched_data = & aligned_data[this_cpu].schedule_data;

	spin_lock_irq(&runqueue_lock);

	/* move an exhausted RR process to be last.. */
	if (unlikely(prev->policy == SCHED_RR))
		if (!prev->counter) {
			prev->counter = NICE_TO_TICKS(prev->nice);
			move_last_runqueue(prev);
		}

	switch (prev->state) {
		case TASK_INTERRUPTIBLE:
			if (signal_pending(prev)) {
				prev->state = TASK_RUNNING;
				break;
			}
		default:
			del_from_runqueue(prev);
		case TASK_RUNNING:;
	}
	prev->need_resched = 0;

	/*
	 * this is the scheduler proper:
	 */

repeat_schedule:
	/*
	 * Default process to select..
	 */
	next = idle_task(this_cpu);
	c = -1000;
	list_for_each(tmp, &runqueue_head) {
		p = list_entry(tmp, struct task_struct, run_list);
		if (can_schedule(p, this_cpu)) {
			int weight = goodness(p, this_cpu, prev->active_mm);
			if (weight > c)
				c = weight, next = p;
		}
	}

	/* Do we need to re-calculate counters? */
	if (unlikely(!c)) {
		struct task_struct *p;

		spin_unlock_irq(&runqueue_lock);
		read_lock(&tasklist_lock);
		for_each_task(p)
			p->counter = (p->counter >> 1) + NICE_TO_TICKS(p->nice);
		read_unlock(&tasklist_lock);
		spin_lock_irq(&runqueue_lock);
		goto repeat_schedule;
	}

	/*
	 * from this point on nothing can prevent us from
	 * switching to the next task, save this fact in
	 * sched_data.
	 */
	sched_data->curr = next;
	task_set_cpu(next, this_cpu);
	spin_unlock_irq(&runqueue_lock);

	if (unlikely(prev == next)) {
		/* We won't go through the normal tail, so do this by hand */
		prev->policy &= ~SCHED_YIELD;
		goto same_process;
	}

#ifdef CONFIG_SMP
	/*
	 * maintain the per-process 'last schedule' value.
	 * (this has to be recalculated even if we reschedule to
	 * the same process) Currently this is only used on SMP,
	 * and it's approximate, so we do not have to maintain
	 * it while holding the runqueue spinlock.
	 */
	sched_data->last_schedule = get_cycles();

	/*
	 * We drop the scheduler lock early (it's a global spinlock),
	 * thus we have to lock the previous process from getting
	 * rescheduled during switch_to().
	 */

#endif /* CONFIG_SMP */

	kstat.context_swtch++;
	/*
	 * there are 3 processes which are affected by a context switch:
	 *
	 * prev == .... ==> (last => next)
	 *
	 * It's the 'much more previous' 'prev' that is on next's stack,
	 * but prev is set to (the just run) 'last' process by switch_to().
	 * This might sound slightly confusing but makes tons of sense.
	 */
	prepare_to_switch();
	{
		struct mm_struct *mm = next->mm;
		struct mm_struct *oldmm = prev->active_mm;
		if (!mm) {
			if (next->active_mm) BUG();
			next->active_mm = oldmm;
			atomic_inc(&oldmm->mm_count);
			enter_lazy_tlb(oldmm, next, this_cpu);
		} else {
			if (next->active_mm != mm) BUG();
			switch_mm(oldmm, mm, next, this_cpu);
		}

		if (!prev->mm) {
			prev->active_mm = NULL;
			mmdrop(oldmm);
		}
	}

	/*
	 * This just switches the register state and the
	 * stack.
	 */
	switch_to(prev, next, prev);
	__schedule_tail(prev);

same_process:
	reacquire_kernel_lock(current);
	if (current->need_resched)
		goto need_resched_back;
	return;
}

/*
 * The core wakeup function.  Non-exclusive wakeups (nr_exclusive == 0) just wake everything
 * up.  If it's an exclusive wakeup (nr_exclusive == small +ve number) then we wake all the
 * non-exclusive tasks and one exclusive task.
 *
 * There are circumstances in which we can try to wake a task which has already
 * started to run but is not in state TASK_RUNNING.  try_to_wake_up() returns zero
 * in this (rare) case, and we handle it by continuing to scan the queue.
 */
static inline void __wake_up_common (wait_queue_head_t *q, unsigned int mode,
				     int nr_exclusive, const int sync)
{
	struct list_head *tmp;
	struct task_struct *p;

	CHECK_MAGIC_WQHEAD(q);
	WQ_CHECK_LIST_HEAD(&q->task_list);

	list_for_each(tmp,&q->task_list) {
		unsigned int state;
		wait_queue_t *curr = list_entry(tmp, wait_queue_t, task_list);

		CHECK_MAGIC(curr->__magic);
		p = curr->task;
		state = p->state;
		if (state & mode) {
			WQ_NOTE_WAKER(curr);
			if (try_to_wake_up(p, sync) && (curr->flags&WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
				break;
		}
	}
}

void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr)
{
	if (q) {
		unsigned long flags;
		wq_read_lock_irqsave(&q->lock, flags);
		__wake_up_common(q, mode, nr, 0);
		wq_read_unlock_irqrestore(&q->lock, flags);
	}
}

void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr)
{
	if (q) {
		unsigned long flags;
		wq_read_lock_irqsave(&q->lock, flags);
		__wake_up_common(q, mode, nr, 1);
		wq_read_unlock_irqrestore(&q->lock, flags);
	}
}

void complete(struct completion *x)
{
	unsigned long flags;

	spin_lock_irqsave(&x->wait.lock, flags);
	x->done++;
	__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1, 0);
	spin_unlock_irqrestore(&x->wait.lock, flags);
}

void wait_for_completion(struct completion *x)
{
	spin_lock_irq(&x->wait.lock);
	if (!x->done) {
		DECLARE_WAITQUEUE(wait, current);

		wait.flags |= WQ_FLAG_EXCLUSIVE;
		__add_wait_queue_tail(&x->wait, &wait);
		do {
			__set_current_state(TASK_UNINTERRUPTIBLE);
			spin_unlock_irq(&x->wait.lock);
			schedule();
			spin_lock_irq(&x->wait.lock);
		} while (!x->done);
		__remove_wait_queue(&x->wait, &wait);
	}
	x->done--;
	spin_unlock_irq(&x->wait.lock);
}

#define	SLEEP_ON_VAR				\
	unsigned long flags;			\
	wait_queue_t wait;			\
	init_waitqueue_entry(&wait, current);

#define	SLEEP_ON_HEAD					\
	wq_write_lock_irqsave(&q->lock,flags);		\
	__add_wait_queue(q, &wait);			\
	wq_write_unlock(&q->lock);

#define	SLEEP_ON_TAIL						\
	wq_write_lock_irq(&q->lock);				\
	__remove_wait_queue(q, &wait);				\
	wq_write_unlock_irqrestore(&q->lock,flags);

void interruptible_sleep_on(wait_queue_head_t *q)
{
	SLEEP_ON_VAR

	current->state = TASK_INTERRUPTIBLE;

	SLEEP_ON_HEAD
	schedule();
	SLEEP_ON_TAIL
}

long interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
{
	SLEEP_ON_VAR

	current->state = TASK_INTERRUPTIBLE;

	SLEEP_ON_HEAD
	timeout = schedule_timeout(timeout);
	SLEEP_ON_TAIL

	return timeout;
}

void sleep_on(wait_queue_head_t *q)
{
	SLEEP_ON_VAR

	current->state = TASK_UNINTERRUPTIBLE;

	SLEEP_ON_HEAD
	schedule();
	SLEEP_ON_TAIL
}

long sleep_on_timeout(wait_queue_head_t *q, long timeout)
{
	SLEEP_ON_VAR

	current->state = TASK_UNINTERRUPTIBLE;

	SLEEP_ON_HEAD
	timeout = schedule_timeout(timeout);
	SLEEP_ON_TAIL

	return timeout;
}

void scheduling_functions_end_here(void) { }

#ifndef __alpha__

/*
 * This has been replaced by sys_setpriority.  Maybe it should be
 * moved into the arch dependent tree for those ports that require
 * it for backward compatibility?
 */

asmlinkage long sys_nice(int increment)
{
	long newprio;

	/*
	 *	Setpriority might change our priority at the same moment.
	 *	We don't have to worry. Conceptually one call occurs first
	 *	and we have a single winner.
	 */
	if (increment < 0) {
		if (!capable(CAP_SYS_NICE))
			return -EPERM;
		if (increment < -40)
			increment = -40;
	}
	if (increment > 40)
		increment = 40;

	newprio = current->nice + increment;
	if (newprio < -20)
		newprio = -20;
	if (newprio > 19)
		newprio = 19;
	current->nice = newprio;
	return 0;
}

#endif

static inline struct task_struct *find_process_by_pid(pid_t pid)
{
	struct task_struct *tsk = current;

	if (pid)
		tsk = find_task_by_pid(pid);
	return tsk;
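
The listing opens with the tail of schedule_timeout(), which arms a kernel timer, calls schedule(), and returns the number of jiffies left if the task was woken early. The usual calling convention in 2.4-era kernel code is to set the task state first and then call it. The sketch below is illustrative only; my_wait_about_a_second is a hypothetical name, not part of this file.

/* Illustrative sketch, assuming a 2.4-era kernel context:
 * typical caller-side use of the schedule_timeout() shown above.
 */
#include <linux/kernel.h>
#include <linux/sched.h>

static void my_wait_about_a_second(void)	/* hypothetical helper */
{
	signed long remaining;

	set_current_state(TASK_INTERRUPTIBLE);	/* state must be set before the call */
	remaining = schedule_timeout(HZ);	/* sleep for up to HZ jiffies (about 1 s) */

	if (remaining)
		printk("woken early, %ld jiffies left\n", remaining);
}

If the task is woken by a signal or an explicit wake-up before the timer fires, schedule_timeout() deletes the timer and returns the remaining time, which is why callers often loop until the return value reaches zero.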
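__wake_up_common() above is the core of the wait-queue wake-up path; the familiar wake_up()/wake_up_interruptible() macros in 2.4-era kernels boil down to the __wake_up() wrapper defined in this listing. The following sketch shows the classic sleep/wake pattern served by that code. It is an assumption-laden illustration: my_event_queue, my_condition, my_wait_for_event, and my_post_event are hypothetical names.

/* Illustrative sketch, assuming a 2.4-era kernel: the classic wait-queue
 * sleep/wake pattern whose wake side ends up in __wake_up_common() above.
 */
#include <linux/sched.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(my_event_queue);	/* hypothetical queue */
static volatile int my_condition;		/* hypothetical condition flag */

/* Waiter side: sleep until my_condition becomes true. */
static void my_wait_for_event(void)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&my_event_queue, &wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);	/* set state before re-checking */
		if (my_condition)
			break;
		schedule();			/* the schedule() defined above */
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&my_event_queue, &wait);
}

/* Waker side: set the condition, then wake sleepers via __wake_up(). */
static void my_post_event(void)
{
	my_condition = 1;
	wake_up_interruptible(&my_event_queue);
}

Setting the task state before re-checking the condition closes the lost-wakeup window: if the waker runs in between, try_to_wake_up() puts the task back to TASK_RUNNING and the subsequent schedule() simply returns.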
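complete() and wait_for_completion() above form a small one-shot synchronization primitive built on the same wait-queue machinery: the waiter sleeps exclusively in TASK_UNINTERRUPTIBLE until done becomes non-zero, and the signaller increments done and wakes one exclusive waiter. A minimal usage sketch follows, assuming a 2.4-era kernel; setup_done, my_worker_thread, and my_driver_init are hypothetical names, and kernel_thread() merely stands in for whatever creates the second context.

/* Illustrative sketch only: the usual pairing of the complete() /
 * wait_for_completion() functions defined above.
 */
#include <linux/completion.h>
#include <linux/sched.h>

static DECLARE_COMPLETION(setup_done);		/* done == 0, empty wait queue */

static int my_worker_thread(void *unused)	/* hypothetical worker */
{
	/* ... perform one-time initialisation ... */
	complete(&setup_done);			/* done++, wake one exclusive waiter */
	return 0;
}

static int my_driver_init(void)			/* hypothetical init path */
{
	kernel_thread(my_worker_thread, NULL, 0);
	wait_for_completion(&setup_done);	/* block until the worker calls complete() */
	return 0;
}

Because done is a counter rather than a flag, a complete() issued before the waiter arrives is not lost; wait_for_completion() simply decrements it and returns immediately.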
