
📄 sched.c

📁 A Linux 2.4.20 kernel adaptation for the Samsung s3c2410 development board.
💻 C
📖 Page 1 of 4
}

#ifdef CONFIG_PREEMPT
/*
 * this is the entry point to schedule() from in-kernel preemption.
 */
asmlinkage void preempt_schedule(void)
{
	/*
	 * Interrupts disabled implies no kernel preemption.  Just return.
	 */
	if (unlikely(irqs_disabled()))
		return;

need_resched:
	current->preempt_count += PREEMPT_ACTIVE;
	schedule();
	current->preempt_count -= PREEMPT_ACTIVE;

	/* we can miss a preemption between schedule() and now */
	barrier();
	if (unlikely((current->need_resched)))
		goto need_resched;
}
#endif /* CONFIG_PREEMPT */

/*
 * The core wakeup function.  Non-exclusive wakeups (nr_exclusive == 0) just
 * wake everything up.  If it's an exclusive wakeup (nr_exclusive == small +ve
 * number) then we wake all the non-exclusive tasks and one exclusive task.
 *
 * There are circumstances in which we can try to wake a task which has already
 * started to run but is not in state TASK_RUNNING.  try_to_wake_up() returns
 * zero in this (rare) case, and we handle it by continuing to scan the queue.
 */
static inline void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
				    int nr_exclusive, int sync)
{
	struct list_head *tmp;
	unsigned int state;
	wait_queue_t *curr;
	task_t *p;

	list_for_each(tmp, &q->task_list) {
		curr = list_entry(tmp, wait_queue_t, task_list);
		p = curr->task;
		state = p->state;
		if ((state & mode) && try_to_wake_up(p, sync) &&
			((curr->flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive))
				break;
	}
}

/**
 * __wake_up - wake up threads blocked on a waitqueue.
 * @q: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 */
void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
{
	unsigned long flags;

	if (unlikely(!q))
		return;

	spin_lock_irqsave(&q->lock, flags);
	__wake_up_common(q, mode, nr_exclusive, 0);
	spin_unlock_irqrestore(&q->lock, flags);
}
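For context, these wakeup primitives are normally reached through the wake_up*() wrapper macros rather than called directly. The sketch below is not part of sched.c; my_queue, my_condition and the my_driver_* functions are hypothetical names, and the standard 2.4 wait-queue API from <linux/sched.h> / <linux/wait.h> is assumed.

/*
 * Minimal illustrative sketch of the usual driver-side pattern.
 */
#include <linux/sched.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(my_queue);	/* hypothetical wait queue */
static volatile int my_condition;		/* hypothetical wakeup condition */

/* Sleeping side: block (interruptibly) until my_condition becomes true. */
static void my_driver_wait(void)
{
	wait_event_interruptible(my_queue, my_condition != 0);
}

/* Waking side, e.g. an interrupt handler: set the condition, then wake the
 * sleepers.  wake_up() ends up in __wake_up() above with a mode covering both
 * TASK_INTERRUPTIBLE and TASK_UNINTERRUPTIBLE and nr_exclusive == 1. */
static void my_driver_signal(void)
{
	my_condition = 1;
	wake_up(&my_queue);
}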
#if CONFIG_SMP

/**
 * __wake_up_sync - wake up threads blocked on a waitqueue.
 * @q: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 *
 * The sync wakeup differs in that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - ie. the two threads are 'synchronized'
 * with each other. This can prevent needless bouncing between CPUs.
 */
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
{
	unsigned long flags;

	if (unlikely(!q))
		return;

	spin_lock_irqsave(&q->lock, flags);
	if (likely(nr_exclusive))
		__wake_up_common(q, mode, nr_exclusive, 1);
	else
		__wake_up_common(q, mode, nr_exclusive, 0);
	spin_unlock_irqrestore(&q->lock, flags);
}
#endif

void complete(struct completion *x)
{
	unsigned long flags;

	spin_lock_irqsave(&x->wait.lock, flags);
	x->done++;
	__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1, 0);
	spin_unlock_irqrestore(&x->wait.lock, flags);
}

void wait_for_completion(struct completion *x)
{
	spin_lock_irq(&x->wait.lock);
	if (!x->done) {
		DECLARE_WAITQUEUE(wait, current);

		wait.flags |= WQ_FLAG_EXCLUSIVE;
		__add_wait_queue_tail(&x->wait, &wait);
		do {
			__set_current_state(TASK_UNINTERRUPTIBLE);
			spin_unlock_irq(&x->wait.lock);
			schedule();
			spin_lock_irq(&x->wait.lock);
		} while (!x->done);
		__remove_wait_queue(&x->wait, &wait);
	}
	x->done--;
	spin_unlock_irq(&x->wait.lock);
}
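The complete()/wait_for_completion() pair above is the whole completion API as implemented here. A minimal, hypothetical usage sketch follows; my_work_done, my_worker_thread and my_wait_for_worker are illustrative names, not from this file.

#include <linux/completion.h>
#include <linux/sched.h>

static DECLARE_COMPLETION(my_work_done);	/* hypothetical completion */

/* Worker: do the job, then signal anyone blocked in wait_for_completion().
 * complete() bumps ->done and wakes one exclusive waiter. */
static int my_worker_thread(void *unused)
{
	/* ... perform the work ... */
	complete(&my_work_done);
	return 0;
}

/* Caller: sleeps in TASK_UNINTERRUPTIBLE until ->done is non-zero, then
 * consumes one "done" count, exactly as wait_for_completion() above shows. */
static void my_wait_for_worker(void)
{
	wait_for_completion(&my_work_done);
}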
#define	SLEEP_ON_VAR				\
	unsigned long flags;			\
	wait_queue_t wait;			\
	init_waitqueue_entry(&wait, current);

#define SLEEP_ON_HEAD					\
	spin_lock_irqsave(&q->lock,flags);		\
	__add_wait_queue(q, &wait);			\
	spin_unlock(&q->lock);

#define	SLEEP_ON_TAIL						\
	spin_lock_irq(&q->lock);				\
	__remove_wait_queue(q, &wait);				\
	spin_unlock_irqrestore(&q->lock, flags);

void interruptible_sleep_on(wait_queue_head_t *q)
{
	SLEEP_ON_VAR

	current->state = TASK_INTERRUPTIBLE;

	SLEEP_ON_HEAD
	schedule();
	SLEEP_ON_TAIL
}

long interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
{
	SLEEP_ON_VAR

	current->state = TASK_INTERRUPTIBLE;

	SLEEP_ON_HEAD
	timeout = schedule_timeout(timeout);
	SLEEP_ON_TAIL

	return timeout;
}

void sleep_on(wait_queue_head_t *q)
{
	SLEEP_ON_VAR

	current->state = TASK_UNINTERRUPTIBLE;

	SLEEP_ON_HEAD
	schedule();
	SLEEP_ON_TAIL
}

long sleep_on_timeout(wait_queue_head_t *q, long timeout)
{
	SLEEP_ON_VAR

	current->state = TASK_UNINTERRUPTIBLE;

	SLEEP_ON_HEAD
	timeout = schedule_timeout(timeout);
	SLEEP_ON_TAIL

	return timeout;
}

void scheduling_functions_end_here(void) { }

void set_user_nice(task_t *p, long nice)
{
	unsigned long flags;
	prio_array_t *array;
	runqueue_t *rq;

	if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
		return;
	/*
	 * We have to be careful, if called from sys_setpriority(),
	 * the task might be in the middle of scheduling on another CPU.
	 */
	rq = task_rq_lock(p, &flags);
	if (rt_task(p)) {
		p->static_prio = NICE_TO_PRIO(nice);
		goto out_unlock;
	}
	array = p->array;
	if (array)
		dequeue_task(p, array);
	p->static_prio = NICE_TO_PRIO(nice);
	p->prio = NICE_TO_PRIO(nice);
	if (array) {
		enqueue_task(p, array);
		/*
		 * If the task is running and lowered its priority,
		 * or increased its priority then reschedule its CPU:
		 */
		if ((NICE_TO_PRIO(nice) < p->static_prio) || (p == rq->curr))
			resched_task(rq->curr);
	}
out_unlock:
	task_rq_unlock(rq, &flags);
}

#ifndef __alpha__

/**
 * sys_nice - change the priority of the current process.
 * @increment: priority increment
 *
 * sys_setpriority is a more generic, but much slower function that
 * does similar things.
 */
asmlinkage long sys_nice(int increment)
{
	long nice;

	/*
	 *	Setpriority might change our priority at the same moment.
	 *	We don't have to worry. Conceptually one call occurs first
	 *	and we have a single winner.
	 */
	if (increment < 0) {
		if (!capable(CAP_SYS_NICE))
			return -EPERM;
		if (increment < -40)
			increment = -40;
	}
	if (increment > 40)
		increment = 40;

	nice = PRIO_TO_NICE(current->static_prio) + increment;
	if (nice < -20)
		nice = -20;
	if (nice > 19)
		nice = 19;
	set_user_nice(current, nice);
	return 0;
}

#endif

/**
 * task_prio - return the priority value of a given task.
 * @p: the task in question.
 *
 * This is the priority value as seen by users in /proc.
 * RT tasks are offset by -200. Normal tasks are centered
 * around 0, value goes from -16 to +15.
 */
int task_prio(task_t *p)
{
	return p->prio - MAX_RT_PRIO;
}

/**
 * task_nice - return the nice value of a given task.
 * @p: the task in question.
 */
int task_nice(task_t *p)
{
	return TASK_NICE(p);
}

/**
 * idle_cpu - is a given cpu idle currently?
 * @cpu: the processor in question.
 */
inline int idle_cpu(int cpu)
{
	return cpu_curr(cpu) == cpu_rq(cpu)->idle;
}

/**
 * find_process_by_pid - find a process with a matching PID value.
 * @pid: the pid in question.
 */
static inline task_t *find_process_by_pid(pid_t pid)
{
	return pid ? find_task_by_pid(pid) : current;
}

/*
 * setscheduler - change the scheduling policy and/or RT priority of a thread.
 */
int setscheduler(pid_t pid, int policy, struct sched_param *param)
{
	struct sched_param lp;
	int retval = -EINVAL;
	prio_array_t *array;
	unsigned long flags;
	runqueue_t *rq;
	task_t *p;
	int oldprio;

	if (!param || pid < 0)
		goto out_nounlock;

	retval = -EFAULT;
	if (copy_from_user(&lp, param, sizeof(struct sched_param)))
		goto out_nounlock;

	/*
	 * We play safe to avoid deadlocks.
	 */
	read_lock_irq(&tasklist_lock);

	p = find_process_by_pid(pid);

	retval = -ESRCH;
	if (!p)
		goto out_unlock_tasklist;

	/*
	 * To be able to change p->policy safely, the appropriate
	 * runqueue lock must be held.
	 */
	rq = task_rq_lock(p, &flags);

	if (policy < 0)
		policy = p->policy;
	else {
		retval = -EINVAL;
		if (policy != SCHED_FIFO && policy != SCHED_RR &&
				policy != SCHED_OTHER)
			goto out_unlock;
	}

	/*
	 * Valid priorities for SCHED_FIFO and SCHED_RR are
	 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_OTHER is 0.
	 */
	retval = -EINVAL;
	if (lp.sched_priority < 0 || lp.sched_priority > MAX_USER_RT_PRIO-1)
		goto out_unlock;
	if ((policy == SCHED_OTHER) != (lp.sched_priority == 0))
		goto out_unlock;

	retval = -EPERM;
	if ((policy == SCHED_FIFO || policy == SCHED_RR) &&
	    !capable(CAP_SYS_NICE))
		goto out_unlock;
	if ((current->euid != p->euid) && (current->euid != p->uid) &&
	    !capable(CAP_SYS_NICE))
		goto out_unlock;

	array = p->array;
	if (array)
		deactivate_task(p, task_rq(p));
	retval = 0;
	p->policy = policy;
	p->rt_priority = lp.sched_priority;
	oldprio = p->prio;
	if (policy != SCHED_OTHER)
		p->prio = MAX_USER_RT_PRIO-1 - p->rt_priority;
	else
		p->prio = p->static_prio;
	if (array) {
		activate_task(p, task_rq(p));
		/*
		 * Reschedule if we are currently running on this runqueue and
		 * our priority decreased, or if we are not currently running on
		 * this runqueue and our priority is higher than the current's
		 */
		if (rq->curr == p) {
			if (p->prio > oldprio)
				resched_task(rq->curr);
		} else if (p->prio < rq->curr->prio)
			resched_task(rq->curr);
	}

out_unlock:
	task_rq_unlock(rq, &flags);
out_unlock_tasklist:
	read_unlock_irq(&tasklist_lock);

out_nounlock:
	return retval;
}
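To see the checks in setscheduler() from the caller's side, here is a hypothetical user-space program (not part of this file) that requests SCHED_FIFO through the libc sched_setscheduler() wrapper. As the code above enforces, the priority must lie in 1..MAX_USER_RT_PRIO-1 and the caller needs CAP_SYS_NICE.

/* Hypothetical user-space illustration; builds against a normal libc. */
#include <stdio.h>
#include <sched.h>

int main(void)
{
	struct sched_param sp;

	sp.sched_priority = 10;		/* must be non-zero for SCHED_FIFO */
	/* pid 0 means "the calling process"; see find_process_by_pid() above. */
	if (sched_setscheduler(0, SCHED_FIFO, &sp) == -1) {
		perror("sched_setscheduler");	/* EPERM without CAP_SYS_NICE, EINVAL for a bad priority */
		return 1;
	}
	printf("policy is now %d (SCHED_FIFO == %d)\n",
	       sched_getscheduler(0), SCHED_FIFO);
	return 0;
}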
/**
 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
 * @pid: the pid in question.
 * @policy: new policy
 * @param: structure containing the new RT priority.
 */
asmlinkage long sys_sched_setscheduler(pid_t pid, int policy,
				      struct sched_param *param)
{
	return setscheduler(pid, policy, param);
}

/**
 * sys_sched_setparam - set/change the RT priority of a thread
 * @pid: the pid in question.
 * @param: structure containing the new RT priority.
 */
asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param *param)
{
	return setscheduler(pid, -1, param);
}

/**
 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
 * @pid: the pid in question.
 */
asmlinkage long sys_sched_getscheduler(pid_t pid)
{
	int retval = -EINVAL;
	task_t *p;

	if (pid < 0)
		goto out_nounlock;

	retval = -ESRCH;
	read_lock(&tasklist_lock);
	p = find_process_by_pid(pid);
	if (p)
		retval = p->policy;
	read_unlock(&tasklist_lock);

out_nounlock:
	return retval;
}

/**
 * sys_sched_getparam - get the RT priority of a thread
 * @pid: the pid in question.
 * @param: sched_param structure containing the RT priority.
 */
asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param *param)
{
	struct sched_param lp;
	int retval = -EINVAL;
	task_t *p;

	if (!param || pid < 0)
		goto out_nounlock;

	read_lock(&tasklist_lock);
	p = find_process_by_pid(pid);
	retval = -ESRCH;
	if (!p)
		goto out_unlock;
	lp.sched_priority = p->rt_priority;
	read_unlock(&tasklist_lock);

	/*
	 * This one might sleep, we cannot do it with a spinlock held ...
	 */
	retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;

out_nounlock:
	return retval;

out_unlock:
	read_unlock(&tasklist_lock);
	return retval;
}

/**
 * sys_sched_setaffinity - set the cpu affinity of a process
 * @pid: pid of the process
 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
 * @user_mask_ptr: user-space pointer to the new cpu mask
 */
asmlinkage int sys_sched_setaffinity(pid_t pid, unsigned int len,
				     unsigned long *user_mask_ptr)
{
	unsigned long new_mask;
	int retval;
	task_t *p;

	if (len < sizeof(new_mask))
		return -EINVAL;

	if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask)))
		return -EFAULT;

	new_mask &= cpu_online_map;
	if (!new_mask)
		return -EINVAL;

	read_lock(&tasklist_lock);

	p = find_process_by_pid(pid);
	if (!p) {
		read_unlock(&tasklist_lock);
		return -ESRCH;
	}

	/*
	 * It is not safe to call set_cpus_allowed with the
	 * tasklist_lock held.  We will bump the task_struct's
	 * usage count and then drop tasklist_lock.
	 */
	get_task_struct(p);
	read_unlock(&tasklist_lock);
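The listing breaks off here (page 1 of 4), inside sys_sched_setaffinity(). For orientation, a hypothetical user-space caller of this syscall is sketched below, using the modern glibc cpu_set_t wrapper; the raw 2.4 syscall shown above takes a plain unsigned long mask instead, so the wrapper signature is an assumption about the caller's libc, not part of this file.

/* Hypothetical user-space illustration of the affinity syscall. */
#define _GNU_SOURCE
#include <stdio.h>
#include <sched.h>

int main(void)
{
	cpu_set_t mask;

	CPU_ZERO(&mask);
	CPU_SET(0, &mask);	/* pin the calling process to CPU 0 */

	/* pid 0 means "the calling process", as find_process_by_pid() shows. */
	if (sched_setaffinity(0, sizeof(mask), &mask) == -1) {
		perror("sched_setaffinity");
		return 1;
	}
	printf("now restricted to CPU 0\n");
	return 0;
}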
