⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 sched.c

📁 中科院徐志伟老师一书《操作系统 原理·技术与编程》的源代码和习题解答
💻 C
📖 第 1 页 / 共 3 页
字号:
		tsk = find_task_by_pid(pid);
	return tsk;
}

/*
 * Common backend for sched_setscheduler()/sched_setparam(): validate the
 * requested policy and priority against the target task and the caller's
 * credentials, then commit the change under the runqueue lock.
 * A negative 'policy' means "keep the task's current policy" (the
 * sched_setparam() path).  Returns 0 or a negative errno.
 */
static int setscheduler(pid_t pid, int policy, 
			struct sched_param *param)
{
	struct sched_param lp;
	struct task_struct *p;
	int retval;

	retval = -EINVAL;
	if (!param || pid < 0)
		goto out_nounlock;

	/* Copy the user's parameters before taking any locks. */
	retval = -EFAULT;
	if (copy_from_user(&lp, param, sizeof(struct sched_param)))
		goto out_nounlock;

	/*
	 * We play safe to avoid deadlocks.
	 */
	read_lock_irq(&tasklist_lock);
	spin_lock(&runqueue_lock);

	p = find_process_by_pid(pid);

	retval = -ESRCH;
	if (!p)
		goto out_unlock;
			
	if (policy < 0)
		policy = p->policy;	/* sched_setparam(): keep current policy */
	else {
		retval = -EINVAL;
		if (policy != SCHED_FIFO && policy != SCHED_RR &&
				policy != SCHED_OTHER)
			goto out_unlock;
	}
	
	/*
	 * Valid priorities for SCHED_FIFO and SCHED_RR are 1..99, valid
	 * priority for SCHED_OTHER is 0.
	 */
	retval = -EINVAL;
	if (lp.sched_priority < 0 || lp.sched_priority > 99)
		goto out_unlock;
	if ((policy == SCHED_OTHER) != (lp.sched_priority == 0))
		goto out_unlock;

	/* Real-time policies and cross-user changes need CAP_SYS_NICE. */
	retval = -EPERM;
	if ((policy == SCHED_FIFO || policy == SCHED_RR) && 
	    !capable(CAP_SYS_NICE))
		goto out_unlock;
	if ((current->euid != p->euid) && (current->euid != p->uid) &&
	    !capable(CAP_SYS_NICE))
		goto out_unlock;

	retval = 0;
	p->policy = policy;
	p->rt_priority = lp.sched_priority;
	/* Re-queue so the new priority takes effect at the next pick. */
	if (task_on_runqueue(p))
		move_first_runqueue(p);

	current->need_resched = 1;

out_unlock:
	spin_unlock(&runqueue_lock);
	read_unlock_irq(&tasklist_lock);

out_nounlock:
	return retval;
}

/* sys_sched_setscheduler: set both policy and priority of a task. */
asmlinkage long sys_sched_setscheduler(pid_t pid, int policy, 
				      struct sched_param *param)
{
	return setscheduler(pid, policy, param);
}

/* sys_sched_setparam: change only the priority; -1 keeps the policy. */
asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param *param)
{
	return setscheduler(pid, -1, param);
}

/*
 * sys_sched_getscheduler: return a task's scheduling policy, with the
 * transient SCHED_YIELD bit masked out, or a negative errno.
 */
asmlinkage long sys_sched_getscheduler(pid_t pid)
{
	struct task_struct *p;
	int retval;

	retval = -EINVAL;
	if (pid < 0)
		goto out_nounlock;

	retval = -ESRCH;
	read_lock(&tasklist_lock);
	p = find_process_by_pid(pid);
	if (p)
		retval = p->policy & ~SCHED_YIELD;
	read_unlock(&tasklist_lock);

out_nounlock:
	return retval;
}

/*
 * sys_sched_getparam: copy a task's rt_priority out to user space.
 * The priority is snapshotted under tasklist_lock, which is dropped
 * before copy_to_user() since that may sleep.
 */
asmlinkage long 
sys_sched_getparam(pid_t pid, struct sched_param *param)
{
	struct task_struct *p;
	struct sched_param lp;
	int retval;

	retval = -EINVAL;
	if (!param || pid < 0)
		goto out_nounlock;

	read_lock(&tasklist_lock);
	p = find_process_by_pid(pid);
	retval = -ESRCH;
	if (!p)
		goto out_unlock;
	lp.sched_priority = p->rt_priority;
	read_unlock(&tasklist_lock);

	/*
	 * This one might sleep, we cannot do it with a spinlock held ...
	 */
	retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;

out_nounlock:
	return retval;

out_unlock:
	read_unlock(&tasklist_lock);
	return retval;
}

/*
 * sys_sched_yield: move the caller to the back of the runqueue so other
 * runnable tasks get a turn.  Does nothing if nothing else is pending.
 */
asmlinkage long sys_sched_yield(void)
{
	/*
	 * Trick. sched_yield() first counts the number of truly 
	 * 'pending' runnable processes, then returns if it's
	 * only the current processes. (This test does not have
	 * to be atomic.) In threaded applications this optimization
	 * gets triggered quite often.
	 */
	int nr_pending = nr_running;

#if CONFIG_SMP
	int i;

	// Subtract non-idle processes running on other CPUs.
	for (i = 0; i < smp_num_cpus; i++) {
		int cpu = cpu_logical_map(i);
		if (aligned_data[cpu].schedule_data.curr != idle_task(cpu))
			nr_pending--;
	}
#else
	// on UP this process is on the runqueue as well
	nr_pending--;
#endif
	if (nr_pending) {
		/*
		 * This process can only be rescheduled by us,
		 * so this is safe without any locking.
		 */
		if (current->policy == SCHED_OTHER)
			current->policy |= SCHED_YIELD;
		current->need_resched = 1;

		spin_lock_irq(&runqueue_lock);
		move_last_runqueue(current);
		spin_unlock_irq(&runqueue_lock);
	}
	return 0;
}

/*
 * sys_sched_get_priority_max: highest valid priority for a policy
 * (99 for the real-time policies, 0 for SCHED_OTHER), -EINVAL otherwise.
 */
asmlinkage long sys_sched_get_priority_max(int policy)
{
	int ret = -EINVAL;

	switch (policy) {
	case SCHED_FIFO:
	case SCHED_RR:
		ret = 99;
		break;
	case SCHED_OTHER:
		ret = 0;
		break;
	}
	return ret;
}

/*
 * sys_sched_get_priority_min: lowest valid priority for a policy
 * (1 for the real-time policies, 0 for SCHED_OTHER), -EINVAL otherwise.
 */
asmlinkage long sys_sched_get_priority_min(int policy)
{
	int ret = -EINVAL;

	switch (policy) {
	case SCHED_FIFO:
	case SCHED_RR:
		ret = 1;
		break;
	case SCHED_OTHER:
		ret = 0;
	}
	return ret;
}

/*
 * sys_sched_rr_get_interval: report a task's round-robin timeslice
 * as a timespec.  SCHED_FIFO tasks report 0 (they run until they block);
 * everything else reports the nice-derived tick allowance.
 */
asmlinkage long sys_sched_rr_get_interval(pid_t pid, struct timespec *interval)
{
	struct timespec t;
	struct task_struct *p;
	int retval = -EINVAL;

	if (pid < 0)
		goto out_nounlock;

	retval = -ESRCH;
	read_lock(&tasklist_lock);
	p = find_process_by_pid(pid);
	if (p)
		/*
		 * NOTE(review): bitwise '& SCHED_FIFO' rather than
		 * '== SCHED_FIFO' — relies on SCHED_FIFO being a distinct
		 * bit in the policy value; confirm against the policy
		 * constants before touching this.
		 */
		jiffies_to_timespec(p->policy & SCHED_FIFO ? 0 : NICE_TO_TICKS(p->nice),
				    &t);
	read_unlock(&tasklist_lock);
	if (p)
		retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
out_nounlock:
	return retval;
}

/*
 * show_task: printk a one-line state dump of a task (state letter, saved
 * PC, free kernel-stack estimate, pid and family links), followed by a
 * kernel stack trace.  Debugging aid used by show_state().
 */
static void show_task(struct task_struct * p)
{
	unsigned long free = 0;
	int state;
	static const char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" };

	printk("%-13.13s ", p->comm);
	/* Map the state bitmask to an index: 0 = running, else lowest set bit + 1. */
	state = p->state ? ffz(~p->state) + 1 : 0;
	/*
	 * NOTE(review): bound uses sizeof(stat_nam)/sizeof(char *) — the
	 * element count (correct); the raw sizeof alone would over-read.
	 */
	if (((unsigned) state) < sizeof(stat_nam)/sizeof(char *))
		printk(stat_nam[state]);
	else
		printk(" ");
#if (BITS_PER_LONG == 32)
	if (p == current)
		printk(" current  ");
	else
		printk(" %08lX ", thread_saved_pc(&p->thread));
#else
	if (p == current)
		printk("   current task   ");
	else
		printk(" %016lx ", thread_saved_pc(&p->thread));
#endif
	{
		/*
		 * Estimate unused stack by scanning past the task struct
		 * for the first nonzero word.
		 */
		unsigned long * n = (unsigned long *) (p+1);
		while (!*n)
			n++;
		free = (unsigned long) n - (unsigned long)(p+1);
	}
	printk("%5lu %5d %6d ", free, p->pid, p->p_pptr->pid);
	if (p->p_cptr)
		printk("%5d ", p->p_cptr->pid);
	else
		printk("      ");
	if (p->p_ysptr)
		printk("%7d", p->p_ysptr->pid);
	else
		printk("       ");
	if (p->p_osptr)
		printk(" %5d", p->p_osptr->pid);
	else
		printk("      ");
	if (!p->mm)
		printk(" (L-TLB)\n");
	else
		printk(" (NOTLB)\n");

	{
		extern void show_trace_task(struct task_struct *tsk);
		show_trace_task(p);
	}
}

/*
 * render_sigset_t: render a signal set into 'buffer' as hex digits,
 * 4 signals per digit, highest-numbered signals first.  The buffer is
 * NUL-terminated; returns a pointer to the terminating NUL.
 */
char * render_sigset_t(sigset_t *set, char *buffer)
{
	int i = _NSIG, x;
	do {
		i -= 4, x = 0;
		if (sigismember(set, i+1)) x |= 1;
		if (sigismember(set, i+2)) x |= 2;
		if (sigismember(set, i+3)) x |= 4;
		if (sigismember(set, i+4)) x |= 8;
		*buffer++ = (x < 10 ? '0' : 'a' - 10) + x;
	} while (i >= 4);
	*buffer = 0;
	return buffer;
}

/*
 * show_state: dump a show_task() line for every task in the system,
 * under tasklist_lock.  Column headers differ by word size to match
 * the PC field width printed by show_task().
 */
void show_state(void)
{
	struct task_struct *p;

#if (BITS_PER_LONG == 32)
	printk("\n"
	       "                         free                        sibling\n");
	printk("  task             PC    stack   pid father child younger older\n");
#else
	printk("\n"
	       "                                 free                        sibling\n");
	printk("  task                 PC        stack   pid father child younger older\n");
#endif
	read_lock(&tasklist_lock);
	for_each_task(p) {
		/*
		 * reset the NMI-timeout, listing all files on a slow
		 * console might take a lot of time:
		 */
		touch_nmi_watchdog();
		show_task(p);
	}
	read_unlock(&tasklist_lock);
}

/**
 * reparent_to_init() - Reparent the calling kernel thread to the init task.
 *
 * If a kernel thread is launched as a result of a system call, or if
 * it ever exits, it should generally reparent itself to init so that
 * it is correctly cleaned up on exit.
 *
 * The various task state such as scheduling policy and priority may have
 * been inherited from a user process, so we reset them to sane values here.
 *
 * NOTE that reparent_to_init() gives the caller full capabilities.
 */
void reparent_to_init(void)
{
	struct task_struct *this_task = current;

	write_lock_irq(&tasklist_lock);

	/* Reparent to init */
	REMOVE_LINKS(this_task);
	this_task->p_pptr = child_reaper;
	this_task->p_opptr = child_reaper;
	SET_LINKS(this_task);

	/* Set the exit signal to SIGCHLD so we signal init on exit */
	this_task->exit_signal = SIGCHLD;

	/* We also take the runqueue_lock while altering task fields
	 * which affect scheduling decisions */
	spin_lock(&runqueue_lock);

	this_task->ptrace = 0;
	this_task->nice = DEF_NICE;
	this_task->policy = SCHED_OTHER;
	/* cpus_allowed? */
	/* rt_priority? */
	/* signals? */
	this_task->cap_effective = CAP_INIT_EFF_SET;
	this_task->cap_inheritable = CAP_INIT_INH_SET;
	this_task->cap_permitted = CAP_FULL_SET;
	this_task->keep_capabilities = 0;
	memcpy(this_task->rlim, init_task.rlim, sizeof(*(this_task->rlim)));
	this_task->user = INIT_USER;

	spin_unlock(&runqueue_lock);
	write_unlock_irq(&tasklist_lock);
}

/*
 *	Put all the gunge required to become a kernel thread without
 *	attached user resources in one place where it belongs.
 */
void daemonize(void)
{
	struct fs_struct *fs;

	/*
	 * If we were started as result of loading a module, close all of the
	 * user space pages.  We don't need them, and if we didn't close them
	 * they would be locked into memory.
	 */
	exit_mm(current);

	current->session = 1;
	current->pgrp = 1;
	current->tty = NULL;

	/* Become as one with the init task */

	exit_fs(current);	/* current->fs->count--; */
	fs = init_task.fs;
	current->fs = fs;
	atomic_inc(&fs->count);
 	exit_files(current);
	current->files = init_task.files;
	atomic_inc(&current->files->count);
}

extern unsigned long wait_init_idle;

/*
 * init_idle: install the current task as this CPU's idle task in the
 * per-CPU schedule_data, pulling it off the runqueue if it somehow got
 * there, and clear this CPU's bit in wait_init_idle.
 */
void __init init_idle(void)
{
	struct schedule_data * sched_data;
	sched_data = &aligned_data[smp_processor_id()].schedule_data;

	if (current != &init_task && task_on_runqueue(current)) {
		printk("UGH! (%d:%d) was on the runqueue, removing.\n",
			smp_processor_id(), current->pid);
		del_from_runqueue(current);
	}
	sched_data->curr = current;
	sched_data->last_schedule = get_cycles();
	clear_bit(current->processor, &wait_init_idle);
}

extern void init_timervecs (void);

/*
 * sched_init: boot-time scheduler setup — clear the pid hash, initialise
 * the timer vectors and bottom halves, and account the boot CPU's lazy
 * use of init_mm.
 */
void __init sched_init(void)
{
	/*
	 * We have to do a little magic to get the first
	 * process right in SMP mode.
	 */
	int cpu = smp_processor_id();
	int nr;

	init_task.processor = cpu;

	for(nr = 0; nr < PIDHASH_SZ; nr++)
		pidhash[nr] = NULL;

	init_timervecs();

	init_bh(TIMER_BH, timer_bh);
	init_bh(TQUEUE_BH, tqueue_bh);
	init_bh(IMMEDIATE_BH, immediate_bh);

	/*
	 * The boot idle thread does lazy MMU switching as well:
	 */
	atomic_inc(&init_mm.mm_count);
	enter_lazy_tlb(&init_mm, current, cpu);
}

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -