
sched.c

Linux 2.4.20 kernel port for Samsung's S3C2410 development board.
Language: C
Page 1 of 4
	retval = -EPERM;
	if ((current->euid != p->euid) && (current->euid != p->uid) &&
			!capable(CAP_SYS_NICE))
		goto out_unlock;

	retval = 0;
	set_cpus_allowed(p, new_mask);

out_unlock:
	free_task_struct(p);
	return retval;
}

/**
 * sys_sched_getaffinity - get the cpu affinity of a process
 * @pid: pid of the process
 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
 * @user_mask_ptr: user-space pointer to hold the current cpu mask
 */
asmlinkage int sys_sched_getaffinity(pid_t pid, unsigned int len,
				     unsigned long *user_mask_ptr)
{
	unsigned int real_len;
	unsigned long mask;
	int retval;
	task_t *p;

	real_len = sizeof(mask);
	if (len < real_len)
		return -EINVAL;

	read_lock(&tasklist_lock);

	retval = -ESRCH;
	p = find_process_by_pid(pid);
	if (!p)
		goto out_unlock;

	retval = 0;
	mask = p->cpus_allowed & cpu_online_map;

out_unlock:
	read_unlock(&tasklist_lock);
	if (retval)
		return retval;
	if (copy_to_user(user_mask_ptr, &mask, real_len))
		return -EFAULT;
	return real_len;
}

/**
 * sys_sched_yield - yield the current processor to other threads.
 *
 * this function yields the current CPU by moving the calling thread
 * to the expired array. If there are no other threads running on this
 * CPU then this function will return.
 */
asmlinkage long sys_sched_yield(void)
{
	runqueue_t *rq = this_rq_lock();
	prio_array_t *array = current->array;

	/*
	 * We implement yielding by moving the task into the expired
	 * queue.
	 *
	 * (special rule: RT tasks will just roundrobin in the active
	 *  array.)
	 */
	if (likely(!rt_task(current))) {
		dequeue_task(current, array);
		enqueue_task(current, rq->expired);
	} else {
		list_del(&current->run_list);
		list_add_tail(&current->run_list, array->queue + current->prio);
	}
	spin_unlock_no_resched(&rq->lock);

	schedule();

	return 0;
}

/**
 * yield - yield the current processor to other threads.
 *
 * this is a shortcut for kernel-space yielding - it marks the
 * thread runnable and calls sys_sched_yield().
 */
void yield(void)
{
	set_current_state(TASK_RUNNING);
	sys_sched_yield();
}

/**
 * _cond_resched	-	conditionally reschedule
 *
 * Helper function called if cond_resched inline decides we have
 * exceeded the timeslice at this point. We give up the processor
 * having made sure we will get it back
 */
void __cond_resched(void)
{
	set_current_state(TASK_RUNNING);
	schedule();
}

/**
 * sys_sched_get_priority_max - return maximum RT priority.
 * @policy: scheduling class.
 *
 * this syscall returns the maximum rt_priority that can be used
 * by a given scheduling class.
 */
asmlinkage long sys_sched_get_priority_max(int policy)
{
	int ret = -EINVAL;

	switch (policy) {
	case SCHED_FIFO:
	case SCHED_RR:
		ret = MAX_USER_RT_PRIO-1;
		break;
	case SCHED_OTHER:
		ret = 0;
		break;
	}
	return ret;
}

/**
 * sys_sched_get_priority_min - return minimum RT priority.
 * @policy: scheduling class.
 *
 * this syscall returns the minimum rt_priority that can be used
 * by a given scheduling class.
 */
asmlinkage long sys_sched_get_priority_min(int policy)
{
	int ret = -EINVAL;

	switch (policy) {
	case SCHED_FIFO:
	case SCHED_RR:
		ret = 1;
		break;
	case SCHED_OTHER:
		ret = 0;
	}
	return ret;
}

/**
 * sys_sched_rr_get_interval - return the default timeslice of a process.
 * @pid: pid of the process.
 * @interval: userspace pointer to the timeslice value.
 *
 * this syscall writes the default timeslice value of a given process
 * into the user-space timespec buffer. A value of '0' means infinity.
 */
asmlinkage long sys_sched_rr_get_interval(pid_t pid, struct timespec *interval)
{
	int retval = -EINVAL;
	struct timespec t;
	task_t *p;

	if (pid < 0)
		goto out_nounlock;

	retval = -ESRCH;
	read_lock(&tasklist_lock);
	p = find_process_by_pid(pid);
	if (p)
		jiffies_to_timespec(p->policy & SCHED_FIFO ?
					 0 : task_timeslice(p), &t);
	read_unlock(&tasklist_lock);
	if (p)
		retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
out_nounlock:
	return retval;
}

static void show_task(task_t * p)
{
	unsigned long free = 0;
	int state;
	static const char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" };

	printk("%-13.13s ", p->comm);
	state = p->state ? __ffs(p->state) + 1 : 0;
	if (((unsigned) state) < sizeof(stat_nam)/sizeof(char *))
		printk(stat_nam[state]);
	else
		printk(" ");
#if (BITS_PER_LONG == 32)
	if (p == current)
		printk(" current  ");
	else
		printk(" %08lX ", thread_saved_pc(&p->thread));
#else
	if (p == current)
		printk("   current task   ");
	else
		printk(" %016lx ", thread_saved_pc(&p->thread));
#endif
	{
		unsigned long * n = (unsigned long *) (p+1);
		while (!*n)
			n++;
		free = (unsigned long) n - (unsigned long)(p+1);
	}
	printk("%5lu %5d %6d ", free, p->pid, p->p_pptr->pid);
	if (p->p_cptr)
		printk("%5d ", p->p_cptr->pid);
	else
		printk("      ");
	if (p->p_ysptr)
		printk("%7d", p->p_ysptr->pid);
	else
		printk("       ");
	if (p->p_osptr)
		printk(" %5d", p->p_osptr->pid);
	else
		printk("      ");
	if (!p->mm)
		printk(" (L-TLB)\n");
	else
		printk(" (NOTLB)\n");

	{
		extern void show_trace_task(task_t *tsk);
		show_trace_task(p);
	}
}

char * render_sigset_t(sigset_t *set, char *buffer)
{
	int i = _NSIG, x;
	do {
		i -= 4, x = 0;
		if (sigismember(set, i+1)) x |= 1;
		if (sigismember(set, i+2)) x |= 2;
		if (sigismember(set, i+3)) x |= 4;
		if (sigismember(set, i+4)) x |= 8;
		*buffer++ = (x < 10 ? '0' : 'a' - 10) + x;
	} while (i >= 4);
	*buffer = 0;
	return buffer;
}

void show_state(void)
{
	task_t *p;

#if (BITS_PER_LONG == 32)
	printk("\n"
	       "                         free                        sibling\n");
	printk("  task             PC    stack   pid father child younger older\n");
#else
	printk("\n"
	       "                                 free                        sibling\n");
	printk("  task                 PC        stack   pid father child younger older\n");
#endif
	read_lock(&tasklist_lock);
	for_each_task(p) {
		/*
		 * reset the NMI-timeout, listing all files on a slow
		 * console might take alot of time:
		 */
		touch_nmi_watchdog();
		show_task(p);
	}
	read_unlock(&tasklist_lock);
}

void __init init_idle(task_t *idle, int cpu)
{
	runqueue_t *idle_rq = cpu_rq(cpu), *rq = cpu_rq(task_cpu(idle));
	unsigned long flags;

	__save_flags(flags);
	__cli();
	double_rq_lock(idle_rq, rq);

	idle_rq->curr = idle_rq->idle = idle;
	deactivate_task(idle, rq);
	idle->array = NULL;
	idle->prio = MAX_PRIO;
	idle->state = TASK_RUNNING;
	set_task_cpu(idle, cpu);
	double_rq_unlock(idle_rq, rq);
	set_tsk_need_resched(idle);
	__restore_flags(flags);

	/* Set the preempt count _outside_ the spinlocks! */
	idle->preempt_count = (idle->lock_depth >= 0);
}

extern void init_timervecs(void);
extern void timer_bh(void);
extern void tqueue_bh(void);
extern void immediate_bh(void);

void __init sched_init(void)
{
	runqueue_t *rq;
	int i, j, k;

	for (i = 0; i < NR_CPUS; i++) {
		prio_array_t *array;

		rq = cpu_rq(i);
		rq->active = rq->arrays;
		rq->expired = rq->arrays + 1;
		spin_lock_init(&rq->lock);
		INIT_LIST_HEAD(&rq->migration_queue);

		for (j = 0; j < 2; j++) {
			array = rq->arrays + j;
			for (k = 0; k < MAX_PRIO; k++) {
				INIT_LIST_HEAD(array->queue + k);
				__clear_bit(k, array->bitmap);
			}
			// delimiter for bitsearch
			__set_bit(MAX_PRIO, array->bitmap);
		}
	}
	/*
	 * We have to do a little magic to get the first
	 * process right in SMP mode.
	 */
	rq = this_rq();
	rq->curr = current;
	rq->idle = current;
	wake_up_process(current);

	init_timervecs();
	init_bh(TIMER_BH, timer_bh);
	init_bh(TQUEUE_BH, tqueue_bh);
	init_bh(IMMEDIATE_BH, immediate_bh);

	/*
	 * The boot idle thread does lazy MMU switching as well:
	 */
	atomic_inc(&init_mm.mm_count);
	enter_lazy_tlb(&init_mm, current, smp_processor_id());
}

#if CONFIG_SMP

/*
 * This is how migration works:
 *
 * 1) we queue a migration_req_t structure in the source CPU's
 *    runqueue and wake up that CPU's migration thread.
 * 2) we down() the locked semaphore => thread blocks.
 * 3) migration thread wakes up (implicitly it forces the migrated
 *    thread off the CPU)
 * 4) it gets the migration request and checks whether the migrated
 *    task is still in the wrong runqueue.
 * 5) if it's in the wrong runqueue then the migration thread removes
 *    it and puts it into the right queue.
 * 6) migration thread up()s the semaphore.
 * 7) we wake up and the migration is done.
 */

typedef struct {
	struct list_head list;
	task_t *task;
	struct semaphore sem;
} migration_req_t;

/*
 * Change a given task's CPU affinity. Migrate the process to a
 * proper CPU and schedule it away if the CPU it's executing on
 * is removed from the allowed bitmask.
 *
 * NOTE: the caller must have a valid reference to the task, the
 * task must not exit() & deallocate itself prematurely.  The
 * call is not atomic; no spinlocks may be held.
 */
void set_cpus_allowed(task_t *p, unsigned long new_mask)
{
	unsigned long flags;
	migration_req_t req;
	runqueue_t *rq;

	new_mask &= cpu_online_map;
	if (!new_mask)
		BUG();

	preempt_disable();
	rq = task_rq_lock(p, &flags);
	p->cpus_allowed = new_mask;
	/*
	 * Can the task run on the task's current CPU? If not then
	 * migrate the process off to a proper CPU.
	 */
	if (new_mask & (1UL << task_cpu(p))) {
		task_rq_unlock(rq, &flags);
		goto out;
	}
	/*
	 * If the task is not on a runqueue (and not running), then
	 * it is sufficient to simply update the task's cpu field.
	 */
	if (!p->array && (p != rq->curr)) {
		set_task_cpu(p, __ffs(p->cpus_allowed));
		task_rq_unlock(rq, &flags);
		goto out;
	}
	init_MUTEX_LOCKED(&req.sem);
	req.task = p;
	list_add(&req.list, &rq->migration_queue);
	task_rq_unlock(rq, &flags);

	wake_up_process(rq->migration_thread);

	down(&req.sem);
out:
	preempt_enable();
}

/*
 * migration_thread - this is a highprio system thread that performs
 * thread migration by 'pulling' threads into the target runqueue.
 */
static int migration_thread(void * bind_cpu)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
	int cpu = cpu_logical_map((int) (long) bind_cpu);
	runqueue_t *rq;
	int ret;

	daemonize();
	sigfillset(&current->blocked);
	set_fs(KERNEL_DS);

	/*
	 * The first migration thread is started on CPU #0. This one can
	 * migrate the other migration threads to their destination CPUs.
	 */
	if (cpu != 0) {
		while (!cpu_rq(cpu_logical_map(0))->migration_thread)
			yield();
		set_cpus_allowed(current, 1UL << cpu);
	}
	printk("migration_task %d on cpu=%d\n", cpu, smp_processor_id());
	ret = setscheduler(0, SCHED_FIFO, &param);

	rq = this_rq();
	rq->migration_thread = current;

	sprintf(current->comm, "migration_CPU%d", smp_processor_id());

	for (;;) {
		runqueue_t *rq_src, *rq_dest;
		struct list_head *head;
		int cpu_src, cpu_dest;
		migration_req_t *req;
		unsigned long flags;
		task_t *p;

		spin_lock_irqsave(&rq->lock, flags);
		head = &rq->migration_queue;
		current->state = TASK_INTERRUPTIBLE;
		if (list_empty(head)) {
			spin_unlock_irqrestore(&rq->lock, flags);
			schedule();
			continue;
		}
		req = list_entry(head->next, migration_req_t, list);
		list_del_init(head->next);
		spin_unlock_irqrestore(&rq->lock, flags);

		p = req->task;
		cpu_dest = __ffs(p->cpus_allowed);
		rq_dest = cpu_rq(cpu_dest);
repeat:
		cpu_src = task_cpu(p);
		rq_src = cpu_rq(cpu_src);

		local_irq_save(flags);
		double_rq_lock(rq_src, rq_dest);
		if (task_cpu(p) != cpu_src) {
			double_rq_unlock(rq_src, rq_dest);
			local_irq_restore(flags);
			goto repeat;
		}
		if (rq_src == rq) {
			set_task_cpu(p, cpu_dest);
			if (p->array) {
				deactivate_task(p, rq_src);
				activate_task(p, rq_dest);
			}
		}
		double_rq_unlock(rq_src, rq_dest);
		local_irq_restore(flags);

		up(&req->sem);
	}
}

void __init migration_init(void)
{
	int cpu;

	current->cpus_allowed = 1UL << cpu_logical_map(0);
	for (cpu = 0; cpu < smp_num_cpus; cpu++)
		if (kernel_thread(migration_thread, (void *) (long) cpu,
				CLONE_FS | CLONE_FILES | CLONE_SIGNAL) < 0)
			BUG();
	current->cpus_allowed = -1L;

	for (cpu = 0; cpu < smp_num_cpus; cpu++)
		while (!cpu_rq(cpu_logical_map(cpu))->migration_thread)
			schedule_timeout(2);
}
#endif
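The entry points above back the POSIX scheduling calls that user space reaches through <sched.h>. As a rough illustration only, here is a minimal user-space sketch exercising sched_get_priority_min/max(), sched_rr_get_interval() and sched_yield(); it assumes an ordinary glibc toolchain and is not part of the kernel source shown on this page.

/*
 * sched_demo.c - user-space sketch (assumption: built with a normal glibc
 * toolchain, e.g. "gcc -o sched_demo sched_demo.c").  Each call lands in
 * one of the sys_sched_* entry points implemented above.
 */
#include <stdio.h>
#include <sched.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
	struct timespec ts;

	/* sys_sched_get_priority_min/max: valid rt_priority range per policy */
	printf("SCHED_FIFO priorities: %d..%d\n",
	       sched_get_priority_min(SCHED_FIFO),
	       sched_get_priority_max(SCHED_FIFO));

	/* sys_sched_rr_get_interval: default timeslice of this process,
	 * where 0 means "infinity" (e.g. a SCHED_FIFO task) */
	if (sched_rr_get_interval(getpid(), &ts) == 0)
		printf("timeslice: %ld.%09ld s\n", (long) ts.tv_sec, ts.tv_nsec);

	/* sys_sched_yield: queue ourselves on the expired array so other
	 * runnable tasks on this CPU run first */
	sched_yield();
	return 0;
}

Note that the affinity pair (sys_sched_setaffinity/sys_sched_getaffinity) in this 2.4 backport takes a plain unsigned long bitmask rather than the cpu_set_t interface later standardised by glibc, so portable wrappers for those two calls are deliberately left out of the sketch.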
