
📄 sched.c

📁 Linux Kernel 2.6.9 for OMAP1710
💻 C
📖 Page 1 of 3
	spin_unlock(&rpc_sched_lock);
	if (clnt) {
		atomic_inc(&clnt->cl_users);
		if (clnt->cl_softrtry)
			task->tk_flags |= RPC_TASK_SOFT;
		if (!clnt->cl_intr)
			task->tk_flags |= RPC_TASK_NOINTR;
	}

#ifdef RPC_DEBUG
	task->tk_magic = 0xf00baa;
	task->tk_pid = rpc_task_id++;
#endif
	dprintk("RPC: %4d new task procpid %d\n", task->tk_pid,
				current->pid);
}

static struct rpc_task *
rpc_alloc_task(void)
{
	return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS);
}

static void
rpc_default_free_task(struct rpc_task *task)
{
	dprintk("RPC: %4d freeing task\n", task->tk_pid);
	mempool_free(task, rpc_task_mempool);
}

/*
 * Create a new task for the specified client.  We have to
 * clean up after an allocation failure, as the client may
 * have specified "oneshot".
 */
struct rpc_task *
rpc_new_task(struct rpc_clnt *clnt, rpc_action callback, int flags)
{
	struct rpc_task	*task;

	task = rpc_alloc_task();
	if (!task)
		goto cleanup;

	rpc_init_task(task, clnt, callback, flags);

	/* Replace tk_release */
	task->tk_release = rpc_default_free_task;

	dprintk("RPC: %4d allocated task\n", task->tk_pid);
	task->tk_flags |= RPC_TASK_DYNAMIC;
out:
	return task;

cleanup:
	/* Check whether to release the client */
	if (clnt) {
		printk("rpc_new_task: failed, users=%d, oneshot=%d\n",
			atomic_read(&clnt->cl_users), clnt->cl_oneshot);
		atomic_inc(&clnt->cl_users); /* pretend we were used ... */
		rpc_release_client(clnt);
	}
	goto out;
}

void
rpc_release_task(struct rpc_task *task)
{
	dprintk("RPC: %4d release task\n", task->tk_pid);

#ifdef RPC_DEBUG
	if (task->tk_magic != 0xf00baa) {
		printk(KERN_ERR "RPC: attempt to release a non-existing task!\n");
		rpc_debug = ~0;
		rpc_show_tasks();
		return;
	}
#endif

	/* Remove from global task list */
	spin_lock(&rpc_sched_lock);
	list_del(&task->tk_task);
	spin_unlock(&rpc_sched_lock);

	/* Protect the execution below. */
	spin_lock_bh(&rpc_queue_lock);

	/* Disable timer to prevent zombie wakeup */
	__rpc_disable_timer(task);

	/* Remove from any wait queue we're still on */
	__rpc_remove_wait_queue(task);

	task->tk_active = 0;

	spin_unlock_bh(&rpc_queue_lock);

	/* Synchronously delete any running timer */
	rpc_delete_timer(task);

	/* Release resources */
	if (task->tk_rqstp)
		xprt_release(task);
	if (task->tk_msg.rpc_cred)
		rpcauth_unbindcred(task);
	rpc_free(task);
	if (task->tk_client) {
		rpc_release_client(task->tk_client);
		task->tk_client = NULL;
	}

#ifdef RPC_DEBUG
	task->tk_magic = 0;
#endif
	if (task->tk_release)
		task->tk_release(task);
}

/**
 * rpc_find_parent - find the parent of a child task.
 * @child: child task
 *
 * Checks that the parent task is still sleeping on the
 * queue 'childq'. If so returns a pointer to the parent.
 * Upon failure returns NULL.
 *
 * Caller must hold rpc_queue_lock
 */
static inline struct rpc_task *
rpc_find_parent(struct rpc_task *child)
{
	struct rpc_task	*task, *parent;
	struct list_head *le;

	parent = (struct rpc_task *) child->tk_calldata;
	task_for_each(task, le, &childq.tasks[0])
		if (task == parent)
			return parent;

	return NULL;
}

static void
rpc_child_exit(struct rpc_task *child)
{
	struct rpc_task	*parent;

	spin_lock_bh(&rpc_queue_lock);
	if ((parent = rpc_find_parent(child)) != NULL) {
		parent->tk_status = child->tk_status;
		__rpc_wake_up_task(parent);
	}
	spin_unlock_bh(&rpc_queue_lock);
}

/*
 * Note: rpc_new_task releases the client after a failure.
 */
struct rpc_task *
rpc_new_child(struct rpc_clnt *clnt, struct rpc_task *parent)
{
	struct rpc_task	*task;

	task = rpc_new_task(clnt, NULL, RPC_TASK_ASYNC | RPC_TASK_CHILD);
	if (!task)
		goto fail;
	task->tk_exit = rpc_child_exit;
	task->tk_calldata = parent;
	return task;

fail:
	parent->tk_status = -ENOMEM;
	return NULL;
}

void
rpc_run_child(struct rpc_task *task, struct rpc_task *child, rpc_action func)
{
	spin_lock_bh(&rpc_queue_lock);
	/* N.B. Is it possible for the child to have already finished? */
	__rpc_sleep_on(&childq, task, func, NULL);
	rpc_schedule_run(child);
	spin_unlock_bh(&rpc_queue_lock);
}
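/*
 * Illustrative sketch, not part of the original sched.c: the shape of
 * a caller of the parent/child API above.  A parent task's action
 * routine spawns a child and sleeps on childq via rpc_run_child();
 * rpc_child_exit() later copies the child's status into the parent
 * and wakes it, at which point `func' (here my_child_done) runs as
 * the parent's next step.  The names my_spawn_child/my_child_done
 * are hypothetical.
 */
#if 0	/* example only */
static void my_child_done(struct rpc_task *parent);

static void
my_spawn_child(struct rpc_task *parent)
{
	struct rpc_task	*child;

	child = rpc_new_child(parent->tk_client, parent);
	if (child == NULL)
		return;	/* rpc_new_child set parent->tk_status = -ENOMEM */
	rpc_run_child(parent, child, my_child_done);
}

static void
my_child_done(struct rpc_task *parent)
{
	/* rpc_child_exit() copied the child's tk_status to the parent */
	dprintk("RPC: %4d child finished, status %d\n",
			parent->tk_pid, parent->tk_status);
}
#endif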
/*
 * Kill all tasks for the given client.
 * XXX: kill their descendants as well?
 */
void
rpc_killall_tasks(struct rpc_clnt *clnt)
{
	struct rpc_task	*rovr;
	struct list_head *le;

	dprintk("RPC:      killing all tasks for client %p\n", clnt);

	/*
	 * Spin lock all_tasks to prevent changes...
	 */
	spin_lock(&rpc_sched_lock);
	alltask_for_each(rovr, le, &all_tasks)
		if (!clnt || rovr->tk_client == clnt) {
			rovr->tk_flags |= RPC_TASK_KILLED;
			rpc_exit(rovr, -EIO);
			rpc_wake_up_task(rovr);
		}
	spin_unlock(&rpc_sched_lock);
}

static DECLARE_MUTEX_LOCKED(rpciod_running);

static inline int
rpciod_task_pending(void)
{
	return !list_empty(&schedq.tasks[0]);
}

/*
 * This is the rpciod kernel thread
 */
static int
rpciod(void *ptr)
{
	int		rounds = 0;

	lock_kernel();
	/*
	 * Let our maker know we're running ...
	 */
	rpciod_pid = current->pid;
	up(&rpciod_running);

	daemonize("rpciod");
	allow_signal(SIGKILL);

	dprintk("RPC: rpciod starting (pid %d)\n", rpciod_pid);
	spin_lock_bh(&rpc_queue_lock);
	while (rpciod_users) {
		DEFINE_WAIT(wait);
		if (signalled()) {
			spin_unlock_bh(&rpc_queue_lock);
			rpciod_killall();
			flush_signals(current);
			spin_lock_bh(&rpc_queue_lock);
		}
		__rpc_schedule();
		if (current->flags & PF_FREEZE) {
			spin_unlock_bh(&rpc_queue_lock);
			refrigerator(PF_FREEZE);
			spin_lock_bh(&rpc_queue_lock);
		}

		if (++rounds >= 64) {	/* safeguard */
			spin_unlock_bh(&rpc_queue_lock);
			schedule();
			rounds = 0;
			spin_lock_bh(&rpc_queue_lock);
		}

		dprintk("RPC: rpciod back to sleep\n");
		prepare_to_wait(&rpciod_idle, &wait, TASK_INTERRUPTIBLE);
		if (!rpciod_task_pending() && !signalled()) {
			spin_unlock_bh(&rpc_queue_lock);
			schedule();
			rounds = 0;
			spin_lock_bh(&rpc_queue_lock);
		}
		finish_wait(&rpciod_idle, &wait);
		dprintk("RPC: switch to rpciod\n");
	}
	spin_unlock_bh(&rpc_queue_lock);

	dprintk("RPC: rpciod shutdown commences\n");
	if (!list_empty(&all_tasks)) {
		printk(KERN_ERR "rpciod: active tasks at shutdown?!\n");
		rpciod_killall();
	}

	dprintk("RPC: rpciod exiting\n");
	unlock_kernel();

	rpciod_pid = 0;
	complete_and_exit(&rpciod_killer, 0);
	return 0;
}

static void
rpciod_killall(void)
{
	unsigned long flags;

	while (!list_empty(&all_tasks)) {
		clear_thread_flag(TIF_SIGPENDING);
		rpc_killall_tasks(NULL);
		spin_lock_bh(&rpc_queue_lock);
		__rpc_schedule();
		spin_unlock_bh(&rpc_queue_lock);
		if (!list_empty(&all_tasks)) {
			dprintk("rpciod_killall: waiting for tasks to exit\n");
			yield();
		}
	}

	spin_lock_irqsave(&current->sighand->siglock, flags);
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
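/*
 * Illustrative sketch, not part of the original sched.c: the dynamic
 * task lifecycle that rpciod services.  rpc_new_task() above takes a
 * client reference and installs rpc_default_free_task() as tk_release,
 * so once the task is handed to rpc_execute() (defined earlier in this
 * file) it frees itself through rpc_release_task() on completion.  The
 * names clnt/my_exit/start_async_call are hypothetical.
 */
#if 0	/* example only */
static void
my_exit(struct rpc_task *task)
{
	dprintk("RPC: %4d done, status %d\n", task->tk_pid, task->tk_status);
}

static int
start_async_call(struct rpc_clnt *clnt)
{
	struct rpc_task	*task;

	task = rpc_new_task(clnt, my_exit, RPC_TASK_ASYNC);
	if (task == NULL)
		return -ENOMEM;	/* rpc_new_task already released the client */
	rpc_execute(task);	/* rpciod drives it; task frees itself */
	return 0;
}
#endif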
/*
 * Start up the rpciod process if it's not already running.
 */
int
rpciod_up(void)
{
	int error = 0;

	down(&rpciod_sema);
	dprintk("rpciod_up: pid %d, users %d\n", rpciod_pid, rpciod_users);
	rpciod_users++;
	if (rpciod_pid)
		goto out;
	/*
	 * If there's no pid, we should be the first user.
	 */
	if (rpciod_users > 1)
		printk(KERN_WARNING "rpciod_up: no pid, %d users??\n", rpciod_users);
	/*
	 * Create the rpciod thread and wait for it to start.
	 */
	error = kernel_thread(rpciod, NULL, 0);
	if (error < 0) {
		printk(KERN_WARNING "rpciod_up: create thread failed, error=%d\n", error);
		rpciod_users--;
		goto out;
	}
	down(&rpciod_running);
	error = 0;
out:
	up(&rpciod_sema);
	return error;
}

void
rpciod_down(void)
{
	down(&rpciod_sema);
	dprintk("rpciod_down pid %d sema %d\n", rpciod_pid, rpciod_users);
	if (rpciod_users) {
		if (--rpciod_users)
			goto out;
	} else
		printk(KERN_WARNING "rpciod_down: pid=%d, no users??\n", rpciod_pid);

	if (!rpciod_pid) {
		dprintk("rpciod_down: Nothing to do!\n");
		goto out;
	}

	kill_proc(rpciod_pid, SIGKILL, 1);
	wait_for_completion(&rpciod_killer);
 out:
	up(&rpciod_sema);
}

#ifdef RPC_DEBUG
void rpc_show_tasks(void)
{
	struct list_head *le;
	struct rpc_task *t;

	spin_lock(&rpc_sched_lock);
	if (list_empty(&all_tasks)) {
		spin_unlock(&rpc_sched_lock);
		return;
	}
	printk("-pid- proc flgs status -client- -prog- --rqstp- -timeout "
		"-rpcwait -action- --exit--\n");
	alltask_for_each(t, le, &all_tasks)
		printk("%05d %04d %04x %06d %8p %6d %8p %08ld %8s %8p %8p\n",
			t->tk_pid,
			(t->tk_msg.rpc_proc ? t->tk_msg.rpc_proc->p_proc : -1),
			t->tk_flags, t->tk_status,
			t->tk_client,
			(t->tk_client ? t->tk_client->cl_prog : 0),
			t->tk_rqstp, t->tk_timeout,
			rpc_qname(t->tk_rpcwait),
			t->tk_action, t->tk_exit);
	spin_unlock(&rpc_sched_lock);
}
#endif

void
rpc_destroy_mempool(void)
{
	if (rpc_buffer_mempool)
		mempool_destroy(rpc_buffer_mempool);
	if (rpc_task_mempool)
		mempool_destroy(rpc_task_mempool);
	if (rpc_task_slabp && kmem_cache_destroy(rpc_task_slabp))
		printk(KERN_INFO "rpc_task: not all structures were freed\n");
	if (rpc_buffer_slabp && kmem_cache_destroy(rpc_buffer_slabp))
		printk(KERN_INFO "rpc_buffers: not all structures were freed\n");
}

int
rpc_init_mempool(void)
{
	rpc_task_slabp = kmem_cache_create("rpc_tasks",
					     sizeof(struct rpc_task),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL, NULL);
	if (!rpc_task_slabp)
		goto err_nomem;
	rpc_buffer_slabp = kmem_cache_create("rpc_buffers",
					     RPC_BUFFER_MAXSIZE,
					     0, SLAB_HWCACHE_ALIGN,
					     NULL, NULL);
	if (!rpc_buffer_slabp)
		goto err_nomem;
	rpc_task_mempool = mempool_create(RPC_TASK_POOLSIZE,
					    mempool_alloc_slab,
					    mempool_free_slab,
					    rpc_task_slabp);
	if (!rpc_task_mempool)
		goto err_nomem;
	rpc_buffer_mempool = mempool_create(RPC_BUFFER_POOLSIZE,
					    mempool_alloc_slab,
					    mempool_free_slab,
					    rpc_buffer_slabp);
	if (!rpc_buffer_mempool)
		goto err_nomem;
	return 0;
err_nomem:
	rpc_destroy_mempool();
	return -ENOMEM;
}
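/*
 * Illustrative sketch, not part of the original sched.c: how the
 * facilities above pair up.  rpc_init_mempool()/rpc_destroy_mempool()
 * bracket the subsystem's lifetime, while rpciod_up()/rpciod_down()
 * are reference-counted under rpciod_sema: the first user starts the
 * rpciod thread, and the last one kills it and waits on rpciod_killer.
 * The function names below are hypothetical.
 */
#if 0	/* example only */
static int
my_subsystem_init(void)
{
	int error;

	error = rpc_init_mempool();	/* slabs and mempools first */
	if (error)
		return error;
	error = rpciod_up();		/* starts rpciod if not running */
	if (error)
		rpc_destroy_mempool();
	return error;
}

static void
my_subsystem_exit(void)
{
	rpciod_down();			/* last caller stops rpciod */
	rpc_destroy_mempool();
}
#endif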
