sched.c
	 */
			if (task->tk_client->cl_intr && signalled()) {
				dprintk("RPC: %4d got signal\n", task->tk_pid);
				task->tk_flags |= RPC_TASK_KILLED;
				rpc_exit(task, -ERESTARTSYS);
				rpc_wake_up_task(task);
			}
		}
	}

	if (task->tk_exit) {
		task->tk_exit(task);
		/* If tk_action is non-null, the user wants us to restart */
		if (task->tk_action) {
			if (!RPC_ASSASSINATED(task)) {
				/* Release RPC slot and buffer memory */
				if (task->tk_rqstp)
					xprt_release(task);
				if (task->tk_buffer) {
					rpc_free(task->tk_buffer);
					task->tk_buffer = NULL;
				}
				goto restarted;
			}
			printk(KERN_ERR "RPC: dead task tries to walk away.\n");
		}
	}

	dprintk("RPC: %4d exit() = %d\n", task->tk_pid, task->tk_status);
	status = task->tk_status;

	/* Release all resources associated with the task */
	rpc_release_task(task);

	return status;
}

/*
 * User-visible entry point to the scheduler.
 *
 * This may be called recursively if e.g. an async NFS task updates
 * the attributes and finds that dirty pages must be flushed.
 * NOTE: Upon exit of this function the task is guaranteed to be
 *	 released. In particular note that tk_release() will have
 *	 been called, so your task memory may have been freed.
 */
int
rpc_execute(struct rpc_task *task)
{
	int status = -EIO;

	if (rpc_inhibit) {
		printk(KERN_INFO "RPC: execution inhibited!\n");
		goto out_release;
	}

	status = -EWOULDBLOCK;
	if (task->tk_active) {
		printk(KERN_ERR "RPC: active task was run twice!\n");
		goto out_err;
	}

	task->tk_active = 1;
	rpc_set_running(task);
	return __rpc_execute(task);
 out_release:
	rpc_release_task(task);
 out_err:
	return status;
}
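/*
 * Illustrative sketch (not part of the original file): one plausible
 * way a caller could create and run a task with the functions defined
 * here.  "demo_call_done" and "demo_async_call" are hypothetical; a
 * real caller would also fill in the RPC message and the first
 * tk_action (the client code in clnt.c does that) before executing.
 */
#if 0	/* example only */
static void
demo_call_done(struct rpc_task *task)
{
	/* tk_exit callback: runs once the task finishes.  Leaving
	 * tk_action NULL here means "do not restart the task". */
	dprintk("RPC: %4d demo call done, status %d\n",
			task->tk_pid, task->tk_status);
}

static int
demo_async_call(struct rpc_clnt *clnt)
{
	struct rpc_task	*task;

	/* Dynamically allocated task: tk_release points at
	 * rpc_default_free_task(), so the task frees itself after
	 * rpc_release_task() runs. */
	task = rpc_new_task(clnt, demo_call_done, RPC_TASK_ASYNC);
	if (!task)
		return -ENOMEM;

	/* For an async task this returns right away; rpciod drives
	 * the task state machine to completion. */
	return rpc_execute(task);
}
#endif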
/*
 * This is our own little scheduler for async RPC tasks.
 */
static void
__rpc_schedule(void)
{
	struct rpc_task	*task;
	int		count = 0;

	dprintk("RPC: rpc_schedule enter\n");
	while (1) {
		/* Ensure equal rights for tcp tasks... */
		rpciod_tcp_dispatcher();

		spin_lock_bh(&rpc_queue_lock);
		if (!(task = schedq.task)) {
			spin_unlock_bh(&rpc_queue_lock);
			break;
		}
		if (task->tk_lock) {
			spin_unlock_bh(&rpc_queue_lock);
			printk(KERN_ERR "RPC: Locked task was scheduled !!!!\n");
#ifdef RPC_DEBUG
			rpc_debug = ~0;
			rpc_show_tasks();
#endif
			break;
		}
		__rpc_remove_wait_queue(task);
		spin_unlock_bh(&rpc_queue_lock);

		__rpc_execute(task);

		if (++count >= 200 || current->need_resched) {
			count = 0;
			schedule();
		}
	}
	dprintk("RPC: rpc_schedule leave\n");
}

/*
 * Allocate memory for RPC purpose.
 *
 * This is yet another tricky issue: For sync requests issued by
 * a user process, we want to make kmalloc sleep if there isn't
 * enough memory. Async requests should not sleep too excessively
 * because that will block rpciod (but that's not dramatic when
 * it's starved of memory anyway). Finally, swapout requests should
 * never sleep at all, and should not trigger another swap_out
 * request through kmalloc which would just increase memory contention.
 *
 * I hope the following gets it right, which gives async requests
 * a slight advantage over sync requests (good for writeback, debatable
 * for readahead):
 *
 *   sync user requests:	GFP_KERNEL
 *   async requests:		GFP_RPC		(== GFP_NFS)
 *   swap requests:		GFP_ATOMIC	(or new GFP_SWAPPER)
 */
void *
rpc_allocate(unsigned int flags, unsigned int size)
{
	u32	*buffer;
	int	gfp;

	if (flags & RPC_TASK_SWAPPER)
		gfp = GFP_ATOMIC;
	else if (flags & RPC_TASK_ASYNC)
		gfp = GFP_RPC;
	else
		gfp = GFP_KERNEL;

	do {
		if ((buffer = (u32 *) kmalloc(size, gfp)) != NULL) {
			dprintk("RPC: allocated buffer %p\n", buffer);
			return buffer;
		}
		if ((flags & RPC_TASK_SWAPPER) && size <= sizeof(swap_buffer)
		    && rpc_lock_swapbuf()) {
			dprintk("RPC: used last-ditch swap buffer\n");
			return swap_buffer;
		}
		if (flags & RPC_TASK_ASYNC)
			return NULL;
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ>>4);
	} while (!signalled());

	return NULL;
}

void
rpc_free(void *buffer)
{
	if (buffer != swap_buffer) {
		kfree(buffer);
		return;
	}
	rpc_unlock_swapbuf();
}
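/*
 * Illustrative sketch (not part of the original file): how the flag
 * policy above looks from a caller's side.  "demo_get_buffer" is a
 * hypothetical helper.
 */
#if 0	/* example only */
static int
demo_get_buffer(struct rpc_task *task, unsigned int size)
{
	/* Async tasks map to GFP_RPC and may get NULL back instead of
	 * sleeping; sync user tasks sleep inside rpc_allocate() until
	 * memory turns up or a signal arrives. */
	task->tk_buffer = rpc_allocate(task->tk_flags, size);
	if (!task->tk_buffer)
		return -ENOMEM;
	return 0;
}

/* The matching release must go through rpc_free(), which knows to
 * unlock the last-ditch swap_buffer instead of kfree()ing it. */
#endif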
/*
 * Creation and deletion of RPC task structures
 */
inline void
rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt,
				rpc_action callback, int flags)
{
	memset(task, 0, sizeof(*task));
	init_timer(&task->tk_timer);
	task->tk_timer.data     = (unsigned long) task;
	task->tk_timer.function = (void (*)(unsigned long)) rpc_run_timer;
	task->tk_client = clnt;
	task->tk_flags  = flags;
	task->tk_exit   = callback;
	init_waitqueue_head(&task->tk_wait);
	if (current->uid != current->fsuid || current->gid != current->fsgid)
		task->tk_flags |= RPC_TASK_SETUID;

	/* Initialize retry counters */
	task->tk_garb_retry = 2;
	task->tk_cred_retry = 2;
	task->tk_suid_retry = 1;

	/* Add to global list of all tasks */
	spin_lock(&rpc_sched_lock);
	task->tk_next_task = all_tasks;
	task->tk_prev_task = NULL;
	if (all_tasks)
		all_tasks->tk_prev_task = task;
	all_tasks = task;
	spin_unlock(&rpc_sched_lock);

	if (clnt)
		atomic_inc(&clnt->cl_users);

#ifdef RPC_DEBUG
	task->tk_magic = 0xf00baa;
	task->tk_pid = rpc_task_id++;
#endif
	dprintk("RPC: %4d new task procpid %d\n", task->tk_pid,
				current->pid);
}

static void
rpc_default_free_task(struct rpc_task *task)
{
	dprintk("RPC: %4d freeing task\n", task->tk_pid);
	rpc_free(task);
}

/*
 * Create a new task for the specified client.  We have to
 * clean up after an allocation failure, as the client may
 * have specified "oneshot".
 */
struct rpc_task *
rpc_new_task(struct rpc_clnt *clnt, rpc_action callback, int flags)
{
	struct rpc_task	*task;

	task = (struct rpc_task *) rpc_allocate(flags, sizeof(*task));
	if (!task)
		goto cleanup;

	rpc_init_task(task, clnt, callback, flags);

	/* Replace tk_release */
	task->tk_release = rpc_default_free_task;

	dprintk("RPC: %4d allocated task\n", task->tk_pid);
	task->tk_flags |= RPC_TASK_DYNAMIC;
out:
	return task;

cleanup:
	/* Check whether to release the client */
	if (clnt) {
		printk("rpc_new_task: failed, users=%d, oneshot=%d\n",
			atomic_read(&clnt->cl_users), clnt->cl_oneshot);
		atomic_inc(&clnt->cl_users); /* pretend we were used ... */
		rpc_release_client(clnt);
	}
	goto out;
}

void
rpc_release_task(struct rpc_task *task)
{
	struct rpc_task	*next, *prev;

	dprintk("RPC: %4d release task\n", task->tk_pid);

#ifdef RPC_DEBUG
	if (task->tk_magic != 0xf00baa) {
		printk(KERN_ERR "RPC: attempt to release a non-existing task!\n");
		rpc_debug = ~0;
		rpc_show_tasks();
		return;
	}
#endif

	/* Remove from global task list */
	spin_lock(&rpc_sched_lock);
	prev = task->tk_prev_task;
	next = task->tk_next_task;
	if (next)
		next->tk_prev_task = prev;
	if (prev)
		prev->tk_next_task = next;
	else
		all_tasks = next;
	task->tk_next_task = task->tk_prev_task = NULL;
	spin_unlock(&rpc_sched_lock);

	/* Protect the execution below. */
	spin_lock_bh(&rpc_queue_lock);

	/* Disable timer to prevent zombie wakeup */
	__rpc_disable_timer(task);

	/* Remove from any wait queue we're still on */
	__rpc_remove_wait_queue(task);

	task->tk_active = 0;

	spin_unlock_bh(&rpc_queue_lock);

	/* Synchronously delete any running timer */
	rpc_delete_timer(task);

	/* Release resources */
	if (task->tk_rqstp)
		xprt_release(task);
	if (task->tk_msg.rpc_cred)
		rpcauth_unbindcred(task);
	if (task->tk_buffer) {
		rpc_free(task->tk_buffer);
		task->tk_buffer = NULL;
	}
	if (task->tk_client) {
		rpc_release_client(task->tk_client);
		task->tk_client = NULL;
	}

#ifdef RPC_DEBUG
	task->tk_magic = 0;
#endif
	if (task->tk_release)
		task->tk_release(task);
}

/**
 * rpc_find_parent - find the parent of a child task.
 * @child: child task
 *
 * Checks that the parent task is still sleeping on the
 * queue 'childq'. If so returns a pointer to the parent.
 * Upon failure returns NULL.
 *
 * Caller must hold rpc_queue_lock
 */
static inline struct rpc_task *
rpc_find_parent(struct rpc_task *child)
{
	struct rpc_task	*task, *parent;

	parent = (struct rpc_task *) child->tk_calldata;
	if ((task = childq.task) != NULL) {
		do {
			if (task == parent)
				return parent;
		} while ((task = task->tk_next) != childq.task);
	}
	return NULL;
}

static void
rpc_child_exit(struct rpc_task *child)
{
	struct rpc_task	*parent;

	spin_lock_bh(&rpc_queue_lock);
	if ((parent = rpc_find_parent(child)) != NULL) {
		parent->tk_status = child->tk_status;
		__rpc_wake_up_task(parent);
	}
	spin_unlock_bh(&rpc_queue_lock);
}

/*
 * Note: rpc_new_task releases the client after a failure.
 */
struct rpc_task *
rpc_new_child(struct rpc_clnt *clnt, struct rpc_task *parent)
{
	struct rpc_task	*task;

	task = rpc_new_task(clnt, NULL, RPC_TASK_ASYNC | RPC_TASK_CHILD);
	if (!task)
		goto fail;
	task->tk_exit = rpc_child_exit;
	task->tk_calldata = parent;
	return task;

fail:
	parent->tk_status = -ENOMEM;
	return NULL;
}

void
rpc_run_child(struct rpc_task *task, struct rpc_task *child, rpc_action func)
{
	spin_lock_bh(&rpc_queue_lock);
	/* N.B. Is it possible for the child to have already finished? */
	__rpc_sleep_on(&childq, task, func, NULL);
	rpc_schedule_run(child);
	spin_unlock_bh(&rpc_queue_lock);
}
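/*
 * Illustrative sketch (not part of the original file): the intended
 * pairing of rpc_new_child()/rpc_run_child() as steps of a parent
 * task's state machine.  "demo_child_done" and "demo_spawn_child"
 * are hypothetical names.
 */
#if 0	/* example only */
static void
demo_child_done(struct rpc_task *task)
{
	/* The parent's next tk_action, run after rpc_child_exit()
	 * has copied the child's tk_status into the parent. */
	if (task->tk_status < 0)
		dprintk("RPC: %4d child failed, status %d\n",
				task->tk_pid, task->tk_status);
	task->tk_action = NULL;	/* parent is finished */
}

static void
demo_spawn_child(struct rpc_task *task)
{
	struct rpc_task	*child;

	child = rpc_new_child(task->tk_client, task);
	if (!child) {
		/* rpc_new_child() already set tk_status = -ENOMEM */
		task->tk_action = NULL;
		return;
	}

	/* Sleep the parent on childq and queue the child; the child's
	 * rpc_child_exit() wakes the parent, which resumes in
	 * demo_child_done(). */
	rpc_run_child(task, child, demo_child_done);
}
#endif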
/*
 * Kill all tasks for the given client.
 * XXX: kill their descendants as well?
 */
void
rpc_killall_tasks(struct rpc_clnt *clnt)
{
	struct rpc_task	**q, *rovr;

	dprintk("RPC: killing all tasks for client %p\n", clnt);

	/*
	 * Spin lock all_tasks to prevent changes...
	 */
	spin_lock(&rpc_sched_lock);
	for (q = &all_tasks; (rovr = *q); q = &rovr->tk_next_task) {
		if (!clnt || rovr->tk_client == clnt) {
			rovr->tk_flags |= RPC_TASK_KILLED;
			rpc_exit(rovr, -EIO);
			rpc_wake_up_task(rovr);
		}
	}
	spin_unlock(&rpc_sched_lock);
}

static DECLARE_MUTEX_LOCKED(rpciod_running);

static inline int
rpciod_task_pending(void)
{
	return schedq.task != NULL || xprt_tcp_pending();
}

/*
 * This is the rpciod kernel thread
 */
static int
rpciod(void *ptr)
{
	wait_queue_head_t *assassin = (wait_queue_head_t*) ptr;
	int		rounds = 0;

	MOD_INC_USE_COUNT;
	lock_kernel();
	/*
	 * Let our maker know we're running ...
	 */
	rpciod_pid = current->pid;
	up(&rpciod_running);

	exit_fs(current);
	exit_files(current);
	exit_mm(current);

	spin_lock_irq(&current->sigmask_lock);
	siginitsetinv(&current->blocked, sigmask(SIGKILL));
	recalc_sigpending(current);
	spin_unlock_irq(&current->sigmask_lock);

	current->session = 1;
	current->pgrp = 1;
	strcpy(current->comm, "rpciod");

	dprintk("RPC: rpciod starting (pid %d)\n", rpciod_pid);
	while (rpciod_users) {
		if (signalled()) {
			rpciod_killall();
			flush_signals(current);
		}
		__rpc_schedule();

		if (++rounds >= 64) {	/* safeguard */
			schedule();
			rounds = 0;
		}

		if (!rpciod_task_pending()) {
			dprintk("RPC: rpciod back to sleep\n");
			wait_event_interruptible(rpciod_idle,
						rpciod_task_pending());
			dprintk("RPC: switch to rpciod\n");
			rounds = 0;
		}
	}

	dprintk("RPC: rpciod shutdown commences\n");
	if (all_tasks) {
		printk(KERN_ERR "rpciod: active tasks at shutdown?!\n");
		rpciod_killall();
	}

	rpciod_pid = 0;
	wake_up(assassin);

	dprintk("RPC: rpciod exiting\n");
	MOD_DEC_USE_COUNT;
	return 0;
}

static void
rpciod_killall(void)
{
	unsigned long flags;

	while (all_tasks) {
		current->sigpending = 0;
		rpc_killall_tasks(NULL);
		__rpc_schedule();
		if (all_tasks) {
			dprintk("rpciod_killall: waiting for tasks to exit\n");
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(1);
		}
	}

	spin_lock_irqsave(&current->sigmask_lock, flags);
	recalc_sigpending(current);
	spin_unlock_irqrestore(&current->sigmask_lock, flags);
}

/*
 * Start up the rpciod process if it's not already running.
 */
int
rpciod_up(void)
{
	int error = 0;

	MOD_INC_USE_COUNT;
	down(&rpciod_sema);
	dprintk("rpciod_up: pid %d, users %d\n", rpciod_pid, rpciod_users);
	rpciod_users++;
	if (rpciod_pid)
		goto out;
	/*
	 * If there's no pid, we should be the first user.
	 */
	if (rpciod_users > 1)
		printk(KERN_WARNING "rpciod_up: no pid, %d users??\n",
						rpciod_users);
	/*
	 * Create the rpciod thread and wait for it to start.
	 */
	error = kernel_thread(rpciod, &rpciod_killer, 0);
	if (error < 0) {
		printk(KERN_WARNING "rpciod_up: create thread failed, error=%d\n",
						error);
		rpciod_users--;
		goto out;
	}
	down(&rpciod_running);
	error = 0;
out:
	up(&rpciod_sema);
	MOD_DEC_USE_COUNT;
	return error;
}
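/*
 * Illustrative sketch (not part of the original file): how a user of
 * rpciod is expected to pair rpciod_up() with rpciod_down() (defined
 * just below).  "demo_mount"/"demo_umount" are hypothetical names.
 */
#if 0	/* example only */
static int
demo_mount(void)
{
	int error;

	/* The first caller starts the rpciod thread; later callers
	 * just bump rpciod_users. */
	error = rpciod_up();
	if (error < 0)
		return error;
	return 0;
}

static void
demo_umount(void)
{
	/* The last caller (rpciod_users drops to zero) sends SIGKILL
	 * to rpciod and waits for it to exit. */
	rpciod_down();
}
#endif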
void
rpciod_down(void)
{
	unsigned long flags;

	MOD_INC_USE_COUNT;
	down(&rpciod_sema);
	dprintk("rpciod_down pid %d sema %d\n", rpciod_pid, rpciod_users);
	if (rpciod_users) {
		if (--rpciod_users)
			goto out;
	} else
		printk(KERN_WARNING "rpciod_down: pid=%d, no users??\n",
						rpciod_pid);

	if (!rpciod_pid) {
		dprintk("rpciod_down: Nothing to do!\n");
		goto out;
	}

	kill_proc(rpciod_pid, SIGKILL, 1);
	/*
	 * Usually rpciod will exit very quickly, so we
	 * wait briefly before checking the process id.
	 */
	current->sigpending = 0;
	set_current_state(TASK_INTERRUPTIBLE);
	schedule_timeout(1);
	/*
	 * Display a message if we're going to wait longer.
	 */
	while (rpciod_pid) {
		dprintk("rpciod_down: waiting for pid %d to exit\n",
						rpciod_pid);
		if (signalled()) {
			dprintk("rpciod_down: caught signal\n");
			break;
		}
		interruptible_sleep_on(&rpciod_killer);
	}

	spin_lock_irqsave(&current->sigmask_lock, flags);
	recalc_sigpending(current);
	spin_unlock_irqrestore(&current->sigmask_lock, flags);
out:
	up(&rpciod_sema);
	MOD_DEC_USE_COUNT;
}

#ifdef RPC_DEBUG
void rpc_show_tasks(void)
{
	struct rpc_task *t = all_tasks, *next;

	spin_lock(&rpc_sched_lock);
	t = all_tasks;
	if (!t) {
		spin_unlock(&rpc_sched_lock);
		return;
	}
	printk("-pid- proc flgs status -client- -prog- --rqstp- -timeout "
		"-rpcwait -action- --exit--\n");
	for (; t; t = next) {
		next = t->tk_next_task;
		printk("%05d %04d %04x %06d %8p %6d %8p %08ld %8s %8p %8p\n",
			t->tk_pid, t->tk_msg.rpc_proc, t->tk_flags,
			t->tk_status, t->tk_client, t->tk_client->cl_prog,
			t->tk_rqstp, t->tk_timeout,
			t->tk_rpcwait ? rpc_qname(t->tk_rpcwait) : " <NULL> ",
			t->tk_action, t->tk_exit);
	}
	spin_unlock(&rpc_sched_lock);
}
#endif
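/*
 * Illustrative sketch (not part of the original file): the debugging
 * idiom used at the error paths above, pulled out as a helper.
 * "demo_dump_rpc_state" is a hypothetical name.
 */
#if 0	/* example only */
#ifdef RPC_DEBUG
static void
demo_dump_rpc_state(void)
{
	/* Same recovery aid __rpc_schedule() and rpc_release_task()
	 * use when they detect an inconsistency: enable all debug
	 * output, then dump every entry on the all_tasks list. */
	rpc_debug = ~0;
	rpc_show_tasks();
}
#endif
#endif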