📄 sched.c
	}
#endif
	/* Has the task been executed yet? If not, we cannot wake it up! */
	if (!RPC_IS_ACTIVATED(task)) {
		printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
		return;
	}
	if (RPC_IS_RUNNING(task))
		return;

	__rpc_disable_timer(task);
	if (task->tk_rpcwait != &schedq)
		__rpc_remove_wait_queue(task);

	rpc_make_runnable(task);

	dprintk("RPC: __rpc_wake_up_task done\n");
}

/*
 * Default timeout handler if none specified by user
 */
static void
__rpc_default_timer(struct rpc_task *task)
{
	dprintk("RPC: %d timeout (default timer)\n", task->tk_pid);
	task->tk_status = -ETIMEDOUT;
	rpc_wake_up_task(task);
}

/*
 * Wake up the specified task
 */
void
rpc_wake_up_task(struct rpc_task *task)
{
	if (RPC_IS_RUNNING(task))
		return;
	spin_lock_bh(&rpc_queue_lock);
	__rpc_wake_up_task(task);
	spin_unlock_bh(&rpc_queue_lock);
}

/*
 * Wake up the next task on a priority queue.
 */
static struct rpc_task *
__rpc_wake_up_next_priority(struct rpc_wait_queue *queue)
{
	struct list_head *q;
	struct rpc_task *task;

	/*
	 * Service a batch of tasks from a single cookie.
	 */
	q = &queue->tasks[queue->priority];
	if (!list_empty(q)) {
		task = list_entry(q->next, struct rpc_task, tk_list);
		if (queue->cookie == task->tk_cookie) {
			if (--queue->nr)
				goto out;
			list_move_tail(&task->tk_list, q);
		}
		/*
		 * Check if we need to switch queues.
		 */
		if (--queue->count)
			goto new_cookie;
	}

	/*
	 * Service the next queue.
	 */
	do {
		if (q == &queue->tasks[0])
			q = &queue->tasks[queue->maxpriority];
		else
			q = q - 1;
		if (!list_empty(q)) {
			task = list_entry(q->next, struct rpc_task, tk_list);
			goto new_queue;
		}
	} while (q != &queue->tasks[queue->priority]);

	rpc_reset_waitqueue_priority(queue);
	return NULL;

new_queue:
	rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
new_cookie:
	rpc_set_waitqueue_cookie(queue, task->tk_cookie);
out:
	__rpc_wake_up_task(task);
	return task;
}

/*
 * Wake up the next task on the wait queue.
 */
struct rpc_task *
rpc_wake_up_next(struct rpc_wait_queue *queue)
{
	struct rpc_task *task = NULL;

	dprintk("RPC: wake_up_next(%p \"%s\")\n", queue, rpc_qname(queue));
	spin_lock_bh(&rpc_queue_lock);
	if (RPC_IS_PRIORITY(queue))
		task = __rpc_wake_up_next_priority(queue);
	else {
		task_for_first(task, &queue->tasks[0])
			__rpc_wake_up_task(task);
	}
	spin_unlock_bh(&rpc_queue_lock);

	return task;
}

/**
 * rpc_wake_up - wake up all rpc_tasks
 * @queue: rpc_wait_queue on which the tasks are sleeping
 *
 * Grabs rpc_queue_lock
 */
void rpc_wake_up(struct rpc_wait_queue *queue)
{
	struct rpc_task *task;
	struct list_head *head;

	spin_lock_bh(&rpc_queue_lock);
	head = &queue->tasks[queue->maxpriority];
	for (;;) {
		while (!list_empty(head)) {
			task = list_entry(head->next, struct rpc_task, tk_list);
			__rpc_wake_up_task(task);
		}
		if (head == &queue->tasks[0])
			break;
		head--;
	}
	spin_unlock_bh(&rpc_queue_lock);
}
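/*
 * Illustrative sketch, not part of the original file (excluded from the
 * build by the #if 0 guard): callers typically pair rpc_sleep_on() with
 * one of the wake-up helpers above.  The function names below are
 * hypothetical; only to make the sleep/wake contract visible.
 */
#if 0
static void example_wait_for_event(struct rpc_wait_queue *q, struct rpc_task *task)
{
	/* Park the task on q; no action or timer callbacks are installed. */
	rpc_sleep_on(q, task, NULL, NULL);
}

static void example_event_arrived(struct rpc_wait_queue *q)
{
	/* Wake every task parked on q; each one becomes runnable again. */
	rpc_wake_up(q);
}
#endif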
/**
 * rpc_wake_up_status - wake up all rpc_tasks and set their status value.
 * @queue: rpc_wait_queue on which the tasks are sleeping
 * @status: status value to set
 *
 * Grabs rpc_queue_lock
 */
void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
{
	struct list_head *head;
	struct rpc_task *task;

	spin_lock_bh(&rpc_queue_lock);
	head = &queue->tasks[queue->maxpriority];
	for (;;) {
		while (!list_empty(head)) {
			task = list_entry(head->next, struct rpc_task, tk_list);
			task->tk_status = status;
			__rpc_wake_up_task(task);
		}
		if (head == &queue->tasks[0])
			break;
		head--;
	}
	spin_unlock_bh(&rpc_queue_lock);
}

/*
 * Run a task at a later time
 */
static void	__rpc_atrun(struct rpc_task *);
void
rpc_delay(struct rpc_task *task, unsigned long delay)
{
	task->tk_timeout = delay;
	rpc_sleep_on(&delay_queue, task, NULL, __rpc_atrun);
}

static void
__rpc_atrun(struct rpc_task *task)
{
	task->tk_status = 0;
	rpc_wake_up_task(task);
}

/*
 * This is the RPC `scheduler' (or rather, the finite state machine).
 */
static int
__rpc_execute(struct rpc_task *task)
{
	int status = 0;

	dprintk("RPC: %4d rpc_execute flgs %x\n",
				task->tk_pid, task->tk_flags);

	if (!RPC_IS_RUNNING(task)) {
		printk(KERN_WARNING "RPC: rpc_execute called for sleeping task!!\n");
		return 0;
	}

 restarted:
	while (1) {
		/*
		 * Execute any pending callback.
		 */
		if (RPC_DO_CALLBACK(task)) {
			/* Define a callback save pointer */
			void (*save_callback)(struct rpc_task *);

			/*
			 * If a callback exists, save it, reset it,
			 * call it.
			 * The save is needed to stop from resetting
			 * another callback set within the callback handler
			 *  - Dave
			 */
			save_callback = task->tk_callback;
			task->tk_callback = NULL;
			save_callback(task);
		}

		/*
		 * Perform the next FSM step.
		 * tk_action may be NULL when the task has been killed
		 * by someone else.
		 */
		if (RPC_IS_RUNNING(task)) {
			/*
			 * Garbage collection of pending timers...
			 */
			rpc_delete_timer(task);
			if (!task->tk_action)
				break;
			task->tk_action(task);
			/* micro-optimization to avoid spinlock */
			if (RPC_IS_RUNNING(task))
				continue;
		}

		/*
		 * Check whether task is sleeping.
		 */
		spin_lock_bh(&rpc_queue_lock);
		if (!RPC_IS_RUNNING(task)) {
			rpc_set_sleeping(task);
			if (RPC_IS_ASYNC(task)) {
				spin_unlock_bh(&rpc_queue_lock);
				return 0;
			}
		}
		spin_unlock_bh(&rpc_queue_lock);

		if (!RPC_IS_SLEEPING(task))
			continue;

		/* sync task: sleep here */
		dprintk("RPC: %4d sync task going to sleep\n", task->tk_pid);
		if (current->pid == rpciod_pid)
			printk(KERN_ERR "RPC: rpciod waiting on sync task!\n");

		if (RPC_TASK_UNINTERRUPTIBLE(task)) {
			__wait_event(task->tk_wait, !RPC_IS_SLEEPING(task));
		} else {
			__wait_event_interruptible(task->tk_wait, !RPC_IS_SLEEPING(task), status);
			/*
			 * When a sync task receives a signal, it exits with
			 * -ERESTARTSYS. In order to catch any callbacks that
			 * clean up after sleeping on some queue, we don't
			 * break the loop here, but go around once more.
			 */
			if (status == -ERESTARTSYS) {
				dprintk("RPC: %4d got signal\n", task->tk_pid);
				task->tk_flags |= RPC_TASK_KILLED;
				rpc_exit(task, -ERESTARTSYS);
				rpc_wake_up_task(task);
			}
		}
		dprintk("RPC: %4d sync task resuming\n", task->tk_pid);
	}

	if (task->tk_exit) {
		task->tk_exit(task);
		/* If tk_action is non-null, the user wants us to restart */
		if (task->tk_action) {
			if (!RPC_ASSASSINATED(task)) {
				/* Release RPC slot and buffer memory */
				if (task->tk_rqstp)
					xprt_release(task);
				rpc_free(task);
				goto restarted;
			}
			printk(KERN_ERR "RPC: dead task tries to walk away.\n");
		}
	}

	dprintk("RPC: %4d exit() = %d\n", task->tk_pid, task->tk_status);
	status = task->tk_status;

	/* Release all resources associated with the task */
	rpc_release_task(task);

	return status;
}
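/*
 * Illustrative sketch, not part of the original file (excluded from the
 * build by the #if 0 guard): __rpc_execute() above keeps calling
 * task->tk_action until it is cleared, so an RPC "program" is a chain of
 * small steps that each install the next one.  The step functions below
 * are hypothetical.
 */
#if 0
static void example_step_done(struct rpc_task *task)
{
	/* Final step: clear tk_action so the scheduler loop terminates. */
	task->tk_action = NULL;
}

static void example_step_start(struct rpc_task *task)
{
	/* Do some work, then hand control to the next state. */
	task->tk_action = example_step_done;
}
#endif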
/*
 * User-visible entry point to the scheduler.
 *
 * This may be called recursively if e.g. an async NFS task updates
 * the attributes and finds that dirty pages must be flushed.
 * NOTE: Upon exit of this function the task is guaranteed to be
 *	 released. In particular note that tk_release() will have
 *	 been called, so your task memory may have been freed.
 */
int
rpc_execute(struct rpc_task *task)
{
	int status = -EIO;

	if (rpc_inhibit) {
		printk(KERN_INFO "RPC: execution inhibited!\n");
		goto out_release;
	}
	status = -EWOULDBLOCK;
	if (task->tk_active) {
		printk(KERN_ERR "RPC: active task was run twice!\n");
		goto out_err;
	}

	task->tk_active = 1;
	rpc_set_running(task);
	return __rpc_execute(task);

 out_release:
	rpc_release_task(task);
 out_err:
	return status;
}

/*
 * This is our own little scheduler for async RPC tasks.
 */
static void
__rpc_schedule(void)
{
	struct rpc_task *task;
	int count = 0;

	dprintk("RPC: rpc_schedule enter\n");
	while (1) {
		task_for_first(task, &schedq.tasks[0]) {
			__rpc_remove_wait_queue(task);
			spin_unlock_bh(&rpc_queue_lock);

			__rpc_execute(task);
			spin_lock_bh(&rpc_queue_lock);
		} else {
			break;
		}

		if (++count >= 200 || need_resched()) {
			count = 0;
			spin_unlock_bh(&rpc_queue_lock);
			schedule();
			spin_lock_bh(&rpc_queue_lock);
		}
	}
	dprintk("RPC: rpc_schedule leave\n");
}

/*
 * Allocate memory for RPC purposes.
 *
 * We try to ensure that some NFS reads and writes can always proceed
 * by using a mempool when allocating 'small' buffers.
 * In order to avoid memory starvation triggering more writebacks of
 * NFS requests, we use GFP_NOFS rather than GFP_KERNEL.
 */
void *
rpc_malloc(struct rpc_task *task, size_t size)
{
	int gfp;

	if (task->tk_flags & RPC_TASK_SWAPPER)
		gfp = GFP_ATOMIC;
	else
		gfp = GFP_NOFS;

	if (size > RPC_BUFFER_MAXSIZE) {
		task->tk_buffer = kmalloc(size, gfp);
		if (task->tk_buffer)
			task->tk_bufsize = size;
	} else {
		task->tk_buffer = mempool_alloc(rpc_buffer_mempool, gfp);
		if (task->tk_buffer)
			task->tk_bufsize = RPC_BUFFER_MAXSIZE;
	}
	return task->tk_buffer;
}

void
rpc_free(struct rpc_task *task)
{
	if (task->tk_buffer) {
		if (task->tk_bufsize == RPC_BUFFER_MAXSIZE)
			mempool_free(task->tk_buffer, rpc_buffer_mempool);
		else
			kfree(task->tk_buffer);
		task->tk_buffer = NULL;
		task->tk_bufsize = 0;
	}
}

/*
 * Creation and deletion of RPC task structures
 */
void rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt,
		   rpc_action callback, int flags)
{
	memset(task, 0, sizeof(*task));
	init_timer(&task->tk_timer);
	task->tk_timer.data     = (unsigned long) task;
	task->tk_timer.function = (void (*)(unsigned long)) rpc_run_timer;
	task->tk_client = clnt;
	task->tk_flags  = flags;
	task->tk_exit   = callback;
	init_waitqueue_head(&task->tk_wait);
	if (current->uid != current->fsuid || current->gid != current->fsgid)
		task->tk_flags |= RPC_TASK_SETUID;

	/* Initialize retry counters */
	task->tk_garb_retry = 2;
	task->tk_cred_retry = 2;
	task->tk_suid_retry = 1;

	task->tk_priority = RPC_PRIORITY_NORMAL;
	task->tk_cookie = (unsigned long)current;
	INIT_LIST_HEAD(&task->tk_links);

	/* Add to global list of all tasks */
	spin_lock(&rpc_sched_lock);
	list_add(&task->tk_task, &all_tasks);