sched.c
/*
 * linux/net/sunrpc/sched.c
 *
 * Scheduling for synchronous and asynchronous RPC requests.
 *
 * Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
 *
 * TCP NFS related read + write fixes
 * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
 */

#include <linux/module.h>

#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>
#include <linux/suspend.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/xprt.h>

#ifdef RPC_DEBUG
#define RPCDBG_FACILITY		RPCDBG_SCHED
static int			rpc_task_id;
#endif

/*
 * RPC slabs and memory pools
 */
#define RPC_BUFFER_MAXSIZE	(2048)
#define RPC_BUFFER_POOLSIZE	(8)
#define RPC_TASK_POOLSIZE	(8)
static kmem_cache_t	*rpc_task_slabp;
static kmem_cache_t	*rpc_buffer_slabp;
static mempool_t	*rpc_task_mempool;
static mempool_t	*rpc_buffer_mempool;

static void		__rpc_default_timer(struct rpc_task *task);
static void		rpciod_killall(void);

/*
 * When an asynchronous RPC task is activated within a bottom half
 * handler, or while executing another RPC task, it is put on
 * schedq, and rpciod is woken up.
 */
static RPC_WAITQ(schedq, "schedq");

/*
 * RPC tasks that create another task (e.g. for contacting the portmapper)
 * will wait on this queue for their child's completion
 */
static RPC_WAITQ(childq, "childq");

/*
 * RPC tasks sit here while waiting for conditions to improve.
 */
static RPC_WAITQ(delay_queue, "delayq");

/*
 * All RPC tasks are linked into this list
 */
static LIST_HEAD(all_tasks);

/*
 * rpciod-related stuff
 */
static DECLARE_WAIT_QUEUE_HEAD(rpciod_idle);
static DECLARE_COMPLETION(rpciod_killer);
static DECLARE_MUTEX(rpciod_sema);
static unsigned int	rpciod_users;
static pid_t		rpciod_pid;
static int		rpc_inhibit;

/*
 * Spinlock for wait queues. Access to the latter also has to be
 * interrupt-safe in order to allow timers to wake up sleeping tasks.
 */
static spinlock_t rpc_queue_lock = SPIN_LOCK_UNLOCKED;
/*
 * Spinlock for other critical sections of code.
 */
static spinlock_t rpc_sched_lock = SPIN_LOCK_UNLOCKED;

/*
 * Disable the timer for a given RPC task. Should be called with
 * rpc_queue_lock and bh_disabled in order to avoid races within
 * rpc_run_timer().
 */
static inline void
__rpc_disable_timer(struct rpc_task *task)
{
	dprintk("RPC: %4d disabling timer\n", task->tk_pid);
	task->tk_timeout_fn = NULL;
	task->tk_timeout = 0;
}

/*
 * Run a timeout function.
 * We use the callback in order to allow __rpc_wake_up_task()
 * and friends to disable the timer synchronously on SMP systems
 * without calling del_timer_sync(). The latter could cause a
 * deadlock if called while we're holding spinlocks...
 */
static void
rpc_run_timer(struct rpc_task *task)
{
	void (*callback)(struct rpc_task *);

	spin_lock_bh(&rpc_queue_lock);
	callback = task->tk_timeout_fn;
	task->tk_timeout_fn = NULL;
	spin_unlock_bh(&rpc_queue_lock);
	if (callback) {
		dprintk("RPC: %4d running timer\n", task->tk_pid);
		callback(task);
	}
}

/*
 * Set up a timer for the current task.
 */
static inline void
__rpc_add_timer(struct rpc_task *task, rpc_action timer)
{
	if (!task->tk_timeout)
		return;

	dprintk("RPC: %4d setting alarm for %lu ms\n",
			task->tk_pid, task->tk_timeout * 1000 / HZ);

	if (timer)
		task->tk_timeout_fn = timer;
	else
		task->tk_timeout_fn = __rpc_default_timer;
	mod_timer(&task->tk_timer, jiffies + task->tk_timeout);
}
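/*
 * Illustrative sketch (not from the original sched.c): how a caller
 * might re-arm the timer of a task that is already asleep, following
 * the locking rule above (__rpc_add_timer() expects rpc_queue_lock to
 * be held with bottom halves disabled).  The name "example_rearm_timer"
 * and the ten-second value are assumptions made for illustration only.
 */
static inline void example_rearm_timer(struct rpc_task *task)
{
	spin_lock_bh(&rpc_queue_lock);
	task->tk_timeout = 10 * HZ;
	if (!RPC_IS_RUNNING(task))
		/* passing NULL falls back to __rpc_default_timer */
		__rpc_add_timer(task, NULL);
	spin_unlock_bh(&rpc_queue_lock);
}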
/*
 * Set up a timer for an already sleeping task.
 */
void rpc_add_timer(struct rpc_task *task, rpc_action timer)
{
	spin_lock_bh(&rpc_queue_lock);
	if (!RPC_IS_RUNNING(task))
		__rpc_add_timer(task, timer);
	spin_unlock_bh(&rpc_queue_lock);
}

/*
 * Delete any timer for the current task. Because we use del_timer_sync(),
 * this function should never be called while holding rpc_queue_lock.
 */
static inline void
rpc_delete_timer(struct rpc_task *task)
{
	if (del_timer_sync(&task->tk_timer))
		dprintk("RPC: %4d deleting timer\n", task->tk_pid);
}

/*
 * Add new request to a priority queue.
 */
static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	struct list_head *q;
	struct rpc_task *t;

	q = &queue->tasks[task->tk_priority];
	if (unlikely(task->tk_priority > queue->maxpriority))
		q = &queue->tasks[queue->maxpriority];
	list_for_each_entry(t, q, tk_list) {
		if (t->tk_cookie == task->tk_cookie) {
			list_add_tail(&task->tk_list, &t->tk_links);
			return;
		}
	}
	list_add_tail(&task->tk_list, q);
}

/*
 * Add new request to wait queue.
 *
 * Swapper tasks always get inserted at the head of the queue.
 * This should avoid many nasty memory deadlocks and hopefully
 * improve overall performance.
 * Everyone else gets appended to the queue to ensure proper FIFO behavior.
 */
static int __rpc_add_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (task->tk_rpcwait == queue)
		return 0;

	if (task->tk_rpcwait) {
		printk(KERN_WARNING "RPC: doubly enqueued task!\n");
		return -EWOULDBLOCK;
	}
	if (RPC_IS_PRIORITY(queue))
		__rpc_add_wait_queue_priority(queue, task);
	else if (RPC_IS_SWAPPER(task))
		list_add(&task->tk_list, &queue->tasks[0]);
	else
		list_add_tail(&task->tk_list, &queue->tasks[0]);
	task->tk_rpcwait = queue;

	dprintk("RPC: %4d added to queue %p \"%s\"\n",
				task->tk_pid, queue, rpc_qname(queue));

	return 0;
}

int rpc_add_wait_queue(struct rpc_wait_queue *q, struct rpc_task *task)
{
	int result;

	spin_lock_bh(&rpc_queue_lock);
	result = __rpc_add_wait_queue(q, task);
	spin_unlock_bh(&rpc_queue_lock);
	return result;
}

/*
 * Remove request from a priority queue.
 */
static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
{
	struct rpc_task *t;

	if (!list_empty(&task->tk_links)) {
		t = list_entry(task->tk_links.next, struct rpc_task, tk_list);
		list_move(&t->tk_list, &task->tk_list);
		list_splice_init(&task->tk_links, &t->tk_links);
	}
	list_del(&task->tk_list);
}
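/*
 * Illustrative sketch (not from the original sched.c): parking a task
 * on the delay queue via the locked wrapper above and reporting the
 * -EWOULDBLOCK that __rpc_add_wait_queue() returns for a task already
 * sitting on some other queue.  The name "example_park_task" is an
 * assumption made for illustration only.
 */
static inline int example_park_task(struct rpc_task *task)
{
	int err;

	err = rpc_add_wait_queue(&delay_queue, task);
	if (err < 0)
		printk(KERN_WARNING "RPC: %4d could not be queued (err %d)\n",
				task->tk_pid, err);
	return err;
}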
/*
 * Remove request from queue.
 * Note: must be called with spin lock held.
 */
static void __rpc_remove_wait_queue(struct rpc_task *task)
{
	struct rpc_wait_queue *queue = task->tk_rpcwait;

	if (!queue)
		return;

	if (RPC_IS_PRIORITY(queue))
		__rpc_remove_wait_queue_priority(task);
	else
		list_del(&task->tk_list);
	task->tk_rpcwait = NULL;

	dprintk("RPC: %4d removed from queue %p \"%s\"\n",
				task->tk_pid, queue, rpc_qname(queue));
}

void
rpc_remove_wait_queue(struct rpc_task *task)
{
	if (!task->tk_rpcwait)
		return;
	spin_lock_bh(&rpc_queue_lock);
	__rpc_remove_wait_queue(task);
	spin_unlock_bh(&rpc_queue_lock);
}

static inline void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
{
	queue->priority = priority;
	queue->count = 1 << (priority * 2);
}

static inline void rpc_set_waitqueue_cookie(struct rpc_wait_queue *queue, unsigned long cookie)
{
	queue->cookie = cookie;
	queue->nr = RPC_BATCH_COUNT;
}

static inline void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
{
	rpc_set_waitqueue_priority(queue, queue->maxpriority);
	rpc_set_waitqueue_cookie(queue, 0);
}

static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, int maxprio)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
		INIT_LIST_HEAD(&queue->tasks[i]);
	queue->maxpriority = maxprio;
	rpc_reset_waitqueue_priority(queue);
#ifdef RPC_DEBUG
	queue->name = qname;
#endif
}

void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, RPC_PRIORITY_HIGH);
}

void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, 0);
}
EXPORT_SYMBOL(rpc_init_wait_queue);

/*
 * Make an RPC task runnable.
 *
 * Note: If the task is ASYNC, this must be called with
 * the spinlock held to protect the wait queue operation.
 */
static inline void
rpc_make_runnable(struct rpc_task *task)
{
	if (task->tk_timeout_fn) {
		printk(KERN_ERR "RPC: task w/ running timer in rpc_make_runnable!!\n");
		return;
	}
	rpc_set_running(task);
	if (RPC_IS_ASYNC(task)) {
		if (RPC_IS_SLEEPING(task)) {
			int status;

			status = __rpc_add_wait_queue(&schedq, task);
			if (status < 0) {
				printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status);
				task->tk_status = status;
				return;
			}
			rpc_clear_sleeping(task);
			wake_up(&rpciod_idle);
		}
	} else {
		rpc_clear_sleeping(task);
		wake_up(&task->tk_wait);
	}
}

/*
 * Place a newly initialized task on the schedq.
 */
static inline void
rpc_schedule_run(struct rpc_task *task)
{
	/* Don't run a child twice! */
	if (RPC_IS_ACTIVATED(task))
		return;
	task->tk_active = 1;
	rpc_set_sleeping(task);
	rpc_make_runnable(task);
}

/*
 * For other people who may need to wake the I/O daemon
 * but should (for now) know nothing about its innards
 */
void rpciod_wake_up(void)
{
	if(rpciod_pid==0)
		printk(KERN_ERR "rpciod: wot no daemon?\n");
	wake_up(&rpciod_idle);
}
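/*
 * Illustrative sketch (not from the original sched.c): how a transport
 * might declare and initialise its own wait queues with the helpers
 * above: one priority queue (tasks grouped by tk_cookie) and one plain
 * FIFO queue.  The "example_" names are assumptions made for
 * illustration only.
 */
static struct rpc_wait_queue example_backlog;
static struct rpc_wait_queue example_pending;

static inline void example_init_queues(void)
{
	rpc_init_priority_wait_queue(&example_backlog, "example-backlog");
	rpc_init_wait_queue(&example_pending, "example-pending");
}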
/*
 * Prepare for sleeping on a wait queue.
 * By always appending tasks to the list we ensure FIFO behavior.
 * NB: An RPC task will only receive interrupt-driven events as long
 * as it's on a wait queue.
 */
static void
__rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
			rpc_action action, rpc_action timer)
{
	int status;

	dprintk("RPC: %4d sleep_on(queue \"%s\" time %ld)\n", task->tk_pid,
				rpc_qname(q), jiffies);

	if (!RPC_IS_ASYNC(task) && !RPC_IS_ACTIVATED(task)) {
		printk(KERN_ERR "RPC: Inactive synchronous task put to sleep!\n");
		return;
	}

	/* Mark the task as being activated if so needed */
	if (!RPC_IS_ACTIVATED(task)) {
		task->tk_active = 1;
		rpc_set_sleeping(task);
	}

	status = __rpc_add_wait_queue(q, task);
	if (status) {
		printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status);
		task->tk_status = status;
	} else {
		rpc_clear_running(task);
		if (task->tk_callback) {
			dprintk(KERN_ERR "RPC: %4d overwrites an active callback\n", task->tk_pid);
			BUG();
		}
		task->tk_callback = action;
		__rpc_add_timer(task, timer);
	}
}

void
rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
				rpc_action action, rpc_action timer)
{
	/*
	 * Protect the queue operations.
	 */
	spin_lock_bh(&rpc_queue_lock);
	__rpc_sleep_on(q, task, action, timer);
	spin_unlock_bh(&rpc_queue_lock);
}

/**
 * __rpc_wake_up_task - wake up a single rpc_task
 * @task: task to be woken up
 *
 * Caller must hold rpc_queue_lock
 */
static void
__rpc_wake_up_task(struct rpc_task *task)
{
	dprintk("RPC: %4d __rpc_wake_up_task (now %ld inh %d)\n",
					task->tk_pid, jiffies, rpc_inhibit);

#ifdef RPC_DEBUG
	if (task->tk_magic != 0xf00baa) {
		printk(KERN_ERR "RPC: attempt to wake up non-existing task!\n");
		rpc_debug = ~0;
		rpc_show_tasks();
		return;