cfq-iosched.c
		if (rw == READ || process_sync(tsk))
			cfq_mark_crq_is_sync(crq);
		else
			cfq_clear_crq_is_sync(crq);

		rq->elevator_private = crq;
		return 0;
	}

	spin_lock_irqsave(q->queue_lock, flags);
	cfqq->allocated[rw]--;
	if (!(cfqq->allocated[0] + cfqq->allocated[1]))
		cfq_mark_cfqq_must_alloc(cfqq);

	cfq_put_queue(cfqq);
queue_fail:
	if (cic)
		put_io_context(cic->ioc);
	/*
	 * mark us rq allocation starved. we need to kickstart the process
	 * ourselves if there are no pending requests that can do it for us.
	 * that would be an extremely rare OOM situation
	 */
	cfqd->rq_starved = 1;
	cfq_schedule_dispatch(cfqd);
	spin_unlock_irqrestore(q->queue_lock, flags);
	return 1;
}

static void cfq_kick_queue(void *data)
{
	request_queue_t *q = data;
	struct cfq_data *cfqd = q->elevator->elevator_data;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);

	if (cfqd->rq_starved) {
		struct request_list *rl = &q->rq;

		/*
		 * we aren't guaranteed to get a request after this, but we
		 * have to be opportunistic
		 */
		smp_mb();
		if (waitqueue_active(&rl->wait[READ]))
			wake_up(&rl->wait[READ]);
		if (waitqueue_active(&rl->wait[WRITE]))
			wake_up(&rl->wait[WRITE]);
	}

	blk_remove_plug(q);
	q->request_fn(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

/*
 * Timer running if the active_queue is currently idling inside its time slice
 */
static void cfq_idle_slice_timer(unsigned long data)
{
	struct cfq_data *cfqd = (struct cfq_data *) data;
	struct cfq_queue *cfqq;
	unsigned long flags;

	spin_lock_irqsave(cfqd->queue->queue_lock, flags);

	if ((cfqq = cfqd->active_queue) != NULL) {
		unsigned long now = jiffies;

		/*
		 * expired
		 */
		if (time_after(now, cfqq->slice_end))
			goto expire;

		/*
		 * only expire and reinvoke request handler, if there are
		 * other queues with pending requests
		 */
		if (!cfq_pending_requests(cfqd)) {
			cfqd->idle_slice_timer.expires = min(now + cfqd->cfq_slice_idle, cfqq->slice_end);
			add_timer(&cfqd->idle_slice_timer);
			goto out_cont;
		}

		/*
		 * not expired and it has a request pending, let it dispatch
		 */
		if (!RB_EMPTY(&cfqq->sort_list)) {
			cfq_mark_cfqq_must_dispatch(cfqq);
			goto out_kick;
		}
	}
expire:
	cfq_slice_expired(cfqd, 0);
out_kick:
	cfq_schedule_dispatch(cfqd);
out_cont:
	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}

/*
 * Timer running if an idle class queue is waiting for service
 */
static void cfq_idle_class_timer(unsigned long data)
{
	struct cfq_data *cfqd = (struct cfq_data *) data;
	unsigned long flags, end;

	spin_lock_irqsave(cfqd->queue->queue_lock, flags);

	/*
	 * race with a non-idle queue, reset timer
	 */
	end = cfqd->last_end_request + CFQ_IDLE_GRACE;
	if (!time_after_eq(jiffies, end)) {
		cfqd->idle_class_timer.expires = end;
		add_timer(&cfqd->idle_class_timer);
	} else
		cfq_schedule_dispatch(cfqd);

	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}

static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
{
	del_timer_sync(&cfqd->idle_slice_timer);
	del_timer_sync(&cfqd->idle_class_timer);
	blk_sync_queue(cfqd->queue);
}

static void cfq_put_cfqd(struct cfq_data *cfqd)
{
	request_queue_t *q = cfqd->queue;

	if (!atomic_dec_and_test(&cfqd->ref))
		return;

	blk_put_queue(q);

	cfq_shutdown_timer_wq(cfqd);
	q->elevator->elevator_data = NULL;

	mempool_destroy(cfqd->crq_pool);
	kfree(cfqd->crq_hash);
	kfree(cfqd->cfq_hash);
	kfree(cfqd);
}

static void cfq_exit_queue(elevator_t *e)
{
	struct cfq_data *cfqd = e->elevator_data;

	cfq_shutdown_timer_wq(cfqd);
	cfq_put_cfqd(cfqd);
}
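/*
 * Editorial note, not part of the original file: cfq_put_cfqd() above is
 * reference counted. Teardown of the timers, mempool and hash tables only
 * runs once atomic_dec_and_test() drops the last reference, at which point
 * the queue reference taken via atomic_inc(&q->refcnt) in cfq_init_queue()
 * below is released with blk_put_queue().
 */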
static int cfq_init_queue(request_queue_t *q, elevator_t *e)
{
	struct cfq_data *cfqd;
	int i;

	cfqd = kmalloc(sizeof(*cfqd), GFP_KERNEL);
	if (!cfqd)
		return -ENOMEM;

	memset(cfqd, 0, sizeof(*cfqd));

	for (i = 0; i < CFQ_PRIO_LISTS; i++)
		INIT_LIST_HEAD(&cfqd->rr_list[i]);

	INIT_LIST_HEAD(&cfqd->busy_rr);
	INIT_LIST_HEAD(&cfqd->cur_rr);
	INIT_LIST_HEAD(&cfqd->idle_rr);
	INIT_LIST_HEAD(&cfqd->empty_list);

	cfqd->crq_hash = kmalloc(sizeof(struct hlist_head) * CFQ_MHASH_ENTRIES, GFP_KERNEL);
	if (!cfqd->crq_hash)
		goto out_crqhash;

	cfqd->cfq_hash = kmalloc(sizeof(struct hlist_head) * CFQ_QHASH_ENTRIES, GFP_KERNEL);
	if (!cfqd->cfq_hash)
		goto out_cfqhash;

	cfqd->crq_pool = mempool_create(BLKDEV_MIN_RQ, mempool_alloc_slab, mempool_free_slab, crq_pool);
	if (!cfqd->crq_pool)
		goto out_crqpool;

	for (i = 0; i < CFQ_MHASH_ENTRIES; i++)
		INIT_HLIST_HEAD(&cfqd->crq_hash[i]);
	for (i = 0; i < CFQ_QHASH_ENTRIES; i++)
		INIT_HLIST_HEAD(&cfqd->cfq_hash[i]);

	e->elevator_data = cfqd;

	cfqd->queue = q;
	atomic_inc(&q->refcnt);

	cfqd->max_queued = q->nr_requests / 4;
	q->nr_batching = cfq_queued;

	init_timer(&cfqd->idle_slice_timer);
	cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
	cfqd->idle_slice_timer.data = (unsigned long) cfqd;

	init_timer(&cfqd->idle_class_timer);
	cfqd->idle_class_timer.function = cfq_idle_class_timer;
	cfqd->idle_class_timer.data = (unsigned long) cfqd;

	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue, q);

	atomic_set(&cfqd->ref, 1);

	cfqd->cfq_queued = cfq_queued;
	cfqd->cfq_quantum = cfq_quantum;
	cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
	cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
	cfqd->cfq_back_max = cfq_back_max;
	cfqd->cfq_back_penalty = cfq_back_penalty;
	cfqd->cfq_slice[0] = cfq_slice_async;
	cfqd->cfq_slice[1] = cfq_slice_sync;
	cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
	cfqd->cfq_slice_idle = cfq_slice_idle;
	cfqd->cfq_max_depth = cfq_max_depth;

	return 0;
out_crqpool:
	kfree(cfqd->cfq_hash);
out_cfqhash:
	kfree(cfqd->crq_hash);
out_crqhash:
	kfree(cfqd);
	return -ENOMEM;
}

static void cfq_slab_kill(void)
{
	if (crq_pool)
		kmem_cache_destroy(crq_pool);
	if (cfq_pool)
		kmem_cache_destroy(cfq_pool);
	if (cfq_ioc_pool)
		kmem_cache_destroy(cfq_ioc_pool);
}

static int __init cfq_slab_setup(void)
{
	crq_pool = kmem_cache_create("crq_pool", sizeof(struct cfq_rq), 0, 0,
					NULL, NULL);
	if (!crq_pool)
		goto fail;

	cfq_pool = kmem_cache_create("cfq_pool", sizeof(struct cfq_queue), 0, 0,
					NULL, NULL);
	if (!cfq_pool)
		goto fail;

	cfq_ioc_pool = kmem_cache_create("cfq_ioc_pool",
			sizeof(struct cfq_io_context), 0, 0, NULL, NULL);
	if (!cfq_ioc_pool)
		goto fail;

	return 0;
fail:
	cfq_slab_kill();
	return -ENOMEM;
}

/*
 * sysfs parts below -->
 */
struct cfq_fs_entry {
	struct attribute attr;
	ssize_t (*show)(struct cfq_data *, char *);
	ssize_t (*store)(struct cfq_data *, const char *, size_t);
};

static ssize_t
cfq_var_show(unsigned int var, char *page)
{
	return sprintf(page, "%d\n", var);
}

static ssize_t
cfq_var_store(unsigned int *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtoul(p, &p, 10);
	return count;
}

#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
static ssize_t __FUNC(struct cfq_data *cfqd, char *page)		\
{									\
	unsigned int __data = __VAR;					\
	if (__CONV)							\
		__data = jiffies_to_msecs(__data);			\
	return cfq_var_show(__data, (page));				\
}
SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
SHOW_FUNCTION(cfq_queued_show, cfqd->cfq_queued, 0);
SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
SHOW_FUNCTION(cfq_back_max_show, cfqd->cfq_back_max, 0);
SHOW_FUNCTION(cfq_back_penalty_show, cfqd->cfq_back_penalty, 0);
SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
SHOW_FUNCTION(cfq_max_depth_show, cfqd->cfq_max_depth, 0);
#undef SHOW_FUNCTION
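/*
 * For illustration (hand-expanded, not in the original source): the first
 * SHOW_FUNCTION() invocation above generates roughly the following, with
 * the dead `if (0)` conversion branch compiled away:
 *
 *	static ssize_t cfq_quantum_show(struct cfq_data *cfqd, char *page)
 *	{
 *		unsigned int __data = cfqd->cfq_quantum;
 *		if (0)
 *			__data = jiffies_to_msecs(__data);
 *		return cfq_var_show(__data, (page));
 *	}
 */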
#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t __FUNC(struct cfq_data *cfqd, const char *page, size_t count)	\
{									\
	unsigned int __data;						\
	int ret = cfq_var_store(&__data, (page), count);		\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	if (__CONV)							\
		*(__PTR) = msecs_to_jiffies(__data);			\
	else								\
		*(__PTR) = __data;					\
	return ret;							\
}
STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
STORE_FUNCTION(cfq_queued_store, &cfqd->cfq_queued, 1, UINT_MAX, 0);
STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_back_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
STORE_FUNCTION(cfq_back_penalty_store, &cfqd->cfq_back_penalty, 1, UINT_MAX, 0);
STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, UINT_MAX, 0);
STORE_FUNCTION(cfq_max_depth_store, &cfqd->cfq_max_depth, 1, UINT_MAX, 0);
#undef STORE_FUNCTION

static struct cfq_fs_entry cfq_quantum_entry = {
	.attr = {.name = "quantum", .mode = S_IRUGO | S_IWUSR },
	.show = cfq_quantum_show,
	.store = cfq_quantum_store,
};
static struct cfq_fs_entry cfq_queued_entry = {
	.attr = {.name = "queued", .mode = S_IRUGO | S_IWUSR },
	.show = cfq_queued_show,
	.store = cfq_queued_store,
};
static struct cfq_fs_entry cfq_fifo_expire_sync_entry = {
	.attr = {.name = "fifo_expire_sync", .mode = S_IRUGO | S_IWUSR },
	.show = cfq_fifo_expire_sync_show,
	.store = cfq_fifo_expire_sync_store,
};
static struct cfq_fs_entry cfq_fifo_expire_async_entry = {
	.attr = {.name = "fifo_expire_async", .mode = S_IRUGO | S_IWUSR },
	.show = cfq_fifo_expire_async_show,
	.store = cfq_fifo_expire_async_store,
};
static struct cfq_fs_entry cfq_back_max_entry = {
	.attr = {.name = "back_seek_max", .mode = S_IRUGO | S_IWUSR },
	.show = cfq_back_max_show,
	.store = cfq_back_max_store,
};
static struct cfq_fs_entry cfq_back_penalty_entry = {
	.attr = {.name = "back_seek_penalty", .mode = S_IRUGO | S_IWUSR },
	.show = cfq_back_penalty_show,
	.store = cfq_back_penalty_store,
};
static struct cfq_fs_entry cfq_slice_sync_entry = {
	.attr = {.name = "slice_sync", .mode = S_IRUGO | S_IWUSR },
	.show = cfq_slice_sync_show,
	.store = cfq_slice_sync_store,
};
static struct cfq_fs_entry cfq_slice_async_entry = {
	.attr = {.name = "slice_async", .mode = S_IRUGO | S_IWUSR },
	.show = cfq_slice_async_show,
	.store = cfq_slice_async_store,
};
static struct cfq_fs_entry cfq_slice_async_rq_entry = {
	.attr = {.name = "slice_async_rq", .mode = S_IRUGO | S_IWUSR },
	.show = cfq_slice_async_rq_show,
	.store = cfq_slice_async_rq_store,
};
static struct cfq_fs_entry cfq_slice_idle_entry = {
	.attr = {.name = "slice_idle", .mode = S_IRUGO | S_IWUSR },
	.show = cfq_slice_idle_show,
	.store = cfq_slice_idle_store,
};
static struct cfq_fs_entry cfq_max_depth_entry = {
	.attr = {.name = "max_depth", .mode = S_IRUGO | S_IWUSR },
	.show = cfq_max_depth_show,
	.store = cfq_max_depth_store,
};
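/*
 * Usage sketch (assumed from the 2.6 elevator sysfs layout, not part of the
 * original file): once cfq is the active scheduler for a disk, each entry
 * above appears as a file under /sys/block/<dev>/queue/iosched/, e.g.
 *
 *	# cat /sys/block/hda/queue/iosched/quantum
 *	# echo 16 > /sys/block/hda/queue/iosched/quantum
 *
 * Reads go through cfq_attr_show() below and writes through cfq_attr_store(),
 * which dispatch to the per-attribute show/store handlers.
 */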
static struct attribute *default_attrs[] = {
	&cfq_quantum_entry.attr,
	&cfq_queued_entry.attr,
	&cfq_fifo_expire_sync_entry.attr,
	&cfq_fifo_expire_async_entry.attr,
	&cfq_back_max_entry.attr,
	&cfq_back_penalty_entry.attr,
	&cfq_slice_sync_entry.attr,
	&cfq_slice_async_entry.attr,
	&cfq_slice_async_rq_entry.attr,
	&cfq_slice_idle_entry.attr,
	&cfq_max_depth_entry.attr,
	NULL,
};

#define to_cfq(atr) container_of((atr), struct cfq_fs_entry, attr)

static ssize_t
cfq_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	elevator_t *e = container_of(kobj, elevator_t, kobj);
	struct cfq_fs_entry *entry = to_cfq(attr);

	if (!entry->show)
		return -EIO;

	return entry->show(e->elevator_data, page);
}

static ssize_t
cfq_attr_store(struct kobject *kobj, struct attribute *attr,
	       const char *page, size_t length)
{
	elevator_t *e = container_of(kobj, elevator_t, kobj);
	struct cfq_fs_entry *entry = to_cfq(attr);

	if (!entry->store)
		return -EIO;

	return entry->store(e->elevator_data, page, length);
}

static struct sysfs_ops cfq_sysfs_ops = {
	.show	= cfq_attr_show,
	.store	= cfq_attr_store,
};

static struct kobj_type cfq_ktype = {
	.sysfs_ops	= &cfq_sysfs_ops,
	.default_attrs	= default_attrs,
};

static struct elevator_type iosched_cfq = {
	.ops = {
		.elevator_merge_fn =		cfq_merge,
		.elevator_merged_fn =		cfq_merged_request,
		.elevator_merge_req_fn =	cfq_merged_requests,
		.elevator_next_req_fn =		cfq_next_request,
		.elevator_add_req_fn =		cfq_insert_request,
		.elevator_remove_req_fn =	cfq_remove_request,
		.elevator_requeue_req_fn =	cfq_requeue_request,
		.elevator_deactivate_req_fn =	cfq_deactivate_request,
		.elevator_queue_empty_fn =	cfq_queue_empty,
		.elevator_completed_req_fn =	cfq_completed_request,
		.elevator_former_req_fn =	cfq_former_request,
		.elevator_latter_req_fn =	cfq_latter_request,
		.elevator_set_req_fn =		cfq_set_request,
		.elevator_put_req_fn =		cfq_put_request,
		.elevator_may_queue_fn =	cfq_may_queue,
		.elevator_init_fn =		cfq_init_queue,
		.elevator_exit_fn =		cfq_exit_queue,
	},
	.elevator_ktype =	&cfq_ktype,
	.elevator_name =	"cfq",
	.elevator_owner =	THIS_MODULE,
};

static int __init cfq_init(void)
{
	int ret;

	/*
	 * could be 0 on HZ < 1000 setups
	 */
	if (!cfq_slice_async)
		cfq_slice_async = 1;
	if (!cfq_slice_idle)
		cfq_slice_idle = 1;

	if (cfq_slab_setup())
		return -ENOMEM;

	ret = elv_register(&iosched_cfq);
	if (ret)
		cfq_slab_kill();

	return ret;
}

static void __exit cfq_exit(void)
{
	struct task_struct *g, *p;
	unsigned long flags;

	read_lock_irqsave(&tasklist_lock, flags);

	/*
	 * iterate each process in the system, removing our io_context
	 */
	do_each_thread(g, p) {
		struct io_context *ioc = p->io_context;

		if (ioc && ioc->cic) {
			ioc->cic->exit(ioc->cic);
			cfq_free_io_context(ioc->cic);
			ioc->cic = NULL;
		}
	} while_each_thread(g, p);

	read_unlock_irqrestore(&tasklist_lock, flags);

	cfq_slab_kill();
	elv_unregister(&iosched_cfq);
}

module_init(cfq_init);
module_exit(cfq_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");
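/*
 * Usage sketch (assumed standard 2.6 elevator mechanics, not part of the
 * original file): the scheduler registered by elv_register() above can be
 * selected per-queue at runtime, or system-wide at boot:
 *
 *	# echo cfq > /sys/block/hda/queue/scheduler
 *
 * or by booting with the kernel parameter "elevator=cfq".
 */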