as-iosched.c
}

static int
as_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
	struct as_data *ad = q->elevator->elevator_data;
	sector_t rb_key = bio->bi_sector + bio_sectors(bio);
	struct request *__rq;
	int ret;

	/*
	 * try last_merge to avoid going to hash
	 */
	ret = elv_try_last_merge(q, bio);
	if (ret != ELEVATOR_NO_MERGE) {
		__rq = q->last_merge;
		goto out_insert;
	}

	/*
	 * see if the merge hash can satisfy a back merge
	 */
	__rq = as_find_arq_hash(ad, bio->bi_sector);
	if (__rq) {
		BUG_ON(__rq->sector + __rq->nr_sectors != bio->bi_sector);

		if (elv_rq_merge_ok(__rq, bio)) {
			ret = ELEVATOR_BACK_MERGE;
			goto out;
		}
	}

	/*
	 * check for front merge
	 */
	__rq = as_find_arq_rb(ad, rb_key, bio_data_dir(bio));
	if (__rq) {
		BUG_ON(rb_key != rq_rb_key(__rq));

		if (elv_rq_merge_ok(__rq, bio)) {
			ret = ELEVATOR_FRONT_MERGE;
			goto out;
		}
	}

	return ELEVATOR_NO_MERGE;
out:
	if (rq_mergeable(__rq))
		q->last_merge = __rq;
out_insert:
	if (ret) {
		if (rq_mergeable(__rq))
			as_hot_arq_hash(ad, RQ_DATA(__rq));
	}
	*req = __rq;
	return ret;
}

static void as_merged_request(request_queue_t *q, struct request *req)
{
	struct as_data *ad = q->elevator->elevator_data;
	struct as_rq *arq = RQ_DATA(req);

	/*
	 * hash always needs to be repositioned, key is end sector
	 */
	as_del_arq_hash(arq);
	as_add_arq_hash(ad, arq);

	/*
	 * if the merge was a front merge, we need to reposition request
	 */
	if (rq_rb_key(req) != arq->rb_key) {
		struct as_rq *alias, *next_arq = NULL;

		if (ad->next_arq[arq->is_sync] == arq)
			next_arq = as_find_next_arq(ad, arq);

		/*
		 * Note! We should really be moving any old aliased requests
		 * off this request and try to insert them into the rbtree. We
		 * currently don't bother. Ditto the next function.
		 */
		as_del_arq_rb(ad, arq);
		if ((alias = as_add_arq_rb(ad, arq))) {
			list_del_init(&arq->fifo);
			as_add_aliased_request(ad, arq, alias);
			if (next_arq)
				ad->next_arq[arq->is_sync] = next_arq;
		}
		/*
		 * Note! At this stage of this and the next function, our next
		 * request may not be optimal - eg the request may have "grown"
		 * behind the disk head. We currently don't bother adjusting.
		 */
	}

	if (arq->on_hash)
		q->last_merge = req;
}

static void
as_merged_requests(request_queue_t *q, struct request *req,
			struct request *next)
{
	struct as_data *ad = q->elevator->elevator_data;
	struct as_rq *arq = RQ_DATA(req);
	struct as_rq *anext = RQ_DATA(next);

	BUG_ON(!arq);
	BUG_ON(!anext);

	/*
	 * reposition arq (this is the merged request) in hash, and in rbtree
	 * in case of a front merge
	 */
	as_del_arq_hash(arq);
	as_add_arq_hash(ad, arq);

	if (rq_rb_key(req) != arq->rb_key) {
		struct as_rq *alias, *next_arq = NULL;

		if (ad->next_arq[arq->is_sync] == arq)
			next_arq = as_find_next_arq(ad, arq);

		as_del_arq_rb(ad, arq);
		if ((alias = as_add_arq_rb(ad, arq))) {
			list_del_init(&arq->fifo);
			as_add_aliased_request(ad, arq, alias);
			if (next_arq)
				ad->next_arq[arq->is_sync] = next_arq;
		}
	}

	/*
	 * if anext expires before arq, assign its expire time to arq
	 * and move into anext position (anext will be deleted) in fifo
	 */
	if (!list_empty(&arq->fifo) && !list_empty(&anext->fifo)) {
		if (time_before(anext->expires, arq->expires)) {
			list_move(&arq->fifo, &anext->fifo);
			arq->expires = anext->expires;
			/*
			 * Don't copy here but swap, because when anext is
			 * removed below, it must contain the unused context
			 */
			swap_io_context(&arq->io_context, &anext->io_context);
		}
	}

	/*
	 * Transfer list of aliases
	 */
	while (!list_empty(&next->queuelist)) {
		struct request *__rq = list_entry_rq(next->queuelist.next);
		struct as_rq *__arq = RQ_DATA(__rq);

		list_move_tail(&__rq->queuelist, &req->queuelist);

		WARN_ON(__arq->state != AS_RQ_QUEUED);
	}

	/*
	 * kill knowledge of next, this one is a goner
	 */
	as_remove_queued_request(q, next);
	anext->state = AS_RQ_MERGED;
}

/*
 * This is executed in a "deferred" process context, by kblockd. It calls the
 * driver's request_fn so the driver can submit that request.
 *
 * IMPORTANT! This guy will reenter the elevator, so set up all queue global
 * state before calling, and don't rely on any state over calls.
 *
 * FIXME! dispatch queue is not a queue at all!
 */
static void as_work_handler(void *data)
{
	struct request_queue *q = data;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	if (as_next_request(q))
		q->request_fn(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

static void as_put_request(request_queue_t *q, struct request *rq)
{
	struct as_data *ad = q->elevator->elevator_data;
	struct as_rq *arq = RQ_DATA(rq);

	if (!arq) {
		WARN_ON(1);
		return;
	}

	if (arq->state != AS_RQ_POSTSCHED && arq->state != AS_RQ_PRESCHED) {
		printk("arq->state %d\n", arq->state);
		WARN_ON(1);
	}

	mempool_free(arq, ad->arq_pool);
	rq->elevator_private = NULL;
}

static int as_set_request(request_queue_t *q, struct request *rq,
			  struct bio *bio, int gfp_mask)
{
	struct as_data *ad = q->elevator->elevator_data;
	struct as_rq *arq = mempool_alloc(ad->arq_pool, gfp_mask);

	if (arq) {
		memset(arq, 0, sizeof(*arq));
		RB_CLEAR(&arq->rb_node);
		arq->request = rq;
		arq->state = AS_RQ_PRESCHED;
		arq->io_context = NULL;
		INIT_LIST_HEAD(&arq->hash);
		arq->on_hash = 0;
		INIT_LIST_HEAD(&arq->fifo);
		rq->elevator_private = arq;
		return 0;
	}

	return 1;
}

static int as_may_queue(request_queue_t *q, int rw, struct bio *bio)
{
	int ret = ELV_MQUEUE_MAY;
	struct as_data *ad = q->elevator->elevator_data;
	struct io_context *ioc;

	if (ad->antic_status == ANTIC_WAIT_REQ ||
			ad->antic_status == ANTIC_WAIT_NEXT) {
		ioc = as_get_io_context();
		if (ad->io_context == ioc)
			ret = ELV_MQUEUE_MUST;
		put_io_context(ioc);
	}

	return ret;
}

static void as_exit_queue(elevator_t *e)
{
	struct as_data *ad = e->elevator_data;

	del_timer_sync(&ad->antic_timer);
	kblockd_flush();

	BUG_ON(!list_empty(&ad->fifo_list[REQ_SYNC]));
	BUG_ON(!list_empty(&ad->fifo_list[REQ_ASYNC]));

	mempool_destroy(ad->arq_pool);
	put_io_context(ad->io_context);
	kfree(ad->hash);
	kfree(ad);
}

/*
 * initialize elevator private data (as_data), and alloc a arq for
 * each request on the free lists
 */
static int as_init_queue(request_queue_t *q, elevator_t *e)
{
	struct as_data *ad;
	int i;

	if (!arq_pool)
		return -ENOMEM;

	ad = kmalloc_node(sizeof(*ad), GFP_KERNEL, q->node);
	if (!ad)
		return -ENOMEM;
	memset(ad, 0, sizeof(*ad));

	ad->q = q; /* Identify what queue the data belongs to */

	ad->hash = kmalloc_node(sizeof(struct list_head)*AS_HASH_ENTRIES,
				GFP_KERNEL, q->node);
	if (!ad->hash) {
		kfree(ad);
		return -ENOMEM;
	}

	ad->arq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
				mempool_free_slab, arq_pool, q->node);
	if (!ad->arq_pool) {
		kfree(ad->hash);
		kfree(ad);
		return -ENOMEM;
	}

	/* anticipatory scheduling helpers */
	ad->antic_timer.function = as_antic_timeout;
	ad->antic_timer.data = (unsigned long)q;
	init_timer(&ad->antic_timer);
	INIT_WORK(&ad->antic_work, as_work_handler, q);

	for (i = 0; i < AS_HASH_ENTRIES; i++)
		INIT_LIST_HEAD(&ad->hash[i]);

	INIT_LIST_HEAD(&ad->fifo_list[REQ_SYNC]);
	INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]);
	ad->sort_list[REQ_SYNC] = RB_ROOT;
	ad->sort_list[REQ_ASYNC] = RB_ROOT;
	ad->dispatch = &q->queue_head;
	ad->fifo_expire[REQ_SYNC] = default_read_expire;
	ad->fifo_expire[REQ_ASYNC] = default_write_expire;
	ad->antic_expire = default_antic_expire;
	ad->batch_expire[REQ_SYNC] = default_read_batch_expire;
	ad->batch_expire[REQ_ASYNC] = default_write_batch_expire;
	e->elevator_data = ad;

	ad->current_batch_expires = jiffies + ad->batch_expire[REQ_SYNC];
	ad->write_batch_count = ad->batch_expire[REQ_ASYNC] / 10;
	if (ad->write_batch_count < 2)
		ad->write_batch_count = 2;

	return 0;
}

/*
 * sysfs parts below
 */

struct as_fs_entry {
	struct attribute attr;
	ssize_t (*show)(struct as_data *, char *);
	ssize_t (*store)(struct as_data *, const char *, size_t);
};

static ssize_t
as_var_show(unsigned int var, char *page)
{
	return sprintf(page, "%d\n", var);
}

static ssize_t
as_var_store(unsigned long *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtoul(p, &p, 10);
	return count;
}

static ssize_t as_est_show(struct as_data *ad, char *page)
{
	int pos = 0;

	pos += sprintf(page+pos, "%lu %% exit probability\n",
				100*ad->exit_prob/256);
	pos += sprintf(page+pos, "%lu ms new thinktime\n", ad->new_ttime_mean);
	pos += sprintf(page+pos, "%llu sectors new seek distance\n",
				(unsigned long long)ad->new_seek_mean);

	return pos;
}

#define SHOW_FUNCTION(__FUNC, __VAR) \
static ssize_t __FUNC(struct as_data *ad, char *page) \
{ \
	return as_var_show(jiffies_to_msecs((__VAR)), (page)); \
}
SHOW_FUNCTION(as_readexpire_show, ad->fifo_expire[REQ_SYNC]);
SHOW_FUNCTION(as_writeexpire_show, ad->fifo_expire[REQ_ASYNC]);
SHOW_FUNCTION(as_anticexpire_show, ad->antic_expire);
SHOW_FUNCTION(as_read_batchexpire_show, ad->batch_expire[REQ_SYNC]);
SHOW_FUNCTION(as_write_batchexpire_show, ad->batch_expire[REQ_ASYNC]);
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \
static ssize_t __FUNC(struct as_data *ad, const char *page, size_t count) \
{ \
	int ret = as_var_store(__PTR, (page), count); \
	if (*(__PTR) < (MIN)) \
		*(__PTR) = (MIN); \
	else if (*(__PTR) > (MAX)) \
		*(__PTR) = (MAX); \
	*(__PTR) = msecs_to_jiffies(*(__PTR)); \
	return ret; \
}
STORE_FUNCTION(as_readexpire_store, &ad->fifo_expire[REQ_SYNC], 0, INT_MAX);
STORE_FUNCTION(as_writeexpire_store, &ad->fifo_expire[REQ_ASYNC], 0, INT_MAX);
STORE_FUNCTION(as_anticexpire_store, &ad->antic_expire, 0, INT_MAX);
STORE_FUNCTION(as_read_batchexpire_store,
			&ad->batch_expire[REQ_SYNC], 0, INT_MAX);
STORE_FUNCTION(as_write_batchexpire_store,
			&ad->batch_expire[REQ_ASYNC], 0, INT_MAX);
#undef STORE_FUNCTION

static struct as_fs_entry as_est_entry = {
	.attr = {.name = "est_time", .mode = S_IRUGO },
	.show = as_est_show,
};
static struct as_fs_entry as_readexpire_entry = {
	.attr = {.name = "read_expire", .mode = S_IRUGO | S_IWUSR },
	.show = as_readexpire_show,
	.store = as_readexpire_store,
};
static struct as_fs_entry as_writeexpire_entry = {
	.attr = {.name = "write_expire", .mode = S_IRUGO | S_IWUSR },
	.show = as_writeexpire_show,
	.store = as_writeexpire_store,
};
static struct as_fs_entry as_anticexpire_entry = {
	.attr = {.name = "antic_expire", .mode = S_IRUGO | S_IWUSR },
	.show = as_anticexpire_show,
	.store = as_anticexpire_store,
};
static struct as_fs_entry as_read_batchexpire_entry = {
	.attr = {.name = "read_batch_expire", .mode = S_IRUGO | S_IWUSR },
	.show = as_read_batchexpire_show,
	.store = as_read_batchexpire_store,
};
static struct as_fs_entry as_write_batchexpire_entry = {
	.attr = {.name = "write_batch_expire", .mode = S_IRUGO | S_IWUSR },
	.show = as_write_batchexpire_show,
	.store = as_write_batchexpire_store,
};

static struct attribute *default_attrs[] = {
	&as_est_entry.attr,
	&as_readexpire_entry.attr,
	&as_writeexpire_entry.attr,
	&as_anticexpire_entry.attr,
	&as_read_batchexpire_entry.attr,
	&as_write_batchexpire_entry.attr,
	NULL,
};

#define to_as(atr) container_of((atr), struct as_fs_entry, attr)

static ssize_t
as_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	elevator_t *e = container_of(kobj, elevator_t, kobj);
	struct as_fs_entry *entry = to_as(attr);

	if (!entry->show)
		return -EIO;

	return entry->show(e->elevator_data, page);
}

static ssize_t
as_attr_store(struct kobject *kobj, struct attribute *attr,
	      const char *page, size_t length)
{
	elevator_t *e = container_of(kobj, elevator_t, kobj);
	struct as_fs_entry *entry = to_as(attr);

	if (!entry->store)
		return -EIO;

	return entry->store(e->elevator_data, page, length);
}

static struct sysfs_ops as_sysfs_ops = {
	.show	= as_attr_show,
	.store	= as_attr_store,
};

static struct kobj_type as_ktype = {
	.sysfs_ops	= &as_sysfs_ops,
	.default_attrs	= default_attrs,
};

static struct elevator_type iosched_as = {
	.ops = {
		.elevator_merge_fn =		as_merge,
		.elevator_merged_fn =		as_merged_request,
		.elevator_merge_req_fn =	as_merged_requests,
		.elevator_next_req_fn =		as_next_request,
		.elevator_add_req_fn =		as_insert_request,
		.elevator_remove_req_fn =	as_remove_request,
		.elevator_requeue_req_fn =	as_requeue_request,
		.elevator_deactivate_req_fn =	as_deactivate_request,
		.elevator_queue_empty_fn =	as_queue_empty,
		.elevator_completed_req_fn =	as_completed_request,
		.elevator_former_req_fn =	as_former_request,
		.elevator_latter_req_fn =	as_latter_request,
		.elevator_set_req_fn =		as_set_request,
		.elevator_put_req_fn =		as_put_request,
		.elevator_may_queue_fn =	as_may_queue,
		.elevator_init_fn =		as_init_queue,
		.elevator_exit_fn =		as_exit_queue,
	},

	.elevator_ktype = &as_ktype,
	.elevator_name = "anticipatory",
	.elevator_owner = THIS_MODULE,
};

static int __init as_init(void)
{
	int ret;

	arq_pool = kmem_cache_create("as_arq", sizeof(struct as_rq),
				     0, 0, NULL, NULL);
	if (!arq_pool)
		return -ENOMEM;

	ret = elv_register(&iosched_as);
	if (!ret) {
		/*
		 * don't allow AS to get unregistered, since we would have
		 * to browse all tasks in the system and release their
		 * as_io_context first
		 */
		__module_get(THIS_MODULE);
		return 0;
	}

	kmem_cache_destroy(arq_pool);
	return ret;
}

static void __exit as_exit(void)
{
	kmem_cache_destroy(arq_pool);
	elv_unregister(&iosched_as);
}

module_init(as_init);
module_exit(as_exit);

MODULE_AUTHOR("Nick Piggin");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("anticipatory IO scheduler");
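
/*
 * Illustrative sketch, not part of the original file: roughly what the
 * preprocessor generates from the SHOW_FUNCTION()/STORE_FUNCTION() macros
 * above for the "read_expire" tunable (the other tunables -- write_expire,
 * antic_expire, read_batch_expire, write_batch_expire -- expand the same
 * way). The value is reported via sysfs in milliseconds through
 * jiffies_to_msecs(), and a written value is clamped to [0, INT_MAX] and
 * converted back to jiffies with msecs_to_jiffies(). Guarded by "#if 0"
 * because the macro invocations already define these functions.
 */
#if 0
static ssize_t as_readexpire_show(struct as_data *ad, char *page)
{
	/* report the sync fifo expiry in milliseconds */
	return as_var_show(jiffies_to_msecs(ad->fifo_expire[REQ_SYNC]), page);
}

static ssize_t as_readexpire_store(struct as_data *ad, const char *page,
				   size_t count)
{
	/* parse the written value, clamp it, then store it as jiffies */
	int ret = as_var_store(&ad->fifo_expire[REQ_SYNC], page, count);

	if (ad->fifo_expire[REQ_SYNC] < 0)
		ad->fifo_expire[REQ_SYNC] = 0;
	else if (ad->fifo_expire[REQ_SYNC] > INT_MAX)
		ad->fifo_expire[REQ_SYNC] = INT_MAX;
	ad->fifo_expire[REQ_SYNC] = msecs_to_jiffies(ad->fifo_expire[REQ_SYNC]);
	return ret;
}
#endif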