cfq-iosched.c
From "Linux Kernel 2.6.9 for OMAP1710" · C code · 891 lines total · page 1 of 2
	return __cfq_find_cfq_hash(cfqd, pid, hashval);
}

static void cfq_put_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfqd->busy_queues--;
	list_del(&cfqq->cfq_list);
	list_del(&cfqq->cfq_hash);
	mempool_free(cfqq, cfq_mpool);
}

static struct cfq_queue *__cfq_get_queue(struct cfq_data *cfqd, int pid,
					 int gfp_mask)
{
	const int hashval = hash_long(current->tgid, CFQ_QHASH_SHIFT);
	struct cfq_queue *cfqq, *new_cfqq = NULL;
	request_queue_t *q = cfqd->queue;

retry:
	cfqq = __cfq_find_cfq_hash(cfqd, pid, hashval);

	if (!cfqq) {
		if (new_cfqq) {
			cfqq = new_cfqq;
			new_cfqq = NULL;
		} else if (gfp_mask & __GFP_WAIT) {
			/*
			 * drop the queue lock around the blocking allocation,
			 * then retry the lookup in case someone else set up
			 * a queue for this pid in the meantime
			 */
			spin_unlock_irq(q->queue_lock);
			new_cfqq = mempool_alloc(cfq_mpool, gfp_mask);
			spin_lock_irq(q->queue_lock);
			goto retry;
		} else
			return NULL;

		INIT_LIST_HEAD(&cfqq->cfq_hash);
		INIT_LIST_HEAD(&cfqq->cfq_list);
		RB_CLEAR_ROOT(&cfqq->sort_list);

		cfqq->pid = pid;
		cfqq->queued[0] = cfqq->queued[1] = 0;
		list_add(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]);
	}

	if (new_cfqq)
		mempool_free(new_cfqq, cfq_mpool);

	return cfqq;
}

static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, int pid,
				       int gfp_mask)
{
	request_queue_t *q = cfqd->queue;
	struct cfq_queue *cfqq;

	spin_lock_irq(q->queue_lock);
	cfqq = __cfq_get_queue(cfqd, pid, gfp_mask);
	spin_unlock_irq(q->queue_lock);

	return cfqq;
}

static void cfq_enqueue(struct cfq_data *cfqd, struct cfq_rq *crq)
{
	struct cfq_queue *cfqq;

	cfqq = __cfq_get_queue(cfqd, current->tgid, GFP_ATOMIC);
	if (cfqq) {
		cfq_add_crq_rb(cfqd, cfqq, crq);

		if (list_empty(&cfqq->cfq_list)) {
			list_add(&cfqq->cfq_list, &cfqd->rr_list);
			cfqd->busy_queues++;
		}
	} else {
		/*
		 * this should only happen if the request wasn't allocated
		 * through blk_alloc_request(), eg stack requests from ide-cd
		 * (those should be removed) _and_ we are in OOM.
		 */
		list_add_tail(&crq->request->queuelist, cfqd->dispatch);
	}
}

static void
cfq_insert_request(request_queue_t *q, struct request *rq, int where)
{
	struct cfq_data *cfqd = q->elevator.elevator_data;
	struct cfq_rq *crq = RQ_DATA(rq);

	switch (where) {
		case ELEVATOR_INSERT_BACK:
			while (cfq_dispatch_requests(q, cfqd))
				;
			list_add_tail(&rq->queuelist, cfqd->dispatch);
			break;
		case ELEVATOR_INSERT_FRONT:
			list_add(&rq->queuelist, cfqd->dispatch);
			break;
		case ELEVATOR_INSERT_SORT:
			BUG_ON(!blk_fs_request(rq));
			cfq_enqueue(cfqd, crq);
			break;
		default:
			printk("%s: bad insert point %d\n", __FUNCTION__, where);
			return;
	}

	if (rq_mergeable(rq)) {
		cfq_add_crq_hash(cfqd, crq);

		if (!q->last_merge)
			q->last_merge = rq;
	}
}

static int cfq_queue_empty(request_queue_t *q)
{
	struct cfq_data *cfqd = q->elevator.elevator_data;

	if (list_empty(cfqd->dispatch) && list_empty(&cfqd->rr_list))
		return 1;

	return 0;
}

static struct request *
cfq_former_request(request_queue_t *q, struct request *rq)
{
	struct cfq_rq *crq = RQ_DATA(rq);
	struct rb_node *rbprev = rb_prev(&crq->rb_node);

	if (rbprev)
		return rb_entry_crq(rbprev)->request;

	return NULL;
}

static struct request *
cfq_latter_request(request_queue_t *q, struct request *rq)
{
	struct cfq_rq *crq = RQ_DATA(rq);
	struct rb_node *rbnext = rb_next(&crq->rb_node);

	if (rbnext)
		return rb_entry_crq(rbnext)->request;

	return NULL;
}

static int cfq_may_queue(request_queue_t *q, int rw)
{
	struct cfq_data *cfqd = q->elevator.elevator_data;
	struct cfq_queue *cfqq;
	int ret = 1;

	if (!cfqd->busy_queues)
		goto out;

	cfqq = cfq_find_cfq_hash(cfqd, current->tgid);
	if (cfqq) {
		int limit = (q->nr_requests - cfqd->cfq_queued) / cfqd->busy_queues;

		if (limit < 3)
			limit = 3;
		else if (limit > cfqd->max_queued)
			limit = cfqd->max_queued;

		if (cfqq->queued[rw] > limit)
			ret = 0;
	}
out:
	return ret;
}
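The limit computed in cfq_may_queue() gives every busy queue an equal share of the remaining request slots, clamped to at least 3 and at most max_queued. A minimal userspace sketch of just that arithmetic, with illustrative numbers that are assumptions rather than values taken from this file:

	#include <stdio.h>

	/* same clamp logic as cfq_may_queue(), lifted out for illustration */
	static int cfq_limit(int nr_requests, int cfq_queued, int busy_queues,
			     int max_queued)
	{
		int limit = (nr_requests - cfq_queued) / busy_queues;

		if (limit < 3)
			limit = 3;
		else if (limit > max_queued)
			limit = max_queued;

		return limit;
	}

	int main(void)
	{
		/* (8192 - 8) / 16 = 511, clamped down to max_queued = 128 */
		printf("limit = %d\n", cfq_limit(8192, 8, 16, 128));
		/* (64 - 8) / 32 = 1, clamped up to the floor of 3 */
		printf("limit = %d\n", cfq_limit(64, 8, 32, 128));
		return 0;
	}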
static void cfq_put_request(request_queue_t *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator.elevator_data;
	struct cfq_rq *crq = RQ_DATA(rq);
	struct request_list *rl;
	int other_rw;

	if (crq) {
		BUG_ON(q->last_merge == rq);
		BUG_ON(ON_MHASH(crq));

		mempool_free(crq, cfqd->crq_pool);
		rq->elevator_private = NULL;
	}

	/*
	 * work-around for may_queue "bug": if a read gets issued and is
	 * refused to queue because writes ate all the allowed slots and no
	 * other reads are pending for this queue, it could get stuck
	 * infinitely since freed_request() only checks the waitqueue for
	 * writes when freeing them. or vice versa for a single write vs
	 * many reads. so check here whether "the other" data direction
	 * might be able to queue and wake them
	 */
	rl = &q->rq;
	other_rw = rq_data_dir(rq) ^ 1;
	if (rl->count[other_rw] <= q->nr_requests) {
		smp_mb();
		if (waitqueue_active(&rl->wait[other_rw]))
			wake_up(&rl->wait[other_rw]);
	}
}

static int cfq_set_request(request_queue_t *q, struct request *rq, int gfp_mask)
{
	struct cfq_data *cfqd = q->elevator.elevator_data;
	struct cfq_queue *cfqq;
	struct cfq_rq *crq;

	/*
	 * prepare a queue up front, so cfq_enqueue() doesn't have to
	 */
	cfqq = cfq_get_queue(cfqd, current->tgid, gfp_mask);
	if (!cfqq)
		return 1;

	crq = mempool_alloc(cfqd->crq_pool, gfp_mask);
	if (crq) {
		memset(crq, 0, sizeof(*crq));
		RB_CLEAR(&crq->rb_node);
		crq->request = rq;
		crq->cfq_queue = NULL;
		INIT_LIST_HEAD(&crq->hash);
		rq->elevator_private = crq;
		return 0;
	}

	return 1;
}

static void cfq_exit(request_queue_t *q, elevator_t *e)
{
	struct cfq_data *cfqd = e->elevator_data;

	e->elevator_data = NULL;
	mempool_destroy(cfqd->crq_pool);
	kfree(cfqd->crq_hash);
	kfree(cfqd->cfq_hash);
	kfree(cfqd);
}
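cfq_init() below releases partially-built state through the kernel's usual goto-unwind idiom: each error label frees exactly what was allocated before the failing step, in reverse order. A minimal userspace sketch of the same pattern (all names here are illustrative, not from this file):

	#include <stdlib.h>

	struct ctx {
		void *a, *b, *c;
	};

	static int ctx_init(struct ctx *ctx)
	{
		ctx->a = malloc(64);
		if (!ctx->a)
			goto out_a;

		ctx->b = malloc(64);
		if (!ctx->b)
			goto out_b;

		ctx->c = malloc(64);
		if (!ctx->c)
			goto out_c;

		return 0;

	out_c:
		free(ctx->b);	/* undo step 2 */
	out_b:
		free(ctx->a);	/* undo step 1 */
	out_a:
		return -1;	/* nothing left to undo */
	}

	int main(void)
	{
		struct ctx ctx;

		if (ctx_init(&ctx) == 0) {
			free(ctx.a);
			free(ctx.b);
			free(ctx.c);
		}
		return 0;
	}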
static int cfq_init(request_queue_t *q, elevator_t *e)
{
	struct cfq_data *cfqd;
	int i;

	cfqd = kmalloc(sizeof(*cfqd), GFP_KERNEL);
	if (!cfqd)
		return -ENOMEM;

	memset(cfqd, 0, sizeof(*cfqd));
	INIT_LIST_HEAD(&cfqd->rr_list);

	cfqd->crq_hash = kmalloc(sizeof(struct list_head) * CFQ_MHASH_ENTRIES, GFP_KERNEL);
	if (!cfqd->crq_hash)
		goto out_crqhash;

	cfqd->cfq_hash = kmalloc(sizeof(struct list_head) * CFQ_QHASH_ENTRIES, GFP_KERNEL);
	if (!cfqd->cfq_hash)
		goto out_cfqhash;

	cfqd->crq_pool = mempool_create(BLKDEV_MIN_RQ, mempool_alloc_slab, mempool_free_slab, crq_pool);
	if (!cfqd->crq_pool)
		goto out_crqpool;

	for (i = 0; i < CFQ_MHASH_ENTRIES; i++)
		INIT_LIST_HEAD(&cfqd->crq_hash[i]);
	for (i = 0; i < CFQ_QHASH_ENTRIES; i++)
		INIT_LIST_HEAD(&cfqd->cfq_hash[i]);

	cfqd->dispatch = &q->queue_head;
	e->elevator_data = cfqd;
	cfqd->queue = q;

	/*
	 * just set it to some high value, we want anyone to be able to queue
	 * some requests. fairness is handled differently
	 */
	cfqd->max_queued = q->nr_requests;
	q->nr_requests = 8192;

	cfqd->cfq_queued = cfq_queued;
	cfqd->cfq_quantum = cfq_quantum;

	return 0;
out_crqpool:
	kfree(cfqd->cfq_hash);
out_cfqhash:
	kfree(cfqd->crq_hash);
out_crqhash:
	kfree(cfqd);
	return -ENOMEM;
}

static int __init cfq_slab_setup(void)
{
	crq_pool = kmem_cache_create("crq_pool", sizeof(struct cfq_rq), 0, 0,
					NULL, NULL);
	if (!crq_pool)
		panic("cfq_iosched: can't init crq pool\n");

	cfq_pool = kmem_cache_create("cfq_pool", sizeof(struct cfq_queue), 0, 0,
					NULL, NULL);
	if (!cfq_pool)
		panic("cfq_iosched: can't init cfq pool\n");

	cfq_mpool = mempool_create(64, mempool_alloc_slab, mempool_free_slab, cfq_pool);
	if (!cfq_mpool)
		panic("cfq_iosched: can't init cfq mpool\n");

	return 0;
}

subsys_initcall(cfq_slab_setup);

/*
 * sysfs parts below -->
 */
struct cfq_fs_entry {
	struct attribute attr;
	ssize_t (*show)(struct cfq_data *, char *);
	ssize_t (*store)(struct cfq_data *, const char *, size_t);
};

static ssize_t
cfq_var_show(unsigned int var, char *page)
{
	return sprintf(page, "%d\n", var);
}

static ssize_t
cfq_var_store(unsigned int *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtoul(p, &p, 10);
	return count;
}

#define SHOW_FUNCTION(__FUNC, __VAR)					\
static ssize_t __FUNC(struct cfq_data *cfqd, char *page)		\
{									\
	return cfq_var_show(__VAR, (page));				\
}
SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum);
SHOW_FUNCTION(cfq_queued_show, cfqd->cfq_queued);
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX)				\
static ssize_t __FUNC(struct cfq_data *cfqd, const char *page, size_t count) \
{									\
	int ret = cfq_var_store(__PTR, (page), count);			\
	if (*(__PTR) < (MIN))						\
		*(__PTR) = (MIN);					\
	else if (*(__PTR) > (MAX))					\
		*(__PTR) = (MAX);					\
	return ret;							\
}
STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, INT_MAX);
STORE_FUNCTION(cfq_queued_store, &cfqd->cfq_queued, 1, INT_MAX);
#undef STORE_FUNCTION

static struct cfq_fs_entry cfq_quantum_entry = {
	.attr = {.name = "quantum", .mode = S_IRUGO | S_IWUSR },
	.show = cfq_quantum_show,
	.store = cfq_quantum_store,
};
static struct cfq_fs_entry cfq_queued_entry = {
	.attr = {.name = "queued", .mode = S_IRUGO | S_IWUSR },
	.show = cfq_queued_show,
	.store = cfq_queued_store,
};

static struct attribute *default_attrs[] = {
	&cfq_quantum_entry.attr,
	&cfq_queued_entry.attr,
	NULL,
};

#define to_cfq(atr) container_of((atr), struct cfq_fs_entry, attr)

static ssize_t
cfq_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	elevator_t *e = container_of(kobj, elevator_t, kobj);
	struct cfq_fs_entry *entry = to_cfq(attr);

	if (!entry->show)
		return 0;

	return entry->show(e->elevator_data, page);
}

static ssize_t
cfq_attr_store(struct kobject *kobj, struct attribute *attr,
	       const char *page, size_t length)
{
	elevator_t *e = container_of(kobj, elevator_t, kobj);
	struct cfq_fs_entry *entry = to_cfq(attr);

	if (!entry->store)
		return -EINVAL;

	return entry->store(e->elevator_data, page, length);
}

static struct sysfs_ops cfq_sysfs_ops = {
	.show	= cfq_attr_show,
	.store	= cfq_attr_store,
};

struct kobj_type cfq_ktype = {
	.sysfs_ops	= &cfq_sysfs_ops,
	.default_attrs	= default_attrs,
};
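For reference, the SHOW_FUNCTION/STORE_FUNCTION macros above stamp out one accessor pair per tunable. SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum) expands to exactly:

	static ssize_t cfq_quantum_show(struct cfq_data *cfqd, char *page)
	{
		return cfq_var_show(cfqd->cfq_quantum, (page));
	}

and STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, INT_MAX) generates the matching store function, which parses the user's input with cfq_var_store() and clamps the result to the range [1, INT_MAX].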
elevator_t iosched_cfq = {
	.elevator_name =		"cfq",
	.elevator_ktype =		&cfq_ktype,
	.elevator_merge_fn =		cfq_merge,
	.elevator_merged_fn =		cfq_merged_request,
	.elevator_merge_req_fn =	cfq_merged_requests,
	.elevator_next_req_fn =		cfq_next_request,
	.elevator_add_req_fn =		cfq_insert_request,
	.elevator_remove_req_fn =	cfq_remove_request,
	.elevator_queue_empty_fn =	cfq_queue_empty,
	.elevator_former_req_fn =	cfq_former_request,
	.elevator_latter_req_fn =	cfq_latter_request,
	.elevator_set_req_fn =		cfq_set_request,
	.elevator_put_req_fn =		cfq_put_request,
	.elevator_may_queue_fn =	cfq_may_queue,
	.elevator_init_fn =		cfq_init,
	.elevator_exit_fn =		cfq_exit,
};

EXPORT_SYMBOL(iosched_cfq);
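Once this elevator is active (on 2.6.9 the default I/O scheduler is selected with the elevator= boot parameter, e.g. elevator=cfq), the two attributes exported above appear under each disk's queue directory in sysfs. A hedged userspace sketch of adjusting them; the device name "hda" and the exact iosched path are assumptions about the 2.6-era sysfs layout, and writing requires root:

	#include <stdio.h>

	int main(void)
	{
		const char *path = "/sys/block/hda/queue/iosched/quantum";
		unsigned int quantum;
		FILE *f = fopen(path, "r");

		if (!f) {
			perror("fopen");
			return 1;
		}
		if (fscanf(f, "%u", &quantum) == 1)
			printf("current quantum: %u\n", quantum);
		fclose(f);

		/* store side: parsed by cfq_var_store(), clamped to >= 1 */
		f = fopen(path, "w");
		if (f) {
			fprintf(f, "%u\n", quantum * 2);
			fclose(f);
		}
		return 0;
	}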