
📄 cfq-iosched.c

📁 Linux block device driver source code
💻 C
📖 Page 1 of 5
                struct cfq_queue *__cfqq = list_entry_cfqq(entry);

                if (!__cfqq->service_last)
                        break;
                if (time_before(__cfqq->service_last, cfqq->service_last))
                        break;
        }

        list_add(&cfqq->cfq_list, entry);
}

/*
 * add to busy list of queues for service, trying to be fair in ordering
 * the pending list according to last request service
 */
static inline void
cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq, int requeue)
{
        BUG_ON(cfq_cfqq_on_rr(cfqq));
        cfq_mark_cfqq_on_rr(cfqq);
        cfqd->busy_queues++;

        cfq_resort_rr_list(cfqq, requeue);
}

static inline void
cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
        BUG_ON(!cfq_cfqq_on_rr(cfqq));
        cfq_clear_cfqq_on_rr(cfqq);
        list_move(&cfqq->cfq_list, &cfqd->empty_list);

        BUG_ON(!cfqd->busy_queues);
        cfqd->busy_queues--;
}

/*
 * rb tree support functions
 */
static inline void cfq_del_crq_rb(struct cfq_rq *crq)
{
        struct cfq_queue *cfqq = crq->cfq_queue;

        if (ON_RB(&crq->rb_node)) {
                struct cfq_data *cfqd = cfqq->cfqd;
                const int sync = cfq_crq_is_sync(crq);

                BUG_ON(!cfqq->queued[sync]);
                cfqq->queued[sync]--;

                cfq_update_next_crq(crq);

                rb_erase(&crq->rb_node, &cfqq->sort_list);
                RB_CLEAR_COLOR(&crq->rb_node);

                if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY(&cfqq->sort_list))
                        cfq_del_cfqq_rr(cfqd, cfqq);
        }
}

static struct cfq_rq *__cfq_add_crq_rb(struct cfq_rq *crq)
{
        struct rb_node **p = &crq->cfq_queue->sort_list.rb_node;
        struct rb_node *parent = NULL;
        struct cfq_rq *__crq;

        while (*p) {
                parent = *p;
                __crq = rb_entry_crq(parent);

                if (crq->rb_key < __crq->rb_key)
                        p = &(*p)->rb_left;
                else if (crq->rb_key > __crq->rb_key)
                        p = &(*p)->rb_right;
                else
                        return __crq;
        }

        rb_link_node(&crq->rb_node, parent, p);
        return NULL;
}
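/*
 * add crq to its queue's sector-sorted rbtree; a duplicate key (alias)
 * is pushed straight to the dispatch list, the queue is put on the busy
 * rr list if needed, and the next-serve hint is updated
 */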
static void cfq_add_crq_rb(struct cfq_rq *crq)
{
        struct cfq_queue *cfqq = crq->cfq_queue;
        struct cfq_data *cfqd = cfqq->cfqd;
        struct request *rq = crq->request;
        struct cfq_rq *__alias;

        crq->rb_key = rq_rb_key(rq);
        cfqq->queued[cfq_crq_is_sync(crq)]++;

        /*
         * looks a little odd, but the first insert might return an alias.
         * if that happens, put the alias on the dispatch list
         */
        while ((__alias = __cfq_add_crq_rb(crq)) != NULL)
                cfq_dispatch_sort(cfqd->queue, __alias);

        rb_insert_color(&crq->rb_node, &cfqq->sort_list);

        if (!cfq_cfqq_on_rr(cfqq))
                cfq_add_cfqq_rr(cfqd, cfqq, cfq_crq_requeued(crq));

        /*
         * check if this request is a better next-serve candidate
         */
        cfqq->next_crq = cfq_choose_req(cfqd, cfqq->next_crq, crq);
}

static inline void
cfq_reposition_crq_rb(struct cfq_queue *cfqq, struct cfq_rq *crq)
{
        if (ON_RB(&crq->rb_node)) {
                rb_erase(&crq->rb_node, &cfqq->sort_list);
                cfqq->queued[cfq_crq_is_sync(crq)]--;
        }

        cfq_add_crq_rb(crq);
}

static struct request *cfq_find_rq_rb(struct cfq_data *cfqd, sector_t sector)
{
        struct cfq_queue *cfqq = cfq_find_cfq_hash(cfqd, current->pid, CFQ_KEY_ANY);
        struct rb_node *n;

        if (!cfqq)
                goto out;

        n = cfqq->sort_list.rb_node;
        while (n) {
                struct cfq_rq *crq = rb_entry_crq(n);

                if (sector < crq->rb_key)
                        n = n->rb_left;
                else if (sector > crq->rb_key)
                        n = n->rb_right;
                else
                        return crq->request;
        }

out:
        return NULL;
}

static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
{
        struct cfq_data *cfqd = q->elevator->elevator_data;
        struct cfq_rq *crq = RQ_DATA(rq);

        if (crq) {
                struct cfq_queue *cfqq = crq->cfq_queue;

                if (cfq_crq_in_driver(crq)) {
                        cfq_clear_crq_in_driver(crq);
                        WARN_ON(!cfqd->rq_in_driver);
                        cfqd->rq_in_driver--;
                }
                if (cfq_crq_in_flight(crq)) {
                        const int sync = cfq_crq_is_sync(crq);

                        cfq_clear_crq_in_flight(crq);
                        WARN_ON(!cfqq->on_dispatch[sync]);
                        cfqq->on_dispatch[sync]--;
                }
                cfq_mark_crq_requeued(crq);
        }
}

/*
 * make sure the service time gets corrected on reissue of this request
 */
static void cfq_requeue_request(request_queue_t *q, struct request *rq)
{
        cfq_deactivate_request(q, rq);
        list_add(&rq->queuelist, &q->queue_head);
}

static void cfq_remove_request(request_queue_t *q, struct request *rq)
{
        struct cfq_rq *crq = RQ_DATA(rq);

        if (crq) {
                list_del_init(&rq->queuelist);
                cfq_del_crq_rb(crq);
                cfq_remove_merge_hints(q, crq);
        }
}

static int
cfq_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
        struct cfq_data *cfqd = q->elevator->elevator_data;
        struct request *__rq;
        int ret;

        ret = elv_try_last_merge(q, bio);
        if (ret != ELEVATOR_NO_MERGE) {
                __rq = q->last_merge;
                goto out_insert;
        }

        __rq = cfq_find_rq_hash(cfqd, bio->bi_sector);
        if (__rq && elv_rq_merge_ok(__rq, bio)) {
                ret = ELEVATOR_BACK_MERGE;
                goto out;
        }

        __rq = cfq_find_rq_rb(cfqd, bio->bi_sector + bio_sectors(bio));
        if (__rq && elv_rq_merge_ok(__rq, bio)) {
                ret = ELEVATOR_FRONT_MERGE;
                goto out;
        }

        return ELEVATOR_NO_MERGE;
out:
        q->last_merge = __rq;
out_insert:
        *req = __rq;
        return ret;
}

static void cfq_merged_request(request_queue_t *q, struct request *req)
{
        struct cfq_data *cfqd = q->elevator->elevator_data;
        struct cfq_rq *crq = RQ_DATA(req);

        cfq_del_crq_hash(crq);
        cfq_add_crq_hash(cfqd, crq);

        if (ON_RB(&crq->rb_node) && (rq_rb_key(req) != crq->rb_key)) {
                struct cfq_queue *cfqq = crq->cfq_queue;

                cfq_update_next_crq(crq);
                cfq_reposition_crq_rb(cfqq, crq);
        }

        q->last_merge = req;
}

static void
cfq_merged_requests(request_queue_t *q, struct request *rq,
                    struct request *next)
{
        cfq_merged_request(q, rq);

        /*
         * reposition in fifo if next is older than rq
         */
        if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
            time_before(next->start_time, rq->start_time))
                list_move(&rq->queuelist, &next->queuelist);

        cfq_remove_request(q, next);
}
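/*
 * make cfqq the active queue and reset its time slice accounting
 */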
static inline void
__cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
        if (cfqq) {
                /*
                 * stop potential idle class queues waiting service
                 */
                del_timer(&cfqd->idle_class_timer);

                cfqq->slice_start = jiffies;
                cfqq->slice_end = 0;
                cfqq->slice_left = 0;
                cfq_clear_cfqq_must_alloc_slice(cfqq);
                cfq_clear_cfqq_fifo_expire(cfqq);
                cfq_clear_cfqq_expired(cfqq);
        }

        cfqd->active_queue = cfqq;
}

/*
 * 0
 * 0,1
 * 0,1,2
 * 0,1,2,3
 * 0,1,2,3,4
 * 0,1,2,3,4,5
 * 0,1,2,3,4,5,6
 * 0,1,2,3,4,5,6,7
 */
static int cfq_get_next_prio_level(struct cfq_data *cfqd)
{
        int prio, wrap;

        prio = -1;
        wrap = 0;
        do {
                int p;

                for (p = cfqd->cur_prio; p <= cfqd->cur_end_prio; p++) {
                        if (!list_empty(&cfqd->rr_list[p])) {
                                prio = p;
                                break;
                        }
                }

                if (prio != -1)
                        break;
                cfqd->cur_prio = 0;
                if (++cfqd->cur_end_prio == CFQ_PRIO_LISTS) {
                        cfqd->cur_end_prio = 0;
                        if (wrap)
                                break;
                        wrap = 1;
                }
        } while (1);

        if (unlikely(prio == -1))
                return -1;

        BUG_ON(prio >= CFQ_PRIO_LISTS);

        list_splice_init(&cfqd->rr_list[prio], &cfqd->cur_rr);

        cfqd->cur_prio = prio + 1;
        if (cfqd->cur_prio > cfqd->cur_end_prio) {
                cfqd->cur_end_prio = cfqd->cur_prio;
                cfqd->cur_prio = 0;
        }
        if (cfqd->cur_end_prio == CFQ_PRIO_LISTS) {
                cfqd->cur_prio = 0;
                cfqd->cur_end_prio = 0;
        }

        return prio;
}

static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
{
        struct cfq_queue *cfqq;

        /*
         * if current queue is expired but not done with its requests yet,
         * wait for that to happen
         */
        if ((cfqq = cfqd->active_queue) != NULL) {
                if (cfq_cfqq_expired(cfqq) && cfq_cfqq_dispatched(cfqq))
                        return NULL;
        }

        /*
         * if current list is non-empty, grab first entry. if it is empty,
         * get next prio level and grab first entry then if any are spliced
         */
        if (!list_empty(&cfqd->cur_rr) || cfq_get_next_prio_level(cfqd) != -1)
                cfqq = list_entry_cfqq(cfqd->cur_rr.next);

        /*
         * if we have idle queues and no rt or be queues had pending
         * requests, either allow immediate service if the grace period
         * has passed or arm the idle grace timer
         */
        if (!cfqq && !list_empty(&cfqd->idle_rr)) {
                unsigned long end = cfqd->last_end_request + CFQ_IDLE_GRACE;

                if (time_after_eq(jiffies, end))
                        cfqq = list_entry_cfqq(cfqd->idle_rr.next);
                else
                        mod_timer(&cfqd->idle_class_timer, end);
        }

        __cfq_set_active_queue(cfqd, cfqq);
        return cfqq;
}

/*
 * current cfqq expired its slice (or was too idle), select new one
 */
static void
__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                    int preempted)
{
        unsigned long now = jiffies;

        if (cfq_cfqq_wait_request(cfqq))
                del_timer(&cfqd->idle_slice_timer);

        if (!preempted && !cfq_cfqq_dispatched(cfqq))
                cfqq->service_last = now;

        cfq_clear_cfqq_must_dispatch(cfqq);
        cfq_clear_cfqq_wait_request(cfqq);

        /*
         * store what was left of this slice, if the queue idled out
         * or was preempted
         */
        if (time_after(cfqq->slice_end, now))
                cfqq->slice_left = cfqq->slice_end - now;
        else
                cfqq->slice_left = 0;

        if (cfq_cfqq_on_rr(cfqq))
                cfq_resort_rr_list(cfqq, preempted);

        if (cfqq == cfqd->active_queue)
                cfqd->active_queue = NULL;

        if (cfqd->active_cic) {
                put_io_context(cfqd->active_cic->ioc);
                cfqd->active_cic = NULL;
        }

        cfqd->dispatch_slice = 0;
}

static inline void cfq_slice_expired(struct cfq_data *cfqd, int preempted)
{
        struct cfq_queue *cfqq = cfqd->active_queue;

        if (cfqq) {
                /*
                 * use deferred expiry, if there are requests in progress as
                 * not to disturb the slice of the next queue
                 */
                if (cfq_cfqq_dispatched(cfqq))
                        cfq_mark_cfqq_expired(cfqq);
                else
                        __cfq_slice_expired(cfqd, cfqq, preempted);
        }
}
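/*
 * the active queue has no more queued requests; decide whether to wait
 * briefly for a new request from the same process before giving up the
 * slice, arming idle_slice_timer if so. returns 1 when we are waiting.
 */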
static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
        WARN_ON(!RB_EMPTY(&cfqq->sort_list));
        WARN_ON(cfqq != cfqd->active_queue);

        /*
         * idle is disabled, either manually or by past process history
         */
        if (!cfqd->cfq_slice_idle)
                return 0;
        if (!cfq_cfqq_idle_window(cfqq))
                return 0;
        /*
         * task has exited, don't wait
         */
        if (cfqd->active_cic && !cfqd->active_cic->ioc->task)
                return 0;

        cfq_mark_cfqq_must_dispatch(cfqq);
        cfq_mark_cfqq_wait_request(cfqq);

        if (!timer_pending(&cfqd->idle_slice_timer)) {
                unsigned long slice_left = min(cfqq->slice_end - 1, (unsigned long) cfqd->cfq_slice_idle);

                cfqd->idle_slice_timer.expires = jiffies + slice_left;
                add_timer(&cfqd->idle_slice_timer);
        }

        return 1;
}

/*
 * we dispatch cfqd->cfq_quantum requests in total from the rr_list queues,
 * this function sector sorts the selected request to minimize seeks. we start
 * at cfqd->last_sector, not 0.
 */
static void cfq_dispatch_sort(request_queue_t *q, struct cfq_rq *crq)
{
        struct cfq_data *cfqd = q->elevator->elevator_data;
        struct cfq_queue *cfqq = crq->cfq_queue;
        struct list_head *head = &q->queue_head, *entry = head;
        struct request *__rq;
        sector_t last;

        list_del(&crq->request->queuelist);

        last = cfqd->last_sector;
        list_for_each_entry_reverse(__rq, head, queuelist) {
                struct cfq_rq *__crq = RQ_DATA(__rq);

                if (blk_barrier_rq(__rq))
                        break;
                if (!blk_fs_request(__rq))
                        break;
                if (cfq_crq_requeued(__crq))
                        break;

                if (__rq->sector <= crq->request->sector)
                        break;
                if (__rq->sector > last && crq->request->sector < last) {
                        last = crq->request->sector + crq->request->nr_sectors;
                        break;
                }
                entry = &__rq->queuelist;
        }

        cfqd->last_sector = last;

        cfqq->next_crq = cfq_find_next_crq(cfqd, cfqq, crq);

        cfq_del_crq_rb(crq);
        cfq_remove_merge_hints(q, crq);

        cfq_mark_crq_in_flight(crq);
        cfq_clear_crq_requeued(crq);

        cfqq->on_dispatch[cfq_crq_is_sync(crq)]++;
        list_add_tail(&crq->request->queuelist, entry);
}

/*
 * return expired entry, or NULL to just start from scratch in rbtree
 */
static inline struct cfq_rq *cfq_check_fifo(struct cfq_queue *cfqq)
{
        struct cfq_data *cfqd = cfqq->cfqd;
        struct request *rq;
        struct cfq_rq *crq;

        if (cfq_cfqq_fifo_expire(cfqq))
                return NULL;

        if (!list_empty(&cfqq->fifo)) {
                int fifo = cfq_cfqq_class_sync(cfqq);

                crq = RQ_DATA(list_entry_fifo(cfqq->fifo.next));
                rq = crq->request;

                if (time_after(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo])) {
                        cfq_mark_cfqq_fifo_expire(cfqq);
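The expanding-priority scan documented by the "0 / 0,1 / 0,1,2 / ..." comment above cfq_get_next_prio_level() can be tried in isolation. Below is a minimal user-space sketch of the same walk; the names (prio_scan, next_prio_level, pending[]) are illustrative assumptions, a plain per-priority counter array stands in for the kernel's rr_list[] of list_heads, and clearing pending[prio] stands in for splicing that list onto cur_rr.

/*
 * minimal user-space sketch of the expanding priority window used by
 * cfq_get_next_prio_level() above. names and the plain counter array
 * are illustrative; the kernel keeps a list_head run-queue per priority.
 */
#include <stdio.h>

#define NR_PRIO_LISTS   8

struct prio_scan {
        int cur_prio;           /* first priority considered on this pass */
        int cur_end_prio;       /* last priority considered on this pass */
};

/* pending[p] != 0 means "priority p has queued requests" */
static int next_prio_level(struct prio_scan *s, int pending[NR_PRIO_LISTS])
{
        int prio = -1, wrap = 0;

        do {
                int p;

                /* scan the current window for a non-empty priority level */
                for (p = s->cur_prio; p <= s->cur_end_prio; p++) {
                        if (pending[p]) {
                                prio = p;
                                break;
                        }
                }
                if (prio != -1)
                        break;

                /* nothing found: restart at 0 and widen the window by one */
                s->cur_prio = 0;
                if (++s->cur_end_prio == NR_PRIO_LISTS) {
                        s->cur_end_prio = 0;
                        if (wrap)
                                break;  /* everything scanned once, give up */
                        wrap = 1;
                }
        } while (1);

        if (prio == -1)
                return -1;

        /* stands in for list_splice_init(&rr_list[prio], &cur_rr) */
        pending[prio] = 0;

        /* the next pass starts just after the level we picked */
        s->cur_prio = prio + 1;
        if (s->cur_prio > s->cur_end_prio) {
                s->cur_end_prio = s->cur_prio;
                s->cur_prio = 0;
        }
        if (s->cur_end_prio == NR_PRIO_LISTS)
                s->cur_prio = s->cur_end_prio = 0;

        return prio;
}

int main(void)
{
        struct prio_scan s = { 0, 0 };
        int pending[NR_PRIO_LISTS] = { 0, 0, 1, 0, 1, 0, 0, 0 };
        int i;

        /* priorities 2 and 4 have work; watch the window widen to reach them */
        for (i = 0; i < 4; i++)
                printf("pick %d\n", next_prio_level(&s, pending));

        return 0;
}

Compiled with a standard C compiler, the sketch picks priority 2, then 4, then reports -1 once every level has been scanned without finding work, mirroring how the kernel's window widens one level per empty pass and wraps at most once.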
