
📄 as-iosched.c

📁 Linux block device driver source code
💻 C
📖 Page 1 of 4
	WARN_ON(arq->state != AS_RQ_DISPATCHED);
	WARN_ON(ON_RB(&arq->rb_node));

	if (arq->io_context && arq->io_context->aic) {
		aic = arq->io_context->aic;

		if (aic) {
			WARN_ON(!atomic_read(&aic->nr_dispatched));
			atomic_dec(&aic->nr_dispatched);
		}
	}
}

/*
 * as_remove_request is called when a driver has finished with a request.
 * This should only be called for dispatched requests, but for some reason
 * on a POWER4 box running hwscan it is not.
 */
static void as_remove_request(request_queue_t *q, struct request *rq)
{
	struct as_rq *arq = RQ_DATA(rq);

	if (unlikely(arq->state == AS_RQ_NEW))
		goto out;

	if (ON_RB(&arq->rb_node)) {
		if (arq->state != AS_RQ_QUEUED) {
			printk("arq->state %d\n", arq->state);
			WARN_ON(1);
			goto out;
		}
		/*
		 * We'll lose the aliased request(s) here. I don't think this
		 * will ever happen, but if it does, hopefully someone will
		 * report it.
		 */
		WARN_ON(!list_empty(&rq->queuelist));
		as_remove_queued_request(q, rq);
	} else {
		if (arq->state != AS_RQ_DISPATCHED) {
			printk("arq->state %d\n", arq->state);
			WARN_ON(1);
			goto out;
		}
		as_remove_dispatched_request(q, rq);
	}
out:
	arq->state = AS_RQ_REMOVED;
}

/*
 * as_fifo_expired returns 0 if there are no expired requests on the fifo,
 * 1 otherwise. It is ratelimited so that we only perform the check once per
 * `fifo_expire' interval. Otherwise a large number of expired requests
 * would create a hopeless seekstorm.
 *
 * See as_antic_expired comment.
 */
static int as_fifo_expired(struct as_data *ad, int adir)
{
	struct as_rq *arq;
	long delta_jif;

	delta_jif = jiffies - ad->last_check_fifo[adir];
	if (unlikely(delta_jif < 0))
		delta_jif = -delta_jif;
	if (delta_jif < ad->fifo_expire[adir])
		return 0;

	ad->last_check_fifo[adir] = jiffies;

	if (list_empty(&ad->fifo_list[adir]))
		return 0;

	arq = list_entry_fifo(ad->fifo_list[adir].next);

	return time_after(jiffies, arq->expires);
}
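/*
 * Worked example of the ratelimit above (the HZ/8 figure is illustrative,
 * not necessarily this build's fifo_expire value): with
 * fifo_expire[REQ_SYNC] == HZ/8, a backlog of 100 expired reads triggers at
 * most one FIFO inspection every HZ/8 jiffies (~125 ms at HZ=1000), rather
 * than 100 back-to-back inspections - the "seekstorm" the comment warns of.
 */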
/*
 * as_batch_expired returns true if the current batch has expired. A batch
 * is a set of reads or a set of writes.
 */
static inline int as_batch_expired(struct as_data *ad)
{
	if (ad->changed_batch || ad->new_batch)
		return 0;

	if (ad->batch_data_dir == REQ_SYNC)
		/* TODO! add a check so a complete fifo gets written? */
		return time_after(jiffies, ad->current_batch_expires);

	return time_after(jiffies, ad->current_batch_expires)
		|| ad->current_write_count == 0;
}

/*
 * move an entry to dispatch queue
 */
static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq)
{
	struct request *rq = arq->request;
	struct list_head *insert;
	const int data_dir = arq->is_sync;

	BUG_ON(!ON_RB(&arq->rb_node));

	as_antic_stop(ad);
	ad->antic_status = ANTIC_OFF;

	/*
	 * This has to be set in order to be correctly updated by
	 * as_find_next_arq
	 */
	ad->last_sector[data_dir] = rq->sector + rq->nr_sectors;

	if (data_dir == REQ_SYNC) {
		/* In case we have to anticipate after this */
		copy_io_context(&ad->io_context, &arq->io_context);
	} else {
		if (ad->io_context) {
			put_io_context(ad->io_context);
			ad->io_context = NULL;
		}

		if (ad->current_write_count != 0)
			ad->current_write_count--;
	}
	ad->ioc_finished = 0;

	ad->next_arq[data_dir] = as_find_next_arq(ad, arq);

	/*
	 * take it off the sort and fifo list, add to dispatch queue
	 */
	insert = ad->dispatch->prev;

	while (!list_empty(&rq->queuelist)) {
		struct request *__rq = list_entry_rq(rq->queuelist.next);
		struct as_rq *__arq = RQ_DATA(__rq);

		list_move_tail(&__rq->queuelist, ad->dispatch);

		if (__arq->io_context && __arq->io_context->aic)
			atomic_inc(&__arq->io_context->aic->nr_dispatched);

		WARN_ON(__arq->state != AS_RQ_QUEUED);
		__arq->state = AS_RQ_DISPATCHED;

		ad->nr_dispatched++;
	}

	as_remove_queued_request(ad->q, rq);
	WARN_ON(arq->state != AS_RQ_QUEUED);

	list_add(&rq->queuelist, insert);
	arq->state = AS_RQ_DISPATCHED;
	if (arq->io_context && arq->io_context->aic)
		atomic_inc(&arq->io_context->aic->nr_dispatched);
	ad->nr_dispatched++;
}
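/*
 * Summary of the decision order in as_dispatch_request below:
 *  1. nothing queued, anticipation in progress, or a batch change still
 *     pending -> dispatch nothing;
 *  2. the current batch is still valid (or only one direction has work)
 *     -> keep going in batch_data_dir, possibly starting anticipation
 *     instead of dispatching the next read;
 *  3. otherwise switch the batch direction (reads <-> writes);
 *  4. at any point an expired FIFO entry overrides the sector-sorted pick.
 */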
/*
 * as_dispatch_request selects the best request according to
 * read/write expire, batch expire, etc, and moves it to the dispatch
 * queue. Returns 1 if a request was found, 0 otherwise.
 */
static int as_dispatch_request(struct as_data *ad)
{
	struct as_rq *arq;
	const int reads = !list_empty(&ad->fifo_list[REQ_SYNC]);
	const int writes = !list_empty(&ad->fifo_list[REQ_ASYNC]);

	/* Signal that the write batch was uncontended, so we can't time it */
	if (ad->batch_data_dir == REQ_ASYNC && !reads) {
		if (ad->current_write_count == 0 || !writes)
			ad->write_batch_idled = 1;
	}

	if (!(reads || writes)
		|| ad->antic_status == ANTIC_WAIT_REQ
		|| ad->antic_status == ANTIC_WAIT_NEXT
		|| ad->changed_batch)
		return 0;

	if (!(reads && writes && as_batch_expired(ad))) {
		/*
		 * batch is still running or no reads or no writes
		 */
		arq = ad->next_arq[ad->batch_data_dir];

		if (ad->batch_data_dir == REQ_SYNC && ad->antic_expire) {
			if (as_fifo_expired(ad, REQ_SYNC))
				goto fifo_expired;

			if (as_can_anticipate(ad, arq)) {
				as_antic_waitreq(ad);
				return 0;
			}
		}

		if (arq) {
			/* we have a "next request" */
			if (reads && !writes)
				ad->current_batch_expires =
					jiffies + ad->batch_expire[REQ_SYNC];
			goto dispatch_request;
		}
	}

	/*
	 * at this point we are not running a batch. select the appropriate
	 * data direction (read / write)
	 */

	if (reads) {
		BUG_ON(RB_EMPTY(&ad->sort_list[REQ_SYNC]));

		if (writes && ad->batch_data_dir == REQ_SYNC)
			/*
			 * Last batch was a read, switch to writes
			 */
			goto dispatch_writes;

		if (ad->batch_data_dir == REQ_ASYNC) {
			WARN_ON(ad->new_batch);
			ad->changed_batch = 1;
		}
		ad->batch_data_dir = REQ_SYNC;
		arq = list_entry_fifo(ad->fifo_list[ad->batch_data_dir].next);
		ad->last_check_fifo[ad->batch_data_dir] = jiffies;
		goto dispatch_request;
	}

	/*
	 * the last batch was a read
	 */

	if (writes) {
dispatch_writes:
		BUG_ON(RB_EMPTY(&ad->sort_list[REQ_ASYNC]));

		if (ad->batch_data_dir == REQ_SYNC) {
			ad->changed_batch = 1;

			/*
			 * new_batch might be 1 when the queue runs out of
			 * reads. A subsequent submission of a write might
			 * cause a change of batch before the read is finished.
			 */
			ad->new_batch = 0;
		}
		ad->batch_data_dir = REQ_ASYNC;
		ad->current_write_count = ad->write_batch_count;
		ad->write_batch_idled = 0;
		arq = ad->next_arq[ad->batch_data_dir];
		goto dispatch_request;
	}

	BUG();
	return 0;

dispatch_request:
	/*
	 * If a request has expired, service it.
	 */

	if (as_fifo_expired(ad, ad->batch_data_dir)) {
fifo_expired:
		arq = list_entry_fifo(ad->fifo_list[ad->batch_data_dir].next);
		BUG_ON(arq == NULL);
	}

	if (ad->changed_batch) {
		WARN_ON(ad->new_batch);

		if (ad->nr_dispatched)
			return 0;

		if (ad->batch_data_dir == REQ_ASYNC)
			ad->current_batch_expires = jiffies +
					ad->batch_expire[REQ_ASYNC];
		else
			ad->new_batch = 1;

		ad->changed_batch = 0;
	}

	/*
	 * arq is the selected appropriate request.
	 */
	as_move_to_dispatch(ad, arq);

	return 1;
}

static struct request *as_next_request(request_queue_t *q)
{
	struct as_data *ad = q->elevator->elevator_data;
	struct request *rq = NULL;

	/*
	 * if there are still requests on the dispatch queue, grab the first
	 */
	if (!list_empty(ad->dispatch) || as_dispatch_request(ad))
		rq = list_entry_rq(ad->dispatch->next);

	return rq;
}
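/*
 * Background for the functions below: an "alias" is a request whose start
 * sector collides with one already in the rbtree. The tree can hold only
 * one node per sector key, so the newcomer is chained behind the existing
 * request on ->queuelist; the chain is untangled again in
 * as_move_to_dispatch.
 */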
/*
 * Add arq to a list behind alias
 */
static inline void
as_add_aliased_request(struct as_data *ad, struct as_rq *arq, struct as_rq *alias)
{
	struct request *req = arq->request;
	struct list_head *insert = alias->request->queuelist.prev;

	/*
	 * Transfer list of aliases
	 */
	while (!list_empty(&req->queuelist)) {
		struct request *__rq = list_entry_rq(req->queuelist.next);
		struct as_rq *__arq = RQ_DATA(__rq);

		list_move_tail(&__rq->queuelist, &alias->request->queuelist);

		WARN_ON(__arq->state != AS_RQ_QUEUED);
	}

	/*
	 * Another request with the same start sector on the rbtree.
	 * Link this request to that sector. They are untangled in
	 * as_move_to_dispatch
	 */
	list_add(&arq->request->queuelist, insert);

	/*
	 * Don't want to have to handle merges.
	 */
	as_remove_merge_hints(ad->q, arq);
}

/*
 * add arq to rbtree and fifo
 */
static void as_add_request(struct as_data *ad, struct as_rq *arq)
{
	struct as_rq *alias;
	int data_dir;

	if (rq_data_dir(arq->request) == READ
			|| current->flags & PF_SYNCWRITE)
		arq->is_sync = 1;
	else
		arq->is_sync = 0;
	data_dir = arq->is_sync;

	arq->io_context = as_get_io_context();

	if (arq->io_context) {
		as_update_iohist(ad, arq->io_context->aic, arq->request);
		atomic_inc(&arq->io_context->aic->nr_queued);
	}

	alias = as_add_arq_rb(ad, arq);
	if (!alias) {
		/*
		 * set expire time (only used for reads) and add to fifo list
		 */
		arq->expires = jiffies + ad->fifo_expire[data_dir];
		list_add_tail(&arq->fifo, &ad->fifo_list[data_dir]);

		if (rq_mergeable(arq->request)) {
			as_add_arq_hash(ad, arq);

			if (!ad->q->last_merge)
				ad->q->last_merge = arq->request;
		}
		as_update_arq(ad, arq); /* keep state machine up to date */
	} else {
		as_add_aliased_request(ad, arq, alias);

		/*
		 * have we been anticipating this request?
		 * or does it come from the same process as the one we are
		 * anticipating for?
		 */
		if (ad->antic_status == ANTIC_WAIT_REQ
				|| ad->antic_status == ANTIC_WAIT_NEXT) {
			if (as_can_break_anticipation(ad, arq))
				as_antic_stop(ad);
		}
	}

	arq->state = AS_RQ_QUEUED;
}

static void as_deactivate_request(request_queue_t *q, struct request *rq)
{
	struct as_data *ad = q->elevator->elevator_data;
	struct as_rq *arq = RQ_DATA(rq);

	if (arq) {
		if (arq->state == AS_RQ_REMOVED) {
			arq->state = AS_RQ_DISPATCHED;
			if (arq->io_context && arq->io_context->aic)
				atomic_inc(&arq->io_context->aic->nr_dispatched);
		}
	} else
		WARN_ON(blk_fs_request(rq)
			&& (!(rq->flags & (REQ_HARDBARRIER|REQ_SOFTBARRIER))));

	/* Stop anticipating - let this request get through */
	as_antic_stop(ad);
}

/*
 * requeue the request. The request has not been completed, nor is it a
 * new request, so don't touch accounting.
 */
static void as_requeue_request(request_queue_t *q, struct request *rq)
{
	as_deactivate_request(q, rq);
	list_add(&rq->queuelist, &q->queue_head);
}
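/*
 * Note on the insertion paths below: requests inserted with
 * ELEVATOR_INSERT_BACK/FRONT bypass the rbtree and FIFO entirely and land
 * straight on the dispatch list, so the only accounting they need is the
 * state flip and nr_dispatched increment done in as_account_queued_request.
 */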
/*
 * Account a request that is inserted directly onto the dispatch queue.
 * arq->io_context->aic->nr_dispatched should not need to be incremented
 * because only new requests should come through here: requeues go through
 * our explicit requeue handler.
 */
static void as_account_queued_request(struct as_data *ad, struct request *rq)
{
	if (blk_fs_request(rq)) {
		struct as_rq *arq = RQ_DATA(rq);
		arq->state = AS_RQ_DISPATCHED;
		ad->nr_dispatched++;
	}
}

static void
as_insert_request(request_queue_t *q, struct request *rq, int where)
{
	struct as_data *ad = q->elevator->elevator_data;
	struct as_rq *arq = RQ_DATA(rq);

	if (arq) {
		if (arq->state != AS_RQ_PRESCHED) {
			printk("arq->state: %d\n", arq->state);
			WARN_ON(1);
		}
		arq->state = AS_RQ_NEW;
	}

	/* barriers must flush the reorder queue */
	if (unlikely(rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)
			&& where == ELEVATOR_INSERT_SORT)) {
		WARN_ON(1);
		where = ELEVATOR_INSERT_BACK;
	}

	switch (where) {
		case ELEVATOR_INSERT_BACK:
			while (ad->next_arq[REQ_SYNC])
				as_move_to_dispatch(ad, ad->next_arq[REQ_SYNC]);

			while (ad->next_arq[REQ_ASYNC])
				as_move_to_dispatch(ad, ad->next_arq[REQ_ASYNC]);

			list_add_tail(&rq->queuelist, ad->dispatch);
			as_account_queued_request(ad, rq);
			as_antic_stop(ad);
			break;
		case ELEVATOR_INSERT_FRONT:
			list_add(&rq->queuelist, ad->dispatch);
			as_account_queued_request(ad, rq);
			as_antic_stop(ad);
			break;
		case ELEVATOR_INSERT_SORT:
			BUG_ON(!blk_fs_request(rq));
			as_add_request(ad, arq);
			break;
		default:
			BUG();
			return;
	}
}

/*
 * as_queue_empty tells us if there are requests left in the device. It may
 * not be the case that a driver can get the next request even if the queue
 * is not empty - it is used in the block layer to check for plugging and
 * merging opportunities
 */
static int as_queue_empty(request_queue_t *q)
{
	struct as_data *ad = q->elevator->elevator_data;

	if (!list_empty(&ad->fifo_list[REQ_ASYNC])
		|| !list_empty(&ad->fifo_list[REQ_SYNC])
		|| !list_empty(ad->dispatch))
		return 0;

	return 1;
}

static struct request *
as_former_request(request_queue_t *q, struct request *rq)
{
	struct as_rq *arq = RQ_DATA(rq);
	struct rb_node *rbprev = rb_prev(&arq->rb_node);
	struct request *ret = NULL;

	if (rbprev)
		ret = rb_entry_arq(rbprev)->request;

	return ret;
}

static struct request *
as_latter_request(request_queue_t *q, struct request *rq)
{
	struct as_rq *arq = RQ_DATA(rq);
	struct rb_node *rbnext = rb_next(&arq->rb_node);
	struct request *ret = NULL;

	if (rbnext)
		ret = rb_entry_arq(rbnext)->request;

	return ret;
}
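/*
 * The two lookups above hand the block layer rq's rbtree neighbours in
 * sector order; as far as this listing shows, they back the elevator's
 * former/latter hooks that the core uses when probing for front/back
 * merge candidates.
 */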
