cfq-iosched.c
Linux block device driver source code (C)
Page 1 of 5

		if (cic == NULL)
			goto err;

		/*
		 * manually increment generic io_context usage count, it
		 * cannot go away since we are already holding one ref to it
		 */
		ioc->cic = cic;
		ioc->set_ioprio = cfq_ioc_set_ioprio;
		cic->ioc = ioc;
		cic->key = cfqd;
		atomic_inc(&cfqd->ref);
	} else {
		struct cfq_io_context *__cic;

		/*
		 * the first cic on the list is actually the head itself
		 */
		if (cic->key == cfqd)
			goto out;

		/*
		 * cic exists, check if we already are there. linear search
		 * should be ok here, the list will usually not be more than
		 * 1 or a few entries long
		 */
		list_for_each_entry(__cic, &cic->list, list) {
			/*
			 * this process is already holding a reference to
			 * this queue, so no need to get one more
			 */
			if (__cic->key == cfqd) {
				cic = __cic;
				goto out;
			}
		}

		/*
		 * nope, process doesn't have a cic associated with this
		 * cfqq yet. get a new one and add to list
		 */
		__cic = cfq_alloc_io_context(cfqd, gfp_mask);
		if (__cic == NULL)
			goto err;

		__cic->ioc = ioc;
		__cic->key = cfqd;
		atomic_inc(&cfqd->ref);
		list_add(&__cic->list, &cic->list);
		cic = __cic;
	}
out:
	return cic;
err:
	put_io_context(ioc);
	return NULL;
}

static void
cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
{
	unsigned long elapsed, ttime;

	/*
	 * if this context already has stuff queued, thinktime is from
	 * last queue not last end
	 */
#if 0
	if (time_after(cic->last_end_request, cic->last_queue))
		elapsed = jiffies - cic->last_end_request;
	else
		elapsed = jiffies - cic->last_queue;
#else
	elapsed = jiffies - cic->last_end_request;
#endif

	ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle);

	cic->ttime_samples = (7*cic->ttime_samples + 256) / 8;
	cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8;
	cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples;
}

#define sample_valid(samples)	((samples) > 80)

/*
 * Disable idle window if the process thinks too long or seeks so much that
 * it doesn't matter
 */
static void
cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		       struct cfq_io_context *cic)
{
	int enable_idle = cfq_cfqq_idle_window(cfqq);

	if (!cic->ioc->task || !cfqd->cfq_slice_idle)
		enable_idle = 0;
	else if (sample_valid(cic->ttime_samples)) {
		if (cic->ttime_mean > cfqd->cfq_slice_idle)
			enable_idle = 0;
		else
			enable_idle = 1;
	}

	if (enable_idle)
		cfq_mark_cfqq_idle_window(cfqq);
	else
		cfq_clear_cfqq_idle_window(cfqq);
}

/*
 * Check if new_cfqq should preempt the currently active queue. Return 0 for
 * no or if we aren't sure, a 1 will cause a preempt.
 */
static int
cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
		   struct cfq_rq *crq)
{
	struct cfq_queue *cfqq = cfqd->active_queue;

	if (cfq_class_idle(new_cfqq))
		return 0;

	if (!cfqq)
		return 1;

	if (cfq_class_idle(cfqq))
		return 1;
	if (!cfq_cfqq_wait_request(new_cfqq))
		return 0;
	/*
	 * if it doesn't have slice left, forget it
	 */
	if (new_cfqq->slice_left < cfqd->cfq_slice_idle)
		return 0;
	if (cfq_crq_is_sync(crq) && !cfq_cfqq_sync(cfqq))
		return 1;

	return 0;
}

/*
 * cfqq preempts the active queue. if we allowed preempt with no slice left,
 * let it have half of its nominal slice.
 */
static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	struct cfq_queue *__cfqq, *next;

	list_for_each_entry_safe(__cfqq, next, &cfqd->cur_rr, cfq_list)
		cfq_resort_rr_list(__cfqq, 1);

	if (!cfqq->slice_left)
		cfqq->slice_left = cfq_prio_to_slice(cfqd, cfqq) / 2;

	cfqq->slice_end = cfqq->slice_left + jiffies;
	__cfq_slice_expired(cfqd, cfqq, 1);
	__cfq_set_active_queue(cfqd, cfqq);
}

/*
 * should really be a ll_rw_blk.c helper
 */
static void cfq_start_queueing(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	request_queue_t *q = cfqd->queue;

	if (!blk_queue_plugged(q))
		q->request_fn(q);
	else
		__generic_unplug_device(q);
}

/*
 * Called when a new fs request (crq) is added (to cfqq). Check if there's
 * something we should do about it
 */
static void
cfq_crq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		 struct cfq_rq *crq)
{
	struct cfq_io_context *cic;

	cfqq->next_crq = cfq_choose_req(cfqd, cfqq->next_crq, crq);

	/*
	 * we never wait for an async request and we don't allow preemption
	 * of an async request. so just return early
	 */
	if (!cfq_crq_is_sync(crq))
		return;

	cic = crq->io_context;

	cfq_update_io_thinktime(cfqd, cic);
	cfq_update_idle_window(cfqd, cfqq, cic);

	cic->last_queue = jiffies;

	if (cfqq == cfqd->active_queue) {
		/*
		 * if we are waiting for a request for this queue, let it rip
		 * immediately and flag that we must not expire this queue
		 * just now
		 */
		if (cfq_cfqq_wait_request(cfqq)) {
			cfq_mark_cfqq_must_dispatch(cfqq);
			del_timer(&cfqd->idle_slice_timer);
			cfq_start_queueing(cfqd, cfqq);
		}
	} else if (cfq_should_preempt(cfqd, cfqq, crq)) {
		/*
		 * not the active queue - expire current slice if it is
		 * idle and has expired its mean thinktime or this new queue
		 * has some old slice time left and is of higher priority
		 */
		cfq_preempt_queue(cfqd, cfqq);
		cfq_mark_cfqq_must_dispatch(cfqq);
		cfq_start_queueing(cfqd, cfqq);
	}
}

static void cfq_enqueue(struct cfq_data *cfqd, struct request *rq)
{
	struct cfq_rq *crq = RQ_DATA(rq);
	struct cfq_queue *cfqq = crq->cfq_queue;

	cfq_init_prio_data(cfqq);

	cfq_add_crq_rb(crq);

	list_add_tail(&rq->queuelist, &cfqq->fifo);

	if (rq_mergeable(rq)) {
		cfq_add_crq_hash(cfqd, crq);

		if (!cfqd->queue->last_merge)
			cfqd->queue->last_merge = rq;
	}

	cfq_crq_enqueued(cfqd, cfqq, crq);
}

static void
cfq_insert_request(request_queue_t *q, struct request *rq, int where)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	switch (where) {
		case ELEVATOR_INSERT_BACK:
			while (cfq_dispatch_requests(q, INT_MAX, 1))
				;
			list_add_tail(&rq->queuelist, &q->queue_head);
			/*
			 * If we were idling with pending requests on
			 * inactive cfqqs, force dispatching will
			 * remove the idle timer and the queue won't
			 * be kicked by __make_request() afterward.
			 * Kick it here.
			 */
			cfq_schedule_dispatch(cfqd);
			break;
		case ELEVATOR_INSERT_FRONT:
			list_add(&rq->queuelist, &q->queue_head);
			break;
		case ELEVATOR_INSERT_SORT:
			BUG_ON(!blk_fs_request(rq));
			cfq_enqueue(cfqd, rq);
			break;
		default:
			printk("%s: bad insert point %d\n", __FUNCTION__, where);
			return;
	}
}

static void cfq_completed_request(request_queue_t *q, struct request *rq)
{
	struct cfq_rq *crq = RQ_DATA(rq);
	struct cfq_queue *cfqq;

	if (unlikely(!blk_fs_request(rq)))
		return;

	cfqq = crq->cfq_queue;

	if (cfq_crq_in_flight(crq)) {
		const int sync = cfq_crq_is_sync(crq);

		WARN_ON(!cfqq->on_dispatch[sync]);
		cfqq->on_dispatch[sync]--;
	}

	cfq_account_completion(cfqq, crq);
}

static struct request *
cfq_former_request(request_queue_t *q, struct request *rq)
{
	struct cfq_rq *crq = RQ_DATA(rq);
	struct rb_node *rbprev = rb_prev(&crq->rb_node);

	if (rbprev)
		return rb_entry_crq(rbprev)->request;

	return NULL;
}

static struct request *
cfq_latter_request(request_queue_t *q, struct request *rq)
{
	struct cfq_rq *crq = RQ_DATA(rq);
	struct rb_node *rbnext = rb_next(&crq->rb_node);

	if (rbnext)
		return rb_entry_crq(rbnext)->request;

	return NULL;
}

/*
 * we temporarily boost lower priority queues if they are holding fs exclusive
 * resources. they are boosted to normal prio (CLASS_BE/4)
 */
static void cfq_prio_boost(struct cfq_queue *cfqq)
{
	const int ioprio_class = cfqq->ioprio_class;
	const int ioprio = cfqq->ioprio;

	if (has_fs_excl()) {
		/*
		 * boost idle prio on transactions that would lock out other
		 * users of the filesystem
		 */
		if (cfq_class_idle(cfqq))
			cfqq->ioprio_class = IOPRIO_CLASS_BE;
		if (cfqq->ioprio > IOPRIO_NORM)
			cfqq->ioprio = IOPRIO_NORM;
	} else {
		/*
		 * check if we need to unboost the queue
		 */
		if (cfqq->ioprio_class != cfqq->org_ioprio_class)
			cfqq->ioprio_class = cfqq->org_ioprio_class;
		if (cfqq->ioprio != cfqq->org_ioprio)
			cfqq->ioprio = cfqq->org_ioprio;
	}

	/*
	 * refile between round-robin lists if we moved the priority class
	 */
	if ((ioprio_class != cfqq->ioprio_class || ioprio != cfqq->ioprio) &&
	    cfq_cfqq_on_rr(cfqq))
		cfq_resort_rr_list(cfqq, 0);
}

static inline pid_t cfq_queue_pid(struct task_struct *task, int rw)
{
	if (rw == READ || process_sync(task))
		return task->pid;

	return CFQ_KEY_ASYNC;
}

static inline int
__cfq_may_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		struct task_struct *task, int rw)
{
#if 1
	if ((cfq_cfqq_wait_request(cfqq) || cfq_cfqq_must_alloc(cfqq)) &&
	    !cfq_cfqq_must_alloc_slice(cfqq)) {
		cfq_mark_cfqq_must_alloc_slice(cfqq);
		return ELV_MQUEUE_MUST;
	}

	return ELV_MQUEUE_MAY;
#else
	if (!cfqq || task->flags & PF_MEMALLOC)
		return ELV_MQUEUE_MAY;
	if (!cfqq->allocated[rw] || cfq_cfqq_must_alloc(cfqq)) {
		if (cfq_cfqq_wait_request(cfqq))
			return ELV_MQUEUE_MUST;

		/*
		 * only allow 1 ELV_MQUEUE_MUST per slice, otherwise we
		 * can quickly flood the queue with writes from a single task
		 */
		if (rw == READ || !cfq_cfqq_must_alloc_slice(cfqq)) {
			cfq_mark_cfqq_must_alloc_slice(cfqq);
			return ELV_MQUEUE_MUST;
		}

		return ELV_MQUEUE_MAY;
	}
	if (cfq_class_idle(cfqq))
		return ELV_MQUEUE_NO;
	if (cfqq->allocated[rw] >= cfqd->max_queued) {
		struct io_context *ioc = get_io_context(GFP_ATOMIC);
		int ret = ELV_MQUEUE_NO;

		if (ioc && ioc->nr_batch_requests)
			ret = ELV_MQUEUE_MAY;

		put_io_context(ioc);
		return ret;
	}

	return ELV_MQUEUE_MAY;
#endif
}

static int cfq_may_queue(request_queue_t *q, int rw, struct bio *bio)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct task_struct *tsk = current;
	struct cfq_queue *cfqq;

	/*
	 * don't force setup of a queue from here, as a call to may_queue
	 * does not necessarily imply that a request actually will be queued.
	 * so just lookup a possibly existing queue, or return 'may queue'
	 * if that fails
	 */
	cfqq = cfq_find_cfq_hash(cfqd, cfq_queue_pid(tsk, rw), tsk->ioprio);
	if (cfqq) {
		cfq_init_prio_data(cfqq);
		cfq_prio_boost(cfqq);

		return __cfq_may_queue(cfqd, cfqq, tsk, rw);
	}

	return ELV_MQUEUE_MAY;
}

static void cfq_check_waiters(request_queue_t *q, struct cfq_queue *cfqq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct request_list *rl = &q->rq;

	if (cfqq->allocated[READ] <= cfqd->max_queued || cfqd->rq_starved) {
		smp_mb();
		if (waitqueue_active(&rl->wait[READ]))
			wake_up(&rl->wait[READ]);
	}

	if (cfqq->allocated[WRITE] <= cfqd->max_queued || cfqd->rq_starved) {
		smp_mb();
		if (waitqueue_active(&rl->wait[WRITE]))
			wake_up(&rl->wait[WRITE]);
	}
}

/*
 * queue lock held here
 */
static void cfq_put_request(request_queue_t *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_rq *crq = RQ_DATA(rq);

	if (crq) {
		struct cfq_queue *cfqq = crq->cfq_queue;
		const int rw = rq_data_dir(rq);

		BUG_ON(!cfqq->allocated[rw]);
		cfqq->allocated[rw]--;

		put_io_context(crq->io_context->ioc);

		mempool_free(crq, cfqd->crq_pool);
		rq->elevator_private = NULL;

		cfq_check_waiters(q, cfqq);
		cfq_put_queue(cfqq);
	}
}

/*
 * Allocate cfq data structures associated with this request.
 */
static int
cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
		int gfp_mask)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct task_struct *tsk = current;
	struct cfq_io_context *cic;
	const int rw = rq_data_dir(rq);
	pid_t key = cfq_queue_pid(tsk, rw);
	struct cfq_queue *cfqq;
	struct cfq_rq *crq;
	unsigned long flags;

	might_sleep_if(gfp_mask & __GFP_WAIT);

	cic = cfq_get_io_context(cfqd, key, gfp_mask);

	spin_lock_irqsave(q->queue_lock, flags);

	if (!cic)
		goto queue_fail;

	if (!cic->cfqq) {
		cfqq = cfq_get_queue(cfqd, key, tsk->ioprio, gfp_mask);
		if (!cfqq)
			goto queue_fail;

		cic->cfqq = cfqq;
	} else
		cfqq = cic->cfqq;

	cfqq->allocated[rw]++;
	cfq_clear_cfqq_must_alloc(cfqq);
	cfqd->rq_starved = 0;
	atomic_inc(&cfqq->ref);

	spin_unlock_irqrestore(q->queue_lock, flags);

	crq = mempool_alloc(cfqd->crq_pool, gfp_mask);
	if (crq) {
		RB_CLEAR(&crq->rb_node);
		crq->rb_key = 0;
		crq->request = rq;
		INIT_HLIST_NODE(&crq->hash);
		crq->cfq_queue = cfqq;
		crq->io_context = cic;
		cfq_clear_crq_in_flight(crq);
		cfq_clear_crq_in_driver(crq);
		cfq_clear_crq_requeued(crq);
