
ll_rw_blk.c

Linux block device driver source code, C
Page 1 of 5
}

static void blk_unplug_work(void *data)
{
        request_queue_t *q = data;

        q->unplug_fn(q);
}

static void blk_unplug_timeout(unsigned long data)
{
        request_queue_t *q = (request_queue_t *)data;

        kblockd_schedule_work(&q->unplug_work);
}

/**
 * blk_start_queue - restart a previously stopped queue
 * @q:    The &request_queue_t in question
 *
 * Description:
 *   blk_start_queue() will clear the stop flag on the queue, and call
 *   the request_fn for the queue if it was in a stopped state when
 *   entered. Also see blk_stop_queue(). Queue lock must be held.
 **/
void blk_start_queue(request_queue_t *q)
{
        clear_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);

        /*
         * one level of recursion is ok and is much faster than kicking
         * the unplug handling
         */
        if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
                q->request_fn(q);
                clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
        } else {
                blk_plug_device(q);
                kblockd_schedule_work(&q->unplug_work);
        }
}

EXPORT_SYMBOL(blk_start_queue);

/**
 * blk_stop_queue - stop a queue
 * @q:    The &request_queue_t in question
 *
 * Description:
 *   The Linux block layer assumes that a block driver will consume all
 *   entries on the request queue when the request_fn strategy is called.
 *   Often this will not happen, because of hardware limitations (queue
 *   depth settings). If a device driver gets a 'queue full' response,
 *   or if it simply chooses not to queue more I/O at one point, it can
 *   call this function to prevent the request_fn from being called until
 *   the driver has signalled it's ready to go again. This happens by calling
 *   blk_start_queue() to restart queue operations. Queue lock must be held.
 **/
void blk_stop_queue(request_queue_t *q)
{
        blk_remove_plug(q);
        set_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
}
EXPORT_SYMBOL(blk_stop_queue);

/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * Description:
 *     The block layer may perform asynchronous callback activity
 *     on a queue, such as calling the unplug function after a timeout.
 *     A block device may call blk_sync_queue to ensure that any
 *     such activity is cancelled, thus allowing it to release resources
 *     that the callbacks might use. The caller must already have made sure
 *     that its ->make_request_fn will not re-add plugging prior to calling
 *     this function.
 */
void blk_sync_queue(struct request_queue *q)
{
        del_timer_sync(&q->unplug_timer);
        kblockd_flush();
}
EXPORT_SYMBOL(blk_sync_queue);

/**
 * blk_run_queue - run a single device queue
 * @q:  The queue to run
 */
void blk_run_queue(struct request_queue *q)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        blk_remove_plug(q);
        if (!elv_queue_empty(q))
                q->request_fn(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_run_queue);
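/*
 * Illustrative sketch, not part of ll_rw_blk.c: one common way a driver
 * might pair blk_stop_queue() and blk_start_queue() to throttle dispatch
 * when its hardware command queue fills up.  The example_* names and the
 * device behind q->queuedata are hypothetical; the usual <linux/blkdev.h>
 * declarations are assumed.  ->request_fn already runs with the queue lock
 * held, while the completion path must take it before restarting the queue.
 */
static void example_request_fn(request_queue_t *q)
{
        struct request *rq;

        while ((rq = elv_next_request(q)) != NULL) {
                if (example_hw_queue_full(q->queuedata)) {
                        /* hardware full: stop dispatch until a command completes */
                        blk_stop_queue(q);
                        break;
                }
                blkdev_dequeue_request(rq);
                example_hw_submit(q->queuedata, rq);
        }
}

static void example_command_done(request_queue_t *q)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        /* clears QUEUE_FLAG_STOPPED and re-runs the request_fn */
        blk_start_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
}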
/**
 * blk_cleanup_queue: - release a &request_queue_t when it is no longer needed
 * @q:    the request queue to be released
 *
 * Description:
 *     blk_cleanup_queue is the pair to blk_init_queue() or
 *     blk_queue_make_request().  It should be called when a request queue is
 *     being released; typically when a block device is being de-registered.
 *     Currently, its primary task is to free all the &struct request
 *     structures that were allocated to the queue and the queue itself.
 *
 * Caveat:
 *     Hopefully the low level driver will have finished any
 *     outstanding requests first...
 **/
void blk_cleanup_queue(request_queue_t * q)
{
        struct request_list *rl = &q->rq;

        if (!atomic_dec_and_test(&q->refcnt))
                return;

        if (q->elevator)
                elevator_exit(q->elevator);

        blk_sync_queue(q);

        if (rl->rq_pool)
                mempool_destroy(rl->rq_pool);

        if (q->queue_tags)
                __blk_queue_free_tags(q);

        blk_queue_ordered(q, QUEUE_ORDERED_NONE);

        kmem_cache_free(requestq_cachep, q);
}
EXPORT_SYMBOL(blk_cleanup_queue);

static int blk_init_free_list(request_queue_t *q)
{
        struct request_list *rl = &q->rq;

        rl->count[READ] = rl->count[WRITE] = 0;
        rl->starved[READ] = rl->starved[WRITE] = 0;
        init_waitqueue_head(&rl->wait[READ]);
        init_waitqueue_head(&rl->wait[WRITE]);
        init_waitqueue_head(&rl->drain);

        rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
                                mempool_free_slab, request_cachep, q->node);

        if (!rl->rq_pool)
                return -ENOMEM;

        return 0;
}

static int __make_request(request_queue_t *, struct bio *);

request_queue_t *blk_alloc_queue(int gfp_mask)
{
        return blk_alloc_queue_node(gfp_mask, -1);
}
EXPORT_SYMBOL(blk_alloc_queue);

request_queue_t *blk_alloc_queue_node(int gfp_mask, int node_id)
{
        request_queue_t *q;

        q = kmem_cache_alloc_node(requestq_cachep, gfp_mask, node_id);
        if (!q)
                return NULL;

        memset(q, 0, sizeof(*q));
        init_timer(&q->unplug_timer);
        atomic_set(&q->refcnt, 1);

        q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
        q->backing_dev_info.unplug_io_data = q;

        return q;
}
EXPORT_SYMBOL(blk_alloc_queue_node);
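/*
 * Illustrative sketch, not part of ll_rw_blk.c: a bio-based driver (for
 * instance a hypothetical ramdisk) can pair blk_alloc_queue() with
 * blk_queue_make_request() instead of blk_init_queue(), so bios are handed
 * straight to its own make_request function and no request_fn, request
 * allocation or elevator is involved.  example_make_request,
 * example_rd_transfer and example_rd_init_queue are made-up names.
 */
static int example_make_request(request_queue_t *q, struct bio *bio)
{
        /* hypothetical helper: move the bio's data to/from an in-memory store */
        example_rd_transfer(q->queuedata, bio);
        bio_endio(bio, bio->bi_size, 0);
        return 0;
}

static request_queue_t *example_rd_init_queue(void *dev)
{
        request_queue_t *q = blk_alloc_queue(GFP_KERNEL);

        if (!q)
                return NULL;

        q->queuedata = dev;
        blk_queue_make_request(q, example_make_request);
        return q;
}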
/**
 * blk_init_queue  - prepare a request queue for use with a block device
 * @rfn:  The function to be called to process requests that have been
 *        placed on the queue.
 * @lock: Request queue spin lock
 *
 * Description:
 *    If a block device wishes to use the standard request handling procedures,
 *    which sort requests and coalesce adjacent requests, then it must
 *    call blk_init_queue().  The function @rfn will be called when there
 *    are requests on the queue that need to be processed.  If the device
 *    supports plugging, then @rfn may not be called immediately when requests
 *    are available on the queue, but may be called at some time later instead.
 *    Plugged queues are generally unplugged when a buffer belonging to one
 *    of the requests on the queue is needed, or due to memory pressure.
 *
 *    @rfn is not required, or even expected, to remove all requests off the
 *    queue, but only as many as it can handle at a time.  If it does leave
 *    requests on the queue, it is responsible for arranging that the requests
 *    get dealt with eventually.
 *
 *    The queue spin lock must be held while manipulating the requests on the
 *    request queue.
 *
 *    Function returns a pointer to the initialized request queue, or NULL if
 *    it didn't succeed.
 *
 * Note:
 *    blk_init_queue() must be paired with a blk_cleanup_queue() call
 *    when the block device is deactivated (such as at module unload).
 **/
request_queue_t *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
{
        return blk_init_queue_node(rfn, lock, -1);
}
EXPORT_SYMBOL(blk_init_queue);

request_queue_t *blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
{
        request_queue_t *q = blk_alloc_queue_node(GFP_KERNEL, node_id);

        if (!q)
                return NULL;

        q->node = node_id;
        if (blk_init_free_list(q))
                goto out_init;

        /*
         * if caller didn't supply a lock, they get per-queue locking with
         * our embedded lock
         */
        if (!lock) {
                spin_lock_init(&q->__queue_lock);
                lock = &q->__queue_lock;
        }

        q->request_fn           = rfn;
        q->back_merge_fn        = ll_back_merge_fn;
        q->front_merge_fn       = ll_front_merge_fn;
        q->merge_requests_fn    = ll_merge_requests_fn;
        q->prep_rq_fn           = NULL;
        q->unplug_fn            = generic_unplug_device;
        q->queue_flags          = (1 << QUEUE_FLAG_CLUSTER);
        q->queue_lock           = lock;

        blk_queue_segment_boundary(q, 0xffffffff);

        blk_queue_make_request(q, __make_request);
        blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);

        blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
        blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);

        /*
         * all done
         */
        if (!elevator_init(q, NULL)) {
                blk_queue_congestion_threshold(q);
                return q;
        }

        blk_cleanup_queue(q);
out_init:
        kmem_cache_free(requestq_cachep, q);
        return NULL;
}
EXPORT_SYMBOL(blk_init_queue_node);
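/*
 * Illustrative sketch, not part of ll_rw_blk.c: the usual pairing of
 * blk_init_queue() at driver initialisation with blk_cleanup_queue() at
 * teardown for a request-based driver, as required by the note above.
 * example_lock, example_queue and the example_request_fn sketched earlier
 * are hypothetical; gendisk setup is elided.
 */
static spinlock_t example_lock = SPIN_LOCK_UNLOCKED;
static request_queue_t *example_queue;

static int __init example_driver_init(void)
{
        example_queue = blk_init_queue(example_request_fn, &example_lock);
        if (!example_queue)
                return -ENOMEM;

        /* ... allocate a gendisk, point disk->queue at example_queue, add_disk() ... */
        return 0;
}

static void __exit example_driver_exit(void)
{
        /* ... del_gendisk() / put_disk() for the gendisk ... */
        blk_cleanup_queue(example_queue);
}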
int blk_get_queue(request_queue_t *q)
{
        if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
                atomic_inc(&q->refcnt);
                return 0;
        }

        return 1;
}
EXPORT_SYMBOL(blk_get_queue);

static inline void blk_free_request(request_queue_t *q, struct request *rq)
{
        elv_put_request(q, rq);
        mempool_free(rq, q->rq.rq_pool);
}

static inline struct request *
blk_alloc_request(request_queue_t *q, int rw, struct bio *bio, int gfp_mask)
{
        struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);

        if (!rq)
                return NULL;

        /*
         * first three bits are identical in rq->flags and bio->bi_rw,
         * see bio.h and blkdev.h
         */
        rq->flags = rw;

        if (!elv_set_request(q, rq, bio, gfp_mask))
                return rq;

        mempool_free(rq, q->rq.rq_pool);
        return NULL;
}

/*
 * ioc_batching returns true if the ioc is a valid batching request and
 * should be given priority access to a request.
 */
static inline int ioc_batching(request_queue_t *q, struct io_context *ioc)
{
        if (!ioc)
                return 0;

        /*
         * Make sure the process is able to allocate at least 1 request
         * even if the batch times out, otherwise we could theoretically
         * lose wakeups.
         */
        return ioc->nr_batch_requests == q->nr_batching ||
                (ioc->nr_batch_requests > 0
                && time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
}

/*
 * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
 * will cause the process to be a "batcher" on all queues in the system. This
 * is the behaviour we want though - once it gets a wakeup it should be given
 * a nice run.
 */
static void ioc_set_batching(request_queue_t *q, struct io_context *ioc)
{
        if (!ioc || ioc_batching(q, ioc))
                return;

        ioc->nr_batch_requests = q->nr_batching;
        ioc->last_waited = jiffies;
}

static void __freed_request(request_queue_t *q, int rw)
{
        struct request_list *rl = &q->rq;

        if (rl->count[rw] < queue_congestion_off_threshold(q))
                clear_queue_congested(q, rw);

        if (rl->count[rw] + 1 <= q->nr_requests) {
                if (waitqueue_active(&rl->wait[rw]))
                        wake_up(&rl->wait[rw]);

                blk_clear_queue_full(q, rw);
        }
}

/*
 * A request has just been released.  Account for it, update the full and
 * congestion status, wake up any waiters.  Called under q->queue_lock.
 */
static void freed_request(request_queue_t *q, int rw)
{
        struct request_list *rl = &q->rq;

        rl->count[rw]--;

        __freed_request(q, rw);

        if (unlikely(rl->starved[rw ^ 1]))
                __freed_request(q, rw ^ 1);

        if (!rl->count[READ] && !rl->count[WRITE]) {
                smp_mb();
                if (unlikely(waitqueue_active(&rl->drain)))
                        wake_up(&rl->drain);
        }
}

#define blkdev_free_rq(list) list_entry((list)->next, struct request, queuelist)

/*
 * Get a free request, queue_lock must be held.
 * Returns NULL on failure, with queue_lock held.
 * Returns !NULL on success, with queue_lock *not held*.
 */
static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
                                   int gfp_mask)
{
        struct request *rq = NULL;
        struct request_list *rl = &q->rq;
        struct io_context *ioc = current_io_context(GFP_ATOMIC);

        if (unlikely(test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags)))
                goto out;

        if (rl->count[rw]+1 >= q->nr_requests) {
                /*
                 * The queue will fill after this allocation, so set it as
                 * full, and mark this process as "batching". This process
                 * will be allowed to complete a batch of requests, others
                 * will be blocked.
                 */
                if (!blk_queue_full(q, rw)) {
                        ioc_set_batching(q, ioc);
                        blk_set_queue_full(q, rw);
                }
        }

        switch (elv_may_queue(q, rw, bio)) {
                case ELV_MQUEUE_NO:
                        goto rq_starved;
                case ELV_MQUEUE_MAY:
                        break;
                case ELV_MQUEUE_MUST:
                        goto get_rq;
        }

        if (blk_queue_full(q, rw) && !ioc_batching(q, ioc)) {
                /*
                 * The queue is full and the allocating process is not a
                 * "batcher", and not exempted by the IO scheduler
                 */
                goto out;
        }

get_rq:
        /*
         * Only allow batching queuers to allocate up to 50% over the defined
         * limit of requests, otherwise we could have thousands of requests
         * allocated with any setting of ->nr_requests
         */
        if (rl->count[rw] >= (3 * q->nr_requests / 2))
                goto out;

        rl->count[rw]++;
        rl->starved[rw] = 0;
        if (rl->count[rw] >= queue_congestion_on_threshold(q))
                set_queue_congested(q, rw);

        spin_unlock_irq(q->queue_lock);

        rq = blk_alloc_request(q, rw, bio, gfp_mask);
        if (!rq) {
                /*
                 * Allocation failed presumably due to memory. Undo anything
                 * we might have messed up.
                 *
                 * Allocating task should really be put onto the front of the
                 * wait queue, but this is pretty rare.
                 */
                spin_lock_irq(q->queue_lock);
                freed_request(q, rw);

                /*
                 * in the very unlikely event that allocation failed and no
                 * requests for this direction were pending, mark us starved
                 * so that freeing of a request in the other direction will
                 * notice us. another possible fix would be to split the
                 * rq mempool into READ and WRITE
                 */
rq_starved:
                if (unlikely(rl->count[rw] == 0))
                        rl->starved[rw] = 1;

                goto out;
        }

        if (ioc_batching(q, ioc))
                ioc->nr_batch_requests--;

        rq_init(q, rq);
        rq->rl = rl;
out:
        return rq;
}

/*
 * No available requests for this queue, unplug the device and wait for some
 * requests to become available.
 *
 * Called with q->queue_lock held, and returns with it unlocked.
 */
static struct request *get_request_wait(request_queue_t *q, int rw,
                                        struct bio *bio)
{
        struct request *rq;

        rq = get_request(q, rw, bio, GFP_NOIO);
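/*
 * Illustrative sketch, not part of ll_rw_blk.c: how the q->refcnt reference
 * counting shown above is meant to be used.  blk_get_queue() takes an extra
 * reference as long as the queue is not marked QUEUE_FLAG_DEAD, and
 * blk_cleanup_queue() drops one reference, only tearing the queue down once
 * the count reaches zero.  example_pin_queue is a made-up name.
 */
static int example_pin_queue(request_queue_t *q)
{
        if (blk_get_queue(q))           /* non-zero return: queue already dead */
                return -ENXIO;

        /* ... the queue can safely be used here ... */

        blk_cleanup_queue(q);           /* drop the reference taken above */
        return 0;
}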
