ll_rw_blk.c

来自「Linux Kernel 2.6.9 for OMAP1710」· C语言 代码 · 共 2,525 行 · 第 1/5 页

C
2,525
字号
 * @tag: The tag of the request
 *
 * Notes:
 *    Should be used when a device returns a tag and you want to match
 *    it with a request.
 *
 *    no locks need be held.
 **/
struct request *blk_queue_find_tag(request_queue_t *q, int tag)
{
	struct blk_queue_tag *bqt = q->queue_tags;

	/* No tag map, or tag outside the allocated map: no such request. */
	if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
		return NULL;

	/* NULL here simply means the tag is not currently in use. */
	return bqt->tag_index[tag];
}

EXPORT_SYMBOL(blk_queue_find_tag);

/**
 * __blk_queue_free_tags - release tag maintenance info
 * @q:  the request queue for the device
 *
 *  Notes:
 *    blk_cleanup_queue() will take care of calling this function, if tagging
 *    has been used. So there's no need to call this directly.
 **/
static void __blk_queue_free_tags(request_queue_t *q)
{
	struct blk_queue_tag *bqt = q->queue_tags;

	if (!bqt)
		return;

	/*
	 * The tag map may be shared between queues (see the refcnt handling
	 * in blk_queue_init_tags()); only free it when the last user drops
	 * its reference.  Freeing with requests still tagged would corrupt
	 * in-flight state, hence the BUG_ON sanity checks.
	 */
	if (atomic_dec_and_test(&bqt->refcnt)) {
		BUG_ON(bqt->busy);
		BUG_ON(!list_empty(&bqt->busy_list));

		kfree(bqt->tag_index);
		bqt->tag_index = NULL;

		kfree(bqt->tag_map);
		bqt->tag_map = NULL;

		kfree(bqt);
	}

	/* This queue no longer does tagged queueing, regardless of refcnt. */
	q->queue_tags = NULL;
	q->queue_flags &= ~(1 << QUEUE_FLAG_QUEUED);
}

/**
 * blk_queue_free_tags - release tag maintenance info
 * @q:  the request queue for the device
 *
 *  Notes:
 *	This is used to disable tagged queuing to a device, yet leave
 *	queue in function.
 **/
void blk_queue_free_tags(request_queue_t *q)
{
	/*
	 * Only the QUEUED flag is cleared; the tag structures themselves
	 * stay allocated until __blk_queue_free_tags() at cleanup time.
	 */
	clear_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
}

EXPORT_SYMBOL(blk_queue_free_tags);

/*
 * Allocate and zero the tag_index array and tag_map bitmap for @tags,
 * sized for @depth tags (capped at twice the queue's request count).
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int
init_tag_map(request_queue_t *q, struct blk_queue_tag *tags, int depth)
{
	int bits, i;
	struct request **tag_index;
	unsigned long *tag_map;

	/* A depth beyond 2 * nr_requests can never be used; clamp it. */
	if (depth > q->nr_requests * 2) {
		depth = q->nr_requests * 2;
		printk(KERN_ERR "%s: adjusted depth to %d\n",
				__FUNCTION__, depth);
	}

	/* GFP_ATOMIC: may be called with the queue lock held. */
	tag_index = kmalloc(depth * sizeof(struct request *), GFP_ATOMIC);
	if (!tag_index)
		goto fail;

	/* Round up to whole words; always at least one spare word. */
	bits = (depth / BLK_TAGS_PER_LONG) + 1;
	tag_map = kmalloc(bits * sizeof(unsigned long), GFP_ATOMIC);
	if (!tag_map)
		goto fail;

	memset(tag_index, 0, depth * sizeof(struct request *));
	memset(tag_map, 0, bits * sizeof(unsigned long));
	tags->max_depth = depth;
	tags->real_max_depth = bits * BITS_PER_LONG;
	tags->tag_index = tag_index;
	tags->tag_map = tag_map;

	/*
	 * set the upper bits if the depth isn't a multiple of the word size,
	 * so the tag-search loop in blk_queue_start_tag() never hands out a
	 * tag >= depth.
	 */
	for (i = depth; i < bits * BLK_TAGS_PER_LONG; i++)
		__set_bit(i, tag_map);

	return 0;
fail:
	/* tag_index is NULL if its own allocation failed; kfree(NULL) is ok */
	kfree(tag_index);
	return -ENOMEM;
}

/**
 * blk_queue_init_tags - initialize the queue tag info
 * @q:  the request queue for the device
 * @depth:  the maximum queue depth supported
 * @tags: optional pre-existing tag map to share with this queue, or NULL
 *	  to allocate a fresh one.
 **/
int blk_queue_init_tags(request_queue_t *q, int depth,
			struct blk_queue_tag *tags)
{
	int rc;

	/* Can't hand in a tag map different from the one already in use. */
	BUG_ON(tags && q->queue_tags && tags != q->queue_tags);

	if (!tags && !q->queue_tags) {
		/* First use and nothing shared: build a new tag map. */
		tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
		if (!tags)
			goto fail;
		if (init_tag_map(q, tags, depth))
			goto fail;

		INIT_LIST_HEAD(&tags->busy_list);
		tags->busy = 0;
		atomic_set(&tags->refcnt, 1);
	} else if (q->queue_tags) {
		/* Queue already tagged: just (maybe) grow the existing map. */
		if ((rc = blk_queue_resize_tags(q, depth)))
			return rc;
		set_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
		return 0;
	} else
		/* Sharing a caller-supplied map: take a reference. */
		atomic_inc(&tags->refcnt);

	/*
	 * assign it, all done
	 */
	q->queue_tags = tags;
	q->queue_flags |= (1 << QUEUE_FLAG_QUEUED);
	return 0;
fail:
	kfree(tags);
	return -ENOMEM;
}

EXPORT_SYMBOL(blk_queue_init_tags);

/**
 * blk_queue_resize_tags - change
 the queueing depth
 * @q:  the request queue for the device
 * @new_depth: the new max command queueing depth
 *
 *  Notes:
 *    Must be called with the queue lock held.
 **/
int blk_queue_resize_tags(request_queue_t *q, int new_depth)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	struct request **tag_index;
	unsigned long *tag_map;
	int bits, max_depth;

	if (!bqt)
		return -ENXIO;

	/*
	 * don't bother sizing down: the existing map is already big enough,
	 * so just lower the usable depth.
	 */
	if (new_depth <= bqt->real_max_depth) {
		bqt->max_depth = new_depth;
		return 0;
	}

	/*
	 * save the old state info, so we can copy it back
	 */
	tag_index = bqt->tag_index;
	tag_map = bqt->tag_map;
	max_depth = bqt->real_max_depth;

	/* On failure the old arrays in tag_index/tag_map are still intact. */
	if (init_tag_map(q, bqt, new_depth))
		return -ENOMEM;

	/*
	 * Migrate the in-flight tag state into the new, larger arrays.
	 * max_depth (the old real_max_depth) is a whole number of words,
	 * so the bitmap copy below loses no bits.
	 */
	memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *));
	bits = max_depth / BLK_TAGS_PER_LONG;
	memcpy(bqt->tag_map, tag_map, bits * sizeof(unsigned long));

	kfree(tag_index);
	kfree(tag_map);
	return 0;
}

EXPORT_SYMBOL(blk_queue_resize_tags);

/**
 * blk_queue_end_tag - end tag operations for a request
 * @q:  the request queue for the device
 * @rq: the request that has completed
 *
 *  Description:
 *    Typically called when end_that_request_first() returns 0, meaning
 *    all transfers have been done for a request. It's important to call
 *    this function before end_that_request_last(), as that will put the
 *    request back on the free list thus corrupting the internal tag list.
 *
 *  Notes:
 *   queue lock must be held.
 **/
void blk_queue_end_tag(request_queue_t *q, struct request *rq)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	int tag = rq->tag;

	/* A request handed to us must actually carry a tag. */
	BUG_ON(tag == -1);

	if (unlikely(tag >= bqt->real_max_depth))
		return;

	/* Clearing an already-clear bit means double completion -- warn. */
	if (unlikely(!__test_and_clear_bit(tag, bqt->tag_map))) {
		printk("attempt to clear non-busy tag (%d)\n", tag);
		return;
	}

	/* Take it off the busy list and mark the request untagged. */
	list_del_init(&rq->queuelist);
	rq->flags &= ~REQ_QUEUED;
	rq->tag = -1;

	if (unlikely(bqt->tag_index[tag] == NULL))
		printk("tag %d is missing\n", tag);

	bqt->tag_index[tag] = NULL;
	bqt->busy--;
}

EXPORT_SYMBOL(blk_queue_end_tag);

/**
 * blk_queue_start_tag - find a free tag and assign it
 * @q:  the request queue for the device
 * @rq:  the block request that needs tagging
 *
 *  Description:
 *    This can either be used as a stand-alone helper, or possibly be
 *    assigned as the queue &prep_rq_fn (in which case &struct request
 *    automagically gets a tag assigned). Note that this function
 *    assumes that any type of request can be queued! if this is not
 *    true for your device, you must check the request type before
 *    calling this function.  The request will also be removed from
 *    the request queue, so it's the drivers responsibility to readd
 *    it if it should need to be restarted for some reason.
 *
 *    Returns 0 on success (tag assigned), 1 if no tag is available.
 *
 *  Notes:
 *   queue lock must be held.
 **/
int blk_queue_start_tag(request_queue_t *q, struct request *rq)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	unsigned long *map = bqt->tag_map;
	int tag = 0;

	/* Tagging a request twice would corrupt the busy accounting. */
	if (unlikely((rq->flags & REQ_QUEUED))) {
		printk(KERN_ERR 
		       "request %p for device [%s] already tagged %d",
		       rq, rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
		BUG();
	}

	/*
	 * Skip whole words that are completely allocated.  The unused
	 * upper bits of the last word were pre-set by init_tag_map(),
	 * so ffz() below can never return a tag >= max_depth.
	 */
	for (map = bqt->tag_map; *map == -1UL; map++) {
		tag += BLK_TAGS_PER_LONG;

		if (tag >= bqt->max_depth)
			return 1;
	}

	/* ffz() finds the first zero (free) bit in this word. */
	tag += ffz(*map);
	__set_bit(tag, bqt->tag_map);

	rq->flags |= REQ_QUEUED;
	rq->tag = tag;
	bqt->tag_index[tag] = rq;
	blkdev_dequeue_request(rq);
	list_add(&rq->queuelist, &bqt->busy_list);
	bqt->busy++;
	return 0;
}

EXPORT_SYMBOL(blk_queue_start_tag);

/**
 * blk_queue_invalidate_tags - invalidate all pending tags
 * @q:  the request queue for the device
 *
 *  Description:
 *   Hardware conditions may dictate a need to stop all pending requests.
 *   In this case, we will safely clear the block side of the tag queue and
 *   readd all requests to the request queue in the right order.
 *
 *  Notes:
 *   queue lock must be held.
 **/
void blk_queue_invalidate_tags(request_queue_t *q)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	struct list_head *tmp, *n;
	struct request *rq;

	/* _safe variant: blk_queue_end_tag() unlinks entries as we walk. */
	list_for_each_safe(tmp, n, &bqt->busy_list) {
		rq = list_entry_rq(tmp);

		if (rq->tag == -1) {
			/* On the busy list but untagged: fix up by hand. */
			printk("bad tag found on list\n");
			list_del_init(&rq->queuelist);
			rq->flags &= ~REQ_QUEUED;
		} else
			blk_queue_end_tag(q, rq);

		/* Requeue at the back so the driver restarts it cleanly. */
		rq->flags &= ~REQ_STARTED;
		__elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0);
	}
}

EXPORT_SYMBOL(blk_queue_invalidate_tags);

/*
 * Names for the request flag bits, indexed by bit number; must stay in
 * sync with the __REQ_* enum (used only by blk_dump_rq_flags() below).
 */
static char *rq_flags[] = {
	"REQ_RW",
	"REQ_FAILFAST",
	"REQ_SOFTBARRIER",
	"REQ_HARDBARRIER",
	"REQ_CMD",
	"REQ_NOMERGE",
	"REQ_STARTED",
	"REQ_DONTPREP",
	"REQ_QUEUED",
	"REQ_PC",
	"REQ_BLOCK_PC",
	"REQ_SENSE",
	"REQ_FAILED",
	"REQ_QUIET",
	"REQ_SPECIAL",
	"REQ_DRIVE_CMD",
	"REQ_DRIVE_TASK",
	"REQ_DRIVE_TASKFILE",
	"REQ_PREEMPT",
	"REQ_PM_SUSPEND",
	"REQ_PM_RESUME",
	"REQ_PM_SHUTDOWN",
};

/*
 * Debug helper: dump a request's flags, sector range, bio pointers and
 * (for packet commands) the cdb to the kernel log, prefixed with @msg.
 */
void blk_dump_rq_flags(struct request *rq, char *msg)
{
	int bit;

	printk("%s: dev %s: flags = ", msg,
		rq->rq_disk ? rq->rq_disk->disk_name : "?");
	bit = 0;
	do {
		if (rq->flags & (1 << bit))
			printk("%s ", rq_flags[bit]);
		bit++;
	} while (bit < __REQ_NR_BITS);

	printk("\nsector %llu, nr/cnr %lu/%u\n", (unsigned long long)rq->sector,
						       rq->nr_sectors,
						       rq->current_nr_sectors);
	printk("bio %p, biotail %p, buffer %p, data %p, len %u\n", rq->bio, rq->biotail, rq->buffer, rq->data, rq->data_len);

	/* Packet commands carry a SCSI-style cdb worth printing. */
	if (rq->flags & (REQ_BLOCK_PC | REQ_PC)) {
		printk("cdb: ");
		for (bit = 0; bit < sizeof(rq->cmd); bit++)
			printk("%02x ", rq->cmd[bit]);
		printk("\n");
	}
}

EXPORT_SYMBOL(blk_dump_rq_flags);

/*
 * Recompute bio->bi_phys_segments / bi_hw_segments (and the hw front/back
 * sizes) by walking the bio's segments, honouring the queue's clustering
 * and segment-size limits, then mark the counts valid via BIO_SEG_VALID.
 */
void blk_recount_segments(request_queue_t *q, struct bio *bio)
{
	struct bio_vec *bv, *bvprv = NULL;
	int i, nr_phys_segs, nr_hw_segs, seg_size, hw_seg_size, cluster;
	int high, highprv = 1;

	if (unlikely(!bio->bi_io_vec))
		return;

	cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
	hw_seg_size = seg_size = nr_phys_segs = nr_hw_segs = 0;
	bio_for_each_segment(bv, bio, i) {
		/*
		 * the trick here is making sure that a high page is never
		 * considered part of another segment, since that might
		 * change with the bounce page.
		 */
		high = page_to_pfn(bv->bv_page) >= q->bounce_pfn;
		if (high || highprv)
			goto new_hw_segment;
		if (cluster) {
			/* Try to extend the current physical segment. */
			if (seg_size + bv->bv_len > q->max_segment_size)
				goto new_segment;
			if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
				goto new_segment;
			if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
				goto new_segment;
			if (BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len))
				goto new_hw_segment;

			seg_size += bv->bv_len;
			hw_seg_size += bv->bv_len;
			bvprv = bv;
			continue;
		}
new_segment:
		/* New phys segment; it may still merge into the hw segment. */
		if (BIOVEC_VIRT_MERGEABLE(bvprv, bv) &&
		    !BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len)) {
			hw_seg_size += bv->bv_len;
		} else {
new_hw_segment:
			/* Close out the hw segment and start a fresh one. */
			if (hw_seg_size > bio->bi_hw_front_size)
				bio->bi_hw_front_size = hw_seg_size;
			hw_seg_size = BIOVEC_VIRT_START_SIZE(bv) + bv->bv_len;
			nr_hw_segs++;
		}

		nr_phys_segs++;
		bvprv = bv;
		seg_size = bv->bv_len;
		highprv = high;
	}
	if (hw_seg_size > bio->bi_hw_back_size)
		bio->bi_hw_back_size = hw_seg_size;
	/* A single hw segment is both the front and the back segment. */
	if (nr_hw_segs == 1 && hw_seg_size > bio->bi_hw_front_size)
		bio->bi_hw_front_size = hw_seg_size;
	bio->bi_phys_segments = nr_phys_segs;
	bio->bi_hw_segments = nr_hw_segs;
	bio->bi_flags |= (1 << BIO_SEG_VALID);
}

/*
 * Return 1 if @bio and @nxt may be merged into one physical segment on
 * queue @q (clustering enabled, physically contiguous, within the
 * queue's segment size and boundary limits), 0 otherwise.
 */
int blk_phys_contig_segment(request_queue_t *q, struct bio *bio,
				   struct bio *nxt)
{
	if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER)))
		return 0;

	if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
		return 0;
	if (bio->bi_size + nxt->bi_size > q->max_segment_size)
		return 0;

	/*
	 * bio and nxt are contiguous in memory, check if the queue allows
	 * these two to be merged into one
	 */
	if (BIO_SEG_BOUNDARY(q, bio, nxt))
		return 1;

	return 0;
}

EXPORT_SYMBOL(blk_phys_contig_segment);

/*
 * Return 1 if @bio and @nxt may be merged into one hardware (virtually
 * mergeable) segment, 0 otherwise.  Revalidates stale segment counts
 * first, since the front/back hw sizes are consulted below.
 */
int blk_hw_contig_segment(request_queue_t *q, struct bio *bio,
				 struct bio *nxt)
{
	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
		blk_recount_segments(q, bio);
	if (unlikely(!bio_flagged(nxt, BIO_SEG_VALID)))
		blk_recount_segments(q, nxt);
	if (!BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)) ||
	    BIOVEC_VIRT_OVERSIZE(bio->bi_hw_front_size + bio->bi_hw_back_size))
		return 0;
	if (bio->bi_size + nxt->bi_size > q->max_segment_size)
		return 0;

	return 1;
}

EXPORT_SYMBOL(blk_hw_contig_segment);

/*
 * map a request to scatterlist, return number of sg entries setup. Caller
 * must make sure sg can hold rq->nr_phys_segments entries
 */
int blk_rq_map_sg(request_queue_t *q, struct request *rq, struct scatterlist *sg)
{
	struct bio_vec *bvec, *bvprv;
	struct bio *bio;
	int nsegs, i, cluster;

	nsegs = 0;
	cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);

	/*
	 * for each bio in rq
	 */
	bvprv = NULL;
	rq_for_each_bio(bio, rq) {
		/*
		 * for each segment in bio
		 */

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?