
ll_rw_blk.c

Linux block device driver source code
C
Page 1 of 5

	bqt->tag_index[tag] = rq;
	blkdev_dequeue_request(rq);
	list_add(&rq->queuelist, &bqt->busy_list);
	bqt->busy++;
	return 0;
}

EXPORT_SYMBOL(blk_queue_start_tag);

/**
 * blk_queue_invalidate_tags - invalidate all pending tags
 * @q:  the request queue for the device
 *
 *  Description:
 *   Hardware conditions may dictate a need to stop all pending requests.
 *   In this case, we will safely clear the block side of the tag queue and
 *   readd all requests to the request queue in the right order.
 *
 *  Notes:
 *   queue lock must be held.
 **/
void blk_queue_invalidate_tags(request_queue_t *q)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	struct list_head *tmp, *n;
	struct request *rq;

	list_for_each_safe(tmp, n, &bqt->busy_list) {
		rq = list_entry_rq(tmp);

		if (rq->tag == -1) {
			printk(KERN_ERR
			       "%s: bad tag found on list\n", __FUNCTION__);
			list_del_init(&rq->queuelist);
			rq->flags &= ~REQ_QUEUED;
		} else
			blk_queue_end_tag(q, rq);

		rq->flags &= ~REQ_STARTED;
		__elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0);
	}
}

EXPORT_SYMBOL(blk_queue_invalidate_tags);

static char *rq_flags[] = {
	"REQ_RW",
	"REQ_FAILFAST",
	"REQ_SOFTBARRIER",
	"REQ_HARDBARRIER",
	"REQ_CMD",
	"REQ_NOMERGE",
	"REQ_STARTED",
	"REQ_DONTPREP",
	"REQ_QUEUED",
	"REQ_PC",
	"REQ_BLOCK_PC",
	"REQ_SENSE",
	"REQ_FAILED",
	"REQ_QUIET",
	"REQ_SPECIAL",
	"REQ_DRIVE_CMD",
	"REQ_DRIVE_TASK",
	"REQ_DRIVE_TASKFILE",
	"REQ_PREEMPT",
	"REQ_PM_SUSPEND",
	"REQ_PM_RESUME",
	"REQ_PM_SHUTDOWN",
};

void blk_dump_rq_flags(struct request *rq, char *msg)
{
	int bit;

	printk("%s: dev %s: flags = ", msg,
		rq->rq_disk ? rq->rq_disk->disk_name : "?");
	bit = 0;
	do {
		if (rq->flags & (1 << bit))
			printk("%s ", rq_flags[bit]);
		bit++;
	} while (bit < __REQ_NR_BITS);

	printk("\nsector %llu, nr/cnr %lu/%u\n", (unsigned long long)rq->sector,
						       rq->nr_sectors,
						       rq->current_nr_sectors);
	printk("bio %p, biotail %p, buffer %p, data %p, len %u\n", rq->bio, rq->biotail, rq->buffer, rq->data, rq->data_len);

	if (rq->flags & (REQ_BLOCK_PC | REQ_PC)) {
		printk("cdb: ");
		for (bit = 0; bit < sizeof(rq->cmd); bit++)
			printk("%02x ", rq->cmd[bit]);
		printk("\n");
	}
}

EXPORT_SYMBOL(blk_dump_rq_flags);

void blk_recount_segments(request_queue_t *q, struct bio *bio)
{
	struct bio_vec *bv, *bvprv = NULL;
	int i, nr_phys_segs, nr_hw_segs, seg_size, hw_seg_size, cluster;
	int high, highprv = 1;

	if (unlikely(!bio->bi_io_vec))
		return;

	cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
	hw_seg_size = seg_size = nr_phys_segs = nr_hw_segs = 0;
	bio_for_each_segment(bv, bio, i) {
		/*
		 * the trick here is making sure that a high page is never
		 * considered part of another segment, since that might
		 * change with the bounce page.
		 */
		high = page_to_pfn(bv->bv_page) >= q->bounce_pfn;
		if (high || highprv)
			goto new_hw_segment;
		if (cluster) {
			if (seg_size + bv->bv_len > q->max_segment_size)
				goto new_segment;
			if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
				goto new_segment;
			if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
				goto new_segment;
			if (BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len))
				goto new_hw_segment;

			seg_size += bv->bv_len;
			hw_seg_size += bv->bv_len;
			bvprv = bv;
			continue;
		}
new_segment:
		if (BIOVEC_VIRT_MERGEABLE(bvprv, bv) &&
		    !BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len)) {
			hw_seg_size += bv->bv_len;
		} else {
new_hw_segment:
			if (hw_seg_size > bio->bi_hw_front_size)
				bio->bi_hw_front_size = hw_seg_size;
			hw_seg_size = BIOVEC_VIRT_START_SIZE(bv) + bv->bv_len;
			nr_hw_segs++;
		}

		nr_phys_segs++;
		bvprv = bv;
		seg_size = bv->bv_len;
		highprv = high;
	}
	if (hw_seg_size > bio->bi_hw_back_size)
		bio->bi_hw_back_size = hw_seg_size;
	if (nr_hw_segs == 1 && hw_seg_size > bio->bi_hw_front_size)
		bio->bi_hw_front_size = hw_seg_size;
	bio->bi_phys_segments = nr_phys_segs;
	bio->bi_hw_segments = nr_hw_segs;
	bio->bi_flags |= (1 << BIO_SEG_VALID);
}

static int blk_phys_contig_segment(request_queue_t *q, struct bio *bio,
				   struct bio *nxt)
{
	if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER)))
		return 0;

	if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
		return 0;
	if (bio->bi_size + nxt->bi_size > q->max_segment_size)
		return 0;

	/*
	 * bio and nxt are contiguous in memory, check if the queue allows
	 * these two to be merged into one
	 */
	if (BIO_SEG_BOUNDARY(q, bio, nxt))
		return 1;

	return 0;
}

static int blk_hw_contig_segment(request_queue_t *q, struct bio *bio,
				 struct bio *nxt)
{
	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
		blk_recount_segments(q, bio);
	if (unlikely(!bio_flagged(nxt, BIO_SEG_VALID)))
		blk_recount_segments(q, nxt);
	if (!BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)) ||
	    BIOVEC_VIRT_OVERSIZE(bio->bi_hw_front_size + bio->bi_hw_back_size))
		return 0;
	if (bio->bi_size + nxt->bi_size > q->max_segment_size)
		return 0;

	return 1;
}

/*
 * map a request to scatterlist, return number of sg entries setup.
 * Caller must make sure sg can hold rq->nr_phys_segments entries
 */
int blk_rq_map_sg(request_queue_t *q, struct request *rq, struct scatterlist *sg)
{
	struct bio_vec *bvec, *bvprv;
	struct bio *bio;
	int nsegs, i, cluster;

	nsegs = 0;
	cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);

	/*
	 * for each bio in rq
	 */
	bvprv = NULL;
	rq_for_each_bio(bio, rq) {
		/*
		 * for each segment in bio
		 */
		bio_for_each_segment(bvec, bio, i) {
			int nbytes = bvec->bv_len;

			if (bvprv && cluster) {
				if (sg[nsegs - 1].length + nbytes > q->max_segment_size)
					goto new_segment;

				if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
					goto new_segment;
				if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
					goto new_segment;

				sg[nsegs - 1].length += nbytes;
			} else {
new_segment:
				memset(&sg[nsegs],0,sizeof(struct scatterlist));
				sg[nsegs].page = bvec->bv_page;
				sg[nsegs].length = nbytes;
				sg[nsegs].offset = bvec->bv_offset;

				nsegs++;
			}
			bvprv = bvec;
		} /* segments in bio */
	} /* bios in rq */

	return nsegs;
}

EXPORT_SYMBOL(blk_rq_map_sg);

/*
 * the standard queue merge functions, can be overridden with device
 * specific ones if so desired
 */

static inline int ll_new_mergeable(request_queue_t *q,
				   struct request *req,
				   struct bio *bio)
{
	int nr_phys_segs = bio_phys_segments(q, bio);

	if (req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
		req->flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}

	/*
	 * A hw segment is just getting larger, bump just the phys
	 * counter.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;
}

static inline int ll_new_hw_segment(request_queue_t *q,
				    struct request *req,
				    struct bio *bio)
{
	int nr_hw_segs = bio_hw_segments(q, bio);
	int nr_phys_segs = bio_phys_segments(q, bio);

	if (req->nr_hw_segments + nr_hw_segs > q->max_hw_segments
	    || req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
		req->flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}

	/*
	 * This will form the start of a new hw segment.  Bump both
	 * counters.
	 */
	req->nr_hw_segments += nr_hw_segs;
	req->nr_phys_segments += nr_phys_segs;
	return 1;
}

static int ll_back_merge_fn(request_queue_t *q, struct request *req,
			    struct bio *bio)
{
	int len;

	if (req->nr_sectors + bio_sectors(bio) > q->max_sectors) {
		req->flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (unlikely(!bio_flagged(req->biotail, BIO_SEG_VALID)))
		blk_recount_segments(q, req->biotail);
	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
		blk_recount_segments(q, bio);
	len = req->biotail->bi_hw_back_size + bio->bi_hw_front_size;
	if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio)) &&
	    !BIOVEC_VIRT_OVERSIZE(len)) {
		int mergeable =  ll_new_mergeable(q, req, bio);

		if (mergeable) {
			if (req->nr_hw_segments == 1)
				req->bio->bi_hw_front_size = len;
			if (bio->bi_hw_segments == 1)
				bio->bi_hw_back_size = len;
		}
		return mergeable;
	}

	return ll_new_hw_segment(q, req, bio);
}

static int ll_front_merge_fn(request_queue_t *q, struct request *req,
			     struct bio *bio)
{
	int len;

	if (req->nr_sectors + bio_sectors(bio) > q->max_sectors) {
		req->flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	len = bio->bi_hw_back_size + req->bio->bi_hw_front_size;
	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
		blk_recount_segments(q, bio);
	if (unlikely(!bio_flagged(req->bio, BIO_SEG_VALID)))
		blk_recount_segments(q, req->bio);
	if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(req->bio)) &&
	    !BIOVEC_VIRT_OVERSIZE(len)) {
		int mergeable =  ll_new_mergeable(q, req, bio);

		if (mergeable) {
			if (bio->bi_hw_segments == 1)
				bio->bi_hw_front_size = len;
			if (req->nr_hw_segments == 1)
				req->biotail->bi_hw_back_size = len;
		}
		return mergeable;
	}

	return ll_new_hw_segment(q, req, bio);
}

static int ll_merge_requests_fn(request_queue_t *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;
	int total_hw_segments;

	/*
	 * First check if either of the requests are re-queued
	 * requests.  Can't merge them if they are.
	 */
	if (req->special || next->special)
		return 0;

	/*
	 * Will it become too large?
	 */
	if ((req->nr_sectors + next->nr_sectors) > q->max_sectors)
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (blk_phys_contig_segment(q, req->biotail, next->bio))
		total_phys_segments--;

	if (total_phys_segments > q->max_phys_segments)
		return 0;

	total_hw_segments = req->nr_hw_segments + next->nr_hw_segments;
	if (blk_hw_contig_segment(q, req->biotail, next->bio)) {
		int len = req->biotail->bi_hw_back_size + next->bio->bi_hw_front_size;
		/*
		 * propagate the combined length to the end of the requests
		 */
		if (req->nr_hw_segments == 1)
			req->bio->bi_hw_front_size = len;
		if (next->nr_hw_segments == 1)
			next->biotail->bi_hw_back_size = len;
		total_hw_segments--;
	}

	if (total_hw_segments > q->max_hw_segments)
		return 0;

	/* Merge is OK... */
	req->nr_phys_segments = total_phys_segments;
	req->nr_hw_segments = total_hw_segments;
	return 1;
}

/*
 * "plug" the device if there are no outstanding requests: this will
 * force the transfer to start only after we have put all the requests
 * on the list.
 *
 * This is called with interrupts off and no requests on the queue and
 * with the queue lock held.
 */
void blk_plug_device(request_queue_t *q)
{
	WARN_ON(!irqs_disabled());

	/*
	 * don't plug a stopped queue, it must be paired with blk_start_queue()
	 * which will restart the queueing
	 */
	if (test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags))
		return;

	if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
		mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
}

EXPORT_SYMBOL(blk_plug_device);

/*
 * remove the queue from the plugged list, if present. called with
 * queue lock held and interrupts disabled.
 */
int blk_remove_plug(request_queue_t *q)
{
	WARN_ON(!irqs_disabled());

	if (!test_and_clear_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
		return 0;

	del_timer(&q->unplug_timer);
	return 1;
}

EXPORT_SYMBOL(blk_remove_plug);

/*
 * remove the plug and let it rip..
 */
void __generic_unplug_device(request_queue_t *q)
{
	if (unlikely(test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags)))
		return;

	if (!blk_remove_plug(q))
		return;

	q->request_fn(q);
}
EXPORT_SYMBOL(__generic_unplug_device);

/**
 * generic_unplug_device - fire a request queue
 * @q:    The &request_queue_t in question
 *
 * Description:
 *   Linux uses plugging to build bigger requests queues before letting
 *   the device have at them. If a queue is plugged, the I/O scheduler
 *   is still adding and merging requests on the queue. Once the queue
 *   gets unplugged, the request_fn defined for the queue is invoked and
 *   transfers started.
 **/
void generic_unplug_device(request_queue_t *q)
{
	spin_lock_irq(q->queue_lock);
	__generic_unplug_device(q);
	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL(generic_unplug_device);

static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
				   struct page *page)
{
	request_queue_t *q = bdi->unplug_io_data;

	/*
	 * devices don't necessarily have an ->unplug_fn defined
	 */
	if (q->unplug_fn)
		q->unplug_fn(q);
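
The tag helpers near the top of this listing (blk_queue_start_tag(), blk_queue_end_tag() and blk_queue_invalidate_tags()) are driven by drivers that support tagged command queueing, always with q->queue_lock held. The sketch below is a rough, hypothetical illustration of that calling pattern, not code from ll_rw_blk.c: every my_* name is a placeholder, and the tag map is assumed to have been set up elsewhere.

/*
 * Hypothetical sketch, not part of ll_rw_blk.c: a driver assigns a tag
 * before handing a request to the hardware and returns the tag when the
 * command completes.  Both paths run with q->queue_lock held, as the
 * helpers above require.  All my_* names are placeholders.
 */
#include <linux/blkdev.h>

static void my_hw_submit(struct request *rq)
{
	/* placeholder: program the controller using rq->tag */
}

static void my_issue_tagged(request_queue_t *q, struct request *rq)
{
	if (blk_queue_start_tag(q, rq)) {
		/* no free tag: leave the request queued and retry later */
		return;
	}

	/* blk_queue_start_tag() dequeued rq and put it on the busy list */
	my_hw_submit(rq);
}

static void my_complete_tagged(request_queue_t *q, struct request *rq)
{
	/* hand the tag back so another request can reuse it */
	blk_queue_end_tag(q, rq);
}

If the hardware loses track of outstanding commands, blk_queue_invalidate_tags() above clears the busy list and re-adds every pending request to the queue in order, which is why both paths must run under the queue lock.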
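
blk_dump_rq_flags() is exported purely as a debugging aid: it prints the request's flag names from rq_flags[], its sector accounting, its bio pointers and, for packet-command requests, the cdb bytes. A minimal, hypothetical use from a driver's prep_rq_fn might look like the sketch below; my_prep_rq_fn is invented for illustration.

/*
 * Hypothetical sketch, not part of ll_rw_blk.c: dump a request's state
 * when the driver sees something it cannot handle.  my_prep_rq_fn is a
 * placeholder for a driver's prep_rq_fn.
 */
#include <linux/blkdev.h>

static int my_prep_rq_fn(request_queue_t *q, struct request *rq)
{
	if (!blk_fs_request(rq) && !blk_pc_request(rq)) {
		/* prints flags, sector counts, bio pointers and the cdb */
		blk_dump_rq_flags(rq, "my_driver: unsupported request");
		return BLKPREP_KILL;
	}

	return BLKPREP_OK;
}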
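
blk_rq_map_sg() collapses all the bios of a request into a scatterlist, applying the same clustering and boundary rules as the merge functions above; as its comment notes, the caller must size the sg array for rq->nr_phys_segments. The following request-function sketch is hypothetical and only shows the calling pattern: my_dev, my_request_fn, my_hw_start and MY_MAX_SEGMENTS are placeholders, and hardware submission and completion are elided.

/*
 * Hypothetical sketch, not part of ll_rw_blk.c: a request function maps
 * each file-system request to a scatterlist with blk_rq_map_sg() before
 * programming the controller.  All my_* names and MY_MAX_SEGMENTS are
 * placeholders.
 */
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <asm/scatterlist.h>

#define MY_MAX_SEGMENTS	128	/* assumed to cover q->max_phys_segments */

struct my_dev {
	struct scatterlist sg[MY_MAX_SEGMENTS];
};

static void my_hw_start(struct my_dev *dev, struct request *rq, int nsegs)
{
	/* placeholder: point the controller at dev->sg[0..nsegs-1] */
}

static void my_request_fn(request_queue_t *q)
{
	struct my_dev *dev = q->queuedata;
	struct request *rq;
	int nsegs;

	/* the request function runs with q->queue_lock held */
	while ((rq = elv_next_request(q)) != NULL) {
		if (!blk_fs_request(rq)) {
			/* fail anything that is not a normal fs request */
			end_request(rq, 0);
			continue;
		}

		/* take rq off the queue; completion happens from the IRQ handler */
		blkdev_dequeue_request(rq);

		/* sg[] must hold at least rq->nr_phys_segments entries */
		nsegs = blk_rq_map_sg(q, rq, dev->sg);

		my_hw_start(dev, rq, nsegs);
	}
}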
