
📄 scsi_merge.c

📁 Linux kernel source code, distributed as a compressed archive; it is the source code that accompanies the book <<Linux内核>>.
💻 C
 *              any of the DMA code.  Same goes for clustering - in the
 *              case of hosts with no need for clustering, there is no point
 *              in a whole bunch of overhead.
 *
 *              Finally, in the event that a host has set can_queue to SG_ALL
 *              implying that there is no limit to the length of a scatter
 *              gather list, the sg count in the request won't be valid
 *              (mainly because we don't need queue management functions
 *              which keep the tally uptodate).
 */
__inline static int __init_io(Scsi_Cmnd * SCpnt,
                              int sg_count_valid,
                              int use_clustering,
                              int dma_host)
{
        struct buffer_head * bh;
        struct buffer_head * bhprev;
        char               * buff;
        int                  count;
        int                  i;
        struct request     * req;
        int                  sectors;
        struct scatterlist * sgpnt;
        int                  this_count;

        /*
         * FIXME(eric) - don't inline this - it doesn't depend on the
         * integer flags.   Come to think of it, I don't think this is even
         * needed any more.  Need to play with it and see if we hit the
         * panic.  If not, then don't bother.
         */
        if (!SCpnt->request.bh) {
                /*
                 * Case of page request (i.e. raw device), or unlinked buffer.
                 * Typically used for swapping, but this isn't how we do
                 * swapping any more.
                 */
                panic("I believe this is dead code.  If we hit this, I was wrong");
#if 0
                SCpnt->request_bufflen = SCpnt->request.nr_sectors << 9;
                SCpnt->request_buffer = SCpnt->request.buffer;
                SCpnt->use_sg = 0;
                /*
                 * FIXME(eric) - need to handle DMA here.
                 */
#endif
                return 1;
        }
        req = &SCpnt->request;
        /*
         * First we need to know how many scatter gather segments are needed.
         */
        if (!sg_count_valid) {
                count = __count_segments(req, use_clustering, dma_host, NULL);
        } else {
                count = req->nr_segments;
        }
        /*
         * If the dma pool is nearly empty, then queue a minimal request
         * with a single segment.  Typically this will satisfy a single
         * buffer.
         */
        if (dma_host && scsi_dma_free_sectors <= 10) {
                this_count = SCpnt->request.current_nr_sectors;
                goto single_segment;
        }
        /*
         * Don't bother with scatter-gather if there is only one segment.
         */
        if (count == 1) {
                this_count = SCpnt->request.nr_sectors;
                goto single_segment;
        }
        SCpnt->use_sg = count;

        /*
         * Allocate the actual scatter-gather table itself.
         * scsi_malloc can only allocate in chunks of 512 bytes.
         */
        SCpnt->sglist_len = (SCpnt->use_sg
                             * sizeof(struct scatterlist) + 511) & ~511;

        sgpnt = (struct scatterlist *) scsi_malloc(SCpnt->sglist_len);

        /*
         * Now fill the scatter-gather table.
         */
        if (!sgpnt) {
                /*
                 * If we cannot allocate the scatter-gather table, then
                 * simply write the first buffer all by itself.
                 */
                printk("Warning - running *really* short on DMA buffers\n");
                this_count = SCpnt->request.current_nr_sectors;
                goto single_segment;
        }
        /*
         * Next, walk the list, and fill in the addresses and sizes of
         * each segment.
         */
        memset(sgpnt, 0, SCpnt->sglist_len);
        SCpnt->request_buffer = (char *) sgpnt;
        SCpnt->request_bufflen = 0;
        bhprev = NULL;

        for (count = 0, bh = SCpnt->request.bh;
             bh; bh = bh->b_reqnext) {
                if (use_clustering && bhprev != NULL) {
                        if (dma_host &&
                            virt_to_phys(bhprev->b_data) - 1 == ISA_DMA_THRESHOLD) {
                                /* Nothing - fall through */
                        } else if (CONTIGUOUS_BUFFERS(bhprev, bh)) {
                                /*
                                 * This one is OK.  Let it go.  Note that we
                                 * do not have the ability to allocate
                                 * bounce buffer segments > PAGE_SIZE, so
                                 * for now we limit the thing.
                                 */
                                if( dma_host ) {
#ifdef DMA_SEGMENT_SIZE_LIMITED
                                        if( virt_to_phys(bh->b_data) - 1 < ISA_DMA_THRESHOLD
                                            || sgpnt[count - 1].length + bh->b_size <= PAGE_SIZE ) {
                                                sgpnt[count - 1].length += bh->b_size;
                                                bhprev = bh;
                                                continue;
                                        }
#else
                                        sgpnt[count - 1].length += bh->b_size;
                                        bhprev = bh;
                                        continue;
#endif
                                } else {
                                        sgpnt[count - 1].length += bh->b_size;
                                        SCpnt->request_bufflen += bh->b_size;
                                        bhprev = bh;
                                        continue;
                                }
                        }
                }
                count++;
                sgpnt[count - 1].address = bh->b_data;
                sgpnt[count - 1].length += bh->b_size;

                if (!dma_host) {
                        SCpnt->request_bufflen += bh->b_size;
                }
                bhprev = bh;
        }

        /*
         * Verify that the count is correct.
         */
        if (count != SCpnt->use_sg) {
                printk("Incorrect number of segments after building list\n");
#ifdef CONFIG_SCSI_DEBUG_QUEUES
                dump_stats(req, use_clustering, dma_host, count);
#endif
        }
        if (!dma_host) {
                return 1;
        }
        /*
         * Now allocate bounce buffers, if needed.
         */
        SCpnt->request_bufflen = 0;
        for (i = 0; i < count; i++) {
                sectors = (sgpnt[i].length >> 9);
                SCpnt->request_bufflen += sgpnt[i].length;
                if (virt_to_phys(sgpnt[i].address) + sgpnt[i].length - 1 >
                    ISA_DMA_THRESHOLD) {
                        if( scsi_dma_free_sectors - sectors <= 10 ) {
                                /*
                                 * If this would nearly drain the DMA
                                 * pool empty, then let's stop here.
                                 * Don't make this request any larger.
                                 * This is kind of a safety valve that
                                 * we use - we could get screwed later
                                 * on if we run out completely.
                                 */
                                SCpnt->request_bufflen -= sgpnt[i].length;
                                SCpnt->use_sg = i;
                                if (i == 0) {
                                        goto big_trouble;
                                }
                                break;
                        }

                        sgpnt[i].alt_address = sgpnt[i].address;
                        sgpnt[i].address =
                            (char *) scsi_malloc(sgpnt[i].length);
                        /*
                         * If we cannot allocate memory for this DMA bounce
                         * buffer, then queue just what we have done so far.
                         */
                        if (sgpnt[i].address == NULL) {
                                printk("Warning - running low on DMA memory\n");
                                SCpnt->request_bufflen -= sgpnt[i].length;
                                SCpnt->use_sg = i;
                                if (i == 0) {
                                        goto big_trouble;
                                }
                                break;
                        }
                        if (SCpnt->request.cmd == WRITE) {
                                memcpy(sgpnt[i].address, sgpnt[i].alt_address,
                                       sgpnt[i].length);
                        }
                }
        }
        return 1;

      big_trouble:
        /*
         * We come here in the event that we get one humongous
         * request, where we need a bounce buffer, and the buffer is
         * more than we can allocate in a single call to
         * scsi_malloc().  In addition, we only come here when it is
         * the 0th element of the scatter-gather table that gets us
         * into this trouble.  As a fallback, we fall back to
         * non-scatter-gather, and ask for a single segment.  We make
         * a half-hearted attempt to pick a reasonably large request
         * size mainly so that we don't thrash the thing with
         * iddy-biddy requests.
         */

        /*
         * The original number of sectors in the 0th element of the
         * scatter-gather table.
         */
        sectors = sgpnt[0].length >> 9;

        /*
         * Free up the original scatter-gather table.  Note that since
         * it was the 0th element that got us here, we don't have to
         * go in and free up memory from the other slots.
         */
        SCpnt->request_bufflen = 0;
        SCpnt->use_sg = 0;
        scsi_free(SCpnt->request_buffer, SCpnt->sglist_len);

        /*
         * Make an attempt to pick up as much as we reasonably can.
         * Just keep adding sectors until the pool starts running kind of
         * low.  The limit of 30 is somewhat arbitrary - the point is that
         * it would kind of suck if we dropped down and limited ourselves to
         * single-block requests if we had hundreds of free sectors.
         */
        if( scsi_dma_free_sectors > 30 ) {
                for (this_count = 0, bh = SCpnt->request.bh;
                     bh; bh = bh->b_reqnext) {
                        if( scsi_dma_free_sectors - this_count < 30
                            || this_count == sectors )
                        {
                                break;
                        }
                        this_count += bh->b_size >> 9;
                }
        } else {
                /*
                 * Yow!   Take the absolute minimum here.
                 */
                this_count = SCpnt->request.current_nr_sectors;
        }

        /*
         * Now drop through into the single-segment case.
         */

      single_segment:
        /*
         * Come here if for any reason we choose to do this as a single
         * segment.  Possibly the entire request, or possibly a small
         * chunk of the entire request.
         */
        bh = SCpnt->request.bh;
        buff = SCpnt->request.buffer;

        if (dma_host) {
                /*
                 * Allocate a DMA bounce buffer.  If the allocation fails, fall
                 * back and allocate a really small one - enough to satisfy
                 * the first buffer.
                 */
                if (virt_to_phys(SCpnt->request.bh->b_data)
                    + (this_count << 9) - 1 > ISA_DMA_THRESHOLD) {
                        buff = (char *) scsi_malloc(this_count << 9);
                        if (!buff) {
                                printk("Warning - running low on DMA memory\n");
                                this_count = SCpnt->request.current_nr_sectors;
                                buff = (char *) scsi_malloc(this_count << 9);
                                if (!buff) {
                                        dma_exhausted(SCpnt, 0);
                                }
                        }
                        if (SCpnt->request.cmd == WRITE)
                                memcpy(buff, (char *) SCpnt->request.buffer, this_count << 9);
                }
        }
        SCpnt->request_bufflen = this_count << 9;
        SCpnt->request_buffer = buff;
        SCpnt->use_sg = 0;
        return 1;
}

#define INITIO(_FUNCTION, _VALID, _CLUSTER, _DMA)       \
static int _FUNCTION(Scsi_Cmnd * SCpnt)                 \
{                                                       \
    return __init_io(SCpnt, _VALID, _CLUSTER, _DMA);    \
}

/*
 * ll_rw_blk.c now keeps track of the number of segments in
 * a request.  Thus we don't have to do it any more here.
 * We always force "_VALID" to 1.  Eventually clean this up
 * and get rid of the extra argument.
 */
INITIO(scsi_init_io_v, 1, 0, 0)
INITIO(scsi_init_io_vd, 1, 0, 1)
INITIO(scsi_init_io_vc, 1, 1, 0)
INITIO(scsi_init_io_vdc, 1, 1, 1)

/*
 * Function:    initialize_merge_fn()
 *
 * Purpose:     Initialize merge function for a host
 *
 * Arguments:   SDpnt   - Device descriptor.
 *
 * Returns:     Nothing.
 *
 * Lock status:
 *
 * Notes:
 */
void initialize_merge_fn(Scsi_Device * SDpnt)
{
        request_queue_t *q;
        struct Scsi_Host *SHpnt;

        SHpnt = SDpnt->host;
        q = &SDpnt->request_queue;

        /*
         * If the host has already selected a merge manager, then don't
         * pick a new one.
         */
#if 0
        if (q->back_merge_fn && q->front_merge_fn)
                return;
#endif
        /*
         * If this host has an unlimited tablesize, then don't bother with a
         * merge manager.  The whole point of the operation is to make sure
         * that requests don't grow too large, and this host isn't picky.
         *
         * Note that ll_rw_blk.c is effectively maintaining a segment
         * count which is only valid if clustering is used, and it obviously
         * doesn't handle the DMA case.   In the end, it
         * is simply easier to do it ourselves with our own functions
         * rather than rely upon the default behavior of ll_rw_blk.
         */
        if (!CLUSTERABLE_DEVICE(SHpnt, SDpnt) && SHpnt->unchecked_isa_dma == 0) {
                q->back_merge_fn = scsi_back_merge_fn_;
                q->front_merge_fn = scsi_front_merge_fn_;
                q->merge_requests_fn = scsi_merge_requests_fn_;
                SDpnt->scsi_init_io_fn = scsi_init_io_v;
        } else if (!CLUSTERABLE_DEVICE(SHpnt, SDpnt) && SHpnt->unchecked_isa_dma != 0) {
                q->back_merge_fn = scsi_back_merge_fn_;
                q->front_merge_fn = scsi_front_merge_fn_;
                q->merge_requests_fn = scsi_merge_requests_fn_;
                SDpnt->scsi_init_io_fn = scsi_init_io_vd;
        } else if (CLUSTERABLE_DEVICE(SHpnt, SDpnt) && SHpnt->unchecked_isa_dma == 0) {
                q->back_merge_fn = scsi_back_merge_fn_c;
                q->front_merge_fn = scsi_front_merge_fn_c;
                q->merge_requests_fn = scsi_merge_requests_fn_c;
                SDpnt->scsi_init_io_fn = scsi_init_io_vc;
        } else if (CLUSTERABLE_DEVICE(SHpnt, SDpnt) && SHpnt->unchecked_isa_dma != 0) {
                q->back_merge_fn = scsi_back_merge_fn_dc;
                q->front_merge_fn = scsi_front_merge_fn_dc;
                q->merge_requests_fn = scsi_merge_requests_fn_dc;
                SDpnt->scsi_init_io_fn = scsi_init_io_vdc;
        }
}
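Two details in the listing above are easy to read past: the scatter-gather table length is rounded up because scsi_malloc() only hands out memory in 512-byte chunks, and the INITIO() macro stamps out four scsi_init_io_v* wrappers whose flags are compile-time constants, so the inlined __init_io() can drop the clustering and ISA-DMA branches that do not apply; initialize_merge_fn() then just picks the matching wrapper. The stand-alone sketch below is not part of the kernel source: it is a minimal user-space re-creation of those two mechanisms, and fake_scatterlist, fake_init_io, and the clusterable/unchecked_isa_dma variables are made up for illustration.

#include <stdio.h>
#include <stddef.h>

/* Stand-in for struct scatterlist; only the sizeof matters here. */
struct fake_scatterlist {
        char *address;
        char *alt_address;
        unsigned int length;
};

/* Same rounding the driver applies to SCpnt->sglist_len:
 * round the table up to whole 512-byte scsi_malloc() chunks. */
static size_t sglist_len(int use_sg)
{
        return (use_sg * sizeof(struct fake_scatterlist) + 511) & ~(size_t)511;
}

/* Hypothetical stand-in for __init_io(); it only reports which
 * wrapper was expanded and with which baked-in flags. */
static int fake_init_io(const char *name, int valid, int cluster, int dma)
{
        printf("%-12s valid=%d cluster=%d dma=%d\n", name, valid, cluster, dma);
        return 1;
}

/* Analogous to the kernel's INITIO() macro: each expansion is a tiny
 * wrapper that fixes the three flags as compile-time constants. */
#define INITIO(_FUNCTION, _VALID, _CLUSTER, _DMA)               \
static int _FUNCTION(void)                                      \
{                                                               \
        return fake_init_io(#_FUNCTION, _VALID, _CLUSTER, _DMA);\
}

INITIO(init_io_v, 1, 0, 0)      /* no clustering, no ISA DMA */
INITIO(init_io_vd, 1, 0, 1)     /* no clustering, ISA DMA    */
INITIO(init_io_vc, 1, 1, 0)     /* clustering, no ISA DMA    */
INITIO(init_io_vdc, 1, 1, 1)    /* clustering and ISA DMA    */

int main(void)
{
        int n;
        int clusterable = 1, unchecked_isa_dma = 0;   /* made-up device */
        int (*init_io_fn)(void);

        /* The 512-byte rounding in action. */
        for (n = 1; n <= 64; n *= 4)
                printf("use_sg=%2d -> sglist_len=%zu bytes\n", n, sglist_len(n));

        /* Selection analogous to initialize_merge_fn(): pick the wrapper
         * whose baked-in flags match the device's properties. */
        if (!clusterable)
                init_io_fn = unchecked_isa_dma ? init_io_vd : init_io_v;
        else
                init_io_fn = unchecked_isa_dma ? init_io_vdc : init_io_vc;

        return init_io_fn() == 1 ? 0 : 1;
}

Because the flags are constants inside each expanded wrapper, a compiler inlining __init_io() can discard the dead clustering and DMA paths entirely, which is the overhead-avoidance point the truncated header comment at the top of this listing is making.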
