
scsi_merge.c

Linux kernel source code, distributed as a compressed archive; this is the source code accompanying the book 《Linux内核》 (The Linux Kernel).
Language: C
Page 1 of 3
/*
 *  scsi_merge.c Copyright (C) 1999 Eric Youngdale
 *
 *  SCSI queueing library.
 *      Initial versions: Eric Youngdale (eric@andante.org).
 *                        Based upon conversations with large numbers
 *                        of people at Linux Expo.
 *	Support for dynamic DMA mapping: Jakub Jelinek (jakub@redhat.com).
 */

/*
 * This file contains queue management functions that are used by SCSI.
 * Typically this is used for several purposes.   First, we need to ensure
 * that commands do not grow so large that they cannot be handled all at
 * once by a host adapter.   The various flavors of merge functions included
 * here serve this purpose.
 *
 * Note that it would be quite trivial to allow the low-level driver the
 * flexibility to define it's own queue handling functions.  For the time
 * being, the hooks are not present.   Right now we are just using the
 * data in the host template as an indicator of how we should be handling
 * queues, and we select routines that are optimized for that purpose.
 *
 * Some hosts do not impose any restrictions on the size of a request.
 * In such cases none of the merge functions in this file are called,
 * and we allow ll_rw_blk to merge requests in the default manner.
 * This isn't guaranteed to be optimal, but it should be pretty darned
 * good.   If someone comes up with ideas of better ways of managing queues
 * to improve on the default behavior, then certainly fit it into this
 * scheme in whatever manner makes the most sense.   Please note that
 * since each device has it's own queue, we have considerable flexibility
 * in queue management.
 */

#define __NO_VERSION__
#include <linux/config.h>
#include <linux/module.h>

#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/malloc.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/stat.h>
#include <linux/blk.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/smp_lock.h>

#define __KERNEL_SYSCALLS__

#include <linux/unistd.h>

#include <asm/system.h>
#include <asm/irq.h>
#include <asm/dma.h>
#include <asm/io.h>

#include "scsi.h"
#include "hosts.h"
#include "constants.h"
#include <scsi/scsi_ioctl.h>

/*
 * This means that bounce buffers cannot be allocated in chunks > PAGE_SIZE.
 * Ultimately we should get away from using a dedicated DMA bounce buffer
 * pool, and we should instead try and use kmalloc() instead.  If we can
 * eliminate this pool, then this restriction would no longer be needed.
 */
#define DMA_SEGMENT_SIZE_LIMITED

#ifdef CONFIG_SCSI_DEBUG_QUEUES
/*
 * Enable a bunch of additional consistency checking.   Turn this off
 * if you are benchmarking.
 */
static int dump_stats(struct request *req,
		      int use_clustering,
		      int dma_host,
		      int segments)
{
	struct buffer_head *bh;

	/*
	 * Dump the information that we have.  We know we have an
	 * inconsistency.
	 */
	printk("nr_segments is %x\n", req->nr_segments);
	printk("counted segments is %x\n", segments);
	printk("Flags %d %d\n", use_clustering, dma_host);
	for (bh = req->bh; bh->b_reqnext != NULL; bh = bh->b_reqnext)
	{
		printk("Segment 0x%p, blocks %d, addr 0x%lx\n",
		       bh,
		       bh->b_size >> 9,
		       virt_to_phys(bh->b_data - 1));
	}
	panic("Ththththaats all folks.  Too dangerous to continue.\n");
}

/*
 * Simple sanity check that we will use for the first go around
 * in order to ensure that we are doing the counting correctly.
 * This can be removed for optimization.
 */
#define SANITY_CHECK(req, _CLUSTER, _DMA)				\
    if( req->nr_segments != __count_segments(req, _CLUSTER, _DMA, NULL) )	\
    {									\
	printk("Incorrect segment count at 0x%p", current_text_addr());	\
	dump_stats(req, _CLUSTER, _DMA, __count_segments(req, _CLUSTER, _DMA, NULL)); \
    }
#else
#define SANITY_CHECK(req, _CLUSTER, _DMA)
#endif

static void dma_exhausted(Scsi_Cmnd * SCpnt, int i)
{
	int jj;
	struct scatterlist *sgpnt;
	int consumed = 0;

	sgpnt = (struct scatterlist *) SCpnt->request_buffer;

	/*
	 * Now print out a bunch of stats.  First, start with the request
	 * size.
	 */
	printk("dma_free_sectors:%d\n", scsi_dma_free_sectors);
	printk("use_sg:%d\ti:%d\n", SCpnt->use_sg, i);
	printk("request_bufflen:%d\n", SCpnt->request_bufflen);

	/*
	 * Now dump the scatter-gather table, up to the point of failure.
	 */
	for(jj=0; jj < SCpnt->use_sg; jj++)
	{
		printk("[%d]\tlen:%d\taddr:%p\talt:%p\n",
		       jj,
		       sgpnt[jj].length,
		       sgpnt[jj].address,
		       sgpnt[jj].alt_address);
		if( sgpnt[jj].alt_address != NULL )
		{
			consumed = (sgpnt[jj].length >> 9);
		}
	}
	printk("Total %d sectors consumed\n", consumed);
	panic("DMA pool exhausted");
}

/*
 * FIXME(eric) - the original disk code disabled clustering for MOD
 * devices.  I have no idea why we thought this was a good idea - my
 * guess is that it was an attempt to limit the size of requests to MOD
 * devices.
 */
#define CLUSTERABLE_DEVICE(SH,SD) (SH->use_clustering && \
				   SD->type != TYPE_MOD)

/*
 * This entire source file deals with the new queueing code.
 */

/*
 * Function:    __count_segments()
 *
 * Purpose:     Prototype for queue merge function.
 *
 * Arguments:   q       - Queue for which we are merging request.
 *              req     - request into which we wish to merge.
 *              use_clustering - 1 if this host wishes to use clustering
 *              dma_host - 1 if this host has ISA DMA issues (bus doesn't
 *                      expose all of the address lines, so that DMA cannot
 *                      be done from an arbitrary address).
 *		remainder - used to track the residual size of the last
 *			segment.  Comes in handy when we want to limit the
 *			size of bounce buffer segments to PAGE_SIZE.
 *
 * Returns:     Count of the number of SG segments for the request.
 *
 * Lock status:
 *
 * Notes:       This is only used for diagnostic purposes.
 */
__inline static int __count_segments(struct request *req,
				     int use_clustering,
				     int dma_host,
				     int * remainder)
{
	int ret = 1;
	int reqsize = 0;
	struct buffer_head *bh;
	struct buffer_head *bhnext;

	if( remainder != NULL ) {
		reqsize = *remainder;
	}

	/*
	 * Add in the size increment for the first buffer.
	 */
	bh = req->bh;
#ifdef DMA_SEGMENT_SIZE_LIMITED
	if( reqsize + bh->b_size > PAGE_SIZE ) {
		ret++;
		reqsize = bh->b_size;
	} else {
		reqsize += bh->b_size;
	}
#else
	reqsize += bh->b_size;
#endif

	for (bh = req->bh, bhnext = bh->b_reqnext;
	     bhnext != NULL;
	     bh = bhnext, bhnext = bh->b_reqnext) {
		if (use_clustering) {
			/*
			 * See if we can do this without creating another
			 * scatter-gather segment.  In the event that this is a
			 * DMA capable host, make sure that a segment doesn't span
			 * the DMA threshold boundary.
			 */
			if (dma_host &&
			    virt_to_phys(bhnext->b_data) - 1 == ISA_DMA_THRESHOLD) {
				ret++;
				reqsize = bhnext->b_size;
			} else if (CONTIGUOUS_BUFFERS(bh, bhnext)) {
				/*
				 * This one is OK.  Let it go.
				 */
#ifdef DMA_SEGMENT_SIZE_LIMITED
				/* Note scsi_malloc is only able to hand out
				 * chunks of memory in sizes of PAGE_SIZE or
				 * less.  Thus we need to keep track of
				 * the size of the piece that we have
				 * seen so far, and if we have hit
				 * the limit of PAGE_SIZE, then we are
				 * kind of screwed and we need to start
				 * another segment.
				 */
				if( dma_host
				    && virt_to_phys(bh->b_data) - 1 >= ISA_DMA_THRESHOLD
				    && reqsize + bhnext->b_size > PAGE_SIZE )
				{
					ret++;
					reqsize = bhnext->b_size;
					continue;
				}
#endif
				reqsize += bhnext->b_size;
				continue;
			}
			ret++;
			reqsize = bhnext->b_size;
		} else {
			ret++;
			reqsize = bhnext->b_size;
		}
	}
	if( remainder != NULL ) {
		*remainder = reqsize;
	}
	return ret;
}

/*
 * Function:    recount_segments()
 *
 * Purpose:     Recount the number of scatter-gather segments for this request.
 *
 * Arguments:   req     - request that needs recounting.
 *
 * Returns:     Count of the number of SG segments for the request.
 *
 * Lock status: Irrelevant.
 *
 * Notes:	This is only used when we have partially completed requests
 *		and the bit that is leftover is of an indeterminate size.
 *		This can come up if you get a MEDIUM_ERROR, for example,
 *		as we will have "completed" all of the sectors up to and
 *		including the bad sector, and the leftover bit is what
 *		we have to do now.  This tends to be a rare occurrence, so
 *		we aren't busting our butts to instantiate separate versions
 *		of this function for the 4 different flag values.  We
 *		probably should, however.
 */
void
recount_segments(Scsi_Cmnd * SCpnt)
{
	struct request *req;
	struct Scsi_Host *SHpnt;
	Scsi_Device * SDpnt;

	req   = &SCpnt->request;
	SHpnt = SCpnt->host;
	SDpnt = SCpnt->device;

	req->nr_segments = __count_segments(req,
					    CLUSTERABLE_DEVICE(SHpnt, SDpnt),
					    SHpnt->unchecked_isa_dma, NULL);
}

#define MERGEABLE_BUFFERS(X,Y) \
(((((long)(X)->b_data+(X)->b_size)|((long)(Y)->b_data)) & \
  (DMA_CHUNK_SIZE - 1)) == 0)

#ifdef DMA_CHUNK_SIZE
static inline int scsi_new_mergeable(request_queue_t * q,
				     struct request * req,
				     struct Scsi_Host *SHpnt,
				     int max_segments)
{
	/*
	 * pci_map_sg will be able to merge these two
	 * into a single hardware sg entry, check if
	 * we'll have enough memory for the sg list.
	 * scsi.c allocates for this purpose
	 * min(64,sg_tablesize) entries.
	 */
	if (req->nr_segments >= max_segments ||
	    req->nr_segments >= SHpnt->sg_tablesize)
		return 0;
	req->nr_segments++;
	q->elevator.nr_segments++;
	return 1;
}

static inline int scsi_new_segment(request_queue_t * q,
				   struct request * req,
				   struct Scsi_Host *SHpnt,
				   int max_segments)
{
	/*
	 * pci_map_sg won't be able to map these two
	 * into a single hardware sg entry, so we have to
	 * check if things fit into sg_tablesize.
	 */
	if (req->nr_hw_segments >= SHpnt->sg_tablesize ||
	    req->nr_segments >= SHpnt->sg_tablesize)
		return 0;
	if (req->nr_segments >= max_segments)
		return 0;
	req->nr_hw_segments++;
	req->nr_segments++;
	q->elevator.nr_segments++;
	return 1;
}
#else
static inline int scsi_new_segment(request_queue_t * q,
				   struct request * req,
				   struct Scsi_Host *SHpnt,
				   int max_segments)
{
	if (req->nr_segments < SHpnt->sg_tablesize &&
	    req->nr_segments < max_segments) {
		/*
		 * This will form the start of a new segment.  Bump the
		 * counter.
		 */
		req->nr_segments++;
		q->elevator.nr_segments++;
		return 1;
	} else {
		return 0;
	}
}
#endif

/*
 * Function:    __scsi_merge_fn()
 *
 * Purpose:     Prototype for queue merge function.
 *
 * Arguments:   q       - Queue for which we are merging request.
 *              req     - request into which we wish to merge.
 *              bh      - Block which we may wish to merge into request
 *              use_clustering - 1 if this host wishes to use clustering
 *              dma_host - 1 if this host has ISA DMA issues (bus doesn't
 *                      expose all of the address lines, so that DMA cannot
 *                      be done from an arbitrary address).
 *
 * Returns:     1 if it is OK to merge the block into the request.  0
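The header comment above describes __scsi_merge_fn(), whose body continues on the later pages of this listing. To make the rule it documents concrete, here is a minimal user-space sketch of the same kind of decision under simplified assumptions: a block joins the request's current scatter-gather segment only while it is contiguous with it and the segment stays within one page (standing in for the DMA_SEGMENT_SIZE_LIMITED case); otherwise it needs a fresh segment, which is refused once the adapter's segment table is full. Every name in the sketch (struct rq, struct blk, can_merge, PAGE_SZ, SG_TABLESIZE) is invented for illustration; it omits the ISA DMA threshold and DMA_CHUNK_SIZE handling and is not code from scsi_merge.c or the block layer.

/*
 * Illustrative sketch only -- not part of scsi_merge.c.
 */
#include <stdio.h>

#define PAGE_SZ      4096
#define SG_TABLESIZE 4		/* pretend adapter limit on SG segments */

struct blk {			/* stand-in for a buffer_head */
	char *data;
	int   size;
};

struct rq {			/* stand-in for struct request */
	char *last_data;	/* start of the request's last block */
	int   last_size;	/* size of that block */
	int   seg_size;		/* bytes in the current SG segment */
	int   nr_segments;	/* SG segments used so far */
};

/* Return 1 if blk may be merged into req, 0 if the request must be closed. */
static int can_merge(struct rq *req, const struct blk *b, int use_clustering)
{
	int contiguous = (req->last_data + req->last_size == b->data);

	if (use_clustering && contiguous &&
	    req->seg_size + b->size <= PAGE_SZ) {
		/* Fold the block into the current segment. */
		req->seg_size += b->size;
	} else {
		/* Needs a new segment; respect the adapter's table size. */
		if (req->nr_segments >= SG_TABLESIZE)
			return 0;
		req->nr_segments++;
		req->seg_size = b->size;
	}
	req->last_data = b->data;
	req->last_size = b->size;
	return 1;
}

int main(void)
{
	static char page[2 * PAGE_SZ];
	/* A request already holding one 512-byte block at the start of page. */
	struct rq req = { page, 512, 512, 1 };
	struct blk next_contig = { page + 512, 512 };
	struct blk far_away    = { page + PAGE_SZ, 512 };

	int ok1 = can_merge(&req, &next_contig, 1);
	printf("contiguous block merged:     %d (segments=%d)\n", ok1, req.nr_segments);

	int ok2 = can_merge(&req, &far_away, 1);
	printf("non-contiguous block merged: %d (segments=%d)\n", ok2, req.nr_segments);
	return 0;
}

Built with any C99 compiler, the sketch folds the contiguous block into the existing segment (the count stays at 1) and gives the distant block a second segment. The kernel code applies the same idea per buffer_head, with the additional ISA DMA and hardware-segment cases handled by the functions shown earlier in the file.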
