/*
 * scsi_lib.c — Linux kernel SCSI queueing library.
 * Excerpt: page 1 of 5 of a 2,521-line file, as captured from a code
 * viewer; non-code viewer chrome has been removed from this header.
 */
/*
 *  scsi_lib.c Copyright (C) 1999 Eric Youngdale
 *
 *  SCSI queueing library.
 *      Initial versions: Eric Youngdale (eric@andante.org).
 *                        Based upon conversations with large numbers
 *                        of people at Linux Expo.
 */

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/scatterlist.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>

#include "scsi_priv.h"
#include "scsi_logging.h"


/* Number of scatterlist mempools (one per entry in scsi_sg_pools[]). */
#define SG_MEMPOOL_NR		ARRAY_SIZE(scsi_sg_pools)
/* Minimum number of elements kept in reserve by each sg mempool. */
#define SG_MEMPOOL_SIZE		2

/*
 * The maximum number of SG segments that we will put inside a scatterlist
 * (unless chaining is used). Should ideally fit inside a single page, to
 * avoid a higher order allocation.
 */
#define SCSI_MAX_SG_SEGMENTS	128

/*
 * One sized pool of scatterlist allocations: a slab cache for the
 * backing memory plus a mempool drawing from it.
 */
struct scsi_host_sg_pool {
	size_t		size;	/* number of scatterlist entries per alloc */
	char		*name;	/* slab cache name, "sgpool-<size>" */
	struct kmem_cache	*slab;
	mempool_t	*pool;
};

/*
 * Power-of-two pool sizes up to SCSI_MAX_SG_SEGMENTS; the preprocessor
 * conditionals drop entries that exceed the configured maximum.
 */
#define SP(x) { x, "sgpool-" #x }
static struct scsi_host_sg_pool scsi_sg_pools[] = {
	SP(8),
	SP(16),
#if (SCSI_MAX_SG_SEGMENTS > 16)
	SP(32),
#if (SCSI_MAX_SG_SEGMENTS > 32)
	SP(64),
#if (SCSI_MAX_SG_SEGMENTS > 64)
	SP(128),
#endif
#endif
#endif
};
#undef SP

static void scsi_run_queue(struct request_queue *q);

/*
 * Function:	scsi_unprep_request()
 *
 * Purpose:	Remove all preparation done for a request, including its
 *		associated scsi_cmnd, so that it can be requeued.
 *
 * Arguments:	req	- request to unprepare
 *
 * Lock status:	Assumed that no locks are held upon entry.
 *
 * Returns:	Nothing.
 */
static void scsi_unprep_request(struct request *req)
{
	struct scsi_cmnd *cmd = req->special;

	/* Clearing REQ_DONTPREP forces the prep function to run again. */
	req->cmd_flags &= ~REQ_DONTPREP;
	req->special = NULL;

	scsi_put_command(cmd);
}

/*
 * Function:    scsi_queue_insert()
 *
 * Purpose:     Insert a command in the midlevel queue.
 *
 * Arguments:   cmd    - command that we are adding to queue.
 *              reason - why we are inserting command to queue.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     Nothing.
 *
 * Notes:       We do this for one of two cases.  Either the host is busy
 *              and it cannot accept any more commands for the time being,
 *              or the device returned QUEUE_FULL and can accept no more
 *              commands.
 * Notes:       This could be called either from an interrupt context or a
 *              normal process context.
 */
int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
	struct Scsi_Host *host = cmd->device->host;
	struct scsi_device *device = cmd->device;
	struct request_queue *q = device->request_queue;
	unsigned long flags;

	SCSI_LOG_MLQUEUE(1,
		 printk("Inserting command %p into mlqueue\n", cmd));

	/*
	 * Set the appropriate busy bit for the device/host.
	 *
	 * If the host/device isn't busy, assume that something actually
	 * completed, and that we should be able to queue a command now.
	 *
	 * Note that the prior mid-layer assumption that any host could
	 * always queue at least one command is now broken.  The mid-layer
	 * will implement a user specifiable stall (see
	 * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
	 * if a command is requeued with no other commands outstanding
	 * either for the device or for the host.
	 */
	if (reason == SCSI_MLQUEUE_HOST_BUSY)
		host->host_blocked = host->max_host_blocked;
	else if (reason == SCSI_MLQUEUE_DEVICE_BUSY)
		device->device_blocked = device->max_device_blocked;

	/*
	 * Decrement the counters, since these commands are no longer
	 * active on the host/device.
	 */
	scsi_device_unbusy(device);

	/*
	 * Requeue this command.  It will go before all other commands
	 * that are already in the queue.
	 *
	 * NOTE: there is magic here about the way the queue is plugged if
	 * we have no outstanding commands.
	 *
	 * Although we *don't* plug the queue, we call the request
	 * function.  The SCSI request function detects the blocked condition
	 * and plugs the queue appropriately.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, cmd->request);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);

	return 0;
}

/**
 * scsi_execute - insert request and wait for the result
 * @sdev:	scsi device
 * @cmd:	scsi command
 * @data_direction: data direction
 * @buffer:	data buffer
 * @bufflen:	len of buffer
 * @sense:	optional sense buffer
 * @timeout:	request timeout in seconds
 * @retries:	number of times to retry request
 * @flags:	or into request flags;
 *
 * returns the req->errors value which is the scsi_cmnd result
 * field.
 **/
int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
		 int data_direction, void *buffer, unsigned bufflen,
		 unsigned char *sense, int timeout, int retries, int flags)
{
	struct request *req;
	int write = (data_direction == DMA_TO_DEVICE);
	int ret = DRIVER_ERROR << 24;

	/*
	 * NOTE(review): the return value of blk_get_request() is not
	 * checked before use below — presumably relying on __GFP_WAIT
	 * making the allocation sleep rather than fail; confirm against
	 * the block layer of this kernel version.
	 */
	req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);

	if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
					buffer, bufflen, __GFP_WAIT))
		goto out;

	req->cmd_len = COMMAND_SIZE(cmd[0]);
	memcpy(req->cmd, cmd, req->cmd_len);
	req->sense = sense;
	req->sense_len = 0;
	req->retries = retries;
	req->timeout = timeout;
	req->cmd_type = REQ_TYPE_BLOCK_PC;
	req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT;

	/*
	 * head injection *required* here otherwise quiesce won't work
	 */
	blk_execute_rq(req->q, NULL, req, 1);

	ret = req->errors;
 out:
	blk_put_request(req);

	return ret;
}
EXPORT_SYMBOL(scsi_execute);

/*
 * scsi_execute_req - like scsi_execute(), but optionally normalizes the
 * sense data into @sshdr.  A temporary sense buffer is allocated only
 * when the caller wants decoded sense.  Returns the scsi_cmnd result
 * field (or DRIVER_ERROR << 24 if the sense buffer allocation fails).
 */
int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
		     int data_direction, void *buffer, unsigned bufflen,
		     struct scsi_sense_hdr *sshdr, int timeout, int retries)
{
	char *sense = NULL;
	int result;

	if (sshdr) {
		sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
		if (!sense)
			return DRIVER_ERROR << 24;
	}
	result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
			      sense, timeout, retries, 0);
	if (sshdr)
		scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);

	kfree(sense);
	return result;
}
EXPORT_SYMBOL(scsi_execute_req);

/*
 * Per-request context for scsi_execute_async(): carries the caller's
 * completion callback, its opaque data pointer, and a sense buffer that
 * lives as long as the request does.
 */
struct scsi_io_context {
	void *data;
	void (*done)(void *data, char *sense, int result, int resid);
	char sense[SCSI_SENSE_BUFFERSIZE];
};

static struct kmem_cache *scsi_io_context_cache;

/*
 * Completion handler for scsi_execute_async() requests: invoke the
 * caller's callback (if any), then free the io context and drop the
 * request reference.  Runs as a blk end_io callback.
 */
static void scsi_end_async(struct request *req, int uptodate)
{
	struct scsi_io_context *sioc = req->end_io_data;

	if (sioc->done)
		sioc->done(sioc->data, sioc->sense, req->errors, req->data_len);

	kmem_cache_free(scsi_io_context_cache, sioc);
	__blk_put_request(req->q, req);
}

/*
 * Attach @bio to @rq: mark its segment state invalid so it is
 * recounted, set the data direction, bounce-buffer it if the queue
 * requires, and append it to the request.
 */
static int scsi_merge_bio(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;

	bio->bi_flags &= ~(1 << BIO_SEG_VALID);
	if (rq_data_dir(rq) == WRITE)
		bio->bi_rw |= (1 << BIO_RW);
	/* may replace bio with a bounce-buffered clone */
	blk_queue_bounce(q, &bio);

	return blk_rq_append_bio(q, rq, bio);
}

/* bio end_io: just drop the reference taken at allocation time. */
static void scsi_bi_endio(struct bio *bio, int error)
{
	bio_put(bio);
}

/**
 * scsi_req_map_sg - map a scatterlist into a request
 * @rq:		request to fill
 * @sg:		scatterlist
 * @nsegs:	number of elements
 * @bufflen:	len of buffer
 * @gfp:	memory allocation flags
 *
 * scsi_req_map_sg maps a scatterlist into a request so that the
 * request can be sent to the block layer. We do not trust the scatterlist
 * sent to use, as some ULDs use that struct to only organize the pages.
 */
static int scsi_req_map_sg(struct request *rq, struct scatterlist *sgl,
			   int nsegs, unsigned bufflen, gfp_t gfp)
{
	struct request_queue *q = rq->q;
	int nr_pages = (bufflen + sgl[0].offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned int data_len = bufflen, len, bytes, off;
	struct scatterlist *sg;
	struct page *page;
	struct bio *bio = NULL;
	int i, err, nr_vecs = 0;

	for_each_sg(sgl, sg, nsegs, i) {
		page = sg_page(sg);
		off = sg->offset;
		len = sg->length;
		/*
		 * NOTE(review): data_len starts at bufflen and is then
		 * *increased* by each segment length here before being
		 * consumed below — verify this accumulation is intended
		 * (see the over-sized-scatterlist comment in the loop).
		 */
		data_len += len;

		while (len > 0 && data_len > 0) {
			/*
			 * sg sends a scatterlist that is larger than
			 * the data_len it wants transferred for certain
			 * IO sizes
			 */
			bytes = min_t(unsigned int, len, PAGE_SIZE - off);
			bytes = min(bytes, data_len);

			if (!bio) {
				/* start a new bio sized for the pages left */
				nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
				nr_pages -= nr_vecs;

				bio = bio_alloc(gfp, nr_vecs);
				if (!bio) {
					err = -ENOMEM;
					goto free_bios;
				}
				bio->bi_end_io = scsi_bi_endio;
			}

			if (bio_add_pc_page(q, bio, page, bytes, off) !=
			    bytes) {
				/* page was not fully added: bail out */
				bio_put(bio);
				err = -EINVAL;
				goto free_bios;
			}

			if (bio->bi_vcnt >= nr_vecs) {
				/* bio is full: merge it into the request */
				err = scsi_merge_bio(rq, bio);
				if (err) {
					bio_endio(bio, 0);
					goto free_bios;
				}
				bio = NULL;
			}

			page++;
			len -= bytes;
			data_len -= bytes;
			off = 0;
		}
	}

	rq->buffer = rq->data = NULL;
	rq->data_len = bufflen;
	return 0;

free_bios:
	while ((bio = rq->bio) != NULL) {
		rq->bio = bio->bi_next;
		/*
		 * call endio instead of bio_put incase it was bounced
		 */
		bio_endio(bio, 0);
	}

	return err;
}

/**
 * scsi_execute_async - insert request
 * @sdev:	scsi device
 * @cmd:	scsi command
 * @cmd_len:	length of scsi cdb
 * @data_direction: data direction
 * @buffer:	data buffer (this can be a kernel buffer or scatterlist)
 * @bufflen:	len of buffer
 * @use_sg:	if buffer is a scatterlist this is the number of elements
 * @timeout:	request timeout in seconds
 * @retries:	number of times to retry request
 * @flags:	or into request flags
 **/
int scsi_execute_async(struct scsi_device *sdev, const unsigned char *cmd,
		       int cmd_len, int data_direction, void *buffer, unsigned bufflen,
		       int use_sg, int timeout, int retries, void *privdata,
		       void (*done)(void *, char *, int, int), gfp_t gfp)
{
	struct request *req;
	struct scsi_io_context *sioc;
	int err = 0;
	int write = (data_direction == DMA_TO_DEVICE);

	sioc = kmem_cache_zalloc(scsi_io_context_cache, gfp);
	if (!sioc)
		return DRIVER_ERROR << 24;

	req = blk_get_request(sdev->request_queue, write, gfp);
	if (!req)
		goto free_sense;
	req->cmd_type = REQ_TYPE_BLOCK_PC;
	req->cmd_flags |= REQ_QUIET;

	/* map either a scatterlist or a flat kernel buffer */
	if (use_sg)
		err = scsi_req_map_sg(req, buffer, use_sg, bufflen, gfp);
	else if (bufflen)
		err = blk_rq_map_kern(req->q, req, buffer, bufflen, gfp);

	if (err)
		goto free_req;

	req->cmd_len = cmd_len;
	memset(req->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
	memcpy(req->cmd, cmd, req->cmd_len);
	req->sense = sioc->sense;
	req->sense_len = 0;
	req->timeout = timeout;
	req->retries = retries;
	req->end_io_data = sioc;

	sioc->data = privdata;
	sioc->done = done;

	/* fire and return; scsi_end_async() runs on completion */
	blk_execute_rq_nowait(req->q, NULL, req, 1, scsi_end_async);
	return 0;

free_req:
	blk_put_request(req);
free_sense:
	kmem_cache_free(scsi_io_context_cache, sioc);
	return DRIVER_ERROR << 24;
}
EXPORT_SYMBOL_GPL(scsi_execute_async);

/*
 * Function:    scsi_init_cmd_errh()
 *
 * Purpose:     Initialize cmd fields related to error handling.
 *
 * Arguments:   cmd	- command that is ready to be queued.
 *
 * Notes:       This function has the job of initializing a number of
 *              fields related to error handling.   Typically this will
 *              be called once for each command, as required.
 */
static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
	cmd->serial_number = 0;
	cmd->resid = 0;
	memset(cmd->sense_buffer, 0, sizeof cmd->sense_buffer);
	/* derive the CDB length from the opcode if not already set */
	if (cmd->cmd_len == 0)
		cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);
}

/*
 * Drop the busy counts a command held on its host and device, waking
 * the error handler if recovery is pending.  Note the hand-off from
 * host_lock to the queue lock while interrupts stay disabled via the
 * flags saved at the first irqsave.
 */
void scsi_device_unbusy(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	shost->host_busy--;
	if (unlikely(scsi_host_in_recovery(shost) &&
		     (shost->host_failed || shost->host_eh_scheduled)))
		scsi_eh_wakeup(shost);
	spin_unlock(shost->host_lock);
	spin_lock(sdev->request_queue->queue_lock);
	sdev->device_busy--;
	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}

/*
 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 * and call blk_run_queue for all the scsi_devices on the target -
 * including current_sdev first.
 *
 * Called with *no* scsi locks held.
 */
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
	struct Scsi_Host *shost = current_sdev->host;
	struct scsi_device *sdev, *tmp;
	struct scsi_target *starget = scsi_target(current_sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	starget->starget_sdev_user = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	/*
	 * Call blk_run_queue for all LUNs on the target, starting with
	 * current_sdev. We race with others (to set starget_sdev_user),
	 * but in most cases, we will be first. Ideally, each LU on the
	 * target would get some limited time or requests on the target.
	 */
	blk_run_queue(current_sdev->request_queue);

	spin_lock_irqsave(shost->host_lock, flags);
	if (starget->starget_sdev_user)
		goto out;
	list_for_each_entry_safe(sdev, tmp, &starget->devices,
			same_target_siblings) {
		if (sdev == current_sdev)
			continue;
		if (scsi_device_get(sdev))
			continue;

		/* drop the lock while running the sibling's queue */
		spin_unlock_irqrestore(shost->host_lock, flags);
		blk_run_queue(sdev->request_queue);
		spin_lock_irqsave(shost->host_lock, flags);

		scsi_device_put(sdev);
	}
 out:

/*
 * (End of page 1/5 of the captured listing. scsi_single_lun_run() is
 * truncated above at the "out:" label and continues on the next page;
 * viewer keyboard-shortcut chrome removed.)
 */