/*
 * Provenance: extracted from a web code viewer.
 * File: scsi_lib.c, Linux Kernel 2.6.9 for OMAP1710, C source,
 * 1,742 lines total; this is page 1 of 4. Viewer chrome removed.
 */
/*
 *  scsi_lib.c Copyright (C) 1999 Eric Youngdale
 *
 *  SCSI queueing library.
 *      Initial versions: Eric Youngdale (eric@andante.org).
 *                        Based upon conversations with large numbers
 *                        of people at Linux Expo.
 */

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>

#include <scsi/scsi.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_request.h>

#include "scsi_priv.h"
#include "scsi_logging.h"

/* Number of entries in scsi_sg_pools[], one mempool per sg-list size. */
#define SG_MEMPOOL_NR		(sizeof(scsi_sg_pools)/sizeof(struct scsi_host_sg_pool))
/* Minimum number of sg-list buffers each mempool keeps in reserve. */
#define SG_MEMPOOL_SIZE		32

/*
 * One pool of scatterlist allocations of a fixed entry count: a slab
 * cache for the backing objects plus a mempool drawn from that slab to
 * guarantee forward progress under memory pressure.
 */
struct scsi_host_sg_pool {
	size_t		size;	/* number of scatterlist entries */
	char		*name; 	/* slab cache name, e.g. "sgpool-32" */
	kmem_cache_t	*slab;
	mempool_t	*pool;
};

#if (SCSI_MAX_PHYS_SEGMENTS < 32)
#error SCSI_MAX_PHYS_SEGMENTS is too small
#endif

/* Expands to an initializer naming the pool after its entry count. */
#define SP(x) { x, "sgpool-" #x } 
struct scsi_host_sg_pool scsi_sg_pools[] = { 
	SP(8),
	SP(16),
	SP(32),
#if (SCSI_MAX_PHYS_SEGMENTS > 32)
	SP(64),
#if (SCSI_MAX_PHYS_SEGMENTS > 64)
	SP(128),
#if (SCSI_MAX_PHYS_SEGMENTS > 128)
	SP(256),
#if (SCSI_MAX_PHYS_SEGMENTS > 256)
#error SCSI_MAX_PHYS_SEGMENTS is too large
#endif
#endif
#endif
#endif
}; 	
#undef SP

/*
 * Function:    scsi_insert_special_req()
 *
 * Purpose:     Insert pre-formed request into request queue.
 *
 * Arguments:   sreq	- request that is ready to be queued.
 *              at_head	- boolean.  True if we should insert at head
 *                        of queue, false if we should insert at tail.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       This function is called from character device and from
 *              ioctl types of functions where the caller knows exactly
 *              what SCSI command needs to be issued.
 *              The idea is that
 *              we merely inject the command into the queue (at the head
 *              for now), and then call the queue request function to actually
 *              process it.
 */
int scsi_insert_special_req(struct scsi_request *sreq, int at_head)
{
	/*
	 * Because users of this function are apt to reuse requests with no
	 * modification, we have to sanitise the request flags here
	 */
	sreq->sr_request->flags &= ~REQ_DONTPREP;
	blk_insert_request(sreq->sr_device->request_queue, sreq->sr_request,
		       	   at_head, sreq, 0);
	return 0;
}

/*
 * Function:    scsi_queue_insert()
 *
 * Purpose:     Insert a command in the midlevel queue.
 *
 * Arguments:   cmd    - command that we are adding to queue.
 *              reason - why we are inserting command to queue.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     Nothing.
 *
 * Notes:       We do this for one of two cases.  Either the host is busy
 *              and it cannot accept any more commands for the time being,
 *              or the device returned QUEUE_FULL and can accept no more
 *              commands.
 * Notes:       This could be called either from an interrupt context or a
 *              normal process context.
 */
int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
	struct Scsi_Host *host = cmd->device->host;
	struct scsi_device *device = cmd->device;

	SCSI_LOG_MLQUEUE(1,
		 printk("Inserting command %p into mlqueue\n", cmd));

	/*
	 * We are inserting the command into the ml queue.  First, we
	 * cancel the timer, so it doesn't time out.
	 */
	scsi_delete_timer(cmd);

	/*
	 * Next, set the appropriate busy bit for the device/host.
	 *
	 * If the host/device isn't busy, assume that something actually
	 * completed, and that we should be able to queue a command now.
	 *
	 * Note that the prior mid-layer assumption that any host could
	 * always queue at least one command is now broken.
 *		The mid-layer
 *		will implement a user specifiable stall (see
 *		scsi_host.max_host_blocked and scsi_device.max_device_blocked)
 *		if a command is requeued with no other commands outstanding
 *		either for the device or for the host.
 */
	if (reason == SCSI_MLQUEUE_HOST_BUSY)
		host->host_blocked = host->max_host_blocked;
	else if (reason == SCSI_MLQUEUE_DEVICE_BUSY)
		device->device_blocked = device->max_device_blocked;

	/*
	 * Register the fact that we own the thing for now.
	 */
	cmd->state = SCSI_STATE_MLQUEUE;
	cmd->owner = SCSI_OWNER_MIDLEVEL;

	/*
	 * Decrement the counters, since these commands are no longer
	 * active on the host/device.
	 */
	scsi_device_unbusy(device);

	/*
	 * Insert this command at the head of the queue for its device.
	 * It will go before all other commands that are already in the queue.
	 *
	 * NOTE: there is magic here about the way the queue is plugged if
	 * we have no outstanding commands.
	 * 
	 * Although this *doesn't* plug the queue, it does call the request
	 * function.  The SCSI request function detects the blocked condition
	 * and plugs the queue appropriately.
	 */
	blk_insert_request(device->request_queue, cmd->request, 1, cmd, 1);
	return 0;
}

/*
 * Function:    scsi_do_req
 *
 * Purpose:     Queue a SCSI request
 *
 * Arguments:   sreq	  - command descriptor.
 *              cmnd      - actual SCSI command to be performed.
 *              buffer    - data buffer.
 *              bufflen   - size of data buffer.
 *              done      - completion function to be run.
 *              timeout   - how long to let it run before timeout.
 *              retries   - number of retries we allow.
 *
 * Lock status: No locks held upon entry.
 *
 * Returns:     Nothing.
 *
 * Notes:	This function is only used for queueing requests for things
 *		like ioctls and character device requests - this is because
 *		we essentially just inject a request into the queue for the
 *		device.
 *
 *		In order to support the scsi_device_quiesce function, we
 *		now inject requests on the *head* of the device queue
 *		rather than the tail.
 */
void scsi_do_req(struct scsi_request *sreq, const void *cmnd,
		 void *buffer, unsigned bufflen,
		 void (*done)(struct scsi_cmnd *),
		 int timeout, int retries)
{
	/*
	 * If the upper level driver is reusing these things, then
	 * we should release the low-level block now.  Another one will
	 * be allocated later when this request is getting queued.
	 */
	__scsi_release_request(sreq);

	/*
	 * Our own function scsi_done (which marks the host as not busy,
	 * disables the timeout counter, etc) will be called by us or by the
	 * scsi_hosts[host].queuecommand() function, and needs to also call
	 * the completion function for the high level driver.
	 */
	memcpy(sreq->sr_cmnd, cmnd, sizeof(sreq->sr_cmnd));
	sreq->sr_bufflen = bufflen;
	sreq->sr_buffer = buffer;
	sreq->sr_allowed = retries;
	sreq->sr_done = done;
	sreq->sr_timeout_per_command = timeout;

	if (sreq->sr_cmd_len == 0)
		sreq->sr_cmd_len = COMMAND_SIZE(sreq->sr_cmnd[0]);

	/*
	 * head injection *required* here otherwise quiesce won't work
	 */
	scsi_insert_special_req(sreq, 1);
} 

/*
 * Completion callback used by scsi_wait_req(): mark the request done,
 * release any block-layer tag it holds, and wake the sleeping submitter
 * through the completion stashed in req->waiting.
 */
static void scsi_wait_done(struct scsi_cmnd *cmd)
{
	struct request *req = cmd->request;
	struct request_queue *q = cmd->device->request_queue;
	unsigned long flags;

	req->rq_status = RQ_SCSI_DONE;	/* Busy, but indicate request done */

	spin_lock_irqsave(q->queue_lock, flags);
	if (blk_rq_tagged(req))
		blk_queue_end_tag(q, req);
	spin_unlock_irqrestore(q->queue_lock, flags);

	if (req->waiting)
		complete(req->waiting);
}

/*
 * Synchronous variant of scsi_do_req(): queue the command with
 * scsi_wait_done() as the completion routine and sleep until it runs.
 * May block; must be called from process context.
 */
void scsi_wait_req(struct scsi_request *sreq, const void *cmnd, void *buffer,
		   unsigned bufflen, int timeout, int retries)
{
	DECLARE_COMPLETION(wait);
	
	sreq->sr_request->waiting = &wait;
	sreq->sr_request->rq_status = RQ_SCSI_BUSY;
	scsi_do_req(sreq, cmnd, buffer, bufflen, scsi_wait_done,
			timeout, retries);
	wait_for_completion(&wait);
	sreq->sr_request->waiting = NULL;

	/* If completion never marked the request done, report a driver error. */
	if (sreq->sr_request->rq_status != RQ_SCSI_DONE)		
		sreq->sr_result |= (DRIVER_ERROR << 24);

	__scsi_release_request(sreq);
}

/*
 * Function:    scsi_init_cmd_errh()
 *
 * Purpose:     Initialize cmd fields related to error handling.
 *
 * Arguments:   cmd	- command that is ready to be queued.
 *
 * Returns:     Nothing
 *
 * Notes:       This function has the job of initializing a number of
 *              fields related to error handling.   Typically this will
 *              be called once for each command, as required.
 */
static int scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
	cmd->owner = SCSI_OWNER_MIDLEVEL;
	cmd->serial_number = 0;
	cmd->serial_number_at_timeout = 0;
	cmd->abort_reason = 0;

	memset(cmd->sense_buffer, 0, sizeof cmd->sense_buffer);

	if (cmd->cmd_len == 0)
		cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);

	/*
	 * We need saved copies of a number of fields - this is because
	 * error handling may need to overwrite these with different values
	 * to run different commands, and once error handling is complete,
	 * we will need to restore these values prior to running the actual
	 * command.
	 */
	cmd->old_use_sg = cmd->use_sg;
	cmd->old_cmd_len = cmd->cmd_len;
	cmd->sc_old_data_direction = cmd->sc_data_direction;
	cmd->old_underflow = cmd->underflow;
	memcpy(cmd->data_cmnd, cmd->cmnd, sizeof(cmd->cmnd));
	cmd->buffer = cmd->request_buffer;
	cmd->bufflen = cmd->request_bufflen;
	cmd->internal_timeout = NORMAL_TIMEOUT;
	cmd->abort_reason = 0;

	return 1;
}

/*
 * Function:   scsi_setup_cmd_retry()
 *
 * Purpose:    Restore the command state for a retry
 *
 * Arguments:  cmd	- command to be restored
 *
 * Returns:    Nothing
 *
 * Notes:      Immediately prior to retrying a command, we need
 *             to restore certain fields that we saved above.
 */
void scsi_setup_cmd_retry(struct scsi_cmnd *cmd)
{
	memcpy(cmd->cmnd, cmd->data_cmnd, sizeof(cmd->data_cmnd));
	cmd->request_buffer = cmd->buffer;
	cmd->request_bufflen = cmd->bufflen;
	cmd->use_sg = cmd->old_use_sg;
	cmd->cmd_len = cmd->old_cmd_len;
	cmd->sc_data_direction = cmd->sc_old_data_direction;
	cmd->underflow = cmd->old_underflow;
}

/*
 * Drop the busy counts taken for a now-finished command: one on the host
 * and one on the device.  If error recovery is in progress and there are
 * failed commands pending, wake the error handler.
 */
void scsi_device_unbusy(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	shost->host_busy--;
	if (unlikely(test_bit(SHOST_RECOVERY, &shost->shost_state) &&
		     shost->host_failed))
		scsi_eh_wakeup(shost);
	/*
	 * Hand the irq-disabled section over from host_lock to sdev_lock;
	 * the saved irq state in 'flags' is restored by the final unlock.
	 */
	spin_unlock(shost->host_lock);
	spin_lock(&sdev->sdev_lock);
	sdev->device_busy--;
	spin_unlock_irqrestore(&sdev->sdev_lock, flags);
}

/*
 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 * and call blk_run_queue for all the scsi_devices on the target -
 * including current_sdev first.
 *
 * Called with *no* scsi locks held.
 */
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
	struct Scsi_Host *shost = current_sdev->host;
	struct scsi_device *sdev, *tmp;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	current_sdev->sdev_target->starget_sdev_user = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	/*
	 * Call blk_run_queue for all LUNs on the target, starting with
	 * current_sdev. We race with others (to set starget_sdev_user),
	 * but in most cases, we will be first. Ideally, each LU on the
	 * target would get some limited time or requests on the target.
	 */
	blk_run_queue(current_sdev->request_queue);

	spin_lock_irqsave(shost->host_lock, flags);
	if (current_sdev->sdev_target->starget_sdev_user)
		goto out;
	list_for_each_entry_safe(sdev, tmp, &current_sdev->same_target_siblings,
			same_target_siblings) {
		if (scsi_device_get(sdev))
			continue;

		/* Drop the lock while running the sibling's queue; _safe
		 * iteration plus the reference keep 'sdev' valid meanwhile. */
		spin_unlock_irqrestore(shost->host_lock, flags);
		blk_run_queue(sdev->request_queue);
		spin_lock_irqsave(shost->host_lock, flags);
	
		/* NOTE(review): scsi_device_put() is called here with
		 * host_lock held - confirm it cannot sleep in this kernel
		 * version. */
		scsi_device_put(sdev);
	}
 out:
	spin_unlock_irqrestore(shost->host_lock, flags);
}

/*
 * Function:	scsi_run_queue()
 *
 * Purpose:	Select a proper request queue to serve next
 *
 * Arguments:	q	- last request's queue
 *
 * Returns:     Nothing
 *
 * Notes:	The previous command was completely finished, start
 *		a new one if possible.
 */
static void scsi_run_queue(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	if (sdev->single_lun)
		scsi_single_lun_run(sdev);

	spin_lock_irqsave(shost->host_lock, flags);
	while (!list_empty(&shost->starved_list) &&
	       !shost->host_blocked && !shost->host_self_blocked &&
		!((shost->can_queue > 0) &&
		  (shost->host_busy >= shost->can_queue))) {
		/*
		 * As long as shost is accepting commands and we have
		 * starved queues, call blk_run_queue. scsi_request_fn
		 * drops the queue_lock and can add us back to the
		 * starved_list.
		 *
		 * host_lock protects the starved_list and starved_entry.
		 * scsi_request_fn must get the host_lock before checking
		 * or modifying starved_list or starved_entry.
		 */
		sdev = list_entry(shost->starved_list.next,
					  struct scsi_device, starved_entry);
		list_del_init(&sdev->starved_entry);
		spin_unlock_irqrestore(shost->host_lock, flags);

/*
 * Viewer chrome from the code-hosting page (keyboard shortcut help,
 * translated): Copy code: Ctrl+C; Search code: Ctrl+F; Fullscreen: F11;
 * Increase font size: Ctrl+=; Decrease font size: Ctrl+-;
 * Show shortcuts: ?
 *
 * NOTE: the source is truncated at this point - scsi_run_queue() is cut
 * off mid-body; the remainder is on page 2 of the original file.
 */