lpfc_scsi.c

From "Linux kernel source code" · C code · 1,469 lines total · page 1 of 3

C
1,469
字号
/******************************************************************* * This file is part of the Emulex Linux Device Driver for         * * Fibre Channel Host Bus Adapters.                                * * Copyright (C) 2004-2007 Emulex.  All rights reserved.           * * EMULEX and SLI are trademarks of Emulex.                        * * www.emulex.com                                                  * * Portions Copyright (C) 2004-2005 Christoph Hellwig              * *                                                                 * * This program is free software; you can redistribute it and/or   * * modify it under the terms of version 2 of the GNU General       * * Public License as published by the Free Software Foundation.    * * This program is distributed in the hope that it will be useful. * * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          * * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  * * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      * * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * * TO BE LEGALLY INVALID.  See the GNU General Public License for  * * more details, a copy of which can be found in the file COPYING  * * included with this package.                                     * *******************************************************************/#include <linux/pci.h>#include <linux/interrupt.h>#include <linux/delay.h>#include <scsi/scsi.h>#include <scsi/scsi_device.h>#include <scsi/scsi_host.h>#include <scsi/scsi_tcq.h>#include <scsi/scsi_transport_fc.h>#include "lpfc_version.h"#include "lpfc_hw.h"#include "lpfc_sli.h"#include "lpfc_disc.h"#include "lpfc_scsi.h"#include "lpfc.h"#include "lpfc_logmsg.h"#include "lpfc_crtn.h"#include "lpfc_vport.h"#define LPFC_RESET_WAIT  2#define LPFC_ABORT_WAIT  2/* * This function is called with no lock held when there is a resource * error in driver or in firmware. 
*/
/*
 * Record a resource error and, at most once per QUEUE_RAMP_DOWN_INTERVAL,
 * schedule the worker thread to ramp the SCSI queue depths down.
 * Called with no locks held; takes hbalock and the port's work_port_lock
 * separately (never nested).
 */
void
lpfc_adjust_queue_depth(struct lpfc_hba *phba)
{
	unsigned long flags;

	spin_lock_irqsave(&phba->hbalock, flags);
	atomic_inc(&phba->num_rsrc_err);
	phba->last_rsrc_error_time = jiffies;

	/* Rate-limit: only one ramp-down event per interval. */
	if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}

	phba->last_ramp_down_time = jiffies;

	spin_unlock_irqrestore(&phba->hbalock, flags);

	/* Post the ramp-down event for the worker thread. */
	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	if ((phba->pport->work_port_events &
		WORKER_RAMP_DOWN_QUEUE) == 0) {
		phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
	}
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	/* Wake the worker thread if one is waiting. */
	spin_lock_irqsave(&phba->hbalock, flags);
	if (phba->work_wait)
		wake_up(phba->work_wait);
	spin_unlock_irqrestore(&phba->hbalock, flags);

	return;
}

/*
 * This function is called with no lock held when there is a successful
 * SCSI command completion.  It counts the success and, at most once per
 * QUEUE_RAMP_UP_INTERVAL (and only if no resource error occurred within
 * that interval), schedules the worker thread to ramp queue depths up.
 */
static inline void
lpfc_rampup_queue_depth(struct lpfc_vport  *vport,
			struct scsi_device *sdev)
{
	unsigned long flags;
	struct lpfc_hba *phba = vport->phba;
	atomic_inc(&phba->num_cmd_success);

	/* Nothing to do if this device is already at the configured depth. */
	if (vport->cfg_lun_queue_depth <= sdev->queue_depth)
		return;
	spin_lock_irqsave(&phba->hbalock, flags);
	if (((phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) > jiffies) ||
	 ((phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL ) > jiffies)) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}
	phba->last_ramp_up_time = jiffies;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	/* Post the ramp-up event for the worker thread. */
	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	if ((phba->pport->work_port_events &
		WORKER_RAMP_UP_QUEUE) == 0) {
		phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
	}
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	/* Wake the worker thread if one is waiting. */
	spin_lock_irqsave(&phba->hbalock, flags);
	if (phba->work_wait)
		wake_up(phba->work_wait);
	spin_unlock_irqrestore(&phba->hbalock, flags);
}

/*
 * Worker-thread handler for WORKER_RAMP_DOWN_QUEUE.  Reduces every scsi
 * device's queue depth on every vport in proportion to the ratio of
 * resource errors to total (errors + successes) seen since the last run,
 * then resets both counters.
 */
void
lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host  *shost;
	struct scsi_device *sdev;
	unsigned long new_queue_depth;
	unsigned long num_rsrc_err, num_cmd_success;
	int i;

	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
	num_cmd_success = atomic_read(&phba->num_cmd_success);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				/*
				 * NOTE(review): divides by (num_rsrc_err +
				 * num_cmd_success).  This appears to rely on
				 * lpfc_adjust_queue_depth() having bumped
				 * num_rsrc_err before posting this event --
				 * TODO confirm the sum can never be zero here.
				 */
				new_queue_depth =
					sdev->queue_depth * num_rsrc_err /
					(num_rsrc_err + num_cmd_success);
				/* Always drop by at least one. */
				if (!new_queue_depth)
					new_queue_depth = sdev->queue_depth - 1;
				else
					new_queue_depth = sdev->queue_depth -
								new_queue_depth;
				if (sdev->ordered_tags)
					scsi_adjust_queue_depth(sdev,
							MSG_ORDERED_TAG,
							new_queue_depth);
				else
					scsi_adjust_queue_depth(sdev,
							MSG_SIMPLE_TAG,
							new_queue_depth);
			}
		}
	lpfc_destroy_vport_work_array(vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}

/*
 * Worker-thread handler for WORKER_RAMP_UP_QUEUE.  Bumps every scsi
 * device's queue depth by one on every vport, then resets the resource
 * error and command success counters.
 */
void
lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host  *shost;
	struct scsi_device *sdev;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for(i = 0; i < LPFC_MAX_VPORTS && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				if (sdev->ordered_tags)
					scsi_adjust_queue_depth(sdev,
							MSG_ORDERED_TAG,
							sdev->queue_depth+1);
				else
					scsi_adjust_queue_depth(sdev,
							MSG_SIMPLE_TAG,
							sdev->queue_depth+1);
			}
		}
	lpfc_destroy_vport_work_array(vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}

/*
 * This routine allocates a scsi buffer, which contains all the necessary
 * information needed to initiate a SCSI I/O.  The non-DMAable buffer region
 * contains information to build the IOCB.  The DMAable region contains
 * memory for the FCP CMND, FCP RSP, and the initial BPL.
In addition to
 * allocating memory, the FCP CMND and FCP RSP BDEs are setup in the BPL
 * and the BPL BDE is setup in the IOCB.
 *
 * Returns the new lpfc_scsi_buf on success, NULL on any allocation or
 * iotag failure (all partial allocations are released before returning).
 */
static struct lpfc_scsi_buf *
lpfc_new_scsi_buf(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb;
	struct ulp_bde64 *bpl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys;
	uint16_t iotag;

	psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
	if (!psb)
		return NULL;

	/*
	 * Get memory from the pci pool to map the virt space to pci bus space
	 * for an I/O.  The DMA buffer includes space for the struct fcp_cmnd,
	 * struct fcp_rsp and the number of bde's necessary to support the
	 * sg_tablesize.
	 */
	psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL,
							&psb->dma_handle);
	if (!psb->data) {
		kfree(psb);
		return NULL;
	}

	/* Initialize virtual ptrs to dma_buf region. */
	memset(psb->data, 0, phba->cfg_sg_dma_buf_size);

	/* Allocate iotag for psb->cur_iocbq. */
	iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
	if (iotag == 0) {
		/* No iotag available: unwind the two allocations above. */
		pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
			      psb->data, psb->dma_handle);
		kfree (psb);
		return NULL;
	}
	psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;

	/* Carve the DMA region into cmnd, rsp, then the BPL. */
	psb->fcp_cmnd = psb->data;
	psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
	psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
							sizeof(struct fcp_rsp);

	/* Initialize local short-hand pointers. */
	bpl = psb->fcp_bpl;
	pdma_phys = psb->dma_handle;

	/*
	 * The first two bdes are the FCP_CMD and FCP_RSP.  The balance are sg
	 * list bdes.  Initialize the first two and leave the rest for
	 * queuecommand.
	 *
	 * NOTE(review): le32_to_cpu() is used here where cpu_to_le32() would
	 * be the conventional direction for filling a little-endian BDE; the
	 * two are the same operation on all architectures, but confirm intent
	 * before changing.
	 */
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
	bpl->tus.f.bdeSize = sizeof (struct fcp_cmnd);
	bpl->tus.f.bdeFlags = BUFF_USE_CMND;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);
	bpl++;

	/* Setup the physical region for the FCP RSP */
	pdma_phys += sizeof (struct fcp_cmnd);
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
	bpl->tus.f.bdeSize = sizeof (struct fcp_rsp);
	bpl->tus.f.bdeFlags = (BUFF_USE_CMND | BUFF_USE_RCV);
	bpl->tus.w = le32_to_cpu(bpl->tus.w);

	/*
	 * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf,
	 * initialize it with all known data now.
	 */
	pdma_phys += (sizeof (struct fcp_rsp));
	iocb = &psb->cur_iocbq.iocb;
	iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
	iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys);
	iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys);
	/* Two BDEs so far: FCP CMND + FCP RSP. */
	iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
	iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDL;
	iocb->ulpBdeCount = 1;
	iocb->ulpClass = CLASS3;

	return psb;
}

/*
 * Remove and return one lpfc_scsi_buf from the adapter's free list, or
 * NULL if the list is empty.  Resets the buffer's per-I/O bookkeeping
 * (seg_cnt, nonsg_phys).  Takes scsi_buf_list_lock internally.
 */
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf(struct lpfc_hba * phba)
{
	struct  lpfc_scsi_buf * lpfc_cmd = NULL;
	struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
	if (lpfc_cmd) {
		lpfc_cmd->seg_cnt = 0;
		lpfc_cmd->nonsg_phys = 0;
	}
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	return  lpfc_cmd;
}

/*
 * Return an lpfc_scsi_buf to the adapter's free list, dropping its
 * association with any scsi command.  Takes scsi_buf_list_lock internally.
 */
static void
lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	psb->pCmd = NULL;
	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
}

/*
 * DMA-map the scsi command's scatter-gather list and build one BPL data
 * BDE per mapped segment (after the two fixed cmnd/rsp BDEs), then finish
 * the IOCB's BDL size fields and the FCP data length.
 *
 * Returns 0 on success, 1 if the mapping failed or produced more segments
 * than the configured cfg_sg_seg_cnt limit (mapping is undone in that case).
 */
static int
lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	dma_addr_t physaddr;
	uint32_t i, num_bde = 0;
	int nseg, datadir = scsi_cmnd->sc_data_direction;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from dma_map_sg
		 * because this is a count of dma-mappings used to map the
		 * use_sg pages.  They are not guaranteed to be the same for
		 * those architectures that implement an IOMMU.
		 */
		nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
				  scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!nseg))
			return 1;

		lpfc_cmd->seg_cnt = nseg;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			/* NOTE(review): format string lacks a trailing \n. */
			printk(KERN_ERR "%s: Too many sg segments from "
			       "dma_map_sg.  Config %d, seg_cnt %d",
			       __FUNCTION__, phba->cfg_sg_seg_cnt,
			       lpfc_cmd->seg_cnt);
			scsi_dma_unmap(scsi_cmnd);
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment count
		 * during probe that limits the number of sg elements in any
		 * single scsi command.  Just run through the seg_cnt and format
		 * the bde's.
		 */
		scsi_for_each_sg(scsi_cmnd, sgel, nseg, i) {
			physaddr = sg_dma_address(sgel);
			bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
			bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
			bpl->tus.f.bdeSize = sg_dma_len(sgel);
			/* Reads from the device need the receive flag set. */
			if (datadir == DMA_TO_DEVICE)
				bpl->tus.f.bdeFlags = 0;
			else
				bpl->tus.f.bdeFlags = BUFF_USE_RCV;
			bpl->tus.w = le32_to_cpu(bpl->tus.w);
			bpl++;
			num_bde++;
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that the bdeSize is explicitly
	 * reinitialized since all iocb memory resources are used many times
	 * for transmit, receive, and continuation bpl's.
	 */
	iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
	iocb_cmd->un.fcpi64.bdl.bdeSize +=
		(num_bde * sizeof (struct ulp_bde64));
	iocb_cmd->ulpBdeCount = 1;
	iocb_cmd->ulpLe = 1;
	fcp_cmnd->fcpDl = be32_to_cpu(scsi_bufflen(scsi_cmnd));
	return 0;
}

/*
 * Undo the DMA mapping established by lpfc_scsi_prep_dma_buf(), if any.
 */
static void
lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
{
	/*
	 * There are only two special cases to consider.  (1) the scsi command
	 * requested scatter-gather usage or (2) the scsi command allocated
	 * a request buffer, but did not request use_sg.  There is a third
	 * case, but it does not require resource deallocation.
	 */
	if (psb->seg_cnt > 0)
		scsi_dma_unmap(psb->pCmd);
}

/*
 * Translate an FCP response into scsi midlayer status for the command
 * attached to lpfc_cmd.  (Continues beyond this view.)
 */
static void
lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
		    struct lpfc_iocbq *rsp_iocb)
{
	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
	uint32_t resp_info = fcprsp->rspStatus2;
	uint32_t scsi_status = fcprsp->rspStatus3;
	uint32_t *lp;
	uint32_t host_status = DID_OK;
	uint32_t rsplen = 0;
	uint32_t logit = LOG_FCP | LOG_FCP_ERROR;

	/*
	 *  If this is a task management command, there is no
	 *  scsi packet associated with this lpfc_cmd.  The driver
	 *  consumes it.
*/	if (fcpcmd->fcpCntl2) {		scsi_status = 0;		goto out;	}	if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {		uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);		if (snslen > SCSI_SENSE_BUFFERSIZE)			snslen = SCSI_SENSE_BUFFERSIZE;		if (resp_info & RSP_LEN_VALID)		  rsplen = be32_to_cpu(fcprsp->rspRspLen);		memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);	}	lp = (uint32_t *)cmnd->sense_buffer;	if (!scsi_status && (resp_info & RESID_UNDER))		logit = LOG_FCP;	lpfc_printf_vlog(vport, KERN_WARNING, logit,			 "0730 FCP command x%x failed: x%x SNS x%x x%x "			 "Data: x%x x%x x%x x%x x%x\n",			 cmnd->cmnd[0], scsi_status,			 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,			 be32_to_cpu(fcprsp->rspResId),			 be32_to_cpu(fcprsp->rspSnsLen),			 be32_to_cpu(fcprsp->rspRspLen),			 fcprsp->rspInfo3);	if (resp_info & RSP_LEN_VALID) {		rsplen = be32_to_cpu(fcprsp->rspRspLen);		if ((rsplen != 0 && rsplen != 4 && rsplen != 8) ||		    (fcprsp->rspInfo3 != RSP_NO_FAILURE)) {			host_status = DID_ERROR;			goto out;		}	}	scsi_set_resid(cmnd, 0);	if (resp_info & RESID_UNDER) {		scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,				 "0716 FCP Read Underrun, expected %d, "				 "residual %d Data: x%x x%x x%x\n",				 be32_to_cpu(fcpcmd->fcpDl),				 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],				 cmnd->underflow);		/*		 * If there is an under run check if under run reported by		 * storage array is same as the under run reported by HBA.		 * If this is not same, there is a dropped frame.		 
*/		if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&			fcpi_parm &&			(scsi_get_resid(cmnd) != fcpi_parm)) {			lpfc_printf_vlog(vport, KERN_WARNING,					 LOG_FCP | LOG_FCP_ERROR,					 "0735 FCP Read Check Error "					 "and Underrun Data: x%x x%x x%x x%x\n",					 be32_to_cpu(fcpcmd->fcpDl),					 scsi_get_resid(cmnd), fcpi_parm,					 cmnd->cmnd[0]);			scsi_set_resid(cmnd, scsi_bufflen(cmnd));			host_status = DID_ERROR;		}		/*		 * The cmnd->underflow is the minimum number of bytes that must		 * be transfered for this command.  Provided a sense condition		 * is not present, make sure the actual amount transferred is at

⌨️ Keyboard shortcuts

Copy code: Ctrl + C
Search code: Ctrl + F
Full-screen mode: F11
Increase font size: Ctrl + =
Decrease font size: Ctrl + -
Show shortcuts: ?