/* lpfc_scsi.c */
/******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * * Copyright (C) 2004-2005 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * * Public License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful. * * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * * TO BE LEGALLY INVALID. See the GNU General Public License for * * more details, a copy of which can be found in the file COPYING * * included with this package. * *******************************************************************/#include <linux/pci.h>#include <linux/interrupt.h>#include <scsi/scsi.h>#include <scsi/scsi_device.h>#include <scsi/scsi_host.h>#include <scsi/scsi_tcq.h>#include <scsi/scsi_transport_fc.h>#include "lpfc_version.h"#include "lpfc_hw.h"#include "lpfc_sli.h"#include "lpfc_disc.h"#include "lpfc_scsi.h"#include "lpfc.h"#include "lpfc_logmsg.h"#include "lpfc_crtn.h"#define LPFC_RESET_WAIT 2#define LPFC_ABORT_WAIT 2/* * This routine allocates a scsi buffer, which contains all the necessary * information needed to initiate a SCSI I/O. The non-DMAable buffer region * contains information to build the IOCB. The DMAable region contains * memory for the FCP CMND, FCP RSP, and the inital BPL. In addition to * allocating memeory, the FCP CMND and FCP RSP BDEs are setup in the BPL * and the BPL BDE is setup in the IOCB. 
*/static struct lpfc_scsi_buf *lpfc_new_scsi_buf(struct lpfc_hba * phba){ struct lpfc_scsi_buf *psb; struct ulp_bde64 *bpl; IOCB_t *iocb; dma_addr_t pdma_phys; uint16_t iotag; psb = kmalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL); if (!psb) return NULL; memset(psb, 0, sizeof (struct lpfc_scsi_buf)); psb->scsi_hba = phba; /* * Get memory from the pci pool to map the virt space to pci bus space * for an I/O. The DMA buffer includes space for the struct fcp_cmnd, * struct fcp_rsp and the number of bde's necessary to support the * sg_tablesize. */ psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL, &psb->dma_handle); if (!psb->data) { kfree(psb); return NULL; } /* Initialize virtual ptrs to dma_buf region. */ memset(psb->data, 0, phba->cfg_sg_dma_buf_size); /* Allocate iotag for psb->cur_iocbq. */ iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq); if (iotag == 0) { pci_pool_free(phba->lpfc_scsi_dma_buf_pool, psb->data, psb->dma_handle); kfree (psb); return NULL; } psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP; psb->fcp_cmnd = psb->data; psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd); psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp); /* Initialize local short-hand pointers. */ bpl = psb->fcp_bpl; pdma_phys = psb->dma_handle; /* * The first two bdes are the FCP_CMD and FCP_RSP. The balance are sg * list bdes. Initialize the first two and leave the rest for * queuecommand. 
*/ bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys)); bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys)); bpl->tus.f.bdeSize = sizeof (struct fcp_cmnd); bpl->tus.f.bdeFlags = BUFF_USE_CMND; bpl->tus.w = le32_to_cpu(bpl->tus.w); bpl++; /* Setup the physical region for the FCP RSP */ pdma_phys += sizeof (struct fcp_cmnd); bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys)); bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys)); bpl->tus.f.bdeSize = sizeof (struct fcp_rsp); bpl->tus.f.bdeFlags = (BUFF_USE_CMND | BUFF_USE_RCV); bpl->tus.w = le32_to_cpu(bpl->tus.w); /* * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf, * initialize it with all known data now. */ pdma_phys += (sizeof (struct fcp_rsp)); iocb = &psb->cur_iocbq.iocb; iocb->un.fcpi64.bdl.ulpIoTag32 = 0; iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys); iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys); iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64)); iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDL; iocb->ulpBdeCount = 1; iocb->ulpClass = CLASS3; return psb;}struct lpfc_scsi_buf*lpfc_sli_get_scsi_buf(struct lpfc_hba * phba){ struct lpfc_scsi_buf * lpfc_cmd = NULL; struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list; list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list); return lpfc_cmd;}static voidlpfc_release_scsi_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb){ /* * There are only two special cases to consider. (1) the scsi command * requested scatter-gather usage or (2) the scsi command allocated * a request buffer, but did not request use_sg. There is a third * case, but it does not require resource deallocation. 
*/ if ((psb->seg_cnt > 0) && (psb->pCmd->use_sg)) { dma_unmap_sg(&phba->pcidev->dev, psb->pCmd->request_buffer, psb->seg_cnt, psb->pCmd->sc_data_direction); } else { if ((psb->nonsg_phys) && (psb->pCmd->request_bufflen)) { dma_unmap_single(&phba->pcidev->dev, psb->nonsg_phys, psb->pCmd->request_bufflen, psb->pCmd->sc_data_direction); } } psb->pCmd = NULL; list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);}static intlpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd){ struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; struct scatterlist *sgel = NULL; struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl; IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; dma_addr_t physaddr; uint32_t i, num_bde = 0; int datadir = scsi_cmnd->sc_data_direction; int dma_error; /* * There are three possibilities here - use scatter-gather segment, use * the single mapping, or neither. Start the lpfc command prep by * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first * data bde entry. */ bpl += 2; if (scsi_cmnd->use_sg) { /* * The driver stores the segment count returned from pci_map_sg * because this a count of dma-mappings used to map the use_sg * pages. They are not guaranteed to be the same for those * architectures that implement an IOMMU. */ sgel = (struct scatterlist *)scsi_cmnd->request_buffer; lpfc_cmd->seg_cnt = dma_map_sg(&phba->pcidev->dev, sgel, scsi_cmnd->use_sg, datadir); if (lpfc_cmd->seg_cnt == 0) return 1; if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { printk(KERN_ERR "%s: Too many sg segments from " "dma_map_sg. Config %d, seg_cnt %d", __FUNCTION__, phba->cfg_sg_seg_cnt, lpfc_cmd->seg_cnt); dma_unmap_sg(&phba->pcidev->dev, sgel, lpfc_cmd->seg_cnt, datadir); return 1; } /* * The driver established a maximum scatter-gather segment count * during probe that limits the number of sg elements in any * single scsi command. Just run through the seg_cnt and format * the bde's. 
*/ for (i = 0; i < lpfc_cmd->seg_cnt; i++) { physaddr = sg_dma_address(sgel); bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr)); bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr)); bpl->tus.f.bdeSize = sg_dma_len(sgel); if (datadir == DMA_TO_DEVICE) bpl->tus.f.bdeFlags = 0; else bpl->tus.f.bdeFlags = BUFF_USE_RCV; bpl->tus.w = le32_to_cpu(bpl->tus.w); bpl++; sgel++; num_bde++; } } else if (scsi_cmnd->request_buffer && scsi_cmnd->request_bufflen) { physaddr = dma_map_single(&phba->pcidev->dev, scsi_cmnd->request_buffer, scsi_cmnd->request_bufflen, datadir); dma_error = dma_mapping_error(physaddr); if (dma_error) { lpfc_printf_log(phba, KERN_ERR, LOG_FCP, "%d:0718 Unable to dma_map_single " "request_buffer: x%x\n", phba->brd_no, dma_error); return 1; } lpfc_cmd->nonsg_phys = physaddr; bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr)); bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr)); bpl->tus.f.bdeSize = scsi_cmnd->request_bufflen; if (datadir == DMA_TO_DEVICE) bpl->tus.f.bdeFlags = 0; else bpl->tus.f.bdeFlags = BUFF_USE_RCV; bpl->tus.w = le32_to_cpu(bpl->tus.w); num_bde = 1; bpl++; } /* * Finish initializing those IOCB fields that are dependent on the * scsi_cmnd request_buffer. Note that the bdeSize is explicitly * reinitialized since all iocb memory resources are used many times * for transmit, receive, and continuation bpl's. 
*/ iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64)); iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof (struct ulp_bde64)); iocb_cmd->ulpBdeCount = 1; iocb_cmd->ulpLe = 1; fcp_cmnd->fcpDl = be32_to_cpu(scsi_cmnd->request_bufflen); return 0;}static voidlpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd){ struct scsi_cmnd *cmnd = lpfc_cmd->pCmd; struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd; struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp; struct lpfc_hba *phba = lpfc_cmd->scsi_hba; uint32_t fcpi_parm = lpfc_cmd->cur_iocbq.iocb.un.fcpi.fcpi_parm; uint32_t resp_info = fcprsp->rspStatus2; uint32_t scsi_status = fcprsp->rspStatus3; uint32_t host_status = DID_OK; uint32_t rsplen = 0; /* * If this is a task management command, there is no * scsi packet associated with this lpfc_cmd. The driver * consumes it. */ if (fcpcmd->fcpCntl2) { scsi_status = 0; goto out; } lpfc_printf_log(phba, KERN_WARNING, LOG_FCP, "%d:0730 FCP command failed: RSP " "Data: x%x x%x x%x x%x x%x x%x\n", phba->brd_no, resp_info, scsi_status, be32_to_cpu(fcprsp->rspResId), be32_to_cpu(fcprsp->rspSnsLen), be32_to_cpu(fcprsp->rspRspLen), fcprsp->rspInfo3); if (resp_info & RSP_LEN_VALID) { rsplen = be32_to_cpu(fcprsp->rspRspLen); if ((rsplen != 0 && rsplen != 4 && rsplen != 8) || (fcprsp->rspInfo3 != RSP_NO_FAILURE)) { host_status = DID_ERROR; goto out; } } if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) { uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen); if (snslen > SCSI_SENSE_BUFFERSIZE) snslen = SCSI_SENSE_BUFFERSIZE; memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen); } cmnd->resid = 0; if (resp_info & RESID_UNDER) { cmnd->resid = be32_to_cpu(fcprsp->rspResId); lpfc_printf_log(phba, KERN_INFO, LOG_FCP, "%d:0716 FCP Read Underrun, expected %d, " "residual %d Data: x%x x%x x%x\n", phba->brd_no, be32_to_cpu(fcpcmd->fcpDl), cmnd->resid, fcpi_parm, cmnd->cmnd[0], cmnd->underflow); /* * The cmnd->underflow is the minimum number of bytes that must * be transfered for this 
command. Provided a sense condition * is not present, make sure the actual amount transferred is at * least the underflow value or fail. */ if (!(resp_info & SNS_LEN_VALID) && (scsi_status == SAM_STAT_GOOD) && (cmnd->request_bufflen - cmnd->resid) < cmnd->underflow) { lpfc_printf_log(phba, KERN_INFO, LOG_FCP, "%d:0717 FCP command x%x residual " "underrun converted to error " "Data: x%x x%x x%x\n", phba->brd_no, cmnd->cmnd[0], cmnd->request_bufflen, cmnd->resid, cmnd->underflow); host_status = DID_ERROR; } } else if (resp_info & RESID_OVER) { lpfc_printf_log(phba, KERN_WARNING, LOG_FCP, "%d:0720 FCP command x%x residual " "overrun error. Data: x%x x%x \n", phba->brd_no, cmnd->cmnd[0], cmnd->request_bufflen, cmnd->resid); host_status = DID_ERROR; /* * Check SLI validation that all the transfer was actually done * (fcpi_parm should be zero). Apply check only to reads. */ } else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm && (cmnd->sc_data_direction == DMA_FROM_DEVICE)) { lpfc_printf_log(phba, KERN_WARNING, LOG_FCP, "%d:0734 FCP Read Check Error Data: " "x%x x%x x%x x%x\n", phba->brd_no, be32_to_cpu(fcpcmd->fcpDl), be32_to_cpu(fcprsp->rspResId), fcpi_parm, cmnd->cmnd[0]); host_status = DID_ERROR; cmnd->resid = cmnd->request_bufflen; } out: cmnd->result = ScsiResult(host_status, scsi_status);}static voidlpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, struct lpfc_iocbq *pIocbOut){ struct lpfc_scsi_buf *lpfc_cmd = (struct lpfc_scsi_buf *) pIocbIn->context1; struct lpfc_rport_data *rdata = lpfc_cmd->rdata; struct lpfc_nodelist *pnode = rdata->pnode; struct scsi_cmnd *cmd = lpfc_cmd->pCmd; unsigned long iflag; lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4]; lpfc_cmd->status = pIocbOut->iocb.ulpStatus;