lpfc_scsi.c
	abtsiocb = lpfc_sli_get_iocbq(phba);
	if (abtsiocb == NULL) {
		ret = FAILED;
		goto out;
	}

	/*
	 * The scsi command can not be in txq and it is in flight because the
	 * pCmd is still pointing at the SCSI command we have to abort. There
	 * is no need to search the txcmplq. Just send an abort to the FW.
	 */
	cmd = &iocb->iocb;
	icmd = &abtsiocb->iocb;
	icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
	icmd->un.acxri.abortContextTag = cmd->ulpContext;
	icmd->un.acxri.abortIoTag = cmd->ulpIoTag;

	icmd->ulpLe = 1;
	icmd->ulpClass = cmd->ulpClass;
	if (phba->hba_state >= LPFC_LINK_UP)
		icmd->ulpCommand = CMD_ABORT_XRI_CN;
	else
		icmd->ulpCommand = CMD_CLOSE_XRI_CN;

	abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
	if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) == IOCB_ERROR) {
		lpfc_sli_release_iocbq(phba, abtsiocb);
		ret = FAILED;
		goto out;
	}

	/* Wait for abort to complete */
	while (lpfc_cmd->pCmd == cmnd) {
		spin_unlock_irq(phba->host->host_lock);
		schedule_timeout_uninterruptible(LPFC_ABORT_WAIT * HZ);
		spin_lock_irq(phba->host->host_lock);
		if (++loop_count > (2 * phba->cfg_nodev_tmo) / LPFC_ABORT_WAIT)
			break;
	}

	if (lpfc_cmd->pCmd == cmnd) {
		ret = FAILED;
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
				"%d:0748 abort handler timed out waiting for "
				"abort to complete: ret %#x, ID %d, LUN %d, "
				"snum %#lx\n",
				phba->brd_no, ret, cmnd->device->id,
				cmnd->device->lun, cmnd->serial_number);
	}

 out:
	lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
			"%d:0749 SCSI layer issued abort device: ret %#x, "
			"ID %d, LUN %d, snum %#lx\n",
			phba->brd_no, ret, cmnd->device->id,
			cmnd->device->lun, cmnd->serial_number);

	return ret;
}

static int
lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
	int rc;

	spin_lock_irq(cmnd->device->host->host_lock);
	rc = __lpfc_abort_handler(cmnd);
	spin_unlock_irq(cmnd->device->host->host_lock);
	return rc;
}
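/*
 * Illustrative sketch, not part of the original driver: the abort handler
 * above hand-rolls a bounded poll -- drop the host lock, sleep, re-acquire,
 * and give up after roughly 2 * cfg_nodev_tmo seconds.  The reset paths
 * below repeat the same shape.  Factored out, the pattern could look like
 * the helper here; the helper name and the 'done' predicate are
 * hypothetical, while the locking and sleep calls are the same kernel
 * primitives the surrounding code already uses.
 */
static int
lpfc_poll_with_lock_dropped(struct lpfc_hba *phba, int wait_secs,
			    int (*done)(void *arg), void *arg)
{
	int loops = 0;

	/* Caller holds phba->host->host_lock, as the EH entry points do. */
	while (!done(arg)) {
		spin_unlock_irq(phba->host->host_lock);
		schedule_timeout_uninterruptible(wait_secs * HZ);
		spin_lock_irq(phba->host->host_lock);
		if (++loops > (2 * phba->cfg_nodev_tmo) / wait_secs)
			break;		/* bounded: never wait forever */
	}
	return done(arg);		/* nonzero when the condition was met */
}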
static int
__lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata[0];
	struct lpfc_scsi_buf *lpfc_cmd;
	struct lpfc_iocbq *iocbq, *iocbqrsp;
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	int ret = FAILED;
	int cnt, loopcnt;

	/*
	 * If target is not in a MAPPED state, delay the reset until
	 * target is rediscovered or nodev timeout expires.
	 */
	while (1) {
		if (!pnode)
			break;

		if (pnode->nlp_state != NLP_STE_MAPPED_NODE) {
			spin_unlock_irq(phba->host->host_lock);
			schedule_timeout_uninterruptible(msecs_to_jiffies(500));
			spin_lock_irq(phba->host->host_lock);
		}
		if ((pnode) && (pnode->nlp_state == NLP_STE_MAPPED_NODE))
			break;
	}

	lpfc_cmd = lpfc_sli_get_scsi_buf(phba);
	if (lpfc_cmd == NULL)
		goto out;

	lpfc_cmd->pCmd = cmnd;
	lpfc_cmd->timeout = 60;
	lpfc_cmd->scsi_hba = phba;

	ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_LUN_RESET);
	if (!ret)
		goto out_free_scsi_buf;

	iocbq = &lpfc_cmd->cur_iocbq;

	/* get a buffer for this IOCB command response */
	iocbqrsp = lpfc_sli_get_iocbq(phba);
	if (iocbqrsp == NULL)
		goto out_free_scsi_buf;

	ret = lpfc_sli_issue_iocb_wait(phba,
				       &phba->sli.ring[phba->sli.fcp_ring],
				       iocbq, iocbqrsp, lpfc_cmd->timeout);
	if (ret == IOCB_SUCCESS)
		ret = SUCCESS;

	lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
	lpfc_cmd->status = iocbqrsp->iocb.ulpStatus;
	if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT)
		if (lpfc_cmd->result & IOERR_DRVR_MASK)
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;

	/*
	 * All outstanding txcmplq I/Os should have been aborted by the
	 * target.  Unfortunately, some targets do not abide by this
	 * forcing the driver to double check.
	 */
	lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
			    cmnd->device->id, cmnd->device->lun, 0,
			    LPFC_CTX_LUN);

	loopcnt = 0;
	while ((cnt = lpfc_sli_sum_iocb(phba,
					&phba->sli.ring[phba->sli.fcp_ring],
					cmnd->device->id, cmnd->device->lun,
					LPFC_CTX_LUN))) {
		spin_unlock_irq(phba->host->host_lock);
		schedule_timeout_uninterruptible(LPFC_RESET_WAIT * HZ);
		spin_lock_irq(phba->host->host_lock);

		if (++loopcnt > (2 * phba->cfg_nodev_tmo) / LPFC_RESET_WAIT)
			break;
	}

	if (cnt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
				"%d:0719 LUN Reset I/O flush failure: "
				"cnt x%x\n", phba->brd_no, cnt);
		ret = FAILED;
	}

	lpfc_sli_release_iocbq(phba, iocbqrsp);

out_free_scsi_buf:
	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
			"%d:0713 SCSI layer issued LUN reset (%d, %d) "
			"Data: x%x x%x x%x\n",
			phba->brd_no, lpfc_cmd->pCmd->device->id,
			lpfc_cmd->pCmd->device->lun, ret, lpfc_cmd->status,
			lpfc_cmd->result);
	lpfc_release_scsi_buf(phba, lpfc_cmd);

out:
	return ret;
}

static int
lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
{
	int rc;

	spin_lock_irq(cmnd->device->host->host_lock);
	rc = __lpfc_reset_lun_handler(cmnd);
	spin_unlock_irq(cmnd->device->host->host_lock);
	return rc;
}
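/*
 * Illustrative sketch, not in the original source: the LUN-reset path above
 * folds firmware status into the driver's view in two steps -- ulpStatus
 * carries the IOCB completion status and un.ulpWord[4] the error detail.
 * A hedged restatement of that in-line decode, using the same fields and
 * constants (the helper name is hypothetical):
 */
static uint32_t
lpfc_decode_tmf_status(struct lpfc_scsi_buf *lpfc_cmd,
		       struct lpfc_iocbq *rspiocb)
{
	lpfc_cmd->result = rspiocb->iocb.un.ulpWord[4];
	lpfc_cmd->status = rspiocb->iocb.ulpStatus;

	/*
	 * A local reject whose detail word carries the driver-error bit is
	 * reported upward as a driver reject, exactly as done in-line by
	 * __lpfc_reset_lun_handler().
	 */
	if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
	    (lpfc_cmd->result & IOERR_DRVR_MASK))
		lpfc_cmd->status = IOSTAT_DRIVER_REJECT;

	return lpfc_cmd->status;
}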
/*
 * Note: midlayer calls this function with the host_lock held
 */
static int
__lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata[0];
	struct lpfc_nodelist *ndlp = NULL;
	int match;
	int ret = FAILED, i, err_count = 0;
	int cnt, loopcnt;
	unsigned int midlayer_id = 0;
	struct lpfc_scsi_buf *lpfc_cmd;

	lpfc_cmd = lpfc_sli_get_scsi_buf(phba);
	if (lpfc_cmd == NULL)
		goto out;

	/* The lpfc_cmd storage is reused.  Set all loop invariants. */
	lpfc_cmd->timeout = 60;
	lpfc_cmd->pCmd = cmnd;
	lpfc_cmd->scsi_hba = phba;

	/*
	 * Since the driver manages a single bus device, reset all
	 * targets known to the driver.  Should any target reset
	 * fail, this routine returns failure to the midlayer.
	 */
	midlayer_id = cmnd->device->id;
	for (i = 0; i < MAX_FCP_TARGET; i++) {
		/* Search the mapped list for this target ID */
		match = 0;
		list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
			if ((i == ndlp->nlp_sid) && ndlp->rport) {
				match = 1;
				break;
			}
		}
		if (!match)
			continue;

		lpfc_cmd->pCmd->device->id = i;
		lpfc_cmd->pCmd->device->hostdata = ndlp->rport->dd_data;
		ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba);
		if (ret != SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
					"%d:0713 Bus Reset on target %d "
					"failed\n", phba->brd_no, i);
			err_count++;
		}
	}

	cmnd->device->id = midlayer_id;

	loopcnt = 0;
	while ((cnt = lpfc_sli_sum_iocb(phba,
					&phba->sli.ring[phba->sli.fcp_ring],
					0, 0, LPFC_CTX_HOST))) {
		spin_unlock_irq(phba->host->host_lock);
		schedule_timeout_uninterruptible(LPFC_RESET_WAIT * HZ);
		spin_lock_irq(phba->host->host_lock);

		if (++loopcnt > (2 * phba->cfg_nodev_tmo) / LPFC_RESET_WAIT)
			break;
	}

	if (cnt) {
		/* flush all outstanding commands on the host */
		i = lpfc_sli_abort_iocb(phba,
					&phba->sli.ring[phba->sli.fcp_ring],
					0, 0, 0, LPFC_CTX_HOST);

		lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
				"%d:0715 Bus Reset I/O flush failure: "
				"cnt x%x left x%x\n",
				phba->brd_no, cnt, i);
	}

	if (cnt == 0)
		ret = SUCCESS;
	else
		ret = FAILED;

	lpfc_release_scsi_buf(phba, lpfc_cmd);
	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
			"%d:0714 SCSI layer issued Bus Reset Data: x%x\n",
			phba->brd_no, ret);

out:
	return ret;
}

static int
lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
{
	int rc;

	spin_lock_irq(cmnd->device->host->host_lock);
	rc = __lpfc_reset_bus_handler(cmnd);
	spin_unlock_irq(cmnd->device->host->host_lock);
	return rc;
}
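/*
 * Illustrative sketch, hypothetical helper not in the original file: the
 * bus-reset loop above scans phba->fc_nlpmap_list once per possible target
 * ID, accepting only mapped nodes that still have a live rport.  The in-line
 * lookup it performs is equivalent to:
 */
static struct lpfc_nodelist *
lpfc_find_mapped_node_by_sid(struct lpfc_hba *phba, unsigned int sid)
{
	struct lpfc_nodelist *ndlp;

	/* Caller must hold host_lock so the mapped list stays stable. */
	list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
		if (ndlp->nlp_sid == sid && ndlp->rport)
			return ndlp;	/* mapped target with a live rport */
	}
	return NULL;
}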
Reducing " "allocation request to %d.\n", phba->brd_no, num_to_alloc, phba->cfg_hba_queue_depth, (phba->cfg_hba_queue_depth - total)); num_to_alloc = phba->cfg_hba_queue_depth - total; } for (i = 0; i < num_to_alloc; i++) { scsi_buf = lpfc_new_scsi_buf(phba); if (!scsi_buf) { lpfc_printf_log(phba, KERN_ERR, LOG_FCP, "%d:0706 Failed to allocate command " "buffer\n", phba->brd_no); break; } spin_lock_irqsave(phba->host->host_lock, flags); phba->total_scsi_bufs++; list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list); spin_unlock_irqrestore(phba->host->host_lock, flags); } return 0;}static intlpfc_slave_configure(struct scsi_device *sdev){ struct lpfc_hba *phba = (struct lpfc_hba *) sdev->host->hostdata[0]; struct fc_rport *rport = starget_to_rport(sdev->sdev_target); if (sdev->tagged_supported) scsi_activate_tcq(sdev, phba->cfg_lun_queue_depth); else scsi_deactivate_tcq(sdev, phba->cfg_lun_queue_depth); /* * Initialize the fc transport attributes for the target * containing this scsi device. Also note that the driver's * target pointer is stored in the starget_data for the * driver's sysfs entry point functions. */ rport->dev_loss_tmo = phba->cfg_nodev_tmo + 5; return 0;}static voidlpfc_slave_destroy(struct scsi_device *sdev){ sdev->hostdata = NULL; return;}struct scsi_host_template lpfc_template = { .module = THIS_MODULE, .name = LPFC_DRIVER_NAME, .info = lpfc_info, .queuecommand = lpfc_queuecommand, .eh_abort_handler = lpfc_abort_handler, .eh_device_reset_handler= lpfc_reset_lun_handler, .eh_bus_reset_handler = lpfc_reset_bus_handler, .slave_alloc = lpfc_slave_alloc, .slave_configure = lpfc_slave_configure, .slave_destroy = lpfc_slave_destroy, .this_id = -1, .sg_tablesize = LPFC_SG_SEG_CNT, .cmd_per_lun = LPFC_CMD_PER_LUN, .use_clustering = ENABLE_CLUSTERING, .shost_attrs = lpfc_host_attrs, .max_sectors = 0xFFFF,};