📄 lpfc_sli.c
字号:
/*
 * NOTE(review): this opening brace is the tail of a function whose signature
 * lies before this chunk — presumably lpfc_sli_ringtx_put (the call in
 * lpfc_sli_issue_iocb below passes (phba, pring, piocb), matching this body).
 * TODO confirm against the full file.
 */
{
	/* Insert the caller's iocb in the txq tail for later processing. */
	list_add_tail(&piocb->list, &pring->txq);
	pring->txq_cnt++;
	return (0);
}

/*
 * Pick the next iocb to hand to the adapter: anything already waiting on
 * the ring's txq goes first; only when the txq is empty is the caller's
 * iocb (*piocb) consumed, in which case *piocb is cleared so the caller
 * can tell it was taken.
 */
static struct lpfc_iocbq *
lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		   struct lpfc_iocbq **piocb)
{
	struct lpfc_iocbq *nextiocb;

	nextiocb = lpfc_sli_ringtx_get(phba, pring);
	if (!nextiocb) {
		nextiocb = *piocb;
		*piocb = NULL;
	}

	return nextiocb;
}

/*
 * Issue an iocb command to the HBA on the given ring.
 *
 * Returns IOCB_ERROR when the HBA state forbids any I/O, IOCB_SUCCESS when
 * the iocb was submitted or queued to the txq, and IOCB_BUSY when the ring
 * is busy and the caller asked (SLI_IOCB_RET_IOCB) to get the iocb back
 * instead of having it queued.
 *
 * NOTE(review): caller is presumably expected to hold the host_lock — the
 * ring/txq state is touched here without locking. Confirm against callers.
 */
int
lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		    struct lpfc_iocbq *piocb, uint32_t flag)
{
	struct lpfc_iocbq *nextiocb;
	IOCB_t *iocb;

	/*
	 * We should never get an IOCB if we are in a < LINK_DOWN state
	 */
	if (unlikely(phba->hba_state < LPFC_LINK_DOWN))
		return IOCB_ERROR;

	/*
	 * Check to see if we are blocking IOCB processing because of a
	 * outstanding mbox command.
	 */
	if (unlikely(pring->flag & LPFC_STOP_IOCB_MBX))
		goto iocb_busy;

	if (unlikely(phba->hba_state == LPFC_LINK_DOWN)) {
		/*
		 * Only CREATE_XRI, CLOSE_XRI, ABORT_XRI, and QUE_RING_BUF
		 * can be issued if the link is not up.
		 */
		switch (piocb->iocb.ulpCommand) {
		case CMD_QUE_RING_BUF_CN:
		case CMD_QUE_RING_BUF64_CN:
			/*
			 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
			 * completion, iocb_cmpl MUST be 0.
			 */
			if (piocb->iocb_cmpl)
				piocb->iocb_cmpl = NULL;
			/*FALLTHROUGH*/
		case CMD_CREATE_XRI_CR:
			break;
		default:
			goto iocb_busy;
		}

	/*
	 * For FCP commands, we must be in a state where we can process link
	 * attention events.
	 */
	} else if (unlikely(pring->ringno == phba->sli.fcp_ring &&
		   !(phba->sli.sli_flag & LPFC_PROCESS_LA)))
		goto iocb_busy;

	/*
	 * Check to see if this is a high priority command.
	 * If so bypass tx queue processing.
	 */
	if (unlikely((flag & SLI_IOCB_HIGH_PRIORITY) &&
		     (iocb = lpfc_sli_next_iocb_slot(phba, pring)))) {
		/* Submit directly, ahead of anything waiting on the txq. */
		lpfc_sli_submit_iocb(phba, pring, iocb, piocb);
		piocb = NULL;
	}

	/*
	 * Drain the txq (and finally piocb itself) into the ring for as long
	 * as command slots remain available.
	 */
	while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
	       (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
		lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

	/*
	 * A non-NULL iocb here means the loop stopped because the txq and
	 * piocb were exhausted (slots remain); NULL means the ring filled up.
	 */
	if (iocb)
		lpfc_sli_update_ring(phba, pring);
	else
		lpfc_sli_update_full_ring(phba, pring);

	/* piocb was consumed (submitted) above. */
	if (!piocb)
		return IOCB_SUCCESS;

	goto out_busy;

 iocb_busy:
	pring->stats.iocb_cmd_delay++;

 out_busy:

	/* Unless the caller wants the iocb back, park it on the txq. */
	if (!(flag & SLI_IOCB_RET_IOCB)) {
		lpfc_sli_ringtx_put(phba, pring, piocb);
		return IOCB_SUCCESS;
	}

	return IOCB_BUSY;
}

/*
 * One-time SLI setup: initialize the struct lpfc_sli and per-ring entry
 * counts / iotag limits / unsolicited-event masks for the FCP, IP and
 * ELS/CT rings. numCiocb/numRiocb computed here are later consumed by
 * CONFIG_PORT (per the in-line comments). Always returns 0; an over-budget
 * total iocb count is only logged, not treated as fatal.
 */
int
lpfc_sli_setup(struct lpfc_hba *phba)
{
	int i, totiocb = 0;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;

	psli->num_rings = MAX_CONFIGURED_RINGS;
	psli->sli_flag = 0;
	psli->fcp_ring = LPFC_FCP_RING;
	psli->next_ring = LPFC_FCP_NEXT_RING;
	psli->ip_ring = LPFC_IP_RING;

	psli->iocbq_lookup = NULL;
	psli->iocbq_lookup_len = 0;
	psli->last_iotag = 0;

	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		switch (i) {
		case LPFC_FCP_RING:	/* ring 0 - FCP */
			/* numCiocb and numRiocb are used in config_port */
			pring->numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
			pring->numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
			/* The FCP ring absorbs the extra entries of rings 1
			 * and 3 on top of its own allocation. */
			pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
			pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
			pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
			pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
			pring->iotag_ctr = 0;
			/* iotag space scales with the configured queue depth */
			pring->iotag_max =
			    (phba->cfg_hba_queue_depth * 2);
			pring->fast_iotag = pring->iotag_max;
			pring->num_mask = 0;
			break;
		case LPFC_IP_RING:	/* ring 1 - IP */
			/* numCiocb and numRiocb are used in config_port */
			pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
			pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
			pring->num_mask = 0;
			break;
		case LPFC_ELS_RING:	/* ring 2 - ELS / CT */
			/* numCiocb and numRiocb are used in config_port */
			pring->numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
			pring->numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
			pring->fast_iotag = 0;
			pring->iotag_ctr = 0;
			pring->iotag_max = 4096;
			/* Four rctl/type masks route unsolicited ELS and CT
			 * (NameServer) frames to their handlers. */
			pring->num_mask = 4;
			pring->prt[0].profile = 0;	/* Mask 0 */
			pring->prt[0].rctl = FC_ELS_REQ;
			pring->prt[0].type = FC_ELS_DATA;
			pring->prt[0].lpfc_sli_rcv_unsol_event =
			    lpfc_els_unsol_event;
			pring->prt[1].profile = 0;	/* Mask 1 */
			pring->prt[1].rctl = FC_ELS_RSP;
			pring->prt[1].type = FC_ELS_DATA;
			pring->prt[1].lpfc_sli_rcv_unsol_event =
			    lpfc_els_unsol_event;
			pring->prt[2].profile = 0;	/* Mask 2 */
			/* NameServer Inquiry */
			pring->prt[2].rctl = FC_UNSOL_CTL;
			/* NameServer */
			pring->prt[2].type = FC_COMMON_TRANSPORT_ULP;
			pring->prt[2].lpfc_sli_rcv_unsol_event =
			    lpfc_ct_unsol_event;
			pring->prt[3].profile = 0;	/* Mask 3 */
			/* NameServer response */
			pring->prt[3].rctl = FC_SOL_CTL;
			/* NameServer */
			pring->prt[3].type = FC_COMMON_TRANSPORT_ULP;
			pring->prt[3].lpfc_sli_rcv_unsol_event =
			    lpfc_ct_unsol_event;
			break;
		}
		totiocb += (pring->numCiocb + pring->numRiocb);
	}
	if (totiocb > MAX_SLI2_IOCB) {
		/* Too many cmd / rsp ring entries in SLI2 SLIM */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"%d:0462 Too many cmd / rsp ring entries in "
				"SLI2 SLIM Data: x%x x%x\n",
				phba->brd_no, totiocb, MAX_SLI2_IOCB);
	}

	return 0;
}

/*
 * Initialize the SLI queue heads: the mailbox queue plus, for every ring,
 * the txq / txcmplq / iocb_continueq / postbufq lists and the ring index
 * counters. Done under host_lock. Always returns 1.
 */
int
lpfc_sli_queue_setup(struct lpfc_hba * phba)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	int i;

	psli = &phba->sli;
	spin_lock_irq(phba->host->host_lock);
	INIT_LIST_HEAD(&psli->mboxq);
	/* Initialize list headers for txq and txcmplq as double linked lists */
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		pring->ringno = i;
		pring->next_cmdidx  = 0;
		pring->local_getidx = 0;
		pring->cmdidx = 0;
		INIT_LIST_HEAD(&pring->txq);
		INIT_LIST_HEAD(&pring->txcmplq);
		INIT_LIST_HEAD(&pring->iocb_continueq);
		INIT_LIST_HEAD(&pring->postbufq);
	}
	spin_unlock_irq(phba->host->host_lock);
	return (1);
}

/*
 * Bring the HBA down: fail every iocb still waiting on the txqs with
 * IOSTAT_LOCAL_REJECT / IOERR_SLI_DOWN, fail the active and all pending
 * mailbox commands with MBX_NOT_FINISHED, then (unless already in
 * LPFC_HBA_ERROR) reset the board.
 *
 * Completion handlers are invoked with host_lock dropped and re-taken
 * around each call, so the txq/mbox lists may be touched by them while we
 * iterate. Always returns 1.
 */
int
lpfc_sli_hba_down(struct lpfc_hba * phba)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	LPFC_MBOXQ_t *pmb;
	struct lpfc_iocbq *iocb, *next_iocb;
	IOCB_t *icmd = NULL;
	int i;
	unsigned long flags = 0;

	psli = &phba->sli;
	lpfc_hba_down_prep(phba);

	spin_lock_irqsave(phba->host->host_lock, flags);

	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		pring->flag |= LPFC_DEFERRED_RING_EVENT;

		/*
		 * Error everything on the txq since these iocbs have not been
		 * given to the FW yet.
		 */
		pring->txq_cnt = 0;

		list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
			list_del_init(&iocb->list);
			if (iocb->iocb_cmpl) {
				icmd = &iocb->iocb;
				icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
				icmd->un.ulpWord[4] = IOERR_SLI_DOWN;
				/* Drop the lock across the completion call */
				spin_unlock_irqrestore(phba->host->host_lock,
						       flags);
				(iocb->iocb_cmpl) (phba, iocb, iocb);
				spin_lock_irqsave(phba->host->host_lock,
						  flags);
			} else
				lpfc_sli_release_iocbq(phba, iocb);
		}

		INIT_LIST_HEAD(&(pring->txq));

		kfree(pring->fast_lookup);
		pring->fast_lookup = NULL;
	}

	spin_unlock_irqrestore(phba->host->host_lock, flags);

	/* Return any active mbox cmds */
	del_timer_sync(&psli->mbox_tmo);
	spin_lock_irqsave(phba->host->host_lock, flags);
	phba->work_hba_events &= ~WORKER_MBOX_TMO;
	if (psli->mbox_active) {
		pmb = psli->mbox_active;
		pmb->mb.mbxStatus = MBX_NOT_FINISHED;
		if (pmb->mbox_cmpl) {
			spin_unlock_irqrestore(phba->host->host_lock, flags);
			pmb->mbox_cmpl(phba,pmb);
			spin_lock_irqsave(phba->host->host_lock, flags);
		}
	}
	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	psli->mbox_active = NULL;

	/* Return any pending mbox cmds */
	while ((pmb = lpfc_mbox_get(phba)) != NULL) {
		pmb->mb.mbxStatus = MBX_NOT_FINISHED;
		if (pmb->mbox_cmpl) {
			spin_unlock_irqrestore(phba->host->host_lock, flags);
			pmb->mbox_cmpl(phba,pmb);
			spin_lock_irqsave(phba->host->host_lock, flags);
		}
	}

	INIT_LIST_HEAD(&psli->mboxq);

	spin_unlock_irqrestore(phba->host->host_lock, flags);

	/*
	 * Provided the hba is not in an error state, reset it.  It is not
	 * capable of IO anymore.
	 */
	if (phba->hba_state != LPFC_HBA_ERROR) {
		phba->hba_state = LPFC_INIT_START;
		lpfc_sli_brdreset(phba, 1);
	}

	return 1;
}

/*
 * Copy cnt bytes from srcp to destp one 32-bit word at a time, converting
 * each word from little-endian to CPU order. Note the loop steps in whole
 * words, so a cnt that is not a multiple of 4 is effectively rounded up and
 * a full final word is read and written.
 */
void
lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint32_t *src = srcp;
	uint32_t *dest = destp;
	uint32_t ldata;
	int i;

	for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
		ldata = *src;
		ldata = le32_to_cpu(ldata);
		*dest = ldata;
		src++;
		dest++;
	}
}

/*
 * Queue a DMA buffer on the ring's postbufq so it can be matched up again
 * later (by physical address) when the adapter hands it back. Returns 0.
 */
int
lpfc_sli_ringpostbuf_put(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
			 struct lpfc_dmabuf * mp)
{
	/* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
	   later */
	list_add_tail(&mp->list, &pring->postbufq);
	pring->postbufq_cnt++;
	return 0;
}

/*
 * Find and remove the posted buffer whose DMA address matches phys.
 * Returns the buffer, or NULL (after logging an error) when no posted
 * buffer on this ring has that physical address.
 */
struct lpfc_dmabuf *
lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			 dma_addr_t phys)
{
	struct lpfc_dmabuf *mp, *next_mp;
	struct list_head *slp = &pring->postbufq;

	/* Search postbufq, from the begining, looking for a match on phys */
	list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
		if (mp->phys == phys) {
			list_del_init(&mp->list);
			pring->postbufq_cnt--;
			return mp;
		}
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"%d:0410 Cannot find virtual addr for mapped buf on "
			"ring %d Data x%llx x%p x%p x%x\n",
			phba->brd_no, pring->ringno, (unsigned long long)phys,
			slp->next, slp->prev, pring->postbufq_cnt);
	return NULL;
}

/*
 * Completion handler for the ABORT_MXRI64 issued by
 * lpfc_sli_issue_abort_iotag32 below: frees the cmd/rsp buffers and BPL
 * that were transferred from the aborted ELS_REQUEST64 iocb, then releases
 * the abort iocb itself.
 */
static void
lpfc_sli_abort_elsreq_cmpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
			   struct lpfc_iocbq * rspiocb)
{
	struct lpfc_dmabuf *buf_ptr, *buf_ptr1;

	/* Free the resources associated with the ELS_REQUEST64 IOCB the driver
	 * just aborted.
	 * In this case, context2  = cmd,  context2->next = rsp, context3 = bpl
	 */
	if (cmdiocb->context2) {
		buf_ptr1 = (struct lpfc_dmabuf *) cmdiocb->context2;

		/* Free the response IOCB before completing the abort
		   command.  */
		buf_ptr = NULL;
		list_remove_head((&buf_ptr1->list), buf_ptr,
				 struct lpfc_dmabuf, list);
		if (buf_ptr) {
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
			kfree(buf_ptr);
		}
		lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys);
		kfree(buf_ptr1);
	}

	if (cmdiocb->context3) {
		buf_ptr = (struct lpfc_dmabuf *) cmdiocb->context3;
		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
		kfree(buf_ptr);
	}

	lpfc_sli_release_iocbq(phba, cmdiocb);
	return;
}

/*
 * Issue an ABORT_MXRI64 (ABTS) for cmdiocb, identified by its 32-bit iotag.
 * Only CMD_ELS_REQUEST64_CR commands are abortable here; ownership of the
 * command's context2/context3 buffers is moved onto the abort iocb so that
 * lpfc_sli_abort_elsreq_cmpl can free them once the abort completes.
 * Returns 1 when the abort was issued, 0 on any failure.
 */
int
lpfc_sli_issue_abort_iotag32(struct lpfc_hba * phba,
			     struct lpfc_sli_ring * pring,
			     struct lpfc_iocbq * cmdiocb)
{
	struct lpfc_iocbq *abtsiocbp;
	IOCB_t *icmd = NULL;
	IOCB_t *iabt = NULL;

	/* issue ABTS for this IOCB based on iotag */
	abtsiocbp = lpfc_sli_get_iocbq(phba);
	if (abtsiocbp == NULL)
		return 0;

	iabt = &abtsiocbp->iocb;
	icmd = &cmdiocb->iocb;
	switch (icmd->ulpCommand) {
	case CMD_ELS_REQUEST64_CR:
		/* Even though we abort the ELS command, the firmware may access
		 * the BPL or other resources before it processes our
		 * ABORT_MXRI64. Thus we must delay reusing the cmdiocb
		 * resources till the actual abort request completes.
		 */
		abtsiocbp->context1 = (void *)((unsigned long)icmd->ulpCommand);
		abtsiocbp->context2 = cmdiocb->context2;
		abtsiocbp->context3 = cmdiocb->context3;
		cmdiocb->context2 = NULL;
		cmdiocb->context3 = NULL;
		abtsiocbp->iocb_cmpl = lpfc_sli_abort_elsreq_cmpl;
		break;
	default:
		/* Unsupported command type: give the iocb back. */
		lpfc_sli_release_iocbq(phba, abtsiocbp);
		return 0;
	}

	iabt->un.amxri.abortType = ABORT_TYPE_ABTS;
	iabt->un.amxri.iotag32 = icmd->un.elsreq64.bdl.ulpIoTag32;

	iabt->ulpLe = 1;
	iabt->ulpClass = CLASS3;
	iabt->ulpCommand = CMD_ABORT_MXRI64_CN;

	if (lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0) == IOCB_ERROR) {
		lpfc_sli_release_iocbq(phba, abtsiocbp);
		return 0;
	}

	return 1;
}

/*
 * Decide whether an FCP iocb matches the given abort/sum context:
 * a specific LUN (LPFC_CTX_LUN), a target (LPFC_CTX_TGT), an exact
 * ulpContext (LPFC_CTX_CTX), or everything on the host (LPFC_CTX_HOST).
 * Returns 0 on a match, 1 otherwise (note: 0 = match, inverse of the usual
 * boolean convention).
 */
static int
lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, uint16_t tgt_id,
			   uint64_t lun_id, uint32_t ctx,
			   lpfc_ctx_cmd ctx_cmd)
{
	struct lpfc_scsi_buf *lpfc_cmd;
	struct scsi_cmnd *cmnd;
	int rc = 1;

	/* Non-FCP iocbs never match. */
	if (!(iocbq->iocb_flag & LPFC_IO_FCP))
		return rc;

	lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
	cmnd = lpfc_cmd->pCmd;

	if (cmnd == NULL)
		return rc;

	switch (ctx_cmd) {
	case LPFC_CTX_LUN:
		if ((cmnd->device->id == tgt_id) &&
		    (cmnd->device->lun == lun_id))
			rc = 0;
		break;
	case LPFC_CTX_TGT:
		if (cmnd->device->id == tgt_id)
			rc = 0;
		break;
	case LPFC_CTX_CTX:
		if (iocbq->iocb.ulpContext == ctx)
			rc = 0;
		break;
	case LPFC_CTX_HOST:
		rc = 0;
		break;
	default:
		printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
			__FUNCTION__, ctx_cmd);
		break;
	}

	return rc;
}

/*
 * NOTE(review): lpfc_sli_sum_iocb is truncated in this chunk — the source
 * cuts off mid-expression in the for-loop condition. Preserved verbatim;
 * the remainder lies outside the visible text.
 */
int
lpfc_sli_sum_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		  uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd ctx_cmd)
{
	struct lpfc_iocbq *iocbq;
	int sum, i;

	for (i = 1, sum = 0; i <= p
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -