/* lpfc_sli.c — Emulex LightPulse Fibre Channel driver, SLI mailbox and
 * ring-event handling.  (The original banner and "字号:" ["font size"] line
 * were code-viewer chrome from the hosting web page, not driver source.)
 */
	/*
	 * Tail of lpfc_sli_chk_mbx_command() (the head of this function lies
	 * before this chunk): every mailbox command the driver recognizes maps
	 * to itself; anything unrecognized maps to MBX_SHUTDOWN, which the
	 * completion path below treats as a fatal unknown-command error.
	 */
	case MBX_READ_LA:
	case MBX_CLEAR_LA:
	case MBX_DUMP_MEMORY:
	case MBX_DUMP_CONTEXT:
	case MBX_RUN_DIAGS:
	case MBX_RESTART:
	case MBX_UPDATE_CFG:
	case MBX_DOWN_LOAD:
	case MBX_DEL_LD_ENTRY:
	case MBX_RUN_PROGRAM:
	case MBX_SET_MASK:
	case MBX_SET_SLIM:
	case MBX_UNREG_D_ID:
	case MBX_CONFIG_FARP:
	case MBX_LOAD_AREA:
	case MBX_RUN_BIU_DIAG64:
	case MBX_CONFIG_PORT:
	case MBX_READ_SPARM64:
	case MBX_READ_RPI64:
	case MBX_REG_LOGIN64:
	case MBX_READ_LA64:
	case MBX_FLASH_WR_ULA:
	case MBX_SET_DEBUG:
	case MBX_LOAD_EXP_ROM:
		ret = mbxCommand;
		break;
	default:
		ret = MBX_SHUTDOWN;
		break;
	}
	return (ret);
}

/*
 * lpfc_sli_wake_mbox_wait - mailbox completion callback for synchronous users
 * @phba:   HBA context (unused here; part of the mbox_cmpl callback contract)
 * @pmboxq: completed mailbox command
 *
 * Wakes the thread sleeping on the wait queue stashed in ->context1.  A NULL
 * context1 means the waiter already gave up (timed out) and continued, so
 * there is nobody to wake.
 */
static void
lpfc_sli_wake_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	wait_queue_head_t *pdone_q;

	/*
	 * If pdone_q is empty, the driver thread gave up waiting and
	 * continued running.
	 */
	pdone_q = (wait_queue_head_t *) pmboxq->context1;
	if (pdone_q)
		wake_up_interruptible(pdone_q);
	return;
}

/*
 * lpfc_sli_def_mbox_cmpl - default mailbox completion handler
 * @phba: HBA context
 * @pmb:  completed mailbox command
 *
 * Frees the DMA buffer (if any) attached via ->context1 and returns the
 * mailbox object to the driver's mailbox mempool.  Used when the issuer
 * does not need to inspect the completion.
 */
void
lpfc_sli_def_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	struct lpfc_dmabuf *mp;

	mp = (struct lpfc_dmabuf *) (pmb->context1);
	if (mp) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}
	mempool_free( pmb, phba->mbox_mem_pool);
	return;
}

/*
 * lpfc_sli_handle_mb_event - process a mailbox completion event
 * @phba: HBA context
 *
 * Called when the adapter signals mailbox completion (SLI2 mode only).
 * Copies the completed command out of SLIM, validates ownership and the
 * command code, optionally retries on MBXERR_NO_RESOURCES, invokes the
 * command's completion callback, then issues the next queued mailbox
 * command (or, if none, re-enables ring processing and drains the
 * freebufList of deferred DMA buffers).
 *
 * Returns 0 on normal processing, 1 on error (not in SLI2 mode, or a
 * stray interrupt where the adapter still owns the mailbox).
 */
int
lpfc_sli_handle_mb_event(struct lpfc_hba * phba)
{
	MAILBOX_t *mbox;
	MAILBOX_t *pmbox;
	LPFC_MBOXQ_t *pmb;
	struct lpfc_sli *psli;
	int i, rc;
	uint32_t process_next;

	psli = &phba->sli;
	/* We should only get here if we are in SLI2 mode */
	if (!(phba->sli.sli_flag & LPFC_SLI2_ACTIVE)) {
		return (1);
	}

	phba->sli.slistat.mbox_event++;

	/* Get a Mailbox buffer to setup mailbox commands for callback */
	if ((pmb = phba->sli.mbox_active)) {
		pmbox = &pmb->mb;
		mbox = &phba->slim2p->mbx;

		/* First check out the status word (first word only) */
		lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof (uint32_t));

		/* Sanity check to ensure the host owns the mailbox */
		if (pmbox->mbxOwner != OWN_HOST) {
			/*
			 * Lets try for a while: busy-poll re-reading the
			 * status word in case the adapter is just slow to
			 * flip ownership back to the host.
			 */
			for (i = 0; i < 10240; i++) {
				/* First copy command data */
				lpfc_sli_pcimem_bcopy(mbox, pmbox,
						      sizeof (uint32_t));
				if (pmbox->mbxOwner == OWN_HOST)
					goto mbout;
			}
			/* Stray Mailbox Interrupt, mbxCommand <cmd>
			 * mbxStatus <status> */
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"%d:0304 Stray Mailbox Interrupt "
					"mbxCommand x%x mbxStatus x%x\n",
					phba->brd_no,
					pmbox->mbxCommand,
					pmbox->mbxStatus);

			/*
			 * NOTE(review): this path *sets* LPFC_SLI_MBOX_ACTIVE
			 * (|=) rather than clearing it — presumably to keep
			 * the command marked outstanding since the adapter
			 * still owns the mailbox; confirm against the issue
			 * path outside this chunk.
			 */
			spin_lock_irq(phba->host->host_lock);
			phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
			spin_unlock_irq(phba->host->host_lock);
			return (1);
		}

	      mbout:
		/* Host owns the mailbox again: cancel the command timeout. */
		del_timer_sync(&phba->sli.mbox_tmo);
		phba->work_hba_events &= ~WORKER_MBOX_TMO;

		/*
		 * It is a fatal error if unknown mbox command completion.
		 */
		if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
		    MBX_SHUTDOWN) {

			/* Unknown mailbox command compl */
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"%d:0323 Unknown Mailbox command %x Cmpl\n",
					phba->brd_no,
					pmbox->mbxCommand);
			phba->hba_state = LPFC_HBA_ERROR;
			phba->work_hs = HS_FFER3;
			lpfc_handle_eratt(phba);
			return (0);
		}

		phba->sli.mbox_active = NULL;
		if (pmbox->mbxStatus) {
			phba->sli.slistat.mbox_stat_err++;
			if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
				/* Mbox cmd cmpl error - RETRYing: clear the
				 * status, give ownership back to the host
				 * view and re-issue the same command. */
				lpfc_printf_log(phba, KERN_INFO,
						LOG_MBOX | LOG_SLI,
						"%d:0305 Mbox cmd cmpl error - "
						"RETRYing Data: x%x x%x x%x x%x\n",
						phba->brd_no,
						pmbox->mbxCommand,
						pmbox->mbxStatus,
						pmbox->un.varWords[0],
						phba->hba_state);
				pmbox->mbxStatus = 0;
				pmbox->mbxOwner = OWN_HOST;
				spin_lock_irq(phba->host->host_lock);
				phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
				spin_unlock_irq(phba->host->host_lock);
				rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
				if (rc == MBX_SUCCESS)
					return (0);
			}
		}

		/* Mailbox cmd <cmd> Cmpl <cmpl> */
		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
				"%d:0307 Mailbox cmd x%x Cmpl x%p "
				"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
				phba->brd_no,
				pmbox->mbxCommand,
				pmb->mbox_cmpl,
				*((uint32_t *) pmbox),
				pmbox->un.varWords[0],
				pmbox->un.varWords[1],
				pmbox->un.varWords[2],
				pmbox->un.varWords[3],
				pmbox->un.varWords[4],
				pmbox->un.varWords[5],
				pmbox->un.varWords[6],
				pmbox->un.varWords[7]);

		if (pmb->mbox_cmpl) {
			/* Copy the full command back before the callback. */
			lpfc_sli_pcimem_bcopy(mbox, pmbox, MAILBOX_CMD_SIZE);
			pmb->mbox_cmpl(phba,pmb);
		}
	}

	do {
		process_next = 0;	/* by default don't loop */
		spin_lock_irq(phba->host->host_lock);
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;

		/* Process next mailbox command if there is one */
		if ((pmb = lpfc_mbox_get(phba))) {
			spin_unlock_irq(phba->host->host_lock);
			rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
			if (rc == MBX_NOT_FINISHED) {
				/* Fail the command's completion directly and
				 * try the next queued one. */
				pmb->mb.mbxStatus = MBX_NOT_FINISHED;
				pmb->mbox_cmpl(phba,pmb);
				process_next = 1;
				continue;	/* loop back */
			}
		} else {
			spin_unlock_irq(phba->host->host_lock);
			/* Turn on IOCB processing */
			for (i = 0; i < phba->sli.num_rings; i++) {
				lpfc_sli_turn_on_ring(phba, i);
			}

			/* Free any lpfc_dmabuf's waiting for mbox cmd cmpls */
			while (!list_empty(&phba->freebufList)) {
				struct lpfc_dmabuf *mp;

				mp = NULL;
				list_remove_head((&phba->freebufList),
						 mp,
						 struct lpfc_dmabuf,
						 list);
				if (mp) {
					lpfc_mbuf_free(phba, mp->virt,
						       mp->phys);
					kfree(mp);
				}
			}
		}

	} while (process_next);

	return (0);
}

/*
 * lpfc_sli_process_unsol_iocb - dispatch an unsolicited IOCB
 * @phba:  HBA context
 * @pring: ring the IOCB arrived on
 * @saveq: the received IOCB
 *
 * Derives the FC R_CTL / Type for the frame (explicitly for ELS-receive
 * commands, otherwise from word 5 of the IOCB, with a firmware workaround
 * for zero-Rctl CMD_RCV_SEQUENCE64_CX on the ELS ring) and hands the IOCB
 * to the matching registered unsolicited-event handler.  Logs a warning if
 * no handler matches.  Always returns 1.
 */
static int
lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			    struct lpfc_iocbq *saveq)
{
	IOCB_t           * irsp;
	WORD5            * w5p;
	uint32_t           Rctl, Type;
	uint32_t           match, i;

	match = 0;
	irsp = &(saveq->iocb);
	if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX)
	    || (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX)) {
		Rctl = FC_ELS_REQ;
		Type = FC_ELS_DATA;
	} else {
		w5p = (WORD5 *) & (saveq->iocb.un.ulpWord[5]);
		Rctl = w5p->hcsw.Rctl;
		Type = w5p->hcsw.Type;

		/* Firmware Workaround: patch word 5 in place as well */
		if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
		    (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX)) {
			Rctl = FC_ELS_REQ;
			Type = FC_ELS_DATA;
			w5p->hcsw.Rctl = Rctl;
			w5p->hcsw.Type = Type;
		}
	}

	/* unSolicited Responses: a profiled slot 0 takes everything */
	if (pring->prt[0].profile) {
		(pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring, saveq);
		match = 1;
	} else {
		/* We must search, based on rctl / type
		   for the right routine */
		for (i = 0; i < pring->num_mask; i++) {
			if ((pring->prt[i].rctl == Rctl)
			    && (pring->prt[i].type == Type)) {
				(pring->prt[i].lpfc_sli_rcv_unsol_event)
					(phba, pring, saveq);
				match = 1;
				break;
			}
		}
	}
	if (match == 0) {
		/* Unexpected Rctl / Type received */
		/* Ring <ringno> handler: unexpected
		   Rctl <Rctl> Type <Type> received */
		lpfc_printf_log(phba,
				KERN_WARNING,
				LOG_SLI,
				"%d:0313 Ring %d handler: unexpected Rctl x%x "
				"Type x%x received \n",
				phba->brd_no,
				pring->ringno,
				Rctl,
				Type);
	}
	return(1);
}

/*
 * lpfc_sli_iocbq_lookup - map a response IOCB back to its command IOCB
 * @phba:     HBA context
 * @pring:    ring being processed
 * @prspiocb: response whose ulpIoTag identifies the originating command
 *
 * Looks the iotag up in the driver's iocbq_lookup table, removes the
 * command from its list and decrements the ring's txcmplq count.  Returns
 * the command IOCB, or NULL (after logging) when the iotag is zero or out
 * of range.  Caller is expected to hold host_lock (both callers in this
 * chunk do) — assumption, confirm against the full file.
 */
static struct lpfc_iocbq *
lpfc_sli_iocbq_lookup(struct lpfc_hba * phba,
		      struct lpfc_sli_ring * pring,
		      struct lpfc_iocbq * prspiocb)
{
	struct lpfc_iocbq *cmd_iocb = NULL;
	uint16_t iotag;

	iotag = prspiocb->iocb.ulpIoTag;

	if (iotag != 0 && iotag <= phba->sli.last_iotag) {
		cmd_iocb = phba->sli.iocbq_lookup[iotag];
		list_del(&cmd_iocb->list);
		pring->txcmplq_cnt--;
		return cmd_iocb;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"%d:0317 iotag x%x is out off "
			"range: max iotag x%x wd0 x%x\n",
			phba->brd_no, iotag,
			phba->sli.last_iotag,
			*(((uint32_t *) &prspiocb->iocb) + 7));
	return NULL;
}

/*
 * lpfc_sli_process_sol_iocb - complete a solicited IOCB
 * @phba:  HBA context
 * @pring: ring the response arrived on
 * @saveq: the response IOCB
 *
 * Finds the originating command via its iotag and runs its completion
 * callback with host_lock dropped (re-acquired afterwards).  A command
 * without a callback is simply released.  A missing command is expected
 * on the ELS ring (lpfc_els_abort() can reap it first) and only logged
 * for other rings.  Always returns 1.
 */
static int
lpfc_sli_process_sol_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
			  struct lpfc_iocbq *saveq)
{
	struct lpfc_iocbq * cmdiocbp;
	int rc = 1;
	unsigned long iflag;

	/* Based on the iotag field, get the cmd IOCB from the txcmplq */
	spin_lock_irqsave(phba->host->host_lock, iflag);
	cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
	if (cmdiocbp) {
		if (cmdiocbp->iocb_cmpl) {
			/*
			 * Post all ELS completions to the worker thread.
			 * All other are passed to the completion callback.
			 *
			 * NOTE(review): despite the comment above, both
			 * branches below are currently identical — the ELS
			 * case does not actually defer to the worker thread.
			 * Candidate for simplification once confirmed.
			 */
			if (pring->ringno == LPFC_ELS_RING) {
				spin_unlock_irqrestore(phba->host->host_lock,
						       iflag);
				(cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
				spin_lock_irqsave(phba->host->host_lock,
						  iflag);
			} else {
				spin_unlock_irqrestore(phba->host->host_lock,
						       iflag);
				(cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
				spin_lock_irqsave(phba->host->host_lock,
						  iflag);
			}
		} else
			lpfc_sli_release_iocbq(phba, cmdiocbp);
	} else {
		/*
		 * Unknown initiating command based on the response iotag.
		 * This could be the case on the ELS ring because of
		 * lpfc_els_abort().
		 */
		if (pring->ringno != LPFC_ELS_RING) {
			/*
			 * Ring <ringno> handler: unexpected completion IoTag
			 * <IoTag>
			 */
			lpfc_printf_log(phba,
				KERN_WARNING,
				LOG_SLI,
				"%d:0322 Ring %d handler: unexpected "
				"completion IoTag x%x Data: x%x x%x x%x x%x\n",
				phba->brd_no,
				pring->ringno,
				saveq->iocb.ulpIoTag,
				saveq->iocb.ulpStatus,
				saveq->iocb.un.ulpWord[4],
				saveq->iocb.ulpCommand,
				saveq->iocb.ulpContext);
		}
	}

	spin_unlock_irqrestore(phba->host->host_lock, iflag);
	return rc;
}

/*
 * lpfc_sli_handle_fast_ring_event - fast-path response-ring processing
 * @phba:  HBA context
 * @pring: ring to service
 * @mask:  host-attention mask for this ring
 *
 * This routine presumes LPFC_FCP_RING handling and doesn't bother
 * to check it explicitly.
 *
 * Walks the response ring from rspidx to the adapter's put index, copying
 * each entry out of SLIM (with byte-swap) and completing the originating
 * command.  An out-of-range put index is treated as a fatal adapter error
 * posted to the worker thread.
 *
 * NOTE(review): this function continues past the end of this excerpt; the
 * remainder is not visible here.
 */
static int
lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
				struct lpfc_sli_ring * pring, uint32_t mask)
{
	struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno];
	IOCB_t *irsp = NULL;
	IOCB_t *entry = NULL;
	struct lpfc_iocbq *cmdiocbq = NULL;
	struct lpfc_iocbq rspiocbq;
	uint32_t status;
	uint32_t portRspPut, portRspMax;
	int rc = 1;
	lpfc_iocb_type type;
	unsigned long iflag;
	uint32_t rsp_cmpl = 0;
	void __iomem *to_slim;

	spin_lock_irqsave(phba->host->host_lock, iflag);
	pring->stats.iocb_event++;

	/*
	 * The next available response entry should never exceed the maximum
	 * entries.  If it does, treat it as an adapter hardware error.
	 */
	portRspMax = pring->numRiocb;
	portRspPut = le32_to_cpu(pgp->rspPutInx);
	if (unlikely(portRspPut >= portRspMax)) {
		/*
		 * Ring <ringno> handler: portRspPut <portRspPut> is bigger then
		 * rsp ring <portRspMax>
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"%d:0312 Ring %d handler: portRspPut %d "
				"is bigger then rsp ring %d\n",
				phba->brd_no, pring->ringno, portRspPut,
				portRspMax);

		phba->hba_state = LPFC_HBA_ERROR;

		/* All error attention handlers are posted to worker thread */
		phba->work_ha |= HA_ERATT;
		phba->work_hs = HS_FFER3;
		if (phba->work_wait)
			wake_up(phba->work_wait);

		spin_unlock_irqrestore(phba->host->host_lock, iflag);
		return 1;
	}

	/* Order the put-index read before reading ring entries. */
	rmb();
	while (pring->rspidx != portRspPut) {
		/*
		 * Fetch an entry off the ring and copy it into a local data
		 * structure.  The copy involves a byte-swap since the
		 * network byte order and pci byte orders are different.
		 */
		entry = IOCB_ENTRY(pring->rspringaddr, pring->rspidx);
		lpfc_sli_pcimem_bcopy((uint32_t *) entry,
				      (uint32_t *) &rspiocbq.iocb,
				      sizeof (IOCB_t));
		irsp = &rspiocbq.iocb;

		type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
		pring->stats.iocb_rsp++;
		rsp_cmpl++;

		if (unlikely(irsp->ulpStatus)) {
			/* Rsp ring <ringno> error: IOCB */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"%d:0326 Rsp Ring %d error: IOCB Data: "
					"x%x x%x x%x x%x x%x x%x x%x x%x\n",
					phba->brd_no, pring->ringno,
					irsp->un.ulpWord[0],
					irsp->un.ulpWord[1],
					irsp->un.ulpWord[2],
					irsp->un.ulpWord[3],
					irsp->un.ulpWord[4],
					irsp->un.ulpWord[5],
					*(((uint32_t *) irsp) + 6),
					*(((uint32_t *) irsp) + 7));
		}

		switch (type) {
		case LPFC_ABORT_IOCB:
		case LPFC_SOL_IOCB:
			/*
			 * Idle exchange closed via ABTS from port.  No iocb
			 * resources need to be recovered.
			 */
			if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
				printk(KERN_INFO "%s: IOCB cmd 0x%x processed. "
				       "Skipping completion\n", __FUNCTION__,
				       irsp->ulpCommand);
				break;
			}

			/* Complete the originating command with the lock
			 * dropped across the callback. */
			cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
							 &rspiocbq);
			if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
				spin_unlock_irqrestore(phba->host->host_lock,
						       iflag);
				(cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
						      &rspiocbq);
				spin_lock_irqsave(phba->host->host_lock,
						  iflag);
			}
			break;
		default:
			if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
				/* Adapter delivered a text message in the
				 * IOCB payload; log it via dev_warn. */
				char adaptermsg[LPFC_MAX_ADPTMSG];
				memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
				memcpy(&adaptermsg[0], (uint8_t *) irsp,
				       MAX_MSG_DATA);
				dev_warn(&((phba->pcidev)->dev), "lpfc%d: %s",
					 phba->brd_no, adaptermsg);
				/* NOTE(review): excerpt ends here — the rest
				 * of this function is outside this chunk. */
/* (Removed: code-viewer keyboard-shortcut help ["快捷键说明": copy, search,
 * full-screen, theme, font-size bindings] — web-page chrome, not part of
 * lpfc_sli.c.) */