📄 lpfc_hbadisc.c
	}
	phba->fc_ns_retry = 0;
	/* Good status, issue CT Request to NameServer */
	if (lpfc_ns_cmd(phba, ndlp, SLI_CTNS_GID_FT)) {
		/* Cannot issue NameServer Query, so finish up discovery */
		lpfc_disc_start(phba);
	}

	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(pmb, phba->mbox_mem_pool);

	return;
}

/*
 * Register the node with the FC transport (fc_remote_port_add) and cache
 * the resulting rport and SCSI target id on the node.
 */
static void
lpfc_register_remote_port(struct lpfc_hba * phba,
			  struct lpfc_nodelist * ndlp)
{
	struct fc_rport *rport;
	struct lpfc_rport_data *rdata;
	struct fc_rport_identifiers rport_ids;

	/* Remote port has reappeared. Re-register w/ FC transport */
	rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
	rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
	rport_ids.port_id = ndlp->nlp_DID;
	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
	if (ndlp->nlp_type & NLP_FCP_TARGET)
		rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
	if (ndlp->nlp_type & NLP_FCP_INITIATOR)
		rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;

	scsi_block_requests(phba->host);
	ndlp->rport = rport = fc_remote_port_add(phba->host, 0, &rport_ids);
	if (!rport) {
		dev_printk(KERN_WARNING, &phba->pcidev->dev,
			   "Warning: fc_remote_port_add failed\n");
		return;
	}

	/* initialize static port data */
	rport->maxframe_size = ndlp->nlp_maxframe;
	rport->supported_classes = ndlp->nlp_class_sup;
	if ((rport->scsi_target_id != -1) &&
	    (rport->scsi_target_id < MAX_FCP_TARGET)) {
		ndlp->nlp_sid = rport->scsi_target_id;
	}
	rdata = rport->dd_data;
	rdata->pnode = ndlp;
	scsi_unblock_requests(phba->host);

	return;
}

/*
 * Remove the node's remote port from the FC transport and break the
 * node <-> rport linkage.
 */
static void
lpfc_unregister_remote_port(struct lpfc_hba * phba,
			    struct lpfc_nodelist * ndlp)
{
	struct fc_rport *rport = ndlp->rport;
	struct lpfc_rport_data *rdata = rport->dd_data;

	ndlp->rport = NULL;
	rdata->pnode = NULL;
	scsi_block_requests(phba->host);
	fc_remote_port_delete(rport);
	scsi_unblock_requests(phba->host);

	return;
}

/*
 * Move a LPFC_NODELIST entry from its current list to <list>, keeping the
 * per-list counters in sync and deferring any FC transport add / delete
 * calls until after the node has been re-linked.
 */
int
lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list)
{
	enum { none, unmapped, mapped } rport_add = none, rport_del = none;
	struct lpfc_sli *psli;

	psli = &phba->sli;
	/* Sanity check to ensure we are not moving to / from the same list */
	if ((nlp->nlp_flag & NLP_LIST_MASK) == list) {
		if (list != NLP_NO_LIST)
			return (0);
	}

	switch (nlp->nlp_flag & NLP_LIST_MASK) {
	case NLP_NO_LIST: /* Not on any list */
		break;
	case NLP_UNUSED_LIST:
		phba->fc_unused_cnt--;
		list_del(&nlp->nlp_listp);
		break;
	case NLP_PLOGI_LIST:
		phba->fc_plogi_cnt--;
		list_del(&nlp->nlp_listp);
		break;
	case NLP_ADISC_LIST:
		phba->fc_adisc_cnt--;
		list_del(&nlp->nlp_listp);
		break;
	case NLP_REGLOGIN_LIST:
		phba->fc_reglogin_cnt--;
		list_del(&nlp->nlp_listp);
		break;
	case NLP_PRLI_LIST:
		phba->fc_prli_cnt--;
		list_del(&nlp->nlp_listp);
		break;
	case NLP_UNMAPPED_LIST:
		phba->fc_unmap_cnt--;
		list_del(&nlp->nlp_listp);
		spin_lock_irq(phba->host->host_lock);
		nlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
		nlp->nlp_type &= ~NLP_FC_NODE;
		spin_unlock_irq(phba->host->host_lock);
		phba->nport_event_cnt++;
		if (nlp->rport)
			rport_del = unmapped;
		break;
	case NLP_MAPPED_LIST:
		phba->fc_map_cnt--;
		list_del(&nlp->nlp_listp);
		phba->nport_event_cnt++;
		if (nlp->rport)
			rport_del = mapped;
		break;
	case NLP_NPR_LIST:
		phba->fc_npr_cnt--;
		list_del(&nlp->nlp_listp);
		/* Stop delay tmo if taking node off NPR list */
		if ((nlp->nlp_flag & NLP_DELAY_TMO) &&
		    (list != NLP_NPR_LIST)) {
			spin_lock_irq(phba->host->host_lock);
			nlp->nlp_flag &= ~NLP_DELAY_TMO;
			spin_unlock_irq(phba->host->host_lock);
			del_timer_sync(&nlp->nlp_delayfunc);
			if (!list_empty(&nlp->els_retry_evt.evt_listp))
				list_del_init(&nlp->els_retry_evt.evt_listp);
		}
		break;
	}

	spin_lock_irq(phba->host->host_lock);
	nlp->nlp_flag &= ~NLP_LIST_MASK;
	spin_unlock_irq(phba->host->host_lock);

	/* Add NPort <did> to <num> list */
	lpfc_printf_log(phba, KERN_INFO, LOG_NODE,
			"%d:0904 Add NPort x%x to %d list Data: x%x\n",
			phba->brd_no,
			nlp->nlp_DID, list, nlp->nlp_flag);

	switch (list) {
	case NLP_NO_LIST: /* No list, just remove it */
		lpfc_nlp_remove(phba, nlp);
		/* as node removed - stop further transport calls */
		rport_del = none;
		break;
	case NLP_UNUSED_LIST:
		spin_lock_irq(phba->host->host_lock);
		nlp->nlp_flag |= list;
		spin_unlock_irq(phba->host->host_lock);
		/* Put it at the end of the unused list */
		list_add_tail(&nlp->nlp_listp, &phba->fc_unused_list);
		phba->fc_unused_cnt++;
		break;
	case NLP_PLOGI_LIST:
		spin_lock_irq(phba->host->host_lock);
		nlp->nlp_flag |= list;
		spin_unlock_irq(phba->host->host_lock);
		/* Put it at the end of the plogi list */
		list_add_tail(&nlp->nlp_listp, &phba->fc_plogi_list);
		phba->fc_plogi_cnt++;
		break;
	case NLP_ADISC_LIST:
		spin_lock_irq(phba->host->host_lock);
		nlp->nlp_flag |= list;
		spin_unlock_irq(phba->host->host_lock);
		/* Put it at the end of the adisc list */
		list_add_tail(&nlp->nlp_listp, &phba->fc_adisc_list);
		phba->fc_adisc_cnt++;
		break;
	case NLP_REGLOGIN_LIST:
		spin_lock_irq(phba->host->host_lock);
		nlp->nlp_flag |= list;
		spin_unlock_irq(phba->host->host_lock);
		/* Put it at the end of the reglogin list */
		list_add_tail(&nlp->nlp_listp, &phba->fc_reglogin_list);
		phba->fc_reglogin_cnt++;
		break;
	case NLP_PRLI_LIST:
		spin_lock_irq(phba->host->host_lock);
		nlp->nlp_flag |= list;
		spin_unlock_irq(phba->host->host_lock);
		/* Put it at the end of the prli list */
		list_add_tail(&nlp->nlp_listp, &phba->fc_prli_list);
		phba->fc_prli_cnt++;
		break;
	case NLP_UNMAPPED_LIST:
		rport_add = unmapped;
		/* ensure all vestiges of "mapped" significance are gone */
		nlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
		spin_lock_irq(phba->host->host_lock);
		nlp->nlp_flag |= list;
		spin_unlock_irq(phba->host->host_lock);
		/* Put it at the end of the unmap list */
		list_add_tail(&nlp->nlp_listp, &phba->fc_nlpunmap_list);
		phba->fc_unmap_cnt++;
		phba->nport_event_cnt++;
		/* stop nodev tmo if running */
		if (nlp->nlp_flag & NLP_NODEV_TMO) {
			spin_lock_irq(phba->host->host_lock);
			nlp->nlp_flag &= ~NLP_NODEV_TMO;
			spin_unlock_irq(phba->host->host_lock);
			del_timer_sync(&nlp->nlp_tmofunc);
			if (!list_empty(&nlp->nodev_timeout_evt.evt_listp))
				list_del_init(&nlp->nodev_timeout_evt.evt_listp);
		}
		nlp->nlp_type |= NLP_FC_NODE;
		break;
	case NLP_MAPPED_LIST:
		rport_add = mapped;
		spin_lock_irq(phba->host->host_lock);
		nlp->nlp_flag |= list;
		spin_unlock_irq(phba->host->host_lock);
		/* Put it at the end of the map list */
		list_add_tail(&nlp->nlp_listp, &phba->fc_nlpmap_list);
		phba->fc_map_cnt++;
		phba->nport_event_cnt++;
		/* stop nodev tmo if running */
		if (nlp->nlp_flag & NLP_NODEV_TMO) {
			nlp->nlp_flag &= ~NLP_NODEV_TMO;
			del_timer_sync(&nlp->nlp_tmofunc);
			if (!list_empty(&nlp->nodev_timeout_evt.evt_listp))
				list_del_init(&nlp->nodev_timeout_evt.evt_listp);
		}
		break;
	case NLP_NPR_LIST:
		spin_lock_irq(phba->host->host_lock);
		nlp->nlp_flag |= list;
		spin_unlock_irq(phba->host->host_lock);
		/* Put it at the end of the npr list */
		list_add_tail(&nlp->nlp_listp, &phba->fc_npr_list);
		phba->fc_npr_cnt++;

		/*
		 * Sanity check for Fabric entity.
		 * Set nodev_tmo for NPR state, for Fabric use 1 sec.
		 */
		if (nlp->nlp_type & NLP_FABRIC) {
			mod_timer(&nlp->nlp_tmofunc, jiffies + HZ);
		} else {
			mod_timer(&nlp->nlp_tmofunc,
				  jiffies + HZ * phba->cfg_nodev_tmo);
		}
		spin_lock_irq(phba->host->host_lock);
		nlp->nlp_flag |= NLP_NODEV_TMO;
		nlp->nlp_flag &= ~NLP_RCV_PLOGI;
		spin_unlock_irq(phba->host->host_lock);
		break;
	case NLP_JUST_DQ:
		break;
	}

	/*
	 * We make all the calls into the transport after we have
	 * moved the node between lists. This so that we don't
	 * release the lock while in-between lists.
	 */

	/* Don't upcall midlayer if we're unloading */
	if (!(phba->fc_flag & FC_UNLOADING)) {
		/*
		 * We revalidate the rport pointer as the "add" function
		 * may have removed the remote port.
		 */
		if ((rport_del != none) && nlp->rport)
			lpfc_unregister_remote_port(phba, nlp);

		if (rport_add != none) {
			/*
			 * Tell the fc transport about the port, if we haven't
			 * already. If we have, and it's a scsi entity, be
			 * sure to unblock any attached scsi devices
			 */
			if (!nlp->rport)
				lpfc_register_remote_port(phba, nlp);

			/*
			 * if we added to Mapped list, but the remote port
			 * registration failed or assigned a target id outside
			 * our presentable range - move the node to the
			 * Unmapped List
			 */
			if ((rport_add == mapped) &&
			    ((!nlp->rport) ||
			     (nlp->rport->scsi_target_id == -1) ||
			     (nlp->rport->scsi_target_id >= MAX_FCP_TARGET))) {
				nlp->nlp_state = NLP_STE_UNMAPPED_NODE;
				spin_lock_irq(phba->host->host_lock);
				nlp->nlp_flag |= NLP_TGT_NO_SCSIID;
				spin_unlock_irq(phba->host->host_lock);
				lpfc_nlp_list(phba, nlp, NLP_UNMAPPED_LIST);
			}
		}
	}
	return (0);
}

/*
 * Start / ReStart rescue timer for Discovery / RSCN handling
 */
void
lpfc_set_disctmo(struct lpfc_hba * phba)
{
	uint32_t tmo;

	tmo = ((phba->fc_ratov * 2) + 1);

	mod_timer(&phba->fc_disctmo, jiffies + HZ * tmo);
	spin_lock_irq(phba->host->host_lock);
	phba->fc_flag |= FC_DISC_TMO;
	spin_unlock_irq(phba->host->host_lock);

	/* Start Discovery Timer state <hba_state> */
	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
			"%d:0247 Start Discovery Timer state x%x "
			"Data: x%x x%lx x%x x%x\n",
			phba->brd_no,
			phba->hba_state, tmo, (unsigned long)&phba->fc_disctmo,
			phba->fc_plogi_cnt, phba->fc_adisc_cnt);

	return;
}

/*
 * Cancel rescue timer for Discovery / RSCN handling
 */
int
lpfc_can_disctmo(struct lpfc_hba * phba)
{
	/* Turn off discovery timer if it's running */
	if (phba->fc_flag & FC_DISC_TMO) {
		spin_lock_irq(phba->host->host_lock);
		phba->fc_flag &= ~FC_DISC_TMO;
		spin_unlock_irq(phba->host->host_lock);
		del_timer_sync(&phba->fc_disctmo);
		phba->work_hba_events &= ~WORKER_DISC_TMO;
	}

	/* Cancel Discovery Timer state <hba_state> */
	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
			"%d:0248 Cancel Discovery Timer state x%x "
			"Data: x%x x%x x%x\n",
			phba->brd_no, phba->hba_state, phba->fc_flag,
			phba->fc_plogi_cnt, phba->fc_adisc_cnt);

	return (0);
}

/*
 * Check specified ring for outstanding IOCB on the SLI queue
 * Return true if iocb matches the specified nport
 */
int
lpfc_check_sli_ndlp(struct lpfc_hba * phba,
		    struct lpfc_sli_ring * pring,
		    struct lpfc_iocbq * iocb, struct lpfc_nodelist * ndlp)
{
	struct lpfc_sli *psli;
	IOCB_t *icmd;

	psli = &phba->sli;
	icmd = &iocb->iocb;
	if (pring->ringno == LPFC_ELS_RING) {
		switch (icmd->ulpCommand) {
		case CMD_GEN_REQUEST64_CR:
			if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi)
				return (1);
			/* fall through to the ELS context1 check */
		case CMD_ELS_REQUEST64_CR:
		case CMD_XMIT_ELS_RSP64_CX:
			if (iocb->context1 == (uint8_t *) ndlp)
				return (1);
		}
	} else if (pring->ringno == psli->ip_ring) {

	} else if (pring->ringno == psli->fcp_ring) {
		/* Skip match check if waiting to relogin to FCP target */
		if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
		    (ndlp->nlp_flag & NLP_DELAY_TMO)) {
			return (0);
		}
		if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
			return (1);
		}
	} else if (pring->ringno == psli->next_ring) {

	}
	return (0);
}

/*
 * Free resources / clean up outstanding I/Os
 * associated with nlp_rpi in the LPFC_NODELIST entry.
 */
static int
lpfc_no_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;
	IOCB_t *icmd;
	uint32_t rpi, i;

	/*
	 * Everything that matches on txcmplq will be returned
	 * by firmware with a no rpi error.
	 */
	psli = &phba->sli;
	rpi = ndlp->nlp_rpi;
	if (rpi) {
		/* Now process each ring */
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->ring[i];

			spin_lock_irq(phba->host->host_lock);
			list_for_each_entry_safe(iocb, next_iocb, &pring->txq,
						 list) {
				/*
				 * Check to see if iocb matches the nport
				 * we are looking for
				 */
				if ((lpfc_check_sli_ndlp(phba, pring, iocb,
							 ndlp))) {
					/* It matches, so dequeue it and call
					   the completion with an error */
					list_del(&iocb->list);
					pring->txq_cnt--;
					if (iocb->iocb_cmpl) {
						icmd = &iocb->iocb;
						icmd->ulpStatus =
							IOSTAT_LOCAL_REJECT;
						icmd->un.ulpWord[4] =
							IOERR_SLI_ABORTED;
						spin_unlock_irq(
							phba->host->host_lock);
						(iocb->iocb_cmpl)(phba, iocb,
								  iocb);
						spin_lock_irq(
							phba->host->host_lock);
					} else
						lpfc_sli_release_iocbq(phba,
								       iocb);
				}
			}
			spin_unlock_irq(phba->host->host_lock);
		}
	}
	return (0);
}

/*
 * Free rpi associated with LPFC_NODELIST entry.
 * This routine is called from lpfc_freenode(), when we are removing
 * a LPFC_NODELIST entry. It is also called if the driver initiates a
 * LOGO that completes successfully, and we are waiting to PLOGI back
 * to the remote NPort. In addition, it is called after we receive
 * an unsolicited ELS cmd, send back a rsp, the rsp completes and
 * we are waiting to PLOGI back to the remote NPort.
 */
int
lpfc_unreg_rpi(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp)
{
	LPFC_MBOXQ_t *mbox;
	int rc;

	if (ndlp->nlp_rpi) {
		if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
			lpfc_unreg_login(phba, ndlp->nlp_rpi, mbox);
			mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			rc = lpfc_sli_issue_mbox(phba, mbox,
						 (MBX_NOWAIT | MBX_STOP_IOCB));
			if (rc == MBX_NOT_FINISHED)
				mempool_free(mbox, phba->mbox_mem_pool);
		}
		lpfc_no_rpi(phba, ndlp);
		ndlp->nlp_rpi = 0;