lpfc_els.c (linux-2.6.15.6, C), page 1 of 5: the listing is truncated mid-function at the end.
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2005 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"

static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
			  struct lpfc_iocbq *);

static int lpfc_max_els_tries = 3;

static int
lpfc_els_chk_latt(struct lpfc_hba * phba)
{
	struct lpfc_sli *psli;
	LPFC_MBOXQ_t *mbox;
	uint32_t ha_copy;
	int rc;

	psli = &phba->sli;

	if ((phba->hba_state >= LPFC_HBA_READY) ||
	    (phba->hba_state == LPFC_LINK_DOWN))
		return 0;

	/* Read the HBA Host Attention Register */
	spin_lock_irq(phba->host->host_lock);
	ha_copy = readl(phba->HAregaddr);
	spin_unlock_irq(phba->host->host_lock);

	if (!(ha_copy & HA_LATT))
		return 0;

	/* Pending Link Event during Discovery */
	lpfc_printf_log(phba, KERN_WARNING, LOG_DISCOVERY,
			"%d:0237 Pending Link Event during "
			"Discovery: State x%x\n",
			phba->brd_no, phba->hba_state);

	/* CLEAR_LA should re-enable link attention events and
	 * we should then immediately take a LATT event. The
	 * LATT processing should call lpfc_linkdown() which
	 * will cleanup any left over in-progress discovery
	 * events.
	 */
	spin_lock_irq(phba->host->host_lock);
	phba->fc_flag |= FC_ABORT_DISCOVERY;
	spin_unlock_irq(phba->host->host_lock);

	if (phba->hba_state != LPFC_CLEAR_LA) {
		if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))) {
			phba->hba_state = LPFC_CLEAR_LA;
			lpfc_clear_la(phba, mbox);
			mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
			rc = lpfc_sli_issue_mbox(phba, mbox,
						 (MBX_NOWAIT | MBX_STOP_IOCB));
			if (rc == MBX_NOT_FINISHED) {
				mempool_free(mbox, phba->mbox_mem_pool);
				phba->hba_state = LPFC_HBA_ERROR;
			}
		}
	}

	return (1);
}
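/*
 * Illustrative note (not part of the original file): ELS completion
 * handlers in this driver call lpfc_els_chk_latt() before doing any
 * discovery work and bail out when it returns 1, since a pending link
 * event will invalidate whatever state they would otherwise build up.
 * A minimal sketch of that pattern, mirroring lpfc_cmpl_els_flogi()
 * later in this listing:
 *
 *	if (lpfc_els_chk_latt(phba)) {
 *		lpfc_nlp_remove(phba, ndlp);
 *		goto out;
 *	}
 */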
static struct lpfc_iocbq *
lpfc_prep_els_iocb(struct lpfc_hba * phba,
		   uint8_t expectRsp,
		   uint16_t cmdSize,
		   uint8_t retry, struct lpfc_nodelist * ndlp, uint32_t elscmd)
{
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *elsiocb;
	struct lpfc_dmabuf *pcmd, *prsp, *pbuflist;
	struct ulp_bde64 *bpl;
	IOCB_t *icmd;

	pring = &phba->sli.ring[LPFC_ELS_RING];

	if (phba->hba_state < LPFC_LINK_UP)
		return NULL;

	/* Allocate buffer for command iocb */
	spin_lock_irq(phba->host->host_lock);
	elsiocb = lpfc_sli_get_iocbq(phba);
	spin_unlock_irq(phba->host->host_lock);

	if (elsiocb == NULL)
		return NULL;
	icmd = &elsiocb->iocb;

	/* fill in BDEs for command */
	/* Allocate buffer for command payload */
	if (((pcmd = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL)) == 0) ||
	    ((pcmd->virt = lpfc_mbuf_alloc(phba,
					   MEM_PRI, &(pcmd->phys))) == 0)) {
		kfree(pcmd);

		spin_lock_irq(phba->host->host_lock);
		lpfc_sli_release_iocbq(phba, elsiocb);
		spin_unlock_irq(phba->host->host_lock);
		return NULL;
	}

	INIT_LIST_HEAD(&pcmd->list);

	/* Allocate buffer for response payload */
	if (expectRsp) {
		prsp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
		if (prsp)
			prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
						     &prsp->phys);
		if (prsp == 0 || prsp->virt == 0) {
			kfree(prsp);
			lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
			kfree(pcmd);
			spin_lock_irq(phba->host->host_lock);
			lpfc_sli_release_iocbq(phba, elsiocb);
			spin_unlock_irq(phba->host->host_lock);
			return NULL;
		}
		INIT_LIST_HEAD(&prsp->list);
	} else {
		prsp = NULL;
	}

	/* Allocate buffer for Buffer ptr list */
	pbuflist = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
	if (pbuflist)
		pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
						 &pbuflist->phys);
	if (pbuflist == 0 || pbuflist->virt == 0) {
		spin_lock_irq(phba->host->host_lock);
		lpfc_sli_release_iocbq(phba, elsiocb);
		spin_unlock_irq(phba->host->host_lock);
		lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
		if (prsp)	/* prsp is NULL when no response is expected */
			lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
		kfree(pcmd);
		kfree(prsp);
		kfree(pbuflist);
		return NULL;
	}

	INIT_LIST_HEAD(&pbuflist->list);

	icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
	icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
	icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BDL;
	if (expectRsp) {
		icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
		icmd->un.elsreq64.remoteID = ndlp->nlp_DID;	/* DID */
		icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
	} else {
		icmd->un.elsreq64.bdl.bdeSize = sizeof (struct ulp_bde64);
		icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
	}

	icmd->ulpBdeCount = 1;
	icmd->ulpLe = 1;
	icmd->ulpClass = CLASS3;

	bpl = (struct ulp_bde64 *) pbuflist->virt;
	bpl->addrLow = le32_to_cpu(putPaddrLow(pcmd->phys));
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pcmd->phys));
	bpl->tus.f.bdeSize = cmdSize;
	bpl->tus.f.bdeFlags = 0;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);

	if (expectRsp) {
		bpl++;
		bpl->addrLow = le32_to_cpu(putPaddrLow(prsp->phys));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(prsp->phys));
		bpl->tus.f.bdeSize = FCELSSIZE;
		bpl->tus.f.bdeFlags = BUFF_USE_RCV;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
	}

	/* Save for completion so we can release these resources */
	elsiocb->context1 = (uint8_t *) ndlp;
	elsiocb->context2 = (uint8_t *) pcmd;
	elsiocb->context3 = (uint8_t *) pbuflist;
	elsiocb->retry = retry;
	elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT;

	if (prsp) {
		list_add(&prsp->list, &pcmd->list);
	}

	if (expectRsp) {
		/* Xmit ELS command <elsCmd> to remote NPORT <did> */
		lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
				"%d:0116 Xmit ELS command x%x to remote "
				"NPORT x%x Data: x%x x%x\n",
				phba->brd_no, elscmd,
				ndlp->nlp_DID, icmd->ulpIoTag, phba->hba_state);
	} else {
		/* Xmit ELS response <elsCmd> to remote NPORT <did> */
		lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
				"%d:0117 Xmit ELS response x%x to remote "
				"NPORT x%x Data: x%x x%x\n",
				phba->brd_no, elscmd,
				ndlp->nlp_DID, icmd->ulpIoTag, cmdSize);
	}

	return (elsiocb);
}
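/*
 * Usage sketch (illustrative, not part of the original file): callers
 * such as lpfc_issue_els_flogi() in the full file allocate the iocb,
 * write the ELS command word into the payload buffer saved in
 * context2, then hand the iocb to the SLI layer.  Treat the lines
 * below as a hedged outline of that pattern, not verbatim driver code:
 *
 *	cmdsize = (sizeof (uint32_t) + sizeof (struct serv_parm));
 *	elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry,
 *				     ndlp, ELS_CMD_FLOGI);
 *	if (!elsiocb)
 *		return 1;
 *	pcmd = (struct lpfc_dmabuf *) elsiocb->context2;
 *	*((uint32_t *) (pcmd->virt)) = ELS_CMD_FLOGI;
 *	... copy the service parameters, then issue via
 *	lpfc_sli_issue_iocb() ...
 */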
static int
lpfc_cmpl_els_flogi_fabric(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		struct serv_parm *sp, IOCB_t *irsp)
{
	LPFC_MBOXQ_t *mbox;
	int rc;

	spin_lock_irq(phba->host->host_lock);
	phba->fc_flag |= FC_FABRIC;
	spin_unlock_irq(phba->host->host_lock);

	phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov);
	if (sp->cmn.edtovResolution)	/* E_D_TOV ticks are in nanoseconds */
		phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000;

	phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;

	if (phba->fc_topology == TOPOLOGY_LOOP) {
		spin_lock_irq(phba->host->host_lock);
		phba->fc_flag |= FC_PUBLIC_LOOP;
		spin_unlock_irq(phba->host->host_lock);
	} else {
		/*
		 * If we are an N_Port connected to a Fabric, fix up sparam's
		 * so logins to devices on remote loops work.
		 */
		phba->fc_sparam.cmn.altBbCredit = 1;
	}

	phba->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;

	memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
	memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof (struct lpfc_name));
	ndlp->nlp_class_sup = 0;
	if (sp->cls1.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS1;
	if (sp->cls2.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS2;
	if (sp->cls3.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS3;
	if (sp->cls4.classValid)
		ndlp->nlp_class_sup |= FC_COS_CLASS4;
	ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
				sp->cmn.bbRcvSizeLsb;
	memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		goto fail;

	phba->hba_state = LPFC_FABRIC_CFG_LINK;
	lpfc_config_link(phba, mbox);
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT | MBX_STOP_IOCB);
	if (rc == MBX_NOT_FINISHED)
		goto fail_free_mbox;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		goto fail;

	if (lpfc_reg_login(phba, Fabric_DID, (uint8_t *) sp, mbox, 0))
		goto fail_free_mbox;

	/*
	 * set_slim mailbox command needs to execute first,
	 * queue this command to be processed later.
	 */
	mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
	mbox->context2 = ndlp;

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT | MBX_STOP_IOCB);
	if (rc == MBX_NOT_FINISHED)
		goto fail_free_mbox;
	return 0;

 fail_free_mbox:
	mempool_free(mbox, phba->mbox_mem_pool);
 fail:
	return -ENXIO;
}
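/*
 * Worked example (added for illustration, not in the original file):
 * when sp->cmn.edtovResolution is set, the fabric reports E_D_TOV in
 * nanosecond ticks, and the rounding-up division above converts it to
 * milliseconds, e.g. e_d_tov = 2000000 ns gives
 * (2000000 + 999999) / 1000000 = 2 ms.  R_A_TOV is likewise converted
 * from milliseconds to seconds with (r_a_tov + 999) / 1000.
 */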
/*
 * We FLOGIed into an NPort, initiate pt2pt protocol
 */
static int
lpfc_cmpl_els_flogi_nport(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		struct serv_parm *sp)
{
	LPFC_MBOXQ_t *mbox;
	int rc;

	spin_lock_irq(phba->host->host_lock);
	phba->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
	spin_unlock_irq(phba->host->host_lock);

	phba->fc_edtov = FF_DEF_EDTOV;
	phba->fc_ratov = FF_DEF_RATOV;
	rc = memcmp(&phba->fc_portname, &sp->portName,
			sizeof(struct lpfc_name));
	if (rc >= 0) {
		/* This side will initiate the PLOGI */
		spin_lock_irq(phba->host->host_lock);
		phba->fc_flag |= FC_PT2PT_PLOGI;
		spin_unlock_irq(phba->host->host_lock);

		/*
		 * N_Port ID cannot be 0; set ours to LocalID and the
		 * other side will be RemoteID.
		 */

		/* not equal */
		if (rc)
			phba->fc_myDID = PT2PT_LocalID;

		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!mbox)
			goto fail;

		lpfc_config_link(phba, mbox);

		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, mbox,
				MBX_NOWAIT | MBX_STOP_IOCB);
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(mbox, phba->mbox_mem_pool);
			goto fail;
		}
		mempool_free(ndlp, phba->nlp_mem_pool);

		ndlp = lpfc_findnode_did(phba, NLP_SEARCH_ALL, PT2PT_RemoteID);
		if (!ndlp) {
			/*
			 * Cannot find existing Fabric ndlp, so allocate a
			 * new one
			 */
			ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
			if (!ndlp)
				goto fail;

			lpfc_nlp_init(phba, ndlp, PT2PT_RemoteID);
		}

		memcpy(&ndlp->nlp_portname, &sp->portName,
				sizeof(struct lpfc_name));
		memcpy(&ndlp->nlp_nodename, &sp->nodeName,
				sizeof(struct lpfc_name));
		ndlp->nlp_state = NLP_STE_NPR_NODE;
		lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
	} else {
		/* This side will wait for the PLOGI */
		mempool_free(ndlp, phba->nlp_mem_pool);
	}

	spin_lock_irq(phba->host->host_lock);
	phba->fc_flag |= FC_PT2PT;
	spin_unlock_irq(phba->host->host_lock);

	/* Start discovery - this should just do CLEAR_LA */
	lpfc_disc_start(phba);
	return 0;
 fail:
	return -ENXIO;
}
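/*
 * Note (added for illustration, not in the original file): the memcmp()
 * of the two worldwide port names above acts as the point-to-point
 * tie-breaker; the port whose WWPN compares greater or equal (rc >= 0)
 * initiates the PLOGI and, when strictly greater, claims PT2PT_LocalID
 * while the peer becomes PT2PT_RemoteID.  Since two distinct ports
 * should never share a WWPN, the rc == 0 case, which leaves fc_myDID
 * unchanged, is not expected in practice.
 */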
static void
lpfc_cmpl_els_flogi(struct lpfc_hba * phba,
		    struct lpfc_iocbq * cmdiocb, struct lpfc_iocbq * rspiocb)
{
	IOCB_t *irsp = &rspiocb->iocb;
	struct lpfc_nodelist *ndlp = cmdiocb->context1;
	struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
	struct serv_parm *sp;
	int rc;

	/* Check to see if link went down during discovery */
	if (lpfc_els_chk_latt(phba)) {
		lpfc_nlp_remove(phba, ndlp);
		goto out;
	}

	if (irsp->ulpStatus) {
		/* Check for retry */
		if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
			/* ELS command is being retried */
			goto out;
		}
		/* FLOGI failed, so there is no fabric */
		spin_lock_irq(phba->host->host_lock);
		phba->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
		spin_unlock_irq(phba->host->host_lock);

		/* If private loop, then allow max outstanding els to be
		 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
		 * alpa map would take too long otherwise.
		 */
		if (phba->alpa_map[0] == 0) {
			phba->cfg_discovery_threads =
			    LPFC_MAX_DISC_THREADS;
		}

		/* FLOGI failure */
		lpfc_printf_log(phba,
				KERN_INFO,
				LOG_ELS,
				"%d:0100 FLOGI failure Data: x%x x%x\n",
				phba->brd_no,
				irsp->ulpStatus, irsp->un.ulpWord[4]);
		goto flogifail;
	}

	/*
	 * The FLogI succeeded.  Sync the data for the CPU before
	 * accessing it.
	 */
	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);

	sp = prsp->virt + sizeof(uint32_t);

	/* FLOGI completes successfully */
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"%d:0101 FLOGI completes successfully "
			"Data: x%x x%x x%x x%x\n",
			phba->brd_no,
			irsp->un.ulpWord[4], sp->cmn.e_d_tov,
			sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution);

	if (phba->hba_state == LPFC_FLOGI) {
		/*
		 * If Common Service Parameters indicate Nport
		 * we are point to point, if Fport we are Fabric.
		 */
		if (sp->cmn.fPort)
			rc = lpfc_cmpl_els_flogi_fabric(phba, ndlp, sp, irsp);
		else
			rc = lpfc_cmpl_els_flogi_nport(phba, ndlp, sp);

		if (!rc)
			goto out;
	}

flogifail:
	lpfc_nlp_remove(phba, ndlp);

	if (irsp->ulpStatus != IOSTAT_LOCAL_REJECT ||
	    (irsp->un.ulpWord[4] != IOERR_SLI_ABORTED &&
	     irsp->un.ulpWord[4] != IOERR_SLI_DOWN)) {
		/* FLOGI failed, so just use loop map to make discovery list */
		lpfc_disc_list_loopmap(phba);
