📄 lpfc_ct.c
字号:
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2005 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

/*
 * Fibre Channel SCSI LAN Device Driver CT support
 */

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/utsname.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_version.h"

/* FC-GS/FDMI port speed encodings reported to the fabric. */
#define HBA_PORTSPEED_UNKNOWN             0	/* Unknown - transceiver
						 * incapable of reporting */
#define HBA_PORTSPEED_1GBIT               1	/* 1 GBit/sec */
#define HBA_PORTSPEED_2GBIT               2	/* 2 GBit/sec */
#define HBA_PORTSPEED_4GBIT               8	/* 4 GBit/sec */
#define HBA_PORTSPEED_8GBIT              16	/* 8 GBit/sec */
#define HBA_PORTSPEED_10GBIT              4	/* 10 GBit/sec */
#define HBA_PORTSPEED_NOT_NEGOTIATED      5	/* Speed not established */

#define FOURBYTES	4

static char *lpfc_release_version = LPFC_DRIVER_VERSION;

/*
 * lpfc_ct_unsol_event - handle an unsolicited CT IOCB from the ELS ring.
 *
 * Walks the chain of IOCBs hanging off @piocbq, reclaims every posted
 * receive buffer referenced by their BDEs, reposts fresh buffers to the
 * ring, and frees the reclaimed buffers.  If the adapter reported that
 * it ran out of posted receive buffers, only a repost is attempted.
 */
void
lpfc_ct_unsol_event(struct lpfc_hba * phba,
		    struct lpfc_sli_ring * pring, struct lpfc_iocbq * piocbq)
{
	struct lpfc_iocbq *next_piocbq;
	struct lpfc_dmabuf *pmbuf = NULL;
	struct lpfc_dmabuf *matp, *next_matp;
	/* NOTE(review): ctx and size are computed below but never consumed
	 * in this version of the handler; go_exit likewise is set but never
	 * tested.  They look like remnants of (or preparation for) a CT
	 * dispatch path — confirm against the rest of the driver before
	 * removing. */
	uint32_t ctx = 0, size = 0, cnt = 0;
	IOCB_t *icmd = &piocbq->iocb;
	IOCB_t *save_icmd = icmd;
	int i, go_exit = 0;
	struct list_head head;

	if ((icmd->ulpStatus == IOSTAT_LOCAL_REJECT) &&
	    ((icmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING)) {
		/* Not enough posted buffers; Try posting more buffers */
		phba->fc_stat.NoRcvBuf++;
		lpfc_post_buffer(phba, pring, 0, 1);
		return;
	}

	/* If there are no BDEs associated with this IOCB,
	 * there is nothing to do.
	 */
	if (icmd->ulpBdeCount == 0)
		return;

	/* Put the single entry point on a local list head so the
	 * list_for_each below also visits IOCBs chained to piocbq. */
	INIT_LIST_HEAD(&head);
	list_add_tail(&head, &piocbq->list);

	list_for_each_entry_safe(piocbq, next_piocbq, &head, list) {
		icmd = &piocbq->iocb;
		if (ctx == 0)
			ctx = (uint32_t) (icmd->ulpContext);
		if (icmd->ulpBdeCount == 0)
			continue;

		for (i = 0; i < icmd->ulpBdeCount; i++) {
			/* Map the BDE's DMA address back to the dmabuf we
			 * posted earlier. */
			matp = lpfc_sli_ringpostbuf_get(phba, pring,
					getPaddr(icmd->un.cont64[i].addrHigh,
						 icmd->un.cont64[i].addrLow));
			if (!matp) {
				/* Insert lpfc log message here */
				lpfc_post_buffer(phba, pring, cnt, 1);
				go_exit = 1;
				goto ct_unsol_event_exit_piocbq;
			}

			/* Typically for Unsolicited CT requests: chain all
			 * reclaimed buffers behind the first one. */
			if (!pmbuf) {
				pmbuf = matp;
				INIT_LIST_HEAD(&pmbuf->list);
			} else
				list_add_tail(&matp->list, &pmbuf->list);

			size += icmd->un.cont64[i].tus.f.bdeSize;
			cnt++;
		}

		icmd->ulpBdeCount = 0;
	}

	/* Replace every buffer we consumed. */
	lpfc_post_buffer(phba, pring, cnt, 1);

	if (save_icmd->ulpStatus) {
		go_exit = 1;
	}

ct_unsol_event_exit_piocbq:
	/* Free the reclaimed buffer chain (head first, then members). */
	if (pmbuf) {
		list_for_each_entry_safe(matp, next_matp, &pmbuf->list, list) {
			lpfc_mbuf_free(phba, matp->virt, matp->phys);
			list_del(&matp->list);
			kfree(matp);
		}
		lpfc_mbuf_free(phba, pmbuf->virt, pmbuf->phys);
		kfree(pmbuf);
	}
	return;
}

/*
 * lpfc_free_ct_rsp - free a CT response buffer list built by
 * lpfc_alloc_ct_rsp.  @mlist is the list head element and is freed last;
 * callers must not pass NULL.
 */
static void
lpfc_free_ct_rsp(struct lpfc_hba * phba, struct lpfc_dmabuf * mlist)
{
	struct lpfc_dmabuf *mlast, *next_mlast;

	list_for_each_entry_safe(mlast, next_mlast, &mlist->list, list) {
		lpfc_mbuf_free(phba, mlast->virt, mlast->phys);
		list_del(&mlast->list);
		kfree(mlast);
	}
	lpfc_mbuf_free(phba, mlist->virt, mlist->phys);
	kfree(mlist);
	return;
}

/*
 * lpfc_alloc_ct_rsp - allocate DMA buffers to receive a CT response.
 *
 * Allocates enough FCELSSIZE-sized chunks to cover @size bytes, chains
 * them on a list, and fills one ulp_bde64 entry per chunk starting at
 * @bpl.  *@entries is set to the number of BDEs written.  Returns the
 * buffer list head, or NULL on allocation failure (nothing leaked).
 * The caller releases the list with lpfc_free_ct_rsp.
 */
static struct lpfc_dmabuf *
lpfc_alloc_ct_rsp(struct lpfc_hba * phba, int cmdcode, struct ulp_bde64 * bpl,
		  uint32_t size, int *entries)
{
	struct lpfc_dmabuf *mlist = NULL;
	struct lpfc_dmabuf *mp;
	int cnt, i = 0;

	while (size) {
		/* We get chunks of FCELSSIZE.  Recompute per iteration so a
		 * final partial chunk cannot make the unsigned "size -= cnt"
		 * below wrap around (the original hoisted this computation
		 * out of the loop, which only worked for multiples of
		 * FCELSSIZE). */
		cnt = size > FCELSSIZE ? FCELSSIZE : size;

		/* Allocate buffer for rsp payload */
		mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
		if (!mp) {
			if (mlist)
				lpfc_free_ct_rsp(phba, mlist);
			return NULL;
		}

		INIT_LIST_HEAD(&mp->list);

		/* GID_FT responses can be large; use the priority pool. */
		if (cmdcode == be16_to_cpu(SLI_CTNS_GID_FT))
			mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys));
		else
			mp->virt = lpfc_mbuf_alloc(phba, 0, &(mp->phys));

		if (!mp->virt) {
			kfree(mp);
			/* Fix: guard against a NULL list exactly like the
			 * kmalloc failure path above does — if the very
			 * first chunk's mbuf allocation fails, mlist is
			 * still NULL and lpfc_free_ct_rsp would oops. */
			if (mlist)
				lpfc_free_ct_rsp(phba, mlist);
			return NULL;
		}

		/* Queue it to a linked list */
		if (!mlist)
			mlist = mp;
		else
			list_add_tail(&mp->list, &mlist->list);

		bpl->tus.f.bdeFlags = BUFF_USE_RCV;
		/* build buffer ptr list for IOCB */
		bpl->addrLow = le32_to_cpu( putPaddrLow(mp->phys) );
		bpl->addrHigh = le32_to_cpu( putPaddrHigh(mp->phys) );
		bpl->tus.f.bdeSize = (uint16_t) cnt;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
		bpl++;
		i++;
		size -= cnt;
	}

	*entries = i;
	return mlist;
}

/*
 * lpfc_gen_req - build and issue a GEN_REQUEST64 IOCB on the ELS ring.
 *
 * @bmp:   buffer pointer list describing request + response buffers
 * @inp:   request payload (saved in context1 for the completion)
 * @outp:  response buffers (saved in context2 for the completion)
 * @cmpl:  completion handler; it owns releasing inp/outp/bmp
 * @usr_flg: when set, caller keeps ownership of @bmp (context3 left NULL)
 * @num_entry: number of BDEs in @bmp
 * @tmo:   IOCB timeout in seconds; 0 selects the default 2*RATOV + 1
 *
 * Returns 0 on success, 1 if no IOCB could be allocated or issued (the
 * caller then frees its resources).
 */
static int
lpfc_gen_req(struct lpfc_hba *phba, struct lpfc_dmabuf *bmp,
	     struct lpfc_dmabuf *inp, struct lpfc_dmabuf *outp,
	     void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
			   struct lpfc_iocbq *),
	     struct lpfc_nodelist *ndlp, uint32_t usr_flg, uint32_t num_entry,
	     uint32_t tmo)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
	IOCB_t *icmd;
	struct lpfc_iocbq *geniocb;

	/* Allocate buffer for command iocb */
	spin_lock_irq(phba->host->host_lock);
	geniocb = lpfc_sli_get_iocbq(phba);
	spin_unlock_irq(phba->host->host_lock);

	if (geniocb == NULL)
		return 1;

	icmd = &geniocb->iocb;
	icmd->un.genreq64.bdl.ulpIoTag32 = 0;
	icmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
	icmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
	icmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BDL;
	icmd->un.genreq64.bdl.bdeSize = (num_entry * sizeof (struct ulp_bde64));

	if (usr_flg)
		geniocb->context3 = NULL;
	else
		geniocb->context3 = (uint8_t *) bmp;

	/* Save for completion so we can release these resources */
	geniocb->context1 = (uint8_t *) inp;
	geniocb->context2 = (uint8_t *) outp;

	/* Fill in payload, bp points to frame payload */
	icmd->ulpCommand = CMD_GEN_REQUEST64_CR;

	/* Fill in rest of iocb */
	icmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
	icmd->un.genreq64.w5.hcsw.Dfctl = 0;
	icmd->un.genreq64.w5.hcsw.Rctl = FC_UNSOL_CTL;
	icmd->un.genreq64.w5.hcsw.Type = FC_COMMON_TRANSPORT_ULP;

	if (!tmo)
		tmo = (2 * phba->fc_ratov) + 1;
	icmd->ulpTimeout = tmo;
	icmd->ulpBdeCount = 1;
	icmd->ulpLe = 1;
	icmd->ulpClass = CLASS3;
	icmd->ulpContext = ndlp->nlp_rpi;

	/* Issue GEN REQ IOCB for NPORT <did> */
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"%d:0119 Issue GEN REQ IOCB for NPORT x%x "
			"Data: x%x x%x\n", phba->brd_no, icmd->un.ulpWord[5],
			icmd->ulpIoTag, phba->hba_state);
	geniocb->iocb_cmpl = cmpl;
	geniocb->drvrTimeout = icmd->ulpTimeout + LPFC_DRVR_TIMEOUT;
	spin_lock_irq(phba->host->host_lock);
	if (lpfc_sli_issue_iocb(phba, pring, geniocb, 0) == IOCB_ERROR) {
		lpfc_sli_release_iocbq(phba, geniocb);
		spin_unlock_irq(phba->host->host_lock);
		return 1;
	}
	spin_unlock_irq(phba->host->host_lock);

	return 0;
}

/*
 * lpfc_ct_cmd - issue a CT request whose payload is already in @inmp.
 *
 * Allocates response buffers sized @rsp_size, appends their BDEs after
 * the request BDE in @bmp, and hands everything to lpfc_gen_req.
 * Returns 0 on success or -ENOMEM on any failure (response buffers are
 * freed here on failure; on success @cmpl owns them).
 */
static int
lpfc_ct_cmd(struct lpfc_hba *phba, struct lpfc_dmabuf *inmp,
	    struct lpfc_dmabuf *bmp, struct lpfc_nodelist *ndlp,
	    void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
			  struct lpfc_iocbq *),
	    uint32_t rsp_size)
{
	struct ulp_bde64 *bpl = (struct ulp_bde64 *) bmp->virt;
	struct lpfc_dmabuf *outmp;
	int cnt = 0, status;
	int cmdcode = ((struct lpfc_sli_ct_request *) inmp->virt)->
		CommandResponse.bits.CmdRsp;

	bpl++;			/* Skip past ct request */

	/* Put buffer(s) for ct rsp in bpl */
	outmp = lpfc_alloc_ct_rsp(phba, cmdcode, bpl, rsp_size, &cnt);
	if (!outmp)
		return -ENOMEM;

	/* cnt response BDEs plus the one request BDE we skipped. */
	status = lpfc_gen_req(phba, bmp, inmp, outmp, cmpl, ndlp, 0,
			      cnt + 1, 0);
	if (status) {
		lpfc_free_ct_rsp(phba, outmp);
		return -ENOMEM;
	}
	return 0;
}

/*
 * lpfc_ns_rsp - process a NameServer GID_FT response.
 *
 * Walks the chained response buffers in @mp, extracts each DID entry,
 * and sets up discovery for every port that is not ourselves.  Stops at
 * the entry flagged SLI_CT_LAST_ENTRY.  If the HBA is already READY we
 * were processing an RSCN, so flush the RSCN list and stay in RSCN mode.
 * Always returns 0.
 */
static int
lpfc_ns_rsp(struct lpfc_hba * phba, struct lpfc_dmabuf * mp, uint32_t Size)
{
	struct lpfc_sli_ct_request *Response =
		(struct lpfc_sli_ct_request *) mp->virt;
	struct lpfc_nodelist *ndlp = NULL;
	struct lpfc_dmabuf *mlast, *next_mp;
	/* Start at the first GID entry inside the CT header of buffer 0;
	 * subsequent buffers start at their raw payload (see below). */
	uint32_t *ctptr = (uint32_t *) & Response->un.gid.PortType;
	uint32_t Did;
	uint32_t CTentry;
	int Cnt;
	struct list_head head;

	lpfc_set_disctmo(phba);

	Cnt = Size > FCELSSIZE ? FCELSSIZE : Size;

	list_add_tail(&head, &mp->list);
	list_for_each_entry_safe(mp, next_mp, &head, list) {
		mlast = mp;

		Size -= Cnt;

		if (!ctptr)
			ctptr = (uint32_t *) mlast->virt;
		else
			Cnt -= 16;	/* subtract length of CT header */

		/* Loop through entire NameServer list of DIDs */
		while (Cnt) {
			/* Get next DID from NameServer List */
			CTentry = *ctptr++;
			Did = ((be32_to_cpu(CTentry)) & Mask_DID);

			ndlp = NULL;
			if (Did != phba->fc_myDID) {
				/* Check for rscn processing or not */
				ndlp = lpfc_setup_disc_node(phba, Did);
			}
			/* Mark all node table entries that are in the
			   Nameserver */
			if (ndlp) {
				/* NameServer Rsp */
				lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
						"%d:0238 Process x%x NameServer"
						" Rsp Data: x%x x%x x%x\n",
						phba->brd_no, Did,
						ndlp->nlp_flag, phba->fc_flag,
						phba->fc_rscn_id_cnt);
			} else {
				/* NameServer Rsp */
				lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
						"%d:0239 Skip x%x NameServer "
						"Rsp Data: x%x x%x x%x\n",
						phba->brd_no, Did, Size,
						phba->fc_flag,
						phba->fc_rscn_id_cnt);
			}

			if (CTentry & (be32_to_cpu(SLI_CT_LAST_ENTRY)))
				goto nsout1;
			Cnt -= sizeof (uint32_t);
		}
		ctptr = NULL;
	}

nsout1:
	list_del(&head);

	/* Here we are finished in the case RSCN */
	if (phba->hba_state == LPFC_HBA_READY) {
		lpfc_els_flush_rscn(phba);
		spin_lock_irq(phba->host->host_lock);
		phba->fc_flag |= FC_RSCN_MODE; /* we are still in RSCN mode */
		spin_unlock_irq(phba->host->host_lock);
	}
	return 0;
}

/* Completion handler for the GID_FT NameServer query.
 * NOTE(review): the body of this function is truncated in this copy of
 * the file; only the declarations below are visible here. */
static void
lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
			struct lpfc_iocbq * rspiocb)
{
	IOCB_t *irsp;
	struct lpfc_sli *psli;
	struct lpfc_dmabuf *bmp;
	struct lpfc_dmabuf *inp;
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -