📄 mptlan.c
字号:
/* NOTE(review): this chunk begins mid-function — the lines below are the tail
 * of the device-open path (presumably mpt_lan_open; head not visible here).
 * We are inside a loop zeroing the SendCtl array and pushing each index onto
 * the tx free-index stack. TODO: confirm against the full file. */
	memset(&priv->SendCtl[i], 0, sizeof(struct BufferControl));
	priv->mpt_txfidx[++priv->mpt_txfidx_tail] = i;
	}

	dlprintk((KERN_INFO MYNAM "@lo: Finished initializing SendCtl\n"));

	/* Allocate the receive-side free-index stack and per-bucket state.
	 * On failure, unwind everything allocated so far via the goto chain. */
	priv->mpt_rxfidx = kmalloc(priv->max_buckets_out * sizeof(int),
				   GFP_KERNEL);
	if (priv->mpt_rxfidx == NULL)
		goto out_SendCtl;
	priv->mpt_rxfidx_tail = -1;

	priv->RcvCtl = kmalloc(priv->max_buckets_out *
			       sizeof(struct BufferControl),
			       GFP_KERNEL);
	if (priv->RcvCtl == NULL)
		goto out_mpt_rxfidx;
	/* Zero each RcvCtl slot and seed the rx free-index stack with every
	 * bucket index (stack grows via pre-increment of the tail). */
	for (i = 0; i < priv->max_buckets_out; i++) {
		memset(&priv->RcvCtl[i], 0, sizeof(struct BufferControl));
		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
	}

/**/	dlprintk((KERN_INFO MYNAM "/lo: txfidx contains - "));
/**/	for (i = 0; i < priv->tx_max_out; i++)
/**/		dlprintk((" %xh", priv->mpt_txfidx[i]));
/**/	dlprintk(("\n"));

	dlprintk((KERN_INFO MYNAM "/lo: Finished initializing RcvCtl\n"));

	/* Hand receive buckets to the IOC so it can deliver inbound frames. */
	mpt_lan_post_receive_buckets(dev);
	printk(KERN_INFO MYNAM ": %s/%s: interface up & active\n",
			IOC_AND_NETDEV_NAMES_s_s(dev));

	/* Failure here is logged but deliberately not treated as fatal —
	 * the open still succeeds (see the message text). */
	if (mpt_event_register(LanCtx, mpt_lan_event_process) != 0) {
		printk (KERN_WARNING MYNAM "/lo: Unable to register for Event"
			" Notifications. This is a bad thing! We're not going "
			"to go ahead, but I'd be leery of system stability at "
			"this point.\n");
	}

	netif_start_queue(dev);
	dlprintk((KERN_INFO MYNAM "/lo: Done.\n"));

	return 0;

/* Error unwind: each label frees what was allocated before its failure
 * point and NULLs the pointer to guard against stale use. */
out_mpt_rxfidx:
	kfree(priv->mpt_rxfidx);
	priv->mpt_rxfidx = NULL;
out_SendCtl:
	kfree(priv->SendCtl);
	priv->SendCtl = NULL;
out_mpt_txfidx:
	kfree(priv->mpt_txfidx);
	priv->mpt_txfidx = NULL;
out:	return -ENOMEM;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Send a LanReset message to the FW. This should result in the FW returning
 * any buckets it still has.
 *
 * Returns 0 on success, -1 if no message frame could be allocated from the
 * adapter's pool. */
static int
mpt_lan_reset(struct net_device *dev)
{
	MPT_FRAME_HDR *mf;
	LANResetRequest_t *pResetReq;
	struct mpt_lan_priv *priv = netdev_priv(dev);

	mf = mpt_get_msg_frame(LanCtx, priv->mpt_dev);

	if (mf == NULL) {
/*		dlprintk((KERN_ERR MYNAM "/reset: Evil funkiness abounds! "
		"Unable to allocate a request frame.\n"));
*/
		return -1;
	}

	/* Build the MPI LAN_RESET request in place over the raw frame. */
	pResetReq = (LANResetRequest_t *) mf;

	pResetReq->Function	= MPI_FUNCTION_LAN_RESET;
	pResetReq->ChainOffset	= 0;
	pResetReq->Reserved	= 0;
	pResetReq->PortNumber	= priv->pnum;
	pResetReq->MsgFlags	= 0;
	pResetReq->Reserved2	= 0;

	mpt_put_msg_frame(LanCtx, priv->mpt_dev, mf);

	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Shut the interface down: stop the queue, ask the FW to return its buckets
 * (with a bounded 2-second wait), then unmap and free every outstanding rx
 * and tx skb along with the bookkeeping arrays allocated at open time.
 * Always returns 0. */
static int
mpt_lan_close(struct net_device *dev)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	unsigned long timeout;
	int i;

	dlprintk((KERN_INFO MYNAM ": mpt_lan_close called\n"));

	mpt_event_deregister(LanCtx);

	dlprintk((KERN_INFO MYNAM ":lan_close: Posted %d buckets "
		  "since driver was loaded, %d still out\n",
		  priv->total_posted, atomic_read(&priv->buckets_out)));

	netif_stop_queue(dev);

	mpt_lan_reset(dev);

	/* Give the FW up to 2 seconds to hand back outstanding buckets. */
	timeout = jiffies + 2 * HZ;
	while (atomic_read(&priv->buckets_out) &&
			time_before(jiffies, timeout))
		schedule_timeout_interruptible(1);

	/* Reclaim any rx buckets the FW never returned: unmap the DMA
	 * buffer and free the skb ourselves. */
	for (i = 0; i < priv->max_buckets_out; i++) {
		if (priv->RcvCtl[i].skb != NULL) {
/**/			dlprintk((KERN_INFO MYNAM "/lan_close: bucket %05x "
/**/				  "is still out\n", i));
			pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[i].dma,
					 priv->RcvCtl[i].len,
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(priv->RcvCtl[i].skb);
		}
	}

	kfree(priv->RcvCtl);
	kfree(priv->mpt_rxfidx);

	/* Same for any tx skbs still in flight. */
	for (i = 0; i < priv->tx_max_out; i++) {
		if (priv->SendCtl[i].skb != NULL) {
			pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[i].dma,
					 priv->SendCtl[i].len,
					 PCI_DMA_TODEVICE);
			dev_kfree_skb(priv->SendCtl[i].skb);
		}
	}

	kfree(priv->SendCtl);
	kfree(priv->mpt_txfidx);

	atomic_set(&priv->buckets_out, 0);

	printk(KERN_INFO MYNAM ": %s/%s: interface down & inactive\n",
			IOC_AND_NETDEV_NAMES_s_s(dev));

	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* net_device get_stats hook: return the stats block embedded in priv. */
static struct net_device_stats *
mpt_lan_get_stats(struct net_device *dev)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);

	return (struct net_device_stats *) &priv->stats;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* change_mtu hook: accept only MTUs within [MPT_LAN_MIN_MTU,
 * MPT_LAN_MAX_MTU]; -EINVAL otherwise. */
static int
mpt_lan_change_mtu(struct net_device *dev, int new_mtu)
{
	if ((new_mtu < MPT_LAN_MIN_MTU) || (new_mtu > MPT_LAN_MAX_MTU))
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Tx timeout handler: if the adapter is still active, just wake the queue
 * and let transmission retry. No reset is attempted here. */
static void
mpt_lan_tx_timeout(struct net_device *dev)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;

	if (mpt_dev->active) {
		dlprintk (("mptlan/tx_timeout: calling netif_wake_queue for %s.\n",
				dev->name));
		netif_wake_queue(dev);
	}
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Completion path for a "turbo" (single-context) send reply: account the
 * packet, unmap and free the sent skb, push the context back onto the tx
 * free-index stack under txfidx_lock, and wake the queue. */
//static inline int
static int
mpt_lan_send_turbo(struct net_device *dev, u32 tmsg)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	struct sk_buff *sent;
	unsigned long flags;
	u32 ctx;

	ctx = GET_LAN_BUFFER_CONTEXT(tmsg);
	sent = priv->SendCtl[ctx].skb;

	priv->stats.tx_packets++;
	priv->stats.tx_bytes += sent->len;

	dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
			IOC_AND_NETDEV_NAMES_s_s(dev),
			__FUNCTION__, sent));

	priv->SendCtl[ctx].skb = NULL;
	pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
			 priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
	dev_kfree_skb_irq(sent);

	spin_lock_irqsave(&priv->txfidx_lock, flags);
	priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;
	spin_unlock_irqrestore(&priv->txfidx_lock, flags);

	netif_wake_queue(dev);
	return 0;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Completion path for a full LANSend reply carrying one or more buffer
 * contexts. Accounts packets/errors by IOCStatus, then for each context:
 * unmaps and frees the sent skb and recycles the context index.
 *
 * Returns nonzero when the caller should free the request frame (i.e. this
 * reply is not flagged as a continuation). */
static int
mpt_lan_send_reply(struct net_device *dev, LANSendReply_t *pSendRep)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	struct sk_buff *sent;
	unsigned long flags;
	int FreeReqFrame = 0;
	u32 *pContext;
	u32 ctx;
	u8 count;

	count = pSendRep->NumberOfContexts;

	dioprintk((KERN_INFO MYNAM ": send_reply: IOCStatus: %04x\n",
		 le16_to_cpu(pSendRep->IOCStatus)));

	/* Add check for Loginfo Flag in IOCStatus */

	switch (le16_to_cpu(pSendRep->IOCStatus) & MPI_IOCSTATUS_MASK) {
	case MPI_IOCSTATUS_SUCCESS:
		priv->stats.tx_packets += count;
		break;

	case MPI_IOCSTATUS_LAN_CANCELED:
	case MPI_IOCSTATUS_LAN_TRANSMIT_ABORTED:
		break;

	case MPI_IOCSTATUS_INVALID_SGL:
		priv->stats.tx_errors += count;
		printk (KERN_ERR MYNAM ": %s/%s: ERROR - Invalid SGL sent to IOC!\n",
				IOC_AND_NETDEV_NAMES_s_s(dev));
		/* NOTE: skips skb cleanup below — contexts stay claimed. */
		goto out;

	default:
		priv->stats.tx_errors += count;
		break;
	}

	pContext = &pSendRep->BufferContext;

	/* Release every completed tx context; the free-index stack push
	 * must happen under txfidx_lock. */
	spin_lock_irqsave(&priv->txfidx_lock, flags);
	while (count > 0) {
		ctx = GET_LAN_BUFFER_CONTEXT(le32_to_cpu(*pContext));

		sent = priv->SendCtl[ctx].skb;
		priv->stats.tx_bytes += sent->len;

		dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
				IOC_AND_NETDEV_NAMES_s_s(dev),
				__FUNCTION__, sent));

		priv->SendCtl[ctx].skb = NULL;
		pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
				 priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
		dev_kfree_skb_irq(sent);

		priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;

		pContext++;
		count--;
	}
	spin_unlock_irqrestore(&priv->txfidx_lock, flags);

out:
	if (!(pSendRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
		FreeReqFrame = 1;

	netif_wake_queue(dev);
	return FreeReqFrame;
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* hard_start_xmit: wrap an outgoing skb in an MPI LANSend request (one
 * transaction context element plus a single 64-bit Simple SGE) and post it
 * to the IOC. Claims a tx context index and a message frame under
 * txfidx_lock; on exhaustion of either, stops the queue and returns 1 so
 * the stack requeues the skb.
 * NOTE(review): this function continues past the end of the visible chunk —
 * the frame-post / return tail is not shown here. */
static int
mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
{
	struct mpt_lan_priv *priv = netdev_priv(dev);
	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
	MPT_FRAME_HDR *mf;
	LANSendRequest_t *pSendReq;
	SGETransaction32_t *pTrans;
	SGESimple64_t *pSimple;
	dma_addr_t dma;
	unsigned long flags;
	int ctx;
	u16 cur_naa = 0x1000;

	dioprintk((KERN_INFO MYNAM ": %s called, skb_addr = %p\n",
			__FUNCTION__, skb));

	spin_lock_irqsave(&priv->txfidx_lock, flags);
	if (priv->mpt_txfidx_tail < 0) {
		/* No free tx context: back-pressure the stack. */
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&priv->txfidx_lock, flags);

		printk (KERN_ERR "%s: no tx context available: %u\n",
			__FUNCTION__, priv->mpt_txfidx_tail);
		return 1;
	}

	mf = mpt_get_msg_frame(LanCtx, mpt_dev);
	if (mf == NULL) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&priv->txfidx_lock, flags);

		printk (KERN_ERR "%s: Unable to alloc request frame\n",
			__FUNCTION__);
		return 1;
	}

	ctx = priv->mpt_txfidx[priv->mpt_txfidx_tail--];
	spin_unlock_irqrestore(&priv->txfidx_lock, flags);

//	dioprintk((KERN_INFO MYNAM ": %s/%s: Creating new msg frame (send).\n",
//			IOC_AND_NETDEV_NAMES_s_s(dev)));

	pSendReq = (LANSendRequest_t *) mf;

	/* Set the mac.raw pointer, since this apparently isn't getting
	 * done before we get the skb. Pull the data pointer past the mac data.
	 */
	skb->mac.raw = skb->data;
	skb_pull(skb, 12);

	dma = pci_map_single(mpt_dev->pcidev, skb->data, skb->len,
			     PCI_DMA_TODEVICE);

	/* Record skb/dma/len so the completion paths can unmap and free. */
	priv->SendCtl[ctx].skb = skb;
	priv->SendCtl[ctx].dma = dma;
	priv->SendCtl[ctx].len = skb->len;

	/* Message Header */
	pSendReq->Reserved    = 0;
	pSendReq->Function    = MPI_FUNCTION_LAN_SEND;
	pSendReq->ChainOffset = 0;
	pSendReq->Reserved2   = 0;
	pSendReq->MsgFlags    = 0;
	pSendReq->PortNumber  = priv->pnum;

	/* Transaction Context Element */
	pTrans = (SGETransaction32_t *) pSendReq->SG_List;

	/* No Flags, 8 bytes of Details, 32bit Context (bloody turbo replies) */
	pTrans->ContextSize   = sizeof(u32);
	pTrans->DetailsLength = 2 * sizeof(u32);
	pTrans->Flags         = 0;
	pTrans->TransactionContext[0] = cpu_to_le32(ctx);

//	dioprintk((KERN_INFO MYNAM ": %s/%s: BC = %08x, skb = %p, buff = %p\n",
//			IOC_AND_NETDEV_NAMES_s_s(dev),
//			ctx, skb, skb->data));

#ifdef QLOGIC_NAA_WORKAROUND
{
	struct NAA_Hosed *nh;

	/* Munge the NAA for Tx packets to QLogic boards, which don't follow
	   RFC 2625. The longer I look at this, the more my opinion of Qlogic
	   drops. */
	read_lock_irq(&bad_naa_lock);
	for (nh = mpt_bad_naa; nh != NULL; nh=nh->next) {
		if ((nh->ieee[0] == skb->mac.raw[0]) &&
		    (nh->ieee[1] == skb->mac.raw[1]) &&
		    (nh->ieee[2] == skb->mac.raw[2]) &&
		    (nh->ieee[3] == skb->mac.raw[3]) &&
		    (nh->ieee[4] == skb->mac.raw[4]) &&
		    (nh->ieee[5] == skb->mac.raw[5])) {
			cur_naa = nh->NAA;
			dlprintk ((KERN_INFO "mptlan/sdu_send: using NAA value "
				  "= %04x.\n", cur_naa));
			break;
		}
	}
	read_unlock_irq(&bad_naa_lock);
}
#endif

	/* Details words carry the NAA plus the 6-byte destination MAC taken
	 * from the (already pulled) header via mac.raw. */
	pTrans->TransactionDetails[0] = cpu_to_le32((cur_naa         << 16) |
						    (skb->mac.raw[0] <<  8) |
						    (skb->mac.raw[1] <<  0));
	pTrans->TransactionDetails[1] = cpu_to_le32((skb->mac.raw[2] << 24) |
						    (skb->mac.raw[3] << 16) |
						    (skb->mac.raw[4] <<  8) |
						    (skb->mac.raw[5] <<  0));

	pSimple = (SGESimple64_t *) &pTrans->TransactionDetails[2];

	/* If we ever decide to send more than one Simple SGE per LANSend, then
	   we will need to make sure that LAST_ELEMENT only gets set on the
	   last one. Otherwise, bad voodoo and evil funkiness will commence. */
	pSimple->FlagsLength = cpu_to_le32(
			((MPI_SGE_FLAGS_LAST_ELEMENT |
			  MPI_SGE_FLAGS_END_OF_BUFFER |
			  MPI_SGE_FLAGS_SIMPLE_ELEMENT |
			  MPI_SGE_FLAGS_SYSTEM_ADDRESS |
			  MPI_SGE_FLAGS_HOST_TO_IOC |
			  MPI_SGE_FLAGS_64_BIT_ADDRESSING |
			  MPI_SGE_FLAGS_END_OF_LIST) << MPI_SGE_FLAGS_SHIFT) |
			skb->len);
	pSimple->Address.Low = cpu_to_le32((u32) dma);
	/* High dword of the 64-bit SGE address, only meaningful when
	 * dma_addr_t is wider than 32 bits. */
	if (sizeof(dma_addr_t) > sizeof(u32))
		pSimple->Address.High = cpu_to_le32((u32) ((u64) dma >> 32));
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -