📄 skge.c
        /* Stop port */
        spin_lock_irqsave(&pAC->TxPort[pNet->PortNr]
            [TX_PRIO_LOW].TxDesRingLock, Flags);
        SkGeStopPort(pAC, pAC->IoBase, pNet->PortNr,
            SK_STOP_ALL, SK_HARD_RST);
        spin_unlock_irqrestore(&pAC->TxPort[pNet->PortNr]
            [TX_PRIO_LOW].TxDesRingLock, Flags);
    }

    if (pAC->RlmtNets == 1) {
        /* clear all descriptor rings */
        for (i = 0; i < pAC->GIni.GIMacsFound; i++) {
            ReceiveIrq(pAC, &pAC->RxPort[i], SK_TRUE);
            ClearRxRing(pAC, &pAC->RxPort[i]);
            ClearTxRing(pAC, &pAC->TxPort[i][TX_PRIO_LOW]);
        }
    } else {
        /* clear port descriptor rings */
        ReceiveIrq(pAC, &pAC->RxPort[pNet->PortNr], SK_TRUE);
        ClearRxRing(pAC, &pAC->RxPort[pNet->PortNr]);
        ClearTxRing(pAC, &pAC->TxPort[pNet->PortNr][TX_PRIO_LOW]);
    }

    SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY,
        ("SkGeClose: done "));

    SK_MEMSET(&(pAC->PnmiBackup), 0, sizeof(SK_PNMI_STRUCT_DATA));
    SK_MEMCPY(&(pAC->PnmiBackup), &(pAC->PnmiStruct),
        sizeof(SK_PNMI_STRUCT_DATA));

    pAC->MaxPorts--;
    pNet->Up = 0;

    return (0);
} /* SkGeClose */


/*****************************************************************************
 *
 *  SkGeXmit - Linux frame transmit function
 *
 * Description:
 *  The system calls this function to send frames onto the wire.
 *  It puts the frame in the tx descriptor ring. If the ring is
 *  full, the 'tbusy' flag is set.
 *
 * Returns:
 *  0, if everything is ok
 *  !=0, on error
 *  WARNING: returning 1 in 'tbusy' case caused system crashes (double
 *  allocated skb's) !!!
 */
static int SkGeXmit(struct sk_buff *skb, struct SK_NET_DEVICE *dev)
{
DEV_NET *pNet;
SK_AC   *pAC;
int     Rc; /* return code of XmitFrame */

    pNet = netdev_priv(dev);
    pAC = pNet->pAC;

    if ((!skb_shinfo(skb)->nr_frags) ||
        (pAC->GIni.GIChipId == CHIP_ID_GENESIS)) {
        /* Don't activate scatter-gather and hardware checksum */

        if (pAC->RlmtNets == 2)
            Rc = XmitFrame(
                pAC,
                &pAC->TxPort[pNet->PortNr][TX_PRIO_LOW],
                skb);
        else
            Rc = XmitFrame(
                pAC,
                &pAC->TxPort[pAC->ActivePort][TX_PRIO_LOW],
                skb);
    } else {
        /* scatter-gather and hardware TCP checksumming enabled */
        if (pAC->RlmtNets == 2)
            Rc = XmitFrameSG(
                pAC,
                &pAC->TxPort[pNet->PortNr][TX_PRIO_LOW],
                skb);
        else
            Rc = XmitFrameSG(
                pAC,
                &pAC->TxPort[pAC->ActivePort][TX_PRIO_LOW],
                skb);
    }

    /* Transmitter out of resources? */
    if (Rc <= 0) {
        netif_stop_queue(dev);
    }

    /* If not taken, give buffer ownership back to the
     * queueing layer.
     */
    if (Rc < 0)
        return (1);

    dev->trans_start = jiffies;
    return (0);
} /* SkGeXmit */

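/*
 * Note on the return handling in SkGeXmit(): XmitFrame() and XmitFrameSG()
 * return the number of bytes queued on success, 0 when the frame was
 * consumed but the ring is now full, and a negative value when the frame
 * could not be placed into the ring at all. In the first two cases
 * SkGeXmit() returns 0; only in the last case does it return 1, so that
 * the network stack keeps ownership of the skb and retries later. Since a
 * negative return means the skb was never attached to a descriptor, this
 * should not run into the double-allocation problem mentioned in the
 * WARNING above.
 */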

/*****************************************************************************
 *
 *  XmitFrame - fill one socket buffer into the transmit ring
 *
 * Description:
 *  This function puts a message into the transmit descriptor ring
 *  if there is a descriptor left.
 *  Linux skb's consist of only one continuous buffer.
 *  The first step locks the ring. It is held locked
 *  all the time to avoid problems with SWITCH_../PORT_RESET.
 *  Then the descriptor is allocated.
 *  The second part is linking the buffer to the descriptor.
 *  At the very last, the Control field of the descriptor
 *  is made valid for the BMU and a start TX command is given
 *  if necessary.
 *
 * Returns:
 *  > 0 - on success: the number of bytes in the message
 *  = 0 - on resource shortage: this frame sent or dropped, now
 *        the ring is full ( -> set tbusy)
 *  < 0 - on failure: other problems ( -> return failure to upper layers)
 */
static int XmitFrame(
SK_AC          *pAC,      /* pointer to adapter context           */
TX_PORT        *pTxPort,  /* pointer to struct of port to send to */
struct sk_buff *pMessage) /* pointer to send-message              */
{
    TXD           *pTxd;   /* the txd to fill */
    TXD           *pOldTxd;
    unsigned long  Flags;
    SK_U64         PhysAddr;
    int            BytesSend = pMessage->len;

    SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_TX_PROGRESS, ("X"));

    spin_lock_irqsave(&pTxPort->TxDesRingLock, Flags);
#ifndef USE_TX_COMPLETE
    FreeTxDescriptors(pAC, pTxPort);
#endif
    if (pTxPort->TxdRingFree == 0) {
        /*
        ** Not enough free descriptors in the ring at the moment.
        ** Maybe freeing some old ones helps?
        */
        FreeTxDescriptors(pAC, pTxPort);
        if (pTxPort->TxdRingFree == 0) {
            spin_unlock_irqrestore(&pTxPort->TxDesRingLock, Flags);
            SK_PNMI_CNT_NO_TX_BUF(pAC, pTxPort->PortIndex);
            SK_DBG_MSG(NULL, SK_DBGMOD_DRV,
                SK_DBGCAT_DRV_TX_PROGRESS,
                ("XmitFrame failed\n"));
            /*
            ** The desired message can not be sent.
            ** Because tbusy seems to be set, the message
            ** should not be freed here. It will be used
            ** by the scheduler of the ethernet handler.
            */
            return (-1);
        }
    }

    /*
    ** If the passed socket buffer is smaller than the minimum Ethernet
    ** frame size of 60 bytes, copy everything into a new buffer and fill
    ** all bytes between the original packet end and the new packet end
    ** of 60 with 0x00. This is to resolve faulty padding by the HW with
    ** 0xaa bytes.
    */
    if (BytesSend < C_LEN_ETHERNET_MINSIZE) {
        if ((pMessage = skb_padto(pMessage, C_LEN_ETHERNET_MINSIZE)) == NULL) {
            spin_unlock_irqrestore(&pTxPort->TxDesRingLock, Flags);
            return 0;
        }
        pMessage->len = C_LEN_ETHERNET_MINSIZE;
    }
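    /*
    ** Note: with the 2.6-era kernels this driver targets, skb_padto()
    ** returns a (possibly reallocated) buffer that has been zero-padded
    ** to the requested length; on allocation failure it frees the
    ** original skb and returns NULL, which is why the failure path above
    ** reports the frame as consumed (return 0) instead of an error.
    */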
    /*
    ** Advance the head counter behind the descriptor needed for this
    ** frame, so that the needed descriptor is reserved from now on. The
    ** next action will be to add the passed buffer to the TX-descriptor.
    */
    pTxd = pTxPort->pTxdRingHead;
    pTxPort->pTxdRingHead = pTxd->pNextTxd;
    pTxPort->TxdRingFree--;

#ifdef SK_DUMP_TX
    DumpMsg(pMessage, "XmitFrame");
#endif

    /*
    ** First step is to map the data to be sent via the adapter onto
    ** the DMA memory. Kernel 2.2 uses virt_to_bus(), but kernels 2.4
    ** and 2.6 need to use pci_map_page() for that mapping.
    */
    PhysAddr = (SK_U64) pci_map_page(pAC->PciDev,
        virt_to_page(pMessage->data),
        ((unsigned long) pMessage->data & ~PAGE_MASK),
        pMessage->len,
        PCI_DMA_TODEVICE);
    pTxd->VDataLow  = (SK_U32) (PhysAddr & 0xffffffff);
    pTxd->VDataHigh = (SK_U32) (PhysAddr >> 32);
    pTxd->pMBuf     = pMessage;

    if (pMessage->ip_summed == CHECKSUM_HW) {
        u16 hdrlen = pMessage->h.raw - pMessage->data;
        u16 offset = hdrlen + pMessage->csum;

        if ((pMessage->h.ipiph->protocol == IPPROTO_UDP) &&
            (pAC->GIni.GIChipRev == 0) &&
            (pAC->GIni.GIChipId == CHIP_ID_YUKON)) {
            pTxd->TBControl = BMU_TCP_CHECK;
        } else {
            pTxd->TBControl = BMU_UDP_CHECK;
        }

        pTxd->TcpSumOfs = 0;
        pTxd->TcpSumSt  = hdrlen;
        pTxd->TcpSumWr  = offset;

        pTxd->TBControl |= BMU_OWN | BMU_STF |
                           BMU_SW  | BMU_EOF |
#ifdef USE_TX_COMPLETE
                           BMU_IRQ_EOF |
#endif
                           pMessage->len;
    } else {
        pTxd->TBControl = BMU_OWN | BMU_STF | BMU_CHECK |
                          BMU_SW  | BMU_EOF |
#ifdef USE_TX_COMPLETE
                          BMU_IRQ_EOF |
#endif
                          pMessage->len;
    }

    /*
    ** If previous descriptor already done, give TX start cmd
    */
    pOldTxd = xchg(&pTxPort->pTxdRingPrev, pTxd);
    if ((pOldTxd->TBControl & BMU_OWN) == 0) {
        SK_OUT8(pTxPort->HwAddr, Q_CSR, CSR_START);
    }

    /*
    ** After releasing the lock, the skb may immediately be free'd
    */
    spin_unlock_irqrestore(&pTxPort->TxDesRingLock, Flags);
    if (pTxPort->TxdRingFree != 0) {
        return (BytesSend);
    } else {
        return (0);
    }
} /* XmitFrame */

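/*
 * Checksum offload note for XmitFrame() and XmitFrameSG(): with CHECKSUM_HW
 * the stack has left the TCP/UDP checksum field unfilled and, in these
 * kernel versions, skb->csum holds the offset of that checksum field within
 * the transport header. The descriptor is therefore programmed with
 * TcpSumSt = offset of the transport header from the start of the frame
 * (where the hardware starts summing) and TcpSumWr = offset at which the
 * computed checksum is written back. For UDP frames on Yukon revision 0 the
 * TCP opcode is used instead, because the UDP opcode does not work on that
 * hardware revision (see the comment in XmitFrameSG() below).
 */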

/*****************************************************************************
 *
 *  XmitFrameSG - fill one socket buffer into the transmit ring
 *  (use SG and TCP/UDP hardware checksumming)
 *
 * Description:
 *  This function puts a message into the transmit descriptor ring
 *  if there is a descriptor left.
 *
 * Returns:
 *  > 0 - on success: the number of bytes in the message
 *  = 0 - on resource shortage: this frame sent or dropped, now
 *        the ring is full ( -> set tbusy)
 *  < 0 - on failure: other problems ( -> return failure to upper layers)
 */
static int XmitFrameSG(
SK_AC          *pAC,      /* pointer to adapter context           */
TX_PORT        *pTxPort,  /* pointer to struct of port to send to */
struct sk_buff *pMessage) /* pointer to send-message              */
{
    TXD           *pTxd;
    TXD           *pTxdFst;
    TXD           *pTxdLst;
    int            CurrFrag;
    int            BytesSend;
    skb_frag_t    *sk_frag;
    SK_U64         PhysAddr;
    unsigned long  Flags;
    SK_U32         Control;

    spin_lock_irqsave(&pTxPort->TxDesRingLock, Flags);
#ifndef USE_TX_COMPLETE
    FreeTxDescriptors(pAC, pTxPort);
#endif
    if ((skb_shinfo(pMessage)->nr_frags + 1) > pTxPort->TxdRingFree) {
        FreeTxDescriptors(pAC, pTxPort);
        if ((skb_shinfo(pMessage)->nr_frags + 1) > pTxPort->TxdRingFree) {
            spin_unlock_irqrestore(&pTxPort->TxDesRingLock, Flags);
            SK_PNMI_CNT_NO_TX_BUF(pAC, pTxPort->PortIndex);
            SK_DBG_MSG(NULL, SK_DBGMOD_DRV,
                SK_DBGCAT_DRV_TX_PROGRESS,
                ("XmitFrameSG failed - Ring full\n"));
            /* this message can not be sent now */
            return (-1);
        }
    }

    pTxd    = pTxPort->pTxdRingHead;
    pTxdFst = pTxd;
    pTxdLst = pTxd;
    BytesSend = 0;

    /*
    ** Map the first fragment (header) into the DMA-space
    */
    PhysAddr = (SK_U64) pci_map_page(pAC->PciDev,
        virt_to_page(pMessage->data),
        ((unsigned long) pMessage->data & ~PAGE_MASK),
        skb_headlen(pMessage),
        PCI_DMA_TODEVICE);

    pTxd->VDataLow  = (SK_U32) (PhysAddr & 0xffffffff);
    pTxd->VDataHigh = (SK_U32) (PhysAddr >> 32);

    /*
    ** Does the HW need to evaluate checksum for TCP or UDP packets?
    */
    if (pMessage->ip_summed == CHECKSUM_HW) {
        u16 hdrlen = pMessage->h.raw - pMessage->data;
        u16 offset = hdrlen + pMessage->csum;

        Control = BMU_STFWD;

        /*
        ** We have to use the opcode for tcp here, because the
        ** opcode for udp is not working in the hardware yet
        ** (Revision 2.0)
        */
        if ((pMessage->h.ipiph->protocol == IPPROTO_UDP) &&
            (pAC->GIni.GIChipRev == 0) &&
            (pAC->GIni.GIChipId == CHIP_ID_YUKON)) {
            Control |= BMU_TCP_CHECK;
        } else {
            Control |= BMU_UDP_CHECK;
        }

        pTxd->TcpSumOfs = 0;
        pTxd->TcpSumSt  = hdrlen;
        pTxd->TcpSumWr  = offset;
    } else
        Control = BMU_CHECK | BMU_SW;

    pTxd->TBControl = BMU_STF | Control | skb_headlen(pMessage);

    pTxd = pTxd->pNextTxd;
    pTxPort->TxdRingFree--;
    BytesSend += skb_headlen(pMessage);

    /*
    ** Browse over all SG fragments and map each of them into the DMA space
    */
    for (CurrFrag = 0; CurrFrag < skb_shinfo(pMessage)->nr_frags; CurrFrag++) {
        sk_frag = &skb_shinfo(pMessage)->frags[CurrFrag];

        /*
        ** we already have the proper value in entry
        */
        PhysAddr = (SK_U64) pci_map_page(pAC->PciDev,
            sk_frag->page,
            sk_frag->page_offset,
            sk_frag->size,
            PCI_DMA_TODEVICE);

        pTxd->VDataLow  = (SK_U32) (PhysAddr & 0xffffffff);
        pTxd->VDataHigh = (SK_U32) (PhysAddr >> 32);
        pTxd->pMBuf     = pMessage;

        pTxd->TBControl = Control | BMU_OWN | sk_frag->size;

        /*
        ** Do we have the last fragment?
        */
        if ((CurrFrag + 1) == skb_shinfo(pMessage)->nr_frags) {
#ifdef USE_TX_COMPLETE
            pTxd->TBControl |= BMU_EOF | BMU_IRQ_EOF;
#else
            pTxd->TBControl |= BMU_EOF;
#endif
            pTxdFst->TBControl |= BMU_OWN | BMU_SW;
        }

        pTxdLst = pTxd;
        pTxd    = pTxd->pNextTxd;
        pTxPort->TxdRingFree--;
        BytesSend += sk_frag->size;
    }

    /*
    ** If previous descriptor already done, give TX start cmd
    */
    if ((pTxPort->pTxdRingPrev->TBControl & BMU_OWN) == 0) {
        SK_OUT8(pTxPort->HwAddr, Q_CSR, CSR_START);
    }

    pTxPort->pTxdRingPrev = pTxdLst;
    pTxPort->pTxdRingHead = pTxd;

    spin_unlock_irqrestore(&pTxPort->TxDesRingLock, Flags);

    if (pTxPort->TxdRingFree > 0) {
        return (BytesSend);
    } else {
        return (0);
    }
}

/*****************************************************************************
 *
 *  FreeTxDescriptors - release descriptors from the descriptor ring
 *
 * Description:
 *  This function releases descriptors from a transmit ring if they
 *  have been sent by the BMU.
 *  If a descriptor is sent, it can be freed and the message can
 *  be freed, too.
 *  The SOFTWARE controllable bit is used to prevent running around a
 *  completely free ring forever. If this bit is not set in the
 *  frame (by XmitFrame), this frame has never been sent or is
 *  already freed.
 *  The Tx descriptor ring lock must be held while calling this function !!!
 *
 * Returns:
 *  none
 */
static void FreeTxDescriptors(
SK_AC   *pAC,     /* pointer to the adapter context */
TX_PORT *pTxPort) /* pointer to destination port structure */
{
TXD    *pTxd;     /* pointer to the checked descriptor */
TXD    *pNewTail; /* pointer to 'end' of the ring */
SK_U32  Control;  /* TBControl field of descriptor */
SK_U64  PhysAddr; /* address of DMA mapping */

    pNewTail = pTxPort->pTxdRingTail;
    pTxd     = pNewTail;
    /*
    ** loop forever; exits if BMU_SW bit not set in start frame
    ** or BMU_OWN bit set in any frame
    */
    while (1) {
        Control = pTxd->TBControl;
        if ((Control & BMU_SW) == 0) {
            /*
            ** software controllable bit is set in first
            ** fragment when given to BMU. Not set means that
            ** this fragment was never sent or is already
            ** freed ( -> ring completely free now).