velocity_main.c
        if (pTDInfo->skb_dma[k] && (pTDInfo->skb_dma[k] != pTDInfo->buf_dma)) {
            pci_unmap_single(pInfo->pcid, pTDInfo->skb_dma[k],
                pTDInfo->skb->len, PCI_DMA_TODEVICE);
            pTDInfo->skb_dma[k] = (dma_addr_t)NULL;
        }

        if (pTDInfo->skb) {
            dev_kfree_skb(pTDInfo->skb);
            pTDInfo->skb = NULL;
        }
    }

    if (pInfo->apTDInfos[j]) {
        kfree(pInfo->apTDInfos[j]);
        pInfo->apTDInfos[j] = NULL;
    }
    }
}

/*-----------------------------------------------------------------*/

static int velocity_rx_srv(PVELOCITY_INFO pInfo, int status)
{
    PRX_DESC                    pRD;
    struct net_device_stats*    pStats = &pInfo->stats;
    //PMAC_REGS                 pMacRegs = pInfo->pMacRegs;
    int                         iCurrRDIdx = pInfo->hw.iCurrRDIdx;
    int                         works = 0;
    U16                         wRSR;

    while (TRUE) {
        pRD = &(pInfo->hw.aRDRing[iCurrRDIdx]);

        if ((pInfo->aRDInfo[iCurrRDIdx]).skb == NULL) {
            if (!velocity_alloc_rx_buf(pInfo, iCurrRDIdx))
                break;
        }

        if (works++ > INT_WORKS_DEF)
            break;

        if (pRD->rdesc0 & cpu_to_le32(RDESC0_OWN))
            break;

        pInfo->adwRMONStats[RMON_Octets] +=
            (U16)((cpu_to_le32(pRD->rdesc0) >> 16) & 0x00003fffL);

        wRSR = (U16)(cpu_to_le32(pRD->rdesc0));

        // don't drop CE or RL error frames even though RXOK is off
        if ((wRSR & RSR_RXOK) ||
            (!(wRSR & RSR_RXOK) && (wRSR & (RSR_CE | RSR_RL)))) {
            if (velocity_receive_frame(pInfo, iCurrRDIdx)) {
                if (!velocity_alloc_rx_buf(pInfo, iCurrRDIdx)) {
                    VELOCITY_PRT(msglevel, MSG_LEVEL_ERR, KERN_ERR
                        "%s: can not allocate rx buf\n", pInfo->dev->name);
                    break;
                }
            }
            else {
                pStats->rx_dropped++;
            }
        }
        else {
            if (wRSR & RSR_CRC)
                pStats->rx_crc_errors++;
            if (wRSR & RSR_FAE)
                pStats->rx_frame_errors++;

            pStats->rx_dropped++;
        }

        // hand ownership of every group of 4 RDs back to the NIC at once
        if ((iCurrRDIdx % 4) == 3) {
            int i, iPrevRDIdx = iCurrRDIdx;
            for (i = 0; i < 4; i++) {
                pRD = &(pInfo->hw.aRDRing[iPrevRDIdx]);
                pRD->rdesc0 |= cpu_to_le32(RDESC0_OWN);
                SUB_ONE_WITH_WRAP_AROUND(iPrevRDIdx, pInfo->hw.sOpts.nRxDescs);
            }
            CSR_WRITE_2(&pInfo->hw, 4, MAC_REG_RBRDU);
        }

        pInfo->dev->last_rx = jiffies;

        ADD_ONE_WITH_WRAP_AROUND(iCurrRDIdx, pInfo->hw.sOpts.nRxDescs);
    }

    pInfo->hw.iCurrRDIdx = iCurrRDIdx;
    VAR_USED(pStats);
    return works;
}

static inline void
velocity_rx_csum(PRX_DESC pRD, struct sk_buff* skb)
{
    U8 byCSM;

    skb->ip_summed = CHECKSUM_NONE;

    byCSM = (U8)(cpu_to_le32(pRD->rdesc1) >> 16);

    if ((byCSM & CSM_IPKT) && (byCSM & CSM_IPOK)) {
        if ((byCSM & CSM_TCPKT) || (byCSM & CSM_UDPKT)) {
            if (!(byCSM & CSM_TUPOK))
                return;
            skb->ip_summed = CHECKSUM_UNNECESSARY;
        }
    }
}

static BOOL
velocity_receive_frame(PVELOCITY_INFO pInfo, int idx)
{
    PVELOCITY_RD_INFO   pRDInfo = &(pInfo->aRDInfo[idx]);
    PRX_DESC            pRD = &(pInfo->hw.aRDRing[idx]);
    struct sk_buff*     skb;
    U16                 wRSR, wLength;

    wRSR = (U16)(cpu_to_le32(pRD->rdesc0));
    //wLength = (U16)((cpu_to_le32(pRD->rdesc0) >> 16) & 0x00003fffL);
    wLength = VELOCITY_GET_RD_PACKET_SIZE(pRD);

    if (wRSR & (RSR_STP | RSR_EDP)) {
        VELOCITY_PRT(msglevel, MSG_LEVEL_VERBOSE, KERN_NOTICE
            "%s: the received frame spans multiple RDs\n", pInfo->dev->name);
        pInfo->stats.rx_length_errors++;
        return FALSE;
    }

    if (wRSR & RSR_MAR)
        pInfo->stats.multicast++;

    if (wRSR & RSR_BAR)
        pInfo->adwRMONStats[RMON_BroadcastPkts]++;

    skb = pRDInfo->skb;
    skb->dev = pInfo->dev;

    pci_unmap_single(pInfo->pcid, pRDInfo->skb_dma,
        pInfo->hw.rx_buf_sz, PCI_DMA_FROMDEVICE);
    pRDInfo->skb_dma = (dma_addr_t)NULL;
    pRDInfo->skb = NULL;

    // shift the payload by 2 bytes so the IP header is 4-byte aligned
    if (pInfo->hw.flags & VELOCITY_FLAGS_IP_ALIGN) {
        int i;
        for (i = wLength; i >= 0; i--)
            *(skb->data + i + 2) = *(skb->data + i);
        skb->data += 2;
        skb->tail += 2;
    }

    skb_put(skb, (wLength - 4));
    skb->protocol = eth_type_trans(skb, skb->dev);

    // drop frames that do not meet IEEE 802.3 length rules
    if (pInfo->hw.flags & VELOCITY_FLAGS_VAL_PKT_LEN) {
        if (wRSR & RSR_RL) {
            pInfo->stats.rx_length_errors++;
            return FALSE;
        }
    }

    velocity_rx_csum(pRD, skb);

    pInfo->stats.rx_bytes += skb->len;
    netif_rx(skb);

    return TRUE;
}

static BOOL
velocity_alloc_rx_buf(PVELOCITY_INFO pInfo, int idx)
{
    PRX_DESC            pRD = &(pInfo->hw.aRDRing[idx]);
    PVELOCITY_RD_INFO   pRDInfo = &(pInfo->aRDInfo[idx]);

    pRDInfo->skb = dev_alloc_skb(pInfo->hw.rx_buf_sz + 64);
    if (pRDInfo->skb == NULL)
        return FALSE;

    ASSERT(pRDInfo->skb);

    // align the data buffer on a 64-byte boundary
    skb_reserve(pRDInfo->skb, 64 - ((unsigned long)pRDInfo->skb->tail & 63));
    pRDInfo->skb->dev = pInfo->dev;
    pRDInfo->skb_dma = pci_map_single(pInfo->pcid, pRDInfo->skb->tail,
        pInfo->hw.rx_buf_sz, PCI_DMA_FROMDEVICE);

    *((PU32)&(pRD->rdesc0)) = 0;

    VELOCITY_SET_RD_BUFFER_SIZE(pRD, pInfo->hw.rx_buf_sz);
    pRD->rdesc3 |= cpu_to_le32(RDESC3_INTCTL);
    pRD->dwBufAddrLo = cpu_to_le32(pRDInfo->skb_dma);
    // mask off RD data buffer address high to zero
    pRD->rdesc3 &= cpu_to_le32(0xffff0000L);

    return TRUE;
}

static int
velocity_tx_srv(PVELOCITY_INFO pInfo, U32 status)
{
    PTX_DESC                    pTD;
    int                         iQNo;
    BOOL                        bFull = FALSE;
    int                         idx;
    int                         works = 0;
    PVELOCITY_TD_INFO           pTDInfo;
    struct net_device_stats*    pStats = &pInfo->stats;
    U16                         wTSR;

    for (iQNo = 0; iQNo < pInfo->hw.nTxQueues; iQNo++) {

        for (idx = pInfo->hw.aiTailTDIdx[iQNo];
             pInfo->hw.iTDUsed[iQNo] > 0;
             idx = (idx + 1) % pInfo->hw.sOpts.nTxDescs) {

            // Get Tx Descriptor
            pTD = &(pInfo->hw.apTDRings[iQNo][idx]);
            pTDInfo = &(pInfo->apTDInfos[iQNo][idx]);

            if (pTD->tdesc0 & cpu_to_le32(TDESC0_OWN)) {
                break;
            }

            if (works++ > INT_WORKS_DEF) {
                break;
            }

            wTSR = (U16)cpu_to_le32(pTD->tdesc0);

            if (wTSR & TSR0_TERR) {
                pStats->tx_errors++;
                pStats->tx_dropped++;
                if (wTSR & TSR0_CDH)
                    pStats->tx_heartbeat_errors++;
                if (wTSR & TSR0_CRS)
                    pStats->tx_carrier_errors++;
                if (wTSR & TSR0_ABT)
                    pStats->tx_aborted_errors++;
                if (wTSR & TSR0_OWC)
                    pStats->tx_window_errors++;
            }
            else {
                pStats->tx_packets++;
                pStats->tx_bytes += pTDInfo->skb->len;
            }

            velocity_free_tx_buf(pInfo, pTDInfo, pTD);
            pInfo->hw.iTDUsed[iQNo]--;
        } // for (idx)

        pInfo->hw.aiTailTDIdx[iQNo] = idx;

        if (AVAIL_TD(&pInfo->hw, iQNo) < 1) {
            bFull = TRUE;
        }
    } // for (iQNo)

    if (netif_queue_stopped(pInfo->dev) && (bFull == FALSE) &&
        (!(pInfo->hw.mii_status & VELOCITY_LINK_FAIL))) {
        netif_wake_queue(pInfo->dev);
    }

    return works;
}

static void
velocity_error(PVELOCITY_INFO pInfo, int status)
{
    struct net_device* dev = pInfo->dev;

    // (1) TXSTLI
    if (status & ISR_TXSTLI) {
        printk("TD structure error TDindex=%X\n",
            CSR_READ_2(&pInfo->hw, MAC_REG_TDIDX0));

        BYTE_REG_BITS_ON(&pInfo->hw, TXESR_TDSTR, MAC_REG_TXE_SR);
        CSR_WRITE_2(&pInfo->hw, TRDCSR_RUN, MAC_REG_TDCSR_CLR);
        netif_stop_queue(dev);
    }

    // (2) SRCI
    if (status & ISR_SRCI) {
        if (pInfo->hw.sOpts.spd_dpx == SPD_DPX_AUTO) {
            pInfo->hw.mii_status = check_connectiontype(&pInfo->hw);

            // if it's a 3119, disable frame bursting in half-duplex mode
            // and enable it in full-duplex mode
            if (pInfo->hw.byRevId < REV_ID_VT3216_A0) {
                if (pInfo->hw.mii_status & VELOCITY_DUPLEX_FULL)
                    BYTE_REG_BITS_ON(&pInfo->hw, TCR_TB2BDIS, MAC_REG_TCR);
                else
                    BYTE_REG_BITS_OFF(&pInfo->hw, TCR_TB2BDIS, MAC_REG_TCR);
            }

            // only enable CD heart beat counter in 10HD mode
            if (!(pInfo->hw.mii_status & VELOCITY_DUPLEX_FULL) &&
                (pInfo->hw.mii_status & VELOCITY_SPEED_10)) {
                BYTE_REG_BITS_OFF(&pInfo->hw, TESTCFG_HBDIS, MAC_REG_TESTCFG);
            }
            else {
                BYTE_REG_BITS_ON(&pInfo->hw, TESTCFG_HBDIS, MAC_REG_TESTCFG);
            }

            //--------------------------------------------------------
            // [1.18] Adaptive Interrupt
            if (pInfo->hw.byRevId >= REV_ID_VT3216_A0) {
                if ((pInfo->hw.mii_status & VELOCITY_SPEED_1000) ||
                    (pInfo->hw.mii_status & VELOCITY_SPEED_100)) {
                    CSR_WRITE_1(&pInfo->hw, 0x59, MAC_REG_TQETMR); // 100us
                    CSR_WRITE_1(&pInfo->hw, 0x14, MAC_REG_RQETMR); // 20us
                }
                else {
                    CSR_WRITE_1(&pInfo->hw, 0x00, MAC_REG_TQETMR);
                    CSR_WRITE_1(&pInfo->hw, 0x00, MAC_REG_RQETMR);
                }
            }
            //--------------------------------------------------------
        }

        // get link status from PHYSR0
        pInfo->hw.mii_status = check_connectiontype(&pInfo->hw);

        velocity_print_link_status(&pInfo->hw);
        enable_flow_control_ability(&pInfo->hw);

        // re-enable auto-polling because SRCI will disable auto-polling
        EnableMiiAutoPoll(&pInfo->hw);

        if (pInfo->hw.mii_status & VELOCITY_LINK_FAIL) {
            netif_carrier_off(dev);
            netif_stop_queue(dev);
        }
        else {
            netif_carrier_on(dev);
            netif_wake_queue(dev);
        }
    } // ISR_SRCI

    // (3) MIBFI
    if (status & ISR_MIBFI)
        velocity_update_hw_mibs(&pInfo->hw);

    // (4) LSTEI: RD used up, re-wake RD ring
    if (status & ISR_LSTEI) {
        mac_rx_queue_wake(&pInfo->hw);
    }
}

static void
velocity_free_tx_buf(PVELOCITY_INFO pInfo, PVELOCITY_TD_INFO pTDInfo, PTX_DESC pTD)
{
    struct sk_buff* skb = pTDInfo->skb;
    int i;
#ifdef VELOCITY_ZERO_COPY_SUPPORT
    WORD wBufSize;
#endif

    // Don't unmap the pre-allocated tx_bufs
    if (pTDInfo->skb_dma && (pTDInfo->skb_dma[0] != pTDInfo->buf_dma)) {
        for (i = 0; i < pTDInfo->nskb_dma; i++) {
#ifdef VELOCITY_ZERO_COPY_SUPPORT
            wBufSize = VELOCITY_GET_TD_BUFFER_SIZE(pTD->aTdBufs[i]);
            pci_unmap_single(pInfo->pcid, pTDInfo->skb_dma[i], wBufSize,
                PCI_DMA_TODEVICE);
#else
            pci_unmap_single(pInfo->pcid, pTDInfo->skb_dma[i], skb->len,
                PCI_DMA_TODEVICE);
#endif
        }
    }

    dev_kfree_skb_irq(skb);

    for (i = 0; i < pTDInfo->nskb_dma; i++)
        pTDInfo->skb_dma[i] = 0;

    pTDInfo->skb = 0;
}

static int
velocity_open(struct net_device *dev)
{
    PVELOCITY_INFO pInfo = (PVELOCITY_INFO)dev->priv;
    int i;

    pInfo->hw.rx_buf_sz = (dev->mtu <= 1504 ? PKT_BUF_SZ : dev->mtu + 32);

    if (!velocity_init_rings(pInfo))
        return -ENOMEM;

    if (!velocity_init_rd_ring(pInfo))
        return -ENOMEM;

    if (!velocity_init_td_ring(pInfo))
        return -ENOMEM;

    velocity_init_pci(pInfo);

    velocity_init_adapter(pInfo, VELOCITY_INIT_COLD);

    i = request_irq(pInfo->pcid->irq, &velocity_intr, SA_SHIRQ, dev->name, dev);
    if (i)
        return i;

    mac_enable_int(&pInfo->hw);

    netif_start_queue(dev);

    pInfo->hw.flags |= VELOCITY_FLAGS_OPENED;

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
    MOD_INC_USE_COUNT;
#endif

    return 0;
}

static int
velocity_change_mtu(struct net_device *dev, int new_mtu)
{
    PVELOCITY_INFO  pInfo = (PVELOCITY_INFO)dev->priv;
    unsigned long   flags;
    int             oldmtu = dev->mtu;

    if ((new_mtu < VELOCITY_MIN_MTU) || (new_mtu > VELOCITY_MAX_MTU)) {
        VELOCITY_PRT(msglevel, MSG_LEVEL_ERR, KERN_NOTICE
            "%s: Invalid MTU\n", pInfo->dev->name);
        return -EINVAL;
    }

    if (new_mtu != oldmtu) {
        spin_lock_irqsave(&pInfo->lock, flags);

        netif_stop_queue(dev);
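Note: the ring-index helpers ADD_ONE_WITH_WRAP_AROUND and SUB_ONE_WITH_WRAP_AROUND used in velocity_rx_srv and velocity_tx_srv are defined elsewhere in the driver sources (not shown in this excerpt). A minimal sketch of the modular increment/decrement they are assumed to perform, for readers without the full tree:

/* Assumed definitions -- the real macros live in the driver's headers; this
 * sketch only illustrates the wrap-around behaviour relied on above. */
#define ADD_ONE_WITH_WRAP_AROUND(uVar, uModulo) do {    \
        if ((uVar) >= ((uModulo) - 1))                  \
            (uVar) = 0;                                 \
        else                                            \
            (uVar)++;                                   \
    } while (0)

#define SUB_ONE_WITH_WRAP_AROUND(uVar, uModulo) do {    \
        if ((uVar) == 0)                                \
            (uVar) = (uModulo) - 1;                     \
        else                                            \
            (uVar)--;                                   \
    } while (0)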