📄 velocity_main.c
        velocity_shutdown(&pInfo->hw);
        velocity_free_td_ring(pInfo);
        velocity_free_rd_ring(pInfo);

        dev->mtu = new_mtu;

        if (new_mtu > 8192)
            pInfo->hw.rx_buf_sz = 9 * 1024;
        else if (new_mtu > 4096)
            pInfo->hw.rx_buf_sz = 8192;
        else
            pInfo->hw.rx_buf_sz = 4 * 1024;

        if (!velocity_init_rd_ring(pInfo))
            return -ENOMEM;

        if (!velocity_init_td_ring(pInfo))
            return -ENOMEM;

        velocity_init_adapter(pInfo, VELOCITY_INIT_COLD);

        mac_enable_int(&pInfo->hw);
        netif_start_queue(dev);

        spin_unlock_irqrestore(&pInfo->lock, flags);
    }

    return 0;
}

static int velocity_close(struct net_device *dev)
{
    PVELOCITY_INFO pInfo = netdev_priv(dev);

    netif_stop_queue(dev);
    velocity_shutdown(&pInfo->hw);

    if (pInfo->hw.flags & VELOCITY_FLAGS_WOL_ENABLED)
        velocity_get_ip(pInfo);

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
    MOD_DEC_USE_COUNT;
#endif

    if (dev->irq != 0)
        free_irq(dev->irq, dev);

    velocity_free_td_ring(pInfo);
    velocity_free_rd_ring(pInfo);
    velocity_free_rings(pInfo);

    pInfo->hw.flags &= (~VELOCITY_FLAGS_OPENED);

    return 0;
}
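/*
 * velocity_xmit - queue one sk_buff on TX ring 0.
 *
 * Frames shorter than ETH_ZLEN are copied into the descriptor's
 * pre-allocated bounce buffer and zero-padded to the minimum frame
 * size.  With VELOCITY_ZERO_COPY_SUPPORT, an skb with up to seven
 * elements (linear part plus six fragments) is DMA-mapped in place;
 * anything more fragmented is linearized and mapped as one buffer.
 * The OWN bit is set only after the descriptor is fully built, and
 * the previous descriptor's queue bit is then set to chain it to
 * this one before the queue is kicked.
 */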
static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
{
    PVELOCITY_INFO pInfo = netdev_priv(dev);
    int iQNo = 0;
    PTX_DESC pTD;
    PVELOCITY_TD_INFO pTDInfo;
    unsigned long flags;
    int iCurrTDIdx;
    int iPrevTDIdx;
#ifdef VELOCITY_ZERO_COPY_SUPPORT
    unsigned int nfrags = skb_shinfo(skb)->nr_frags;
#endif
#ifdef VELOCITY_TSO_SUPPORT
    unsigned int mss = tcp_skb_mss(skb);
#endif

    spin_lock_irqsave(&pInfo->lock, flags);

    iCurrTDIdx = pInfo->hw.aiCurrTDIdx[iQNo];
    pTD = &(pInfo->hw.apTDRings[iQNo][iCurrTDIdx]);
    pTDInfo = &(pInfo->apTDInfos[iQNo][iCurrTDIdx]);

    // Init TDESC0,1
    pTD->tdesc0 = 0x00000000UL;
    pTD->tdesc1 = 0x00000000UL;
    VELOCITY_SET_TD_TCPLS(pTD, TCPLS_NORMAL);
    pTD->tdesc1 |= cpu_to_le32(TCR_TIC);
    pTD->aTdBufs[0].dwBufaddrHi &= cpu_to_le32(~TDTXBUF_QUE);

    if (skb->len < ETH_ZLEN) {
        // packet size is less than 60 bytes
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)
        skb_linearize(skb);
#else
        skb_linearize(skb, GFP_ATOMIC);
#endif
        memcpy(pTDInfo->buf, skb->data, skb->len);
        VELOCITY_SET_TD_PACKET_SIZE(pTD, ETH_ZLEN);
        // padding zero
        memset(pTDInfo->buf + skb->len, 0, ETH_ZLEN - skb->len);

        pTDInfo->skb = skb;
        pTDInfo->skb_dma[0] = pTDInfo->buf_dma;
        pTD->aTdBufs[0].dwBufAddrLo = cpu_to_le32(pTDInfo->skb_dma[0]);
        // mask off TD data buffer address high to zero
        pTD->aTdBufs[0].dwBufaddrHi &= cpu_to_le32(0xffff0000L);
        VELOCITY_SET_TD_BUFFER_SIZE(pTD->aTdBufs[0], ETH_ZLEN);
        pTDInfo->nskb_dma = 1;
        VELOCITY_SET_TD_CMDZ(pTD, 2);
    } else
#ifdef VELOCITY_ZERO_COPY_SUPPORT
    if (nfrags > 0) {
        pTDInfo->skb = skb;
        if (nfrags > 6) {
            // elements > 7 ==> copy to a buffer
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)
            skb_linearize(skb);
#else
            skb_linearize(skb, GFP_ATOMIC);
#endif
            memcpy(pTDInfo->buf, skb->data, skb->len);
            VELOCITY_SET_TD_PACKET_SIZE(pTD, skb->len);
            pTDInfo->skb_dma[0] = pTDInfo->buf_dma;
            pTD->aTdBufs[0].dwBufAddrLo = cpu_to_le32(pTDInfo->skb_dma[0]);
            // mask off TD data buffer address high to zero
            pTD->aTdBufs[0].dwBufaddrHi &= cpu_to_le32(0xffff0000L);
            VELOCITY_SET_TD_BUFFER_SIZE(pTD->aTdBufs[0], skb->len);
            pTDInfo->nskb_dma = 1;
            VELOCITY_SET_TD_CMDZ(pTD, 2);
        } else {
            // elements <= 7
            int i = 0;

            pTDInfo->nskb_dma = 0;
            VELOCITY_SET_TD_PACKET_SIZE(pTD, skb->len);
            pTDInfo->skb_dma[i] = pci_map_single(pInfo->pcid, skb->data,
                    skb->len - skb->data_len, PCI_DMA_TODEVICE);
            pTD->aTdBufs[i].dwBufAddrLo = cpu_to_le32(pTDInfo->skb_dma[i]);
            // mask off TD data buffer address high to zero
            pTD->aTdBufs[i].dwBufaddrHi &= cpu_to_le32(0xffff0000L);
            VELOCITY_SET_TD_BUFFER_SIZE(pTD->aTdBufs[i], skb->len - skb->data_len);

            for (i = 0; i < nfrags; i++) {
                skb_frag_t *frag;
                void *addr;

                frag = &skb_shinfo(skb)->frags[i];
                // page_offset is a byte offset into the page, so it
                // must be applied after page_address()
                addr = (void *)(page_address(frag->page) + frag->page_offset);

                pTDInfo->skb_dma[i + 1] = pci_map_single(pInfo->pcid, addr,
                        frag->size, PCI_DMA_TODEVICE);
                pTD->aTdBufs[i + 1].dwBufAddrLo = cpu_to_le32(pTDInfo->skb_dma[i + 1]);
                // mask off TD data buffer address high to zero
                pTD->aTdBufs[i + 1].dwBufaddrHi &= cpu_to_le32(0xffff0000L);
                VELOCITY_SET_TD_BUFFER_SIZE(pTD->aTdBufs[i + 1], frag->size);
            }
            pTDInfo->nskb_dma = i + 1;
            VELOCITY_SET_TD_CMDZ(pTD, i + 2);
        }
    } else
#endif
    {
        pTDInfo->skb = skb;
        pTDInfo->skb_dma[0] = pci_map_single(pInfo->pcid, skb->data,
                skb->len, PCI_DMA_TODEVICE);
        VELOCITY_SET_TD_PACKET_SIZE(pTD, skb->len);
        pTD->aTdBufs[0].dwBufAddrLo = cpu_to_le32(pTDInfo->skb_dma[0]);
        // mask off TD data buffer address high to zero
        pTD->aTdBufs[0].dwBufaddrHi &= cpu_to_le32(0xffff0000L);
        VELOCITY_SET_TD_BUFFER_SIZE(pTD->aTdBufs[0], skb->len);
        pTDInfo->nskb_dma = 1;
        VELOCITY_SET_TD_CMDZ(pTD, 2);
    }

    if (pInfo->hw.flags & VELOCITY_FLAGS_TAGGING) {
        // clear CFI and priority
        pTD->tdesc1 &= cpu_to_le32(0xffff0000L);
        VELOCITY_SET_TD_VLANID(pTD, pInfo->hw.sOpts.vid & 0xfff);
        pTD->tdesc1 |= cpu_to_le32(TCR_VETAG);
    }

#ifdef VELOCITY_TX_CSUM_SUPPORT
    if ((pInfo->hw.flags & VELOCITY_FLAGS_TX_CSUM) &&
        (skb->ip_summed == CHECKSUM_HW)) {
        struct iphdr *ip = skb->nh.iph;

        if (ip->protocol == IPPROTO_TCP) {
            // request TCP checksum calculation
            pTD->tdesc1 |= cpu_to_le32(TCR_TCPCK);
        } else if (ip->protocol == IPPROTO_UDP) {
            // request UDP checksum calculation
            pTD->tdesc1 |= cpu_to_le32(TCR_UDPCK);
        }
        // request IP checksum calculation
        pTD->tdesc1 |= cpu_to_le32(TCR_IPCK);
    }
#endif

    // Set OWN bit of current TD
    pTD->tdesc0 |= cpu_to_le32(TDESC0_OWN);

    pInfo->hw.iTDUsed[iQNo]++;
    pInfo->hw.aiCurrTDIdx[iQNo] = (iCurrTDIdx + 1) % pInfo->hw.sOpts.nTxDescs;

    if (AVAIL_TD(&pInfo->hw, iQNo) < 1)
        netif_stop_queue(dev);

    iPrevTDIdx = (iCurrTDIdx + pInfo->hw.sOpts.nTxDescs - 1) % pInfo->hw.sOpts.nTxDescs;
    pTD = &(pInfo->hw.apTDRings[iQNo][iPrevTDIdx]);
    pTD->aTdBufs[0].dwBufaddrHi |= cpu_to_le32(TDTXBUF_QUE);

    mac_tx_queue_wake(&pInfo->hw, iQNo);

    dev->trans_start = jiffies;

    spin_unlock_irqrestore(&pInfo->lock, flags);

    return 0;
}

static irqreturn_t velocity_intr(int irq, void *dev_instance, struct pt_regs *regs)
{
    struct net_device *dev = dev_instance;
    PVELOCITY_INFO pInfo = netdev_priv(dev);
    U32 isr_status;
    int max_count = 0;
    int handled = 0;

    if (!spin_trylock(&pInfo->lock))
        return IRQ_RETVAL(handled);

    isr_status = mac_read_isr(&pInfo->hw);
    if (isr_status == 0) {
        spin_unlock(&pInfo->lock);
        return IRQ_RETVAL(handled);
    }

    handled = 1;
    mac_disable_int(&pInfo->hw);

    while (isr_status != 0) {
        mac_write_isr(&pInfo->hw, isr_status);
        velocity_error(pInfo, isr_status);
        max_count += velocity_rx_srv(pInfo, isr_status);
        max_count += velocity_tx_srv(pInfo, isr_status);
        // [1.18], for performance
        max_count += velocity_rx_srv(pInfo, isr_status);
        max_count += velocity_tx_srv(pInfo, isr_status);
        isr_status = mac_read_isr(&pInfo->hw);
        if (max_count > pInfo->hw.sOpts.int_works)
            break;
    }

    mac_enable_int(&pInfo->hw);
    spin_unlock(&pInfo->lock);

    return IRQ_RETVAL(handled);
}
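/*
 * ether_crc - big-endian CRC-32 over a byte string, using the
 * standard Ethernet polynomial.  Bits are consumed LSB-first from
 * each octet; the sign test (crc < 0) reads bit 31 of the int
 * accumulator, i.e. the bit about to be shifted out.
 */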
static unsigned const ethernet_polynomial = 0x04c11db7U;

static inline u32 ether_crc(int length, unsigned char *data)
{
    int crc = -1;

    while (--length >= 0) {
        unsigned char current_octet = *data++;
        int bit;

        for (bit = 0; bit < 8; bit++, current_octet >>= 1) {
            crc = (crc << 1) ^
                ((crc < 0) ^ (current_octet & 1) ? ethernet_polynomial : 0);
        }
    }

    return crc;
}

static void velocity_set_multi(struct net_device *dev)
{
    PVELOCITY_INFO pInfo = netdev_priv(dev);
    u8 rx_mode;
    int i;
    struct dev_mc_list *mclist;

    if (dev->flags & IFF_PROMISC) {
        // Set promiscuous.
        // Unconditionally log net taps.
        printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
        velocity_set_all_multi(&pInfo->hw);
        rx_mode = (RCR_AM | RCR_AB | RCR_PROM);
    } else if ((dev->mc_count > pInfo->hw.multicast_limit) ||
               (dev->flags & IFF_ALLMULTI)) {
        velocity_set_all_multi(&pInfo->hw);
        rx_mode = (RCR_AM | RCR_AB);
    } else {
        int offset = MCAM_SIZE - pInfo->hw.multicast_limit;

        // Clear multicast address registers
        velocity_clear_all_multi(&pInfo->hw);
        mac_get_cam_mask(&pInfo->hw, pInfo->hw.abyMCAMMask, VELOCITY_MULTICAST_CAM);

        for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
             i++, mclist = mclist->next) {
            mac_set_cam(&pInfo->hw, i + offset, mclist->dmi_addr, VELOCITY_MULTICAST_CAM);
            pInfo->hw.abyMCAMMask[(offset + i) / 8] |= 1 << ((offset + i) & 7);
        }

        mac_set_cam_mask(&pInfo->hw, pInfo->hw.abyMCAMMask, VELOCITY_MULTICAST_CAM);
        rx_mode = (RCR_AP | RCR_AM | RCR_AB);
    }

    if (dev->mtu > 1500)
        rx_mode |= RCR_AL;

    BYTE_REG_BITS_ON(&pInfo->hw, rx_mode, MAC_REG_RCR);
}

static struct net_device_stats *velocity_get_stats(struct net_device *dev)
{
    PVELOCITY_INFO pInfo = netdev_priv(dev);

    spin_lock_irq(&pInfo->lock);
    velocity_update_hw_mibs(&pInfo->hw);
    spin_unlock_irq(&pInfo->lock);

    pInfo->stats.rx_packets = pInfo->hw.adwHWMIBCounters[HW_MIB_ifRxAllPkts];
    pInfo->stats.rx_errors = pInfo->hw.adwHWMIBCounters[HW_MIB_ifRxErrorPkts];
    pInfo->stats.rx_length_errors = pInfo->hw.adwHWMIBCounters[HW_MIB_ifInRangeLengthErrors];
    pInfo->stats.collisions = pInfo->hw.adwHWMIBCounters[HW_MIB_ifTxEtherCollisions];

    // detailed rx_errors:
    pInfo->stats.rx_crc_errors = pInfo->hw.adwHWMIBCounters[HW_MIB_ifRxPktCRCE];

    // detailed tx_errors

    return &pInfo->stats;
}

static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
    switch (cmd) {
#ifdef VELOCITY_ETHTOOL_IOCTL_SUPPORT
    case SIOCETHTOOL:
        return velocity_ethtool_ioctl(dev, rq);
#endif
#ifdef VELOCITY_MII_IOCTL_SUPPORT
    case SIOCGMIIPHY:   /* Get address of MII PHY in use. */
    case SIOCGMIIREG:   /* Read MII PHY register. */
    case SIOCSMIIREG:   /* Write to MII PHY register. */
        return velocity_mii_ioctl(dev, rq, cmd);
#endif
    default:
        return -EOPNOTSUPP;
    }

    return 0;
}

/*------------------------------------------------------------------*/

MODULE_DEVICE_TABLE(pci, velocity_id_table);

static struct pci_driver velocity_driver = {
    name:       VELOCITY_NAME,
    id_table:   velocity_id_table,
    probe:      velocity_found1,
    remove:     velocity_remove1,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,9)
#ifdef CONFIG_PM
    suspend:    velocity_suspend,
    resume:     velocity_resume,
#endif
#endif
};

static int __init velocity_init_module(void)
{
    int ret;

    ret = pci_module_init(&velocity_driver);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,9)
#ifdef CONFIG_PM
    register_inetaddr_notifier(&velocity_inetaddr_notifier);
    if (ret >= 0)
        register_reboot_notifier(&velocity_notifier);
#endif
#endif

    return ret;
}

static void __exit velocity_cleanup_module(void)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,9)