e1000_main.c
		if(!netdev->polling) {
			e1000_clean_rx_irq(adapter);
			e1000_clean_tx_irq(adapter);
		}
		i--;

#ifdef E1000_COUNT_ICR
		/* Walk the ICR image bit by bit; the double shifts skip
		 * the reserved bits between defined interrupt causes. */
		adapter->icr_txdw += icr & 0x01; icr >>= 1;
		adapter->icr_txqe += icr & 0x01; icr >>= 1;
		adapter->icr_lsc += icr & 0x01; icr >>= 1;
		adapter->icr_rxseq += icr & 0x01; icr >>= 1;
		adapter->icr_rxdmt += icr & 0x01; icr >>= 2;
		adapter->icr_rxo += icr & 0x01; icr >>= 1;
		adapter->icr_rxt += icr & 0x01; icr >>= 2;
		adapter->icr_mdac += icr & 0x01; icr >>= 1;
		adapter->icr_rxcfg += icr & 0x01; icr >>= 1;
		adapter->icr_gpi += icr & 0x01;
#endif
	}
}

/**
 * e1000_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 **/

static void
e1000_clean_tx_irq(struct e1000_adapter *adapter)
{
	struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_tx_desc *tx_desc;
	int i;

	i = tx_ring->next_to_clean;
	tx_desc = E1000_TX_DESC(*tx_ring, i);

	while(tx_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
		if(tx_ring->buffer_info[i].dma) {
			pci_unmap_page(pdev,
			               tx_ring->buffer_info[i].dma,
			               tx_ring->buffer_info[i].length,
			               PCI_DMA_TODEVICE);
			tx_ring->buffer_info[i].dma = 0;
		}

		if(tx_ring->buffer_info[i].skb) {
			dev_kfree_skb_any(tx_ring->buffer_info[i].skb);
			tx_ring->buffer_info[i].skb = NULL;
		}

		tx_desc->upper.data = 0;

		i = (i + 1) % tx_ring->count;
		tx_desc = E1000_TX_DESC(*tx_ring, i);
	}

	tx_ring->next_to_clean = i;

	if(netif_queue_stopped(netdev) && netif_carrier_ok(netdev) &&
	   (E1000_DESC_UNUSED(tx_ring) > E1000_TX_QUEUE_WAKE)) {
#ifdef IANS
		{
			iANSsupport_t *ans = adapter->iANSdata;
			if((ans->iANS_status == IANS_COMMUNICATION_UP) &&
			   (ans->reporting_mode == IANS_STATUS_REPORTING_ON) &&
			   (ans_notify))
				ans_notify(netdev, IANS_IND_XMIT_QUEUE_READY);
		}
#endif
		netif_wake_queue(netdev);
	}
}
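/*
 * A sketch (not from this listing) of the free-slot arithmetic behind
 * E1000_DESC_UNUSED, assuming the usual two-index ring convention in
 * which one slot is kept empty so a full ring can be distinguished
 * from an empty one:
 *
 *	#define E1000_DESC_UNUSED(R) \
 *		((((R)->next_to_clean > (R)->next_to_use) ? \
 *			0 : (R)->count) + \
 *		 (R)->next_to_clean - (R)->next_to_use - 1)
 *
 * That is, the distance from next_to_use forward to next_to_clean,
 * wrapped once around the ring, minus the one reserved slot; the queue
 * above is only woken once this count exceeds E1000_TX_QUEUE_WAKE.
 */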
/**
 * e1000_clean_rx_irq - Send received data up the network stack
 * @adapter: board private structure
 **/

static void
e1000_clean_rx_irq(struct e1000_adapter *adapter)
{
	struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc;
	struct sk_buff *skb;
	unsigned long flags;
	uint32_t length;
	uint8_t last_byte;
	int i;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC(*rx_ring, i);

	while(rx_desc->status & E1000_RXD_STAT_DD) {
		pci_unmap_single(pdev,
		                 rx_ring->buffer_info[i].dma,
		                 rx_ring->buffer_info[i].length,
		                 PCI_DMA_FROMDEVICE);

		skb = rx_ring->buffer_info[i].skb;
		length = le16_to_cpu(rx_desc->length);

		if(!(rx_desc->status & E1000_RXD_STAT_EOP)) {
			/* All receives must fit into a single buffer */
			E1000_DBG("Receive packet consumed multiple buffers\n");
			dev_kfree_skb_irq(skb);
			rx_desc->status = 0;
			rx_ring->buffer_info[i].skb = NULL;
			i = (i + 1) % rx_ring->count;
			rx_desc = E1000_RX_DESC(*rx_ring, i);
			continue;
		}

		if(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
			last_byte = *(skb->data + length - 1);
			if(TBI_ACCEPT(&adapter->hw, rx_desc->status,
			              rx_desc->errors, length, last_byte)) {
				spin_lock_irqsave(&adapter->stats_lock, flags);
				e1000_tbi_adjust_stats(&adapter->hw,
				                       &adapter->stats,
				                       length, skb->data);
				spin_unlock_irqrestore(&adapter->stats_lock,
				                       flags);
				length--;
			} else {
				dev_kfree_skb_irq(skb);
				rx_desc->status = 0;
				rx_ring->buffer_info[i].skb = NULL;
				i = (i + 1) % rx_ring->count;
				rx_desc = E1000_RX_DESC(*rx_ring, i);
				continue;
			}
		}

		/* Good Receive */
		skb_put(skb, length - ETHERNET_FCS_SIZE);

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0))
		/* NFS mountd workaround on 2.2.x */
		if(length < 512) {
			struct sk_buff *tmp_skb;
			tmp_skb = alloc_skb(E1000_RXBUFFER_2048 >> 1,
			                    GFP_ATOMIC);
			if(tmp_skb != NULL) {
				tmp_skb->dev = skb->dev;
				skb_reserve(tmp_skb, skb->data - skb->head);
				skb_put(tmp_skb, skb->len);
				memcpy(tmp_skb->head, skb->head,
				       tmp_skb->end - tmp_skb->head);
				dev_kfree_skb(skb);
				skb = tmp_skb;
			}
		}
#endif

		/* Receive Checksum Offload */
		e1000_rx_checksum(adapter, rx_desc, skb);

#ifdef IANS
		{
			iANSsupport_t *ans = adapter->iANSdata;
			if(ans->iANS_status == IANS_COMMUNICATION_UP) {
				if(bd_ans_os_Receive(adapter, rx_desc, skb) ==
				   BD_ANS_FAILURE)
					dev_kfree_skb_irq(skb);
				else
					netif_rx(skb);
			} else {
#endif
		skb->protocol = eth_type_trans(skb, netdev);
#ifdef NETIF_F_HW_VLAN_TX
		if(adapter->vlgrp && (rx_desc->status & E1000_RXD_STAT_VP)) {
			vlan_hwaccel_rx(skb, adapter->vlgrp,
			                (rx_desc->special &
			                 E1000_RXD_SPC_VLAN_MASK));
		} else {
			netif_rx(skb);
		}
#else
		netif_rx(skb);
#endif
#ifdef IANS
			}
		}
#endif
		netdev->last_rx = jiffies;

		rx_desc->status = 0;
		rx_ring->buffer_info[i].skb = NULL;
		i = (i + 1) % rx_ring->count;
		rx_desc = E1000_RX_DESC(*rx_ring, i);
	}

	rx_ring->next_to_clean = i;

	e1000_alloc_rx_buffers(adapter);
}
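/*
 * The loop above is the consumer half of the RX ring handshake; the
 * producer half is e1000_alloc_rx_buffers() below.  In outline (a
 * descriptive sketch, not driver code):
 *
 *	hardware:  DMA frame into buffer, set E1000_RXD_STAT_DD
 *	software:  hand skb to the stack, status = 0, advance next_to_clean
 *	software:  refill empty slots, publish them by writing RDT
 *
 * Clearing status before advancing is what keeps a stale DD bit from
 * being mistaken for a fresh packet the next time the ring wraps.
 */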
/**
 * e1000_alloc_rx_buffers - Replace used receive buffers
 * @adapter: board private structure
 **/

static void
e1000_alloc_rx_buffers(struct e1000_adapter *adapter)
{
	struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc;
	struct sk_buff *skb;
	int reserve_len;
	int i;

#ifdef IANS
	reserve_len = BD_ANS_INFO_SIZE;
	E1000_ROUNDUP(reserve_len, 16);
	reserve_len += 2;
#else
	reserve_len = 2;
#endif

	i = rx_ring->next_to_use;

	while(!rx_ring->buffer_info[i].skb) {
		rx_desc = E1000_RX_DESC(*rx_ring, i);

		skb = alloc_skb(adapter->rx_buffer_len + reserve_len,
		                GFP_ATOMIC);

		if(!skb) {
			/* Better luck next round */
			break;
		}

		/* Make buffer alignment 2 beyond a 16 byte boundary
		 * this will result in a 16 byte aligned IP header after
		 * the 14 byte MAC header is removed */
		skb_reserve(skb, reserve_len);

		skb->dev = netdev;

		rx_ring->buffer_info[i].skb = skb;
		rx_ring->buffer_info[i].length = adapter->rx_buffer_len;
		rx_ring->buffer_info[i].dma =
			pci_map_single(pdev, skb->data,
			               adapter->rx_buffer_len,
			               PCI_DMA_FROMDEVICE);

		rx_desc->buffer_addr = cpu_to_le64(rx_ring->buffer_info[i].dma);

		if(!(i % E1000_RX_BUFFER_WRITE)) {
			/* Force memory writes to complete before letting h/w
			 * know there are new descriptors to fetch. (Only
			 * applicable for weak-ordered memory model archs,
			 * such as IA-64). */
			wmb();

			E1000_WRITE_REG(&adapter->hw, RDT, i);
		}

		i = (i + 1) % rx_ring->count;
	}

	rx_ring->next_to_use = i;
}
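/*
 * On the "2 beyond a 16 byte boundary" comment above, the offsets work
 * out as follows (assuming alloc_skb() hands back 16-byte-aligned
 * data, which these buffer sizes give in practice):
 *
 *	skb->data        = 16*n + 2     after skb_reserve(skb, 2)
 *	Ethernet header  = 14 bytes     fills 16*n + 2 .. 16*n + 15
 *	IP header        starts at 16*(n + 1), i.e. 16-byte aligned
 *
 * With IANS, E1000_ROUNDUP keeps BD_ANS_INFO_SIZE a multiple of 16, so
 * the final "+ 2" and therefore the alignment are unchanged.
 */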
/**
 * e1000_ioctl - handle device-specific ioctl requests
 * @netdev: network interface device structure
 * @ifr: pointer to the interface request
 * @cmd: ioctl command number
 **/

static int
e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
#ifdef IANS
	case IANS_BASE_SIOC:
	{
		IANS_BD_PARAM_HEADER *header;
		header = (IANS_BD_PARAM_HEADER *) ifr->ifr_data;
		if((header->Opcode != IANS_OP_EXT_GET_STATUS) &&
		   (!capable(CAP_NET_ADMIN)))
			return -EPERM;
		return bd_ans_os_Ioctl(netdev, ifr, cmd);
	}
#endif
#ifdef IDIAG
	case IDIAG_PRO_BASE_SIOC:
		return e1000_diag_ioctl(netdev, ifr);
#endif
#ifdef SIOCETHTOOL
	case SIOCETHTOOL:
		return e1000_ethtool_ioctl(netdev, ifr);
#endif
	default:
		return -EOPNOTSUPP;
	}
}

/**
 * e1000_rx_checksum - Receive Checksum Offload for 82543
 * @adapter: board private structure
 * @rx_desc: receive descriptor
 * @skb: socket buffer with received data
 **/

static inline void
e1000_rx_checksum(struct e1000_adapter *adapter,
                  struct e1000_rx_desc *rx_desc,
                  struct sk_buff *skb)
{
	/* 82543 or newer only */
	if((adapter->hw.mac_type < e1000_82543) ||
	   /* Ignore Checksum bit is set */
	   (rx_desc->status & E1000_RXD_STAT_IXSM) ||
	   /* TCP Checksum has not been calculated */
	   (!(rx_desc->status & E1000_RXD_STAT_TCPCS))) {
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}

	/* At this point we know the hardware did the TCP checksum */
	/* now look at the TCP checksum error bit */
	if(rx_desc->errors & E1000_RXD_ERR_TCPE) {
		/* let the stack verify checksum errors */
		skb->ip_summed = CHECKSUM_NONE;
		adapter->hw_csum_err++;
	} else {
		/* TCP checksum is good */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		adapter->hw_csum_good++;
	}
}

void
e1000_pci_set_mwi(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;

#ifdef HAVE_PCI_SET_MWI
	pci_set_mwi(adapter->pdev);
#else
	pci_write_config_word(adapter->pdev, PCI_COMMAND,
	                      adapter->hw.pci_cmd_word |
	                      PCI_COMMAND_INVALIDATE);
#endif
}

void
e1000_pci_clear_mwi(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;

#ifdef HAVE_PCI_SET_MWI
	pci_clear_mwi(adapter->pdev);
#else
	pci_write_config_word(adapter->pdev, PCI_COMMAND,
	                      adapter->hw.pci_cmd_word &
	                      ~PCI_COMMAND_INVALIDATE);
#endif
}

void
e1000_read_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
{
	struct e1000_adapter *adapter = hw->back;

	pci_read_config_word(adapter->pdev, reg, value);
}

void
e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
{
	struct e1000_adapter *adapter = hw->back;

	pci_write_config_word(adapter->pdev, reg, *value);
}

uint32_t
e1000_io_read(struct e1000_hw *hw, uint32_t port)
{
	return inl(port);
}

void
e1000_io_write(struct e1000_hw *hw, uint32_t port, uint32_t value)
{
	outl(value, port);
}

#ifdef NETIF_F_HW_VLAN_TX
static void
e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct e1000_adapter *adapter = netdev->priv;
	uint32_t ctrl, rctl;

	e1000_irq_disable(adapter);
	adapter->vlgrp = grp;

	if(grp) {
		/* enable VLAN tag insert/strip */
		E1000_WRITE_REG(&adapter->hw, VET, ETHERNET_IEEE_VLAN_TYPE);
		ctrl = E1000_READ_REG(&adapter->hw, CTRL);
		ctrl |= E1000_CTRL_VME;
		E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);

		/* enable VLAN receive filtering */
		rctl = E1000_READ_REG(&adapter->hw, RCTL);
		rctl |= E1000_RCTL_VFE;
		rctl &= ~E1000_RCTL_CFIEN;
		E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
	} else {
		/* disable VLAN tag insert/strip */
		ctrl = E1000_READ_REG(&adapter->hw, CTRL);
		ctrl &= ~E1000_CTRL_VME;
		E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);

		/* disable VLAN filtering */
		rctl = E1000_READ_REG(&adapter->hw, RCTL);
		rctl &= ~E1000_RCTL_VFE;
		E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
	}

	e1000_irq_enable(adapter);
}

static void
e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
{
	struct e1000_adapter *adapter = netdev->priv;
	uint32_t vfta, index;

	/* add VID to filter table
	 * (the source listing breaks off mid-statement here; the body is
	 * completed following the stock e1000 implementation) */
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
	vfta |= (1 << (vid & 0x1F));
	e1000_write_vfta(&adapter->hw, index, vfta);
}
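/*
 * The index math in e1000_vlan_rx_add_vid() reflects the VFTA layout:
 * a 4096-bit VLAN filter table stored as 128 32-bit registers
 * (128 * 32 = 4096, one bit per possible 12-bit VLAN ID):
 *
 *	index = (vid >> 5) & 0x7F;	VID bits 11..5 pick the word
 *	bit   =  vid & 0x1F;		VID bits  4..0 pick the bit
 *
 * A matching "kill vid" path would be the mirror image: read the word,
 * clear the bit with vfta &= ~(1 << (vid & 0x1F)), and write it back.
 */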