e1000_main.c
        if(likely(e1000_tso(adapter, skb)))
                tx_flags |= E1000_TX_FLAGS_TSO;
        else if(likely(e1000_tx_csum(adapter, skb)))
                tx_flags |= E1000_TX_FLAGS_CSUM;

        e1000_tx_queue(adapter,
                e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss),
                tx_flags);

        netdev->trans_start = jiffies;

        return 0;
}

/**
 * e1000_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/

static void
e1000_tx_timeout(struct net_device *netdev)
{
        struct e1000_adapter *adapter = netdev->priv;

        /* Do the reset outside of interrupt context */
        schedule_task(&adapter->tx_timeout_task);
}

static void
e1000_tx_timeout_task(struct net_device *netdev)
{
        struct e1000_adapter *adapter = netdev->priv;

        e1000_down(adapter);
        e1000_up(adapter);
}

/**
 * e1000_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the timer callback.
 **/

static struct net_device_stats *
e1000_get_stats(struct net_device *netdev)
{
        struct e1000_adapter *adapter = netdev->priv;

        e1000_update_stats(adapter);
        return &adapter->net_stats;
}

/**
 * e1000_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/

static int
e1000_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct e1000_adapter *adapter = netdev->priv;
        int old_mtu = adapter->rx_buffer_len;
        int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;

        if((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
           (max_frame > MAX_JUMBO_FRAME_SIZE)) {
                DPRINTK(PROBE, ERR, "Invalid MTU setting\n");
                return -EINVAL;
        }

        if(max_frame <= MAXIMUM_ETHERNET_FRAME_SIZE) {
                adapter->rx_buffer_len = E1000_RXBUFFER_2048;
        } else if(adapter->hw.mac_type < e1000_82543) {
                DPRINTK(PROBE, ERR, "Jumbo Frames not supported on 82542\n");
                return -EINVAL;
        } else if(max_frame <= E1000_RXBUFFER_4096) {
                adapter->rx_buffer_len = E1000_RXBUFFER_4096;
        } else if(max_frame <= E1000_RXBUFFER_8192) {
                adapter->rx_buffer_len = E1000_RXBUFFER_8192;
        } else {
                adapter->rx_buffer_len = E1000_RXBUFFER_16384;
        }

        if(old_mtu != adapter->rx_buffer_len && netif_running(netdev)) {
                e1000_down(adapter);
                e1000_up(adapter);
        }

        netdev->mtu = new_mtu;
        adapter->hw.max_frame_size = max_frame;

        return 0;
}

/**
 * e1000_update_stats - Update the board statistics counters
 * @adapter: board private structure
 **/

void
e1000_update_stats(struct e1000_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        unsigned long flags;
        uint16_t phy_tmp;

#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF

        spin_lock_irqsave(&adapter->stats_lock, flags);

        /* these counters are modified from e1000_adjust_tbi_stats,
         * called from the interrupt context, so they must only
         * be written while holding adapter->stats_lock
         */

        adapter->stats.crcerrs += E1000_READ_REG(hw, CRCERRS);
        adapter->stats.gprc += E1000_READ_REG(hw, GPRC);
        adapter->stats.gorcl += E1000_READ_REG(hw, GORCL);
        adapter->stats.gorch += E1000_READ_REG(hw, GORCH);
        adapter->stats.bprc += E1000_READ_REG(hw, BPRC);
        adapter->stats.mprc += E1000_READ_REG(hw, MPRC);
        adapter->stats.roc += E1000_READ_REG(hw, ROC);
        adapter->stats.prc64 += E1000_READ_REG(hw, PRC64);
        adapter->stats.prc127 += E1000_READ_REG(hw, PRC127);
        adapter->stats.prc255 += E1000_READ_REG(hw, PRC255);
        adapter->stats.prc511 += E1000_READ_REG(hw, PRC511);
        adapter->stats.prc1023 += E1000_READ_REG(hw, PRC1023);
        adapter->stats.prc1522 += E1000_READ_REG(hw, PRC1522);
        adapter->stats.symerrs += E1000_READ_REG(hw, SYMERRS);
        adapter->stats.mpc += E1000_READ_REG(hw, MPC);
        adapter->stats.scc += E1000_READ_REG(hw, SCC);
        adapter->stats.ecol += E1000_READ_REG(hw, ECOL);
        adapter->stats.mcc += E1000_READ_REG(hw, MCC);
        adapter->stats.latecol += E1000_READ_REG(hw, LATECOL);
        adapter->stats.dc += E1000_READ_REG(hw, DC);
        adapter->stats.sec += E1000_READ_REG(hw, SEC);
        adapter->stats.rlec += E1000_READ_REG(hw, RLEC);
        adapter->stats.xonrxc += E1000_READ_REG(hw, XONRXC);
        adapter->stats.xontxc += E1000_READ_REG(hw, XONTXC);
        adapter->stats.xoffrxc += E1000_READ_REG(hw, XOFFRXC);
        adapter->stats.xofftxc += E1000_READ_REG(hw, XOFFTXC);
        adapter->stats.fcruc += E1000_READ_REG(hw, FCRUC);
        adapter->stats.gptc += E1000_READ_REG(hw, GPTC);
        adapter->stats.gotcl += E1000_READ_REG(hw, GOTCL);
        adapter->stats.gotch += E1000_READ_REG(hw, GOTCH);
        adapter->stats.rnbc += E1000_READ_REG(hw, RNBC);
        adapter->stats.ruc += E1000_READ_REG(hw, RUC);
        adapter->stats.rfc += E1000_READ_REG(hw, RFC);
        adapter->stats.rjc += E1000_READ_REG(hw, RJC);
        adapter->stats.torl += E1000_READ_REG(hw, TORL);
        adapter->stats.torh += E1000_READ_REG(hw, TORH);
        adapter->stats.totl += E1000_READ_REG(hw, TOTL);
        adapter->stats.toth += E1000_READ_REG(hw, TOTH);
        adapter->stats.tpr += E1000_READ_REG(hw, TPR);
        adapter->stats.ptc64 += E1000_READ_REG(hw, PTC64);
        adapter->stats.ptc127 += E1000_READ_REG(hw, PTC127);
        adapter->stats.ptc255 += E1000_READ_REG(hw, PTC255);
        adapter->stats.ptc511 += E1000_READ_REG(hw, PTC511);
        adapter->stats.ptc1023 += E1000_READ_REG(hw, PTC1023);
        adapter->stats.ptc1522 += E1000_READ_REG(hw, PTC1522);
        adapter->stats.mptc += E1000_READ_REG(hw, MPTC);
        adapter->stats.bptc += E1000_READ_REG(hw, BPTC);

        /* used for adaptive IFS */

        hw->tx_packet_delta = E1000_READ_REG(hw, TPT);
        adapter->stats.tpt += hw->tx_packet_delta;
        hw->collision_delta = E1000_READ_REG(hw, COLC);
        adapter->stats.colc += hw->collision_delta;

        if(hw->mac_type >= e1000_82543) {
                adapter->stats.algnerrc += E1000_READ_REG(hw, ALGNERRC);
                adapter->stats.rxerrc += E1000_READ_REG(hw, RXERRC);
                adapter->stats.tncrs += E1000_READ_REG(hw, TNCRS);
                adapter->stats.cexterr += E1000_READ_REG(hw, CEXTERR);
                adapter->stats.tsctc += E1000_READ_REG(hw, TSCTC);
                adapter->stats.tsctfc += E1000_READ_REG(hw, TSCTFC);
        }

        /* Fill out the OS statistics structure */

        adapter->net_stats.rx_packets = adapter->stats.gprc;
        adapter->net_stats.tx_packets = adapter->stats.gptc;
        adapter->net_stats.rx_bytes = adapter->stats.gorcl;
        adapter->net_stats.tx_bytes = adapter->stats.gotcl;
        adapter->net_stats.multicast = adapter->stats.mprc;
        adapter->net_stats.collisions = adapter->stats.colc;

        /* Rx Errors */

        adapter->net_stats.rx_errors = adapter->stats.rxerrc +
                adapter->stats.crcerrs + adapter->stats.algnerrc +
                adapter->stats.rlec + adapter->stats.rnbc +
                adapter->stats.mpc + adapter->stats.cexterr;
        adapter->net_stats.rx_dropped = adapter->stats.rnbc;
        adapter->net_stats.rx_length_errors = adapter->stats.rlec;
        adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
        adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
        adapter->net_stats.rx_fifo_errors = adapter->stats.mpc;
        adapter->net_stats.rx_missed_errors = adapter->stats.mpc;

        /* Tx Errors */

        adapter->net_stats.tx_errors = adapter->stats.ecol +
                                       adapter->stats.latecol;
        adapter->net_stats.tx_aborted_errors = adapter->stats.ecol;
        adapter->net_stats.tx_window_errors = adapter->stats.latecol;
        adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs;

        /* Tx Dropped needs to be maintained elsewhere */

        /* Phy Stats */

        if(hw->media_type == e1000_media_type_copper) {
                if((adapter->link_speed == SPEED_1000) &&
                   (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
                        phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
                        adapter->phy_stats.idle_errors += phy_tmp;
                }

                if((hw->mac_type <= e1000_82546) &&
                   (hw->phy_type == e1000_phy_m88) &&
                   !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
                        adapter->phy_stats.receive_errors += phy_tmp;
        }

        spin_unlock_irqrestore(&adapter->stats_lock, flags);
}

/**
 * e1000_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 * @pt_regs: CPU registers structure
 **/

static irqreturn_t
e1000_intr(int irq, void *data, struct pt_regs *regs)
{
        struct net_device *netdev = data;
        struct e1000_adapter *adapter = netdev->priv;
        struct e1000_hw *hw = &adapter->hw;
        uint32_t icr = E1000_READ_REG(hw, ICR);
#ifndef CONFIG_E1000_NAPI
        unsigned int i;
#endif

        if(unlikely(!icr))
                return IRQ_NONE;  /* Not our interrupt */

        if(unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
                hw->get_link_status = 1;
                mod_timer(&adapter->watchdog_timer, jiffies);
        }

#ifdef CONFIG_E1000_NAPI
        if(likely(netif_rx_schedule_prep(netdev))) {
                /* Disable interrupts and register for poll. The flush
                   of the posted write is intentionally left out. */
                atomic_inc(&adapter->irq_sem);
                E1000_WRITE_REG(hw, IMC, ~0);
                __netif_rx_schedule(netdev);
        }
#else
        for(i = 0; i < E1000_MAX_INTR; i++)
                if(unlikely(!e1000_clean_rx_irq(adapter) &
                   !e1000_clean_tx_irq(adapter)))
                        break;
#endif

        return IRQ_HANDLED;
}

#ifdef CONFIG_E1000_NAPI
/**
 * e1000_clean - NAPI Rx polling callback
 * @adapter: board private structure
 **/

static int
e1000_clean(struct net_device *netdev, int *budget)
{
        struct e1000_adapter *adapter = netdev->priv;
        int work_to_do = min(*budget, netdev->quota);
        int tx_cleaned;
        int work_done = 0;

        if (!netif_carrier_ok(netdev))
                goto quit_polling;

        tx_cleaned = e1000_clean_tx_irq(adapter);
        e1000_clean_rx_irq(adapter, &work_done, work_to_do);

        *budget -= work_done;
        netdev->quota -= work_done;

        /* if no Rx and Tx cleanup work was done, exit the polling mode */
        if(!tx_cleaned || (work_done < work_to_do) ||
           !netif_running(netdev)) {
quit_polling:
                netif_rx_complete(netdev);
                e1000_irq_enable(adapter);
                return 0;
        }

        return (work_done >= work_to_do);
}
#endif

/**
 * e1000_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 **/

static boolean_t
e1000_clean_tx_irq(struct e1000_adapter *adapter)
{
        struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
        struct net_device *netdev = adapter->netdev;
        struct e1000_tx_desc *tx_desc, *eop_desc;
        struct e1000_buffer *buffer_info;
        unsigned int i, eop;
        boolean_t cleaned = FALSE;

        i = tx_ring->next_to_clean;
        eop = tx_ring->buffer_info[i].next_to_watch;
        eop_desc = E1000_TX_DESC(*tx_ring, eop);

        while(eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
                for(cleaned = FALSE; !cleaned; ) {
                        tx_desc = E1000_TX_DESC(*tx_ring, i);
                        buffer_info = &tx_ring->buffer_info[i];

                        e1000_unmap_and_free_tx_resource(adapter, buffer_info);
                        tx_desc->buffer_addr = 0;
                        tx_desc->lower.data = 0;
                        tx_desc->upper.data = 0;

                        cleaned = (i == eop);
                        if(unlikely(++i == tx_ring->count)) i = 0;
                }

                eop = tx_ring->buffer_info[i].next_to_watch;
                eop_desc = E1000_TX_DESC(*tx_ring, eop);
        }

        tx_ring->next_to_clean = i;

        spin_lock(&adapter->tx_lock);

        if(unlikely(cleaned && netif_queue_stopped(netdev) &&
                    netif_carrier_ok(netdev)))
                netif_wake_queue(netdev);

        spin_unlock(&adapter->tx_lock);

        return cleaned;
}

/**
 * e1000_rx_checksum - Receive Checksum Offload for 82543
 * @adapter: board private structure
 * @rx_desc: receive descriptor
 * @sk_buff: socket buffer with received data
 **/

static inline void
e1000_rx_checksum(struct e1000_adapter *adapter,
                  struct e1000_rx_desc *rx_desc,
                  struct sk_buff *skb)
{
        /* 82543 or newer only */
        if(unlikely((adapter->hw.mac_type < e1000_82543) ||
                    /* Ignore Checksum bit is set */
                    (rx_desc->status & E1000_RXD_STAT_IXSM) ||
                    /* TCP Checksum has not been calculated */
                    (!(rx_desc->status & E1000_RXD_STAT_TCPCS)))) {
                skb->ip_summed = CHECKSUM_NONE;
                return;
        }

        /* At this point we know the hardware did the TCP checksum */
        /* now look at the TCP checksum error bit */
        if(rx_desc->errors & E1000_RXD_ERR_TCPE) {
                /* let the stack verify checksum errors */
                skb->ip_summed = CHECKSUM_NONE;
                adapter->hw_csum_err++;
        } else {
                /* TCP checksum is good */
                skb->ip_summed = CHECKSUM_UNNECESSARY;
                adapter->hw_csum_good++;
        }
}

/**
 * e1000_clean_rx_irq - Send received data up the network stack
 * @adapter: board private structure
 **/

static boolean_t
#ifdef CONFIG_E1000_NAPI
e1000_clean_rx_irq(struct e1000_adapter *adapter, int *work_done,
                   int work_to_do)
#else
e1000_clean_rx_irq(struct e1000_adapter *adapter)
#endif
{
        struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
        struct e1000_rx_desc *rx_desc;
        struct e1000_buffer *buffer_info;
        struct sk_buff *skb;
        unsigned long flags;
        uint32_t length;
        uint8_t last_byte;
        unsigned int i;
        boolean_t cleaned = FALSE;

        i = rx_ring->next_to_clean;
        rx_desc = E1000_RX_DESC(*rx_ring, i);

        while(rx_desc->status & E1000_RXD_STAT_DD) {
                buffer_info = &rx_ring->buffer_info[i];
#ifdef CONFIG_E1000_NAPI
                if(*work_done >= work_to_do)
                        break;
                (*work_done)++;
#endif
                cleaned = TRUE;

                pci_unmap_single(pdev,
                                 buffer_info->dma,
                                 buffer_info->length,
                                 PCI_DMA_FROMDEVICE);

                skb = buffer_info->skb;
                length = le16_to_cpu(rx_desc->length);

                if(unlikely(!(rx_desc->status & E1000_RXD_STAT_EOP))) {
                        /* All receives must fit into a single buffer */
                        E1000_DBG("%s: Receive packet consumed multiple"
                                  " buffers\n", netdev->name);
                        dev_kfree_skb_irq(skb);
                        goto next_desc;
                }

                if(unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
                        last_byte = *(