⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 e1000_main.c

📁 COPE, the first practical network coding scheme, which is developed on Click
💻 C
📖 第 1 页 / 共 5 页
字号:
		/* Workaround for potential 82544 hang in PCI-X.  Avoid
		 * terminating buffers within evenly-aligned dwords. */
		if(unlikely(adapter->pcix_82544 &&
		   !((unsigned long)(skb->data + offset + size - 1) & 4) &&
		   size > 4))
			size -= 4;

		buffer_info->length = size;
		buffer_info->dma =
			pci_map_single(adapter->pdev,
				skb->data + offset,
				size,
				PCI_DMA_TODEVICE);
		buffer_info->time_stamp = jiffies;

		len -= size;
		offset += size;
		count++;
		if(unlikely(++i == tx_ring->count)) i = 0;
	}
#ifdef MAX_SKB_FRAGS
	/* Map every page fragment of the skb, applying the same
	 * per-descriptor size cap and hardware workarounds as the
	 * linear data above. */
	for(f = 0; f < nr_frags; f++) {
		struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = frag->size;
		offset = frag->page_offset;

		while(len) {
			buffer_info = &tx_ring->buffer_info[i];
			size = min(len, max_per_txd);
#ifdef NETIF_F_TSO
			/* Workaround for premature desc write-backs
			 * in TSO mode.  Append 4-byte sentinel desc */
			if(unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
				size -= 4;
#endif
			/* Workaround for potential 82544 hang in PCI-X.
			 * Avoid terminating buffers within evenly-aligned
			 * dwords. */
			/* NOTE(review): this alignment test uses the struct
			 * page pointer value plus offset rather than a mapped
			 * bus/virtual address — confirm that is intended. */
			if(unlikely(adapter->pcix_82544 &&
			   !((unsigned long)(frag->page+offset+size-1) & 4) &&
			   size > 4))
				size -= 4;
			buffer_info->length = size;
			buffer_info->dma =
				pci_map_page(adapter->pdev,
					frag->page,
					offset,
					size,
					PCI_DMA_TODEVICE);
			buffer_info->time_stamp = jiffies;

			len -= size;
			offset += size;
			count++;
			if(unlikely(++i == tx_ring->count)) i = 0;
		}
	}
#endif
	/* Step back to the last descriptor actually written: the skb is
	 * recorded there, and the first descriptor's next_to_watch points
	 * at it so Tx cleanup knows when the whole packet completed. */
	i = (i == 0) ? tx_ring->count - 1 : i - 1;
	tx_ring->buffer_info[i].skb = skb;
	tx_ring->buffer_info[first].next_to_watch = i;

	return count;
}

/**
 * e1000_tx_queue - post mapped buffers to the hardware Tx descriptor ring
 * @adapter: board private structure
 * @count: number of buffer_info entries (the return of e1000_tx_map) to post
 * @tx_flags: E1000_TX_FLAGS_* bits selecting TSO / checksum / VLAN options
 *
 * Translates @tx_flags into descriptor command/option bits, fills @count
 * descriptors starting at next_to_use, then writes the TDT register so the
 * hardware begins fetching them.
 *
 * NOTE(review): assumes @count >= 1 — with @count == 0, tx_desc stays NULL
 * and the final command-bit OR below would dereference it.
 */
static inline void
e1000_tx_queue(struct e1000_adapter *adapter, int count, int tx_flags)
{
	struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
	struct e1000_tx_desc *tx_desc = NULL;
	struct e1000_buffer *buffer_info;
	uint32_t txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
	unsigned int i;

	if(likely(tx_flags & E1000_TX_FLAGS_TSO)) {
		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
		             E1000_TXD_CMD_TSE;
		txd_upper |= (E1000_TXD_POPTS_IXSM | E1000_TXD_POPTS_TXSM) << 8;
	}

	if(likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
	}

	if(unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
		txd_lower |= E1000_TXD_CMD_VLE;
		txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
	}

	i = tx_ring->next_to_use;

	while(count--) {
		buffer_info = &tx_ring->buffer_info[i];
		tx_desc = E1000_TX_DESC(*tx_ring, i);
		tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->lower.data =
			cpu_to_le32(txd_lower | buffer_info->length);
		tx_desc->upper.data = cpu_to_le32(txd_upper);
		if(unlikely(++i == tx_ring->count)) i = 0;
	}

	/* OR the adapter-wide command bits into the LAST descriptor only;
	 * tx_desc still points at the final descriptor written above. */
	tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64). */
	wmb();

	tx_ring->next_to_use = i;
	E1000_WRITE_REG(&adapter->hw, TDT, i);
}

/**
 * 82547 workaround to avoid controller hang in half-duplex environment.
 * The workaround is to avoid queuing a large packet that would span
 * the internal Tx FIFO ring boundary by notifying the stack to resend
 * the packet at a later time.  This gives the Tx FIFO an opportunity to
 * flush all packets.  When that occurs, we reset the Tx FIFO pointers
 * to the beginning of the Tx FIFO.
**/
#define E1000_FIFO_HDR			0x10
#define E1000_82547_PAD_LEN		0x3E0

/**
 * e1000_82547_fifo_workaround - decide whether a packet must be deferred
 * @adapter: board private structure
 * @skb: packet about to be queued
 *
 * Returns 1 when the caller should stop the queue and retry later (a FIFO
 * stall is already pending, or this packet risks spanning the internal Tx
 * FIFO wrap point in half-duplex).  Returns 0 otherwise, after advancing
 * the software copy of the FIFO head to account for this packet.
 */
static inline int
e1000_82547_fifo_workaround(struct e1000_adapter *adapter, struct sk_buff *skb)
{
	uint32_t fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
	/* each packet occupies its length plus a FIFO header, rounded up
	 * to a multiple of the header size */
	uint32_t skb_fifo_len = skb->len + E1000_FIFO_HDR;

	E1000_ROUNDUP(skb_fifo_len, E1000_FIFO_HDR);

	/* the 82547 hang only occurs in half-duplex */
	if(adapter->link_duplex != HALF_DUPLEX)
		goto no_fifo_stall_required;

	/* a stall is in progress; keep rejecting packets until it clears */
	if(atomic_read(&adapter->tx_fifo_stall))
		return 1;

	if(skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
		atomic_set(&adapter->tx_fifo_stall, 1);
		return 1;
	}

no_fifo_stall_required:
	/* account for this packet in the software FIFO head, wrapping at
	 * the FIFO size */
	adapter->tx_fifo_head += skb_fifo_len;
	if(adapter->tx_fifo_head >= adapter->tx_fifo_size)
		adapter->tx_fifo_head -= adapter->tx_fifo_size;
	return 0;
}

/* Descriptors needed for a buffer of size S when each descriptor carries
 * at most 2^X bytes (over-estimates by one when S is an exact multiple). */
#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 )

/**
 * e1000_xmit_frame - net_device hard_start_xmit entry point
 * @skb: packet handed down by the network stack
 * @netdev: network interface device structure
 *
 * Estimates the number of Tx descriptors the packet needs, takes the Tx
 * lock, rejects the send (NETDEV_TX_BUSY / NETDEV_TX_LOCKED) when the ring
 * is too full or the 82547 FIFO workaround demands a retry, then maps and
 * queues the packet via e1000_tx_map()/e1000_tx_queue().
 */
static int
e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev->priv;
	unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
	unsigned int tx_flags = 0;
	unsigned int len = skb->len;
	unsigned long flags;
	unsigned int nr_frags = 0;
	unsigned int mss = 0;
	int count = 0;
#ifdef MAX_SKB_FRAGS
	unsigned int f;
	/* len counts only the linear part; fragments are handled below */
	len -= skb->data_len;
#endif

	if(unlikely(skb->len <= 0)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

#ifdef NETIF_F_TSO
	mss = skb_shinfo(skb)->tso_size;
	/* The controller does a simple calculation to
	 * make sure there is enough room in the FIFO before
	 * initiating the DMA for each buffer.  The calc is:
	 * 4 = ceil(buffer len/mss).  To make sure we don't
	 * overrun the FIFO, adjust the max buffer len if mss
	 * drops.
	 */
	if(mss) {
		max_per_txd = min(mss << 2, max_per_txd);
		max_txd_pwr = fls(max_per_txd) - 1;
	}

	/* one extra context descriptor for TSO or hardware checksum */
	if((mss) || (skb->ip_summed == CHECKSUM_HW))
		count++;
	count++;	/* for sentinel desc */
#else
	if(skb->ip_summed == CHECKSUM_HW)
		count++;
#endif
	count += TXD_USE_COUNT(len, max_txd_pwr);

	/* the 82544 PCI-X workaround in e1000_tx_map may split a buffer,
	 * consuming one more descriptor */
	if(adapter->pcix_82544)
		count++;

#ifdef MAX_SKB_FRAGS
	nr_frags = skb_shinfo(skb)->nr_frags;
	for(f = 0; f < nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
				       max_txd_pwr);
	if(adapter->pcix_82544)
		count += nr_frags;
#endif

#ifdef NETIF_F_LLTX
	/* lockless-transmit mode: the stack does not serialize callers,
	 * so try the lock and let the stack requeue on contention */
	local_irq_save(flags);
	if (!spin_trylock(&adapter->tx_lock)) {
		/* Collision - tell upper layer to requeue */
		local_irq_restore(flags);
		return NETDEV_TX_LOCKED;
	}
#else
	spin_lock_irqsave(&adapter->tx_lock, flags);
#endif

	/* need: count + 2 desc gap to keep tail from touching
	 * head, otherwise try next time */
	if(unlikely(E1000_DESC_UNUSED(&adapter->tx_ring) < count + 2)) {
		netif_stop_queue(netdev);
		spin_unlock_irqrestore(&adapter->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}

	if(unlikely(adapter->hw.mac_type == e1000_82547)) {
		if(unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
			netif_stop_queue(netdev);
			/* re-check the FIFO state from the timer */
			mod_timer(&adapter->tx_fifo_stall_timer, jiffies);
			spin_unlock_irqrestore(&adapter->tx_lock, flags);
			return NETDEV_TX_BUSY;
		}
	}

#ifndef NETIF_F_LLTX
	spin_unlock_irqrestore(&adapter->tx_lock, flags);
#endif

#ifdef NETIF_F_HW_VLAN_TX
	if(unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) {
		tx_flags |= E1000_TX_FLAGS_VLAN;
		tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
	}
#endif

	first = adapter->tx_ring.next_to_use;

	/* TSO takes precedence; fall back to plain hardware checksum */
	if(likely(e1000_tso(adapter, skb)))
		tx_flags |= E1000_TX_FLAGS_TSO;
	else if(likely(e1000_tx_csum(adapter, skb)))
		tx_flags |= E1000_TX_FLAGS_CSUM;

	e1000_tx_queue(adapter,
		e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss),
		tx_flags);

	netdev->trans_start = jiffies;

#ifdef NETIF_F_LLTX
	/* Make sure there is space in the ring for the next send.
	 */
	if(unlikely(E1000_DESC_UNUSED(&adapter->tx_ring) < MAX_SKB_FRAGS + 2))
		netif_stop_queue(netdev);

	spin_unlock_irqrestore(&adapter->tx_lock, flags);
#endif

	return NETDEV_TX_OK;
}

/**
 * e1000_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void
e1000_tx_timeout(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev->priv;

	/* Do the reset outside of interrupt context */
	schedule_work(&adapter->tx_timeout_task);
}

/* Work-queue callback scheduled by e1000_tx_timeout: recover from a Tx
 * hang by taking the adapter down and bringing it back up. */
static void
e1000_tx_timeout_task(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev->priv;

	e1000_down(adapter);
	e1000_up(adapter);
}

/**
 * e1000_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the timer callback.
 **/
static struct net_device_stats *
e1000_get_stats(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev->priv;

	/* refresh hardware counters before handing the struct back */
	e1000_update_stats(adapter);
	return &adapter->net_stats;
}

/**
 * e1000_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int
e1000_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct e1000_adapter *adapter = netdev->priv;
	int old_mtu = adapter->rx_buffer_len;
	/* on-wire frame = payload + ethernet header + FCS */
	int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;

	if((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
		(max_frame > MAX_JUMBO_FRAME_SIZE)) {
			DPRINTK(PROBE, ERR, "Invalid MTU setting\n");
			return -EINVAL;
	}

	/* pick the smallest Rx buffer size that holds the new frame;
	 * jumbo frames are rejected on pre-82543 hardware */
	if(max_frame <= MAXIMUM_ETHERNET_FRAME_SIZE) {
		adapter->rx_buffer_len = E1000_RXBUFFER_2048;
	} else if(adapter->hw.mac_type < e1000_82543) {
		DPRINTK(PROBE, ERR, "Jumbo Frames not supported on 82542\n");
		return -EINVAL;
	} else if(max_frame <= E1000_RXBUFFER_4096) {
		adapter->rx_buffer_len = E1000_RXBUFFER_4096;
	} else if(max_frame <= E1000_RXBUFFER_8192) {
		adapter->rx_buffer_len = E1000_RXBUFFER_8192;
	}
	else {
		adapter->rx_buffer_len = E1000_RXBUFFER_16384;
	}

	/* if the Rx buffer length changed while the interface is up,
	 * restart it so Rx resources are re-allocated at the new size */
	if(old_mtu != adapter->rx_buffer_len && netif_running(netdev)) {
		e1000_down(adapter);
		e1000_up(adapter);
	}

	netdev->mtu = new_mtu;
	adapter->hw.max_frame_size = max_frame;

	return 0;
}

/**
 * e1000_update_stats - Update the board statistics counters
 * @adapter: board private structure
 **/
void
e1000_update_stats(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	unsigned long flags;
	/* NOTE(review): phy_tmp is unused in the portion visible here;
	 * presumably used in the remainder of the function — confirm. */
	uint16_t phy_tmp;

#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF

	spin_lock_irqsave(&adapter->stats_lock, flags);

	/* these counters are modified from e1000_adjust_tbi_stats,
	 * called from the interrupt context, so they must only
	 * be written while holding adapter->stats_lock
	 */

	/* hardware statistics registers are clear-on-read, so each read
	 * is a delta that gets accumulated into the software counters */
	adapter->stats.crcerrs += E1000_READ_REG(hw, CRCERRS);
	adapter->stats.gprc += E1000_READ_REG(hw, GPRC);
	adapter->stats.gorcl += E1000_READ_REG(hw, GORCL);
	adapter->stats.gorch += E1000_READ_REG(hw, GORCH);
	adapter->stats.bprc += E1000_READ_REG(hw, BPRC);
	adapter->stats.mprc += E1000_READ_REG(hw, MPRC);
	adapter->stats.roc += E1000_READ_REG(hw, ROC);
	adapter->stats.prc64 += E1000_READ_REG(hw, PRC64);
	adapter->stats.prc127 += E1000_READ_REG(hw, PRC127);
	adapter->stats.prc255 += E1000_READ_REG(hw, PRC255);
	adapter->stats.prc511 += E1000_READ_REG(hw, PRC511);
	adapter->stats.prc1023 += E1000_READ_REG(hw, PRC1023);
	adapter->stats.prc1522 += E1000_READ_REG(hw, PRC1522);
	adapter->stats.symerrs += E1000_READ_REG(hw, SYMERRS);
	adapter->stats.mpc += E1000_READ_REG(hw, MPC);
	adapter->stats.scc += E1000_READ_REG(hw, SCC);
	adapter->stats.ecol += E1000_READ_REG(hw, ECOL);
	adapter->stats.mcc += E1000_READ_REG(hw, MCC);
	adapter->stats.latecol += E1000_READ_REG(hw, LATECOL);
	adapter->stats.dc += E1000_READ_REG(hw, DC);
	adapter->stats.sec += E1000_READ_REG(hw, SEC);
	adapter->stats.rlec += E1000_READ_REG(hw, RLEC);
	adapter->stats.xonrxc += E1000_READ_REG(hw, XONRXC);
	adapter->stats.xontxc += E1000_READ_REG(hw, XONTXC);
	adapter->stats.xoffrxc += E1000_READ_REG(hw, XOFFRXC);
	adapter->stats.xofftxc += E1000_READ_REG(hw, XOFFTXC);
	adapter->stats.fcruc += E1000_READ_REG(hw, FCRUC);
	adapter->stats.gptc += E1000_READ_REG(hw, GPTC);
	adapter->stats.gotcl += E1000_READ_REG(hw, GOTCL);
	adapter->stats.gotch += E1000_READ_REG(hw, GOTCH);
	adapter->stats.rnbc += E1000_READ_REG(hw, RNBC);
	adapter->stats.ruc += E1000_READ_REG(hw, RUC);
	adapter->stats.rfc += E1000_READ_REG(hw, RFC);
	adapter->stats.rjc += E1000_READ_REG(hw, RJC);
	adapter->stats.torl += E1000_READ_REG(hw, TORL);
	adapter->stats.torh += E1000_READ_REG(hw, TORH);
	adapter->stats.totl += E1000_READ_REG(hw, TOTL);
	adapter->stats.toth += E1000_READ_REG(hw, TOTH);
	adapter->stats.tpr += E1000_READ_REG(hw, TPR);
	adapter->stats.ptc64 += E1000_READ_REG(hw, PTC64);
	adapter->stats.ptc127 += E1000_READ_REG(hw, PTC127);
	adapter->stats.ptc255 += E1000_READ_REG(hw, PTC255);
	adapter->stats.ptc511 += E1000_READ_REG(hw, PTC511);
	adapter->stats.ptc1023 += E1000_READ_REG(hw, PTC1023);
	adapter->stats.ptc1522 += E1000_READ_REG(hw, PTC1522);
	adapter->stats.mptc += E1000_READ_REG(hw, MPTC);
	adapter->stats.bptc += E1000_READ_REG(hw, BPTC);

	/* used for adaptive IFS */

	hw->tx_packet_delta = E1000_READ_REG(hw, TPT);
	adapter->stats.tpt += hw->tx_packet_delta;
	hw->collision_delta = E1000_READ_REG(hw, COLC);
	adapter->stats.colc += hw->collision_delta;

	/* these counters only exist on 82543 and newer */
	if(hw->mac_type >= e1000_82543) {
		adapter->stats.algnerrc += E1000_READ_REG(hw, ALGNERRC);
		adapter->stats.rxerrc += E1000_READ_REG(hw, RXERRC);
		adapter->stats.tncrs += E1000_READ_REG(hw, TNCRS);
		adapter->stats.cexterr += E1000_READ_REG(hw, CEXTERR);
		adapter->stats.tsctc += E1000_READ_REG(hw, TSCTC);
		adapter->stats.tsctfc += E1000_READ_REG(hw, TSCTFC);
	}

	/* Fill out the OS statistics structure */
	adapter->net_stats.rx_packets = adapter->sta

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -