
e1000_main.c

Collection: linux-2.4.29 operating system source code
Language: C
Page 1 of 5

			E1000_WRITE_REG(&adapter->hw, TDFHS,
					adapter->tx_head_addr);
			E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
			E1000_WRITE_FLUSH(&adapter->hw);

			adapter->tx_fifo_head = 0;
			atomic_set(&adapter->tx_fifo_stall, 0);
			netif_wake_queue(netdev);
		} else {
			mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
		}
	}
}

/**
 * e1000_watchdog - Timer Call-back
 * @data: pointer to netdev cast into an unsigned long
 **/

static void
e1000_watchdog(unsigned long data)
{
	struct e1000_adapter *adapter = (struct e1000_adapter *) data;
	struct net_device *netdev = adapter->netdev;
	struct e1000_desc_ring *txdr = &adapter->tx_ring;
	unsigned int i;
	uint32_t link;

	e1000_check_for_link(&adapter->hw);

	if((adapter->hw.media_type == e1000_media_type_internal_serdes) &&
	   !(E1000_READ_REG(&adapter->hw, TXCW) & E1000_TXCW_ANE))
		link = !adapter->hw.serdes_link_down;
	else
		link = E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU;

	if(link) {
		if(!netif_carrier_ok(netdev)) {
			e1000_get_speed_and_duplex(&adapter->hw,
			                           &adapter->link_speed,
			                           &adapter->link_duplex);

			DPRINTK(LINK, INFO, "NIC Link is Up %d Mbps %s\n",
			       adapter->link_speed,
			       adapter->link_duplex == FULL_DUPLEX ?
			       "Full Duplex" : "Half Duplex");

			netif_carrier_on(netdev);
			netif_wake_queue(netdev);
			mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ);
			adapter->smartspeed = 0;
		}
	} else {
		if(netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;
			DPRINTK(LINK, INFO, "NIC Link is Down\n");
			netif_carrier_off(netdev);
			netif_stop_queue(netdev);
			mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ);
		}

		e1000_smartspeed(adapter);
	}

	e1000_update_stats(adapter);

	adapter->hw.tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
	adapter->tpt_old = adapter->stats.tpt;
	adapter->hw.collision_delta = adapter->stats.colc - adapter->colc_old;
	adapter->colc_old = adapter->stats.colc;

	adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
	adapter->gorcl_old = adapter->stats.gorcl;
	adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
	adapter->gotcl_old = adapter->stats.gotcl;

	e1000_update_adaptive(&adapter->hw);

	if(!netif_carrier_ok(netdev)) {
		if(E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context). */
			schedule_task(&adapter->tx_timeout_task);
		}
	}

	/* Dynamic mode for Interrupt Throttle Rate (ITR) */
	if(adapter->hw.mac_type >= e1000_82540 && adapter->itr == 1) {
		/* Symmetric Tx/Rx gets a reduced ITR=2000; Total
		 * asymmetrical Tx or Rx gets ITR=8000; everyone
		 * else is between 2000-8000. */
		uint32_t goc = (adapter->gotcl + adapter->gorcl) / 10000;
		uint32_t dif = (adapter->gotcl > adapter->gorcl ?
			adapter->gotcl - adapter->gorcl :
			adapter->gorcl - adapter->gotcl) / 10000;
		uint32_t itr = goc > 0 ?
			(dif * 6000 / goc + 2000) : 8000;
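		/* The ITR register holds the minimum inter-interrupt
		 * interval in 256 ns units, so the write below programs
		 * roughly `itr` interrupts per second: e.g. itr = 8000
		 * gives 1000000000 / (8000 * 256) = 488 register counts,
		 * i.e. an interval of about 125 us. */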
		E1000_WRITE_REG(&adapter->hw, ITR, 1000000000 / (itr * 256));
	}

	/* Cause software interrupt to ensure rx ring is cleaned */
	E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_RXDMT0);

	/* Early detection of hung controller */
	i = txdr->next_to_clean;
	if(txdr->buffer_info[i].dma &&
	   time_after(jiffies, txdr->buffer_info[i].time_stamp + HZ) &&
	   !(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_TXOFF))
		netif_stop_queue(netdev);

	/* Reset the timer */
	mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
}

#define E1000_TX_FLAGS_CSUM		0x00000001
#define E1000_TX_FLAGS_VLAN		0x00000002
#define E1000_TX_FLAGS_TSO		0x00000004
#define E1000_TX_FLAGS_VLAN_MASK	0xffff0000
#define E1000_TX_FLAGS_VLAN_SHIFT	16

static inline boolean_t
e1000_tso(struct e1000_adapter *adapter, struct sk_buff *skb)
{
#ifdef NETIF_F_TSO
	struct e1000_context_desc *context_desc;
	unsigned int i;
	uint32_t cmd_length = 0;
	uint16_t ipcse, tucse, mss;
	uint8_t ipcss, ipcso, tucss, tucso, hdr_len;

	if(skb_shinfo(skb)->tso_size) {
		hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
		mss = skb_shinfo(skb)->tso_size;
		skb->nh.iph->tot_len = 0;
		skb->nh.iph->check = 0;
		skb->h.th->check = ~csum_tcpudp_magic(skb->nh.iph->saddr,
		                                      skb->nh.iph->daddr,
		                                      0,
		                                      IPPROTO_TCP,
		                                      0);
		ipcss = skb->nh.raw - skb->data;
		ipcso = (void *)&(skb->nh.iph->check) - (void *)skb->data;
		ipcse = skb->h.raw - skb->data - 1;
		tucss = skb->h.raw - skb->data;
		tucso = (void *)&(skb->h.th->check) - (void *)skb->data;
		tucse = 0;

		cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
			       E1000_TXD_CMD_IP | E1000_TXD_CMD_TCP |
			       (skb->len - (hdr_len)));

		i = adapter->tx_ring.next_to_use;
		context_desc = E1000_CONTEXT_DESC(adapter->tx_ring, i);

		context_desc->lower_setup.ip_fields.ipcss  = ipcss;
		context_desc->lower_setup.ip_fields.ipcso  = ipcso;
		context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
		context_desc->upper_setup.tcp_fields.tucss = tucss;
		context_desc->upper_setup.tcp_fields.tucso = tucso;
		context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
		context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
		context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
		context_desc->cmd_and_length = cpu_to_le32(cmd_length);

		if(++i == adapter->tx_ring.count) i = 0;
		adapter->tx_ring.next_to_use = i;

		return TRUE;
	}
#endif

	return FALSE;
}

static inline boolean_t
e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
{
	struct e1000_context_desc *context_desc;
	unsigned int i;
	uint8_t css;

	if(likely(skb->ip_summed == CHECKSUM_HW)) {
		css = skb->h.raw - skb->data;

		i = adapter->tx_ring.next_to_use;
		context_desc = E1000_CONTEXT_DESC(adapter->tx_ring, i);

		context_desc->upper_setup.tcp_fields.tucss = css;
		context_desc->upper_setup.tcp_fields.tucso = css + skb->csum;
		context_desc->upper_setup.tcp_fields.tucse = 0;
		context_desc->tcp_seg_setup.data = 0;
		context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);

		if(unlikely(++i == adapter->tx_ring.count)) i = 0;
		adapter->tx_ring.next_to_use = i;

		return TRUE;
	}

	return FALSE;
}

#define E1000_MAX_TXD_PWR	12
#define E1000_MAX_DATA_PER_TXD	(1<<E1000_MAX_TXD_PWR)
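/* e1000_tx_map below DMA-maps the skb's linear data and each page
 * fragment in chunks of at most max_per_txd bytes (4096 here, from
 * E1000_MAX_DATA_PER_TXD above), recording one buffer_info entry per
 * chunk; e1000_tx_queue then turns those entries into hardware
 * descriptors. */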
static inline int
e1000_tx_map(struct e1000_adapter *adapter, struct sk_buff *skb,
	unsigned int first, unsigned int max_per_txd,
	unsigned int nr_frags, unsigned int mss)
{
	struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
	struct e1000_buffer *buffer_info;
	unsigned int len = skb->len;
	unsigned int offset = 0, size, count = 0, i;
	unsigned int f;
	len -= skb->data_len;

	i = tx_ring->next_to_use;

	while(len) {
		buffer_info = &tx_ring->buffer_info[i];
		size = min(len, max_per_txd);
#ifdef NETIF_F_TSO
		/* Workaround for premature desc write-backs
		 * in TSO mode.  Append 4-byte sentinel desc */
		if(unlikely(mss && !nr_frags && size == len && size > 8))
			size -= 4;
#endif
		/* Workaround for potential 82544 hang in PCI-X.  Avoid
		 * terminating buffers within evenly-aligned dwords. */
		if(unlikely(adapter->pcix_82544 &&
		   !((unsigned long)(skb->data + offset + size - 1) & 4) &&
		   size > 4))
			size -= 4;

		buffer_info->length = size;
		buffer_info->dma =
			pci_map_single(adapter->pdev,
				skb->data + offset,
				size,
				PCI_DMA_TODEVICE);
		buffer_info->time_stamp = jiffies;

		len -= size;
		offset += size;
		count++;
		if(unlikely(++i == tx_ring->count)) i = 0;
	}

	for(f = 0; f < nr_frags; f++) {
		struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = frag->size;
		offset = frag->page_offset;

		while(len) {
			buffer_info = &tx_ring->buffer_info[i];
			size = min(len, max_per_txd);
#ifdef NETIF_F_TSO
			/* Workaround for premature desc write-backs
			 * in TSO mode.  Append 4-byte sentinel desc */
			if(unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
				size -= 4;
#endif
			/* Workaround for potential 82544 hang in PCI-X.
			 * Avoid terminating buffers within evenly-aligned
			 * dwords. */
			if(unlikely(adapter->pcix_82544 &&
			   !((unsigned long)(frag->page+offset+size-1) & 4) &&
			   size > 4))
				size -= 4;

			buffer_info->length = size;
			buffer_info->dma =
				pci_map_page(adapter->pdev,
					frag->page,
					offset,
					size,
					PCI_DMA_TODEVICE);
			buffer_info->time_stamp = jiffies;

			len -= size;
			offset += size;
			count++;
			if(unlikely(++i == tx_ring->count)) i = 0;
		}
	}

	i = (i == 0) ? tx_ring->count - 1 : i - 1;
	tx_ring->buffer_info[i].skb = skb;
	tx_ring->buffer_info[first].next_to_watch = i;

	return count;
}

static inline void
e1000_tx_queue(struct e1000_adapter *adapter, int count, int tx_flags)
{
	struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
	struct e1000_tx_desc *tx_desc = NULL;
	struct e1000_buffer *buffer_info;
	uint32_t txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
	unsigned int i;

	if(likely(tx_flags & E1000_TX_FLAGS_TSO)) {
		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
		             E1000_TXD_CMD_TSE;
		txd_upper |= (E1000_TXD_POPTS_IXSM | E1000_TXD_POPTS_TXSM) << 8;
	}

	if(likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
	}

	if(unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
		txd_lower |= E1000_TXD_CMD_VLE;
		txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
	}

	i = tx_ring->next_to_use;

	while(count--) {
		buffer_info = &tx_ring->buffer_info[i];
		tx_desc = E1000_TX_DESC(*tx_ring, i);
		tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->lower.data =
			cpu_to_le32(txd_lower | buffer_info->length);
		tx_desc->upper.data = cpu_to_le32(txd_upper);
		if(unlikely(++i == tx_ring->count)) i = 0;
	}

	tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64). */
	wmb();

	tx_ring->next_to_use = i;
	E1000_WRITE_REG(&adapter->hw, TDT, i);
}

/**
 * 82547 workaround to avoid controller hang in half-duplex environment.
 * The workaround is to avoid queuing a large packet that would span
 * the internal Tx FIFO ring boundary by notifying the stack to resend
 * the packet at a later time.  This gives the Tx FIFO an opportunity to
 * flush all packets.  When that occurs, we reset the Tx FIFO pointers
 * to the beginning of the Tx FIFO.
 **/

#define E1000_FIFO_HDR			0x10
#define E1000_82547_PAD_LEN		0x3E0

static inline int
e1000_82547_fifo_workaround(struct e1000_adapter *adapter, struct sk_buff *skb)
{
	uint32_t fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
	uint32_t skb_fifo_len = skb->len + E1000_FIFO_HDR;

	E1000_ROUNDUP(skb_fifo_len, E1000_FIFO_HDR);

	if(adapter->link_duplex != HALF_DUPLEX)
		goto no_fifo_stall_required;

	if(atomic_read(&adapter->tx_fifo_stall))
		return 1;

	if(skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
		atomic_set(&adapter->tx_fifo_stall, 1);
		return 1;
	}

no_fifo_stall_required:
	adapter->tx_fifo_head += skb_fifo_len;
	if(adapter->tx_fifo_head >= adapter->tx_fifo_size)
		adapter->tx_fifo_head -= adapter->tx_fifo_size;
	return 0;
}

#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
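/* TXD_USE_COUNT(S, X) is a worst-case estimate of the descriptors a
 * buffer of S bytes can consume when each descriptor holds at most
 * 2^X bytes: (S >> X) + 1.  For a 9014-byte buffer with X = 12 this
 * is (9014 >> 12) + 1 = 3.  e1000_xmit_frame below sums these
 * estimates before taking the Tx lock so it can bail out early when
 * the ring lacks room. */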
static int
e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev->priv;
	unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
	unsigned int tx_flags = 0;
	unsigned int len = skb->len;
	unsigned long flags;
	unsigned int nr_frags = 0;
	unsigned int mss = 0;
	int count = 0;
	unsigned int f;
	len -= skb->data_len;

	if(unlikely(skb->len <= 0)) {
		dev_kfree_skb_any(skb);
		return 0;
	}

#ifdef NETIF_F_TSO
	mss = skb_shinfo(skb)->tso_size;
	/* The controller does a simple calculation to
	 * make sure there is enough room in the FIFO before
	 * initiating the DMA for each buffer.  The calc is:
	 * 4 = ceil(buffer len/mss).  To make sure we don't
	 * overrun the FIFO, adjust the max buffer len if mss
	 * drops. */
	if(mss) {
		max_per_txd = min(mss << 2, max_per_txd);
		max_txd_pwr = fls(max_per_txd) - 1;
	}

	if((mss) || (skb->ip_summed == CHECKSUM_HW))
		count++;
	count++;	/* for sentinel desc */
#else
	if(skb->ip_summed == CHECKSUM_HW)
		count++;
#endif

	count += TXD_USE_COUNT(len, max_txd_pwr);

	if(adapter->pcix_82544)
		count++;

	nr_frags = skb_shinfo(skb)->nr_frags;
	for(f = 0; f < nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
				       max_txd_pwr);
	if(adapter->pcix_82544)
		count += nr_frags;

	spin_lock_irqsave(&adapter->tx_lock, flags);

	/* need: count + 2 desc gap to keep tail from touching
	 * head, otherwise try next time */
	if(unlikely(E1000_DESC_UNUSED(&adapter->tx_ring) < count + 2)) {
		netif_stop_queue(netdev);
		spin_unlock_irqrestore(&adapter->tx_lock, flags);
		return 1;
	}

	if(unlikely(adapter->hw.mac_type == e1000_82547)) {
		if(unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
			netif_stop_queue(netdev);
			mod_timer(&adapter->tx_fifo_stall_timer, jiffies);
			/* release the lock before bailing out; returning
			 * with it held would leave IRQs disabled */
			spin_unlock_irqrestore(&adapter->tx_lock, flags);
			return 1;
		}
	}
	spin_unlock_irqrestore(&adapter->tx_lock, flags);

	if(unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) {
		tx_flags |= E1000_TX_FLAGS_VLAN;
		tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
	}

	first = adapter->tx_ring.next_to_use;
