
📄 netdev.c

📁 grub source code analysis documents
💻 C
📖 Page 1 of 5
/*******************************************************************************

  Intel PRO/1000 Linux driver
  Copyright(c) 1999 - 2008 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#ifdef NETIF_F_TSO
#include <net/checksum.h>
#ifdef NETIF_F_TSO6
#include <net/ip6_checksum.h>
#endif
#endif
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>

#include "e1000.h"

#define DRV_DEBUG
#define DRV_VERSION "0.2.9.5" DRV_DEBUG
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;

static const struct e1000_info *e1000_info_tbl[] = {
	[board_82571]		= &e1000_82571_info,
	[board_82572]		= &e1000_82572_info,
	[board_82573]		= &e1000_82573_info,
	[board_80003es2lan]	= &e1000_es2_info,
	[board_ich8lan]		= &e1000_ich8_info,
	[board_ich9lan]		= &e1000_ich9_info,
};

/**
 * e1000_desc_unused - calculate if we have unused descriptors
 **/
static int e1000_desc_unused(struct e1000_ring *ring)
{
	if (ring->next_to_clean > ring->next_to_use)
		return ring->next_to_clean - ring->next_to_use - 1;

	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
}
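/*
 * Illustrative note on the arithmetic above (example values assumed, not
 * taken from the driver): with count = 256, next_to_clean = 10 and
 * next_to_use = 250, next_to_clean is not greater than next_to_use, so
 * the second branch applies: 256 + 10 - 250 - 1 = 15 free descriptors.
 * The trailing "- 1" keeps one slot permanently unused, so next_to_use
 * can never wrap around and collide with next_to_clean, and an empty
 * ring stays distinguishable from a full one.
 */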
/**
 * e1000_receive_skb - helper function to handle Rx indications
 * @adapter: board private structure
 * @status: descriptor status field as written by hardware
 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
 * @skb: pointer to sk_buff to be indicated to stack
 **/
static void e1000_receive_skb(struct e1000_adapter *adapter,
			      struct net_device *netdev,
			      struct sk_buff *skb,
			      u8 status, u16 vlan)
{
	skb->protocol = eth_type_trans(skb, netdev);

	if (adapter->vlgrp && (status & E1000_RXD_STAT_VP))
		vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
					 le16_to_cpu(vlan) &
					 E1000_RXD_SPC_VLAN_MASK);
	else
		netif_receive_skb(skb);

	netdev->last_rx = jiffies;
}

/**
 * e1000_rx_checksum - Receive Checksum Offload for 82543
 * @adapter:     board private structure
 * @status_err:  receive descriptor status and error fields
 * @csum:	receive descriptor csum field
 * @sk_buff:     socket buffer with received data
 **/
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
			      u32 csum, struct sk_buff *skb)
{
	u16 status = (u16)status_err;
	u8 errors = (u8)(status_err >> 24);
	skb->ip_summed = CHECKSUM_NONE;

	/* Ignore Checksum bit is set */
	if (status & E1000_RXD_STAT_IXSM)
		return;

	/* TCP/UDP checksum error bit is set */
	if (errors & E1000_RXD_ERR_TCPE) {
		/* let the stack verify checksum errors */
		adapter->hw_csum_err++;
		return;
	}

	/* TCP/UDP Checksum has not been calculated */
	if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
		return;

	/* It must be a TCP or UDP packet with a valid checksum */
	if (status & E1000_RXD_STAT_TCPCS) {
		/* TCP checksum is good */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		/*
		 * IP fragment with UDP payload
		 * Hardware complements the payload checksum, so we undo it
		 * and then put the value in host order for further stack use.
		 */
		csum = ntohl(csum ^ 0xFFFF);
		skb->csum = csum;
		skb->ip_summed = CHECKSUM_COMPLETE;
	}
	adapter->hw_csum_good++;
}

/**
 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
 * @adapter: address of board private structure
 **/
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_rx_desc *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto map_skb;
		}

		skb = netdev_alloc_skb(netdev, bufsz);
		if (!skb) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		/*
		 * Make buffer alignment 2 beyond a 16 byte boundary
		 * this will result in a 16 byte aligned IP header after
		 * the 14 byte MAC header is removed
		 */
		skb_reserve(skb, NET_IP_ALIGN);

		buffer_info->skb = skb;
map_skb:
		buffer_info->dma = pci_map_single(pdev, skb->data,
						  adapter->rx_buffer_len,
						  PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(buffer_info->dma)) {
			dev_err(&pdev->dev, "RX DMA map failed\n");
			adapter->rx_dma_failed++;
			break;
		}

		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		if (i-- == 0)
			i = (rx_ring->count - 1);

		/*
		 * Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->tail);
	}
}
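/*
 * Illustrative note (example values assumed): the tail doorbell above is
 * written with the index of the last descriptor that was actually
 * initialized, not with next_to_use itself; "if (i-- == 0)" is a
 * post-decrement with wrap-around, so if the fill loop left i == 0 on a
 * 256-entry ring, the register is written with 255. The wmb() guarantees
 * the descriptor writes are visible in memory before the device is told
 * to fetch them.
 */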
/**
 * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 **/
static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
				      int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_rx_desc_packet_split *rx_desc;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_page;
	struct sk_buff *skb;
	unsigned int i, j;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		rx_desc = E1000_RX_DESC_PS(*rx_ring, i);

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			ps_page = &buffer_info->ps_pages[j];
			if (j >= adapter->rx_ps_pages) {
				/* all unused desc entries get hw null ptr */
				rx_desc->read.buffer_addr[j+1] = ~0;
				continue;
			}
			if (!ps_page->page) {
				ps_page->page = alloc_page(GFP_ATOMIC);
				if (!ps_page->page) {
					adapter->alloc_rx_buff_failed++;
					goto no_buffers;
				}
				ps_page->dma = pci_map_page(pdev,
						   ps_page->page,
						   0, PAGE_SIZE,
						   PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(ps_page->dma)) {
					dev_err(&adapter->pdev->dev,
					  "RX DMA page map failed\n");
					adapter->rx_dma_failed++;
					goto no_buffers;
				}
			}
			/*
			 * Refresh the desc even if buffer_addrs
			 * didn't change because each write-back
			 * erases this info.
			 */
			rx_desc->read.buffer_addr[j+1] =
			     cpu_to_le64(ps_page->dma);
		}

		skb = netdev_alloc_skb(netdev,
				       adapter->rx_ps_bsize0 + NET_IP_ALIGN);
		if (!skb) {
			adapter->alloc_rx_buff_failed++;
			break;
		}

		/*
		 * Make buffer alignment 2 beyond a 16 byte boundary
		 * this will result in a 16 byte aligned IP header after
		 * the 14 byte MAC header is removed
		 */
		skb_reserve(skb, NET_IP_ALIGN);

		buffer_info->skb = skb;
		buffer_info->dma = pci_map_single(pdev, skb->data,
						  adapter->rx_ps_bsize0,
						  PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(buffer_info->dma)) {
			dev_err(&pdev->dev, "RX DMA map failed\n");
			adapter->rx_dma_failed++;
			/* cleanup skb */
			dev_kfree_skb_any(skb);
			buffer_info->skb = NULL;
			break;
		}

		rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;

		if (!(i--))
			i = (rx_ring->count - 1);

		/*
		 * Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		/*
		 * Hardware increments by 16 bytes, but packet split
		 * descriptors are 32 bytes...so we increment tail
		 * twice as much.
		 */
		writel(i<<1, adapter->hw.hw_addr + rx_ring->tail);
	}
}
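/*
 * Illustrative note (sizes assumed for a common configuration): a
 * packet-split descriptor points to one small header buffer of
 * rx_ps_bsize0 bytes (buffer_addr[0]) plus up to PS_PAGE_BUFFERS whole
 * pages for payload (buffer_addr[1..]). With 4 KB pages and three page
 * buffers enabled, a single descriptor can take roughly rx_ps_bsize0 +
 * 12 KB of frame data; slots beyond rx_ps_pages are stuffed with ~0 so
 * the hardware treats them as null pointers and never DMAs into them.
 */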
/**
 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
 * @adapter: address of board private structure
 * @cleaned_count: number of buffers to allocate this pass
 **/
static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
                                         int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = 256 -
	                     16 /* for skb_reserve */ -
	                     NET_IP_ALIGN;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto check_page;
		}

		skb = netdev_alloc_skb(netdev, bufsz);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		/* Make buffer alignment 2 beyond a 16 byte boundary
		 * this will result in a 16 byte aligned IP header after
		 * the 14 byte MAC header is removed
		 */
		skb_reserve(skb, NET_IP_ALIGN);

		buffer_info->skb = skb;
check_page:
		/* allocate a new page if necessary */
		if (!buffer_info->page) {
			buffer_info->page = alloc_page(GFP_ATOMIC);
			if (unlikely(!buffer_info->page)) {
				adapter->alloc_rx_buff_failed++;
				break;
			}
		}

		if (!buffer_info->dma)
			buffer_info->dma = pci_map_page(pdev,
			                                buffer_info->page, 0,
			                                PAGE_SIZE,
			                                PCI_DMA_FROMDEVICE);

		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(++i == rx_ring->count))
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->tail);
	}
}
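/*
 * Note on the jumbo path above: unlike the legacy allocator, it caches
 * both the page and its DMA mapping in buffer_info across refill passes;
 * alloc_page()/pci_map_page() only run when buffer_info->page or
 * buffer_info->dma is still unset. The skb itself is a small shell of
 * less than 256 bytes, since jumbo frame data lands in the mapped pages
 * rather than in skb->data.
 */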
/**
 * e1000_clean_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       int *work_done, int work_to_do)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_rx_desc *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	u32 length;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	while (rx_desc->status & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;
		u8 status;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;

		status = rx_desc->status;
		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		prefetch(skb->data - NET_IP_ALIGN);

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = 1;
		cleaned_count++;
		pci_unmap_single(pdev,
				 buffer_info->dma,
				 adapter->rx_buffer_len,
				 PCI_DMA_FROMDEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->length);

		/* !EOP means multiple descriptors were used to store a single
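/*
 * Note on the receive loop above (standard NAPI semantics, assumed
 * rather than stated in this file): E1000_RXD_STAT_DD is the
 * "descriptor done" bit the NIC sets after writing a received frame
 * back to memory, so the loop polls descriptor status in RAM instead of
 * reading device registers. work_to_do is the NAPI poll budget;
 * stopping once *work_done reaches it bounds how long a single polling
 * pass may run.
 */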
