
ixgb_main.c
linux-2.6.15.6 | C | Page 1 of 4

This excerpt from the Intel ixgb 10 Gigabit Ethernet (82597) driver covers statistics reporting, the interrupt handler, the NAPI poll routine, Tx/Rx descriptor cleanup, Rx buffer replenishment, and the VLAN tagging/filtering hooks.
    adapter->stats.rjc += IXGB_READ_REG(&adapter->hw, RJC);

    /* Fill out the OS statistics structure */

    adapter->net_stats.rx_packets = adapter->stats.gprcl;
    adapter->net_stats.tx_packets = adapter->stats.gptcl;
    adapter->net_stats.rx_bytes = adapter->stats.gorcl;
    adapter->net_stats.tx_bytes = adapter->stats.gotcl;
    adapter->net_stats.multicast = adapter->stats.mprcl;
    adapter->net_stats.collisions = 0;

    /* ignore RLEC as it reports errors for padded (<64bytes) frames
     * with a length in the type/len field */
    adapter->net_stats.rx_errors =
        /* adapter->stats.rnbc + */ adapter->stats.crcerrs +
        adapter->stats.ruc +
        adapter->stats.roc /*+ adapter->stats.rlec */ +
        adapter->stats.icbc +
        adapter->stats.ecbc + adapter->stats.mpc;

    /* see above
     * adapter->net_stats.rx_length_errors = adapter->stats.rlec;
     */

    adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
    adapter->net_stats.rx_fifo_errors = adapter->stats.mpc;
    adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
    adapter->net_stats.rx_over_errors = adapter->stats.mpc;

    adapter->net_stats.tx_errors = 0;
    adapter->net_stats.rx_frame_errors = 0;
    adapter->net_stats.tx_aborted_errors = 0;
    adapter->net_stats.tx_carrier_errors = 0;
    adapter->net_stats.tx_fifo_errors = 0;
    adapter->net_stats.tx_heartbeat_errors = 0;
    adapter->net_stats.tx_window_errors = 0;
}

#define IXGB_MAX_INTR 10

/**
 * ixgb_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 * @pt_regs: CPU registers structure
 **/

static irqreturn_t
ixgb_intr(int irq, void *data, struct pt_regs *regs)
{
    struct net_device *netdev = data;
    struct ixgb_adapter *adapter = netdev_priv(netdev);
    struct ixgb_hw *hw = &adapter->hw;
    uint32_t icr = IXGB_READ_REG(hw, ICR);
#ifndef CONFIG_IXGB_NAPI
    unsigned int i;
#endif

    if(unlikely(!icr))
        return IRQ_NONE;  /* Not our interrupt */

    if(unlikely(icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC))) {
        mod_timer(&adapter->watchdog_timer, jiffies);
    }

#ifdef CONFIG_IXGB_NAPI
    if(netif_rx_schedule_prep(netdev)) {

        /* Disable interrupts and register for poll. The flush
         * of the posted write is intentionally left out.
         */

        atomic_inc(&adapter->irq_sem);
        IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
        __netif_rx_schedule(netdev);
    }
#else
    /* yes, that is actually a & and it is meant to make sure that
     * every pass through this for loop checks both receive and
     * transmit queues for completed descriptors, intended to
     * avoid starvation issues and assist tx/rx fairness.
     */
    for(i = 0; i < IXGB_MAX_INTR; i++)
        if(!ixgb_clean_rx_irq(adapter) &
           !ixgb_clean_tx_irq(adapter))
            break;
#endif

    return IRQ_HANDLED;
}

#ifdef CONFIG_IXGB_NAPI
/**
 * ixgb_clean - NAPI Rx polling callback
 * @adapter: board private structure
 **/

static int
ixgb_clean(struct net_device *netdev, int *budget)
{
    struct ixgb_adapter *adapter = netdev_priv(netdev);
    int work_to_do = min(*budget, netdev->quota);
    int tx_cleaned;
    int work_done = 0;

    tx_cleaned = ixgb_clean_tx_irq(adapter);
    ixgb_clean_rx_irq(adapter, &work_done, work_to_do);

    *budget -= work_done;
    netdev->quota -= work_done;

    /* if no Tx and not enough Rx work done, exit the polling mode */
    if((!tx_cleaned && (work_done == 0)) || !netif_running(netdev)) {
        netif_rx_complete(netdev);
        ixgb_irq_enable(adapter);
        return 0;
    }

    return 1;
}
#endif

/**
 * ixgb_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 **/

static boolean_t
ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
{
    struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
    struct net_device *netdev = adapter->netdev;
    struct ixgb_tx_desc *tx_desc, *eop_desc;
    struct ixgb_buffer *buffer_info;
    unsigned int i, eop;
    boolean_t cleaned = FALSE;

    i = tx_ring->next_to_clean;
    eop = tx_ring->buffer_info[i].next_to_watch;
    eop_desc = IXGB_TX_DESC(*tx_ring, eop);

    while(eop_desc->status & IXGB_TX_DESC_STATUS_DD) {

        for(cleaned = FALSE; !cleaned; ) {
            tx_desc = IXGB_TX_DESC(*tx_ring, i);
            buffer_info = &tx_ring->buffer_info[i];

            if (tx_desc->popts
                & (IXGB_TX_DESC_POPTS_TXSM |
                   IXGB_TX_DESC_POPTS_IXSM))
                adapter->hw_csum_tx_good++;

            ixgb_unmap_and_free_tx_resource(adapter, buffer_info);

            *(uint32_t *)&(tx_desc->status) = 0;

            cleaned = (i == eop);
            if(++i == tx_ring->count) i = 0;
        }

        eop = tx_ring->buffer_info[i].next_to_watch;
        eop_desc = IXGB_TX_DESC(*tx_ring, eop);
    }

    tx_ring->next_to_clean = i;

    spin_lock(&adapter->tx_lock);
    if(cleaned && netif_queue_stopped(netdev) && netif_carrier_ok(netdev) &&
       (IXGB_DESC_UNUSED(tx_ring) > IXGB_TX_QUEUE_WAKE)) {
        netif_wake_queue(netdev);
    }
    spin_unlock(&adapter->tx_lock);

    if(adapter->detect_tx_hung) {
        /* detect a transmit hang in hardware, this serializes the
         * check with the clearing of time_stamp and movement of i */
        adapter->detect_tx_hung = FALSE;
        if(tx_ring->buffer_info[i].dma &&
           time_after(jiffies, tx_ring->buffer_info[i].time_stamp + HZ)
           && !(IXGB_READ_REG(&adapter->hw, STATUS) &
                IXGB_STATUS_TXOFF))
            netif_stop_queue(netdev);
    }

    return cleaned;
}

/**
 * ixgb_rx_checksum - Receive Checksum Offload for 82597.
 * @adapter: board private structure
 * @rx_desc: receive descriptor
 * @sk_buff: socket buffer with received data
 **/

static inline void
ixgb_rx_checksum(struct ixgb_adapter *adapter,
                 struct ixgb_rx_desc *rx_desc,
                 struct sk_buff *skb)
{
    /* Ignore Checksum bit is set OR
     * TCP Checksum has not been calculated
     */
    if((rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) ||
       (!(rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS))) {
        skb->ip_summed = CHECKSUM_NONE;
        return;
    }

    /* At this point we know the hardware did the TCP checksum */
    /* now look at the TCP checksum error bit */
    if(rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE) {
        /* let the stack verify checksum errors */
        skb->ip_summed = CHECKSUM_NONE;
        adapter->hw_csum_rx_error++;
    } else {
        /* TCP checksum is good */
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        adapter->hw_csum_rx_good++;
    }
}

/**
 * ixgb_clean_rx_irq - Send received data up the network stack,
 * @adapter: board private structure
 **/

static boolean_t
#ifdef CONFIG_IXGB_NAPI
ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
#else
ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
#endif
{
    struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
    struct net_device *netdev = adapter->netdev;
    struct pci_dev *pdev = adapter->pdev;
    struct ixgb_rx_desc *rx_desc, *next_rxd;
    struct ixgb_buffer *buffer_info, *next_buffer, *next2_buffer;
    uint32_t length;
    unsigned int i, j;
    boolean_t cleaned = FALSE;

    i = rx_ring->next_to_clean;
    rx_desc = IXGB_RX_DESC(*rx_ring, i);
    buffer_info = &rx_ring->buffer_info[i];

    while(rx_desc->status & IXGB_RX_DESC_STATUS_DD) {
        struct sk_buff *skb, *next_skb;
        u8 status;

#ifdef CONFIG_IXGB_NAPI
        if(*work_done >= work_to_do)
            break;

        (*work_done)++;
#endif
        status = rx_desc->status;
        skb = buffer_info->skb;

        prefetch(skb->data);

        if(++i == rx_ring->count) i = 0;
        next_rxd = IXGB_RX_DESC(*rx_ring, i);
        prefetch(next_rxd);

        if((j = i + 1) == rx_ring->count) j = 0;
        next2_buffer = &rx_ring->buffer_info[j];
        prefetch(next2_buffer);

        next_buffer = &rx_ring->buffer_info[i];
        next_skb = next_buffer->skb;
        prefetch(next_skb);

        cleaned = TRUE;

        pci_unmap_single(pdev,
                         buffer_info->dma,
                         buffer_info->length,
                         PCI_DMA_FROMDEVICE);

        length = le16_to_cpu(rx_desc->length);

        if(unlikely(!(status & IXGB_RX_DESC_STATUS_EOP))) {

            /* All receives must fit into a single buffer */

            IXGB_DBG("Receive packet consumed multiple buffers "
                     "length<%x>\n", length);

            dev_kfree_skb_irq(skb);
            goto rxdesc_done;
        }

        if (unlikely(rx_desc->errors
                     & (IXGB_RX_DESC_ERRORS_CE | IXGB_RX_DESC_ERRORS_SE
                        | IXGB_RX_DESC_ERRORS_P |
                        IXGB_RX_DESC_ERRORS_RXE))) {

            dev_kfree_skb_irq(skb);
            goto rxdesc_done;
        }

        /* Good Receive */
        skb_put(skb, length);

        /* Receive Checksum Offload */
        ixgb_rx_checksum(adapter, rx_desc, skb);

        skb->protocol = eth_type_trans(skb, netdev);
#ifdef CONFIG_IXGB_NAPI
        if(adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) {
            vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
                le16_to_cpu(rx_desc->special) &
                    IXGB_RX_DESC_SPECIAL_VLAN_MASK);
        } else {
            netif_receive_skb(skb);
        }
#else /* CONFIG_IXGB_NAPI */
        if(adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) {
            vlan_hwaccel_rx(skb, adapter->vlgrp,
                le16_to_cpu(rx_desc->special) &
                    IXGB_RX_DESC_SPECIAL_VLAN_MASK);
        } else {
            netif_rx(skb);
        }
#endif /* CONFIG_IXGB_NAPI */
        netdev->last_rx = jiffies;

rxdesc_done:
        /* clean up descriptor, might be written over by hw */
        rx_desc->status = 0;
        buffer_info->skb = NULL;

        /* use prefetched values */
        rx_desc = next_rxd;
        buffer_info = next_buffer;
    }
    rx_ring->next_to_clean = i;

    ixgb_alloc_rx_buffers(adapter);

    return cleaned;
}

/**
 * ixgb_alloc_rx_buffers - Replace used receive buffers
 * @adapter: address of board private structure
 **/

static void
ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter)
{
    struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
    struct net_device *netdev = adapter->netdev;
    struct pci_dev *pdev = adapter->pdev;
    struct ixgb_rx_desc *rx_desc;
    struct ixgb_buffer *buffer_info;
    struct sk_buff *skb;
    unsigned int i;
    int num_group_tail_writes;
    long cleancount;

    i = rx_ring->next_to_use;
    buffer_info = &rx_ring->buffer_info[i];
    cleancount = IXGB_DESC_UNUSED(rx_ring);

    num_group_tail_writes = IXGB_RX_BUFFER_WRITE;

    /* leave three descriptors unused */
    while(--cleancount > 2) {
        rx_desc = IXGB_RX_DESC(*rx_ring, i);

        skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN);

        if(unlikely(!skb)) {
            /* Better luck next round */
            break;
        }

        /* Make buffer alignment 2 beyond a 16 byte boundary
         * this will result in a 16 byte aligned IP header after
         * the 14 byte MAC header is removed
         */
        skb_reserve(skb, NET_IP_ALIGN);

        skb->dev = netdev;

        buffer_info->skb = skb;
        buffer_info->length = adapter->rx_buffer_len;
        buffer_info->dma =
            pci_map_single(pdev,
                           skb->data,
                           adapter->rx_buffer_len,
                           PCI_DMA_FROMDEVICE);

        rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
        /* guarantee DD bit not set now before h/w gets descriptor
         * this is the rest of the workaround for h/w double
         * writeback. */
        rx_desc->status = 0;

        if((i & ~(num_group_tail_writes - 1)) == i) {
            /* Force memory writes to complete before letting h/w
             * know there are new descriptors to fetch.  (Only
             * applicable for weak-ordered memory model archs,
             * such as IA-64). */
            wmb();

            IXGB_WRITE_REG(&adapter->hw, RDT, i);
        }

        if(++i == rx_ring->count) i = 0;
        buffer_info = &rx_ring->buffer_info[i];
    }

    rx_ring->next_to_use = i;
}

/**
 * ixgb_vlan_rx_register - enables or disables vlan tagging/stripping.
 *
 * @param netdev network interface device structure
 * @param grp indicates to enable or disable tagging/stripping
 **/

static void
ixgb_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
{
    struct ixgb_adapter *adapter = netdev_priv(netdev);
    uint32_t ctrl, rctl;

    ixgb_irq_disable(adapter);
    adapter->vlgrp = grp;

    if(grp) {
        /* enable VLAN tag insert/strip */
        ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
        ctrl |= IXGB_CTRL0_VME;
        IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);

        /* enable VLAN receive filtering */

        rctl = IXGB_READ_REG(&adapter->hw, RCTL);
        rctl |= IXGB_RCTL_VFE;
        rctl &= ~IXGB_RCTL_CFIEN;
        IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
    } else {
        /* disable VLAN tag insert/strip */

        ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
        ctrl &= ~IXGB_CTRL0_VME;
        IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);

        /* disable VLAN filtering */

        rctl = IXGB_READ_REG(&adapter->hw, RCTL);
        rctl &= ~IXGB_RCTL_VFE;
        IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
    }

    ixgb_irq_enable(adapter);
}

static void
ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
{
    struct ixgb_adapter *adapter = netdev_priv(netdev);
    uint32_t vfta, index;

    /* add VID to filter table */

    index = (vid >> 5) & 0x7F;
    vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
    vfta |= (1 << (vid & 0x1F));
    ixgb_write_vfta(&adapter->hw, index, vfta);
}

static void
ixgb_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
{
    struct ixgb_adapter *adapter = netdev_priv(netdev);
    uint32_t vfta, index;

    ixgb_irq_disable(adapter);

    if(adapter->vlgrp)
        adapter->vlgrp->vlan_devices[vid] = NULL;

    ixgb_irq_enable(adapter);

    /* remove VID from filter table */

    index = (vid >> 5) & 0x7F;
    vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
    vfta &= ~(1 << (vid & 0x1F));
    ixgb_write_vfta(&adapter->hw, index, vfta);
}

static void
ixgb_restore_vlan(struct ixgb_adapter *adapter)
{
    ixgb_vlan_rx_register(adapter->netdev, adapter->vlgrp);

    if(adapter->vlgrp) {
        uint16_t vid;
        for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
            if(!adapter->vlgrp->vlan_devices[vid])
                continue;
            ixgb_vlan_rx_add_vid(adapter->netdev, vid);
        }
    }
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */

static void ixgb_netpoll(struct net_device *dev)
{
    struct ixgb_adapter *adapter = dev->priv;

    disable_irq(adapter->pdev->irq);
    ixgb_intr(adapter->pdev->irq, dev, NULL);
    enable_irq(adapter->pdev->irq);
}
#endif

/* ixgb_main.c */
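A note on the VLAN filter table arithmetic used by ixgb_vlan_rx_add_vid and ixgb_vlan_rx_kill_vid above: the 4096 possible VLAN IDs are spread across 128 32-bit VFTA words, with bits 11:5 of the VID selecting the word index and bits 4:0 selecting the bit inside that word. The following is a minimal, hypothetical user-space sketch of that arithmetic only; the vfta array, the vfta_set/vfta_clear helpers, and main() are illustrative stand-ins and are not part of the driver.

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-in for the 128-entry VFTA register array
 * (4096 VLAN IDs / 32 bits per word); not the driver's data. */
static uint32_t vfta[128];

/* Same index/bit math as ixgb_vlan_rx_add_vid above. */
static void vfta_set(uint16_t vid)
{
    uint32_t index = (vid >> 5) & 0x7F;   /* bits 11:5 -> word index  */
    vfta[index] |= (1u << (vid & 0x1F));  /* bits 4:0  -> bit in word */
}

/* Same math as ixgb_vlan_rx_kill_vid above, clearing the bit instead. */
static void vfta_clear(uint16_t vid)
{
    uint32_t index = (vid >> 5) & 0x7F;
    vfta[index] &= ~(1u << (vid & 0x1F));
}

int main(void)
{
    vfta_set(100);    /* VLAN 100  -> word 3,   bit 4  */
    vfta_set(4094);   /* VLAN 4094 -> word 127, bit 30 */
    printf("vfta[3]   = 0x%08x\n", (unsigned)vfta[3]);
    printf("vfta[127] = 0x%08x\n", (unsigned)vfta[127]);
    vfta_clear(100);
    printf("vfta[3]   = 0x%08x after clear\n", (unsigned)vfta[3]);
    return 0;
}

Built with any C compiler, the sketch prints vfta[3] = 0x00000010 for VLAN 100 and vfta[127] = 0x40000000 for VLAN 4094, i.e. the same word/bit the driver would set in the corresponding hardware register.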
