netdev.c

From "Linux kernel source code" · C code · 2,361 lines total · page 1/5

C
2,361
Font size
		buffer_info->skb = NULL;
	}
}
/* NOTE(review): the two lines above are the tail of a Tx-buffer cleanup
 * function whose opening lines fall before this page of the capture --
 * do not treat them as a standalone definition. */

/* Dump Tx ring and hang-diagnostic state to the log.  Called from
 * e1000_clean_tx_irq below when a transmit hang is suspected. */
static void e1000_print_tx_hang(struct e1000_adapter *adapter)
{
	struct e1000_ring *tx_ring = adapter->tx_ring;
	unsigned int i = tx_ring->next_to_clean;
	unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
	struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop);
	struct net_device *netdev = adapter->netdev;

	/* detected Tx unit hang */
	ndev_err(netdev,
		 "Detected Tx Unit Hang:\n"
		 "  TDH                  <%x>\n"
		 "  TDT                  <%x>\n"
		 "  next_to_use          <%x>\n"
		 "  next_to_clean        <%x>\n"
		 "buffer_info[next_to_clean]:\n"
		 "  time_stamp           <%lx>\n"
		 "  next_to_watch        <%x>\n"
		 "  jiffies              <%lx>\n"
		 "  next_to_watch.status <%x>\n",
		 readl(adapter->hw.hw_addr + tx_ring->head),
		 readl(adapter->hw.hw_addr + tx_ring->tail),
		 tx_ring->next_to_use,
		 tx_ring->next_to_clean,
		 tx_ring->buffer_info[eop].time_stamp,
		 eop,
		 jiffies,
		 eop_desc->upper.fields.status);
}

/**
 * e1000_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_tx_desc *tx_desc, *eop_desc;
	struct e1000_buffer *buffer_info;
	unsigned int i, eop;
	unsigned int count = 0;
	bool cleaned = 0;
	unsigned int total_tx_bytes = 0, total_tx_packets = 0;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = E1000_TX_DESC(*tx_ring, eop);

	/* Outer loop: one completed packet per iteration (hardware has set
	 * the DD status bit on the end-of-packet descriptor).  Inner loop:
	 * free every buffer up to and including the eop slot. */
	while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
		for (cleaned = 0; !cleaned; ) {
			tx_desc = E1000_TX_DESC(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);

			if (cleaned) {
				struct sk_buff *skb = buffer_info->skb;
				unsigned int segs, bytecount;
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
					    skb->len;
				total_tx_packets += segs;
				total_tx_bytes += bytecount;
			}

			e1000_put_txbuf(adapter, buffer_info);
			/* clear the status word so this slot's DD bit cannot
			 * satisfy the outer while() again */
			tx_desc->upper.data = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}

		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = E1000_TX_DESC(*tx_ring, eop);
#define E1000_TX_WEIGHT 64
		/* weight of a sort for tx, to avoid endless transmit cleanup */
		if (count++ == E1000_TX_WEIGHT)
			break;
	}

	tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD 32
	if (cleaned && netif_carrier_ok(netdev) &&
		     e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();

		if (netif_queue_stopped(netdev) &&
		    !(test_bit(__E1000_DOWN, &adapter->state))) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
	}

	if (adapter->detect_tx_hung) {
		/* Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i */
		/* eop here is the last next_to_watch value observed above; a
		 * stale DMA mapping plus an expired time_stamp (and no flow
		 * control pause, STATUS.TXOFF clear) is treated as a hang */
		adapter->detect_tx_hung = 0;
		if (tx_ring->buffer_info[eop].dma &&
		    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp
			       + (adapter->tx_timeout_factor * HZ))
		    && !(er32(STATUS) &
			 E1000_STATUS_TXOFF)) {
			e1000_print_tx_hang(adapter);
			netif_stop_queue(netdev);
		}
	}
	adapter->total_tx_bytes += total_tx_bytes;
	adapter->total_tx_packets += total_tx_packets;
	return cleaned;
}

/**
 * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
 * @adapter: board private structure
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
				  int *work_done, int work_to_do)
{
	union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info, *next_buffer;
	struct e1000_ps_page *ps_page;
	struct sk_buff *skb;
	unsigned int i, j;
	u32 length, staterr;
	int cleaned_count = 0;
	bool cleaned = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
	buffer_info = &rx_ring->buffer_info[i];

	/* Process descriptors the hardware has completed (DD set), bounded
	 * by the NAPI budget passed in via work_to_do. */
	while (staterr & E1000_RXD_STAT_DD) {
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		skb = buffer_info->skb;

		/* in the packet split case this is header only */
		prefetch(skb->data - NET_IP_ALIGN);

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = 1;
		cleaned_count++;
		pci_unmap_single(pdev, buffer_info->dma,
				 adapter->rx_ps_bsize0,
				 PCI_DMA_FROMDEVICE);
		buffer_info->dma = 0;

		if (!(staterr & E1000_RXD_STAT_EOP)) {
			ndev_dbg(netdev, "%s: Packet Split buffers didn't pick "
				 "up the full packet\n", netdev->name);
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		length = le16_to_cpu(rx_desc->wb.middle.length0);

		if (!length) {
			ndev_dbg(netdev, "%s: Last part of the packet spanning"
				 " multiple descriptors\n", netdev->name);
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		/* Good Receive */
		skb_put(skb, length);

		{
		/* this looks ugly, but it seems compiler issues make it
		   more efficient than reusing j */
		int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);

		/* page alloc/put takes too long and effects small packet
		 * throughput, so unsplit small packets and save the alloc/put*/
		if (l1 && (l1 <= copybreak) &&
		    ((length + l1) <= adapter->rx_ps_bsize0)) {
			u8 *vaddr;

			ps_page = &buffer_info->ps_pages[0];

			/* there is no documentation about how to call
			 * kmap_atomic, so we can't hold the mapping
			 * very long */
			pci_dma_sync_single_for_cpu(pdev, ps_page->dma,
				PAGE_SIZE, PCI_DMA_FROMDEVICE);
			vaddr = kmap_atomic(ps_page->page, KM_SKB_DATA_SOFTIRQ);
			memcpy(skb_tail_pointer(skb), vaddr, l1);
			kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
			pci_dma_sync_single_for_device(pdev, ps_page->dma,
				PAGE_SIZE, PCI_DMA_FROMDEVICE);
			skb_put(skb, l1);
			goto copydone;
		} /* if */
		}

		/* Large packet: attach each non-empty split page to the skb
		 * as a page fragment; ownership of the page moves to the skb
		 * (ps_page->page is NULLed so the ring won't free it). */
		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			length = le16_to_cpu(rx_desc->wb.upper.length[j]);
			if (!length)
				break;

			ps_page = &buffer_info->ps_pages[j];
			pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
				       PCI_DMA_FROMDEVICE);
			ps_page->dma = 0;
			skb_fill_page_desc(skb, j, ps_page->page, 0, length);
			ps_page->page = NULL;
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
		}

copydone:
		total_rx_bytes += skb->len;
		total_rx_packets++;

		e1000_rx_checksum(adapter, staterr, le16_to_cpu(
			rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);

		if (rx_desc->wb.upper.header_status &
			   cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
			adapter->rx_hdr_split++;

		e1000_receive_skb(adapter, netdev, skb,
				  staterr, rx_desc->wb.middle.vlan);

next_desc:
		rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
		buffer_info->skb = NULL;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
			adapter->alloc_rx_buf(adapter, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, cleaned_count);

	adapter->total_rx_packets += total_rx_packets;
	adapter->total_rx_bytes += total_rx_bytes;
	return cleaned;
}

/**
 * e1000_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 **/
static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
{
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_page;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int i, j;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (buffer_info->dma) {
			/* unmap size depends on which receive path mapped the
			 * buffer (normal vs. packet-split) */
			if (adapter->clean_rx == e1000_clean_rx_irq)
				pci_unmap_single(pdev, buffer_info->dma,
						 adapter->rx_buffer_len,
						 PCI_DMA_FROMDEVICE);
			else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
				pci_unmap_single(pdev, buffer_info->dma,
						 adapter->rx_ps_bsize0,
						 PCI_DMA_FROMDEVICE);
			buffer_info->dma = 0;
		}

		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			ps_page = &buffer_info->ps_pages[j];
			if (!ps_page->page)
				break;
			pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
				       PCI_DMA_FROMDEVICE);
			ps_page->dma = 0;
			put_page(ps_page->page);
			ps_page->page = NULL;
		}
	}

	/* there also may be some cached data from a chained receive */
	if (rx_ring->rx_skb_top) {
		dev_kfree_skb(rx_ring->rx_skb_top);
		rx_ring->rx_skb_top = NULL;
	}

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	/* reset the hardware head/tail ring pointers to match */
	writel(0, adapter->hw.hw_addr + rx_ring->head);
	writel(0, adapter->hw.hw_addr + rx_ring->tail);
}

/**
 * e1000_intr_msi - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t e1000_intr_msi(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = er32(ICR);

	/* read ICR disables interrupts using IAM, so keep up with our
	 * enable/disable accounting */
	atomic_inc(&adapter->irq_sem);

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		/* ICH8 workaround-- Call gig speed drop workaround on cable
		 * disconnect (LSC) before accessing any PHY registers */
		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
		    (!(er32(STATUS) & E1000_STATUS_LU)))
			e1000e_gig_downshift_workaround_ich8lan(hw);

		/* 80003ES2LAN workaround-- For packet buffer work-around on
		 * link down event; disable receives here in the ISR and reset
		 * adapter in watchdog */
		if (netif_carrier_ok(netdev) &&
		    adapter->flags & FLAG_RX_NEEDS_RESTART) {
			/* disable receives */
			u32 rctl = er32(RCTL);
			ew32(RCTL, rctl & ~E1000_RCTL_EN);
		}
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	/* hand rx/tx cleanup to NAPI; if polling is already scheduled, undo
	 * the irq_sem increment taken above */
	if (netif_rx_schedule_prep(netdev, &adapter->napi)) {
		adapter->total_tx_bytes = 0;
		adapter->total_tx_packets = 0;
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__netif_rx_schedule(netdev, &adapter->napi);
	} else {
		atomic_dec(&adapter->irq_sem);
	}

	return IRQ_HANDLED;
}

/**
 * e1000_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t e1000_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	u32 rctl, icr = er32(ICR);
	if (!icr)
		return IRQ_NONE;  /* Not our interrupt */

	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
	 * not set, then the adapter didn't send an interrupt */
	if (!(icr & E1000_ICR_INT_ASSERTED))
		return IRQ_NONE;

	/* Interrupt Auto-Mask...upon reading ICR,
	 * interrupts are masked.  No need for the
	 * IMC write, but it does mean we should
	 * account for it ASAP. */
	atomic_inc(&adapter->irq_sem);

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		/* ICH8 workaround-- Call gig speed drop workaround on cable
		 * disconnect (LSC) before accessing any PHY registers */
		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
		    (!(er32(STATUS) & E1000_STATUS_LU)))
			e1000e_gig_downshift_workaround_ich8lan(hw);

		/* 80003ES2LAN workaround--
		 * For packet buffer work-around on link down event;
		 * disable receives here in the ISR and
		 * reset adapter in watchdog
		 */
		if (netif_carrier_ok(netdev) &&
		    (adapter->flags & FLAG_RX_NEEDS_RESTART)) {
			/* disable receives */
			rctl = er32(RCTL);
			ew32(RCTL, rctl & ~E1000_RCTL_EN);
		}
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	/* hand rx/tx cleanup to NAPI; if polling is already scheduled, undo
	 * the irq_sem increment taken above */
	if (netif_rx_schedule_prep(netdev, &adapter->napi)) {
		adapter->total_tx_bytes = 0;
		adapter->total_tx_packets = 0;
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__netif_rx_schedule(netdev, &adapter->napi);
	} else {
		atomic_dec(&adapter->irq_sem);
	}

	return IRQ_HANDLED;
}

/* Try MSI first, presumably falling back to a legacy shared IRQ on failure.
 * NOTE(review): this function is truncated by the page break below -- the
 * remainder (the else branch and the request_irq call) is on the next page. */
static int e1000_request_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	void (*handler) = &e1000_intr;
	int irq_flags = IRQF_SHARED;
	int err;

	err = pci_enable_msi(adapter->pdev);
	if (err) {
		ndev_warn(netdev,
		 "Unable to allocate MSI interrupt Error: %d\n", err);
	} else {

⌨️ Keyboard shortcuts

Copy code: Ctrl + C
Search code: Ctrl + F
Full-screen mode: F11
Increase font size: Ctrl + =
Decrease font size: Ctrl + -
Show shortcuts: ?