ixgb_main.c

Excerpt from the "Youlong 2410" Linux 2.6.8 kernel source (Intel ixgb 10-Gigabit Ethernet driver).
C source code, 2,146 lines in total; this is page 1 of 4.
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context). */
			schedule_work(&adapter->tx_timeout_task);
		}
	}

	/* Early detection of hung controller */
	i = txdr->next_to_clean;
	if (txdr->buffer_info[i].dma &&
	    time_after(jiffies, txdr->buffer_info[i].time_stamp + HZ) &&
	    !(IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_TXOFF))
		netif_stop_queue(netdev);

	/* generate an interrupt to force clean up of any stragglers */
	IXGB_WRITE_REG(&adapter->hw, ICS, IXGB_INT_TXDW);

	/* Reset the timer */
	mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
}

/* Per-skb transmit flags carried from ixgb_xmit_frame() to ixgb_tx_queue() */
#define IXGB_TX_FLAGS_CSUM		0x00000001
#define IXGB_TX_FLAGS_VLAN		0x00000002
#define IXGB_TX_FLAGS_TSO		0x00000004

/*
 * ixgb_tso - set up a TCP Segmentation Offload context descriptor
 * @adapter: board private structure
 * @skb: packet to be segmented by the hardware
 *
 * Returns TRUE (consuming one slot of the Tx ring for the context
 * descriptor) when the skb requests TSO (tso_size != 0); FALSE
 * otherwise, or unconditionally when the kernel lacks NETIF_F_TSO.
 */
static inline boolean_t
ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
{
#ifdef NETIF_F_TSO
	struct ixgb_context_desc *context_desc;
	unsigned int i;
	uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
	uint16_t ipcse, tucse, mss;

	if (likely(skb_shinfo(skb)->tso_size)) {
		/* total IP + TCP header length, replicated per segment */
		hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
		mss = skb_shinfo(skb)->tso_size;
		/* hardware fills in per-segment lengths and checksums, so
		 * zero tot_len/check and pre-seed the TCP pseudo-header
		 * checksum */
		skb->nh.iph->tot_len = 0;
		skb->nh.iph->check = 0;
		skb->h.th->check = ~csum_tcpudp_magic(skb->nh.iph->saddr,
						      skb->nh.iph->daddr,
						      0, IPPROTO_TCP, 0);
		/* byte offsets of the IP/TCP headers and their checksum
		 * fields, as the context descriptor expects them */
		ipcss = skb->nh.raw - skb->data;
		ipcso = (void *)&(skb->nh.iph->check) - (void *)skb->data;
		ipcse = skb->h.raw - skb->data - 1;
		tucss = skb->h.raw - skb->data;
		tucso = (void *)&(skb->h.th->check) - (void *)skb->data;
		tucse = 0;

		i = adapter->tx_ring.next_to_use;
		context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);

		context_desc->ipcss = ipcss;
		context_desc->ipcso = ipcso;
		context_desc->ipcse = cpu_to_le16(ipcse);
		context_desc->tucss = tucss;
		context_desc->tucso = tucso;
		context_desc->tucse = cpu_to_le16(tucse);
		context_desc->mss = cpu_to_le16(mss);
		context_desc->hdr_len = hdr_len;
		context_desc->status = 0;
		/* TSE + IP + TCP offload; the low bits carry the payload
		 * length (total length minus the header) */
		context_desc->cmd_type_len =
		    cpu_to_le32(IXGB_CONTEXT_DESC_TYPE
				| IXGB_CONTEXT_DESC_CMD_TSE
				| IXGB_CONTEXT_DESC_CMD_IP
				| IXGB_CONTEXT_DESC_CMD_TCP
				| IXGB_CONTEXT_DESC_CMD_RS
				| IXGB_CONTEXT_DESC_CMD_IDE
				| (skb->len - (hdr_len)));

		if (++i == adapter->tx_ring.count)
			i = 0;
		adapter->tx_ring.next_to_use = i;

		return TRUE;
	}
#endif

	return FALSE;
}

/*
 * ixgb_tx_csum - set up a checksum-offload context descriptor
 * @adapter: board private structure
 * @skb: packet needing hardware checksum insertion
 *
 * Returns TRUE (consuming one slot of the Tx ring for the context
 * descriptor) when skb->ip_summed == CHECKSUM_HW; FALSE otherwise.
 */
static inline boolean_t
ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
{
	struct ixgb_context_desc *context_desc;
	unsigned int i;
	uint8_t css, cso;

	if (likely(skb->ip_summed == CHECKSUM_HW)) {
		/* css: offset where checksumming starts;
		 * cso: offset where the computed checksum is stored */
		css = skb->h.raw - skb->data;
		cso = (skb->h.raw + skb->csum) - skb->data;

		i = adapter->tx_ring.next_to_use;
		context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);

		context_desc->tucss = css;
		context_desc->tucso = cso;
		context_desc->tucse = 0;
		/* zero out any previously existing data in one instruction */
		*(uint32_t *) & (context_desc->ipcss) = 0;
		context_desc->status = 0;
		context_desc->hdr_len = 0;
		context_desc->mss = 0;
		context_desc->cmd_type_len =
		    cpu_to_le32(IXGB_CONTEXT_DESC_TYPE
				| IXGB_TX_DESC_CMD_RS | IXGB_TX_DESC_CMD_IDE);

		if (++i == adapter->tx_ring.count)
			i = 0;
		adapter->tx_ring.next_to_use = i;

		return TRUE;
	}

	return FALSE;
}

/* A single Tx descriptor can carry at most 2^14 bytes of data */
#define IXGB_MAX_TXD_PWR	14
#define IXGB_MAX_DATA_PER_TXD	(1<<IXGB_MAX_TXD_PWR)

/*
 * ixgb_tx_map - DMA-map an skb onto the Tx descriptor ring
 * @adapter: board private structure
 * @skb: packet to map
 * @first: ring index of the packet's first descriptor
 *
 * Maps the linear data area and every page fragment, recording the DMA
 * address, length and timestamp in buffer_info[].  Returns the number
 * of descriptors used; buffer_info[first].next_to_watch is set to the
 * index of the packet's last descriptor.
 */
static inline int
ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
	    unsigned int first)
{
	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
	struct ixgb_buffer *buffer_info;
	int len = skb->len;
	unsigned int offset = 0, size, count = 0, i;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int f;

	/* len covers only the linear area; fragments are mapped below */
	len -= skb->data_len;

	i = tx_ring->next_to_use;

	while (len) {
		buffer_info = &tx_ring->buffer_info[i];
		size = min(len, IXGB_MAX_JUMBO_FRAME_SIZE);
		buffer_info->length = size;
		buffer_info->dma =
		    pci_map_single(adapter->pdev,
				   skb->data + offset, size, PCI_DMA_TODEVICE);
		buffer_info->time_stamp = jiffies;

		len
-= size;
		offset += size;
		count++;

		if (++i == tx_ring->count)
			i = 0;
	}

	for (f = 0; f < nr_frags; f++) {
		struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = frag->size;
		offset = 0;

		while (len) {
			buffer_info = &tx_ring->buffer_info[i];
			size = min(len, IXGB_MAX_JUMBO_FRAME_SIZE);
			buffer_info->length = size;
			buffer_info->dma =
			    pci_map_page(adapter->pdev,
					 frag->page,
					 frag->page_offset + offset,
					 size, PCI_DMA_TODEVICE);
			buffer_info->time_stamp = jiffies;

			len -= size;
			offset += size;
			count++;
			if (++i == tx_ring->count)
				i = 0;
		}
	}
	/* back up to the packet's last descriptor: the skb is freed and
	 * next_to_watch polled once that descriptor is written back */
	i = (i == 0) ? tx_ring->count - 1 : i - 1;
	tx_ring->buffer_info[i].skb = skb;
	tx_ring->buffer_info[first].next_to_watch = i;

	return count;
}

/*
 * ixgb_tx_queue - write the hardware Tx descriptors for a mapped packet
 * @adapter: board private structure
 * @count: number of descriptors to write (from ixgb_tx_map)
 * @vlan_id: VLAN tag to insert when IXGB_TX_FLAGS_VLAN is set
 * @tx_flags: IXGB_TX_FLAGS_{CSUM,VLAN,TSO} bitmask
 *
 * Fills in one descriptor per mapped buffer, marks the last one
 * EOP|RS, then advances the tail register so the hardware starts
 * fetching.
 */
static inline void
ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,
	      int tx_flags)
{
	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
	struct ixgb_tx_desc *tx_desc = NULL;
	struct ixgb_buffer *buffer_info;
	uint32_t cmd_type_len = adapter->tx_cmd_type;
	uint8_t status = 0;
	uint8_t popts = 0;
	unsigned int i;

	if (tx_flags & IXGB_TX_FLAGS_TSO) {
		cmd_type_len |= IXGB_TX_DESC_CMD_TSE;
		popts |= (IXGB_TX_DESC_POPTS_IXSM | IXGB_TX_DESC_POPTS_TXSM);
	}

	if (tx_flags & IXGB_TX_FLAGS_CSUM)
		popts |= IXGB_TX_DESC_POPTS_TXSM;

	if (tx_flags & IXGB_TX_FLAGS_VLAN) {
		cmd_type_len |= IXGB_TX_DESC_CMD_VLE;
	}

	i = tx_ring->next_to_use;

	while (count--) {
		buffer_info = &tx_ring->buffer_info[i];
		tx_desc = IXGB_TX_DESC(*tx_ring, i);
		tx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->cmd_type_len =
		    cpu_to_le32(cmd_type_len | buffer_info->length);
		tx_desc->status = status;
		tx_desc->popts = popts;
		tx_desc->vlan = cpu_to_le16(vlan_id);

		if (++i == tx_ring->count)
			i = 0;
	}

	/* only the last descriptor of the packet gets EOP and RS */
	tx_desc->cmd_type_len |= cpu_to_le32(IXGB_TX_DESC_CMD_EOP
					     | IXGB_TX_DESC_CMD_RS);

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.
(Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64). */
	wmb();

	tx_ring->next_to_use = i;
	IXGB_WRITE_REG(&adapter->hw, TDT, i);
}

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) (((S) >> IXGB_MAX_TXD_PWR) + \
			 (((S) & (IXGB_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
#define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) + \
	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1

/*
 * ixgb_xmit_frame - hard_start_xmit entry point
 * @skb: packet handed down by the network stack
 * @netdev: network interface device structure
 *
 * Returns 0 on success (skb consumed), 1 when the ring is too full
 * (queue stopped, skb requeued by the stack).
 *
 * NOTE(review): tx_lock is released after the DESC_NEEDED check but
 * before the descriptors are actually queued; later upstream ixgb
 * versions hold tx_lock across the whole transmit path — verify
 * whether concurrent callers are possible in this kernel config.
 */
static int ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev->priv;
	unsigned int first;
	unsigned int tx_flags = 0;
	unsigned long flags;
	int vlan_id = 0;

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return 0;
	}

	/* worst-case space check: stop the queue rather than overrun */
	spin_lock_irqsave(&adapter->tx_lock, flags);
	if (unlikely(IXGB_DESC_UNUSED(&adapter->tx_ring) < DESC_NEEDED)) {
		netif_stop_queue(netdev);
		spin_unlock_irqrestore(&adapter->tx_lock, flags);
		return 1;
	}
	spin_unlock_irqrestore(&adapter->tx_lock, flags);

	if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
		tx_flags |= IXGB_TX_FLAGS_VLAN;
		vlan_id = vlan_tx_tag_get(skb);
	}

	first = adapter->tx_ring.next_to_use;

	/* TSO and plain checksum offload are mutually exclusive: each
	 * emits its own context descriptor */
	if (ixgb_tso(adapter, skb))
		tx_flags |= IXGB_TX_FLAGS_TSO;
	else if (ixgb_tx_csum(adapter, skb))
		tx_flags |= IXGB_TX_FLAGS_CSUM;

	ixgb_tx_queue(adapter, ixgb_tx_map(adapter, skb, first), vlan_id,
		      tx_flags);

	netdev->trans_start = jiffies;

	return 0;
}

/**
 * ixgb_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/

static void ixgb_tx_timeout(struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev->priv;

	/* Do the reset outside of interrupt context */
	schedule_work(&adapter->tx_timeout_task);
}

/*
 * ixgb_tx_timeout_task - reset the adapter (process context)
 * @netdev: network interface device structure
 *
 * Work item scheduled by ixgb_tx_timeout(); detaches the interface,
 * does a full down/up cycle, then reattaches it.
 */
static void ixgb_tx_timeout_task(struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev->priv;

	netif_device_detach(netdev);
	ixgb_down(adapter, TRUE);
	ixgb_up(adapter);
	netif_device_attach(netdev);
}

/**
 * ixgb_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the timer callback.
 **/

static struct net_device_stats *ixgb_get_stats(struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev->priv;

	return &adapter->net_stats;
}

/**
 * ixgb_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/

static int ixgb_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ixgb_adapter *adapter = netdev->priv;
	/* despite the name, this records the current Rx buffer length so
	 * we only restart the interface when the buffer size changes */
	uint32_t old_mtu = adapter->rx_buffer_len;
	int max_frame = new_mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;

	if ((max_frame < IXGB_MIN_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH)
	    || (max_frame > IXGB_MAX_JUMBO_FRAME_SIZE + ENET_FCS_LENGTH)) {
		IXGB_ERR("Invalid MTU setting\n");
		return -EINVAL;
	}

	/* round the frame size up to the next supported Rx buffer size */
	if ((max_frame <=
	     IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH)
	    || (max_frame <= IXGB_RXBUFFER_2048)) {
		adapter->rx_buffer_len = IXGB_RXBUFFER_2048;
	} else if (max_frame <= IXGB_RXBUFFER_4096) {
		adapter->rx_buffer_len = IXGB_RXBUFFER_4096;
	} else if (max_frame <= IXGB_RXBUFFER_8192) {
		adapter->rx_buffer_len = IXGB_RXBUFFER_8192;
	} else {
		adapter->rx_buffer_len = IXGB_RXBUFFER_16384;
	}

	netdev->mtu = new_mtu;

	/* a running interface must be restarted to repost Rx buffers of
	 * the new size */
	if (old_mtu != adapter->rx_buffer_len && netif_running(netdev)) {
		ixgb_down(adapter, TRUE);
		ixgb_up(adapter);
	}

	return 0;
}

/**
 * ixgb_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 *
 * Accumulates the hardware statistics registers into adapter->stats,
 * then derives the generic net_device_stats counters from them.
 **/

static void ixgb_update_stats(struct ixgb_adapter *adapter)
{
	/* receive-side counters */
	adapter->stats.tprl += IXGB_READ_REG(&adapter->hw, TPRL);
	adapter->stats.tprh += IXGB_READ_REG(&adapter->hw, TPRH);
	adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL);
	adapter->stats.gprch += IXGB_READ_REG(&adapter->hw, GPRCH);
	adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL);
	adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH);
	adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL);
	adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH);
	adapter->stats.uprcl += IXGB_READ_REG(&adapter->hw, UPRCL);
	adapter->stats.uprch += IXGB_READ_REG(&adapter->hw, UPRCH);
	adapter->stats.vprcl += IXGB_READ_REG(&adapter->hw, VPRCL);
	adapter->stats.vprch += IXGB_READ_REG(&adapter->hw, VPRCH);
	adapter->stats.jprcl += IXGB_READ_REG(&adapter->hw, JPRCL);
	adapter->stats.jprch += IXGB_READ_REG(&adapter->hw, JPRCH);
	adapter->stats.gorcl += IXGB_READ_REG(&adapter->hw, GORCL);
	adapter->stats.gorch += IXGB_READ_REG(&adapter->hw, GORCH);
	adapter->stats.torl += IXGB_READ_REG(&adapter->hw, TORL);
	adapter->stats.torh += IXGB_READ_REG(&adapter->hw, TORH);
	adapter->stats.rnbc += IXGB_READ_REG(&adapter->hw, RNBC);
	adapter->stats.ruc += IXGB_READ_REG(&adapter->hw, RUC);
	adapter->stats.roc += IXGB_READ_REG(&adapter->hw, ROC);
	adapter->stats.rlec += IXGB_READ_REG(&adapter->hw, RLEC);
	adapter->stats.crcerrs += IXGB_READ_REG(&adapter->hw, CRCERRS);
	adapter->stats.icbc += IXGB_READ_REG(&adapter->hw, ICBC);
	adapter->stats.ecbc += IXGB_READ_REG(&adapter->hw, ECBC);
	adapter->stats.mpc += IXGB_READ_REG(&adapter->hw, MPC);
	/* transmit-side counters */
	adapter->stats.tptl += IXGB_READ_REG(&adapter->hw, TPTL);
	adapter->stats.tpth += IXGB_READ_REG(&adapter->hw, TPTH);
	adapter->stats.gptcl += IXGB_READ_REG(&adapter->hw, GPTCL);
	adapter->stats.gptch += IXGB_READ_REG(&adapter->hw, GPTCH);
	adapter->stats.bptcl += IXGB_READ_REG(&adapter->hw, BPTCL);
	adapter->stats.bptch += IXGB_READ_REG(&adapter->hw, BPTCH);
	adapter->stats.mptcl += IXGB_READ_REG(&adapter->hw, MPTCL);
	adapter->stats.mptch += IXGB_READ_REG(&adapter->hw, MPTCH);
	adapter->stats.uptcl += IXGB_READ_REG(&adapter->hw, UPTCL);
	adapter->stats.uptch += IXGB_READ_REG(&adapter->hw, UPTCH);
	adapter->stats.vptcl += IXGB_READ_REG(&adapter->hw, VPTCL);
	adapter->stats.vptch += IXGB_READ_REG(&adapter->hw, VPTCH);
	adapter->stats.jptcl += IXGB_READ_REG(&adapter->hw, JPTCL);
	adapter->stats.jptch += IXGB_READ_REG(&adapter->hw, JPTCH);
	adapter->stats.gotcl += IXGB_READ_REG(&adapter->hw, GOTCL);
	adapter->stats.gotch += IXGB_READ_REG(&adapter->hw, GOTCH);
	adapter->stats.totl += IXGB_READ_REG(&adapter->hw, TOTL);
	adapter->stats.toth += IXGB_READ_REG(&adapter->hw, TOTH);
	adapter->stats.dc += IXGB_READ_REG(&adapter->hw, DC);
	adapter->stats.plt64c += IXGB_READ_REG(&adapter->hw, PLT64C);
	adapter->stats.tsctc += IXGB_READ_REG(&adapter->hw, TSCTC);
	adapter->stats.tsctfc += IXGB_READ_REG(&adapter->hw, TSCTFC);
	adapter->stats.ibic += IXGB_READ_REG(&adapter->hw, IBIC);
	adapter->stats.rfc += IXGB_READ_REG(&adapter->hw, RFC);
	adapter->stats.lfc += IXGB_READ_REG(&adapter->hw, LFC);
	adapter->stats.pfrc += IXGB_READ_REG(&adapter->hw, PFRC);
	adapter->stats.pftc += IXGB_READ_REG(&adapter->hw, PFTC);
	adapter->stats.mcfrc += IXGB_READ_REG(&adapter->hw, MCFRC);
	adapter->stats.mcftc += IXGB_READ_REG(&adapter->hw, MCFTC);
	adapter->stats.xonrxc += IXGB_READ_REG(&adapter->hw, XONRXC);
	adapter->stats.xontxc += IXGB_READ_REG(&adapter->hw, XONTXC);
	adapter->stats.xoffrxc += IXGB_READ_REG(&adapter->hw, XOFFRXC);
	adapter->stats.xofftxc += IXGB_READ_REG(&adapter->hw, XOFFTXC);
	adapter->stats.rjc += IXGB_READ_REG(&adapter->hw, RJC);

	/* Fill out the OS statistics structure */
	adapter->net_stats.rx_packets = adapter->stats.gprcl;
	adapter->net_stats.tx_packets = adapter->stats.gptcl;
	adapter->net_stats.rx_bytes = adapter->stats.gorcl;
	adapter->net_stats.tx_bytes = adapter->stats.gotcl;
	adapter->net_stats.multicast = adapter->stats.mprcl;
	/* full-duplex 10GbE link: collisions cannot occur */
	adapter->net_stats.collisions = 0;

	/* ignore RLEC as it reports errors for padded (<64bytes) frames
	 * with a length in the type/len field */
	adapter->net_stats.rx_errors =
	    /* adapter->stats.rnbc + */ adapter->stats.crcerrs +
	    adapter->stats.ruc +
	    adapter->stats.roc /*+ adapter->stats.rlec */  +
	    adapter->stats.icbc +
	    adapter->stats.ecbc + adapter->stats.mpc;

	adapter->net_stats.rx_dropped = adapter->stats.mpc;

	/* see above
	 * adapter->net_stats.rx_length_errors = adapter->stats.rlec;
	 */

	adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
	adapter->net_stats.rx_fifo_errors = adapter->stats.mpc;
	adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
	adapter->net_stats.rx_over_errors = adapter->stats.mpc;

	adapter->net_stats.tx_errors = 0;
	adapter->net_stats.rx_frame_errors = 0;
	adapter->net_stats.tx_aborted_errors = 0;
	adapter->net_stats.tx_carrier_errors = 0;
	adapter->net_stats.tx_fifo_errors = 0;
	adapter->net_stats.tx_heartbeat_errors = 0;
	adapter->net_stats.tx_window_errors = 0;
}

/**
 * ixgb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/

static inline void ixgb_irq_disable(struct ixgb_adapter *adapter)
{
	/* irq_sem counts nested disables; ixgb_irq_enable() re-enables
	 * only when the count drops back to zero */
	atomic_inc(&adapter->irq_sem);
	IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
	IXGB_WRITE_FLUSH(&adapter->hw);
	/* wait for any in-flight interrupt handler to finish */
	synchronize_irq(adapter->pdev->irq);
}

/**
 * ixgb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/

static inline void ixgb_irq_enable(struct ixgb_adapter *adapter)
{
	if (atomic_dec_and_test(&adapter->irq_sem)) {
		IXGB_WRITE_REG(&adapter->hw, IMS,
			       IXGB_INT_RXT0 | IXGB_INT_RXDMT0 | IXGB_INT_TXDW |
			       IXGB_INT_RXO | IXGB_INT_LSC);
		IXGB_WRITE_FLUSH(&adapter->hw);
	}
}

#define IXGB_MAX_INTR 10

/**
 * ixgb_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 * @pt_regs: CPU registers structure
 **/

static irqreturn_t ixgb_intr(int irq, void *data, struct pt_regs *regs)
{
	struct net_device
*netdev = data;	struct ixgb_adapter *adapter = netdev->priv;	struct ixgb_hw *hw = &adapter->hw;	uint32_t icr = IXGB_READ_REG(&adapter->hw, ICR);#ifndef CONFIG_IXGB_NAPI	unsigned int i;#endif	if (unlikely(!icr))		return IRQ_NONE;	/* Not our interrupt */	if (unlikely(icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC))) {		mod_timer(&adapter->watchdog_timer, jiffies);	}#ifdef CONFIG_IXGB_NAPI	if (netif_rx_schedule_prep(netdev)) {		/* Disable interrupts and register for poll. The flush 		   of the posted write is intentionally left out.		 */

⌨️ Keyboard shortcuts

Copy code: Ctrl + C
Search code: Ctrl + F
Full-screen mode: F11
Increase font size: Ctrl + =
Decrease font size: Ctrl + -
Show shortcuts: ?