⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 ixgbe_main.c

📁 linux 内核源代码
💻 C
📖 第 1 页 / 共 5 页
字号:
	ixgbe_configure(adapter);

	/* --- tail of ixgbe_open() (function begins before this chunk) ---
	 * Request interrupts; the call may downgrade from MSI-X and shrink
	 * the usable Rx queue count via num_rx_queues. */
	err = ixgbe_request_irq(adapter, &num_rx_queues);
	if (err)
		goto err_req_irq;

	/* ixgbe_request might have reduced num_rx_queues */
	if (num_rx_queues < adapter->num_rx_queues) {
		/* We didn't get MSI-X, so we need to release everything,
		 * set our Rx queue count to num_rx_queues, and redo the
		 * whole init process.
		 */
		ixgbe_free_irq(adapter);
		if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
			pci_disable_msi(adapter->pdev);
			adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
		}
		ixgbe_free_all_rx_resources(adapter);
		ixgbe_free_all_tx_resources(adapter);
		adapter->num_rx_queues = num_rx_queues;
		/* Reset the hardware, and start over. */
		ixgbe_reset(adapter);
		/* try_intr_reinit label is defined earlier in ixgbe_open() */
		goto try_intr_reinit;
	}

	err = ixgbe_up_complete(adapter);
	if (err)
		goto err_up;

	return 0;

	/* Unwind in reverse order of acquisition; labels fall through so
	 * each entry point frees everything acquired after it. */
err_up:
	ixgbe_free_irq(adapter);
err_req_irq:
	ixgbe_free_all_rx_resources(adapter);
err_setup_rx:
	ixgbe_free_all_tx_resources(adapter);
err_setup_tx:
	ixgbe_reset(adapter);

	return err;
}

/**
 * ixgbe_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the drivers control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int ixgbe_close(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	u32 ctrl_ext;

	/* Quiesce the hardware and release the IRQ before freeing rings */
	ixgbe_down(adapter);
	ixgbe_free_irq(adapter);

	ixgbe_free_all_tx_resources(adapter);
	ixgbe_free_all_rx_resources(adapter);

	/* Clear the DRV_LOAD bit in CTRL_EXT (driver-loaded indication;
	 * presumably signals firmware/manageability that the driver is
	 * releasing the port -- confirm against the 82598 datasheet). */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
			ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);

	return 0;
}

/**
 * ixgbe_update_stats - Update the board statistics counters.
* @adapter: board private structure **/void ixgbe_update_stats(struct ixgbe_adapter *adapter){	struct ixgbe_hw *hw = &adapter->hw;	u64 good_rx, missed_rx, bprc;	adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);	good_rx = IXGBE_READ_REG(hw, IXGBE_GPRC);	missed_rx = IXGBE_READ_REG(hw, IXGBE_MPC(0));	missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(1));	missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(2));	missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(3));	missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(4));	missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(5));	missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(6));	missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(7));	adapter->stats.gprc += (good_rx - missed_rx);	adapter->stats.mpc[0] += missed_rx;	adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);	adapter->stats.bprc += bprc;	adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);	adapter->stats.mprc -= bprc;	adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC);	adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);	adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);	adapter->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);	adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);	adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);	adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);	adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);	adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);	adapter->stats.lxontxc += IXGBE_READ_REG(hw, IXGBE_LXONTXC);	adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);	adapter->stats.lxofftxc += IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);	adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);	adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);	adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);	adapter->stats.rnbc[0] += IXGBE_READ_REG(hw, IXGBE_RNBC(0));	adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);	adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);	adapter->stats.rjc += 
IXGBE_READ_REG(hw, IXGBE_RJC);	adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);	adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);	adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);	adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);	adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);	adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);	adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);	adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);	adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);	adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);	/* Fill out the OS statistics structure */	adapter->net_stats.rx_packets = adapter->stats.gprc;	adapter->net_stats.tx_packets = adapter->stats.gptc;	adapter->net_stats.rx_bytes = adapter->stats.gorc;	adapter->net_stats.tx_bytes = adapter->stats.gotc;	adapter->net_stats.multicast = adapter->stats.mprc;	/* Rx Errors */	adapter->net_stats.rx_errors = adapter->stats.crcerrs +						adapter->stats.rlec;	adapter->net_stats.rx_dropped = 0;	adapter->net_stats.rx_length_errors = adapter->stats.rlec;	adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;	adapter->net_stats.rx_missed_errors = adapter->stats.mpc[0];}/** * ixgbe_watchdog - Timer Call-back * @data: pointer to adapter cast into an unsigned long **/static void ixgbe_watchdog(unsigned long data){	struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;	struct net_device *netdev = adapter->netdev;	bool link_up;	u32 link_speed = 0;	adapter->hw.phy.ops.check(&adapter->hw, &(link_speed), &link_up);	if (link_up) {		if (!netif_carrier_ok(netdev)) {			u32 frctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);			u32 rmcs = IXGBE_READ_REG(&adapter->hw, IXGBE_RMCS);#define FLOW_RX (frctl & IXGBE_FCTRL_RFCE)#define FLOW_TX (rmcs & IXGBE_RMCS_TFCE_802_3X)			DPRINTK(LINK, INFO, "NIC Link is Up %s, "				"Flow Control: %s\n",				(link_speed == IXGBE_LINK_SPEED_10GB_FULL ?				 "10 Gbps" :				 (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?	
			  "1 Gpbs" : "unknown speed")),				((FLOW_RX && FLOW_TX) ? "RX/TX" :				 (FLOW_RX ? "RX" :				 (FLOW_TX ? "TX" : "None"))));			netif_carrier_on(netdev);			netif_wake_queue(netdev);		} else {			/* Force detection of hung controller */			adapter->detect_tx_hung = true;		}	} else {		if (netif_carrier_ok(netdev)) {			DPRINTK(LINK, INFO, "NIC Link is Down\n");			netif_carrier_off(netdev);			netif_stop_queue(netdev);		}	}	ixgbe_update_stats(adapter);	/* Reset the timer */	if (!test_bit(__IXGBE_DOWN, &adapter->state))		mod_timer(&adapter->watchdog_timer,			  round_jiffies(jiffies + 2 * HZ));}#define IXGBE_MAX_TXD_PWR	14#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)/* Tx Descriptors needed, worst case */#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \			 (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1)	/* for context */static int ixgbe_tso(struct ixgbe_adapter *adapter,			 struct ixgbe_ring *tx_ring, struct sk_buff *skb,			 u32 tx_flags, u8 *hdr_len){	struct ixgbe_adv_tx_context_desc *context_desc;	unsigned int i;	int err;	struct ixgbe_tx_buffer *tx_buffer_info;	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;	u32 mss_l4len_idx = 0, l4len;	*hdr_len = 0;	if (skb_is_gso(skb)) {		if (skb_header_cloned(skb)) {			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);			if (err)				return err;		}		l4len = tcp_hdrlen(skb);		*hdr_len += l4len;		if (skb->protocol == ntohs(ETH_P_IP)) {			struct iphdr *iph = ip_hdr(skb);			iph->tot_len = 0;			iph->check = 0;			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,								 iph->daddr, 0,								 IPPROTO_TCP,								 0);			adapter->hw_tso_ctxt++;		} else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {			ipv6_hdr(skb)->payload_len = 0;			tcp_hdr(skb)->check =			    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,					     &ipv6_hdr(skb)->daddr,					     0, IPPROTO_TCP, 0);			adapter->hw_tso6_ctxt++;		}		i = 
tx_ring->next_to_use;		tx_buffer_info = &tx_ring->tx_buffer_info[i];		context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);		/* VLAN MACLEN IPLEN */		if (tx_flags & IXGBE_TX_FLAGS_VLAN)			vlan_macip_lens |=			    (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);		vlan_macip_lens |= ((skb_network_offset(skb)) <<				    IXGBE_ADVTXD_MACLEN_SHIFT);		*hdr_len += skb_network_offset(skb);		vlan_macip_lens |=		    (skb_transport_header(skb) - skb_network_header(skb));		*hdr_len +=		    (skb_transport_header(skb) - skb_network_header(skb));		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);		context_desc->seqnum_seed = 0;		/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */		type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |				    IXGBE_ADVTXD_DTYP_CTXT);		if (skb->protocol == ntohs(ETH_P_IP))			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);		/* MSS L4LEN IDX */		mss_l4len_idx |=		    (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);		mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);		context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);		tx_buffer_info->time_stamp = jiffies;		tx_buffer_info->next_to_watch = i;		i++;		if (i == tx_ring->count)			i = 0;		tx_ring->next_to_use = i;		return true;	}	return false;}static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,				   struct ixgbe_ring *tx_ring,				   struct sk_buff *skb, u32 tx_flags){	struct ixgbe_adv_tx_context_desc *context_desc;	unsigned int i;	struct ixgbe_tx_buffer *tx_buffer_info;	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;	if (skb->ip_summed == CHECKSUM_PARTIAL ||	    (tx_flags & IXGBE_TX_FLAGS_VLAN)) {		i = tx_ring->next_to_use;		tx_buffer_info = &tx_ring->tx_buffer_info[i];		context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);		if (tx_flags & IXGBE_TX_FLAGS_VLAN)			vlan_macip_lens |=			    (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);		vlan_macip_lens |= (skb_network_offset(skb) <<				    IXGBE_ADVTXD_MACLEN_SHIFT);		if 
(skb->ip_summed == CHECKSUM_PARTIAL)			vlan_macip_lens |= (skb_transport_header(skb) -					    skb_network_header(skb));		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);		context_desc->seqnum_seed = 0;		type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |				    IXGBE_ADVTXD_DTYP_CTXT);		if (skb->ip_summed == CHECKSUM_PARTIAL) {			if (skb->protocol == ntohs(ETH_P_IP))				type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;			if (skb->sk->sk_protocol == IPPROTO_TCP)				type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;		}		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);		context_desc->mss_l4len_idx = 0;		tx_buffer_info->time_stamp = jiffies;		tx_buffer_info->next_to_watch = i;		adapter->hw_csum_tx_good++;		i++;		if (i == tx_ring->count)			i = 0;		tx_ring->next_to_use = i;		return true;	}	return false;}static int ixgbe_tx_map(struct ixgbe_adapter *adapter,			struct ixgbe_ring *tx_ring,			struct sk_buff *skb, unsigned int first){	struct ixgbe_tx_buffer *tx_buffer_info;	unsigned int len = skb->len;	unsigned int offset = 0, size, count = 0, i;	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;	unsigned int f;	len -= skb->data_len;	i = tx_ring->next_to_use;	while (len) {		tx_buffer_info = &tx_ring->tx_buffer_info[i];		size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);		tx_buffer_info->length = size;		tx_buffer_info->dma = pci_map_single(adapter->pdev,						  skb->data + offset,						  size, PCI_DMA_TODEVICE);		tx_buffer_info->time_stamp = jiffies;		tx_buffer_info->next_to_watch = i;		len -= size;		offset += size;		count++;		i++;		if (i == tx_ring->count)			i = 0;	}	for (f = 0; f < nr_frags; f++) {		struct skb_frag_struct *frag;		frag = &skb_shinfo(skb)->frags[f];		len = frag->size;		offset = frag->page_offset;		while (len) {			tx_buffer_info = &tx_ring->tx_buffer_info[i];			size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);			tx_buffer_info->length = size;			tx_buffer_info->dma = pci_map_page(adapter->pdev,							frag->page,							offset,							size, PCI_DMA_TODEVICE);			
tx_buffer_info->time_stamp = jiffies;			tx_buffer_info->next_to_watch = i;			len -= size;			offset += size;			count++;			i++;			if (i == tx_ring->count)				i = 0;		}	}	if (i == 0)		i = tx_ring->count - 1;	else		i = i - 1;	tx_ring->tx_buffer_info[i].skb = skb;	tx_ring->tx_buffer_info[first].next_to_watch = i;	return count;}static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,			       struct ixgbe_ring *tx_ring,			       int tx_flags, int count, u32 paylen, u8 hdr_len){	union ixgbe_adv_tx_desc *tx_desc = NULL;	struct ixgbe_tx_buffer *tx_buffer_info;	u32 olinfo_status = 0, cmd_type_len = 0;	unsigned int i;	u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;	cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;	cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;	if (tx_flags & IXGBE_TX_FLAGS_VLAN)		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;	if (tx_flags & IXGBE_TX_FLAGS_TSO) {		cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<						IXGBE_ADVTXD_POPTS_SHIFT;		if (tx_flags & IXGBE_TX_FLAGS_IPV4)			olinfo_status |= IXGBE_TXD_POPTS_IXSM <<						IXGBE_ADVTXD_POPTS_SHIFT;	} else if (tx_flags & IXGBE_TX_FLAGS_CSUM)		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<						IXGBE_ADVTXD_POPTS_SHIFT;	olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);	i = tx_ring->next_to_use;	while (count--) {		tx_buffer_info = &tx_ring->tx_buffer_info[i];		tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);		tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);		tx_desc->read.cmd_type_len =			cpu_to_le32(cmd_type_len | tx_buffer_info->length);		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);		i++;		if (i == tx_ring->count)			i = 0;	}	tx_desc->read

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -