⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 myri10ge.c

📁 linux 内核源代码
💻 C
📖 第 1 页 / 共 5 页
字号:
		return -EINVAL;	return 0;}static voidmyri10ge_get_ringparam(struct net_device *netdev,		       struct ethtool_ringparam *ring){	struct myri10ge_priv *mgp = netdev_priv(netdev);	ring->rx_mini_max_pending = mgp->rx_small.mask + 1;	ring->rx_max_pending = mgp->rx_big.mask + 1;	ring->rx_jumbo_max_pending = 0;	ring->tx_max_pending = mgp->rx_small.mask + 1;	ring->rx_mini_pending = ring->rx_mini_max_pending;	ring->rx_pending = ring->rx_max_pending;	ring->rx_jumbo_pending = ring->rx_jumbo_max_pending;	ring->tx_pending = ring->tx_max_pending;}static u32 myri10ge_get_rx_csum(struct net_device *netdev){	struct myri10ge_priv *mgp = netdev_priv(netdev);	if (mgp->csum_flag)		return 1;	else		return 0;}static int myri10ge_set_rx_csum(struct net_device *netdev, u32 csum_enabled){	struct myri10ge_priv *mgp = netdev_priv(netdev);	if (csum_enabled)		mgp->csum_flag = MXGEFW_FLAGS_CKSUM;	else		mgp->csum_flag = 0;	return 0;}static int myri10ge_set_tso(struct net_device *netdev, u32 tso_enabled){	struct myri10ge_priv *mgp = netdev_priv(netdev);	unsigned long flags = mgp->features & (NETIF_F_TSO6 | NETIF_F_TSO);	if (tso_enabled)		netdev->features |= flags;	else		netdev->features &= ~flags;	return 0;}static const char myri10ge_gstrings_stats[][ETH_GSTRING_LEN] = {	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",	"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",	"rx_length_errors", "rx_over_errors", "rx_crc_errors",	"rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",	"tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",	"tx_heartbeat_errors", "tx_window_errors",	/* device-specific stats */	"tx_boundary", "WC", "irq", "MSI",	"read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs",	"serial_number", "tx_pkt_start", "tx_pkt_done",	"tx_req", "tx_done", "rx_small_cnt", "rx_big_cnt",	"wake_queue", "stop_queue", "watchdog_resets", "tx_linearized",	"link_changes", "link_up", "dropped_link_overflow",	"dropped_link_error_or_filtered",	
"dropped_pause", "dropped_bad_phy", "dropped_bad_crc32",
	"dropped_unicast_filtered", "dropped_multicast_filtered",
	"dropped_runt", "dropped_overrun", "dropped_no_small_buffer",
	"dropped_no_big_buffer", "LRO aggregated", "LRO flushed",
	"LRO avg aggr", "LRO no_desc"
};

/* Number of leading entries that come straight from mgp->stats. */
#define MYRI10GE_NET_STATS_LEN      21
/* Total stat count, derived from the string table above. */
#define MYRI10GE_STATS_LEN  sizeof(myri10ge_gstrings_stats) / ETH_GSTRING_LEN

/* ethtool get_strings: copy the whole stat-name table for ETH_SS_STATS. */
static void
myri10ge_get_strings(struct net_device *netdev, u32 stringset, u8 * data)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(data, *myri10ge_gstrings_stats,
		       sizeof(myri10ge_gstrings_stats));
		break;
	}
}

/* ethtool get_sset_count: only the stats string set is supported. */
static int myri10ge_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return MYRI10GE_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

/*
 * ethtool get_ethtool_stats: fill data[] in the exact order of
 * myri10ge_gstrings_stats.  First the generic net_device_stats fields
 * (copied word-by-word), then driver state, then firmware counters
 * (big-endian in mgp->fw_stats, hence the ntohl()).
 */
static void
myri10ge_get_ethtool_stats(struct net_device *netdev,
			   struct ethtool_stats *stats, u64 * data)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);
	int i;

	/* assumes mgp->stats fields are laid out as an array of
	 * unsigned long matching the first 21 stat names -- standard
	 * net_device_stats trick; TODO confirm against struct layout */
	for (i = 0; i < MYRI10GE_NET_STATS_LEN; i++)
		data[i] = ((unsigned long *)&mgp->stats)[i];

	data[i++] = (unsigned int)mgp->tx.boundary;
	data[i++] = (unsigned int)mgp->wc_enabled;
	data[i++] = (unsigned int)mgp->pdev->irq;
	data[i++] = (unsigned int)mgp->msi_enabled;
	data[i++] = (unsigned int)mgp->read_dma;
	data[i++] = (unsigned int)mgp->write_dma;
	data[i++] = (unsigned int)mgp->read_write_dma;
	data[i++] = (unsigned int)mgp->serial_number;
	data[i++] = (unsigned int)mgp->tx.pkt_start;
	data[i++] = (unsigned int)mgp->tx.pkt_done;
	data[i++] = (unsigned int)mgp->tx.req;
	data[i++] = (unsigned int)mgp->tx.done;
	data[i++] = (unsigned int)mgp->rx_small.cnt;
	data[i++] = (unsigned int)mgp->rx_big.cnt;
	data[i++] = (unsigned int)mgp->wake_queue;
	data[i++] = (unsigned int)mgp->stop_queue;
	data[i++] = (unsigned int)mgp->watchdog_resets;
	data[i++] = (unsigned int)mgp->tx_linearized;
	data[i++] = (unsigned int)mgp->link_changes;
	/* firmware counters are stored big-endian */
	data[i++] = (unsigned int)ntohl(mgp->fw_stats->link_up);
	data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_link_overflow);
	data[i++] =
(unsigned int)ntohl(mgp->fw_stats->dropped_link_error_or_filtered);
	data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_pause);
	data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_bad_phy);
	data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_bad_crc32);
	data[i++] =
	    (unsigned int)ntohl(mgp->fw_stats->dropped_unicast_filtered);
	data[i++] =
	    (unsigned int)ntohl(mgp->fw_stats->dropped_multicast_filtered);
	data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_runt);
	data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_overrun);
	data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_no_small_buffer);
	data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_no_big_buffer);

	/* LRO counters; avg aggr is computed here, guarding div-by-zero */
	data[i++] = mgp->rx_done.lro_mgr.stats.aggregated;
	data[i++] = mgp->rx_done.lro_mgr.stats.flushed;
	if (mgp->rx_done.lro_mgr.stats.flushed)
		data[i++] = mgp->rx_done.lro_mgr.stats.aggregated /
		    mgp->rx_done.lro_mgr.stats.flushed;
	else
		data[i++] = 0;
	data[i++] = mgp->rx_done.lro_mgr.stats.no_desc;
}

/* ethtool set_msglevel: store the driver message-enable bitmask. */
static void myri10ge_set_msglevel(struct net_device *netdev, u32 value)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);
	mgp->msg_enable = value;
}

/* ethtool get_msglevel: return the driver message-enable bitmask. */
static u32 myri10ge_get_msglevel(struct net_device *netdev)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);
	return mgp->msg_enable;
}

/* ethtool operations table; mixes driver handlers with generic
 * ethtool_op_* helpers for tx csum, sg and link state. */
static const struct ethtool_ops myri10ge_ethtool_ops = {
	.get_settings = myri10ge_get_settings,
	.get_drvinfo = myri10ge_get_drvinfo,
	.get_coalesce = myri10ge_get_coalesce,
	.set_coalesce = myri10ge_set_coalesce,
	.get_pauseparam = myri10ge_get_pauseparam,
	.set_pauseparam = myri10ge_set_pauseparam,
	.get_ringparam = myri10ge_get_ringparam,
	.get_rx_csum = myri10ge_get_rx_csum,
	.set_rx_csum = myri10ge_set_rx_csum,
	.set_tx_csum = ethtool_op_set_tx_hw_csum,
	.set_sg = ethtool_op_set_sg,
	.set_tso = myri10ge_set_tso,
	.get_link = ethtool_op_get_link,
	.get_strings = myri10ge_get_strings,
	.get_sset_count = myri10ge_get_sset_count,
	.get_ethtool_stats = myri10ge_get_ethtool_stats,
	.set_msglevel = myri10ge_set_msglevel,
.get_msglevel = myri10ge_get_msglevel
};

/*
 * Allocate host-side ring state: the TX request shadow list, the RX
 * shadow rings, and the per-slot info arrays, then pre-fill both RX
 * rings with pages.  Ring sizes come from the firmware.  On failure
 * everything allocated so far is unwound via the goto ladder and a
 * negative errno is returned.
 */
static int myri10ge_allocate_rings(struct net_device *dev)
{
	struct myri10ge_priv *mgp;
	struct myri10ge_cmd cmd;
	int tx_ring_size, rx_ring_size;
	int tx_ring_entries, rx_ring_entries;
	int i, status;
	size_t bytes;

	mgp = netdev_priv(dev);

	/* get ring sizes (in bytes) from the firmware */
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_RING_SIZE, &cmd, 0);
	tx_ring_size = cmd.data0;
	status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd, 0);
	if (status != 0)
		return status;
	rx_ring_size = cmd.data0;

	/* convert byte sizes to entry counts; masks assume power-of-2 rings */
	tx_ring_entries = tx_ring_size / sizeof(struct mcp_kreq_ether_send);
	rx_ring_entries = rx_ring_size / sizeof(struct mcp_dma_addr);
	mgp->tx.mask = tx_ring_entries - 1;
	mgp->rx_small.mask = mgp->rx_big.mask = rx_ring_entries - 1;

	status = -ENOMEM;

	/* allocate the host shadow rings */

	/* +8 bytes of slack so req_list can be realigned below */
	bytes = 8 + (MYRI10GE_MAX_SEND_DESC_TSO + 4)
	    * sizeof(*mgp->tx.req_list);
	mgp->tx.req_bytes = kzalloc(bytes, GFP_KERNEL);
	if (mgp->tx.req_bytes == NULL)
		goto abort_with_nothing;

	/* ensure req_list entries are aligned to 8 bytes */
	mgp->tx.req_list = (struct mcp_kreq_ether_send *)
	    ALIGN((unsigned long)mgp->tx.req_bytes, 8);

	bytes = rx_ring_entries * sizeof(*mgp->rx_small.shadow);
	mgp->rx_small.shadow = kzalloc(bytes, GFP_KERNEL);
	if (mgp->rx_small.shadow == NULL)
		goto abort_with_tx_req_bytes;

	bytes = rx_ring_entries * sizeof(*mgp->rx_big.shadow);
	mgp->rx_big.shadow = kzalloc(bytes, GFP_KERNEL);
	if (mgp->rx_big.shadow == NULL)
		goto abort_with_rx_small_shadow;

	/* allocate the host info rings */

	bytes = tx_ring_entries * sizeof(*mgp->tx.info);
	mgp->tx.info = kzalloc(bytes, GFP_KERNEL);
	if (mgp->tx.info == NULL)
		goto abort_with_rx_big_shadow;

	bytes = rx_ring_entries * sizeof(*mgp->rx_small.info);
	mgp->rx_small.info = kzalloc(bytes, GFP_KERNEL);
	if (mgp->rx_small.info == NULL)
		goto abort_with_tx_info;

	bytes = rx_ring_entries * sizeof(*mgp->rx_big.info);
	mgp->rx_big.info = kzalloc(bytes, GFP_KERNEL);
	if (mgp->rx_big.info == NULL)
		goto abort_with_rx_small_info;

	/*
Fill the receive rings */
	mgp->rx_big.cnt = 0;
	mgp->rx_small.cnt = 0;
	mgp->rx_big.fill_cnt = 0;
	mgp->rx_small.fill_cnt = 0;
	/* page_offset at MYRI10GE_ALLOC_SIZE forces a fresh page on first fill */
	mgp->rx_small.page_offset = MYRI10GE_ALLOC_SIZE;
	mgp->rx_big.page_offset = MYRI10GE_ALLOC_SIZE;
	mgp->rx_small.watchdog_needed = 0;
	mgp->rx_big.watchdog_needed = 0;
	myri10ge_alloc_rx_pages(mgp, &mgp->rx_small,
				mgp->small_bytes + MXGEFW_PAD, 0);

	/* a partially-filled ring is a hard failure at init time */
	if (mgp->rx_small.fill_cnt < mgp->rx_small.mask + 1) {
		printk(KERN_ERR "myri10ge: %s: alloced only %d small bufs\n",
		       dev->name, mgp->rx_small.fill_cnt);
		goto abort_with_rx_small_ring;
	}

	myri10ge_alloc_rx_pages(mgp, &mgp->rx_big, mgp->big_bytes, 0);
	if (mgp->rx_big.fill_cnt < mgp->rx_big.mask + 1) {
		printk(KERN_ERR "myri10ge: %s: alloced only %d big bufs\n",
		       dev->name, mgp->rx_big.fill_cnt);
		goto abort_with_rx_big_ring;
	}

	return 0;

	/* error unwind: release in reverse order of acquisition */
abort_with_rx_big_ring:
	for (i = mgp->rx_big.cnt; i < mgp->rx_big.fill_cnt; i++) {
		int idx = i & mgp->rx_big.mask;
		myri10ge_unmap_rx_page(mgp->pdev, &mgp->rx_big.info[idx],
				       mgp->big_bytes);
		put_page(mgp->rx_big.info[idx].page);
	}

abort_with_rx_small_ring:
	for (i = mgp->rx_small.cnt; i < mgp->rx_small.fill_cnt; i++) {
		int idx = i & mgp->rx_small.mask;
		myri10ge_unmap_rx_page(mgp->pdev, &mgp->rx_small.info[idx],
				       mgp->small_bytes + MXGEFW_PAD);
		put_page(mgp->rx_small.info[idx].page);
	}

	kfree(mgp->rx_big.info);

abort_with_rx_small_info:
	kfree(mgp->rx_small.info);

abort_with_tx_info:
	kfree(mgp->tx.info);

abort_with_rx_big_shadow:
	kfree(mgp->rx_big.shadow);

abort_with_rx_small_shadow:
	kfree(mgp->rx_small.shadow);

abort_with_tx_req_bytes:
	kfree(mgp->tx.req_bytes);
	mgp->tx.req_bytes = NULL;
	mgp->tx.req_list = NULL;

abort_with_nothing:
	return status;
}

/*
 * Free everything myri10ge_allocate_rings() set up: unmap and release
 * the outstanding RX pages, drain any not-yet-completed TX slots
 * (dropping their skbs), then free the info and shadow arrays.
 */
static void myri10ge_free_rings(struct net_device *dev)
{
	struct myri10ge_priv *mgp;
	struct sk_buff *skb;
	struct myri10ge_tx_buf *tx;
	int i, len, idx;

	mgp = netdev_priv(dev);

	for (i = mgp->rx_big.cnt; i < mgp->rx_big.fill_cnt; i++) {
		idx = i & mgp->rx_big.mask;
		/* last slot: presumably reset so the whole page is unmapped
		 * rather than a partial chunk -- TODO confirm against
		 * myri10ge_unmap_rx_page() */
		if (i == mgp->rx_big.fill_cnt - 1)
mgp->rx_big.info[idx].page_offset = MYRI10GE_ALLOC_SIZE;
		myri10ge_unmap_rx_page(mgp->pdev, &mgp->rx_big.info[idx],
				       mgp->big_bytes);
		put_page(mgp->rx_big.info[idx].page);
	}

	for (i = mgp->rx_small.cnt; i < mgp->rx_small.fill_cnt; i++) {
		idx = i & mgp->rx_small.mask;
		if (i == mgp->rx_small.fill_cnt - 1)
			mgp->rx_small.info[idx].page_offset =
			    MYRI10GE_ALLOC_SIZE;
		myri10ge_unmap_rx_page(mgp->pdev, &mgp->rx_small.info[idx],
				       mgp->small_bytes + MXGEFW_PAD);
		put_page(mgp->rx_small.info[idx].page);
	}

	/* drain TX slots still pending between done and req */
	tx = &mgp->tx;
	while (tx->done != tx->req) {
		idx = tx->done & tx->mask;
		skb = tx->info[idx].skb;

		/* Mark as free */
		tx->info[idx].skb = NULL;
		tx->done++;
		len = pci_unmap_len(&tx->info[idx], len);
		pci_unmap_len_set(&tx->info[idx], len, 0);
		/* slots with an skb were mapped with pci_map_single;
		 * skb-less slots hold fragment pages */
		if (skb) {
			mgp->stats.tx_dropped++;
			dev_kfree_skb_any(skb);
			if (len)
				pci_unmap_single(mgp->pdev,
						 pci_unmap_addr(&tx->info[idx],
								bus), len,
						 PCI_DMA_TODEVICE);
		} else {
			if (len)
				pci_unmap_page(mgp->pdev,
					       pci_unmap_addr(&tx->info[idx],
							      bus), len,
					       PCI_DMA_TODEVICE);
		}
	}
	kfree(mgp->rx_big.info);

	kfree(mgp->rx_small.info);

	kfree(mgp->tx.info);

	kfree(mgp->rx_big.shadow);

	kfree(mgp->rx_small.shadow);

	kfree(mgp->tx.req_bytes);
	mgp->tx.req_bytes = NULL;
	mgp->tx.req_list = NULL;
}

/*
 * Request the device interrupt, optionally enabling MSI first (module
 * parameter myri10ge_msi).  MSI setup failure is non-fatal: we fall
 * back to legacy interrupts.  IRQ request failure disables MSI again
 * and returns the error.
 */
static int myri10ge_request_irq(struct myri10ge_priv *mgp)
{
	struct pci_dev *pdev = mgp->pdev;
	int status;

	if (myri10ge_msi) {
		status = pci_enable_msi(pdev);
		if (status != 0)
			dev_err(&pdev->dev,
				"Error %d setting up MSI; falling back to xPIC\n",
				status);
		else
			mgp->msi_enabled = 1;
	} else {
		mgp->msi_enabled = 0;
	}
	status = request_irq(pdev->irq, myri10ge_intr, IRQF_SHARED,
			     mgp->dev->name, mgp);
	if (status != 0) {
		dev_err(&pdev->dev, "failed to allocate IRQ\n");
		if (mgp->msi_enabled)
			pci_disable_msi(pdev);
	}
	return status;
}

/* Release the IRQ and, if MSI was enabled, disable it again. */
static void myri10ge_free_irq(struct myri10ge_priv *mgp)
{
	struct pci_dev *pdev = mgp->pdev;

	free_irq(pdev->irq, mgp);
	if
(mgp->msi_enabled)
		pci_disable_msi(pdev);
}

/*
 * LRO frag-header callback: locate the MAC/IP/TCP headers in a page
 * fragment, rejecting anything that is not plain or 802.1Q-tagged IPv4
 * (returns -1).  priv carries the hardware checksum, which for VLAN
 * frames must be adjusted because the HW sums from ETH_HLEN onward.
 * NOTE(review): this function is truncated at the end of this chunk.
 */
static int
myri10ge_get_frag_header(struct skb_frag_struct *frag, void **mac_hdr,
			 void **ip_hdr, void **tcpudp_hdr,
			 u64 * hdr_flags, void *priv)
{
	struct ethhdr *eh;
	struct vlan_ethhdr *veh;
	struct iphdr *iph;
	u8 *va = page_address(frag->page) + frag->page_offset;
	unsigned long ll_hlen;
	__wsum csum = (__wsum) (unsigned long)priv;

	/* find the mac header, aborting if not IPv4 */

	eh = (struct ethhdr *)va;
	*mac_hdr = eh;
	ll_hlen = ETH_HLEN;
	if (eh->h_proto != htons(ETH_P_IP)) {
		if (eh->h_proto == htons(ETH_P_8021Q)) {
			veh = (struct vlan_ethhdr *)va;
			if (veh->h_vlan_encapsulated_proto != htons(ETH_P_IP))
				return -1;
			ll_hlen += VLAN_HLEN;

			/*
			 *  HW checksum starts ETH_HLEN bytes into
			 *  frame, so we must subtract off the VLAN
			 *  header's checksum before csum can be used
			 */
			csum = csum_sub(csum, csum_partial(va + ETH_HLEN,
							   VLAN_HLEN, 0));
		} else {
			return -1;

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -