⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 c2.c

📁 linux内核源码
💻 C
📖 第 1 页 / 共 3 页
字号:
/* * Copyright (c) 2005 Ammasso, Inc. All rights reserved. * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses.  You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * *     Redistribution and use in source and binary forms, with or *     without modification, are permitted provided that the following *     conditions are met: * *      - Redistributions of source code must retain the above *        copyright notice, this list of conditions and the following *        disclaimer. * *      - Redistributions in binary form must reproduce the above *        copyright notice, this list of conditions and the following *        disclaimer in the documentation and/or other materials *        provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/byteorder.h>

#include <rdma/ib_smi.h>
#include "c2.h"
#include "c2_provider.h"

MODULE_AUTHOR("Tom Tucker <tom@opengridcomputing.com>");
MODULE_DESCRIPTION("Ammasso AMSO1100 Low-level iWARP Driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

/* Default netif_msg mask: driver, probe, link and ifup/ifdown events. */
static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
    | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;

static int debug = -1;		/* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

/* Forward declarations for the netdev and interrupt handlers below. */
static int c2_up(struct net_device *netdev);
static int c2_down(struct net_device *netdev);
static int c2_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
static void c2_tx_interrupt(struct net_device *netdev);
static void c2_rx_interrupt(struct net_device *netdev);
static irqreturn_t c2_interrupt(int irq, void *dev_id);
static void c2_tx_timeout(struct net_device *netdev);
static int c2_change_mtu(struct net_device *netdev, int new_mtu);
static void c2_reset(struct c2_port *c2_port);
static struct net_device_stats *c2_get_stats(struct net_device *netdev);

/* PCI IDs this driver binds to (Ammasso, vendor 0x18b8, device 0xb001). */
static struct pci_device_id c2_pci_table[] = {
	{ PCI_DEVICE(0x18b8, 0xb001) },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, c2_pci_table);

/* Log the interface's MAC address and IRQ at debug level. */
static void c2_print_macaddr(struct net_device *netdev)
{
	pr_debug("%s: MAC %02X:%02X:%02X:%02X:%02X:%02X, "
		"IRQ %u\n", netdev->name,
		netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
		netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5],
		netdev->irq);
}

/*
 * Compute the per-port receive buffer size.  A buffer must hold the
 * adapter's c2_rxp_hdr in front of the frame; when the MTU exceeds the
 * default RX_BUF_SIZE the buffer is sized for MTU + Ethernet header +
 * RXP header + IP alignment padding instead.
 */
static void c2_set_rxbufsize(struct c2_port *c2_port)
{
	struct net_device *netdev = c2_port->netdev;

	if (netdev->mtu > RX_BUF_SIZE)
		c2_port->rx_buf_size =
		    netdev->mtu + ETH_HLEN + sizeof(struct c2_rxp_hdr) +
		    NET_IP_ALIGN;
	else
		c2_port->rx_buf_size = sizeof(struct c2_rxp_hdr) + RX_BUF_SIZE;
}

/*
 * Allocate TX ring elements and chain them together.
 * One-to-one association of adapter descriptors with ring elements.
 *
 * Returns 0 on success or -ENOMEM if the host-side element array
 * cannot be allocated.  On success, to_use and to_clean both point
 * at the ring start.
 */
static int c2_tx_ring_alloc(struct c2_ring *tx_ring, void *vaddr,
			    dma_addr_t base, void __iomem * mmio_txp_ring)
{
	struct c2_tx_desc *tx_desc;
	struct c2_txp_desc __iomem *txp_desc;
	struct c2_element *elem;
	int i;

	tx_ring->start = kmalloc(sizeof(*elem) * tx_ring->count, GFP_KERNEL);
	if (!tx_ring->start)
		return -ENOMEM;

	elem = tx_ring->start;
	tx_desc = vaddr;
	txp_desc = mmio_txp_ring;
	for (i = 0; i < tx_ring->count; i++, elem++, tx_desc++, txp_desc++) {
		tx_desc->len = 0;
		tx_desc->status = 0;

		/* Set TXP_HTXD_UNINIT */
		/* The 0x1122334455667788 pattern appears to be a sentinel
		 * for an uninitialized descriptor address — TODO confirm
		 * against firmware docs. */
		__raw_writeq(cpu_to_be64(0x1122334455667788ULL),
			     (void __iomem *) txp_desc + C2_TXP_ADDR);
		__raw_writew(0, (void __iomem *) txp_desc + C2_TXP_LEN);
		__raw_writew(cpu_to_be16(TXP_HTXD_UNINIT),
			     (void __iomem *) txp_desc + C2_TXP_FLAGS);

		elem->skb = NULL;
		elem->ht_desc = tx_desc;
		elem->hw_desc = txp_desc;

		/* Last element wraps back to the ring start. */
		if (i == tx_ring->count - 1) {
			elem->next = tx_ring->start;
			tx_desc->next_offset = base;
		} else {
			elem->next = elem + 1;
			tx_desc->next_offset =
			    base + (i + 1) * sizeof(*tx_desc);
		}
	}

	tx_ring->to_use = tx_ring->to_clean = tx_ring->start;

	return 0;
}

/*
 * Allocate RX ring elements and chain them together.
 * One-to-one association of adapter descriptors with ring elements.
*/
static int c2_rx_ring_alloc(struct c2_ring *rx_ring, void *vaddr,
			    dma_addr_t base, void __iomem * mmio_rxp_ring)
{
	struct c2_rx_desc *rx_desc;
	struct c2_rxp_desc __iomem *rxp_desc;
	struct c2_element *elem;
	int i;

	rx_ring->start = kmalloc(sizeof(*elem) * rx_ring->count, GFP_KERNEL);
	if (!rx_ring->start)
		return -ENOMEM;

	elem = rx_ring->start;
	rx_desc = vaddr;
	rxp_desc = mmio_rxp_ring;
	for (i = 0; i < rx_ring->count; i++, elem++, rx_desc++, rxp_desc++) {
		rx_desc->len = 0;
		rx_desc->status = 0;

		/* Set RXP_HRXD_UNINIT */
		/* The 0x99aabbccddeeff pattern appears to be a sentinel for
		 * an uninitialized descriptor address — TODO confirm against
		 * firmware docs. */
		__raw_writew(cpu_to_be16(RXP_HRXD_OK),
		       (void __iomem *) rxp_desc + C2_RXP_STATUS);
		__raw_writew(0, (void __iomem *) rxp_desc + C2_RXP_COUNT);
		__raw_writew(0, (void __iomem *) rxp_desc + C2_RXP_LEN);
		__raw_writeq(cpu_to_be64(0x99aabbccddeeffULL),
			     (void __iomem *) rxp_desc + C2_RXP_ADDR);
		__raw_writew(cpu_to_be16(RXP_HRXD_UNINIT),
			     (void __iomem *) rxp_desc + C2_RXP_FLAGS);

		elem->skb = NULL;
		elem->ht_desc = rx_desc;
		elem->hw_desc = rxp_desc;

		/* Last element wraps back to the ring start. */
		if (i == rx_ring->count - 1) {
			elem->next = rx_ring->start;
			rx_desc->next_offset = base;
		} else {
			elem->next = elem + 1;
			rx_desc->next_offset =
			    base + (i + 1) * sizeof(*rx_desc);
		}
	}

	rx_ring->to_use = rx_ring->to_clean = rx_ring->start;

	return 0;
}

/* Setup buffer for receiving */
/*
 * Allocate an sk_buff for one RX element, DMA-map it, and hand it to the
 * adapter by writing the mapping into the element's RXP descriptor and
 * marking it RXP_HRXD_READY.  Returns 0 on success, -ENOMEM if no skb
 * could be allocated.
 */
static inline int c2_rx_alloc(struct c2_port *c2_port, struct c2_element *elem)
{
	struct c2_dev *c2dev = c2_port->c2dev;
	struct c2_rx_desc *rx_desc = elem->ht_desc;
	struct sk_buff *skb;
	dma_addr_t mapaddr;
	u32 maplen;
	struct c2_rxp_hdr *rxp_hdr;

	skb = dev_alloc_skb(c2_port->rx_buf_size);
	if (unlikely(!skb)) {
		pr_debug("%s: out of memory for receive\n",
			c2_port->netdev->name);
		return -ENOMEM;
	}

	/* Zero out the rxp hdr in the sk_buff */
	memset(skb->data, 0, sizeof(*rxp_hdr));

	skb->dev = c2_port->netdev;

	maplen = c2_port->rx_buf_size;
	/* NOTE(review): the mapping result is not checked for a DMA-mapping
	 * error — confirm whether pci_dma_mapping_error() should be used. */
	mapaddr =
	    pci_map_single(c2dev->pcidev, skb->data, maplen,
			   PCI_DMA_FROMDEVICE);

	/* Set the sk_buff RXP_header to RXP_HRXD_READY */
	rxp_hdr = (struct c2_rxp_hdr *) skb->data;
	rxp_hdr->flags = RXP_HRXD_READY;

	/* Publish the buffer to the adapter: length excludes the RXP
	 * header that precedes the frame data. */
	__raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
	__raw_writew(cpu_to_be16((u16) maplen - sizeof(*rxp_hdr)),
		     elem->hw_desc + C2_RXP_LEN);
	__raw_writeq(cpu_to_be64(mapaddr), elem->hw_desc + C2_RXP_ADDR);
	__raw_writew(cpu_to_be16(RXP_HRXD_READY), elem->hw_desc + C2_RXP_FLAGS);

	elem->skb = skb;
	elem->mapaddr = mapaddr;
	elem->maplen = maplen;
	rx_desc->len = maplen;

	return 0;
}

/*
 * Allocate buffers for the Rx ring
 * For receive:  rx_ring.to_clean is next received frame
 *
 * Returns 0 if every element got a buffer, 1 if any allocation failed
 * (elements already filled keep their buffers).
 */
static int c2_rx_fill(struct c2_port *c2_port)
{
	struct c2_ring *rx_ring = &c2_port->rx_ring;
	struct c2_element *elem;
	int ret = 0;

	elem = rx_ring->start;
	do {
		if (c2_rx_alloc(c2_port, elem)) {
			ret = 1;
			break;
		}
	} while ((elem = elem->next) != rx_ring->start);

	rx_ring->to_clean = rx_ring->start;
	return ret;
}

/* Free all buffers in RX ring, assumes receiver stopped */
static void c2_rx_clean(struct c2_port *c2_port)
{
	struct c2_dev *c2dev = c2_port->c2dev;
	struct c2_ring *rx_ring = &c2_port->rx_ring;
	struct c2_element *elem;
	struct c2_rx_desc *rx_desc;

	elem = rx_ring->start;
	do {
		rx_desc = elem->ht_desc;
		rx_desc->len = 0;

		/* Return the adapter-side descriptor to its UNINIT state
		 * (same sentinel values as c2_rx_ring_alloc). */
		__raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
		__raw_writew(0, elem->hw_desc + C2_RXP_COUNT);
		__raw_writew(0, elem->hw_desc + C2_RXP_LEN);
		__raw_writeq(cpu_to_be64(0x99aabbccddeeffULL),
			     elem->hw_desc + C2_RXP_ADDR);
		__raw_writew(cpu_to_be16(RXP_HRXD_UNINIT),
			     elem->hw_desc + C2_RXP_FLAGS);

		/* Unmap and release any buffer still owned by this element. */
		if (elem->skb) {
			pci_unmap_single(c2dev->pcidev, elem->mapaddr,
					 elem->maplen, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(elem->skb);
			elem->skb = NULL;
		}
	} while ((elem = elem->next) != rx_ring->start);
}

/*
 * Unmap one TX element's DMA buffer and free its sk_buff (if any).
 * Always returns 0.
 */
static inline int c2_tx_free(struct c2_dev *c2dev, struct c2_element *elem)
{
	struct c2_tx_desc *tx_desc = elem->ht_desc;

	tx_desc->len = 0;

	pci_unmap_single(c2dev->pcidev, elem->mapaddr, elem->maplen,
			 PCI_DMA_TODEVICE);

	if (elem->skb) {
		/* _any variant: may be called from IRQ context. */
		dev_kfree_skb_any(elem->skb);
		elem->skb = NULL;
	}

	return 0;
}

/* Free all
buffers in TX ring, assumes transmitter stopped */
static void c2_tx_clean(struct c2_port *c2_port)
{
	struct c2_ring *tx_ring = &c2_port->tx_ring;
	struct c2_element *elem;
	struct c2_txp_desc txp_htxd;
	int retry;
	unsigned long flags;

	spin_lock_irqsave(&c2_port->tx_lock, flags);

	elem = tx_ring->start;
	do {
		retry = 0;
		do {
			/* NOTE(review): unlike c2_tx_interrupt(), the flags
			 * word is compared without be16_to_cpu() — this
			 * matches TXP_HTXD_READY against the raw wire-order
			 * value; confirm this is intentional. */
			txp_htxd.flags =
			    readw(elem->hw_desc + C2_TXP_FLAGS);

			if (txp_htxd.flags == TXP_HTXD_READY) {
				/* The adapter still owns this descriptor:
				 * force it to DONE, count the frame as
				 * dropped, and rescan the whole ring. */
				retry = 1;
				__raw_writew(0,
					     elem->hw_desc + C2_TXP_LEN);
				__raw_writeq(0,
					     elem->hw_desc + C2_TXP_ADDR);
				__raw_writew(cpu_to_be16(TXP_HTXD_DONE),
					     elem->hw_desc + C2_TXP_FLAGS);
				c2_port->netstats.tx_dropped++;
				break;
			} else {
				/* Host-owned: reset the descriptor to its
				 * UNINIT state (sentinel address pattern as
				 * in c2_tx_ring_alloc). */
				__raw_writew(0,
					     elem->hw_desc + C2_TXP_LEN);
				__raw_writeq(cpu_to_be64(0x1122334455667788ULL),
					     elem->hw_desc + C2_TXP_ADDR);
				__raw_writew(cpu_to_be16(TXP_HTXD_UNINIT),
					     elem->hw_desc + C2_TXP_FLAGS);
			}

			c2_tx_free(c2_port->c2dev, elem);

		} while ((elem = elem->next) != tx_ring->start);
	} while (retry);

	/* Ring is empty again: restore the TX credit count and resync the
	 * adapter's notion of the current TX slot. */
	c2_port->tx_avail = c2_port->tx_ring.count - 1;
	c2_port->c2dev->cur_tx = tx_ring->to_use - tx_ring->start;

	if (c2_port->tx_avail > MAX_SKB_FRAGS + 1)
		netif_wake_queue(c2_port->netdev);

	spin_unlock_irqrestore(&c2_port->tx_lock, flags);
}

/*
 * Process transmit descriptors marked 'DONE' by the firmware,
 * freeing up their unneeded sk_buffs.
*/
static void c2_tx_interrupt(struct net_device *netdev)
{
	struct c2_port *c2_port = netdev_priv(netdev);
	struct c2_dev *c2dev = c2_port->c2dev;
	struct c2_ring *tx_ring = &c2_port->tx_ring;
	struct c2_element *elem;
	struct c2_txp_desc txp_htxd;

	spin_lock(&c2_port->tx_lock);

	/* Walk the in-flight span [to_clean, to_use); stop at the first
	 * descriptor the firmware has not yet marked DONE. */
	for (elem = tx_ring->to_clean; elem != tx_ring->to_use;
	     elem = elem->next) {
		txp_htxd.flags =
		    be16_to_cpu(readw(elem->hw_desc + C2_TXP_FLAGS));

		if (txp_htxd.flags != TXP_HTXD_DONE)
			break;

		if (netif_msg_tx_done(c2_port)) {
			/* PCI reads are expensive in fast path */
			txp_htxd.len =
			    be16_to_cpu(readw(elem->hw_desc + C2_TXP_LEN));
			pr_debug("%s: tx done slot %3Zu status 0x%x len "
				"%5u bytes\n",
				netdev->name, elem - tx_ring->start,
				txp_htxd.flags, txp_htxd.len);
		}

		/* Reclaim the buffer and return a TX credit. */
		c2_tx_free(c2dev, elem);
		++(c2_port->tx_avail);
	}

	tx_ring->to_clean = elem;

	/* Restart the queue once there is room for a maximally-fragmented
	 * skb again. */
	if (netif_queue_stopped(netdev)
	    && c2_port->tx_avail > MAX_SKB_FRAGS + 1)
		netif_wake_queue(netdev);

	spin_unlock(&c2_port->tx_lock);
}

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -