⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 c2.c

📁 linux内核源码
💻 C
📖 第 1 页 / 共 3 页
字号:
/*
 * Recycle an RX descriptor whose packet was bad: dump the offending
 * RXP header via pr_debug, rewind the skb so its buffer can be reused,
 * zero the header area, and hand the descriptor back to the adapter
 * marked READY.  Bumps the rx_dropped statistic.
 */
static void c2_rx_error(struct c2_port *c2_port, struct c2_element *elem)
{
	struct c2_rx_desc *rx_desc = elem->ht_desc;
	struct c2_rxp_hdr *rxp_hdr = (struct c2_rxp_hdr *) elem->skb->data;

	/* Only log the header details when it actually looks corrupt
	 * (bad status, or claimed length larger than the buffer). */
	if (rxp_hdr->status != RXP_HRXD_OK ||
	    rxp_hdr->len > (rx_desc->len - sizeof(*rxp_hdr))) {
		pr_debug("BAD RXP_HRXD\n");
		pr_debug("  rx_desc : %p\n", rx_desc);
		pr_debug("    index : %Zu\n",
			elem - c2_port->rx_ring.start);
		pr_debug("    len   : %u\n", rx_desc->len);
		pr_debug("  rxp_hdr : %p [PA %p]\n", rxp_hdr,
			(void *) __pa((unsigned long) rxp_hdr));
		pr_debug("    flags : 0x%x\n", rxp_hdr->flags);
		pr_debug("    status: 0x%x\n", rxp_hdr->status);
		pr_debug("    len   : %u\n", rxp_hdr->len);
		pr_debug("    rsvd  : 0x%x\n", rxp_hdr->rsvd);
	}

	/* Setup the skb for reuse since we're dropping this pkt */
	elem->skb->data = elem->skb->head;
	skb_reset_tail_pointer(elem->skb);

	/* Zero out the rxp hdr in the sk_buff */
	memset(elem->skb->data, 0, sizeof(*rxp_hdr));

	/* Write the descriptor to the adapter's rx ring */
	__raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
	__raw_writew(0, elem->hw_desc + C2_RXP_COUNT);
	__raw_writew(cpu_to_be16((u16) elem->maplen - sizeof(*rxp_hdr)),
		     elem->hw_desc + C2_RXP_LEN);
	__raw_writeq(cpu_to_be64(elem->mapaddr), elem->hw_desc + C2_RXP_ADDR);
	/* FLAGS written last: setting READY is what publishes the
	 * descriptor to the adapter, so the fields above must land first. */
	__raw_writew(cpu_to_be16(RXP_HRXD_READY), elem->hw_desc + C2_RXP_FLAGS);

	pr_debug("packet dropped\n");
	c2_port->netstats.rx_dropped++;
}

/*
 * RX completion handler: walk the host RX ring from c2dev->cur_rx,
 * pass each completed packet up the stack via netif_rx(), replenish
 * the descriptor with a fresh skb, and record where we stopped.
 * Runs under c2dev->lock with IRQs disabled.
 */
static void c2_rx_interrupt(struct net_device *netdev)
{
	struct c2_port *c2_port = netdev_priv(netdev);
	struct c2_dev *c2dev = c2_port->c2dev;
	struct c2_ring *rx_ring = &c2_port->rx_ring;
	struct c2_element *elem;
	struct c2_rx_desc *rx_desc;
	struct c2_rxp_hdr *rxp_hdr;
	struct sk_buff *skb;
	dma_addr_t mapaddr;
	u32 maplen, buflen;
	unsigned long flags;

	spin_lock_irqsave(&c2dev->lock, flags);

	/* Begin where we left off */
	rx_ring->to_clean = rx_ring->start + c2dev->cur_rx;

	for (elem = rx_ring->to_clean; elem->next != rx_ring->to_clean;
	     elem = elem->next) {
		rx_desc = elem->ht_desc;
		mapaddr = elem->mapaddr;
		maplen = elem->maplen;
		skb = elem->skb;
		rxp_hdr = (struct c2_rxp_hdr *) skb->data;

		/* Stop at the first descriptor the adapter hasn't filled */
		if (rxp_hdr->flags != RXP_HRXD_DONE)
			break;
		buflen = rxp_hdr->len;

		/* Sanity check the RXP header */
		if (rxp_hdr->status != RXP_HRXD_OK ||
		    buflen > (rx_desc->len - sizeof(*rxp_hdr))) {
			c2_rx_error(c2_port, elem);
			continue;
		}

		/*
		 * Allocate and map a new skb for replenishing the host
		 * RX desc
		 */
		if (c2_rx_alloc(c2_port, elem)) {
			/* Allocation failed: requeue the old buffer and
			 * drop this packet rather than stall the ring. */
			c2_rx_error(c2_port, elem);
			continue;
		}

		/* Unmap the old skb */
		pci_unmap_single(c2dev->pcidev, mapaddr, maplen,
				 PCI_DMA_FROMDEVICE);

		prefetch(skb->data);

		/*
		 * Skip past the leading 8 bytes comprising of the
		 * "struct c2_rxp_hdr", prepended by the adapter
		 * to the usual Ethernet header ("struct ethhdr"),
		 * to the start of the raw Ethernet packet.
		 *
		 * Fix up the various fields in the sk_buff before
		 * passing it up to netif_rx(). The transfer size
		 * (in bytes) specified by the adapter len field of
		 * the "struct rxp_hdr_t" does NOT include the
		 * "sizeof(struct c2_rxp_hdr)".
		 */
		skb->data += sizeof(*rxp_hdr);
		skb_set_tail_pointer(skb, buflen);
		skb->len = buflen;
		skb->protocol = eth_type_trans(skb, netdev);

		netif_rx(skb);

		netdev->last_rx = jiffies;
		c2_port->netstats.rx_packets++;
		c2_port->netstats.rx_bytes += buflen;
	}

	/* Save where we left off */
	rx_ring->to_clean = elem;
	c2dev->cur_rx = elem - rx_ring->start;
	C2_SET_CUR_RX(c2dev, c2dev->cur_rx);

	spin_unlock_irqrestore(&c2dev->lock, flags);
}

/*
 * Handle netisr0 TX & RX interrupts.
 *
 * Top-level IRQ handler: services the CCILNET (netpath) interrupt and
 * the RNIC DMA interrupt, returning IRQ_HANDLED if either was pending.
 */
static irqreturn_t c2_interrupt(int irq, void *dev_id)
{
	unsigned int netisr0, dmaisr;
	int handled = 0;
	struct c2_dev *c2dev = (struct c2_dev *) dev_id;

	/* Process CCILNET interrupts */
	netisr0 = readl(c2dev->regs + C2_NISR0);
	if (netisr0) {

		/*
		 * There is an issue with the firmware that always
		 * provides the status of RX for both TX & RX
		 * interrupts.  So process both queues here.
		 */
		c2_rx_interrupt(c2dev->netdev);
		c2_tx_interrupt(c2dev->netdev);

		/* Clear the interrupt */
		writel(netisr0, c2dev->regs + C2_NISR0);
		handled++;
	}

	/* Process RNIC interrupts */
	dmaisr = readl(c2dev->regs + C2_DISR);
	if (dmaisr) {
		writel(dmaisr, c2dev->regs + C2_DISR);
		c2_rnic_interrupt(c2dev);
		handled++;
	}

	if (handled) {
		return IRQ_HANDLED;
	} else {
		return IRQ_NONE;
	}
}

/*
 * Bring the interface up (ndo_open-style path): allocate the coherent
 * DMA block holding both host descriptor rings, build and fill the
 * RX/TX rings, reset the adapter, publish the RX buffers, start the
 * queue, and unmask the TX/RX interrupts.
 *
 * Returns 0 on success or a negative errno; on failure the goto-chain
 * (bail1/bail0) unwinds whatever was allocated.
 */
static int c2_up(struct net_device *netdev)
{
	struct c2_port *c2_port = netdev_priv(netdev);
	struct c2_dev *c2dev = c2_port->c2dev;
	struct c2_element *elem;
	struct c2_rxp_hdr *rxp_hdr;
	struct in_device *in_dev;
	size_t rx_size, tx_size;
	int ret, i;
	unsigned int netimr0;

	if (netif_msg_ifup(c2_port))
		pr_debug("%s: enabling interface\n", netdev->name);

	/* Set the Rx buffer size based on MTU */
	c2_set_rxbufsize(c2_port);

	/* Allocate DMA'able memory for Tx/Rx host descriptor rings */
	rx_size = c2_port->rx_ring.count * sizeof(struct c2_rx_desc);
	tx_size = c2_port->tx_ring.count * sizeof(struct c2_tx_desc);

	c2_port->mem_size = tx_size + rx_size;
	c2_port->mem = pci_alloc_consistent(c2dev->pcidev, c2_port->mem_size,
					    &c2_port->dma);
	if (c2_port->mem == NULL) {
		pr_debug("Unable to allocate memory for "
			"host descriptor rings\n");
		return -ENOMEM;
	}

	memset(c2_port->mem, 0, c2_port->mem_size);

	/* Create the Rx host descriptor ring */
	if ((ret =
	     c2_rx_ring_alloc(&c2_port->rx_ring, c2_port->mem, c2_port->dma,
			      c2dev->mmio_rxp_ring))) {
		pr_debug("Unable to create RX ring\n");
		goto bail0;
	}

	/* Allocate Rx buffers for the host descriptor ring */
	if (c2_rx_fill(c2_port)) {
		/* NOTE(review): ret is not set on this path; the value
		 * returned is whatever c2_rx_ring_alloc() left in ret (0),
		 * so a fill failure may be reported as success — verify. */
		pr_debug("Unable to fill RX ring\n");
		goto bail1;
	}

	/* Create the Tx host descriptor ring */
	if ((ret = c2_tx_ring_alloc(&c2_port->tx_ring, c2_port->mem + rx_size,
				    c2_port->dma + rx_size,
				    c2dev->mmio_txp_ring))) {
		pr_debug("Unable to create TX ring\n");
		goto bail1;
	}

	/* Set the TX pointer to where we left off */
	c2_port->tx_avail = c2_port->tx_ring.count - 1;
	c2_port->tx_ring.to_use = c2_port->tx_ring.to_clean =
	    c2_port->tx_ring.start + c2dev->cur_tx;

	/* missing: Initialize MAC */

	BUG_ON(c2_port->tx_ring.to_use != c2_port->tx_ring.to_clean);

	/* Reset the adapter, ensures the driver is in sync with the RXP */
	c2_reset(c2_port);

	/* Reset the READY bit in the sk_buff RXP headers & adapter HRXDQ */
	for (i = 0, elem = c2_port->rx_ring.start; i < c2_port->rx_ring.count;
	     i++, elem++) {
		rxp_hdr = (struct c2_rxp_hdr *) elem->skb->data;
		rxp_hdr->flags = 0;
		__raw_writew(cpu_to_be16(RXP_HRXD_READY),
			     elem->hw_desc + C2_RXP_FLAGS);
	}

	/* Enable network packets */
	netif_start_queue(netdev);

	/* Enable IRQ */
	writel(0, c2dev->regs + C2_IDIS);
	netimr0 = readl(c2dev->regs + C2_NIMR0);
	/* Clearing the mask bits unmasks host TX and RX interrupts */
	netimr0 &= ~(C2_PCI_HTX_INT | C2_PCI_HRX_INT);
	writel(netimr0, c2dev->regs + C2_NIMR0);

	/* Tell the stack to ignore arp requests for ipaddrs bound to
	 * other interfaces.  This is needed to prevent the host stack
	 * from responding to arp requests to the ipaddr bound on the
	 * rdma interface.
	 */
	/* NOTE(review): in_dev_get() can return NULL (no inet config on
	 * the device); this dereferences it unchecked — confirm upstream. */
	in_dev = in_dev_get(netdev);
	IN_DEV_CONF_SET(in_dev, ARP_IGNORE, 1);
	in_dev_put(in_dev);

	return 0;

      bail1:
	c2_rx_clean(c2_port);
	kfree(c2_port->rx_ring.start);

      bail0:
	pci_free_consistent(c2dev->pcidev, c2_port->mem_size, c2_port->mem,
			    c2_port->dma);

	return ret;
}

/*
 * Bring the interface down (ndo_stop-style path): drain the TX queue,
 * mask IRQs, reset the adapter, then free every buffer and both
 * descriptor rings along with the coherent DMA block.  Always returns 0.
 */
static int c2_down(struct net_device *netdev)
{
	struct c2_port *c2_port = netdev_priv(netdev);
	struct c2_dev *c2dev = c2_port->c2dev;

	if (netif_msg_ifdown(c2_port))
		pr_debug("%s: disabling interface\n",
			netdev->name);

	/* Wait for all the queued packets to get sent */
	c2_tx_interrupt(netdev);

	/* Disable network packets */
	netif_stop_queue(netdev);

	/* Disable IRQs by clearing the interrupt mask */
	writel(1, c2dev->regs + C2_IDIS);
	writel(0, c2dev->regs + C2_NIMR0);

	/* missing: Stop transmitter */
	/* missing: Stop receiver */

	/* Reset the adapter, ensures the driver is in sync with the RXP */
	c2_reset(c2_port);

	/* missing: Turn off LEDs here */

	/* Free all buffers in the host descriptor rings */
	c2_tx_clean(c2_port);
	c2_rx_clean(c2_port);

	/* Free the host descriptor rings */
	kfree(c2_port->rx_ring.start);
	kfree(c2_port->tx_ring.start);
	pci_free_consistent(c2dev->pcidev, c2_port->mem_size, c2_port->mem,
			    c2_port->dma);

	return 0;
}

/*
 * Quiesce the RXP: request quiesce via the CUR_RX register, give the
 * firmware 2 seconds to acknowledge by clearing C2_PCI_HRX_QUI, and
 * resync the driver's cached cur_rx index with the hardware's value.
 */
static void c2_reset(struct c2_port *c2_port)
{
	struct c2_dev *c2dev = c2_port->c2dev;
	unsigned int cur_rx = c2dev->cur_rx;

	/* Tell the hardware to quiesce */
	C2_SET_CUR_RX(c2dev, cur_rx | C2_PCI_HRX_QUI);

	/*
	 * The hardware will reset the C2_PCI_HRX_QUI bit once
	 * the RXP is quiesced.  Wait 2 seconds for this.
	 */
	ssleep(2);

	cur_rx = C2_GET_CUR_RX(c2dev);

	if (cur_rx & C2_PCI_HRX_QUI)
		pr_debug("c2_reset: failed to quiesce the hardware!\n");

	cur_rx &= ~C2_PCI_HRX_QUI;

	c2dev->cur_rx = cur_rx;

	pr_debug("Current RX: %u\n", c2dev->cur_rx);
}

/*
 * Transmit path (hard_start_xmit): map the skb head (and each page
 * fragment) for DMA, post one TX descriptor per piece to the adapter,
 * and stop the queue if the ring is nearly full.  Runs under
 * c2_port->tx_lock.  Returns NETDEV_TX_BUSY if the ring cannot hold
 * nr_frags + 1 descriptors.
 */
static int c2_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct c2_port *c2_port = netdev_priv(netdev);
	struct c2_dev *c2dev = c2_port->c2dev;
	struct c2_ring *tx_ring = &c2_port->tx_ring;
	struct c2_element *elem;
	dma_addr_t mapaddr;
	u32 maplen;
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&c2_port->tx_lock, flags);

	/* Need one descriptor for the head plus one per fragment */
	if (unlikely(c2_port->tx_avail < (skb_shinfo(skb)->nr_frags + 1))) {
		netif_stop_queue(netdev);
		spin_unlock_irqrestore(&c2_port->tx_lock, flags);

		pr_debug("%s: Tx ring full when queue awake!\n",
			netdev->name);
		return NETDEV_TX_BUSY;
	}

	maplen = skb_headlen(skb);
	/* NOTE(review): pci_map_single() result is not checked for a
	 * mapping error before being handed to the hardware — verify. */
	mapaddr =
	    pci_map_single(c2dev->pcidev, skb->data, maplen, PCI_DMA_TODEVICE);

	elem = tx_ring->to_use;
	elem->skb = skb;
	elem->mapaddr = mapaddr;
	elem->maplen = maplen;

	/* Tell HW to xmit */
	__raw_writeq(cpu_to_be64(mapaddr), elem->hw_desc + C2_TXP_ADDR);
	__raw_writew(cpu_to_be16(maplen), elem->hw_desc + C2_TXP_LEN);
	__raw_writew(cpu_to_be16(TXP_HTXD_READY), elem->hw_desc + C2_TXP_FLAGS);

	c2_port->netstats.tx_packets++;
	c2_port->netstats.tx_bytes += maplen;

	/* Loop thru additional data fragments and queue them */
	if (skb_shinfo(skb)->nr_frags) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			maplen = frag->size;
			mapaddr =
			    pci_map_page(c2dev->pcidev, frag->page,
					 frag->page_offset, maplen,
					 PCI_DMA_TODEVICE);

			elem = elem->next;
			/* Only the last element keeps the skb pointer;
			 * fragment descriptors carry NULL */
			elem->skb = NULL;
			elem->mapaddr = mapaddr;
			elem->maplen = maplen;

			/* Tell HW to xmit */
			__raw_writeq(cpu_to_be64(mapaddr),
				     elem->hw_desc + C2_TXP_ADDR);
			__raw_writew(cpu_to_be16(maplen),
				     elem->hw_desc + C2_TXP_LEN);
			__raw_writew(cpu_to_be16(TXP_HTXD_READY),
				     elem->hw_desc + C2_TXP_FLAGS);

			c2_port->netstats.tx_packets++;
			c2_port->netstats.tx_bytes += maplen;
		}
	}

	tx_ring->to_use = elem->next;
	c2_port->tx_avail -= (skb_shinfo(skb)->nr_frags + 1);

	/* Keep headroom for a maximally-fragmented skb next time */
	if (c2_port->tx_avail <= MAX_SKB_FRAGS + 1) {
		netif_stop_queue(netdev);
		if (netif_msg_tx_queued(c2_port))
			pr_debug("%s: transmit queue full\n",
				netdev->name);
	}

	spin_unlock_irqrestore(&c2_port->tx_lock, flags);

	netdev->trans_start = jiffies;
	/* NOTE(review): this function is truncated in this page of the
	 * listing — the return statement and closing brace are on the
	 * next page and are not reproduced here. */

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -