
via-rhine.c
Linux kernel source code (C), page 1 of 4
		if (!netif_carrier_ok(mii->dev))
			netif_carrier_on(mii->dev);
	}
	else	/* Let MII library update carrier status */
		rhine_check_media(mii->dev, 0);

	if (debug > 1)
		printk(KERN_INFO "%s: force_media %d, carrier %d\n",
		       mii->dev->name, mii->force_media,
		       netif_carrier_ok(mii->dev));
}

static void init_registers(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int i;

	for (i = 0; i < 6; i++)
		iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);

	/* Initialize other registers. */
	iowrite16(0x0006, ioaddr + PCIBusConfig);	/* Tune configuration??? */
	/* Configure initial FIFO thresholds. */
	iowrite8(0x20, ioaddr + TxConfig);
	rp->tx_thresh = 0x20;
	rp->rx_thresh = 0x60;		/* Written in rhine_set_rx_mode(). */

	iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
	iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);

	rhine_set_rx_mode(dev);

#ifdef CONFIG_VIA_RHINE_NAPI
	napi_enable(&rp->napi);
#endif

	/* Enable interrupts by setting the interrupt mask. */
	iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty | IntrRxOverflow |
	       IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
	       IntrTxDone | IntrTxError | IntrTxUnderrun |
	       IntrPCIErr | IntrStatsMax | IntrLinkChange,
	       ioaddr + IntrEnable);

	iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
	       ioaddr + ChipCmd);
	rhine_check_media(dev, 1);
}

/* Enable MII link status auto-polling (required for IntrLinkChange) */
static void rhine_enable_linkmon(void __iomem *ioaddr)
{
	iowrite8(0, ioaddr + MIICmd);
	iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
	iowrite8(0x80, ioaddr + MIICmd);

	RHINE_WAIT_FOR((ioread8(ioaddr + MIIRegAddr) & 0x20));

	iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
}

/* Disable MII link status auto-polling (required for MDIO access) */
static void rhine_disable_linkmon(void __iomem *ioaddr, u32 quirks)
{
	iowrite8(0, ioaddr + MIICmd);

	if (quirks & rqRhineI) {
		iowrite8(0x01, ioaddr + MIIRegAddr);	// MII_BMSR

		/* Can be called from ISR. Evil. */
		mdelay(1);

		/* 0x80 must be set immediately before turning it off */
		iowrite8(0x80, ioaddr + MIICmd);

		RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x20);

		/* Heh. Now clear 0x80 again. */
		iowrite8(0, ioaddr + MIICmd);
	}
	else
		RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x80);
}

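/*
 * MDIO access below follows a strict sequence: auto-polling is stopped
 * via rhine_disable_linkmon(), the PHY and register addresses are
 * programmed, a strobe bit in MIICmd (0x40 for read, 0x20 for write)
 * starts the transfer, and RHINE_WAIT_FOR() spins until the chip
 * clears that strobe. rhine_enable_linkmon() then restarts auto-polling
 * so IntrLinkChange events keep arriving.
 */
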
/* Read and write over the MII Management Data I/O (MDIO) interface. */

static int mdio_read(struct net_device *dev, int phy_id, int regnum)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int result;

	rhine_disable_linkmon(ioaddr, rp->quirks);

	/* rhine_disable_linkmon already cleared MIICmd */
	iowrite8(phy_id, ioaddr + MIIPhyAddr);
	iowrite8(regnum, ioaddr + MIIRegAddr);
	iowrite8(0x40, ioaddr + MIICmd);		/* Trigger read */
	RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x40));
	result = ioread16(ioaddr + MIIData);

	rhine_enable_linkmon(ioaddr);
	return result;
}

static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	rhine_disable_linkmon(ioaddr, rp->quirks);

	/* rhine_disable_linkmon already cleared MIICmd */
	iowrite8(phy_id, ioaddr + MIIPhyAddr);
	iowrite8(regnum, ioaddr + MIIRegAddr);
	iowrite16(value, ioaddr + MIIData);
	iowrite8(0x20, ioaddr + MIICmd);		/* Trigger write */
	RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x20));

	rhine_enable_linkmon(ioaddr);
}

static int rhine_open(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int rc;

	rc = request_irq(rp->pdev->irq, &rhine_interrupt, IRQF_SHARED, dev->name,
			dev);
	if (rc)
		return rc;

	if (debug > 1)
		printk(KERN_DEBUG "%s: rhine_open() irq %d.\n",
		       dev->name, rp->pdev->irq);

	rc = alloc_ring(dev);
	if (rc) {
		free_irq(rp->pdev->irq, dev);
		return rc;
	}
	alloc_rbufs(dev);
	alloc_tbufs(dev);
	rhine_chip_reset(dev);
	init_registers(dev);
	if (debug > 2)
		printk(KERN_DEBUG "%s: Done rhine_open(), status %4.4x "
		       "MII status: %4.4x.\n",
		       dev->name, ioread16(ioaddr + ChipCmd),
		       mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));

	netif_start_queue(dev);

	return 0;
}

static void rhine_tx_timeout(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
	       "%4.4x, resetting...\n",
	       dev->name, ioread16(ioaddr + IntrStatus),
	       mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));

	/* protect against concurrent rx interrupts */
	disable_irq(rp->pdev->irq);

#ifdef CONFIG_VIA_RHINE_NAPI
	napi_disable(&rp->napi);
#endif

	spin_lock(&rp->lock);

	/* clear all descriptors */
	free_tbufs(dev);
	free_rbufs(dev);
	alloc_tbufs(dev);
	alloc_rbufs(dev);

	/* Reinitialize the hardware. */
	rhine_chip_reset(dev);
	init_registers(dev);

	spin_unlock(&rp->lock);
	enable_irq(rp->pdev->irq);

	dev->trans_start = jiffies;
	rp->stats.tx_errors++;
	netif_wake_queue(dev);
}

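/*
 * Transmit path: a descriptor is handed to the chip by setting DescOwn
 * last, bracketed by write barriers, so the hardware never sees a
 * half-written descriptor. Rhine-I parts (rqRhineI) need an alignment
 * bounce buffer (rp->tx_buf) for skbs that are unaligned, fragmented,
 * or still need their checksum computed; skb_copy_and_csum_dev() does
 * the copy and checksum in one pass.
 */
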
static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	unsigned entry;

	/* Caution: the write order is important here, set the field
	   with the "ownership" bits last. */

	/* Calculate the next Tx descriptor entry. */
	entry = rp->cur_tx % TX_RING_SIZE;

	if (skb_padto(skb, ETH_ZLEN))
		return 0;

	rp->tx_skbuff[entry] = skb;

	if ((rp->quirks & rqRhineI) &&
	    (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 ||
	     skb->ip_summed == CHECKSUM_PARTIAL)) {
		/* Must use alignment buffer. */
		if (skb->len > PKT_BUF_SZ) {
			/* packet too long, drop it */
			dev_kfree_skb(skb);
			rp->tx_skbuff[entry] = NULL;
			rp->stats.tx_dropped++;
			return 0;
		}

		/* Padding is not copied and so must be redone. */
		skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
		if (skb->len < ETH_ZLEN)
			memset(rp->tx_buf[entry] + skb->len, 0,
			       ETH_ZLEN - skb->len);
		rp->tx_skbuff_dma[entry] = 0;
		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
						      (rp->tx_buf[entry] -
						       rp->tx_bufs));
	} else {
		rp->tx_skbuff_dma[entry] =
			pci_map_single(rp->pdev, skb->data, skb->len,
				       PCI_DMA_TODEVICE);
		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
	}

	rp->tx_ring[entry].desc_length =
		cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));

	/* lock eth irq */
	spin_lock_irq(&rp->lock);
	wmb();
	rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
	wmb();

	rp->cur_tx++;

	/* Non-x86 Todo: explicitly flush cache lines here. */

	/* Wake the potentially-idle transmit channel */
	iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
	       ioaddr + ChipCmd1);
	IOSYNC;

	if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
		netif_stop_queue(dev);

	dev->trans_start = jiffies;

	spin_unlock_irq(&rp->lock);

	if (debug > 4) {
		printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
		       dev->name, rp->cur_tx-1, entry);
	}
	return 0;
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u32 intr_status;
	int boguscnt = max_interrupt_work;
	int handled = 0;

	while ((intr_status = get_intr_status(dev))) {
		handled = 1;

		/* Acknowledge all of the current interrupt sources ASAP. */
		if (intr_status & IntrTxDescRace)
			iowrite8(0x08, ioaddr + IntrStatus2);
		iowrite16(intr_status & 0xffff, ioaddr + IntrStatus);
		IOSYNC;

		if (debug > 4)
			printk(KERN_DEBUG "%s: Interrupt, status %8.8x.\n",
			       dev->name, intr_status);

		if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped |
				   IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf)) {
#ifdef CONFIG_VIA_RHINE_NAPI
			iowrite16(IntrTxAborted |
				  IntrTxDone | IntrTxError | IntrTxUnderrun |
				  IntrPCIErr | IntrStatsMax | IntrLinkChange,
				  ioaddr + IntrEnable);

			netif_rx_schedule(dev, &rp->napi);
#else
			rhine_rx(dev, RX_RING_SIZE);
#endif
		}

		if (intr_status & (IntrTxErrSummary | IntrTxDone)) {
			if (intr_status & IntrTxErrSummary) {
				/* Avoid scavenging before Tx engine turned off */
				RHINE_WAIT_FOR(!(ioread8(ioaddr+ChipCmd) & CmdTxOn));
				if (debug > 2 &&
				    ioread8(ioaddr+ChipCmd) & CmdTxOn)
					printk(KERN_WARNING "%s: "
					       "rhine_interrupt() Tx engine "
					       "still on.\n", dev->name);
			}
			rhine_tx(dev);
		}

		/* Abnormal error summary/uncommon events handlers. */
		if (intr_status & (IntrPCIErr | IntrLinkChange |
				   IntrStatsMax | IntrTxError | IntrTxAborted |
				   IntrTxUnderrun | IntrTxDescRace))
			rhine_error(dev, intr_status);

		if (--boguscnt < 0) {
			printk(KERN_WARNING "%s: Too much work at interrupt, "
			       "status=%#8.8x.\n",
			       dev->name, intr_status);
			break;
		}
	}

	if (debug > 3)
		printk(KERN_DEBUG "%s: exiting interrupt, status=%8.8x.\n",
		       dev->name, ioread16(ioaddr + IntrStatus));

	return IRQ_RETVAL(handled);
}

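/*
 * Tx completion: rhine_tx() walks the ring from dirty_tx towards
 * cur_tx, stopping at the first descriptor the chip still owns
 * (DescOwn). Error bits in tx_status are folded into the net_device
 * statistics; on a FIFO-related error the descriptor is handed back to
 * the chip so the same skb is retried once the transmitter restarts.
 */
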
/* This routine is logically part of the interrupt handler, but isolated
   for clarity. */
static void rhine_tx(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;

	spin_lock(&rp->lock);

	/* find and cleanup dirty tx descriptors */
	while (rp->dirty_tx != rp->cur_tx) {
		txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
		if (debug > 6)
			printk(KERN_DEBUG "Tx scavenge %d status %8.8x.\n",
			       entry, txstatus);
		if (txstatus & DescOwn)
			break;
		if (txstatus & 0x8000) {
			if (debug > 1)
				printk(KERN_DEBUG "%s: Transmit error, "
				       "Tx status %8.8x.\n",
				       dev->name, txstatus);
			rp->stats.tx_errors++;
			if (txstatus & 0x0400) rp->stats.tx_carrier_errors++;
			if (txstatus & 0x0200) rp->stats.tx_window_errors++;
			if (txstatus & 0x0100) rp->stats.tx_aborted_errors++;
			if (txstatus & 0x0080) rp->stats.tx_heartbeat_errors++;
			if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
			    (txstatus & 0x0800) || (txstatus & 0x1000)) {
				rp->stats.tx_fifo_errors++;
				rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
				break; /* Keep the skb - we try again */
			}
			/* Transmitter restarted in 'abnormal' handler. */
		} else {
			if (rp->quirks & rqRhineI)
				rp->stats.collisions += (txstatus >> 3) & 0x0F;
			else
				rp->stats.collisions += txstatus & 0x0F;
			if (debug > 6)
				printk(KERN_DEBUG "collisions: %1.1x:%1.1x\n",
				       (txstatus >> 3) & 0xF,
				       txstatus & 0xF);
			rp->stats.tx_bytes += rp->tx_skbuff[entry]->len;
			rp->stats.tx_packets++;
		}
		/* Free the original skb. */
		if (rp->tx_skbuff_dma[entry]) {
			pci_unmap_single(rp->pdev,
					 rp->tx_skbuff_dma[entry],
					 rp->tx_skbuff[entry]->len,
					 PCI_DMA_TODEVICE);
		}
		dev_kfree_skb_irq(rp->tx_skbuff[entry]);
		rp->tx_skbuff[entry] = NULL;
		entry = (++rp->dirty_tx) % TX_RING_SIZE;
	}
	if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
		netif_wake_queue(dev);

	spin_unlock(&rp->lock);
}

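/*
 * Receive path: frames shorter than rx_copybreak are copied into a
 * freshly allocated skb so the large ring buffer can be reused
 * immediately; longer frames are passed up directly and their ring
 * slot is refilled later. With CONFIG_VIA_RHINE_NAPI frames are
 * delivered via netif_receive_skb() from the poll loop, otherwise via
 * netif_rx() in interrupt context.
 */
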
/* Process up to limit frames from receive ring */
static int rhine_rx(struct net_device *dev, int limit)
{
	struct rhine_private *rp = netdev_priv(dev);
	int count;
	int entry = rp->cur_rx % RX_RING_SIZE;

	if (debug > 4) {
		printk(KERN_DEBUG "%s: rhine_rx(), entry %d status %8.8x.\n",
		       dev->name, entry,
		       le32_to_cpu(rp->rx_head_desc->rx_status));
	}

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	for (count = 0; count < limit; ++count) {
		struct rx_desc *desc = rp->rx_head_desc;
		u32 desc_status = le32_to_cpu(desc->rx_status);
		int data_size = desc_status >> 16;

		if (desc_status & DescOwn)
			break;

		if (debug > 4)
			printk(KERN_DEBUG "rhine_rx() status is %8.8x.\n",
			       desc_status);

		if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
			if ((desc_status & RxWholePkt) != RxWholePkt) {
				printk(KERN_WARNING "%s: Oversized Ethernet "
				       "frame spanned multiple buffers, entry "
				       "%#x length %d status %8.8x!\n",
				       dev->name, entry, data_size,
				       desc_status);
				printk(KERN_WARNING "%s: Oversized Ethernet "
				       "frame %p vs %p.\n", dev->name,
				       rp->rx_head_desc, &rp->rx_ring[entry]);
				rp->stats.rx_length_errors++;
			} else if (desc_status & RxErr) {
				/* There was an error. */
				if (debug > 2)
					printk(KERN_DEBUG "rhine_rx() Rx "
					       "error was %8.8x.\n",
					       desc_status);
				rp->stats.rx_errors++;
				if (desc_status & 0x0030) rp->stats.rx_length_errors++;
				if (desc_status & 0x0048) rp->stats.rx_fifo_errors++;
				if (desc_status & 0x0004) rp->stats.rx_frame_errors++;
				if (desc_status & 0x0002) {
					/* this can also be updated outside the interrupt handler */
					spin_lock(&rp->lock);
					rp->stats.rx_crc_errors++;
					spin_unlock(&rp->lock);
				}
			}
		} else {
			struct sk_buff *skb;
			/* Length should omit the CRC */
			int pkt_len = data_size - 4;

			/* Check if the packet is long enough to accept without
			   copying to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak &&
				(skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(rp->pdev,
							    rp->rx_skbuff_dma[entry],
							    rp->rx_buf_sz,
							    PCI_DMA_FROMDEVICE);

				skb_copy_to_linear_data(skb,
						 rp->rx_skbuff[entry]->data,
						 pkt_len);
				skb_put(skb, pkt_len);
				pci_dma_sync_single_for_device(rp->pdev,
							       rp->rx_skbuff_dma[entry],
							       rp->rx_buf_sz,
							       PCI_DMA_FROMDEVICE);
			} else {
				skb = rp->rx_skbuff[entry];
				if (skb == NULL) {
					printk(KERN_ERR "%s: Inconsistent Rx "
					       "descriptor chain.\n",
					       dev->name);
					break;
				}
				rp->rx_skbuff[entry] = NULL;
				skb_put(skb, pkt_len);
				pci_unmap_single(rp->pdev,
						 rp->rx_skbuff_dma[entry],
						 rp->rx_buf_sz,
						 PCI_DMA_FROMDEVICE);
			}
			skb->protocol = eth_type_trans(skb, dev);
#ifdef CONFIG_VIA_RHINE_NAPI
			netif_receive_skb(skb);
#else
			netif_rx(skb);
#endif
			dev->last_rx = jiffies;
			rp->stats.rx_bytes += pkt_len;
			rp->stats.rx_packets++;
		}
		entry = (++rp->cur_rx) % RX_RING_SIZE;
		rp->rx_head_desc = &rp->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) {
		struct sk_buff *skb;
		entry = rp->dirty_rx % RX_RING_SIZE;
