
📄 via-rhine.c

📁 Linux Kernel 2.6.9 for OMAP1710
💻 C
📖 Page 1 of 4
		rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
		if (rp->rx_skbuff[i]) {
			pci_unmap_single(rp->pdev,
					 rp->rx_skbuff_dma[i],
					 rp->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(rp->rx_skbuff[i]);
		}
		rp->rx_skbuff[i] = NULL;
	}
}

static void alloc_tbufs(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	dma_addr_t next;
	int i;

	rp->dirty_tx = rp->cur_tx = 0;
	next = rp->tx_ring_dma;
	for (i = 0; i < TX_RING_SIZE; i++) {
		rp->tx_skbuff[i] = NULL;
		rp->tx_ring[i].tx_status = 0;
		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
		next += sizeof(struct tx_desc);
		rp->tx_ring[i].next_desc = cpu_to_le32(next);
		rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
	}
	rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);
}

static void free_tbufs(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	int i;

	for (i = 0; i < TX_RING_SIZE; i++) {
		rp->tx_ring[i].tx_status = 0;
		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
		rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
		if (rp->tx_skbuff[i]) {
			if (rp->tx_skbuff_dma[i]) {
				pci_unmap_single(rp->pdev,
						 rp->tx_skbuff_dma[i],
						 rp->tx_skbuff[i]->len,
						 PCI_DMA_TODEVICE);
			}
			dev_kfree_skb(rp->tx_skbuff[i]);
		}
		rp->tx_skbuff[i] = NULL;
		rp->tx_buf[i] = NULL;
	}
}

static void rhine_check_media(struct net_device *dev, unsigned int init_media)
{
	struct rhine_private *rp = netdev_priv(dev);
	long ioaddr = dev->base_addr;

	mii_check_media(&rp->mii_if, debug, init_media);

	if (rp->mii_if.full_duplex)
		writeb(readb(ioaddr + ChipCmd1) | Cmd1FDuplex,
		       ioaddr + ChipCmd1);
	else
		writeb(readb(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
		       ioaddr + ChipCmd1);
}

static void init_registers(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	long ioaddr = dev->base_addr;
	int i;

	for (i = 0; i < 6; i++)
		writeb(dev->dev_addr[i], ioaddr + StationAddr + i);

	/* Initialize other registers. */
	writew(0x0006, ioaddr + PCIBusConfig);	/* Tune configuration??? */
	/* Configure initial FIFO thresholds. */
	writeb(0x20, ioaddr + TxConfig);
	rp->tx_thresh = 0x20;
	rp->rx_thresh = 0x60;		/* Written in rhine_set_rx_mode(). */

	writel(rp->rx_ring_dma, ioaddr + RxRingPtr);
	writel(rp->tx_ring_dma, ioaddr + TxRingPtr);

	rhine_set_rx_mode(dev);

	/* Enable interrupts by setting the interrupt mask. */
	writew(IntrRxDone | IntrRxErr | IntrRxEmpty | IntrRxOverflow |
	       IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
	       IntrTxDone | IntrTxError | IntrTxUnderrun |
	       IntrPCIErr | IntrStatsMax | IntrLinkChange,
	       ioaddr + IntrEnable);

	writew(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
	       ioaddr + ChipCmd);
	rhine_check_media(dev, 1);
}

/* Enable MII link status auto-polling (required for IntrLinkChange) */
static void rhine_enable_linkmon(long ioaddr)
{
	writeb(0, ioaddr + MIICmd);
	writeb(MII_BMSR, ioaddr + MIIRegAddr);
	writeb(0x80, ioaddr + MIICmd);

	RHINE_WAIT_FOR((readb(ioaddr + MIIRegAddr) & 0x20));

	writeb(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
}

/* Disable MII link status auto-polling (required for MDIO access) */
static void rhine_disable_linkmon(long ioaddr, u32 quirks)
{
	writeb(0, ioaddr + MIICmd);

	if (quirks & rqRhineI) {
		writeb(0x01, ioaddr + MIIRegAddr);	// MII_BMSR

		/* Can be called from ISR. Evil. */
		mdelay(1);

		/* 0x80 must be set immediately before turning it off */
		writeb(0x80, ioaddr + MIICmd);

		RHINE_WAIT_FOR(readb(ioaddr + MIIRegAddr) & 0x20);

		/* Heh. Now clear 0x80 again. */
		writeb(0, ioaddr + MIICmd);
	}
	else
		RHINE_WAIT_FOR(readb(ioaddr + MIIRegAddr) & 0x80);
}
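/*
 * Editor's sketch (not part of via-rhine.c): how alloc_tbufs() above links
 * the transmit descriptors into a ring. Each slot's next_desc holds the bus
 * address of the following slot, and the last slot points back at the first,
 * so the NIC can walk the ring indefinitely. This is a standalone userspace
 * model under assumptions: demo_desc, link_ring() and the 0x10000000 bus
 * address are illustrative stand-ins, not driver symbols.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_RING_SIZE 16			/* stand-in for TX_RING_SIZE */

struct demo_desc {
	uint32_t tx_status;
	uint32_t desc_length;
	uint32_t addr;
	uint32_t next_desc;	/* bus address of the next descriptor */
};

static void link_ring(struct demo_desc *ring, uint32_t ring_bus)
{
	uint32_t next = ring_bus;
	int i;

	for (i = 0; i < DEMO_RING_SIZE; i++) {
		next += sizeof(struct demo_desc);
		ring[i].next_desc = next;
	}
	/* Close the loop, exactly as alloc_tbufs() does for tx_ring. */
	ring[DEMO_RING_SIZE - 1].next_desc = ring_bus;
}

int main(void)
{
	struct demo_desc ring[DEMO_RING_SIZE] = { 0 };

	link_ring(ring, 0x10000000u);		/* hypothetical bus address */
	printf("last slot wraps to 0x%08x\n",
	       (unsigned)ring[DEMO_RING_SIZE - 1].next_desc);
	return 0;
}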
/* Read and write over the MII Management Data I/O (MDIO) interface. */
static int mdio_read(struct net_device *dev, int phy_id, int regnum)
{
	long ioaddr = dev->base_addr;
	struct rhine_private *rp = netdev_priv(dev);
	int result;

	rhine_disable_linkmon(ioaddr, rp->quirks);

	/* rhine_disable_linkmon already cleared MIICmd */
	writeb(phy_id, ioaddr + MIIPhyAddr);
	writeb(regnum, ioaddr + MIIRegAddr);
	writeb(0x40, ioaddr + MIICmd);		/* Trigger read */
	RHINE_WAIT_FOR(!(readb(ioaddr + MIICmd) & 0x40));
	result = readw(ioaddr + MIIData);

	rhine_enable_linkmon(ioaddr);
	return result;
}

static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
{
	struct rhine_private *rp = netdev_priv(dev);
	long ioaddr = dev->base_addr;

	rhine_disable_linkmon(ioaddr, rp->quirks);

	/* rhine_disable_linkmon already cleared MIICmd */
	writeb(phy_id, ioaddr + MIIPhyAddr);
	writeb(regnum, ioaddr + MIIRegAddr);
	writew(value, ioaddr + MIIData);
	writeb(0x20, ioaddr + MIICmd);		/* Trigger write */
	RHINE_WAIT_FOR(!(readb(ioaddr + MIICmd) & 0x20));

	rhine_enable_linkmon(ioaddr);
}

static int rhine_open(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	long ioaddr = dev->base_addr;
	int rc;

	rc = request_irq(rp->pdev->irq, &rhine_interrupt, SA_SHIRQ, dev->name,
			dev);
	if (rc)
		return rc;

	if (debug > 1)
		printk(KERN_DEBUG "%s: rhine_open() irq %d.\n",
		       dev->name, rp->pdev->irq);

	rc = alloc_ring(dev);
	if (rc)
		return rc;
	alloc_rbufs(dev);
	alloc_tbufs(dev);
	rhine_chip_reset(dev);
	init_registers(dev);
	if (debug > 2)
		printk(KERN_DEBUG "%s: Done rhine_open(), status %4.4x "
		       "MII status: %4.4x.\n",
		       dev->name, readw(ioaddr + ChipCmd),
		       mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));

	netif_start_queue(dev);

	return 0;
}

static void rhine_tx_timeout(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	long ioaddr = dev->base_addr;

	printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
	       "%4.4x, resetting...\n",
	       dev->name, readw(ioaddr + IntrStatus),
	       mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));

	/* protect against concurrent rx interrupts */
	disable_irq(rp->pdev->irq);

	spin_lock(&rp->lock);

	/* clear all descriptors */
	free_tbufs(dev);
	free_rbufs(dev);
	alloc_tbufs(dev);
	alloc_rbufs(dev);

	/* Reinitialize the hardware. */
	rhine_chip_reset(dev);
	init_registers(dev);

	spin_unlock(&rp->lock);
	enable_irq(rp->pdev->irq);

	dev->trans_start = jiffies;
	rp->stats.tx_errors++;
	netif_wake_queue(dev);
}

static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	long ioaddr = dev->base_addr;
	unsigned entry;

	/* Caution: the write order is important here, set the field
	   with the "ownership" bits last. */

	/* Calculate the next Tx descriptor entry. */
	entry = rp->cur_tx % TX_RING_SIZE;

	if (skb->len < ETH_ZLEN) {
		skb = skb_padto(skb, ETH_ZLEN);
		if (skb == NULL)
			return 0;
	}

	rp->tx_skbuff[entry] = skb;

	if ((rp->quirks & rqRhineI) &&
	    (((long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_HW)) {
		/* Must use alignment buffer. */
		if (skb->len > PKT_BUF_SZ) {
			/* packet too long, drop it */
			dev_kfree_skb(skb);
			rp->tx_skbuff[entry] = NULL;
			rp->stats.tx_dropped++;
			return 0;
		}
		skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
		rp->tx_skbuff_dma[entry] = 0;
		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
						      (rp->tx_buf[entry] -
						       rp->tx_bufs));
	} else {
		rp->tx_skbuff_dma[entry] =
			pci_map_single(rp->pdev, skb->data, skb->len,
				       PCI_DMA_TODEVICE);
		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
	}

	rp->tx_ring[entry].desc_length =
		cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));

	/* lock eth irq */
	spin_lock_irq(&rp->lock);
	wmb();
	rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
	wmb();

	rp->cur_tx++;

	/* Non-x86 Todo: explicitly flush cache lines here. */

	/* Wake the potentially-idle transmit channel */
	writeb(readb(ioaddr + ChipCmd1) | Cmd1TxDemand,
	       ioaddr + ChipCmd1);
	IOSYNC;

	if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
		netif_stop_queue(dev);

	dev->trans_start = jiffies;

	spin_unlock_irq(&rp->lock);

	if (debug > 4) {
		printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
		       dev->name, rp->cur_tx-1, entry);
	}
	return 0;
}
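/*
 * Editor's sketch (not part of via-rhine.c): the ownership handoff that
 * rhine_start_tx() above performs. The driver fills addr/desc_length first,
 * then sets DescOwn behind a write barrier (wmb()), so the chip never sees a
 * half-written descriptor; rhine_tx() later refuses to reclaim any slot still
 * marked DescOwn. C11 atomics stand in for the barrier here; DEMO_OWN and the
 * demo_* names are illustrative assumptions, not driver symbols.
 */
#include <stdatomic.h>
#include <stdint.h>

#define DEMO_OWN 0x80000000u		/* stand-in for DescOwn */

struct demo_txd {
	_Atomic uint32_t tx_status;
	uint32_t desc_length;
	uint32_t addr;
};

/* Producer side: publish the descriptor, ownership bit last. */
static void demo_queue(struct demo_txd *d, uint32_t buf, uint32_t len)
{
	d->addr = buf;
	d->desc_length = len;
	/* The release store plays the role of wmb() + tx_status write. */
	atomic_store_explicit(&d->tx_status, DEMO_OWN, memory_order_release);
}

/* Consumer side: only touch slots the "hardware" has given back. */
static int demo_reclaimable(struct demo_txd *d)
{
	uint32_t s = atomic_load_explicit(&d->tx_status, memory_order_acquire);
	return !(s & DEMO_OWN);
}

int main(void)
{
	struct demo_txd d = { 0 };

	demo_queue(&d, 0x20000000u, 60);
	return demo_reclaimable(&d);	/* 0: still owned by the NIC model */
}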
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *rgs)
{
	struct net_device *dev = dev_instance;
	long ioaddr;
	u32 intr_status;
	int boguscnt = max_interrupt_work;
	int handled = 0;

	ioaddr = dev->base_addr;

	while ((intr_status = get_intr_status(dev))) {
		handled = 1;

		/* Acknowledge all of the current interrupt sources ASAP. */
		if (intr_status & IntrTxDescRace)
			writeb(0x08, ioaddr + IntrStatus2);
		writew(intr_status & 0xffff, ioaddr + IntrStatus);
		IOSYNC;

		if (debug > 4)
			printk(KERN_DEBUG "%s: Interrupt, status %8.8x.\n",
			       dev->name, intr_status);

		if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped |
		    IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf))
			rhine_rx(dev);

		if (intr_status & (IntrTxErrSummary | IntrTxDone)) {
			if (intr_status & IntrTxErrSummary) {
				/* Avoid scavenging before Tx engine turned off */
				RHINE_WAIT_FOR(!(readb(ioaddr+ChipCmd) & CmdTxOn));
				if (debug > 2 &&
				    readb(ioaddr+ChipCmd) & CmdTxOn)
					printk(KERN_WARNING "%s: "
					       "rhine_interrupt() Tx engine "
					       "still on.\n", dev->name);
			}
			rhine_tx(dev);
		}

		/* Abnormal error summary/uncommon events handlers. */
		if (intr_status & (IntrPCIErr | IntrLinkChange |
				   IntrStatsMax | IntrTxError | IntrTxAborted |
				   IntrTxUnderrun | IntrTxDescRace))
			rhine_error(dev, intr_status);

		if (--boguscnt < 0) {
			printk(KERN_WARNING "%s: Too much work at interrupt, "
			       "status=%#8.8x.\n",
			       dev->name, intr_status);
			break;
		}
	}

	if (debug > 3)
		printk(KERN_DEBUG "%s: exiting interrupt, status=%8.8x.\n",
		       dev->name, readw(ioaddr + IntrStatus));

	return IRQ_RETVAL(handled);
}
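/*
 * Editor's sketch (not part of via-rhine.c): the shape of the service loop in
 * rhine_interrupt() above. Status is re-read until it drains, each batch is
 * acknowledged before it is handled, and a work budget (boguscnt, seeded from
 * max_interrupt_work in the driver) stops a stuck source from livelocking the
 * CPU. demo_read_status()/demo_ack() are hypothetical stand-ins for the
 * IntrStatus register accesses.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t demo_pending = 0x0005;	/* fake pending interrupt sources */

static uint32_t demo_read_status(void) { return demo_pending; }
static void demo_ack(uint32_t bits) { demo_pending &= ~bits; }

int main(void)
{
	int budget = 20;			/* like max_interrupt_work */
	uint32_t status;

	while ((status = demo_read_status()) != 0) {
		demo_ack(status);		/* acknowledge ASAP... */
		printf("handling status %#x\n", (unsigned)status); /* ...then handle */
		if (--budget < 0) {
			puts("too much work at interrupt");
			break;
		}
	}
	return 0;
}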
/* This routine is logically part of the interrupt handler, but isolated
   for clarity. */
static void rhine_tx(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;

	spin_lock(&rp->lock);

	/* find and cleanup dirty tx descriptors */
	while (rp->dirty_tx != rp->cur_tx) {
		txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
		if (debug > 6)
			printk(KERN_DEBUG " Tx scavenge %d status %8.8x.\n",
			       entry, txstatus);
		if (txstatus & DescOwn)
			break;
		if (txstatus & 0x8000) {
			if (debug > 1)
				printk(KERN_DEBUG "%s: Transmit error, "
				       "Tx status %8.8x.\n",
				       dev->name, txstatus);
			rp->stats.tx_errors++;
			if (txstatus & 0x0400) rp->stats.tx_carrier_errors++;
			if (txstatus & 0x0200) rp->stats.tx_window_errors++;
			if (txstatus & 0x0100) rp->stats.tx_aborted_errors++;
			if (txstatus & 0x0080) rp->stats.tx_heartbeat_errors++;
			if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
			    (txstatus & 0x0800) || (txstatus & 0x1000)) {
				rp->stats.tx_fifo_errors++;
				rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
				break; /* Keep the skb - we try again */
			}
			/* Transmitter restarted in 'abnormal' handler. */
		} else {
			if (rp->quirks & rqRhineI)
				rp->stats.collisions += (txstatus >> 3) & 0x0F;
			else
				rp->stats.collisions += txstatus & 0x0F;
			if (debug > 6)
				printk(KERN_DEBUG "collisions: %1.1x:%1.1x\n",
				       (txstatus >> 3) & 0xF,
				       txstatus & 0xF);
			rp->stats.tx_bytes += rp->tx_skbuff[entry]->len;
			rp->stats.tx_packets++;
		}
		/* Free the original skb. */
		if (rp->tx_skbuff_dma[entry]) {
			pci_unmap_single(rp->pdev,
					 rp->tx_skbuff_dma[entry],
					 rp->tx_skbuff[entry]->len,
					 PCI_DMA_TODEVICE);
		}
		dev_kfree_skb_irq(rp->tx_skbuff[entry]);
		rp->tx_skbuff[entry] = NULL;
		entry = (++rp->dirty_tx) % TX_RING_SIZE;
	}
	if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
		netif_wake_queue(dev);

	spin_unlock(&rp->lock);
}
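/*
 * Editor's sketch (not part of via-rhine.c): the rx_copybreak trade-off
 * applied in rhine_rx() below. Frames shorter than the threshold are copied
 * into a fresh, right-sized buffer so the full-size ring buffer stays mapped
 * for the NIC; larger frames hand the ring buffer itself upstream and the
 * caller must hang a replacement. demo_receive() and demo_copybreak are
 * illustrative assumptions, not driver symbols.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

static size_t demo_copybreak = 256;	/* stand-in for rx_copybreak */

/* Returns the buffer to pass up; *recycled tells the caller whether the
   ring buffer stayed in place (small copy) or was consumed (large frame). */
static uint8_t *demo_receive(uint8_t *ring_buf, size_t len, bool *recycled)
{
	if (len < demo_copybreak) {
		uint8_t *copy = malloc(len);

		if (copy)
			memcpy(copy, ring_buf, len);
		*recycled = true;	/* ring buffer stays in the ring */
		return copy;
	}
	*recycled = false;		/* caller must hang a new buffer */
	return ring_buf;
}

int main(void)
{
	uint8_t frame[64] = { 0 };
	bool recycled;
	uint8_t *buf = demo_receive(frame, sizeof(frame), &recycled);

	if (recycled)
		free(buf);		/* small frame: we got a heap copy */
	return 0;
}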
/* This routine is logically part of the interrupt handler, but isolated
   for clarity and better register allocation. */
static void rhine_rx(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	int entry = rp->cur_rx % RX_RING_SIZE;
	int boguscnt = rp->dirty_rx + RX_RING_SIZE - rp->cur_rx;

	if (debug > 4) {
		printk(KERN_DEBUG "%s: rhine_rx(), entry %d status %8.8x.\n",
		       dev->name, entry,
		       le32_to_cpu(rp->rx_head_desc->rx_status));
	}

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while (!(rp->rx_head_desc->rx_status & cpu_to_le32(DescOwn))) {
		struct rx_desc *desc = rp->rx_head_desc;
		u32 desc_status = le32_to_cpu(desc->rx_status);
		int data_size = desc_status >> 16;

		if (debug > 4)
			printk(KERN_DEBUG " rhine_rx() status is %8.8x.\n",
			       desc_status);
		if (--boguscnt < 0)
			break;
		if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
			if ((desc_status & RxWholePkt) != RxWholePkt) {
				printk(KERN_WARNING "%s: Oversized Ethernet "
				       "frame spanned multiple buffers, entry "
				       "%#x length %d status %8.8x!\n",
				       dev->name, entry, data_size,
				       desc_status);
				printk(KERN_WARNING "%s: Oversized Ethernet "
				       "frame %p vs %p.\n", dev->name,
				       rp->rx_head_desc, &rp->rx_ring[entry]);
				rp->stats.rx_length_errors++;
			} else if (desc_status & RxErr) {
				/* There was an error. */
				if (debug > 2)
					printk(KERN_DEBUG " rhine_rx() Rx "
					       "error was %8.8x.\n",
					       desc_status);
				rp->stats.rx_errors++;
				if (desc_status & 0x0030) rp->stats.rx_length_errors++;
				if (desc_status & 0x0048) rp->stats.rx_fifo_errors++;
				if (desc_status & 0x0004) rp->stats.rx_frame_errors++;
				if (desc_status & 0x0002) {
					/* this can also be updated outside the interrupt handler */
					spin_lock(&rp->lock);
					rp->stats.rx_crc_errors++;
					spin_unlock(&rp->lock);
				}
			}
		} else {
			struct sk_buff *skb;
			/* Length should omit the CRC */
			int pkt_len = data_size - 4;

			/* Check if the packet is long enough to accept without
			   copying to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak &&
				(skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb->dev = dev;
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(rp->pdev,
							    rp->rx_skbuff_dma[entry],
							    rp->rx_buf_sz,
							    PCI_DMA_FROMDEVICE);
