rtk8189c.c — ADTEK RTK8189C Linux 2.4.X Driver (C source, page 1 of 4)

		skb->dev = dev;     /* Mark as being used by this device. */
		np->rx_ring[np->dirty_rx].buf_addr =
						 virt_to_le32desc(skb->tail);
		np->rx_ring[np->dirty_rx].ctrl_length =
						 cpu_to_le32(np->rx_buf_sz);
		np->rx_ring[np->dirty_rx].status = cpu_to_le32(DescOwn);

		--np->lackrxcount;

		/* update dirty_rx */
		if (np->lackrxcount != 0) {
			++np->dirty_rx;
			if (np->dirty_rx == RX_RING_SIZE)
				np->dirty_rx = 0;
		}
	}

	writel(virt_to_bus(np->tx_ring + np->dirty_tx), ioaddr + TxRingPtr);
	writel(virt_to_bus(np->rx_ring + np->cur_rx), ioaddr + RxRingPtr);

	/* Configure the PCI bus bursts and FIFO thresholds. */
	writel(0x038, ioaddr + PCIBusCfg);
	/* Enable interrupts by setting the interrupt mask. */
	writel(IntrRxDone | IntrRxErr | IntrRxEmpty | IntrTxDone | IntrTxEmpty
		   | StatsMax | RxOverflow | TxUnderrun | IntrPCIErr | NWayDone
		   | LinkChange,
		   ioaddr + IntrEnable);
	writel(np->txrx_config, ioaddr + RxConfig);
	writel(0, ioaddr + TxStartDemand);
	writel(0, ioaddr + RxStartDemand);
}

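/* Periodic housekeeping timer, re-armed every 10 seconds (next_tick).
   It acts as a Tx watchdog (resetting the NIC if a transmit has been
   pending longer than TX_TIMEOUT) and retries Rx buffer allocations
   that failed earlier (lackrxcount tracks the shortfall). */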
static void netdev_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	long ioaddr = dev->base_addr;
	int next_tick = 10*HZ;
	struct sk_buff *skb;

	if (debug > 3) {
		printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x.\n",
			   dev->name, (int)readw(ioaddr + PHYMgmt + 10));
	}

//	  /* This will either have a small false-trigger window or will not
//	     catch tbusy incorrectly set when the queue is empty. */
//	  if (netif_queue_paused(dev)  &&
//		  np->cur_tx - np->dirty_tx > 1  &&
//		  (jiffies - dev->trans_start) > TX_TIMEOUT) {
//		  tx_timeout(dev);
//	  }
	if (np->txfreecount != TX_RING_SIZE &&
	    (jiffies - dev->trans_start) > TX_TIMEOUT) {
		tx_timeout(dev);
	}

	/* Refill the Rx ring buffers. */
	while (np->lackrxcount) {
		skb = dev_alloc_skb(np->rx_buf_sz);
		if (skb == NULL)
		   break;	    /* Better luck next round. */

		skb->dev = dev;     /* Mark as being used by this device. */
		np->rx_ring[np->dirty_rx].buf_addr =
						 virt_to_le32desc(skb->tail);
		np->rx_ring[np->dirty_rx].ctrl_length =
						 cpu_to_le32(np->rx_buf_sz);
		np->rx_ring[np->dirty_rx].status = cpu_to_le32(DescOwn);

		--np->lackrxcount;

		/* update dirty_rx */
		if (np->lackrxcount != 0) {
			++np->dirty_rx;
			if (np->dirty_rx == RX_RING_SIZE)
				np->dirty_rx = 0;
		}
	}

	//check_duplex(dev);
	np->timer.expires = jiffies + next_tick;
	add_timer(&np->timer);
}

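/* Tx watchdog handler: dump the ring state for debugging, reset the
   controller, and record the event as a transmit error. */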
static void tx_timeout(struct net_device *dev)
{
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	long ioaddr = dev->base_addr;

	if (debug > 1)
		printk(KERN_WARNING "%s: Transmit timed out, status %8.8x,"
		   " resetting...\n", dev->name, (int)readl(ioaddr + IntrStatus));

#ifndef __alpha__
	{
		int i;
		printk(KERN_DEBUG "  Rx ring %8.8x: ", (int)np->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++)
			printk(" %8.8x", (unsigned int)np->rx_ring[i].status);
		printk("\n"KERN_DEBUG"	Tx ring %8.8x: ", (int)np->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(" %8.8x", np->tx_ring[i].status);
		printk("\n");
	}
#endif

	resetnic(dev);

	dev->trans_start = jiffies;
	np->stats.tx_errors++;
	return;
}


/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
{
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	int i;

//	  np->tx_full = 0;
	np->cur_rx = np->cur_tx = 0;
	np->dirty_rx = np->dirty_tx = 0;
	np->txfreecount = TX_RING_SIZE;

	np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
	np->rx_head_desc = &np->rx_ring[0];

	/* Initialize all Rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].ctrl_length = cpu_to_le32(np->rx_buf_sz);
		np->rx_ring[i].status = 0;
		np->rx_ring[i].next_desc = virt_to_le32desc(&np->rx_ring[i+1]);
		np->rx_ring[i].rxbuffer = NULL;
	}
	/* Mark the last entry as wrapping the ring. */
	np->rx_ring[i-1].next_desc = virt_to_le32desc(&np->rx_ring[0]);

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
		if (skb == NULL)
			break;
		np->rx_ring[i].rxbuffer = skb;
		skb->dev = dev;      /* Mark as being used by this device. */
		np->rx_ring[i].buf_addr = virt_to_le32desc(skb->tail);
		np->rx_ring[i].status = cpu_to_le32(DescOwn);
	}
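	/* lackrxcount records how many Rx slots still lack a buffer; if any
	   allocation above failed, dirty_rx marks the first empty slot so
	   the refill loops (e.g. in netdev_timer) can resume there. */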
	np->lackrxcount = RX_RING_SIZE - i;
	if (np->lackrxcount)
		np->dirty_rx = (unsigned int)i;
	else
		np->dirty_rx = 0;

	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = 0;
		np->tx_ring[i].status = 0;
		np->tx_ring[i].next_desc = virt_to_le32desc(&np->tx_ring[i+1]);
	}
	np->tx_ring[i-1].next_desc = virt_to_le32desc(&np->tx_ring[0]);
	return;
}

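/* Queue one packet for transmission: claim the next free Tx descriptor,
   point it at the skb data, and hand ownership to the NIC.  Returns 1
   (telling the stack to retry later) when no descriptor is free. */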
static int start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	unsigned entry;

	if (!np->txfreecount) {
		if (jiffies - dev->trans_start > TX_TIMEOUT)
			tx_timeout(dev);
		return 1;
	}

	/* Block a timer-based transmit from overlapping.  This happens when
	   packets are presumed lost, and we use this to check the Tx status. */
	if (netif_pause_tx_queue(dev) != 0) {
	     /* This watchdog code is redundant with the media monitor timer. */
		if (jiffies - dev->trans_start > TX_TIMEOUT)
			tx_timeout(dev);
		return 1;
	}

	/* Note: Ordering is important here, set the field with the
	   "ownership" bit last, and only then increment cur_tx. */

	/* Calculate the next Tx descriptor entry. */
//	entry = np->cur_tx % TX_RING_SIZE;
	entry = np->cur_tx;

	np->tx_skbuff[entry] = skb;

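	/* Fill in the descriptor.  Note the frame length appears twice in
	   ctrl_length; assuming a Tulip-style layout (bits 0-10 = buffer-1
	   size, bits 11-21 = buffer-2 size), this writes skb->len into both
	   size fields.  That layout is an assumption from the shifts, not
	   confirmed against the RTK8189C datasheet. */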
	np->tx_ring[entry].buf_addr = virt_to_le32desc(skb->data);
	np->tx_ring[entry].ctrl_length =
		cpu_to_le32(TxIntrOnDone | TxNormalPkt | (skb->len << 11) | skb->len);
	np->tx_ring[entry].status = cpu_to_le32(DescOwn);
	np->cur_tx++;
	if (np->cur_tx == TX_RING_SIZE)
		np->cur_tx = 0;

	/* On some architectures: explicitly flush cache lines here. */
//	  if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1) {
//		  np->tx_full = 1;
//		  /* Check for a just-cleared queue. */
//		  if (np->cur_tx - (volatile unsigned int)np->dirty_tx
//			  < TX_QUEUE_LEN - 2) {
//			  np->tx_full = 0;
//			  netif_unpause_tx_queue(dev);
//		  }
//	  } else
//		  netif_unpause_tx_queue(dev);		  /* Typical path */
	netif_unpause_tx_queue(dev);

	--np->txfreecount;

	/* Wake the potentially-idle transmit channel. */
	writel(0, dev->base_addr + TxStartDemand);

	dev->trans_start = jiffies;

	if (debug > 4) {
		printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
			   dev->name, np->cur_tx, entry);
	}
	return 0;
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct netdev_private *np;
	long ioaddr;
	int boguscnt = max_interrupt_work;

#ifndef final_version			/* Can never occur. */
	if (dev == NULL) {
		printk (KERN_ERR "Netdev interrupt handler(): IRQ %d for unknown "
				"device.\n", irq);
		return;
	}
#endif

	ioaddr = dev->base_addr;
	np = (struct netdev_private *)dev->priv;
#if defined(__i386__)  &&  LINUX_VERSION_CODE < 0x020300
	/* A lock to prevent simultaneous entry bug on Intel SMP machines. */
	if (test_and_set_bit(0, (void*)&dev->interrupt)) {
		printk(KERN_ERR"%s: SMP simultaneous entry of an interrupt handler.\n",
			   dev->name);
		dev->interrupt = 0;	/* Avoid halting machine. */
		return;
	}
#endif

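	/* Main service loop: read and acknowledge all pending interrupt
	   sources, run the Rx handler, reap completed Tx descriptors, then
	   handle rare events.  boguscnt (max_interrupt_work) bounds the
	   work done in a single interrupt. */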
	do {
		u32 intr_status = readl(ioaddr + IntrStatus);

		/* Acknowledge all of the current interrupt sources ASAP. */
		writel(intr_status, ioaddr + IntrStatus);

		if (debug > 4)
			printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
				   dev->name, intr_status);

		if (intr_status == 0)
			break;

		if (intr_status & IntrRxEmpty) {
			if (debug > 1)
				printk(KERN_ERR "%s: Receive buffer is empty! "
				       "lack=%d, cur_rx=%d, dirty_rx=%d\n",
				       dev->name, np->lackrxcount, np->cur_rx,
				       np->dirty_rx);
		}

		if (intr_status & IntrRxDone)
			netdev_rx(dev);

//		  for (; np->cur_tx - np->dirty_tx > 0; ) {
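		/* Reap completed Tx descriptors, stopping at the first one
		   the NIC still owns (DescOwn set). */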
		while (np->txfreecount != TX_RING_SIZE) {
//			int entry = np->dirty_tx % TX_RING_SIZE;
			int entry = np->dirty_tx;
			int tx_status = le32_to_cpu(np->tx_ring[entry].status);
			if (tx_status & DescOwn)
				break;

			++np->txfreecount;

			if (debug > 5)
				printk(KERN_DEBUG "%s: Transmit done, Tx status %8.8x.\n",
					   dev->name, tx_status);
			if (tx_status & (TxErrAbort | TxErrCarrier | TxErrLate
			    | TxErr16Colls | TxErrHeartbeat)) {
				if (debug > 3)
					printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
						   dev->name, tx_status);
				np->stats.tx_errors++;
				if (tx_status & TxErrCarrier) np->stats.tx_carrier_errors++;
				if (tx_status & TxErrLate) np->stats.tx_window_errors++;
				if (tx_status & TxErrHeartbeat) np->stats.tx_heartbeat_errors++;
#ifdef ETHER_STATS
				if (tx_status & TxErr16Colls) np->stats.collisions16++;
				if (tx_status & TxErrAbort) np->stats.tx_aborted_errors++;
#else
				if (tx_status & (TxErr16Colls|TxErrAbort))
					np->stats.tx_aborted_errors++;
#endif
			} else {
				np->stats.tx_packets++;
				np->stats.collisions += tx_status & TxColls;
#if LINUX_VERSION_CODE > 0x20127
				np->stats.tx_bytes += np->tx_skbuff[entry]->len;
#endif
#ifdef ETHER_STATS
				if (tx_status & TxErrDefer) np->stats.tx_deferred++;
#endif
			}

			/* Free the original skb. */
			dev_free_skb_irq(np->tx_skbuff[entry]);
			np->tx_skbuff[entry] = 0;

			/* update dirty_tx */
			++np->dirty_tx;
			if (np->dirty_tx == TX_RING_SIZE)
				np->dirty_tx = 0;
		}

		/* Note the 4-slot hysteresis in marking the queue non-full. */
//		  if (np->tx_full  &&
//		    np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
//			/* The ring is no longer full, allow new TX entries. */
//			  np->tx_full = 0;
//			  netif_resume_tx_queue(dev);
//		  }
		if (np->txfreecount)
		    netif_resume_tx_queue(dev);

		/* Handle the abnormal error summary and other uncommon events. */
	  //	if (intr_status & (IntrRxErr | IntrRxEmpty | StatsMax
		if (intr_status & (IntrRxErr | StatsMax
		  | RxOverflow | TxUnderrun | IntrPCIErr | NWayDone
		  | LinkChange))
		   netdev_error(dev, intr_status);

		if (--boguscnt < 0) {
			if (debug > 1)
				printk(KERN_WARNING "%s: Too much work at interrupt, "
					   "status=0x%4.4x.\n",
					   dev->name, intr_status);
			break;
		}
	} while (1);

	if (debug > 3)
		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
			   dev->name, (int)readl(ioaddr + IntrStatus));

#if defined(__i386__)  &&  LINUX_VERSION_CODE < 0x020300
	clear_bit(0, (void*)&dev->interrupt);
#endif
	return;
}

/* This routine is logically part of the interrupt handler, but separated
   for clarity and better register allocation. */
static int netdev_rx(struct net_device *dev)
{
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	struct sk_buff *skb;

	if (debug > 4) {
		printk(KERN_DEBUG " In netdev_rx(), cur_rx %d status %4.4x.\n",
			   np->cur_rx, np->rx_ring[np->cur_rx].status);
	}

	/* Walk descriptors the NIC has handed back (DescOwn clear); each
	   completed descriptor holds a received frame to send up. */
	while (!(np->rx_head_desc->status & cpu_to_le32(DescOwn))) {
		struct netdev_desc_rx *desc = np->rx_head_desc;
		u32 desc_status = le32_to_cpu(desc->status);

		if (debug > 4)
			printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n",
				   desc_status);

		if (np->lackrxcount==RX_RING_SIZE)
			break;

		if ((desc_status & RxDescWholePkt) != RxDescWholePkt) {
			if (debug > 1)
			  printk(KERN_WARNING "%s: Oversized Ethernet frame "
			       "spanned multiple buffers, entry %#x length "
			       "%d status %4.4x!\n", dev->name, np->cur_rx,
			       desc_status >> 16, desc_status);
			np->stats.rx_length_errors++;
		} else if (desc_status & RxDescErrSum) {
			/* There was an error. */
			if (debug > 2)
				printk(KERN_DEBUG "  netdev_rx() Rx error was"
					" %8.8x.\n", desc_status);
			np->stats.rx_errors++;
			if (desc_status & (RxErrLong|RxErrRunt))
				np->stats.rx_length_errors++;
			if (desc_status & (RxErrFrame|RxErrCode))
				np->stats.rx_frame_errors++;
			if (desc_status & RxErrCRC)
				np->stats.rx_crc_errors++;
		} else {
			struct sk_buff *skb;
			/* Reported length should omit the CRC. */
			u16 pkt_len = ((desc_status >> 16) & 0xfff) - 4;

#ifndef final_version
			if (debug > 4)
				printk(KERN_DEBUG "  netdev_rx() normal Rx pkt"
					   " length %d.\n", pkt_len);
#endif
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak
				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb->dev = dev;
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				eth_copy_and_sum(skb, np->rx_ring[np->cur_rx].rxbuffer->tail, pkt_len, 0);
				skb_put(skb, pkt_len);
			} else {
				skb = np->rx_ring[np->cur_rx].rxbuffer;
				skb_put(skb, pkt_len);
				np->rx_ring[np->cur_rx].rxbuffer = NULL;
			}
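			/* The ring skb has been handed upstream and rxbuffer
			   is now NULL; the slot must be restocked by the
			   refill logic (as in netdev_timer) before the NIC
			   can reuse it. */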
#ifndef final_version				/* Remove after testing. */
			/* You will want this info for the initial debug. */
