📄 myson803.c

📁 A sample Linux network-interface (NIC) driver — hope it helps!
💻 C
📖 Page 1 of 4
	/* Initialize other registers. */
	/* Configure the PCI bus bursts and FIFO thresholds. */
	writel(0x01f8, ioaddr + PCIBusCfg);

	if (dev->if_port == 0)
		dev->if_port = np->default_port;

	np->txrx_config = TxEnable | RxEnable | RxFlowCtrl | 0x00600000;
	np->mcast_filter[0] = np->mcast_filter[1] = 0;
	np->rx_died = 0;
	set_rx_mode(dev);
	netif_start_tx_queue(dev);

	/* Enable interrupts by setting the interrupt mask. */
	np->intr_enable = IntrRxDone | IntrRxErr | IntrRxEmpty | IntrTxDone
		| IntrTxEmpty | StatsMax | RxOverflow | TxUnderrun | IntrPCIErr
		| NWayDone | LinkChange;
	writel(np->intr_enable, ioaddr + IntrEnable);

	if (np->msg_level & NETIF_MSG_IFUP)
		printk(KERN_DEBUG "%s: Done netdev_open(), PHY status: %x %x.\n",
			   dev->name, (int)readw(ioaddr + PHYMgmt),
			   (int)readw(ioaddr + PHYMgmt + 2));

	/* Set the timer to check for link beat. */
	init_timer(&np->timer);
	np->timer.expires = jiffies + 3*HZ;
	np->timer.data = (unsigned long)dev;
	np->timer.function = &netdev_timer;				/* timer handler */
	add_timer(&np->timer);

	return 0;
}

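/* Read the MII negotiation result and switch the chip between half and
   full duplex to match what was negotiated with the link partner. */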
static void check_duplex(struct net_device *dev)
{
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	long ioaddr = dev->base_addr;
	int new_tx_mode = np->txrx_config;

	if (!np->medialock) {
		int mii_reg5 = mdio_read(dev, np->phys[0], 5);
		int negotiated = mii_reg5 & np->advertising;
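		/* Full duplex if 100baseTx-FD (0x0100) was negotiated, or if
		   10baseT-FD (0x0040) is the only common capability in 0x01C0. */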
		int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
		if (np->duplex_lock  ||  mii_reg5 == 0xffff)
			return;
		if (duplex)
			new_tx_mode |= TxModeFDX;
		if (np->full_duplex != duplex) {
			np->full_duplex = duplex;
			if (np->msg_level & NETIF_MSG_LINK)
				printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d"
					   " negotiated capability %4.4x.\n", dev->name,
					   duplex ? "full" : "half", np->phys[0], negotiated);
		}
	}
	if (np->txrx_config != new_tx_mode) {
		/* Remember the new mode so later rewrites of RxConfig keep it. */
		np->txrx_config = new_tx_mode;
		writel(new_tx_mode, ioaddr + RxConfig);
	}
}

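/* Periodic timer: watch for stuck transmits, a stalled Rx engine, and
   duplex changes.  Reschedules itself every 10 seconds. */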
static void netdev_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	long ioaddr = dev->base_addr;
	int next_tick = 10*HZ;

	if (np->msg_level & NETIF_MSG_TIMER) {
		printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x.\n",
			   dev->name, (int)readw(ioaddr + PHYMgmt + 10));
	}
	/* This will either have a small false-trigger window or will not catch
	   tbusy incorrectly set when the queue is empty. */
	if (netif_queue_paused(dev)  &&
		np->cur_tx - np->dirty_tx > 1  &&
		(jiffies - dev->trans_start) > TX_TIMEOUT) {
		tx_timeout(dev);
	}
	/* It's dead Jim, no race condition. */
	if (np->rx_died)
		netdev_rx(dev);
	check_duplex(dev);
	np->timer.expires = jiffies + next_tick;
	add_timer(&np->timer);
}

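/* Recover from a transmit timeout: log the ring state, restart the Tx
   engine at the first unacknowledged descriptor, and kick it. */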
static void tx_timeout(struct net_device *dev)
{
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	long ioaddr = dev->base_addr;

	printk(KERN_WARNING "%s: Transmit timed out, status %8.8x,"
		   " resetting...\n", dev->name, (int)readl(ioaddr + IntrStatus));

	if (np->msg_level & NETIF_MSG_TX_ERR) {
		int i;
		printk(KERN_DEBUG "  Rx ring %p: ", np->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++)
			printk(" %8.8x", (unsigned int)np->rx_ring[i].status);
		printk("\n"KERN_DEBUG"  Tx ring %p: ", np->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(" %8.8x", np->tx_ring[i].status);
		printk("\n");
	}

	/* Stop and restart the chip's Tx processes. */
	writel(np->txrx_config & ~TxEnable, ioaddr + RxConfig);
	writel(virt_to_bus(np->tx_ring + (np->dirty_tx%TX_RING_SIZE)),
		   ioaddr + TxRingPtr);
	writel(np->txrx_config, ioaddr + RxConfig);
	/* Trigger an immediate transmit demand. */
	writel(0, dev->base_addr + TxStartDemand);

	dev->trans_start = jiffies;
	np->stats.tx_errors++;
	return;
}


/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
{
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	int i;

	np->tx_full = 0;
	np->cur_rx = np->cur_tx = 0;
	np->dirty_rx = np->dirty_tx = 0;

	np->rx_buf_sz = (dev->mtu <= 1532 ? PKT_BUF_SZ : dev->mtu + 4);
	np->rx_head_desc = &np->rx_ring[0];

	/* Initialize all Rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].ctrl_length = cpu_to_le32(np->rx_buf_sz);
		np->rx_ring[i].status = 0;
		np->rx_ring[i].next_desc = virt_to_le32desc(&np->rx_ring[i+1]);
		np->rx_skbuff[i] = 0;
	}
	/* Mark the last entry as wrapping the ring. */
	np->rx_ring[i-1].next_desc = virt_to_le32desc(&np->rx_ring[0]);

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
		np->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb->dev = dev;			/* Mark as being used by this device. */
		np->rx_ring[i].buf_addr = virt_to_le32desc(skb->tail);
		np->rx_ring[i].status = cpu_to_le32(DescOwn);
	}
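	/* Bias dirty_rx so that a buffer-allocation shortfall above is visible
	   to the refill logic and the missing ring slots are retried later. */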
	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = 0;
		np->tx_ring[i].status = 0;
		np->tx_ring[i].next_desc = virt_to_le32desc(&np->tx_ring[i+1]);
	}
	np->tx_ring[i-1].next_desc = virt_to_le32desc(&np->tx_ring[0]);
	return;
}

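/* Queue one packet for transmission: fill in the next Tx descriptor, hand
   it to the chip, and throttle the queue when the ring is nearly full. */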
static int start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	unsigned entry;

	/* Block a timer-based transmit from overlapping.  This happens when
	   packets are presumed lost, and we use this to check the Tx status. */
	if (netif_pause_tx_queue(dev) != 0) {
		/* This watchdog code is redundant with the media monitor timer. */
		if (jiffies - dev->trans_start > TX_TIMEOUT)
			tx_timeout(dev);
		return 1;
	}

	/* Note: Ordering is important here, set the field with the
	   "ownership" bit last, and only then increment cur_tx. */

	/* Calculate the next Tx descriptor entry. */
	entry = np->cur_tx % TX_RING_SIZE;

	np->tx_skbuff[entry] = skb;

	np->tx_ring[entry].buf_addr = virt_to_le32desc(skb->data);
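	/* The control word carries the packet length in both the buffer-size
	   and packet-length fields of the descriptor. */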
	np->tx_ring[entry].ctrl_length =
		cpu_to_le32(TxIntrOnDone | TxNormalPkt | (skb->len << 11) | skb->len);
	np->tx_ring[entry].status = cpu_to_le32(DescOwn);
	np->cur_tx++;

	/* On some architectures: explicitly flushing cache lines here speeds
	   operation. */

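	/* If this packet nearly fills the Tx queue, stop accepting new ones
	   until the interrupt handler frees some slots. */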
	if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1) {
		np->tx_full = 1;
		/* Check for a just-cleared queue. */
		if (np->cur_tx - (volatile unsigned int)np->dirty_tx
			< TX_QUEUE_LEN - 2) {
			np->tx_full = 0;
			netif_unpause_tx_queue(dev);
		} else
			netif_stop_tx_queue(dev);
	} else
		netif_unpause_tx_queue(dev);		/* Typical path */
	/* Wake the potentially-idle transmit channel. */
	writel(0, dev->base_addr + TxStartDemand);

	dev->trans_start = jiffies;

	if (np->msg_level & NETIF_MSG_TX_QUEUED) {
		printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
			   dev->name, np->cur_tx, entry);
	}
	return 0;
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct netdev_private *np;
	long ioaddr;
	int boguscnt;

#ifndef final_version			/* Can never occur. */
	if (dev == NULL) {
		printk (KERN_ERR "Netdev interrupt handler(): IRQ %d for unknown "
				"device.\n", irq);
		return;
	}
#endif

	ioaddr = dev->base_addr;
	np = (struct netdev_private *)dev->priv;
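	/* Bound the work done in one interrupt to avoid an interrupt livelock. */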
	boguscnt = np->max_interrupt_work;

#if defined(__i386__)  &&  LINUX_VERSION_CODE < 0x020300
	/* A lock to prevent simultaneous entry bug on Intel SMP machines. */
	if (test_and_set_bit(0, (void*)&dev->interrupt)) {
		printk(KERN_ERR"%s: SMP simultaneous entry of an interrupt handler.\n",
			   dev->name);
		dev->interrupt = 0;	/* Avoid halting machine. */
		return;
	}
#endif

	do {
		u32 intr_status = readl(ioaddr + IntrStatus);

		/* Acknowledge all of the current interrupt sources ASAP. */
		writel(intr_status, ioaddr + IntrStatus);

		if (np->msg_level & NETIF_MSG_INTR)
			printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
				   dev->name, intr_status);

		if (intr_status == 0)
			break;

		if (intr_status & IntrRxDone)
			netdev_rx(dev);

		for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
			int entry = np->dirty_tx % TX_RING_SIZE;
			int tx_status = le32_to_cpu(np->tx_ring[entry].status);
			if (tx_status & DescOwn)
				break;
			if (np->msg_level & NETIF_MSG_TX_DONE)
				printk(KERN_DEBUG "%s: Transmit done, Tx status %8.8x.\n",
					   dev->name, tx_status);
			if (tx_status & (TxErrAbort | TxErrCarrier | TxErrLate
							 | TxErr16Colls | TxErrHeartbeat)) {
				if (np->msg_level & NETIF_MSG_TX_ERR)
					printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
						   dev->name, tx_status);
				np->stats.tx_errors++;
				if (tx_status & TxErrCarrier) np->stats.tx_carrier_errors++;
				if (tx_status & TxErrLate) np->stats.tx_window_errors++;
				if (tx_status & TxErrHeartbeat) np->stats.tx_heartbeat_errors++;
#ifdef ETHER_STATS
				if (tx_status & TxErr16Colls) np->stats.collisions16++;
				if (tx_status & TxErrAbort) np->stats.tx_aborted_errors++;
#else
				if (tx_status & (TxErr16Colls|TxErrAbort))
					np->stats.tx_aborted_errors++;
#endif
			} else {
				np->stats.tx_packets++;
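				/* The low bits of the Tx status word hold the collision
				   count for this packet. */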
				np->stats.collisions += tx_status & TxColls;
#if LINUX_VERSION_CODE > 0x20127
				np->stats.tx_bytes += np->tx_skbuff[entry]->len;
#endif
#ifdef ETHER_STATS
				if (tx_status & TxErrDefer) np->stats.tx_deferred++;
#endif
			}
			/* Free the original skb. */
			dev_free_skb_irq(np->tx_skbuff[entry]);
			np->tx_skbuff[entry] = 0;
		}
		/* Note the 4 slot hysteresis to mark the queue non-full. */
		if (np->tx_full  &&  np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
			/* The ring is no longer full, allow new TX entries. */
			np->tx_full = 0;
			netif_resume_tx_queue(dev);
		}

		/* Abnormal error summary/uncommon events handlers. */
		if (intr_status & (IntrRxErr | IntrRxEmpty | StatsMax | RxOverflow
						   | TxUnderrun | IntrPCIErr | NWayDone | LinkChange))
			netdev_error(dev, intr_status);

		if (--boguscnt < 0) {
			printk(KERN_WARNING "%s: Too much work at interrupt, "
				   "status=0x%4.4x.\n",
				   dev->name, intr_status);
			break;
		}
	} while (1);

	if (np->msg_level & NETIF_MSG_INTR)
		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
			   dev->name, (int)readl(ioaddr + IntrStatus));

#if defined(__i386__)  &&  LINUX_VERSION_CODE < 0x020300
	clear_bit(0, (void*)&dev->interrupt);
#endif
	return;
}

/* This routine is logically part of the interrupt handler, but separated
   for clarity and better register allocation. */
static int netdev_rx(struct net_device *dev)
{
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	int entry = np->cur_rx % RX_RING_SIZE;
	int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
	int refilled = 0;

	if (np->msg_level & NETIF_MSG_RX_STATUS) {
		printk(KERN_DEBUG " In netdev_rx(), entry %d status %4.4x.\n",
			   entry, np->rx_ring[entry].status);
	}

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while ( ! (np->rx_head_desc->status & cpu_to_le32(DescOwn))) {
		struct netdev_desc *desc = np->rx_head_desc;
		u32 desc_status = le32_to_cpu(desc->status);

		if (np->msg_level & NETIF_MSG_RX_STATUS)
			printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n",
				   desc_status);
		if (--boguscnt < 0)
			break;
		if ((desc_status & RxDescWholePkt) != RxDescWholePkt) {
			printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
				   "multiple buffers, entry %#x length %d status %4.4x!\n",
				   dev->name, np->cur_rx, desc_status >> 16, desc_status);
			np->stats.rx_length_errors++;
		} else if (desc_status & RxDescErrSum) {
			/* There was an error. */
			if (np->msg_level & NETIF_MSG_RX_ERR)
				printk(KERN_DEBUG "  netdev_rx() Rx error was %8.8x.\n",
					   desc_status);
			np->stats.rx_errors++;
			if (desc_status & (RxErrLong|RxErrRunt))
				np->stats.rx_length_errors++;
			if (desc_status & (RxErrFrame|RxErrCode))
				np->stats.rx_frame_errors++;
			if (desc_status & RxErrCRC)
				np->stats.rx_crc_errors++;
		} else {
			struct sk_buff *skb;
			/* Reported length should omit the CRC. */
			u16 pkt_len = ((desc_status >> 16) & 0xfff) - 4;

#ifndef final_version
			if (np->msg_level & NETIF_MSG_RX_STATUS)
				printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
					   " of %d, bogus_cnt %d.\n",
					   pkt_len, pkt_len, boguscnt);
#endif
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < np->rx_copybreak
				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb->dev = dev;
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
				skb_put(skb, pkt_len);
			} else {
				skb_put(skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			}
#ifndef final_version				/* Remove after testing. */
			/* You will want this info for the initial debug. */
			if (np->msg_level & NETIF_MSG_PKTDATA)
				printk(KERN_DEBUG "  Rx data %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:"
					   "%2.2x %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x %2.2x%2.2x "
					   "%d.%d.%d.%d.\n",
					   skb->data[0], skb->data[1], skb->data[2], skb->data[3],
					   skb->data[4], skb->data[5], skb->data[6], skb->data[7],
					   skb->data[8], skb->data[9], skb->data[10],
					   skb->data[11], skb->data[12], skb->data[13],
					   skb->data[14], skb->data[15], skb->data[16],
					   skb->data[17]);
#endif
			skb->mac.raw = skb->data;
			/* Protocol lookup disabled until verified with all kernels. */
			if (0 && ntohs(skb->mac.ethernet->h_proto) >= 0x0800) {
