
📄 via-rhine.c

📁 Linux drivers for various network cards
💻 Language: C
📖 Page 1 of 4
	/* For ESI phys, turn on bit 7 in register 0x17. */
	mdio_write(dev, np->phys[0], 0x17, mdio_read(dev, np->phys[0], 0x17) |
			   ((np->drv_flags & HasESIPhy) ? 0x0080 : 0x0001));

	if (np->msg_level & NETIF_MSG_IFUP)
		printk(KERN_DEBUG "%s: Done netdev_open(), status %4.4x "
			   "MII status: %4.4x.\n",
			   dev->name, readw(ioaddr + ChipCmd),
			   mdio_read(dev, np->phys[0], 1));

	/* Set the timer to check for link beat. */
	init_timer(&np->timer);
	np->timer.expires = jiffies + 2;
	np->timer.data = (unsigned long)dev;
	np->timer.function = &netdev_timer;		/* timer handler */
	add_timer(&np->timer);
	return 0;
}

static void check_duplex(struct net_device *dev)
{
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	long ioaddr = dev->base_addr;
	int mii_reg5 = mdio_read(dev, np->phys[0], 5);
	int negotiated = mii_reg5 & np->advertising;
	int duplex;

	if (np->duplex_lock || mii_reg5 == 0xffff)
		return;
	duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
	if (np->full_duplex != duplex) {
		np->full_duplex = duplex;
		if (np->msg_level & NETIF_MSG_LINK)
			printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d link"
				   " partner capability of %4.4x.\n", dev->name,
				   duplex ? "full" : "half", np->phys[0], mii_reg5);
		if (duplex)
			np->chip_cmd |= CmdFDuplex;
		else
			np->chip_cmd &= ~CmdFDuplex;
		writew(np->chip_cmd, ioaddr + ChipCmd);
	}
}

static void netdev_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	long ioaddr = dev->base_addr;
	int next_tick = 10*HZ;

	if (np->msg_level & NETIF_MSG_TIMER) {
		printk(KERN_DEBUG "%s: VIA Rhine monitor tick, status %4.4x.\n",
			   dev->name, readw(ioaddr + IntrStatus));
	}
	if (netif_queue_paused(dev)
		&& np->cur_tx - np->dirty_tx > 1
		&& jiffies - dev->trans_start > TX_TIMEOUT)
		tx_timeout(dev);
	check_duplex(dev);
	np->timer.expires = jiffies + next_tick;
	add_timer(&np->timer);
}

static void tx_timeout(struct net_device *dev)
{
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	long ioaddr = dev->base_addr;

	printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
		   "%4.4x, resetting...\n",
		   dev->name, readw(ioaddr + IntrStatus),
		   mdio_read(dev, np->phys[0], 1));

	/* Perhaps we should reinitialize the hardware here. */
	dev->if_port = 0;
	/* Restart the chip's Tx processes. */
	writel(virt_to_bus(np->tx_ring + (np->dirty_tx % TX_RING_SIZE)),
		   ioaddr + TxRingPtr);
	writew(CmdTxDemand | np->chip_cmd, dev->base_addr + ChipCmd);
	/* Trigger an immediate transmit demand. */

	dev->trans_start = jiffies;
	np->stats.tx_errors++;
	return;
}

/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
{
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	int i;

	np->tx_full = 0;
	np->cur_rx = np->cur_tx = 0;
	np->dirty_rx = np->dirty_tx = 0;

	/* Use 1518/+18 if the CRC is transferred. */
	np->rx_buf_sz = dev->mtu + 14;
	if (np->rx_buf_sz < PKT_BUF_SZ)
		np->rx_buf_sz = PKT_BUF_SZ;
	np->rx_head_desc = &np->rx_ring[0];

	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].rx_status = 0;
		np->rx_ring[i].desc_length = cpu_to_le32(np->rx_buf_sz);
		np->rx_ring[i].next_desc = virt_to_le32desc(&np->rx_ring[i+1]);
		np->rx_skbuff[i] = 0;
	}
	/* Mark the last entry as wrapping the ring. */
	np->rx_ring[i-1].next_desc = virt_to_le32desc(&np->rx_ring[0]);
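	/* Note the ownership handshake used below: setting DescOwn in a
	   descriptor's status word hands that descriptor to the chip, and the
	   driver touches it again only once the chip has cleared the bit on
	   completion. */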
	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
		np->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb->dev = dev;		/* Mark as being used by this device. */
		np->rx_ring[i].addr = virt_to_le32desc(skb->tail);
		np->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
	}
	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = 0;
		np->tx_ring[i].tx_status = 0;
		np->tx_ring[i].desc_length = cpu_to_le32(0x00e08000);
		np->tx_ring[i].next_desc = virt_to_le32desc(&np->tx_ring[i+1]);
		np->tx_buf[i] = 0;	/* Allocated as/if needed. */
	}
	np->tx_ring[i-1].next_desc = virt_to_le32desc(&np->tx_ring[0]);
	return;
}

static int start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	unsigned entry;

	/* Block a timer-based transmit from overlapping.  This happens when
	   packets are presumed lost, and we use this to check the Tx status. */
	if (netif_pause_tx_queue(dev) != 0) {
		/* This watchdog code is redundant with the media monitor timer. */
		if (jiffies - dev->trans_start > TX_TIMEOUT)
			tx_timeout(dev);
		return 1;
	}

	/* Caution: the write order is important here, set the descriptor word
	   with the "ownership" bit last.  No SMP locking is needed if the
	   cur_tx is incremented after the descriptor is consistent. */

	/* Calculate the next Tx descriptor entry. */
	entry = np->cur_tx % TX_RING_SIZE;

	np->tx_skbuff[entry] = skb;

	if ((np->drv_flags & ReqTxAlign) && ((long)skb->data & 3)) {
		/* Must use alignment buffer. */
		if (np->tx_buf[entry] == NULL &&
			(np->tx_buf[entry] = kmalloc(PKT_BUF_SZ, GFP_KERNEL)) == NULL)
			return 1;
		memcpy(np->tx_buf[entry], skb->data, skb->len);
		np->tx_ring[entry].addr = virt_to_le32desc(np->tx_buf[entry]);
	} else
		np->tx_ring[entry].addr = virt_to_le32desc(skb->data);
	/* Explicitly flush packet data cache lines here. */

	np->tx_ring[entry].desc_length =
		cpu_to_le32(0x00E08000 | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
	np->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
	np->cur_tx++;
	/* Explicitly flush descriptor cache lines here. */

	/* Wake the potentially-idle transmit channel. */
	writew(CmdTxDemand | np->chip_cmd, dev->base_addr + ChipCmd);

	if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1) {
		np->tx_full = 1;
		/* Check for a just-cleared queue. */
		if (np->cur_tx - (volatile unsigned int)np->dirty_tx
			< TX_QUEUE_LEN - 2) {
			np->tx_full = 0;
			netif_unpause_tx_queue(dev);
		} else
			netif_stop_tx_queue(dev);
	} else
		netif_unpause_tx_queue(dev);	/* Typical path */

	dev->trans_start = jiffies;

	if (np->msg_level & NETIF_MSG_TX_QUEUED) {
		printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
			   dev->name, np->cur_tx, entry);
	}
	return 0;
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct netdev_private *np = (void *)dev->priv;
	long ioaddr = dev->base_addr;
	int boguscnt = np->max_interrupt_work;

	do {
		u32 intr_status = readw(ioaddr + IntrStatus);
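		/* The write-back below acknowledges only the sources just read
		   (the status bits clear when a one is written back to them), so
		   an event arriving after the read above remains pending for the
		   next pass of this loop. */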
		/* Acknowledge all of the current interrupt sources ASAP. */
		writew(intr_status & 0xffff, ioaddr + IntrStatus);

		if (np->msg_level & NETIF_MSG_INTR)
			printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
				   dev->name, intr_status);

		if (intr_status == 0)
			break;

		if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped |
						   IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf))
			netdev_rx(dev);

		for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
			int entry = np->dirty_tx % TX_RING_SIZE;
			int txstatus = le32_to_cpu(np->tx_ring[entry].tx_status);
			if (txstatus & DescOwn)
				break;
			if (np->msg_level & NETIF_MSG_TX_DONE)
				printk(KERN_DEBUG "  Tx scavenge %d status %4.4x.\n",
					   entry, txstatus);
			if (txstatus & 0x8000) {
				if (np->msg_level & NETIF_MSG_TX_ERR)
					printk(KERN_DEBUG "%s: Transmit error, Tx status %4.4x.\n",
						   dev->name, txstatus);
				np->stats.tx_errors++;
				if (txstatus & 0x0400) np->stats.tx_carrier_errors++;
				if (txstatus & 0x0200) np->stats.tx_window_errors++;
				if (txstatus & 0x0100) np->stats.tx_aborted_errors++;
				if (txstatus & 0x0080) np->stats.tx_heartbeat_errors++;
				if (txstatus & 0x0002) np->stats.tx_fifo_errors++;
#ifdef ETHER_STATS
				if (txstatus & 0x0100) np->stats.collisions16++;
#endif
				/* Transmitter restarted in 'abnormal' handler. */
			} else {
#ifdef ETHER_STATS
				if (txstatus & 0x0001) np->stats.tx_deferred++;
#endif
				if (np->drv_flags & HasV1TxStat)
					np->stats.collisions += (txstatus >> 3) & 15;
				else
					np->stats.collisions += txstatus & 15;
#if defined(NETSTATS_VER2)
				np->stats.tx_bytes += np->tx_skbuff[entry]->len;
#endif
				np->stats.tx_packets++;
			}
			/* Free the original skb. */
			dev_free_skb_irq(np->tx_skbuff[entry]);
			np->tx_skbuff[entry] = 0;
		}
		/* Note the 4 slot hysteresis in marking the queue non-full. */
		if (np->tx_full && np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
			/* The ring is no longer full, allow new TX entries. */
			np->tx_full = 0;
			netif_resume_tx_queue(dev);
		}

		/* Abnormal error summary/uncommon events handlers. */
		if (intr_status & (IntrPCIErr | IntrLinkChange | IntrMIIChange |
						   IntrStatsMax | IntrTxAbort | IntrTxUnderrun))
			netdev_error(dev, intr_status);

		if (--boguscnt < 0) {
			printk(KERN_WARNING "%s: Too much work at interrupt, "
				   "status=0x%4.4x.\n",
				   dev->name, intr_status);
			break;
		}
	} while (1);

	if (np->msg_level & NETIF_MSG_INTR)
		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
			   dev->name, (int)readw(ioaddr + IntrStatus));
	return;
}
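/* Rx strategy used by netdev_rx() below: frames shorter than rx_copybreak
   are copied into a freshly allocated skb so the ring buffer can be reused
   in place, while longer frames are passed up in the original ring buffer.
   The length reported in the status word includes the 4-byte CRC, which is
   dropped before the frame is handed to the stack. */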
/* This routine is logically part of the interrupt handler, but isolated
   for clarity and better register allocation. */
static int netdev_rx(struct net_device *dev)
{
	struct netdev_private *np = (struct netdev_private *)dev->priv;
	int entry = np->cur_rx % RX_RING_SIZE;
	int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;

	if (np->msg_level & NETIF_MSG_RX_STATUS) {
		printk(KERN_DEBUG " In netdev_rx(), entry %d status %8.8x.\n",
			   entry, np->rx_head_desc->rx_status);
	}

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while (!(np->rx_head_desc->rx_status & cpu_to_le32(DescOwn))) {
		struct rx_desc *desc = np->rx_head_desc;
		u32 desc_status = le32_to_cpu(desc->rx_status);
		int data_size = desc_status >> 16;

		if (np->msg_level & NETIF_MSG_RX_STATUS)
			printk(KERN_DEBUG "  netdev_rx() status is %4.4x.\n",
				   desc_status);
		if (--boguscnt < 0)
			break;
		if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
			if ((desc_status & RxWholePkt) != RxWholePkt) {
				printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
					   "multiple buffers, entry %#x length %d status %4.4x!\n",
					   dev->name, np->cur_rx, data_size, desc_status);
				printk(KERN_WARNING "%s: Oversized Ethernet frame %p vs %p.\n",
					   dev->name, np->rx_head_desc,
					   &np->rx_ring[np->cur_rx % RX_RING_SIZE]);
				np->stats.rx_length_errors++;
			} else if (desc_status & RxErr) {
				/* There was an error. */
				if (np->msg_level & NETIF_MSG_RX_ERR)
					printk(KERN_DEBUG "  netdev_rx() Rx error was %8.8x.\n",
						   desc_status);
				np->stats.rx_errors++;
				if (desc_status & 0x0030) np->stats.rx_length_errors++;
				if (desc_status & 0x0048) np->stats.rx_fifo_errors++;
				if (desc_status & 0x0004) np->stats.rx_frame_errors++;
				if (desc_status & 0x0002) np->stats.rx_crc_errors++;
			}
		} else {
			struct sk_buff *skb;
			/* Length should omit the CRC (4 bytes). */
			int pkt_len = data_size - 4;

			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < np->rx_copybreak
				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb->dev = dev;
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
#if HAS_IP_COPYSUM		/* Call copy + cksum if available. */
				eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
				skb_put(skb, pkt_len);
