
📄 eepro100.c

📁 Linux kernel source code
💻 C
📖 Page 1 of 5
				printk(KERN_DEBUG "%s: Link status change.\n", dev->name);
				printk(KERN_DEBUG "%s: Old partner %x, new %x, adv %x.\n",
					   dev->name, sp->partner, partner, sp->mii_if.advertising);
			}
			sp->partner = partner;
			if (flow_ctrl != sp->flow_ctrl) {
				sp->flow_ctrl = flow_ctrl;
				sp->rx_mode = -1;	/* Trigger a reload. */
			}
		}
	}
	mii_check_link(&sp->mii_if);
	if (netif_msg_timer(sp)) {
		printk(KERN_DEBUG "%s: Media control tick, status %4.4x.\n",
			   dev->name, ioread16(ioaddr + SCBStatus));
	}
	if (sp->rx_mode < 0  ||
		(sp->rx_bug  && jiffies - sp->last_rx_time > 2*HZ)) {
		/* We haven't received a packet in a Long Time.  We might have been
		   bitten by the receiver hang bug.  This can be cleared by sending
		   a set multicast list command. */
		if (netif_msg_timer(sp))
			printk(KERN_DEBUG "%s: Sending a multicast list set command"
				   " from a timer routine,"
				   " m=%d, j=%ld, l=%ld.\n",
				   dev->name, sp->rx_mode, jiffies, sp->last_rx_time);
		set_rx_mode(dev);
	}
	/* We must continue to monitor the media. */
	sp->timer.expires = RUN_AT(2*HZ); 			/* 2.0 sec. */
	add_timer(&sp->timer);
}

static void speedo_show_state(struct net_device *dev)
{
	struct speedo_private *sp = netdev_priv(dev);
	int i;

	if (netif_msg_pktdata(sp)) {
		printk(KERN_DEBUG "%s: Tx ring dump,  Tx queue %u / %u:\n",
		    dev->name, sp->cur_tx, sp->dirty_tx);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(KERN_DEBUG "%s:  %c%c%2d %8.8x.\n", dev->name,
			    i == sp->dirty_tx % TX_RING_SIZE ? '*' : ' ',
			    i == sp->cur_tx % TX_RING_SIZE ? '=' : ' ',
			    i, sp->tx_ring[i].status);

		printk(KERN_DEBUG "%s: Printing Rx ring"
		    " (next to receive into %u, dirty index %u).\n",
		    dev->name, sp->cur_rx, sp->dirty_rx);
		for (i = 0; i < RX_RING_SIZE; i++)
			printk(KERN_DEBUG "%s: %c%c%c%2d %8.8x.\n", dev->name,
			    sp->rx_ringp[i] == sp->last_rxf ? 'l' : ' ',
			    i == sp->dirty_rx % RX_RING_SIZE ? '*' : ' ',
			    i == sp->cur_rx % RX_RING_SIZE ? '=' : ' ',
			    i, (sp->rx_ringp[i] != NULL) ?
			    (unsigned)sp->rx_ringp[i]->status : 0);
	}

#if 0
	{
		void __iomem *ioaddr = sp->regs;
		int phy_num = sp->phy[0] & 0x1f;
		for (i = 0; i < 16; i++) {
			/* FIXME: what does it mean?  --SAW */
			if (i == 6) i = 21;
			printk(KERN_DEBUG "%s:  PHY index %d register %d is %4.4x.\n",
				   dev->name, phy_num, i, mdio_read(dev, phy_num, i));
		}
	}
#endif
}

/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void
speedo_init_rx_ring(struct net_device *dev)
{
	struct speedo_private *sp = netdev_priv(dev);
	struct RxFD *rxf, *last_rxf = NULL;
	dma_addr_t last_rxf_dma = 0 /* to shut up the compiler */;
	int i;

	sp->cur_rx = 0;

	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb;
		skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
		if (skb)
			rx_align(skb);		/* Align IP on 16 byte boundary */
		sp->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;			/* OK.  Just initially short of Rx bufs. */
		skb->dev = dev;			/* Mark as being used by this device. */
		rxf = (struct RxFD *)skb->data;
		sp->rx_ringp[i] = rxf;
		sp->rx_ring_dma[i] =
			pci_map_single(sp->pdev, rxf,
					PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_BIDIRECTIONAL);
		skb_reserve(skb, sizeof(struct RxFD));
		if (last_rxf) {
			last_rxf->link = cpu_to_le32(sp->rx_ring_dma[i]);
			pci_dma_sync_single_for_device(sp->pdev, last_rxf_dma,
										   sizeof(struct RxFD), PCI_DMA_TODEVICE);
		}
		last_rxf = rxf;
		last_rxf_dma = sp->rx_ring_dma[i];
		rxf->status = cpu_to_le32(0x00000001);	/* '1' is flag value only. */
		rxf->link = 0;						/* None yet. */
		/* This field unused by i82557. */
		rxf->rx_buf_addr = 0xffffffff;
		rxf->count = cpu_to_le32(PKT_BUF_SZ << 16);
		pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[i],
									   sizeof(struct RxFD), PCI_DMA_TODEVICE);
	}
	sp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
	/* Mark the last entry as end-of-list. */
	last_rxf->status = cpu_to_le32(0xC0000002);	/* '2' is flag value only. */
	pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[RX_RING_SIZE-1],
								   sizeof(struct RxFD), PCI_DMA_TODEVICE);
	sp->last_rxf = last_rxf;
	sp->last_rxf_dma = last_rxf_dma;
}

static void speedo_purge_tx(struct net_device *dev)
{
	struct speedo_private *sp = netdev_priv(dev);
	int entry;

	while ((int)(sp->cur_tx - sp->dirty_tx) > 0) {
		entry = sp->dirty_tx % TX_RING_SIZE;
		if (sp->tx_skbuff[entry]) {
			sp->stats.tx_errors++;
			pci_unmap_single(sp->pdev,
					le32_to_cpu(sp->tx_ring[entry].tx_buf_addr0),
					sp->tx_skbuff[entry]->len, PCI_DMA_TODEVICE);
			dev_kfree_skb_irq(sp->tx_skbuff[entry]);
			sp->tx_skbuff[entry] = NULL;
		}
		sp->dirty_tx++;
	}
	while (sp->mc_setup_head != NULL) {
		struct speedo_mc_block *t;
		if (netif_msg_tx_err(sp))
			printk(KERN_DEBUG "%s: freeing mc frame.\n", dev->name);
		pci_unmap_single(sp->pdev, sp->mc_setup_head->frame_dma,
				sp->mc_setup_head->len, PCI_DMA_TODEVICE);
		t = sp->mc_setup_head->next;
		kfree(sp->mc_setup_head);
		sp->mc_setup_head = t;
	}
	sp->mc_setup_tail = NULL;
	sp->tx_full = 0;
	netif_wake_queue(dev);
}

static void reset_mii(struct net_device *dev)
{
	struct speedo_private *sp = netdev_priv(dev);

	/* Reset the MII transceiver, suggested by Fred Young @ scalable.com. */
	if ((sp->phy[0] & 0x8000) == 0) {
		int phy_addr = sp->phy[0] & 0x1f;
		int advertising = mdio_read(dev, phy_addr, MII_ADVERTISE);
		int mii_bmcr = mdio_read(dev, phy_addr, MII_BMCR);
		mdio_write(dev, phy_addr, MII_BMCR, 0x0400);
		mdio_write(dev, phy_addr, MII_BMSR, 0x0000);
		mdio_write(dev, phy_addr, MII_ADVERTISE, 0x0000);
		mdio_write(dev, phy_addr, MII_BMCR, 0x8000);
#ifdef honor_default_port
		mdio_write(dev, phy_addr, MII_BMCR, mii_ctrl[dev->default_port & 7]);
#else
		mdio_read(dev, phy_addr, MII_BMCR);
		mdio_write(dev, phy_addr, MII_BMCR, mii_bmcr);
		mdio_write(dev, phy_addr, MII_ADVERTISE, advertising);
#endif
	}
}

static void speedo_tx_timeout(struct net_device *dev)
{
	struct speedo_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->regs;
	int status = ioread16(ioaddr + SCBStatus);
	unsigned long flags;

	if (netif_msg_tx_err(sp)) {
		printk(KERN_WARNING "%s: Transmit timed out: status %4.4x "
		   " %4.4x at %d/%d command %8.8x.\n",
		   dev->name, status, ioread16(ioaddr + SCBCmd),
		   sp->dirty_tx, sp->cur_tx,
		   sp->tx_ring[sp->dirty_tx % TX_RING_SIZE].status);
	}
	speedo_show_state(dev);
#if 0
	if ((status & 0x00C0) != 0x0080
		&&  (status & 0x003C) == 0x0010) {
		/* Only the command unit has stopped. */
		printk(KERN_WARNING "%s: Trying to restart the transmitter...\n",
			   dev->name);
		iowrite32(TX_RING_ELEM_DMA(sp, dirty_tx % TX_RING_SIZE]),
			 ioaddr + SCBPointer);
		iowrite16(CUStart, ioaddr + SCBCmd);
		reset_mii(dev);
	} else {
#else
	{
#endif
		del_timer_sync(&sp->timer);
		/* Reset the Tx and Rx units. */
		iowrite32(PortReset, ioaddr + SCBPort);
		/* We may get spurious interrupts here.  But I don't think that they
		   may do much harm.  1999/12/09 SAW */
		udelay(10);
		/* Disable interrupts. */
		iowrite16(SCBMaskAll, ioaddr + SCBCmd);
		synchronize_irq(dev->irq);
		speedo_tx_buffer_gc(dev);
		/* Free as much as possible.
		   It helps to recover from a hang because of out-of-memory.
		   It also simplifies speedo_resume() in case TX ring is full or
		   close-to-be full. */
		speedo_purge_tx(dev);
		speedo_refill_rx_buffers(dev, 1);
		spin_lock_irqsave(&sp->lock, flags);
		speedo_resume(dev);
		sp->rx_mode = -1;
		dev->trans_start = jiffies;
		spin_unlock_irqrestore(&sp->lock, flags);
		set_rx_mode(dev); /* it takes the spinlock itself --SAW */
		/* Reset MII transceiver.  Do it before starting the timer to serialize
		   mdio_xxx operations.  Yes, it's a paranoya :-)  2000/05/09 SAW */
		reset_mii(dev);
		sp->timer.expires = RUN_AT(2*HZ);
		add_timer(&sp->timer);
	}
	return;
}

static int
speedo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct speedo_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->regs;
	int entry;

	/* Prevent interrupts from changing the Tx ring from underneath us. */
	unsigned long flags;

	spin_lock_irqsave(&sp->lock, flags);

	/* Check if there are enough space. */
	if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
		printk(KERN_ERR "%s: incorrect tbusy state, fixed.\n", dev->name);
		netif_stop_queue(dev);
		sp->tx_full = 1;
		spin_unlock_irqrestore(&sp->lock, flags);
		return 1;
	}

	/* Calculate the Tx descriptor entry. */
	entry = sp->cur_tx++ % TX_RING_SIZE;

	sp->tx_skbuff[entry] = skb;
	sp->tx_ring[entry].status =
		cpu_to_le32(CmdSuspend | CmdTx | CmdTxFlex);
	if (!(entry & ((TX_RING_SIZE>>2)-1)))
		sp->tx_ring[entry].status |= cpu_to_le32(CmdIntr);
	sp->tx_ring[entry].link =
		cpu_to_le32(TX_RING_ELEM_DMA(sp, sp->cur_tx % TX_RING_SIZE));
	sp->tx_ring[entry].tx_desc_addr =
		cpu_to_le32(TX_RING_ELEM_DMA(sp, entry) + TX_DESCR_BUF_OFFSET);
	/* The data region is always in one buffer descriptor. */
	sp->tx_ring[entry].count = cpu_to_le32(sp->tx_threshold);
	sp->tx_ring[entry].tx_buf_addr0 =
		cpu_to_le32(pci_map_single(sp->pdev, skb->data,
					   skb->len, PCI_DMA_TODEVICE));
	sp->tx_ring[entry].tx_buf_size0 = cpu_to_le32(skb->len);

	/* workaround for hardware bug on 10 mbit half duplex */
	if ((sp->partner == 0) && (sp->chip_id == 1)) {
		wait_for_cmd_done(dev, sp);
		iowrite8(0 , ioaddr + SCBCmd);
		udelay(1);
	}

	/* Trigger the command unit resume. */
	wait_for_cmd_done(dev, sp);
	clear_suspend(sp->last_cmd);
	/* We want the time window between clearing suspend flag on the previous
	   command and resuming CU to be as small as possible.
	   Interrupts in between are very undesired.  --SAW */
	iowrite8(CUResume, ioaddr + SCBCmd);
	sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];

	/* Leave room for set_rx_mode(). If there is no more space than reserved
	   for multicast filter mark the ring as full. */
	if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
		netif_stop_queue(dev);
		sp->tx_full = 1;
	}

	spin_unlock_irqrestore(&sp->lock, flags);

	dev->trans_start = jiffies;

	return 0;
}

static void speedo_tx_buffer_gc(struct net_device *dev)
{
	unsigned int dirty_tx;
	struct speedo_private *sp = netdev_priv(dev);

	dirty_tx = sp->dirty_tx;
	while ((int)(sp->cur_tx - dirty_tx) > 0) {
		int entry = dirty_tx % TX_RING_SIZE;
		int status = le32_to_cpu(sp->tx_ring[entry].status);

		if (netif_msg_tx_done(sp))
			printk(KERN_DEBUG " scavenge candidate %d status %4.4x.\n",
				   entry, status);
		if ((status & StatusComplete) == 0)
			break;			/* It still hasn't been processed. */
		if (status & TxUnderrun)
			if (sp->tx_threshold < 0x01e08000) {
				if (netif_msg_tx_err(sp))
					printk(KERN_DEBUG "%s: TX underrun, threshold adjusted.\n",
						   dev->name);
				sp->tx_threshold += 0x00040000;
			}
		/* Free the original skb. */
		if (sp->tx_skbuff[entry]) {
			sp->stats.tx_packets++;	/* Count only user packets. */
			sp->stats.tx_bytes += sp->tx_skbuff[entry]->len;
			pci_unmap_single(sp->pdev,
					le32_to_cpu(sp->tx_ring[entry].tx_buf_addr0),
					sp->tx_skbuff[entry]->len, PCI_DMA_TODEVICE);
			dev_kfree_skb_irq(sp->tx_skbuff[entry]);
			sp->tx_skbuff[entry] = NULL;
		}
		dirty_tx++;
	}

	if (netif_msg_tx_err(sp) && (int)(sp->cur_tx - dirty_tx) > TX_RING_SIZE) {
		printk(KERN_ERR "out-of-sync dirty pointer, %d vs. %d,"
			   " full=%d.\n",
			   dirty_tx, sp->cur_tx, sp->tx_full);
		dirty_tx += TX_RING_SIZE;
	}

	while (sp->mc_setup_head != NULL
		   && (int)(dirty_tx - sp->mc_setup_head->tx - 1) > 0) {
		struct speedo_mc_block *t;
		if (netif_msg_tx_err(sp))
			printk(KERN_DEBUG "%s: freeing mc frame.\n", dev->name);
		pci_unmap_single(sp->pdev, sp->mc_setup_head->frame_dma,
				sp->mc_setup_head->len, PCI_DMA_TODEVICE);
		t = sp->mc_setup_head->next;
		kfree(sp->mc_setup_head);
		sp->mc_setup_head = t;
	}
	if (sp->mc_setup_head == NULL)
		sp->mc_setup_tail = NULL;

	sp->dirty_tx = dirty_tx;
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t speedo_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct speedo_private *sp;
	void __iomem *ioaddr;
	long boguscnt = max_interrupt_work;
	unsigned short status;
	unsigned int handled = 0;

	sp = netdev_priv(dev);
	ioaddr = sp->regs;

#ifndef final_version
	/* A lock to prevent simultaneous entry on SMP machines. */
	if (test_and_set_bit(0, (void*)&sp->in_interrupt)) {
		printk(KERN_ERR"%s: SMP simultaneous entry of an interrupt handler.\n",
			   dev->name);
