
📄 eepro100.c

📁 Linux development for the 2410 board; can be used to generate the zImage file required by the 2410
💻 C
📖 Page 1 of 5
		for (i = 0; i < 16; i++) {
			/* FIXME: what does it mean?  --SAW */
			if (i == 6) i = 21;
			printk(KERN_DEBUG "%s:  PHY index %d register %d is %4.4x.\n",
				   dev->name, phy_num, i, mdio_read(ioaddr, phy_num, i));
		}
	}
#endif
}

/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void
speedo_init_rx_ring(struct net_device *dev)
{
	struct speedo_private *sp = (struct speedo_private *)dev->priv;
	struct RxFD *rxf, *last_rxf = NULL;
	dma_addr_t last_rxf_dma = 0 /* to shut up the compiler */;
	int i;

	sp->cur_rx = 0;

	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb;
		skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
		sp->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;			/* OK.  Just initially short of Rx bufs. */
		skb->dev = dev;			/* Mark as being used by this device. */
		rxf = (struct RxFD *)skb->tail;
		sp->rx_ringp[i] = rxf;
		sp->rx_ring_dma[i] =
			pci_map_single(sp->pdev, rxf,
					PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_BIDIRECTIONAL);
		skb_reserve(skb, sizeof(struct RxFD));
		if (last_rxf) {
			last_rxf->link = cpu_to_le32(sp->rx_ring_dma[i]);
			pci_dma_sync_single(sp->pdev, last_rxf_dma,
					sizeof(struct RxFD), PCI_DMA_TODEVICE);
		}
		last_rxf = rxf;
		last_rxf_dma = sp->rx_ring_dma[i];
		rxf->status = cpu_to_le32(0x00000001);	/* '1' is flag value only. */
		rxf->link = 0;						/* None yet. */
		/* This field unused by i82557. */
		rxf->rx_buf_addr = 0xffffffff;
		rxf->count = cpu_to_le32(PKT_BUF_SZ << 16);
		pci_dma_sync_single(sp->pdev, sp->rx_ring_dma[i],
				sizeof(struct RxFD), PCI_DMA_TODEVICE);
	}
	sp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
	/* Mark the last entry as end-of-list. */
	last_rxf->status = cpu_to_le32(0xC0000002);	/* '2' is flag value only. */
	pci_dma_sync_single(sp->pdev, sp->rx_ring_dma[RX_RING_SIZE-1],
			sizeof(struct RxFD), PCI_DMA_TODEVICE);
	sp->last_rxf = last_rxf;
	sp->last_rxf_dma = last_rxf_dma;
}

static void speedo_purge_tx(struct net_device *dev)
{
	struct speedo_private *sp = (struct speedo_private *)dev->priv;
	int entry;

	while ((int)(sp->cur_tx - sp->dirty_tx) > 0) {
		entry = sp->dirty_tx % TX_RING_SIZE;
		if (sp->tx_skbuff[entry]) {
			sp->stats.tx_errors++;
			pci_unmap_single(sp->pdev,
					le32_to_cpu(sp->tx_ring[entry].tx_buf_addr0),
					sp->tx_skbuff[entry]->len, PCI_DMA_TODEVICE);
			dev_kfree_skb_irq(sp->tx_skbuff[entry]);
			sp->tx_skbuff[entry] = 0;
		}
		sp->dirty_tx++;
	}
	while (sp->mc_setup_head != NULL) {
		struct speedo_mc_block *t;
		if (speedo_debug > 1)
			printk(KERN_DEBUG "%s: freeing mc frame.\n", dev->name);
		pci_unmap_single(sp->pdev, sp->mc_setup_head->frame_dma,
				sp->mc_setup_head->len, PCI_DMA_TODEVICE);
		t = sp->mc_setup_head->next;
		kfree(sp->mc_setup_head);
		sp->mc_setup_head = t;
	}
	sp->mc_setup_tail = NULL;
	sp->tx_full = 0;
	netif_wake_queue(dev);
}

static void reset_mii(struct net_device *dev)
{
	struct speedo_private *sp = (struct speedo_private *)dev->priv;
	long ioaddr = dev->base_addr;
	/* Reset the MII transceiver, suggested by Fred Young @ scalable.com. */
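	/* Register usage in the sequence below: MII register 0 is the BMCR
	   (bit 15 = soft reset, bit 10 = isolate), register 4 is the
	   autonegotiation advertisement.  The current BMCR and advertisement
	   are saved first, the PHY is isolated and reset, and the saved values
	   are then written back so the pre-reset link configuration survives. */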
	if ((sp->phy[0] & 0x8000) == 0) {
		int phy_addr = sp->phy[0] & 0x1f;
		int advertising = mdio_read(ioaddr, phy_addr, 4);
		int mii_bmcr = mdio_read(ioaddr, phy_addr, 0);
		mdio_write(ioaddr, phy_addr, 0, 0x0400);
		mdio_write(ioaddr, phy_addr, 1, 0x0000);
		mdio_write(ioaddr, phy_addr, 4, 0x0000);
		mdio_write(ioaddr, phy_addr, 0, 0x8000);
#ifdef honor_default_port
		mdio_write(ioaddr, phy_addr, 0, mii_ctrl[dev->default_port & 7]);
#else
		mdio_read(ioaddr, phy_addr, 0);
		mdio_write(ioaddr, phy_addr, 0, mii_bmcr);
		mdio_write(ioaddr, phy_addr, 4, advertising);
#endif
	}
}

static void speedo_tx_timeout(struct net_device *dev)
{
	struct speedo_private *sp = (struct speedo_private *)dev->priv;
	long ioaddr = dev->base_addr;
	int status = inw(ioaddr + SCBStatus);
	unsigned long flags;

	printk(KERN_WARNING "%s: Transmit timed out: status %4.4x "
		   " %4.4x at %d/%d command %8.8x.\n",
		   dev->name, status, inw(ioaddr + SCBCmd),
		   sp->dirty_tx, sp->cur_tx,
		   sp->tx_ring[sp->dirty_tx % TX_RING_SIZE].status);
	speedo_show_state(dev);
#if 0
	if ((status & 0x00C0) != 0x0080
		&&  (status & 0x003C) == 0x0010) {
		/* Only the command unit has stopped. */
		printk(KERN_WARNING "%s: Trying to restart the transmitter...\n",
			   dev->name);
		outl(TX_RING_ELEM_DMA(sp, sp->dirty_tx % TX_RING_SIZE),
			 ioaddr + SCBPointer);
		outw(CUStart, ioaddr + SCBCmd);
		reset_mii(dev);
	} else {
#else
	{
#endif
		del_timer_sync(&sp->timer);
		/* Reset the Tx and Rx units. */
		outl(PortReset, ioaddr + SCBPort);
		/* We may get spurious interrupts here.  But I don't think that they
		   may do much harm.  1999/12/09 SAW */
		udelay(10);
		/* Disable interrupts. */
		outw(SCBMaskAll, ioaddr + SCBCmd);
		synchronize_irq();
		speedo_tx_buffer_gc(dev);
		/* Free as much as possible.
		   It helps to recover from a hang because of out-of-memory.
		   It also simplifies speedo_resume() in case TX ring is full or
		   close-to-be full. */
		speedo_purge_tx(dev);
		speedo_refill_rx_buffers(dev, 1);
		spin_lock_irqsave(&sp->lock, flags);
		speedo_resume(dev);
		sp->rx_mode = -1;
		dev->trans_start = jiffies;
		spin_unlock_irqrestore(&sp->lock, flags);
		set_rx_mode(dev); /* it takes the spinlock itself --SAW */
		/* Reset MII transceiver.  Do it before starting the timer to serialize
		   mdio_xxx operations.  Yes, it's paranoia :-)  2000/05/09 SAW */
		reset_mii(dev);
		sp->timer.expires = RUN_AT(2*HZ);
		add_timer(&sp->timer);
	}
	return;
}

static int
speedo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct speedo_private *sp = (struct speedo_private *)dev->priv;
	long ioaddr = dev->base_addr;
	int entry;
	/* Prevent interrupts from changing the Tx ring from underneath us. */
	unsigned long flags;

	spin_lock_irqsave(&sp->lock, flags);

	/* Check if there is enough space. */
	if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
		printk(KERN_ERR "%s: incorrect tbusy state, fixed.\n", dev->name);
		netif_stop_queue(dev);
		sp->tx_full = 1;
		spin_unlock_irqrestore(&sp->lock, flags);
		return 1;
	}

	/* Calculate the Tx descriptor entry. */
	entry = sp->cur_tx++ % TX_RING_SIZE;

	sp->tx_skbuff[entry] = skb;
	sp->tx_ring[entry].status =
		cpu_to_le32(CmdSuspend | CmdTx | CmdTxFlex);
	if (!(entry & ((TX_RING_SIZE>>2)-1)))
		sp->tx_ring[entry].status |= cpu_to_le32(CmdIntr);
	sp->tx_ring[entry].link =
		cpu_to_le32(TX_RING_ELEM_DMA(sp, sp->cur_tx % TX_RING_SIZE));
	sp->tx_ring[entry].tx_desc_addr =
		cpu_to_le32(TX_RING_ELEM_DMA(sp, entry) + TX_DESCR_BUF_OFFSET);
	/* The data region is always in one buffer descriptor. */
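	/* One flexible-mode buffer descriptor carries the whole frame: the skb
	   data is DMA-mapped in place, so the transmit path makes no copy.
	   sp->tx_threshold supplies the count/threshold word written below;
	   speedo_tx_buffer_gc() raises it after a Tx underrun. */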
	sp->tx_ring[entry].count = cpu_to_le32(sp->tx_threshold);
	sp->tx_ring[entry].tx_buf_addr0 =
		cpu_to_le32(pci_map_single(sp->pdev, skb->data,
					   skb->len, PCI_DMA_TODEVICE));
	sp->tx_ring[entry].tx_buf_size0 = cpu_to_le32(skb->len);

	/* workaround for hardware bug on 10 mbit half duplex */
	if ((sp->partner == 0) || (sp->chip_id == 1)) {
		wait_for_cmd_done(ioaddr + SCBCmd);
		outb(0 , ioaddr + SCBCmd);
	}

	/* Trigger the command unit resume. */
	wait_for_cmd_done(ioaddr + SCBCmd);
	clear_suspend(sp->last_cmd);
	/* We want the time window between clearing suspend flag on the previous
	   command and resuming CU to be as small as possible.
	   Interrupts in between are very undesired.  --SAW */
	outb(CUResume, ioaddr + SCBCmd);
	sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];

	/* Leave room for set_rx_mode(). If there is no more space than reserved
	   for multicast filter mark the ring as full. */
	if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
		netif_stop_queue(dev);
		sp->tx_full = 1;
	}
	spin_unlock_irqrestore(&sp->lock, flags);

	dev->trans_start = jiffies;

	return 0;
}

static void speedo_tx_buffer_gc(struct net_device *dev)
{
	unsigned int dirty_tx;
	struct speedo_private *sp = (struct speedo_private *)dev->priv;

	dirty_tx = sp->dirty_tx;
	while ((int)(sp->cur_tx - dirty_tx) > 0) {
		int entry = dirty_tx % TX_RING_SIZE;
		int status = le32_to_cpu(sp->tx_ring[entry].status);

		if (speedo_debug > 5)
			printk(KERN_DEBUG " scavenge candidate %d status %4.4x.\n",
				   entry, status);
		if ((status & StatusComplete) == 0)
			break;			/* It still hasn't been processed. */
		if (status & TxUnderrun)
			if (sp->tx_threshold < 0x01e08000) {
				if (speedo_debug > 2)
					printk(KERN_DEBUG "%s: TX underrun, threshold adjusted.\n",
						   dev->name);
				sp->tx_threshold += 0x00040000;
			}
		/* Free the original skb. */
		if (sp->tx_skbuff[entry]) {
			sp->stats.tx_packets++;	/* Count only user packets. */
			sp->stats.tx_bytes += sp->tx_skbuff[entry]->len;
			pci_unmap_single(sp->pdev,
					le32_to_cpu(sp->tx_ring[entry].tx_buf_addr0),
					sp->tx_skbuff[entry]->len, PCI_DMA_TODEVICE);
			dev_kfree_skb_irq(sp->tx_skbuff[entry]);
			sp->tx_skbuff[entry] = 0;
		}
		dirty_tx++;
	}

	if (speedo_debug && (int)(sp->cur_tx - dirty_tx) > TX_RING_SIZE) {
		printk(KERN_ERR "out-of-sync dirty pointer, %d vs. %d,"
			   " full=%d.\n",
			   dirty_tx, sp->cur_tx, sp->tx_full);
		dirty_tx += TX_RING_SIZE;
	}

	while (sp->mc_setup_head != NULL
		   && (int)(dirty_tx - sp->mc_setup_head->tx - 1) > 0) {
		struct speedo_mc_block *t;
		if (speedo_debug > 1)
			printk(KERN_DEBUG "%s: freeing mc frame.\n", dev->name);
		pci_unmap_single(sp->pdev, sp->mc_setup_head->frame_dma,
				sp->mc_setup_head->len, PCI_DMA_TODEVICE);
		t = sp->mc_setup_head->next;
		kfree(sp->mc_setup_head);
		sp->mc_setup_head = t;
	}
	if (sp->mc_setup_head == NULL)
		sp->mc_setup_tail = NULL;

	sp->dirty_tx = dirty_tx;
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static void speedo_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct speedo_private *sp;
	long ioaddr, boguscnt = max_interrupt_work;
	unsigned short status;

#ifndef final_version
	if (dev == NULL) {
		printk(KERN_ERR "speedo_interrupt(): irq %d for unknown device.\n", irq);
		return;
	}
#endif

	ioaddr = dev->base_addr;
	sp = (struct speedo_private *)dev->priv;

#ifndef final_version
	/* A lock to prevent simultaneous entry on SMP machines. */
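	/* test_and_set_bit() atomically sets bit 0 of sp->in_interrupt and
	   returns its previous value; a nonzero return means another CPU is
	   already inside the handler, so this invocation backs out instead of
	   touching the shared rings. */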
	if (test_and_set_bit(0, (void*)&sp->in_interrupt)) {
		printk(KERN_ERR"%s: SMP simultaneous entry of an interrupt handler.\n",
			   dev->name);
		sp->in_interrupt = 0;	/* Avoid halting machine. */
		return;
	}
#endif

	do {
		status = inw(ioaddr + SCBStatus);
		/* Acknowledge all of the current interrupt sources ASAP. */
		/* Will change from 0xfc00 to 0xff00 when we start handling
		   FCP and ER interrupts --Dragan */
		outw(status & 0xfc00, ioaddr + SCBStatus);

		if (speedo_debug > 4)
			printk(KERN_DEBUG "%s: interrupt  status=%#4.4x.\n",
				   dev->name, status);

		if ((status & 0xfc00) == 0)
			break;

		/* Always check if all rx buffers are allocated.  --SAW */
		speedo_refill_rx_buffers(dev, 0);

		if ((status & 0x5000) ||	/* Packet received, or Rx error. */
			(sp->rx_ring_state&(RrNoMem|RrPostponed)) == RrPostponed)
									/* Need to gather the postponed packet. */
			speedo_rx(dev);

		if (status & 0x1000) {
			spin_lock(&sp->lock);
			if ((status & 0x003c) == 0x0028) {		/* No more Rx buffers. */
				struct RxFD *rxf;
				printk(KERN_WARNING "%s: card reports no RX buffers.\n",
						dev->name);
				rxf = sp->rx_ringp[sp->cur_rx % RX_RING_SIZE];
				if (rxf == NULL) {
					if (speedo_debug > 2)
						printk(KERN_DEBUG
								"%s: NULL cur_rx in speedo_interrupt().\n",
								dev->name);
					sp->rx_ring_state |= RrNoMem|RrNoResources;
				} else if (rxf == sp->last_rxf) {
					if (speedo_debug > 2)
						printk(KERN_DEBUG
								"%s: cur_rx is last in speedo_interrupt().\n",
								dev->name);
					sp->rx_ring_state |= RrNoMem|RrNoResources;
				} else
					outb(RxResumeNoResources, ioaddr + SCBCmd);
			} else if ((status & 0x003c) == 0x0008) { /* No resources. */
				struct RxFD *rxf;
				printk(KERN_WARNING "%s: card reports no resources.\n",
						dev->name);
				rxf = sp->rx_ringp[sp->cur_rx % RX_RING_SIZE];
				if (rxf == NULL) {
					if (speedo_debug > 2)
						printk(KERN_DEBUG
								"%s: NULL cur_rx in speedo_interrupt().\n",
								dev->name);
					sp->rx_ring_state |= RrNoMem|RrNoResources;
				} else if (rxf == sp->last_rxf) {
					if (speedo_debug > 2)
						printk(KERN_DEBUG
