
📄 eepro100.c

📁 GNU Mach microkernel source code, based on the Mach research project at Carnegie Mellon University
💻 C
📖 Page 1 of 5
								"%s: cur_rx is last in speedo_interrupt().\n",								dev->name);					sp->rx_ring_state |= RrNoMem|RrNoResources;				} else					outb(RxResumeNoResources, ioaddr + SCBCmd);			} else if ((status & 0x003c) == 0x0008) { /* No resources. */				struct RxFD *rxf;				printk(KERN_WARNING "%s: card reports no resources.\n",						dev->name);				rxf = sp->rx_ringp[sp->cur_rx % RX_RING_SIZE];				if (rxf == NULL) {					if (speedo_debug > 2)						printk(KERN_DEBUG								"%s: NULL cur_rx in speedo_interrupt().\n",								dev->name);					sp->rx_ring_state |= RrNoMem|RrNoResources;				} else if (rxf == sp->last_rxf) {					if (speedo_debug > 2)						printk(KERN_DEBUG								"%s: cur_rx is last in speedo_interrupt().\n",								dev->name);					sp->rx_ring_state |= RrNoMem|RrNoResources;				} else {					/* Restart the receiver. */					outl(virt_to_bus(sp->rx_ringp[sp->cur_rx % RX_RING_SIZE]),					   ioaddr + SCBPointer);					outb(RxStart, ioaddr + SCBCmd);				}			}			sp->stats.rx_errors++;			spin_unlock(&sp->lock);		}		if ((sp->rx_ring_state&(RrNoMem|RrNoResources)) == RrNoResources) {			printk(KERN_WARNING					"%s: restart the receiver after a possible hang.\n",					dev->name);			spin_lock(&sp->lock);			/* Restart the receiver.			   I'm not sure if it's always right to restart the receiver			   here but I don't know another way to prevent receiver hangs.			   1999/12/25 SAW */			outl(virt_to_bus(sp->rx_ringp[sp->cur_rx % RX_RING_SIZE]),			   ioaddr + SCBPointer);			outb(RxStart, ioaddr + SCBCmd);			sp->rx_ring_state &= ~RrNoResources;			spin_unlock(&sp->lock);		}		/* User interrupt, Command/Tx unit interrupt or CU not active. */		if (status & 0xA400) {			spin_lock(&sp->lock);			speedo_tx_buffer_gc(dev);			if (sp->tx_full				&& (int)(sp->cur_tx - sp->dirty_tx) < TX_QUEUE_UNFULL) {				/* The ring is no longer full. */				sp->tx_full = 0;				netif_wake_queue(dev); /* Attention: under a spinlock.  --SAW */			}			spin_unlock(&sp->lock);		}		if (--boguscnt < 0) {			printk(KERN_ERR "%s: Too much work at interrupt, status=0x%4.4x.\n",				   dev->name, status);			/* Clear all interrupt sources. */			/* Will change from 0xfc00 to 0xff00 when we start handling			   FCP and ER interrupts --Dragan */			outl(0xfc00, ioaddr + SCBStatus);			break;		}	} while (1);	if (speedo_debug > 3)		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",			   dev->name, inw(ioaddr + SCBStatus));	dev->interrupt = 0;	clear_bit(0, (void*)&sp->in_interrupt);	return;}static inline struct RxFD *speedo_rx_alloc(struct net_device *dev, int entry){	struct speedo_private *sp = (struct speedo_private *)dev->priv;	struct RxFD *rxf;	struct sk_buff *skb;	/* Get a fresh skbuff to replace the consumed one. */	skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));	sp->rx_skbuff[entry] = skb;	if (skb == NULL) {		sp->rx_ringp[entry] = NULL;		return NULL;	}	rxf = sp->rx_ringp[entry] = (struct RxFD *)skb->tail;	skb->dev = dev;	skb_reserve(skb, sizeof(struct RxFD));	rxf->rx_buf_addr = virt_to_bus(skb->tail);	return rxf;}static inline void speedo_rx_link(struct net_device *dev, int entry,								  struct RxFD *rxf){	struct speedo_private *sp = (struct speedo_private *)dev->priv;	rxf->status = cpu_to_le32(0xC0000001); 	/* '1' for driver use only. */	rxf->link = 0;			/* None yet. 
*/	rxf->count = cpu_to_le32(PKT_BUF_SZ << 16);	sp->last_rxf->link = virt_to_le32desc(rxf);	sp->last_rxf->status &= cpu_to_le32(~0xC0000000);	sp->last_rxf = rxf;}static int speedo_refill_rx_buf(struct net_device *dev, int force){	struct speedo_private *sp = (struct speedo_private *)dev->priv;	int entry;	struct RxFD *rxf;	entry = sp->dirty_rx % RX_RING_SIZE;	if (sp->rx_skbuff[entry] == NULL) {		rxf = speedo_rx_alloc(dev, entry);		if (rxf == NULL) {			unsigned int forw;			int forw_entry;			if (speedo_debug > 2 || !(sp->rx_ring_state & RrOOMReported)) {				printk(KERN_WARNING "%s: can't fill rx buffer (force %d)!\n",						dev->name, force);				speedo_show_state(dev);				sp->rx_ring_state |= RrOOMReported;			}			if (!force)				return -1;	/* Better luck next time!  */			/* Borrow an skb from one of next entries. */			for (forw = sp->dirty_rx + 1; forw != sp->cur_rx; forw++)				if (sp->rx_skbuff[forw % RX_RING_SIZE] != NULL)					break;			if (forw == sp->cur_rx)				return -1;			forw_entry = forw % RX_RING_SIZE;			sp->rx_skbuff[entry] = sp->rx_skbuff[forw_entry];			sp->rx_skbuff[forw_entry] = NULL;			rxf = sp->rx_ringp[forw_entry];			sp->rx_ringp[forw_entry] = NULL;			sp->rx_ringp[entry] = rxf;		}	} else {		rxf = sp->rx_ringp[entry];	}	speedo_rx_link(dev, entry, rxf);	sp->dirty_rx++;	sp->rx_ring_state &= ~(RrNoMem|RrOOMReported); /* Mark the progress. */	return 0;}static void speedo_refill_rx_buffers(struct net_device *dev, int force){	struct speedo_private *sp = (struct speedo_private *)dev->priv;	/* Refill the RX ring. */	while ((int)(sp->cur_rx - sp->dirty_rx) > 0 &&			speedo_refill_rx_buf(dev, force) != -1);}static intspeedo_rx(struct net_device *dev){	struct speedo_private *sp = (struct speedo_private *)dev->priv;	int entry = sp->cur_rx % RX_RING_SIZE;	int status;	int rx_work_limit = sp->dirty_rx + RX_RING_SIZE - sp->cur_rx;	int alloc_ok = 1;	if (speedo_debug > 4)		printk(KERN_DEBUG " In speedo_rx().\n");	/* If we own the next entry, it's a new packet. Send it up. */	while (sp->rx_ringp[entry] != NULL &&		   (status = le32_to_cpu(sp->rx_ringp[entry]->status)) & RxComplete) {		int pkt_len = le32_to_cpu(sp->rx_ringp[entry]->count) & 0x3fff;		if (--rx_work_limit < 0)			break;		/* Check for a rare out-of-memory case: the current buffer is		   the last buffer allocated in the RX ring.  --SAW */		if (sp->last_rxf == sp->rx_ringp[entry]) {			/* Postpone the packet.  It'll be reaped at an interrupt when this			   packet is no longer the last packet in the ring. */			if (speedo_debug > 2)				printk(KERN_DEBUG "%s: RX packet postponed!\n",					   dev->name);			sp->rx_ring_state |= RrPostponed;			break;		}		if (speedo_debug > 4)			printk(KERN_DEBUG "  speedo_rx() status %8.8x len %d.\n", status,				   pkt_len);		if ((status & (RxErrTooBig|RxOK|0x0f90)) != RxOK) {			if (status & RxErrTooBig)				printk(KERN_ERR "%s: Ethernet frame overran the Rx buffer, "					   "status %8.8x!\n", dev->name, status);			else if (! (status & RxOK)) {				/* There was a fatal error.  This *should* be impossible. */				sp->stats.rx_errors++;				printk(KERN_ERR "%s: Anomalous event in speedo_rx(), "					   "status %8.8x.\n",					   dev->name, status);			}		} else {			struct sk_buff *skb;			/* Check if the packet is long enough to just accept without			   copying to a properly sized skbuff. */			if (pkt_len < rx_copybreak				&& (skb = dev_alloc_skb(pkt_len + 2)) != 0) {				skb->dev = dev;				skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */				/* 'skb_put()' points to the start of sk_buff data area. 
*/#if !defined(__alpha__)				/* Packet is in one chunk -- we can copy + cksum. */				eth_copy_and_sum(skb, sp->rx_skbuff[entry]->tail, pkt_len, 0);				skb_put(skb, pkt_len);#else				memcpy(skb_put(skb, pkt_len), sp->rx_skbuff[entry]->tail,					   pkt_len);#endif			} else {				/* Pass up the already-filled skbuff. */				skb = sp->rx_skbuff[entry];				if (skb == NULL) {					printk(KERN_ERR "%s: Inconsistent Rx descriptor chain.\n",						   dev->name);					break;				}				sp->rx_skbuff[entry] = NULL;				skb_put(skb, pkt_len);				sp->rx_ringp[entry] = NULL;			}			skb->protocol = eth_type_trans(skb, dev);			netif_rx(skb);			sp->stats.rx_packets++;			sp->stats.rx_bytes += pkt_len;		}		entry = (++sp->cur_rx) % RX_RING_SIZE;		sp->rx_ring_state &= ~RrPostponed;		/* Refill the recently taken buffers.		   Do it one-by-one to handle traffic bursts better. */		if (alloc_ok && speedo_refill_rx_buf(dev, 0) == -1)			alloc_ok = 0;	}	/* Try hard to refill the recently taken buffers. */	speedo_refill_rx_buffers(dev, 1);	sp->last_rx_time = jiffies;	return 0;}static intspeedo_close(struct net_device *dev){	long ioaddr = dev->base_addr;	struct speedo_private *sp = (struct speedo_private *)dev->priv;	int i;	dev->start = 0;	netif_stop_queue(dev);	if (speedo_debug > 1)		printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n",			   dev->name, inw(ioaddr + SCBStatus));	/* Shut off the media monitoring timer. */	start_bh_atomic();	del_timer(&sp->timer);	end_bh_atomic();	/* Shutting down the chip nicely fails to disable flow control. So.. */	outl(PortPartialReset, ioaddr + SCBPort);	free_irq(dev->irq, dev);	/* Print a few items for debugging. */	if (speedo_debug > 3)		speedo_show_state(dev);    /* Free all the skbuffs in the Rx and Tx queues. */	for (i = 0; i < RX_RING_SIZE; i++) {		struct sk_buff *skb = sp->rx_skbuff[i];		sp->rx_skbuff[i] = 0;		/* Clear the Rx descriptors. */		if (skb)			dev_free_skb(skb);	}	for (i = 0; i < TX_RING_SIZE; i++) {		struct sk_buff *skb = sp->tx_skbuff[i];		sp->tx_skbuff[i] = 0;		/* Clear the Tx descriptors. */		if (skb)			dev_free_skb(skb);	}	/* Free multicast setting blocks. */	for (i = 0; sp->mc_setup_head != NULL; i++) {		struct speedo_mc_block *t;		t = sp->mc_setup_head->next;		kfree(sp->mc_setup_head);		sp->mc_setup_head = t;	}	sp->mc_setup_tail = NULL;	if (speedo_debug > 0)		printk(KERN_DEBUG "%s: %d multicast blocks dropped.\n", dev->name, i);	MOD_DEC_USE_COUNT;	return 0;}/* The Speedo-3 has an especially awkward and unusable method of getting   statistics out of the chip.  It takes an unpredictable length of time   for the dump-stats command to complete.  To avoid a busy-wait loop we   update the stats with the previous dump results, and then trigger a   new dump.   These problems are mitigated by the current /proc implementation, which   calls this routine first to judge the output length, and then to emit the   output.   Oh, and incoming frames are dropped while executing dump-stats!   */static struct enet_statistics *speedo_get_stats(struct net_device *dev){	struct speedo_private *sp = (struct speedo_private *)dev->priv;	long ioaddr = dev->base_addr;	/* Update only if the previous dump finished. 
*/	if (sp->lstats.done_marker == le32_to_cpu(0xA007)) {		sp->stats.tx_aborted_errors += le32_to_cpu(sp->lstats.tx_coll16_errs);		sp->stats.tx_window_errors += le32_to_cpu(sp->lstats.tx_late_colls);		sp->stats.tx_fifo_errors += le32_to_cpu(sp->lstats.tx_underruns);		sp->stats.tx_fifo_errors += le32_to_cpu(sp->lstats.tx_lost_carrier);		/*sp->stats.tx_deferred += le32_to_cpu(sp->lstats.tx_deferred);*/		sp->stats.collisions += le32_to_cpu(sp->lstats.tx_total_colls);		sp->stats.rx_crc_errors += le32_to_cpu(sp->lstats.rx_crc_errs);		sp->stats.rx_frame_errors += le32_to_cpu(sp->lstats.rx_align_errs);		sp->stats.rx_over_errors += le32_to_cpu(sp->lstats.rx_resource_errs);		sp->stats.rx_fifo_errors += le32_to_cpu(sp->lstats.rx_overrun_errs);		sp->stats.rx_length_errors += le32_to_cpu(sp->lstats.rx_runt_errs);		sp->lstats.done_marker = 0x0000;		if (dev->start) {			unsigned long flags;			/* Take a spinlock to make wait_for_cmd_done and sending the			   command atomic.  --SAW */			spin_lock_irqsave(&sp->lock, flags);			wait_for_cmd_done(ioaddr + SCBCmd);			outb(CUDumpStats, ioaddr + SCBCmd);			spin_unlock_irqrestore(&sp->lock, flags);		}	}	return &sp->stats;}static int speedo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd){	struct speedo_private *sp = (struct speedo_private *)dev->priv;	long ioaddr = dev->base_addr;	u16 *data = (u16 *)&rq->ifr_data;	int phy = sp->phy[0] & 0x1f;    switch(cmd) {	case SIOCDEVPRIVATE:		/* Get the address of the PHY in use. */		data[0] = phy;	case SIOCDEVPRIVATE+1:		/* Read the specified MII register. */		/* FIXME: these operations need to be serialized with MDIO		   access from the timeout handler.		   They are currently serialized only with MDIO access from the		   timer routine.  2000/05/09 SAW */		start_bh_atomic();		data[3] = mdio_read(ioaddr, data[0], data[1]);		end_bh_atomic();		return 0;	case SIOCDEVPRIVATE+2:		/* Write the specified MII register */	
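
Editor's note: the comment above speedo_get_stats() describes a deliberate pattern worth isolating: rather than busy-waiting on the unpredictable CUDumpStats command, the driver folds in the results of the *previous* dump and only then triggers a new one. The following is a minimal standalone sketch of that idea, not code from eepro100.c; the names (struct dump_area, hw_trigger_dump, DONE_MARKER) are invented for illustration, and the hardware is stubbed out so the sketch runs as an ordinary user-space program.

	/* Sketch of the deferred dump-stats pattern.  Control flow mirrors
	   speedo_get_stats(): fold in the previous dump's counters if its
	   completion marker is set, re-arm the marker, trigger the next
	   dump.  The caller never waits on the hardware. */
	#include <stdio.h>
	#include <stdint.h>

	#define DONE_MARKER 0xA007	/* hypothetical "dump complete" value */

	struct dump_area {		/* memory the NIC would DMA counters into */
		volatile uint32_t done_marker;
		uint32_t tx_underruns;
	};

	struct soft_stats {		/* totals accumulated in software */
		unsigned long tx_fifo_errors;
	};

	/* Stub standing in for the real dump command; a real NIC would
	   complete asynchronously, this stub completes instantly. */
	static void hw_trigger_dump(struct dump_area *area)
	{
		area->tx_underruns = 1;
		area->done_marker = DONE_MARKER;
	}

	static void update_stats(struct soft_stats *st, struct dump_area *area)
	{
		if (area->done_marker == DONE_MARKER) {	/* previous dump done? */
			st->tx_fifo_errors += area->tx_underruns;
			area->done_marker = 0;		/* re-arm the marker */
			hw_trigger_dump(area);		/* read on the NEXT call */
		}
		/* Otherwise report the stale totals unchanged. */
	}

	int main(void)
	{
		struct soft_stats st = { 0 };
		struct dump_area area = { 0 };

		hw_trigger_dump(&area);		/* prime the first dump */
		update_stats(&st, &area);
		update_stats(&st, &area);
		printf("tx_fifo_errors = %lu\n", st.tx_fifo_errors);	/* prints 2 */
		return 0;
	}

The trade-off, as the driver's own comment notes, is that the reported totals can lag by one dump interval, which is acceptable for statistics.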
