dl2k.c — Linux kernel source code (C)
Page 1 of 4

	np->timer.function = &rio_timer;
	add_timer (&np->timer);

	/* Start Tx/Rx */
	writel (readl (ioaddr + MACCtrl) | StatsEnable | RxEnable | TxEnable,
			ioaddr + MACCtrl);

	macctrl = 0;
	macctrl |= (np->vlan) ? AutoVLANuntagging : 0;
	macctrl |= (np->full_duplex) ? DuplexSelect : 0;
	macctrl |= (np->tx_flow) ? TxFlowControlEnable : 0;
	macctrl |= (np->rx_flow) ? RxFlowControlEnable : 0;
	writew (macctrl, ioaddr + MACCtrl);

	netif_start_queue (dev);

	/* Enable default interrupts */
	EnableInt ();
	return 0;
}

static void
rio_timer (unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	unsigned int entry;
	int next_tick = 1*HZ;
	unsigned long flags;

	spin_lock_irqsave(&np->rx_lock, flags);
	/* Recover rx ring exhausted error */
	if (np->cur_rx - np->old_rx >= RX_RING_SIZE) {
		printk(KERN_INFO "Try to recover rx ring exhausted...\n");
		/* Re-allocate skbuffs to fill the descriptor ring */
		for (; np->cur_rx - np->old_rx > 0; np->old_rx++) {
			struct sk_buff *skb;
			entry = np->old_rx % RX_RING_SIZE;
			/* Dropped packets don't need to re-allocate */
			if (np->rx_skbuff[entry] == NULL) {
				skb = dev_alloc_skb (np->rx_buf_sz);
				if (skb == NULL) {
					np->rx_ring[entry].fraginfo = 0;
					printk (KERN_INFO
						"%s: Still unable to re-allocate Rx skbuff.#%d\n",
						dev->name, entry);
					break;
				}
				np->rx_skbuff[entry] = skb;
				/* 16 byte align the IP header */
				skb_reserve (skb, 2);
				np->rx_ring[entry].fraginfo =
				    cpu_to_le64 (pci_map_single
					 (np->pdev, skb->data, np->rx_buf_sz,
					  PCI_DMA_FROMDEVICE));
			}
			np->rx_ring[entry].fraginfo |=
			    cpu_to_le64((u64)np->rx_buf_sz << 48);
			np->rx_ring[entry].status = 0;
		} /* end for */
	} /* end if */
	spin_unlock_irqrestore (&np->rx_lock, flags);
	np->timer.expires = jiffies + next_tick;
	add_timer(&np->timer);
}

static void
rio_tx_timeout (struct net_device *dev)
{
	long ioaddr = dev->base_addr;

	printk (KERN_INFO "%s: Tx timed out (%4.4x), is buffer full?\n",
		dev->name, readl (ioaddr + TxStatus));
	rio_free_tx(dev, 0);
	dev->if_port = 0;
	dev->trans_start = jiffies;
}

/* allocate and initialize Tx and Rx descriptors */
static void
alloc_list (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	np->cur_rx = np->cur_tx = 0;
	np->old_rx = np->old_tx = 0;
	np->rx_buf_sz = (dev->mtu <= 1500 ? PACKET_SIZE : dev->mtu + 32);

	/* Initialize Tx descriptors; TFDListPtr is set in start_xmit(). */
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = NULL;
		np->tx_ring[i].status = cpu_to_le64 (TFDDone);
		np->tx_ring[i].next_desc = cpu_to_le64 (np->tx_ring_dma +
					      ((i+1)%TX_RING_SIZE) *
					      sizeof (struct netdev_desc));
	}

	/* Initialize Rx descriptors */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].next_desc = cpu_to_le64 (np->rx_ring_dma +
						((i + 1) % RX_RING_SIZE) *
						sizeof (struct netdev_desc));
		np->rx_ring[i].status = 0;
		np->rx_ring[i].fraginfo = 0;
		np->rx_skbuff[i] = NULL;
	}

	/* Allocate the rx buffers */
	for (i = 0; i < RX_RING_SIZE; i++) {
		/* Allocate a fixed-size skbuff */
		struct sk_buff *skb = dev_alloc_skb (np->rx_buf_sz);
		np->rx_skbuff[i] = skb;
		if (skb == NULL) {
			printk (KERN_ERR
				"%s: alloc_list: allocate Rx buffer error! ",
				dev->name);
			break;
		}
		skb_reserve (skb, 2);	/* 16 byte align the IP header. */
		/* Rubicon now supports 40 bits of addressing space. */
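		/* fraginfo packs two fields into one little-endian 64-bit
		 * word: the buffer's DMA address in the low 48 bits (see
		 * desc_to_dma() below, which masks with DMA_48BIT_MASK)
		 * and the fragment length in bits 48-63, which is why
		 * rx_buf_sz is shifted left by 48 here. */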
		np->rx_ring[i].fraginfo =
		    cpu_to_le64 (pci_map_single (
				  np->pdev, skb->data, np->rx_buf_sz,
				  PCI_DMA_FROMDEVICE));
		np->rx_ring[i].fraginfo |= cpu_to_le64((u64)np->rx_buf_sz << 48);
	}

	/* Set RFDListPtr */
	writel (np->rx_ring_dma, dev->base_addr + RFDListPtr0);
	writel (0, dev->base_addr + RFDListPtr1);

	return;
}

static int
start_xmit (struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	struct netdev_desc *txdesc;
	unsigned entry;
	long ioaddr;
	u64 tfc_vlan_tag = 0;

	if (np->link_status == 0) {	/* Link Down */
		dev_kfree_skb(skb);
		return 0;
	}
	ioaddr = dev->base_addr;
	entry = np->cur_tx % TX_RING_SIZE;
	np->tx_skbuff[entry] = skb;
	txdesc = &np->tx_ring[entry];

#if 0
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txdesc->status |=
		    cpu_to_le64 (TCPChecksumEnable | UDPChecksumEnable |
				 IPChecksumEnable);
	}
#endif
	if (np->vlan) {
		tfc_vlan_tag = VLANTagInsert |
		    ((u64)np->vlan << 32) |
		    ((u64)skb->priority << 45);
	}
	txdesc->fraginfo = cpu_to_le64 (pci_map_single (np->pdev, skb->data,
							skb->len,
							PCI_DMA_TODEVICE));
	txdesc->fraginfo |= cpu_to_le64((u64)skb->len << 48);

	/* DL2K bug: DMA fails to get next descriptor ptr in 10Mbps mode
	 * Work around: Always use 1 descriptor in 10Mbps mode */
	if (entry % np->tx_coalesce == 0 || np->speed == 10)
		txdesc->status = cpu_to_le64 (entry | tfc_vlan_tag |
					      WordAlignDisable |
					      TxDMAIndicate |
					      (1 << FragCountShift));
	else
		txdesc->status = cpu_to_le64 (entry | tfc_vlan_tag |
					      WordAlignDisable |
					      (1 << FragCountShift));

	/* TxDMAPollNow */
	writel (readl (ioaddr + DMACtrl) | 0x00001000, ioaddr + DMACtrl);

	/* Schedule ISR */
	writel(10000, ioaddr + CountDown);
	np->cur_tx = (np->cur_tx + 1) % TX_RING_SIZE;
	if ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE
			< TX_QUEUE_LEN - 1 && np->speed != 10) {
		/* do nothing */
	} else if (!netif_queue_stopped(dev)) {
		netif_stop_queue (dev);
	}

	/* The first TFDListPtr */
	if (readl (dev->base_addr + TFDListPtr0) == 0) {
		writel (np->tx_ring_dma + entry * sizeof (struct netdev_desc),
			dev->base_addr + TFDListPtr0);
		writel (0, dev->base_addr + TFDListPtr1);
	}

	/* NETDEV WATCHDOG timer */
	dev->trans_start = jiffies;
	return 0;
}

static irqreturn_t
rio_interrupt (int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct netdev_private *np;
	unsigned int_status;
	long ioaddr;
	int cnt = max_intrloop;
	int handled = 0;

	ioaddr = dev->base_addr;
	np = netdev_priv(dev);
	while (1) {
		int_status = readw (ioaddr + IntStatus);
		writew (int_status, ioaddr + IntStatus);
		int_status &= DEFAULT_INTR;
		if (int_status == 0 || --cnt < 0)
			break;
		handled = 1;
		/* Processing received packets */
		if (int_status & RxDMAComplete)
			receive_packet (dev);
		/* TxDMAComplete interrupt */
		if ((int_status & (TxDMAComplete|IntRequested))) {
			int tx_status;
			tx_status = readl (ioaddr + TxStatus);
			if (tx_status & 0x01)
				tx_error (dev, tx_status);
			/* Free used tx skbuffs */
			rio_free_tx (dev, 1);
		}

		/* Handle uncommon events */
		if (int_status &
		    (HostError | LinkEvent | UpdateStats))
			rio_error (dev, int_status);
	}
	if (np->cur_tx != np->old_tx)
		writel (100, ioaddr + CountDown);
	return IRQ_RETVAL(handled);
}

static inline dma_addr_t desc_to_dma(struct netdev_desc *desc)
{
	return le64_to_cpu(desc->fraginfo) & DMA_48BIT_MASK;
}

static void
rio_free_tx (struct net_device *dev, int irq)
{
	struct netdev_private *np = netdev_priv(dev);
	int entry = np->old_tx % TX_RING_SIZE;
	int tx_use = 0;
	unsigned long flag = 0;
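
	/* rio_free_tx() runs both from the interrupt handler (irq = 1,
	 * see rio_interrupt() above) and outside it (irq = 0, e.g. from
	 * rio_tx_timeout()).  In the first case interrupts are already
	 * disabled, so a plain spin_lock suffices; in the second case
	 * spin_lock_irqsave() must disable them so the interrupt handler
	 * cannot deadlock on tx_lock. */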
	if (irq)
		spin_lock(&np->tx_lock);
	else
		spin_lock_irqsave(&np->tx_lock, flag);

	/* Free used tx skbuffs */
	while (entry != np->cur_tx) {
		struct sk_buff *skb;

		if (!(np->tx_ring[entry].status & cpu_to_le64(TFDDone)))
			break;
		skb = np->tx_skbuff[entry];
		pci_unmap_single (np->pdev,
				  desc_to_dma(&np->tx_ring[entry]),
				  skb->len, PCI_DMA_TODEVICE);
		if (irq)
			dev_kfree_skb_irq (skb);
		else
			dev_kfree_skb (skb);

		np->tx_skbuff[entry] = NULL;
		entry = (entry + 1) % TX_RING_SIZE;
		tx_use++;
	}
	if (irq)
		spin_unlock(&np->tx_lock);
	else
		spin_unlock_irqrestore(&np->tx_lock, flag);
	np->old_tx = entry;

	/* If the ring is no longer full, clear tx_full and
	   call netif_wake_queue() */
	if (netif_queue_stopped(dev) &&
	    ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE
	    < TX_QUEUE_LEN - 1 || np->speed == 10)) {
		netif_wake_queue (dev);
	}
}

static void
tx_error (struct net_device *dev, int tx_status)
{
	struct netdev_private *np;
	long ioaddr = dev->base_addr;
	int frame_id;
	int i;

	np = netdev_priv(dev);

	frame_id = (tx_status & 0xffff0000);
	printk (KERN_ERR "%s: Transmit error, TxStatus %4.4x, FrameId %d.\n",
		dev->name, tx_status, frame_id);
	np->stats.tx_errors++;
	/* Transmit Underrun */
	if (tx_status & 0x10) {
		np->stats.tx_fifo_errors++;
		writew (readw (ioaddr + TxStartThresh) + 0x10,
			ioaddr + TxStartThresh);
		/* Transmit Underrun needs TxReset, DMAReset, FIFOReset */
		writew (TxReset | DMAReset | FIFOReset | NetworkReset,
			ioaddr + ASICCtrl + 2);
		/* Wait for ResetBusy bit clear */
		for (i = 50; i > 0; i--) {
			if ((readw (ioaddr + ASICCtrl + 2) & ResetBusy) == 0)
				break;
			mdelay (1);
		}
		rio_free_tx (dev, 1);
		/* Reset TFDListPtr */
		writel (np->tx_ring_dma +
			np->old_tx * sizeof (struct netdev_desc),
			dev->base_addr + TFDListPtr0);
		writel (0, dev->base_addr + TFDListPtr1);

		/* Let TxStartThresh stay default value */
	}
	/* Late Collision */
	if (tx_status & 0x04) {
		np->stats.tx_fifo_errors++;
		/* TxReset and clear FIFO */
		writew (TxReset | FIFOReset, ioaddr + ASICCtrl + 2);
		/* Wait reset done */
		for (i = 50; i > 0; i--) {
			if ((readw (ioaddr + ASICCtrl + 2) & ResetBusy) == 0)
				break;
			mdelay (1);
		}
		/* Let TxStartThresh stay default value */
	}
	/* Maximum Collisions */
#ifdef ETHER_STATS
	if (tx_status & 0x08)
		np->stats.collisions16++;
#else
	if (tx_status & 0x08)
		np->stats.collisions++;
#endif
	/* Restart the Tx */
	writel (readw (dev->base_addr + MACCtrl) | TxEnable, ioaddr + MACCtrl);
}

static int
receive_packet (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int entry = np->cur_rx % RX_RING_SIZE;
	int cnt = 30;

	/* If RFDDone, FrameStart and FrameEnd set, there is a new packet in. */
	while (1) {
		struct netdev_desc *desc = &np->rx_ring[entry];
		int pkt_len;
		u64 frame_status;

		if (!(desc->status & cpu_to_le64(RFDDone)) ||
		    !(desc->status & cpu_to_le64(FrameStart)) ||
		    !(desc->status & cpu_to_le64(FrameEnd)))
			break;

		/* Chip omits the CRC. */
		frame_status = le64_to_cpu(desc->status);
		pkt_len = frame_status & 0xffff;
		if (--cnt < 0)
			break;
		/* Update rx error statistics, drop packet. */
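		/* Errored frames only update the counters below.  Good
		 * frames either hand the mapped DMA buffer straight to the
		 * stack (pkt_len > copy_thresh) or, for short packets, are
		 * copied into a freshly allocated skb so the receive
		 * buffer can stay in the ring. */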
		if (frame_status & RFS_Errors) {
			np->stats.rx_errors++;
			if (frame_status & (RxRuntFrame | RxLengthError))
				np->stats.rx_length_errors++;
			if (frame_status & RxFCSError)
				np->stats.rx_crc_errors++;
			if (frame_status & RxAlignmentError && np->speed != 1000)
				np->stats.rx_frame_errors++;
			if (frame_status & RxFIFOOverrun)
				np->stats.rx_fifo_errors++;
		} else {
			struct sk_buff *skb = NULL;

			/* Small skbuffs for short packets */
			if (pkt_len > copy_thresh) {
				pci_unmap_single (np->pdev,
						  desc_to_dma(desc),
						  np->rx_buf_sz,
						  PCI_DMA_FROMDEVICE);
				skb_put (skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			} else if ((skb = dev_alloc_skb (pkt_len + 2)) != NULL) {
				pci_dma_sync_single_for_cpu(np->pdev,
							    desc_to_dma(desc),
							    np->rx_buf_sz,
							    PCI_DMA_FROMDEVICE);
				/* 16 byte align the IP header */
				skb_reserve (skb, 2);
				skb_copy_to_linear_data (skb,
						  np->rx_skbuff[entry]->data,
						  pkt_len);
				skb_put (skb, pkt_len);
				pci_dma_sync_single_for_device(np->pdev,
							       desc_to_dma(desc),
							       np->rx_buf_sz,
							       PCI_DMA_FROMDEVICE);
			}
			if (skb == NULL) {
				/* dev_alloc_skb() failed above: drop the
				 * frame instead of dereferencing NULL. */
				np->stats.rx_dropped++;
			} else {
				skb->protocol = eth_type_trans (skb, dev);
#if 0
				/* Checksum done by hw, but csum value unavailable. */
				if (np->pdev->pci_rev_id >= 0x0c &&
					!(frame_status & (TCPError | UDPError | IPError))) {
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				}
#endif
				netif_rx (skb);
				dev->last_rx = jiffies;
			}
		}
		entry = (entry + 1) % RX_RING_SIZE;
	}
	spin_lock(&np->rx_lock);
	np->cur_rx = entry;
	/* Re-allocate skbuffs to fill the descriptor ring */
	entry = np->old_rx;
	while (entry != np->cur_rx) {
		struct sk_buff *skb;
		/* Dropped packets don't need to re-allocate */
		if (np->rx_skbuff[entry] == NULL) {
			skb = dev_alloc_skb (np->rx_buf_sz);
			if (skb == NULL) {
				np->rx_ring[entry].fraginfo = 0;
				printk (KERN_INFO
					"%s: receive_packet: "
					"Unable to re-allocate Rx skbuff.#%d\n",
					dev->name, entry);
				break;
			}
			np->rx_skbuff[entry] = skb;
			/* 16 byte align the IP header */
			skb_reserve (skb, 2);
			np->rx_ring[entry].fraginfo =
			    cpu_to_le64 (pci_map_single
					 (np->pdev, skb->data, np->rx_buf_sz,
					  PCI_DMA_FROMDEVICE));
