
📄 dl2k.c

📁 Linux development combined with the 2410; with it you can generate the zImage file the 2410 needs
💻 C
📖 Page 1 of 3
	/* Send one packet each time at 10Mbps mode */
	/* Tx coalescing loop must not exceed 8 */
	if (entry % tx_coalesce == 0 || np->speed == 10)
		txdesc->status |= cpu_to_le64 (TxIndicate);

	txdesc->fraginfo = cpu_to_le64 (pci_map_single (np->pdev, skb->data,
							skb->len,
							PCI_DMA_TODEVICE));
	txdesc->fraginfo |= cpu_to_le64 (skb->len) << 48;

	/* Clear TFDDone, then TxDMA starts to send this descriptor */
	txdesc->status &= ~cpu_to_le64 (TFDDone);

	DEBUG_TFD_DUMP (np);

	/* TxDMAPollNow */
	writel (readl (ioaddr + DMACtrl) | 0x00001000, ioaddr + DMACtrl);
	np->cur_tx++;

	if (np->cur_tx - np->old_tx < TX_QUEUE_LEN - 1 && np->speed != 10) {
		/* do nothing */
	} else {
		spin_lock_irqsave (&np->lock, flags);
		np->tx_full = 1;
		netif_stop_queue (dev);
		spin_unlock_irqrestore (&np->lock, flags);
	}

	/* The first TFDListPtr */
	if (readl (dev->base_addr + TFDListPtr0) == 0) {
		writel (np->tx_ring_dma + entry * sizeof (struct netdev_desc),
			dev->base_addr + TFDListPtr0);
		writel (0, dev->base_addr + TFDListPtr1);
	}

	if (np->old_tx > TX_RING_SIZE) {
		spin_lock_irqsave (&np->lock, flags);
		tx_shift = TX_RING_SIZE;
		np->old_tx -= tx_shift;
		np->cur_tx -= tx_shift;
		spin_unlock_irqrestore (&np->lock, flags);
	}

	/* NETDEV WATCHDOG timer */
	dev->trans_start = jiffies;
	return 0;
}

static void
rio_interrupt (int irq, void *dev_instance, struct pt_regs *rgs)
{
	struct net_device *dev = dev_instance;
	struct netdev_private *np;
	unsigned int_status;
	long ioaddr;
	int cnt = max_intrloop;

	ioaddr = dev->base_addr;
	np = dev->priv;
	spin_lock (&np->lock);
	while (1) {
		int_status = readw (ioaddr + IntStatus);
		writew (int_status, ioaddr + IntStatus);
		int_status &= DEFAULT_INTR;
		if (int_status == 0)
			break;
		/* Processing received packets */
		if (int_status & RxDMAComplete)
			receive_packet (dev);
		/* TxComplete interrupt */
		if ((int_status & TxComplete) || np->tx_full) {
			int tx_status;
			tx_status = readl (ioaddr + TxStatus);
			if (tx_status & 0x01)
				tx_error (dev, tx_status);
			/* Free used tx skbuffs */
			for (; np->cur_tx - np->old_tx > 0; np->old_tx++) {
				int entry = np->old_tx % TX_RING_SIZE;
				struct sk_buff *skb;
				if (!(np->tx_ring[entry].status & TFDDone))
					break;
				skb = np->tx_skbuff[entry];
				pci_unmap_single (np->pdev,
						  np->tx_ring[entry].fraginfo,
						  skb->len, PCI_DMA_TODEVICE);
				dev_kfree_skb_irq (skb);
				np->tx_skbuff[entry] = 0;
			}
		}
		/* If the ring is no longer full, clear tx_full and
		   call netif_wake_queue() */
		if (np->tx_full && np->cur_tx - np->old_tx < TX_QUEUE_LEN - 1) {
			if (np->speed != 10 || int_status & TxComplete) {
				np->tx_full = 0;
				netif_wake_queue (dev);
			}
		}
		/* Handle uncommon events */
		if (int_status &
		    (IntRequested | HostError | LinkEvent | UpdateStats))
			rio_error (dev, int_status);
		/* If too many interrupts occur here, disable all interrupts
		   except IntRequest. When CountDown reaches 0, IntRequest
		   will be caught by rio_error() to recover the interrupts. */
		if (--cnt < 0) {
			get_stats (dev);
			writel (1, ioaddr + CountDown);
			writew (IntRequested, ioaddr + IntEnable);
			break;
		}
	}
	spin_unlock (&np->lock);
}

static void
tx_error (struct net_device *dev, int tx_status)
{
	struct netdev_private *np;
	long ioaddr = dev->base_addr;
	int frame_id;
	int i;

	np = dev->priv;
	frame_id = (tx_status & 0xffff0000) >> 16;
	printk (KERN_ERR "%s: Transmit error, TxStatus %4.4x, FrameId %d.\n",
		dev->name, tx_status, frame_id);
	np->stats.tx_errors++;
	np->stats.tx_dropped++;
	/* Transmit Underrun */
	if (tx_status & 0x10) {
		np->stats.tx_fifo_errors++;
		writew (readw (ioaddr + TxStartThresh) + 0x10,
			ioaddr + TxStartThresh);
		/* Transmit Underrun needs to set TxReset, DMAReset, FIFOReset */
		writew (TxReset | DMAReset | FIFOReset | NetworkReset,
			ioaddr + ASICCtrl + 2);
		/* Wait for the ResetBusy bit to clear */
		for (i = 50; i > 0; i--) {
			if ((readw (ioaddr + ASICCtrl + 2) & ResetBusy) == 0)
				break;
			mdelay (1);
		}
		/* Free completed descriptors */
		for (; np->cur_tx - np->old_tx > 0; np->old_tx++) {
			int entry = np->old_tx % TX_RING_SIZE;
			struct sk_buff *skb;
			if (!(np->tx_ring[entry].status & TFDDone))
				break;
			skb = np->tx_skbuff[entry];
			pci_unmap_single (np->pdev, np->tx_ring[entry].fraginfo,
					  skb->len, PCI_DMA_TODEVICE);
			dev_kfree_skb_irq (skb);
			np->tx_skbuff[entry] = 0;
		}
		/* Reset TFDListPtr */
		writel (np->tx_ring_dma +
			np->old_tx * sizeof (struct netdev_desc),
			dev->base_addr + TFDListPtr0);
		writel (0, dev->base_addr + TFDListPtr1);

		/* Let TxStartThresh stay at its default value */
	}
	/* Late Collision */
	if (tx_status & 0x04) {
		np->stats.tx_fifo_errors++;
		/* TxReset and clear FIFO */
		writew (TxReset | FIFOReset, ioaddr + ASICCtrl + 2);
		/* Wait for reset to complete */
		for (i = 50; i > 0; i--) {
			if ((readw (ioaddr + ASICCtrl + 2) & ResetBusy) == 0)
				break;
			mdelay (1);
		}
		/* Let TxStartThresh stay at its default value */
	}
	/* Maximum Collisions */
#ifdef ETHER_STATS
	if (tx_status & 0x08)
		np->stats.collisions16++;
#else
	if (tx_status & 0x08)
		np->stats.collisions++;
#endif
	/* Restart the Tx */
	writel (readw (dev->base_addr + MACCtrl) | TxEnable, ioaddr + MACCtrl);
}

static int
receive_packet (struct net_device *dev)
{
	struct netdev_private *np = (struct netdev_private *) dev->priv;
	int entry = np->cur_rx % RX_RING_SIZE;
	int cnt = np->old_rx + RX_RING_SIZE - np->cur_rx;
	int rx_shift;

	if (np->old_rx > RX_RING_SIZE) {
		rx_shift = RX_RING_SIZE;
		np->old_rx -= rx_shift;
		np->cur_rx -= rx_shift;
	}
	DEBUG_RFD_DUMP (np, 1);
	/* If RFDDone, FrameStart and FrameEnd set, there is a new packet in. */
	while (1) {
		struct netdev_desc *desc = &np->rx_ring[entry];
		int pkt_len;
		u64 frame_status;

		if (!(desc->status & RFDDone) ||
		    !(desc->status & FrameStart) || !(desc->status & FrameEnd))
			break;

		/* Chip omits the CRC. */
		pkt_len = le64_to_cpu (desc->status & 0xffff);
		frame_status = le64_to_cpu (desc->status);
		if (--cnt < 0)
			break;
		DEBUG_PKT_DUMP (np, pkt_len);
		pci_dma_sync_single (np->pdev, desc->fraginfo, np->rx_buf_sz,
				     PCI_DMA_FROMDEVICE);
		/* Update rx error statistics, drop packet. */
		if (frame_status & 0x003f0000) {
			np->stats.rx_errors++;
			if (frame_status & 0x00300000)
				np->stats.rx_length_errors++;
			if (frame_status & 0x00010000)
				np->stats.rx_fifo_errors++;
			if (frame_status & 0x00060000)
				np->stats.rx_frame_errors++;
			if (frame_status & 0x00080000)
				np->stats.rx_crc_errors++;
		} else {
			struct sk_buff *skb;

			/* Small skbuffs for short packets */
			if (pkt_len > copy_thresh) {
				pci_unmap_single (np->pdev, desc->fraginfo,
						  np->rx_buf_sz,
						  PCI_DMA_FROMDEVICE);
				skb_put (skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			} else if ((skb = dev_alloc_skb (pkt_len + 2)) != NULL) {
				skb->dev = dev;
				/* 16 byte align the IP header */
				skb_reserve (skb, 2);
				eth_copy_and_sum (skb,
						  np->rx_skbuff[entry]->tail,
						  pkt_len, 0);
				skb_put (skb, pkt_len);
			}
			skb->protocol = eth_type_trans (skb, dev);
#if 0
			/* Checksum done by hw, but csum value unavailable. */
			if (!(frame_status & (TCPError | UDPError | IPError))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			}
#endif
			netif_rx (skb);
			dev->last_rx = jiffies;
		}
		entry = (++np->cur_rx) % RX_RING_SIZE;
	}
	/* Re-allocate skbuffs to fill the descriptor ring */
	for (; np->cur_rx - np->old_rx > 0; np->old_rx++) {
		struct sk_buff *skb;

		entry = np->old_rx % RX_RING_SIZE;
		/* Dropped packets don't need to be re-allocated */
		if (np->rx_skbuff[entry] == NULL) {
			skb = dev_alloc_skb (np->rx_buf_sz);
			if (skb == NULL) {
				np->rx_ring[entry].fraginfo = 0;
				printk (KERN_ERR
					"%s: Allocate Rx buffer error!",
					dev->name);
				break;
			}
			np->rx_skbuff[entry] = skb;
			skb->dev = dev;
			/* 16 byte align the IP header */
			skb_reserve (skb, 2);
			np->rx_ring[entry].fraginfo =
			    cpu_to_le64 (pci_map_single
					 (np->pdev, skb->tail, np->rx_buf_sz,
					  PCI_DMA_FROMDEVICE));
		}
		np->rx_ring[entry].fraginfo |=
		    cpu_to_le64 (np->rx_buf_sz) << 48;
		np->rx_ring[entry].status = 0;
	}
	/* RxDMAPollNow */
	writel (readl (dev->base_addr + DMACtrl) | 0x00000010,
		dev->base_addr + DMACtrl);
	DEBUG_RFD_DUMP (np, 2);
	return 0;
}

static void
rio_error (struct net_device *dev, int int_status)
{
	long ioaddr = dev->base_addr;
	struct netdev_private *np = dev->priv;
	u16 macctrl;

	/* Stop the down counter and recover the interrupts */
	if (int_status & IntRequested) {
		writew (0, ioaddr + IntEnable);
		writel (0, ioaddr + CountDown);
		/* Enable default interrupts */
		EnableInt ();
	}
	/* Link change event */
	if (int_status & LinkEvent) {
		if (mii_wait_link (dev, 10) == 0) {
			printk (KERN_INFO "%s: Link up\n", dev->name);
			if (np->phy_media)
				mii_get_media_pcs (dev);
			else
				mii_get_media (dev);
			macctrl = 0;
			macctrl |= (np->full_duplex) ? DuplexSelect : 0;
			macctrl |= (np->tx_flow) ? TxFlowControlEnable : 0;
			macctrl |= (np->rx_flow) ? RxFlowControlEnable : 0;
			writew (macctrl, ioaddr + MACCtrl);
		} else {
			printk (KERN_INFO "%s: Link off\n", dev->name);
		}
	}
	/* UpdateStats: read the statistics registers */
	if (int_status & UpdateStats) {
		get_stats (dev);
	}
	/* PCI Error: a catastrophic error related to the bus interface
	   occurred; set GlobalReset and HostReset to reset. */
	if (int_status & HostError) {
		printk (KERN_ERR "%s: HostError! IntStatus %4.4x.\n",
			dev->name, int_status);
		writew (GlobalReset | HostReset, ioaddr + ASICCtrl + 2);
		mdelay (500);
	}
}

static struct net_device_stats *
get_stats (struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	struct netdev_private *np = dev->priv;
	u16 temp1;
	u16 temp2;
	int i;

	/* All statistics registers need to be acknowledged,
	   else a statistics overflow could cause problems */
	np->stats.rx_packets += readl (ioaddr + FramesRcvOk);
	np->stats.tx_packets += readl (ioaddr + FramesXmtOk);
	np->stats.rx_bytes += readl (ioaddr + OctetRcvOk);
	np->stats.tx_bytes += readl (ioaddr + OctetXmtOk);
	temp1 = readw (ioaddr + FrameLostRxError);
	np->stats.rx_errors += temp1;
	np->stats.rx_missed_errors += temp1;
	np->stats.tx_dropped += readw (ioaddr + FramesAbortXSColls);
	temp1 = readl (ioaddr + SingleColFrames) +
	    readl (ioaddr + MultiColFrames) + readl (ioaddr + LateCollisions);
	temp2 = readw (ioaddr + CarrierSenseErrors);
	np->stats.tx_carrier_errors += temp2;
	np->stats.tx_errors += readw (ioaddr + FramesWEXDeferal) +
	    readl (ioaddr + FramesWDeferredXmt) + temp2;

	/* detailed rx_errors */
	np->stats.rx_length_errors += readw (ioaddr + FrameTooLongErrors);
	np->stats.rx_crc_errors += readw (ioaddr + FrameCheckSeqError);

	/* Clear all other statistics registers. */
	readw (ioaddr + InRangeLengthErrors);
	readw (ioaddr + MacControlFramesXmtd);
	readw (ioaddr + BcstFramesXmtdOk);
	readl (ioaddr + McstFramesXmtdOk);
	readl (ioaddr + BcstOctetXmtOk);
	readl (ioaddr + McstOctetXmtOk);
	readw (ioaddr + MacControlFramesRcvd);
	readw (ioaddr + BcstFramesRcvOk);
	readl (ioaddr + McstFramesRcvOk);
	readl (ioaddr + BcstOctetRcvOk);
	for (i = 0x100; i <= 0x150; i += 4)
		readl (ioaddr + i);
	readw (ioaddr + TxJumboFrames);
	readw (ioaddr + RxJumboFrames);
	readw (ioaddr + TCPCheckSumErrors);
	readw (ioaddr + UDPCheckSumErrors);
	readw (ioaddr + IPCheckSumErrors);
	return &np->stats;
}

int
change_mtu (struct net_device *dev, int new_mtu)
{
	struct netdev_private *np = dev->priv;
	int max = (np->jumbo) ? MAX_JUMBO : 1536;

	if ((new_mtu < 68) || (new_mtu > max)) {
		return -EINVAL;
	}
	dev->mtu = new_mtu;
	return 0;
}

#define CRC_POLY 0xedb88320

static unsigned
get_crc (unsigned char *p, int len)
{
	int bit;
	unsigned char byte;
	unsigned crc = 0xffffffff;

	while (--len >= 0) {
		byte = *p++;
		for (bit = 0; bit < 8; bit++, byte >>= 1) {
			crc = (crc >> 1) ^ (((crc ^ byte) & 1) ? CRC_POLY : 0);
		}
	}
	return crc;
}

static void
set_multicast (struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	u32 hash_table[2];
	u16 rx_mode = 0;
	int i;
	int bit;
	int index, crc;
	struct dev_mc_list *mclist;
	struct netdev_private *np = dev->priv;

	hash_table[0] = hash_table[1] = 0;
	/* RxFlowcontrol DA: 01-80-C2-00-00-01. Hash index = 0x39 */
	hash_table[1] |= 0x02000000;
	if (dev->flags & IFF_PROMISC) {
		/* Receive all frames promiscuously. */
		rx_mode = ReceiveAllFrames;
	} else if ((dev->flags & IFF_ALLMULTI) ||
		   (dev->mc_count > multicast_filter_limit)) {
		/* Receive broadcast and multicast frames */
		rx_mode = ReceiveBroadcast | ReceiveMulticast | ReceiveUnicast;
	} else if (dev->mc_count > 0) {
		/* Receive broadcast frames and multicast frames filtered
		   by the hash table */
		rx_mode =
		    ReceiveBroadcast | ReceiveMulticastHash | ReceiveUnicast;
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {
			crc = get_crc (mclist->dmi_addr, ETH_ALEN);
			for (index = 0, bit = 0; bit < 6; bit++, crc <<= 1) {
				if (crc & 0x80000000)
					index |= 1 << bit;
			}
			hash_table[index / 32] |= (1 << (index % 32));
		}
	} else {
		rx_mode = ReceiveBroadcast | ReceiveUnicast;
	}
	if (np->vlan) {
		/* ReceiveVLANMatch field in ReceiveMode */
		rx_mode |= ReceiveVLANMatch;
	}

	writel (hash_table[0], ioaddr + HashTable0);
	writel (hash_table[1], ioaddr + HashTable1);
	writew (rx_mode, ioaddr + ReceiveMode);
}

static int
rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
{
	int phy_addr;
	struct netdev_private *np = dev->priv;
	struct mii_data *miidata = (struct mii_data *) &rq->ifr_data;
#ifdef RIO_DEBUG
	struct ioctl_data *iodata = (struct ioctl_data *) (rq->ifr_data);
#endif
	u16 *data = (u16 *) &rq->ifr_data;
	struct netdev_desc *desc;
	int i;

	phy_addr = np->phy_addr;
	switch (cmd) {
	case SIOCDEVPRIVATE:
#ifdef RIO_DEBUG
		if (rio_ioctl_ext (dev, iodata) != 0)
			return -EOPNOTSUPP;
		break;
#else
		return -EOPNOTSUPP;
#endif
	case SIOCDEVPRIVATE + 1:
		miidata->out_value = mii_read (dev, phy_addr, miidata->reg_num);
		break;
	case SIOCDEVPRIVATE + 2:
		if (!capable (CAP_NET_ADMIN))
			return -EPERM;
		mii_write (dev, phy_addr, miidata->reg_num, miidata->in_value);
		break;
	case SIOCDEVPRIVATE + 3:
		np->rx_debug = (data[0] <= 7) ? data[0] : 0;
		printk ("rx_debug = %d\n", np->rx_debug);
		break;
	case SIOCDEVPRIVATE + 4:
		np->tx_debug = (data[0] <= 7) ? data[0] : 0;
		printk ("tx_debug = %d\n", np->tx_debug);
		break;
	case SIOCDEVPRIVATE + 5:
		np->tx_full = 1;
		netif_stop_queue (dev);
		break;
	case SIOCDEVPRIVATE + 6:
		np->tx_full = 0;
		netif_wake_queue (dev);
		break;
	case SIOCDEVPRIVATE + 7:
		printk
		    ("tx_full=%x cur_tx=%lx old_tx=%lx cur_rx=%lx old_rx=%lx\n",
		     np->tx_full, np->cur_tx, np->old_tx, np->cur_rx,
		     np->old_rx);
		break;
	case SIOCDEVPRIVATE + 8:
		for (i = 0; i < TX_RING_SIZE; i++) {
			desc = &np->tx_ring[i];
			printk
			    ("cur:%08x next:%08x status:%08x frag1:%08x frag0:%08x",
			     (u32) (np->tx_ring_dma + i * sizeof (*desc)),
			     (u32) desc->next_desc,
			     (u32) desc->status, (u32) (desc->fraginfo >> 32),
			     (u32) desc->fraginfo);
			printk ("\n");
		}
		printk ("\n");
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

#ifdef RIO_DEBUG
int
rio_ioctl_ext (struct net_device *dev, struct ioctl_data *iodata)
{
	struct netdev_private *np = dev->priv;
	int phy_addr = np->phy_addr;
	u32 hi, lo;
	int i;
	BMCR_t bmcr;
	BMSR_t bmsr;

	if (iodata == NULL)
		goto invalid_cmd;
	if (strcmp (iodata->signature, "rio") != 0)
		goto invalid_cmd;

	switch (iodata->cmd) {
	case 0:
		for (i = 0; i < TX_RING_SIZE; i++) {
			hi = np->tx_ring[i].status >> 32;
			lo = np->tx_ring[i].status;
			printk ("TFC=%08x %08x \n", hi, lo);
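
The page-1 listing ends mid-way through rio_ioctl_ext() and continues on page 2. One detail in set_multicast() worth spelling out is the multicast hash filter: each multicast address is run through get_crc(), the top six CRC bits become an index into the 64-bit hash table, and bit (index % 32) of HashTable0 or HashTable1 is set. The standalone sketch below reproduces that computation outside the kernel; get_crc() and the index loop are copied from the driver, while main() and the hard-coded 802.3x pause address are illustrative only. Per the driver's own comment, 01-80-C2-00-00-01 should land on hash index 0x39, i.e. bit 25 of HashTable1 (mask 0x02000000).

	/* Standalone illustration of the dl2k multicast hash-index computation.
	 * get_crc() and the 6-bit index extraction mirror the driver code above;
	 * main() and the sample address are for illustration only. */
	#include <stdio.h>

	#define CRC_POLY 0xedb88320u

	static unsigned get_crc (const unsigned char *p, int len)
	{
		unsigned crc = 0xffffffff;

		while (--len >= 0) {
			unsigned char byte = *p++;
			int bit;

			for (bit = 0; bit < 8; bit++, byte >>= 1)
				crc = (crc >> 1) ^ (((crc ^ byte) & 1) ? CRC_POLY : 0);
		}
		return crc;
	}

	int main (void)
	{
		/* 802.3x pause-frame DA named in set_multicast()'s comment */
		unsigned char da[6] = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x01 };
		unsigned crc = get_crc (da, 6);
		int index = 0;
		int bit;

		/* Top six CRC bits form the index; CRC bit 31 becomes index bit 0 */
		for (bit = 0; bit < 6; bit++, crc <<= 1)
			if (crc & 0x80000000)
				index |= 1 << bit;

		/* index / 32 picks HashTable0 or HashTable1, index % 32 the bit */
		printf ("hash index = 0x%02x -> HashTable%d bit %d (mask 0x%08x)\n",
			index, index / 32, index % 32, 1u << (index % 32));
		return 0;
	}

Compiled with any plain C compiler, this prints the register and bit mask the driver would program for that address, which is a quick way to check whether a given multicast address passes the hash filter.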

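A second detail from the listing: the 64-bit fraginfo word of each descriptor packs the DMA buffer address into its low 48 bits and the fragment length into bits 48-63, which is what the `cpu_to_le64 (skb->len) << 48` and `cpu_to_le64 (np->rx_buf_sz) << 48` lines do. Below is a minimal sketch of that split, assuming a little-endian host where cpu_to_le64() is the identity; the helper names are invented for this illustration and are not part of the driver.

	/* Illustration of the 48-bit-address / 16-bit-length split used in the
	 * dl2k fraginfo descriptor field. Helper names are made up for this sketch. */
	#include <stdint.h>
	#include <stdio.h>
	#include <inttypes.h>

	#define FRAG_ADDR_MASK 0x0000ffffffffffffULL	/* low 48 bits: DMA address */

	static uint64_t frag_pack (uint64_t dma_addr, uint16_t len)
	{
		return (dma_addr & FRAG_ADDR_MASK) | ((uint64_t) len << 48);
	}

	static void frag_unpack (uint64_t fraginfo, uint64_t *dma_addr, uint16_t *len)
	{
		*dma_addr = fraginfo & FRAG_ADDR_MASK;
		*len = (uint16_t) (fraginfo >> 48);
	}

	int main (void)
	{
		uint64_t word = frag_pack (0x12345678ULL, 1514);
		uint64_t addr;
		uint16_t len;

		frag_unpack (word, &addr, &len);
		printf ("fraginfo=0x%016" PRIx64 " addr=0x%" PRIx64 " len=%u\n",
			word, addr, len);
		return 0;
	}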