⭐ 虫虫下载站

📄 natsemi.c

📁 Linux development with the S3C2410; this package can be used to build the zImage the 2410 needs
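The description refers to building a kernel image for the Samsung S3C2410 (ARM9). As a rough sketch only, assuming a 2.4-era ARM cross toolchain with prefix arm-linux- (substitute your own), the zImage is produced from the configured kernel tree with `make ARCH=arm CROSS_COMPILE=arm-linux- zImage`, after which the image appears under arch/arm/boot/zImage.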
💻 C
📖 Page 1 of 5
		skb->dev = dev;	/* Mark as being used by this device. */
		np->rx_dma[i] = pci_map_single(np->pci_dev,
					skb->data, skb->len, PCI_DMA_FROMDEVICE);
		np->rx_ring[i].addr = cpu_to_le32(np->rx_dma[i]);
		np->rx_ring[i].cmd_status = cpu_to_le32(np->rx_buf_sz);
	}
	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = NULL;
		np->tx_ring[i].next_desc = cpu_to_le32(np->ring_dma
					+ sizeof(struct netdev_desc)
					* ((i+1) % TX_RING_SIZE + RX_RING_SIZE));
		np->tx_ring[i].cmd_status = 0;
	}
	dump_ring(dev);
}

static void drain_ring(struct net_device *dev)
{
	struct netdev_private *np = dev->priv;
	int i;

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].cmd_status = 0;
		np->rx_ring[i].addr = 0xBADF00D0;	/* An invalid address. */
		if (np->rx_skbuff[i]) {
			pci_unmap_single(np->pci_dev,
					np->rx_dma[i],
					np->rx_skbuff[i]->len,
					PCI_DMA_FROMDEVICE);
			dev_kfree_skb(np->rx_skbuff[i]);
		}
		np->rx_skbuff[i] = NULL;
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		if (np->tx_skbuff[i]) {
			pci_unmap_single(np->pci_dev,
					np->tx_dma[i],
					np->tx_skbuff[i]->len,
					PCI_DMA_TODEVICE);
			dev_kfree_skb(np->tx_skbuff[i]);
		}
		np->tx_skbuff[i] = NULL;
	}
}

static void free_ring(struct net_device *dev)
{
	struct netdev_private *np = dev->priv;
	pci_free_consistent(np->pci_dev,
			sizeof(struct netdev_desc) * (RX_RING_SIZE+TX_RING_SIZE),
			np->rx_ring, np->ring_dma);
}
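/*
 * Layout note (inferred from the code above): the Rx and Tx descriptor
 * rings live in one coherent DMA block of RX_RING_SIZE + TX_RING_SIZE
 * descriptors, Rx first.  That is why the Tx next_desc links above are
 * offset by RX_RING_SIZE, and why free_ring() can release both rings
 * with a single pci_free_consistent() call.
 */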
static int start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = dev->priv;
	unsigned entry;

	/* Note: Ordering is important here, set the field with the
	   "ownership" bit last, and only then increment cur_tx. */

	/* Calculate the next Tx descriptor entry. */
	entry = np->cur_tx % TX_RING_SIZE;

	np->tx_skbuff[entry] = skb;
	np->tx_dma[entry] = pci_map_single(np->pci_dev,
				skb->data, skb->len, PCI_DMA_TODEVICE);
	np->tx_ring[entry].addr = cpu_to_le32(np->tx_dma[entry]);

	spin_lock_irq(&np->lock);

	if (netif_device_present(dev)) {
		np->tx_ring[entry].cmd_status = cpu_to_le32(DescOwn | skb->len);
		/* StrongARM: Explicitly cache flush np->tx_ring and
		 * skb->data, skb->len. */
		wmb();
		np->cur_tx++;
		if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1) {
			netdev_tx_done(dev);
			if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1)
				netif_stop_queue(dev);
		}
		/* Wake the potentially-idle transmit channel. */
		writel(TxOn, dev->base_addr + ChipCmd);
	} else {
		dev_kfree_skb_irq(skb);
		np->stats.tx_dropped++;
	}
	spin_unlock_irq(&np->lock);

	dev->trans_start = jiffies;

	if (netif_msg_tx_queued(np)) {
		printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
			   dev->name, np->cur_tx, entry);
	}
	return 0;
}

static void netdev_tx_done(struct net_device *dev)
{
	struct netdev_private *np = dev->priv;

	for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
		int entry = np->dirty_tx % TX_RING_SIZE;
		if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescOwn))
			break;
		if (netif_msg_tx_done(np))
			printk(KERN_DEBUG
				"%s: tx frame #%d finished, status %#08x.\n",
				dev->name, np->dirty_tx,
				le32_to_cpu(np->tx_ring[entry].cmd_status));
		if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescPktOK)) {
			np->stats.tx_packets++;
			np->stats.tx_bytes += np->tx_skbuff[entry]->len;
		} else {	/* Various Tx errors. */
			int tx_status = le32_to_cpu(np->tx_ring[entry].cmd_status);
			if (tx_status & (DescTxAbort|DescTxExcColl))
				np->stats.tx_aborted_errors++;
			if (tx_status & DescTxFIFO)
				np->stats.tx_fifo_errors++;
			if (tx_status & DescTxCarrier)
				np->stats.tx_carrier_errors++;
			if (tx_status & DescTxOOWCol)
				np->stats.tx_window_errors++;
			np->stats.tx_errors++;
		}
		pci_unmap_single(np->pci_dev, np->tx_dma[entry],
					np->tx_skbuff[entry]->len,
					PCI_DMA_TODEVICE);
		/* Free the original skb. */
		dev_kfree_skb_irq(np->tx_skbuff[entry]);
		np->tx_skbuff[entry] = NULL;
	}
	if (netif_queue_stopped(dev)
		&& np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
		/* The ring is no longer full, wake queue. */
		netif_wake_queue(dev);
	}
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
{
	struct net_device *dev = dev_instance;
	struct netdev_private *np = dev->priv;
	long ioaddr = dev->base_addr;
	int boguscnt = max_interrupt_work;

	if (!netif_device_present(dev))
		return;
	do {
		/* Reading automatically acknowledges all int sources. */
		u32 intr_status = readl(ioaddr + IntrStatus);

		if (netif_msg_intr(np))
			printk(KERN_DEBUG "%s: Interrupt, status %#08x.\n",
				   dev->name, intr_status);

		if (intr_status == 0)
			break;

		if (intr_status & (IntrRxDone | IntrRxIntr))
			netdev_rx(dev);

		if (intr_status & (IntrTxDone | IntrTxIntr | IntrTxIdle | IntrTxErr)) {
			spin_lock(&np->lock);
			netdev_tx_done(dev);
			spin_unlock(&np->lock);
		}

		/* Abnormal error summary/uncommon events handlers. */
		if (intr_status & IntrAbnormalSummary)
			netdev_error(dev, intr_status);

		if (--boguscnt < 0) {
			printk(KERN_WARNING "%s: Too much work at interrupt, "
				   "status=%#08x.\n",
				   dev->name, intr_status);
			break;
		}
	} while (1);

	if (netif_msg_intr(np))
		printk(KERN_DEBUG "%s: exiting interrupt.\n", dev->name);
}
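/*
 * Note on the descriptor status tests above and below: DescOwn is bit 31
 * of cmd_status, so after le32_to_cpu() the bit can be checked with a
 * signed "< 0" comparison; netdev_rx() below walks completed Rx
 * descriptors with exactly that test.
 */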
/* This routine is logically part of the interrupt handler, but separated
   for clarity and better register allocation. */
static void netdev_rx(struct net_device *dev)
{
	struct netdev_private *np = dev->priv;
	int entry = np->cur_rx % RX_RING_SIZE;
	int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
	s32 desc_status = le32_to_cpu(np->rx_head_desc->cmd_status);

	/* If the driver owns the next entry it's a new packet. Send it up. */
	while (desc_status < 0) {	/* e.g. & DescOwn */
		if (netif_msg_rx_status(np))
			printk(KERN_DEBUG
				"  netdev_rx() entry %d status was %#08x.\n",
				entry, desc_status);
		if (--boguscnt < 0)
			break;
		if ((desc_status & (DescMore|DescPktOK|DescRxLong)) != DescPktOK) {
			if (desc_status & DescMore) {
				if (netif_msg_rx_err(np))
					printk(KERN_WARNING
						"%s: Oversized(?) Ethernet "
						"frame spanned multiple "
						"buffers, entry %#08x "
						"status %#08x.\n", dev->name,
						np->cur_rx, desc_status);
				np->stats.rx_length_errors++;
			} else {
				/* There was an error. */
				np->stats.rx_errors++;
				if (desc_status & (DescRxAbort|DescRxOver))
					np->stats.rx_over_errors++;
				if (desc_status & (DescRxLong|DescRxRunt))
					np->stats.rx_length_errors++;
				if (desc_status & (DescRxInvalid|DescRxAlign))
					np->stats.rx_frame_errors++;
				if (desc_status & DescRxCRC)
					np->stats.rx_crc_errors++;
			}
		} else {
			struct sk_buff *skb;
			/* Omit CRC size. */
			int pkt_len = (desc_status & DescSizeMask) - 4;
			/* Check if the packet is long enough to accept
			 * without copying to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak
				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb->dev = dev;
				skb_reserve(skb, 2);	/* 16 byte align the IP header. */
				pci_dma_sync_single(np->pci_dev, np->rx_dma[entry],
						np->rx_skbuff[entry]->len,
						PCI_DMA_FROMDEVICE);
#if HAS_IP_COPYSUM
				eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail,
						pkt_len, 0);
				skb_put(skb, pkt_len);
#else
				memcpy(skb_put(skb, pkt_len),
					np->rx_skbuff[entry]->tail, pkt_len);
#endif
			} else {
				pci_unmap_single(np->pci_dev, np->rx_dma[entry],
						np->rx_skbuff[entry]->len,
						PCI_DMA_FROMDEVICE);
				skb_put(skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			}
			skb->protocol = eth_type_trans(skb, dev);
			/* W/ hardware checksum: skb->ip_summed = CHECKSUM_UNNECESSARY; */
			netif_rx(skb);
			dev->last_rx = jiffies;
			np->stats.rx_packets++;
			np->stats.rx_bytes += pkt_len;
		}
		entry = (++np->cur_rx) % RX_RING_SIZE;
		np->rx_head_desc = &np->rx_ring[entry];
		desc_status = le32_to_cpu(np->rx_head_desc->cmd_status);
	}

	/* Refill the Rx ring buffers. */
	for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
		struct sk_buff *skb;
		entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_skbuff[entry] == NULL) {
			skb = dev_alloc_skb(np->rx_buf_sz);
			np->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;	/* Better luck next round. */
			skb->dev = dev;	/* Mark as being used by this device. */
			np->rx_dma[entry] = pci_map_single(np->pci_dev,
					skb->data, skb->len, PCI_DMA_FROMDEVICE);
			np->rx_ring[entry].addr = cpu_to_le32(np->rx_dma[entry]);
		}
		np->rx_ring[entry].cmd_status = cpu_to_le32(np->rx_buf_sz);
	}

	/* Restart Rx engine if stopped. */
	writel(RxOn, dev->base_addr + ChipCmd);
}
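/*
 * rx_copybreak policy, as used in netdev_rx() above: frames shorter than
 * rx_copybreak are copied into a freshly allocated skb (with the IP
 * header 16-byte aligned via skb_reserve) so the full-size ring buffer
 * stays mapped for reuse; longer frames hand the ring skb itself up the
 * stack, and the refill loop allocates a replacement.
 */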
static void netdev_error(struct net_device *dev, int intr_status)
{
	struct netdev_private *np = dev->priv;
	long ioaddr = dev->base_addr;

	spin_lock(&np->lock);
	if (intr_status & LinkChange) {
		u16 adv = mdio_read(dev, 1, MII_ADVERTISE);
		u16 lpa = mdio_read(dev, 1, MII_LPA);
		if (mdio_read(dev, 1, MII_BMCR) & BMCR_ANENABLE
		 && netif_msg_link(np)) {
			printk(KERN_INFO
				"%s: Autonegotiation advertising"
				" %#04x  partner %#04x.\n", dev->name,
				adv, lpa);
		}
		/* Read the MII interrupt status to clear the flag. */
		readw(ioaddr + MIntrStatus);
		check_link(dev);
	}
	if (intr_status & StatsMax) {
		__get_stats(dev);
	}
	if (intr_status & IntrTxUnderrun) {
		if ((np->tx_config & TxDrthMask) < 62)
			np->tx_config += 2;
		if (netif_msg_tx_err(np))
			printk(KERN_NOTICE
				"%s: increased Tx threshold, txcfg %#08x.\n",
				dev->name, np->tx_config);
		writel(np->tx_config, ioaddr + TxConfig);
	}
	if (intr_status & WOLPkt && netif_msg_wol(np)) {
		int wol_status = readl(ioaddr + WOLCmd);
		printk(KERN_NOTICE "%s: Link wake-up event %#08x\n",
			   dev->name, wol_status);
	}
	if (intr_status & RxStatusFIFOOver) {
		if (netif_msg_rx_err(np) && netif_msg_intr(np)) {
			printk(KERN_NOTICE "%s: Rx status FIFO overrun\n",
				dev->name);
		}
		np->stats.rx_fifo_errors++;
	}
	/* Hmmmmm, it's not clear how to recover from PCI faults. */
	if (intr_status & IntrPCIErr) {
		printk(KERN_NOTICE "%s: PCI error %#08x\n", dev->name,
			intr_status & IntrPCIErr);
		np->stats.tx_fifo_errors++;
		np->stats.rx_fifo_errors++;
	}
	spin_unlock(&np->lock);
}

static void __get_stats(struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	struct netdev_private *np = dev->priv;

	/* The chip only needs to report frames it silently dropped. */
	np->stats.rx_crc_errors += readl(ioaddr + RxCRCErrs);
	np->stats.rx_missed_errors += readl(ioaddr + RxMissed);
}

static struct net_device_stats *get_stats(struct net_device *dev)
{
	struct netdev_private *np = dev->priv;

	spin_lock_irq(&np->lock);
	if (netif_running(dev) && netif_device_present(dev))
		__get_stats(dev);
	spin_unlock_irq(&np->lock);

	return &np->stats;
}

/* The little-endian AUTODIN II ethernet CRC calculations.
   A big-endian version is also available.
   This is slow but compact code.  Do not use this routine for bulk data,
   use a table-based routine instead.
   This is common code and should be moved to net/core/crc.c.
   Chips may use the upper or lower CRC bits, and may reverse and/or invert
   them.  Select the endian-ness that results in minimal calculations.
*/
#if 0
static unsigned const ethernet_polynomial_le = 0xedb88320U;
static inline unsigned ether_crc_le(int length, unsigned char *data)
{
	unsigned int crc = 0xffffffff;	/* Initial value. */

	while (--length >= 0) {
		unsigned char current_octet = *data++;
		int bit;
		for (bit = 8; --bit >= 0; current_octet >>= 1) {
			if ((crc ^ current_octet) & 1) {
				crc >>= 1;
				crc ^= ethernet_polynomial_le;
			} else
				crc >>= 1;
		}
	}
	return crc;
}
#else
#define DP_POLYNOMIAL	0x04C11DB7
/* dp83815_crc - compute CRC for hash table entries */
static unsigned ether_crc_le(int length, unsigned char *data)
{
	u32 crc;
	u8 cur_byte;
	u8 msb;
	u8 byte, bit;

	crc = ~0;
	for (byte = 0; byte < length; byte++) {
		cur_byte = *data++;
		for (bit = 0; bit < 8; bit++) {
			msb = crc >> 31;
			crc <<= 1;
			if (msb ^ (cur_byte & 1)) {
				crc ^= DP_POLYNOMIAL;
				crc |= 1;
			}
			cur_byte >>= 1;
		}
	}
	crc >>= 23;

	return crc;
}
#endif
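/*
 * Illustration only, not part of the original driver: the comment above
 * recommends a table-based routine for bulk data.  A minimal sketch of
 * that approach, using the same reflected polynomial 0xedb88320 and
 * initial value as the bitwise version; the names crc32_le_tab,
 * crc32_le_init, and crc32_le_table are made up here.
 */
#if 0
static u32 crc32_le_tab[256];

static void crc32_le_init(void)
{
	u32 c;
	int n, k;

	for (n = 0; n < 256; n++) {
		c = n;
		for (k = 0; k < 8; k++)
			c = (c & 1) ? (c >> 1) ^ 0xedb88320U : c >> 1;
		crc32_le_tab[n] = c;	/* One entry per byte value. */
	}
}

static u32 crc32_le_table(int length, const unsigned char *data)
{
	u32 crc = 0xffffffff;	/* Same initial value as above. */

	/* One table lookup per byte instead of eight bit steps. */
	while (--length >= 0)
		crc = (crc >> 8) ^ crc32_le_tab[(crc ^ *data++) & 0xff];
	return crc;
}
#endif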
void set_bit_le(int offset, unsigned char *data)
{
	data[offset >> 3] |= (1 << (offset & 0x07));
}

#define HASH_TABLE	0x200
static void __set_rx_mode(struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	struct netdev_private *np = dev->priv;
	u8 mc_filter[64];	/* Multicast hash filter */
	u32 rx_mode;

	if (dev->flags & IFF_PROMISC) {	/* Set promiscuous. */
		/* Unconditionally log net taps. */
		printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n",
			dev->name);
		rx_mode = RxFilterEnable | AcceptBroadcast
			| AcceptAllMulticast | AcceptAllPhys | AcceptMyPhys;
	} else if ((dev->mc_count > multicast_filter_limit)
			|| (dev->flags & IFF_ALLMULTI)) {
		rx_mode = RxFilterEnable | AcceptBroadcast
			| AcceptAllMulticast | AcceptMyPhys;
	} else {
		struct dev_mc_list *mclist;
		int i;
		memset(mc_filter, 0, sizeof(mc_filter));
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
			 i++, mclist = mclist->next) {
