
📄 8139cp.c

📁 Linux development combined with the S3C2410; can be used to build the zImage file the 2410 requires
💻 C
📖 Page 1 of 3
		mapping =
		cp->rx_skb[rx_tail].mapping =
			pci_map_single(cp->pdev, new_skb->tail,
				       buflen, PCI_DMA_FROMDEVICE);
		cp->rx_skb[rx_tail].skb = new_skb;

		cp_rx_skb(cp, skb);

rx_next:
		if (rx_tail == (CP_RX_RING_SIZE - 1))
			cp->rx_ring[rx_tail].opts1 =
				cpu_to_le32(DescOwn | RingEnd | cp->rx_buf_sz);
		else
			cp->rx_ring[rx_tail].opts1 =
				cpu_to_le32(DescOwn | cp->rx_buf_sz);
		cp->rx_ring[rx_tail].opts2 = 0;
		cp->rx_ring[rx_tail].addr_lo = cpu_to_le32(mapping);
		rx_tail = NEXT_RX(rx_tail);
	}

	if (!rx_work)
		printk(KERN_WARNING "%s: rx work limit reached\n", cp->dev->name);

	cp->rx_tail = rx_tail;
}

static void cp_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *dev = dev_instance;
	struct cp_private *cp = dev->priv;
	u16 status;

	status = cpr16(IntrStatus);
	if (!status || (status == 0xFFFF))
		return;

	if (netif_msg_intr(cp))
		printk(KERN_DEBUG "%s: intr, status %04x cmd %02x cpcmd %04x\n",
		       dev->name, status, cpr8(Cmd), cpr16(CpCmd));

	spin_lock(&cp->lock);

	if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr))
		cp_rx(cp);
	if (status & (TxOK | TxErr | TxEmpty | SWInt))
		cp_tx(cp);

	cpw16_f(IntrStatus, status);

	if (status & PciErr) {
		u16 pci_status;

		pci_read_config_word(cp->pdev, PCI_STATUS, &pci_status);
		pci_write_config_word(cp->pdev, PCI_STATUS, pci_status);
		printk(KERN_ERR "%s: PCI bus error, status=%04x, PCI status=%04x\n",
		       dev->name, status, pci_status);
	}

	spin_unlock(&cp->lock);
}

static void cp_tx (struct cp_private *cp)
{
	unsigned tx_head = cp->tx_head;
	unsigned tx_tail = cp->tx_tail;

	while (tx_tail != tx_head) {
		struct sk_buff *skb;
		u32 status;

		rmb();
		status = le32_to_cpu(cp->tx_ring[tx_tail].opts1);
		if (status & DescOwn)
			break;

		skb = cp->tx_skb[tx_tail].skb;
		if (!skb)
			BUG();

		pci_unmap_single(cp->pdev, cp->tx_skb[tx_tail].mapping,
				 skb->len, PCI_DMA_TODEVICE);

		if (status & LastFrag) {
			if (status & (TxError | TxFIFOUnder)) {
				if (netif_msg_tx_err(cp))
					printk(KERN_DEBUG "%s: tx err, status 0x%x\n",
					       cp->dev->name, status);
				cp->net_stats.tx_errors++;
				if (status & TxOWC)
					cp->net_stats.tx_window_errors++;
				if (status & TxMaxCol)
					cp->net_stats.tx_aborted_errors++;
				if (status & TxLinkFail)
					cp->net_stats.tx_carrier_errors++;
				if (status & TxFIFOUnder)
					cp->net_stats.tx_fifo_errors++;
			} else {
				cp->net_stats.collisions +=
					((status >> TxColCntShift) & TxColCntMask);
				cp->net_stats.tx_packets++;
				cp->net_stats.tx_bytes += skb->len;
				if (netif_msg_tx_done(cp))
					printk(KERN_DEBUG "%s: tx done, slot %d\n",
					       cp->dev->name, tx_tail);
			}
			dev_kfree_skb_irq(skb);
		}

		cp->tx_skb[tx_tail].skb = NULL;

		tx_tail = NEXT_TX(tx_tail);
	}

	cp->tx_tail = tx_tail;

	if (netif_queue_stopped(cp->dev) && (TX_BUFFS_AVAIL(cp) > 1))
		netif_wake_queue(cp->dev);
}
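/* Queue one skb for transmission.  A linear skb consumes a single
 * descriptor; a scatter-gather skb consumes one descriptor per
 * fragment.  In the fragmented case the first descriptor is handed
 * to the hardware (DescOwn set) only after all later fragments are
 * in place, so the chip never races ahead of the driver.
 */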
static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
{
	struct cp_private *cp = dev->priv;
	unsigned entry;
	u32 eor;

	spin_lock_irq(&cp->lock);

	if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) {
		netif_stop_queue(dev);
		spin_unlock_irq(&cp->lock);
		return 1;
	}

	entry = cp->tx_head;
	eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
	if (skb_shinfo(skb)->nr_frags == 0) {
		struct cp_desc *txd = &cp->tx_ring[entry];
		u32 mapping, len;

		len = skb->len;
		mapping = pci_map_single(cp->pdev, skb->data, len, PCI_DMA_TODEVICE);
		eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
		txd->opts2 = 0;
		txd->addr_lo = cpu_to_le32(mapping);
		wmb();

#ifdef CP_TX_CHECKSUM
		txd->opts1 = cpu_to_le32(eor | len | DescOwn | FirstFrag |
			LastFrag | IPCS | UDPCS | TCPCS);
#else
		txd->opts1 = cpu_to_le32(eor | len | DescOwn | FirstFrag |
			LastFrag);
#endif
		wmb();

		cp->tx_skb[entry].skb = skb;
		cp->tx_skb[entry].mapping = mapping;
		cp->tx_skb[entry].frag = 0;
		entry = NEXT_TX(entry);
	} else {
		struct cp_desc *txd;
		u32 first_len, first_mapping;
		int frag, first_entry = entry;

		/* We must give this initial chunk to the device last.
		 * Otherwise we could race with the device.
		 */
		first_len = skb->len - skb->data_len;
		first_mapping = pci_map_single(cp->pdev, skb->data,
					       first_len, PCI_DMA_TODEVICE);
		cp->tx_skb[entry].skb = skb;
		cp->tx_skb[entry].mapping = first_mapping;
		cp->tx_skb[entry].frag = 1;
		entry = NEXT_TX(entry);

		for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
			skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
			u32 len, mapping;
			u32 ctrl;

			len = this_frag->size;
			mapping = pci_map_single(cp->pdev,
						 ((void *) page_address(this_frag->page) +
						  this_frag->page_offset),
						 len, PCI_DMA_TODEVICE);
			eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
#ifdef CP_TX_CHECKSUM
			ctrl = eor | len | DescOwn | IPCS | UDPCS | TCPCS;
#else
			ctrl = eor | len | DescOwn;
#endif
			if (frag == skb_shinfo(skb)->nr_frags - 1)
				ctrl |= LastFrag;

			txd = &cp->tx_ring[entry];
			txd->opts2 = 0;
			txd->addr_lo = cpu_to_le32(mapping);
			wmb();

			txd->opts1 = cpu_to_le32(ctrl);
			wmb();

			cp->tx_skb[entry].skb = skb;
			cp->tx_skb[entry].mapping = mapping;
			cp->tx_skb[entry].frag = frag + 2;
			entry = NEXT_TX(entry);
		}

		txd = &cp->tx_ring[first_entry];
		txd->opts2 = 0;
		txd->addr_lo = cpu_to_le32(first_mapping);
		wmb();

#ifdef CP_TX_CHECKSUM
		txd->opts1 = cpu_to_le32(first_len | FirstFrag | DescOwn |
					 IPCS | UDPCS | TCPCS);
#else
		txd->opts1 = cpu_to_le32(first_len | FirstFrag | DescOwn);
#endif
		wmb();
	}

	cp->tx_head = entry;
	if (netif_msg_tx_queued(cp))
		printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
		       dev->name, entry, skb->len);
	if (TX_BUFFS_AVAIL(cp) < 0)
		BUG();
	if (TX_BUFFS_AVAIL(cp) == 0)
		netif_stop_queue(dev);

	spin_unlock_irq(&cp->lock);

	cpw8(TxPoll, NormalTxPoll);
	dev->trans_start = jiffies;

	return 0;
}
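/* Receive filter setup follows.  Multicast filtering uses the usual
 * 8139-family hash scheme: the top six bits of the big-endian CRC-32
 * of each multicast address (computed bit by bit in ether_crc() below)
 * select one of 64 bits across the two 32-bit hash registers at MAR0.
 */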
/* Set or clear the multicast filter for this adaptor.
   This routine is not state sensitive and need not be SMP locked. */

static unsigned const ethernet_polynomial = 0x04c11db7U;
static inline u32 ether_crc (int length, unsigned char *data)
{
	int crc = -1;

	while (--length >= 0) {
		unsigned char current_octet = *data++;
		int bit;
		for (bit = 0; bit < 8; bit++, current_octet >>= 1)
			crc = (crc << 1) ^ ((crc < 0) ^ (current_octet & 1) ?
			     ethernet_polynomial : 0);
	}

	return crc;
}

static void __cp_set_rx_mode (struct net_device *dev)
{
	struct cp_private *cp = dev->priv;
	u32 mc_filter[2];	/* Multicast hash filter */
	int i, rx_mode;
	u32 tmp;

	/* Note: do not reorder, GCC is clever about common statements. */
	if (dev->flags & IFF_PROMISC) {
		/* Unconditionally log net taps. */
		printk (KERN_NOTICE "%s: Promiscuous mode enabled.\n",
			dev->name);
		rx_mode =
		    AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
		    AcceptAllPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else if ((dev->mc_count > multicast_filter_limit)
		   || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter perfectly -- accept all multicasts. */
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else {
		struct dev_mc_list *mclist;
		rx_mode = AcceptBroadcast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0;
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {
			int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;

			mc_filter[bit_nr >> 5] |= cpu_to_le32(1 << (bit_nr & 31));
			rx_mode |= AcceptMulticast;
		}
	}

	/* We can safely update without stopping the chip. */
	tmp = cp_rx_config | rx_mode;
	if (cp->rx_config != tmp) {
		cpw32_f (RxConfig, tmp);
		cp->rx_config = tmp;
	}
	cpw32_f (MAR0 + 0, mc_filter[0]);
	cpw32_f (MAR0 + 4, mc_filter[1]);
}

static void cp_set_rx_mode (struct net_device *dev)
{
	unsigned long flags;
	struct cp_private *cp = dev->priv;

	spin_lock_irqsave (&cp->lock, flags);
	__cp_set_rx_mode(dev);
	spin_unlock_irqrestore (&cp->lock, flags);
}

static void __cp_get_stats(struct cp_private *cp)
{
	/* XXX implement */
}

static struct net_device_stats *cp_get_stats(struct net_device *dev)
{
	struct cp_private *cp = dev->priv;

	/* The chip only need report frame silently dropped. */
	spin_lock_irq(&cp->lock);
	if (netif_running(dev) && netif_device_present(dev))
		__cp_get_stats(cp);
	spin_unlock_irq(&cp->lock);

	return &cp->net_stats;
}

static void cp_stop_hw (struct cp_private *cp)
{
	cpw16(IntrMask, 0);
	cpr16(IntrMask);
	cpw8(Cmd, 0);
	cpw16(CpCmd, 0);
	cpr16(CpCmd);
	cpw16(IntrStatus, ~(cpr16(IntrStatus)));

	synchronize_irq();
	udelay(10);

	cp->rx_tail = 0;
	cp->tx_head = cp->tx_tail = 0;
}

static void cp_reset_hw (struct cp_private *cp)
{
	unsigned work = 1000;

	cpw8(Cmd, CmdReset);

	while (work--) {
		if (!(cpr8(Cmd) & CmdReset))
			return;

		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(10);
	}

	printk(KERN_ERR "%s: hardware reset timeout\n", cp->dev->name);
}
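/* Bring the chip from reset to a running configuration: program the
 * station address, receive filter, Tx/Rx configuration, and the DMA
 * addresses of the descriptor rings.  The Rx and Tx rings share one
 * coherent allocation, Rx ring first (see cp_alloc_rings below), so
 * the Tx ring address is the ring base plus the Rx ring's size.
 * Config register writes are bracketed by Cfg9346 unlock/lock.
 */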
static void cp_init_hw (struct cp_private *cp)
{
	struct net_device *dev = cp->dev;

	cp_reset_hw(cp);

	cpw8_f (Cfg9346, Cfg9346_Unlock);

	/* Restore our idea of the MAC address. */
	cpw32_f (MAC0 + 0, cpu_to_le32 (*(u32 *) (dev->dev_addr + 0)));
	cpw32_f (MAC0 + 4, cpu_to_le32 (*(u32 *) (dev->dev_addr + 4)));

	cpw8(Cmd, RxOn | TxOn);
	cpw16(CpCmd, PCIMulRW | CpRxOn | CpTxOn);
	cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */

	__cp_set_rx_mode(dev);
	cpw32_f (TxConfig, IFG | (TX_DMA_BURST << TxDMAShift));

	cpw8(Config1, cpr8(Config1) | DriverLoaded | PMEnable);
	cpw8(Config3, PARMEnable); /* disables magic packet and WOL */
	cpw8(Config5, cpr8(Config5) & PMEStatus); /* disables more WOL stuff */

	cpw32_f(HiTxRingAddr, 0);
	cpw32_f(HiTxRingAddr + 4, 0);
	cpw32_f(OldRxBufAddr, 0);
	cpw32_f(OldTSD0, 0);
	cpw32_f(OldTSD0 + 4, 0);
	cpw32_f(OldTSD0 + 8, 0);
	cpw32_f(OldTSD0 + 12, 0);

	cpw32_f(RxRingAddr, cp->ring_dma);
	cpw32_f(RxRingAddr + 4, 0);
	cpw32_f(TxRingAddr, cp->ring_dma + (sizeof(struct cp_desc) * CP_RX_RING_SIZE));
	cpw32_f(TxRingAddr + 4, 0);

	cpw16(MultiIntr, 0);

	cpw16(IntrMask, cp_intr_mask);

	cpw8_f (Cfg9346, Cfg9346_Lock);
}

static int cp_refill_rx (struct cp_private *cp)
{
	unsigned i;

	for (i = 0; i < CP_RX_RING_SIZE; i++) {
		struct sk_buff *skb;

		skb = dev_alloc_skb(cp->rx_buf_sz + RX_OFFSET);
		if (!skb)
			goto err_out;

		skb->dev = cp->dev;
		skb_reserve(skb, RX_OFFSET);

		cp->rx_skb[i].mapping = pci_map_single(cp->pdev,
			skb->tail, cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
		cp->rx_skb[i].skb = skb;
		cp->rx_skb[i].frag = 0;

		if (i == (CP_RX_RING_SIZE - 1))
			cp->rx_ring[i].opts1 =
				cpu_to_le32(DescOwn | RingEnd | cp->rx_buf_sz);
		else
			cp->rx_ring[i].opts1 =
				cpu_to_le32(DescOwn | cp->rx_buf_sz);
		cp->rx_ring[i].opts2 = 0;
		cp->rx_ring[i].addr_lo = cpu_to_le32(cp->rx_skb[i].mapping);
		cp->rx_ring[i].addr_hi = 0;
	}

	return 0;

err_out:
	cp_clean_rings(cp);
	return -ENOMEM;
}

static int cp_init_rings (struct cp_private *cp)
{
	memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
	cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd);

	cp->rx_tail = 0;
	cp->tx_head = cp->tx_tail = 0;

	return cp_refill_rx (cp);
}

static int cp_alloc_rings (struct cp_private *cp)
{
	cp->rx_ring = pci_alloc_consistent(cp->pdev, CP_RING_BYTES, &cp->ring_dma);
	if (!cp->rx_ring)
		return -ENOMEM;
	cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE];
	return cp_init_rings(cp);
}

static void cp_clean_rings (struct cp_private *cp)
{
	unsigned i;

	memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
	memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);

	for (i = 0; i < CP_RX_RING_SIZE; i++) {
		if (cp->rx_skb[i].skb) {
