8139cp.c

From the "Linux kernel source code" · C code · 2,102 lines total · page 1 of 4

C
2,102
Font size
		/* NOTE(review): this span begins inside cp_rx_poll()'s RX ring
		 * loop; the function's opening (and the rx_status_loop label
		 * it jumps back to) lies outside this view.
		 */
		desc = &cp->rx_ring[rx_tail];
		status = le32_to_cpu(desc->opts1);
		/* Descriptor still owned by the NIC: no more completed RX. */
		if (status & DescOwn)
			break;

		/* Frame length from opts1; minus 4 strips the trailing CRC. */
		len = (status & 0x1fff) - 4;
		mapping = le64_to_cpu(desc->addr);

		if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag)) {
			/* we don't support incoming fragmented frames.
			 * instead, we attempt to ensure that the
			 * pre-allocated RX skbs are properly sized such
			 * that RX fragments are never encountered
			 */
			cp_rx_err_acct(cp, rx_tail, status, len);
			cp->net_stats.rx_dropped++;
			cp->cp_stats.rx_frags++;
			goto rx_next;
		}

		/* Hardware-reported receive errors: account and recycle slot. */
		if (status & (RxError | RxErrFIFO)) {
			cp_rx_err_acct(cp, rx_tail, status, len);
			goto rx_next;
		}

		if (netif_msg_rx_status(cp))
			printk(KERN_DEBUG "%s: rx slot %d status 0x%x len %d\n",
			       dev->name, rx_tail, status, len);

		/* Allocate the replacement buffer before touching the old one;
		 * on failure the old skb stays in the ring and the frame is
		 * simply dropped.
		 */
		buflen = cp->rx_buf_sz + RX_OFFSET;
		new_skb = dev_alloc_skb (buflen);
		if (!new_skb) {
			cp->net_stats.rx_dropped++;
			goto rx_next;
		}

		skb_reserve(new_skb, RX_OFFSET);

		dma_unmap_single(&cp->pdev->dev, mapping,
				 buflen, PCI_DMA_FROMDEVICE);

		/* Handle checksum offloading for incoming packets. */
		if (cp_rx_csum_ok(status))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb_put(skb, len);

		/* Map the fresh buffer and park it in this ring slot. */
		mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen,
					 PCI_DMA_FROMDEVICE);
		cp->rx_skb[rx_tail] = new_skb;

		cp_rx_skb(cp, skb, desc);
		rx++;

rx_next:
		/* Re-arm the descriptor: hand ownership back to the NIC,
		 * marking RingEnd on the last slot so the chip wraps.
		 */
		cp->rx_ring[rx_tail].opts2 = 0;
		cp->rx_ring[rx_tail].addr = cpu_to_le64(mapping);
		if (rx_tail == (CP_RX_RING_SIZE - 1))
			desc->opts1 = cpu_to_le32(DescOwn | RingEnd |
						  cp->rx_buf_sz);
		else
			desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz);
		rx_tail = NEXT_RX(rx_tail);

		if (rx >= budget)
			break;
	}

	cp->rx_tail = rx_tail;

	/* if we did not reach work limit, then we're done with
	 * this round of polling
	 */
	if (rx < budget) {
		unsigned long flags;

		/* More RX arrived while we were polling: loop again rather
		 * than re-enabling interrupts and racing the hardware.
		 */
		if (cpr16(IntrStatus) & cp_rx_intr_mask)
			goto rx_status_loop;

		spin_lock_irqsave(&cp->lock, flags);
		cpw16_f(IntrMask, cp_intr_mask);
		__netif_rx_complete(dev, napi);
		spin_unlock_irqrestore(&cp->lock, flags);
	}

	return rx;
}

/*
 * Hard interrupt handler.  Acks non-RX interrupt sources, defers RX work
 * to NAPI polling (cp_rx_poll), reaps finished TX descriptors inline, and
 * reports PCI bus errors.  May share the IRQ line, hence the IRQ_NONE
 * paths for a NULL device or an all-ones/zero status read.
 */
static irqreturn_t cp_interrupt (int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct cp_private *cp;
	u16 status;

	if (unlikely(dev == NULL))
		return IRQ_NONE;
	cp = netdev_priv(dev);

	/* 0xFFFF means the device is absent (e.g. hot-unplugged). */
	status = cpr16(IntrStatus);
	if (!status || (status == 0xFFFF))
		return IRQ_NONE;

	if (netif_msg_intr(cp))
		printk(KERN_DEBUG "%s: intr, status %04x cmd %02x cpcmd %04x\n",
		        dev->name, status, cpr8(Cmd), cpr16(CpCmd));

	/* Ack everything except RX bits, which the NAPI poller owns. */
	cpw16(IntrStatus, status & ~cp_rx_intr_mask);

	spin_lock(&cp->lock);

	/* close possible race's with dev_close */
	if (unlikely(!netif_running(dev))) {
		cpw16(IntrMask, 0);
		spin_unlock(&cp->lock);
		return IRQ_HANDLED;
	}

	/* Mask RX interrupts and schedule the NAPI poll for RX events. */
	if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr))
		if (netif_rx_schedule_prep(dev, &cp->napi)) {
			cpw16_f(IntrMask, cp_norx_intr_mask);
			__netif_rx_schedule(dev, &cp->napi);
		}

	if (status & (TxOK | TxErr | TxEmpty | SWInt))
		cp_tx(cp);
	if (status & LinkChg)
		mii_check_media(&cp->mii_if, netif_msg_link(cp), false);

	spin_unlock(&cp->lock);

	if (status & PciErr) {
		u16 pci_status;

		/* Read-then-write-back clears the latched PCI error bits. */
		pci_read_config_word(cp->pdev, PCI_STATUS, &pci_status);
		pci_write_config_word(cp->pdev, PCI_STATUS, pci_status);
		printk(KERN_ERR "%s: PCI bus error, status=%04x, PCI status=%04x\n",
		       dev->name, status, pci_status);

		/* TODO: reset hardware */
	}

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void cp_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	cp_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

/*
 * Reap completed TX descriptors: walk from tx_tail toward tx_head,
 * unmapping buffers and updating stats, stopping at the first
 * descriptor the NIC still owns.  Called under cp->lock from the
 * interrupt handler.
 */
static void cp_tx (struct cp_private *cp)
{
	unsigned tx_head = cp->tx_head;
	unsigned tx_tail = cp->tx_tail;

	while (tx_tail != tx_head) {
		struct cp_desc *txd = cp->tx_ring + tx_tail;
		struct sk_buff *skb;
		u32 status;

		/* Read ownership before the rest of the descriptor. */
		rmb();
		status = le32_to_cpu(txd->opts1);
		if (status & DescOwn)
			break;

		skb = cp->tx_skb[tx_tail];
		BUG_ON(!skb);

		/* Low 16 bits of opts1 hold this fragment's buffer length. */
		dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
				 le32_to_cpu(txd->opts1) & 0xffff,
				 PCI_DMA_TODEVICE);

		/* Status and the skb itself are accounted only on the last
		 * fragment of a packet; earlier fragments are just unmapped.
		 */
		if (status & LastFrag) {
			if (status & (TxError | TxFIFOUnder)) {
				if (netif_msg_tx_err(cp))
					printk(KERN_DEBUG "%s: tx err, status 0x%x\n",
					       cp->dev->name, status);
				cp->net_stats.tx_errors++;
				if (status & TxOWC)
					cp->net_stats.tx_window_errors++;
				if (status & TxMaxCol)
					cp->net_stats.tx_aborted_errors++;
				if (status & TxLinkFail)
					cp->net_stats.tx_carrier_errors++;
				if (status & TxFIFOUnder)
					cp->net_stats.tx_fifo_errors++;
			} else {
				cp->net_stats.collisions +=
					((status >> TxColCntShift) & TxColCntMask);
				cp->net_stats.tx_packets++;
				cp->net_stats.tx_bytes += skb->len;
				if (netif_msg_tx_done(cp))
					printk(KERN_DEBUG "%s: tx done, slot %d\n", cp->dev->name, tx_tail);
			}
			dev_kfree_skb_irq(skb);
		}

		cp->tx_skb[tx_tail] = NULL;

		tx_tail = NEXT_TX(tx_tail);
	}

	cp->tx_tail = tx_tail;

	/* Restart the queue once enough slots exist for a worst-case skb. */
	if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1))
		netif_wake_queue(cp->dev);
}

/*
 * Queue one skb for transmission.  Linear skbs use a single descriptor;
 * fragmented skbs get one descriptor per fragment, and the FIRST
 * descriptor's DescOwn bit is set last so the NIC never sees a
 * half-built chain.  Returns 0 on success, 1 (busy) when the ring is
 * unexpectedly full.
 */
static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned entry;
	u32 eor, flags;
	unsigned long intr_flags;
#if CP_VLAN_TAG_USED
	u32 vlan_tag = 0;
#endif
	int mss = 0;

	spin_lock_irqsave(&cp->lock, intr_flags);

	/* This is a hard error, log it. */
	if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&cp->lock, intr_flags);
		printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
		       dev->name);
		return 1;
	}

#if CP_VLAN_TAG_USED
	/* Chip expects the VLAN tag byte-swapped, hence swab16(). */
	if (cp->vlgrp && vlan_tx_tag_present(skb))
		vlan_tag = TxVlanTag | swab16(vlan_tx_tag_get(skb));
#endif

	entry = cp->tx_head;
	eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
	if (dev->features & NETIF_F_TSO)
		mss = skb_shinfo(skb)->gso_size;

	if (skb_shinfo(skb)->nr_frags == 0) {
		/* Fast path: whole packet fits in one descriptor. */
		struct cp_desc *txd = &cp->tx_ring[entry];
		u32 len;
		dma_addr_t mapping;

		len = skb->len;
		mapping = dma_map_single(&cp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
		CP_VLAN_TX_TAG(txd, vlan_tag);
		txd->addr = cpu_to_le64(mapping);
		/* Address must be visible before ownership is granted. */
		wmb();

		flags = eor | len | DescOwn | FirstFrag | LastFrag;

		if (mss)
			flags |= LargeSend | ((mss & MSSMask) << MSSShift);
		else if (skb->ip_summed == CHECKSUM_PARTIAL) {
			const struct iphdr *ip = ip_hdr(skb);
			if (ip->protocol == IPPROTO_TCP)
				flags |= IPCS | TCPCS;
			else if (ip->protocol == IPPROTO_UDP)
				flags |= IPCS | UDPCS;
			else
				WARN_ON(1);	/* we need a WARN() */
		}

		txd->opts1 = cpu_to_le32(flags);
		wmb();

		cp->tx_skb[entry] = skb;
		entry = NEXT_TX(entry);
	} else {
		struct cp_desc *txd;
		u32 first_len, first_eor;
		dma_addr_t first_mapping;
		int frag, first_entry = entry;
		const struct iphdr *ip = ip_hdr(skb);

		/* We must give this initial chunk to the device last.
		 * Otherwise we could race with the device.
		 */
		first_eor = eor;

		first_len = skb_headlen(skb);
		first_mapping = dma_map_single(&cp->pdev->dev, skb->data,
					       first_len, PCI_DMA_TODEVICE);
		cp->tx_skb[entry] = skb;
		entry = NEXT_TX(entry);

		for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
			skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
			u32 len;
			u32 ctrl;
			dma_addr_t mapping;

			len = this_frag->size;
			mapping = dma_map_single(&cp->pdev->dev,
						 ((void *) page_address(this_frag->page) +
						  this_frag->page_offset),
						 len, PCI_DMA_TODEVICE);
			eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;

			ctrl = eor | len | DescOwn;

			if (mss)
				ctrl |= LargeSend |
					((mss & MSSMask) << MSSShift);
			else if (skb->ip_summed == CHECKSUM_PARTIAL) {
				if (ip->protocol == IPPROTO_TCP)
					ctrl |= IPCS | TCPCS;
				else if (ip->protocol == IPPROTO_UDP)
					ctrl |= IPCS | UDPCS;
				else
					BUG();
			}

			if (frag == skb_shinfo(skb)->nr_frags - 1)
				ctrl |= LastFrag;

			txd = &cp->tx_ring[entry];
			CP_VLAN_TX_TAG(txd, vlan_tag);
			txd->addr = cpu_to_le64(mapping);
			wmb();

			txd->opts1 = cpu_to_le32(ctrl);
			wmb();

			cp->tx_skb[entry] = skb;
			entry = NEXT_TX(entry);
		}

		/* Only now hand the head descriptor to the NIC — see the
		 * race comment above.
		 */
		txd = &cp->tx_ring[first_entry];
		CP_VLAN_TX_TAG(txd, vlan_tag);
		txd->addr = cpu_to_le64(first_mapping);
		wmb();

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			if (ip->protocol == IPPROTO_TCP)
				txd->opts1 = cpu_to_le32(first_eor | first_len |
							 FirstFrag | DescOwn |
							 IPCS | TCPCS);
			else if (ip->protocol == IPPROTO_UDP)
				txd->opts1 = cpu_to_le32(first_eor | first_len |
							 FirstFrag | DescOwn |
							 IPCS | UDPCS);
			else
				BUG();
		} else
			txd->opts1 = cpu_to_le32(first_eor | first_len |
						 FirstFrag | DescOwn);
		wmb();
	}
	cp->tx_head = entry;
	if (netif_msg_tx_queued(cp))
		printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
		       dev->name, entry, skb->len);
	if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&cp->lock, intr_flags);

	/* Kick the chip to fetch the new descriptor(s). */
	cpw8(TxPoll, NormalTxPoll);
	dev->trans_start = jiffies;

	return 0;
}

/* Set or clear the multicast filter for this adaptor.
   This routine is not state sensitive and need not be SMP locked. */

static void __cp_set_rx_mode (struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	u32 mc_filter[2];	/* Multicast hash filter */
	int i, rx_mode;
	u32 tmp;

	/* Note: do not reorder, GCC is clever about common statements. */
	if (dev->flags & IFF_PROMISC) {
		/* Unconditionally log net taps. */
		rx_mode =
		    AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
		    AcceptAllPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else if ((dev->mc_count > multicast_filter_limit)
		   || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter perfectly -- accept all multicasts. */
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else {
		struct dev_mc_list *mclist;
		rx_mode = AcceptBroadcast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0;
		/* Hash each address into a 64-bit filter: top 6 CRC bits
		 * select the bit.
		 */
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {
			int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;

			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
			rx_mode |= AcceptMulticast;
		}
	}

	/* We can safely update without stopping the chip. */
	tmp = cp_rx_config | rx_mode;
	if (cp->rx_config != tmp) {
		cpw32_f (RxConfig, tmp);
		cp->rx_config = tmp;
	}
	cpw32_f (MAR0 + 0, mc_filter[0]);
	cpw32_f (MAR0 + 4, mc_filter[1]);
}

/* Locked wrapper around __cp_set_rx_mode(). */
static void cp_set_rx_mode (struct net_device *dev)
{
	unsigned long flags;
	struct cp_private *cp = netdev_priv(dev);

	spin_lock_irqsave (&cp->lock, flags);
	__cp_set_rx_mode(dev);
	spin_unlock_irqrestore (&cp->lock, flags);
}

/* Fold the hardware missed-frame counter into net_stats. */
static void __cp_get_stats(struct cp_private *cp)
{
	/* only lower 24 bits valid; write any value to clear */
	cp->net_stats.rx_missed_errors += (cpr32 (RxMissed) & 0xffffff);
	cpw32 (RxMissed, 0);
}

static struct net_device_stats *cp_get_stats(struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned long flags;

	/* The chip only need report frame silently dropped. */
	spin_lock_irqsave(&cp->lock, flags);
 	if (netif_running(dev) && netif_device_present(dev))
 		__cp_get_stats(cp);
	spin_unlock_irqrestore(&cp->lock, flags);

	return &cp->net_stats;
}

/* Quiesce the chip: ack and mask all interrupts, stop RX/TX engines,
 * and reset the software ring indices.
 */
static void cp_stop_hw (struct cp_private *cp)
{
	cpw16(IntrStatus, ~(cpr16(IntrStatus)));
	cpw16_f(IntrMask, 0);
	cpw8(Cmd, 0);
	cpw16_f(CpCmd, 0);
	cpw16_f(IntrStatus, ~(cpr16(IntrStatus)));

	cp->rx_tail = 0;
	cp->tx_head = cp->tx_tail = 0;
}

/* Issue a chip soft reset and wait for the chip to clear the bit.
 * Sleeps, so must be called from process context.
 */
static void cp_reset_hw (struct cp_private *cp)
{
	unsigned work = 1000;

	cpw8(Cmd, CmdReset);

	while (work--) {
		if (!(cpr8(Cmd) & CmdReset))
			return;

		schedule_timeout_uninterruptible(10);
	}

	printk(KERN_ERR "%s: hardware reset timeout\n", cp->dev->name);
}

/* Enable the RX and TX engines with the cached C+ command mode. */
static inline void cp_start_hw (struct cp_private *cp)
{
	cpw16(CpCmd, cp->cpcmd);
	cpw8(Cmd, RxOn | TxOn);
}

/* Full hardware (re)initialization: reset, program MAC address, RX mode,
 * DMA ring base addresses, and interrupt mask.
 * NOTE(review): this function's closing brace lies beyond this view.
 */
static void cp_init_hw (struct cp_private *cp)
{
	struct net_device *dev = cp->dev;
	dma_addr_t ring_dma;

	cp_reset_hw(cp);

	cpw8_f (Cfg9346, Cfg9346_Unlock);

	/* Restore our idea of the MAC address. */
	cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
	cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));

	cp_start_hw(cp);
	cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */

	__cp_set_rx_mode(dev);
	cpw32_f (TxConfig, IFG | (TX_DMA_BURST << TxDMAShift));

	cpw8(Config1, cpr8(Config1) | DriverLoaded | PMEnable);
	/* Disable Wake-on-LAN. Can be turned on with ETHTOOL_SWOL */
	cpw8(Config3, PARMEnable);
	cp->wol_enabled = 0;

	cpw8(Config5, cpr8(Config5) & PMEStatus);

	cpw32_f(HiTxRingAddr, 0);
	cpw32_f(HiTxRingAddr + 4, 0);

	/* Program 64-bit ring base addresses; double shift avoids UB on
	 * 32-bit dma_addr_t.
	 */
	ring_dma = cp->ring_dma;
	cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
	cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);

	ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
	cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
	cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);

	cpw16(MultiIntr, 0);

	cpw16_f(IntrMask, cp_intr_mask);

	cpw8_f(Cfg9346, Cfg9346_Lock);

⌨️ Keyboard shortcuts

Copy code: Ctrl + C
Search code: Ctrl + F
Full-screen mode: F11
Increase font size: Ctrl + =
Decrease font size: Ctrl + -
Show shortcuts: ?