/*
 * sis190.c — SiS 190/191 PCI Gigabit Ethernet driver (Linux kernel source).
 *
 * Capture note: this is page 1 of 3 of a 1,862-line file, saved from a
 * code-viewer web page; the viewer banner originally read (in Chinese):
 * "From the Linux kernel source tree · C code · 1,862 lines · page 1/3".
 */
			skb_put(skb, pkt_size);
			skb->protocol = eth_type_trans(skb, dev);
			sis190_rx_skb(skb);

			dev->last_rx = jiffies;
			stats->rx_packets++;
			stats->rx_bytes += pkt_size;
			/* Count multicast frames.  NOTE(review): BCAST is
			 * presumably the destination-class mask in the Rx
			 * status word — confirm against the descriptor
			 * layout earlier in this file. */
			if ((status & BCAST) == MCAST)
				stats->multicast++;
		}
	}
	count = cur_rx - tp->cur_rx;
	tp->cur_rx = cur_rx;

	/* Refill the descriptors consumed above. */
	delta = sis190_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
	if (!delta && count && netif_msg_intr(tp))
		printk(KERN_INFO "%s: no Rx buffer allocated.\n", dev->name);
	tp->dirty_rx += delta;
	/* Every descriptor pending refill and none available: the Rx ring
	 * is starved. */
	if (((tp->dirty_rx + NUM_RX_DESC) == tp->cur_rx) && netif_msg_intr(tp))
		printk(KERN_EMERG "%s: Rx buffers exhausted.\n", dev->name);

	return count;
}

/*
 * Undo the DMA mapping of a transmitted skb and scrub its descriptor.
 * The length mirrors the mapping in sis190_start_xmit(), which pads
 * short frames up to ETH_ZLEN.
 */
static void sis190_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
				struct TxDesc *desc)
{
	unsigned int len;

	len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;

	pci_unmap_single(pdev, le32_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);

	memset(desc, 0x00, sizeof(*desc));
}

/*
 * Reclaim Tx descriptors the chip has released (OWNbit cleared), free
 * their skbs, and wake the queue if it had been stopped on a full ring.
 */
static void sis190_tx_interrupt(struct net_device *dev,
				struct sis190_private *tp, void __iomem *ioaddr)
{
	u32 pending, dirty_tx = tp->dirty_tx;
	/*
	 * It would not be needed if queueing was allowed to be enabled
	 * again too early (hint: think preempt and unclocked smp systems).
	 */
	unsigned int queue_stopped;

	/* Pairs with the smp_wmb() after tp->cur_tx++ in
	 * sis190_start_xmit(). */
	smp_rmb();
	pending = tp->cur_tx - dirty_tx;
	queue_stopped = (pending == NUM_TX_DESC);

	for (; pending; pending--, dirty_tx++) {
		unsigned int entry = dirty_tx % NUM_TX_DESC;
		struct TxDesc *txd = tp->TxDescRing + entry;
		struct sk_buff *skb;

		/* Still owned by the hardware: nothing more to reclaim. */
		if (le32_to_cpu(txd->status) & OWNbit)
			break;

		skb = tp->Tx_skbuff[entry];

		dev->stats.tx_packets++;
		dev->stats.tx_bytes += skb->len;

		sis190_unmap_tx_skb(tp->pci_dev, skb, txd);
		tp->Tx_skbuff[entry] = NULL;
		dev_kfree_skb_irq(skb);
	}

	if (tp->dirty_tx != dirty_tx) {
		tp->dirty_tx = dirty_tx;
		smp_wmb();
		if (queue_stopped)
			netif_wake_queue(dev);
	}
}

/*
 * The interrupt handler does all of the Rx thread work and cleans up after
 * the Tx thread.
*/
static irqreturn_t sis190_interrupt(int irq, void *__dev)
{
	struct net_device *dev = __dev;
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int handled = 0;
	u32 status;

	status = SIS_R32(IntrStatus);

	/* 0xffffffff typically means the device is gone; 0 means the
	 * (shared) interrupt is not ours. */
	if ((status == 0xffffffff) || !status)
		goto out;

	handled = 1;

	if (unlikely(!netif_running(dev))) {
		sis190_asic_down(ioaddr);
		goto out;
	}

	/* Acknowledge exactly the events we are about to service. */
	SIS_W32(IntrStatus, status);

	// net_intr(tp, KERN_INFO "%s: status = %08x.\n", dev->name, status);

	if (status & LinkChange) {
		net_intr(tp, KERN_INFO "%s: link change.\n", dev->name);
		/* PHY work needs MDIO access; defer to process context. */
		schedule_work(&tp->phy_task);
	}

	if (status & RxQInt)
		sis190_rx_interrupt(dev, tp, ioaddr);

	if (status & TxQ0Int)
		sis190_tx_interrupt(dev, tp, ioaddr);
out:
	return IRQ_RETVAL(handled);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll/netconsole hook: run the ISR with the line disabled. */
static void sis190_netpoll(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;

	disable_irq(pdev->irq);
	sis190_interrupt(pdev->irq, dev);
	enable_irq(pdev->irq);
}
#endif

/*
 * Release one Rx buffer: unmap its DMA area, free the skb, and mark the
 * descriptor so the chip will not use it again.
 */
static void sis190_free_rx_skb(struct sis190_private *tp,
			       struct sk_buff **sk_buff, struct RxDesc *desc)
{
	struct pci_dev *pdev = tp->pci_dev;

	pci_unmap_single(pdev, le32_to_cpu(desc->addr), tp->rx_buf_sz,
			 PCI_DMA_FROMDEVICE);
	dev_kfree_skb(*sk_buff);
	*sk_buff = NULL;
	sis190_make_unusable_by_asic(desc);
}

/* Free every allocated Rx buffer in the ring. */
static void sis190_rx_clear(struct sis190_private *tp)
{
	unsigned int i;

	for (i = 0; i < NUM_RX_DESC; i++) {
		if (!tp->Rx_skbuff[i])
			continue;
		sis190_free_rx_skb(tp, tp->Rx_skbuff + i, tp->RxDescRing + i);
	}
}

/* Reset producer/consumer indexes of both rings. */
static void sis190_init_ring_indexes(struct sis190_private *tp)
{
	tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
}

/*
 * (Re)initialize both descriptor rings and fully populate the Rx ring.
 * Returns 0 on success, -ENOMEM if the Rx ring could not be filled.
 */
static int sis190_init_ring(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);

	sis190_init_ring_indexes(tp);

	memset(tp->Tx_skbuff, 0x0, NUM_TX_DESC * sizeof(struct sk_buff *));
	memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));

	if (sis190_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
		goto
err_rx_clear;

	sis190_mark_as_last_descriptor(tp->RxDescRing + NUM_RX_DESC - 1);

	return 0;

err_rx_clear:
	sis190_rx_clear(tp);
	return -ENOMEM;
}

/*
 * Program the Rx filter mode (promiscuous / all-multicast / hashed
 * multicast) and the 64-bit multicast hash table.
 */
static void sis190_set_rx_mode(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned long flags;
	u32 mc_filter[2];	/* Multicast hash filter */
	u16 rx_mode;

	if (dev->flags & IFF_PROMISC) {
		rx_mode =
			AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
			AcceptAllPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else if ((dev->mc_count > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter perfectly -- accept all multicasts. */
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else {
		struct dev_mc_list *mclist;
		unsigned int i;

		rx_mode = AcceptBroadcast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0;
		/* CRC-hash each list entry into one of 64 filter bits. */
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {
			int bit_nr =
				ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
			rx_mode |= AcceptMulticast;
		}
	}

	spin_lock_irqsave(&tp->lock, flags);

	/* Keep bit 0x2 set, matching the RxMacControl value written in
	 * sis190_hw_start(); its meaning is not named here. */
	SIS_W16(RxMacControl, rx_mode | 0x2);
	SIS_W32(RxHashTable, mc_filter[0]);
	SIS_W32(RxHashTable + 4, mc_filter[1]);

	spin_unlock_irqrestore(&tp->lock, flags);
}

/* Soft-reset the chip via IntrControl, then force the MAC engines idle. */
static void sis190_soft_reset(void __iomem *ioaddr)
{
	SIS_W32(IntrControl, 0x8000);
	SIS_PCI_COMMIT();
	SIS_W32(IntrControl, 0x0);

	sis190_asic_down(ioaddr);
}

/*
 * Bring the hardware up: reset, point the chip at both descriptor rings,
 * program MAC and WoL defaults, then (continued below) set the Rx filter,
 * unmask interrupts and start the Tx/Rx engines.
 */
static void sis190_hw_start(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	sis190_soft_reset(ioaddr);

	SIS_W32(TxDescStartAddr, tp->tx_dma);
	SIS_W32(RxDescStartAddr, tp->rx_dma);

	SIS_W32(IntrStatus, 0xffffffff);
	SIS_W32(IntrMask, 0x0);
	SIS_W32(GMIIControl, 0x0);
	SIS_W32(TxMacControl, 0x60);
	SIS_W16(RxMacControl, 0x02);
	SIS_W32(RxHashTable, 0x0);
	SIS_W32(0x6c, 0x0);	/* raw register offset — not named in this file */
	SIS_W32(RxWolCtrl, 0x0);
	SIS_W32(RxWolData, 0x0);

	SIS_PCI_COMMIT();
/* sis190_hw_start() continued. */
	sis190_set_rx_mode(dev);

	/* Enable all known interrupts by setting the interrupt mask. */
	SIS_W32(IntrMask, sis190_intr_mask);

	SIS_W32(TxControl, 0x1a00 | CmdTxEnb);
	SIS_W32(RxControl, 0x1a1d);

	netif_start_queue(dev);
}

/*
 * Deferred PHY worker (scheduled from the ISR on LinkChange and by the
 * PHY timer).  Waits out a pending PHY reset, retries until autoneg
 * completes, then derives speed/duplex from LPA & ADVERTISE and programs
 * StationControl accordingly.
 */
static void sis190_phy_task(struct work_struct *work)
{
	struct sis190_private *tp =
		container_of(work, struct sis190_private, phy_task);
	struct net_device *dev = tp->dev;
	void __iomem *ioaddr = tp->mmio_addr;
	int phy_id = tp->mii_if.phy_id;
	u16 val;

	rtnl_lock();

	if (!netif_running(dev))
		goto out_unlock;

	val = mdio_read(ioaddr, phy_id, MII_BMCR);
	if (val & BMCR_RESET) {
		/* PHY reset still in progress: poll again shortly. */
		// FIXME: needlessly high ?  -- FR 02/07/2005
		mod_timer(&tp->timer, jiffies + HZ/10);
	} else if (!(mdio_read_latched(ioaddr, phy_id, MII_BMSR) &
		     BMSR_ANEGCOMPLETE)) {
		/* Autoneg has not completed: reset the PHY and retry later. */
		net_link(tp, KERN_WARNING "%s: PHY reset until link up.\n",
			 dev->name);
		netif_carrier_off(dev);
		mdio_write(ioaddr, phy_id, MII_BMCR, val | BMCR_RESET);
		mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT);
	} else {
		/* Rejoice ! */
		/* Table ordered fastest to slowest; the first entry whose
		 * bits are all present in (lpa & adv) wins.  The final
		 * zero-valued entry is the catch-all. */
		struct {
			int val;
			u32 ctl;
			const char *msg;
		} reg31[] = {
			{ LPA_1000XFULL | LPA_SLCT, 0x07000c00 | 0x00001000,
				"1000 Mbps Full Duplex" },
			{ LPA_1000XHALF | LPA_SLCT, 0x07000c00,
				"1000 Mbps Half Duplex" },
			{ LPA_100FULL, 0x04000800 | 0x00001000,
				"100 Mbps Full Duplex" },
			{ LPA_100HALF, 0x04000800,
				"100 Mbps Half Duplex" },
			{ LPA_10FULL, 0x04000400 | 0x00001000,
				"10 Mbps Full Duplex" },
			{ LPA_10HALF, 0x04000400,
				"10 Mbps Half Duplex" },
			{ 0, 0x04000400, "unknown" }
		}, *p;
		u16 adv;

		val = mdio_read(ioaddr, phy_id, 0x1f);
		net_link(tp, KERN_INFO "%s: mii ext = %04x.\n", dev->name, val);

		val = mdio_read(ioaddr, phy_id, MII_LPA);
		adv = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
		net_link(tp, KERN_INFO "%s: mii lpa = %04x adv = %04x.\n",
			 dev->name, val, adv);

		/* Keep only the modes both ends agreed on. */
		val &= adv;

		for (p = reg31; p->val; p++) {
			if ((val & p->val) == p->val)
				break;
		}

		/* Preserve the StationControl bits outside the speed/duplex
		 * field we are about to set. */
		p->ctl |= SIS_R32(StationControl) & ~0x0f001c00;

		if ((tp->features & F_HAS_RGMII) &&
/* sis190_phy_task() continued: BCM5461-specific RGMII handling. */
		    (tp->features & F_PHY_BCM5461)) {
			// Set Tx Delay in RGMII mode.
			mdio_write(ioaddr, phy_id, 0x18, 0xf1c7);
			udelay(200);
			mdio_write(ioaddr, phy_id, 0x1c, 0x8c00);
			p->ctl |= 0x03000000;
		}

		SIS_W32(StationControl, p->ctl);

		if (tp->features & F_HAS_RGMII) {
			SIS_W32(RGDelay, 0x0441);
			SIS_W32(RGDelay, 0x0440);
		}

		net_link(tp, KERN_INFO "%s: link on %s mode.\n", dev->name,
			 p->msg);
		netif_carrier_on(dev);
	}

out_unlock:
	rtnl_unlock();
}

/* PHY watchdog timer: re-schedule the PHY worker while the device is up. */
static void sis190_phy_timer(unsigned long __opaque)
{
	struct net_device *dev = (struct net_device *)__opaque;
	struct sis190_private *tp = netdev_priv(dev);

	if (likely(netif_running(dev)))
		schedule_work(&tp->phy_task);
}

/* Stop the PHY watchdog timer, waiting for a running handler to finish. */
static inline void sis190_delete_timer(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);

	del_timer_sync(&tp->timer);
}

/* Arm the PHY watchdog timer to fire after SIS190_PHY_TIMEOUT. */
static inline void sis190_request_timer(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct timer_list *timer = &tp->timer;

	init_timer(timer);
	timer->expires = jiffies + SIS190_PHY_TIMEOUT;
	timer->data = (unsigned long)dev;
	timer->function = sis190_phy_timer;
	add_timer(timer);
}

/*
 * Derive the Rx buffer size from the MTU, rounded up to an 8-byte
 * multiple since the chip ignores the low bits of RxDesc->size.
 */
static void sis190_set_rxbufsize(struct sis190_private *tp,
				 struct net_device *dev)
{
	unsigned int mtu = dev->mtu;

	tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE;
	/* RxDesc->size has a licence to kill the lower bits */
	if (tp->rx_buf_sz & 0x07) {
		tp->rx_buf_sz += 8;
		tp->rx_buf_sz &= RX_BUF_MASK;
	}
}

/*
 * net_device open hook: allocate both descriptor rings, populate the Rx
 * ring, start the PHY timer, request the (shared) IRQ and start the
 * hardware.  Returns 0 on success or a negative errno, unwinding all
 * partial allocations on failure.
 */
static int sis190_open(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;
	int rc = -ENOMEM;

	sis190_set_rxbufsize(tp, dev);

	/*
	 * Rx and Tx descriptors need 256 bytes alignment.
	 * pci_alloc_consistent() guarantees a stronger alignment.
*/
	tp->TxDescRing = pci_alloc_consistent(pdev, TX_RING_BYTES, &tp->tx_dma);
	if (!tp->TxDescRing)
		goto out;

	tp->RxDescRing = pci_alloc_consistent(pdev, RX_RING_BYTES, &tp->rx_dma);
	if (!tp->RxDescRing)
		goto err_free_tx_0;

	rc = sis190_init_ring(dev);
	if (rc < 0)
		goto err_free_rx_1;

	sis190_request_timer(dev);

	rc = request_irq(dev->irq, sis190_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc < 0)
		goto err_release_timer_2;

	sis190_hw_start(dev);
out:
	return rc;

err_release_timer_2:
	sis190_delete_timer(dev);
	sis190_rx_clear(tp);
err_free_rx_1:
	pci_free_consistent(tp->pci_dev, RX_RING_BYTES, tp->RxDescRing,
		tp->rx_dma);
err_free_tx_0:
	pci_free_consistent(tp->pci_dev, TX_RING_BYTES, tp->TxDescRing,
		tp->tx_dma);
	goto out;
}

/*
 * Drop every queued Tx skb: unmap, free, and account it as dropped.
 * Resets the Tx ring indexes.
 */
static void sis190_tx_clear(struct sis190_private *tp)
{
	unsigned int i;

	for (i = 0; i < NUM_TX_DESC; i++) {
		struct sk_buff *skb = tp->Tx_skbuff[i];

		if (!skb)
			continue;

		sis190_unmap_tx_skb(tp->pci_dev, skb, tp->TxDescRing + i);
		tp->Tx_skbuff[i] = NULL;
		dev_kfree_skb(skb);
		tp->dev->stats.tx_dropped++;
	}
	tp->cur_tx = tp->dirty_tx = 0;
}

/*
 * Quiesce the device: stop the PHY timer and the queue, shut the ASIC
 * down, then release every Tx and Rx buffer.
 */
static void sis190_down(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int poll_locked = 0;

	sis190_delete_timer(dev);

	netif_stop_queue(dev);

	/* Repeat until IntrMask reads back zero — NOTE(review): this
	 * presumably guards against a concurrent ISR path re-enabling
	 * interrupt sources; confirm against sis190_asic_down(). */
	do {
		spin_lock_irq(&tp->lock);

		sis190_asic_down(ioaddr);

		spin_unlock_irq(&tp->lock);

		synchronize_irq(dev->irq);

		if (!poll_locked)
			poll_locked++;

		synchronize_sched();

	} while (SIS_R32(IntrMask));

	sis190_tx_clear(tp);
	sis190_rx_clear(tp);
}

/*
 * net_device close hook: tear the device down, release the IRQ and free
 * both descriptor rings.
 */
static int sis190_close(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;

	sis190_down(dev);

	free_irq(dev->irq, dev);

	pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
	pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);

	tp->TxDescRing = NULL;
	tp->RxDescRing = NULL;

	return 0;
}

/*
 * Transmit one skb: pad short frames to ETH_ZLEN, map the data for DMA,
 * fill a descriptor, hand it to the chip and kick TxControl.
 */
static int sis190_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct sis190_private *tp =
netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u32 len, entry, dirty_tx;
	struct TxDesc *desc;
	dma_addr_t mapping;

	if (unlikely(skb->len < ETH_ZLEN)) {
		if (skb_padto(skb, ETH_ZLEN)) {
			dev->stats.tx_dropped++;
			goto out;
		}
		len = ETH_ZLEN;
	} else {
		len = skb->len;
	}

	entry = tp->cur_tx % NUM_TX_DESC;
	desc = tp->TxDescRing + entry;

	/* The queue should have been stopped before the ring filled up;
	 * hitting a hw-owned descriptor here is a driver bug. */
	if (unlikely(le32_to_cpu(desc->status) & OWNbit)) {
		netif_stop_queue(dev);
		net_tx_err(tp, KERN_ERR PFX
			   "%s: BUG! Tx Ring full when queue awake!\n",
			   dev->name);
		return NETDEV_TX_BUSY;
	}

	mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);

	tp->Tx_skbuff[entry] = skb;

	desc->PSize = cpu_to_le32(len);
	desc->addr = cpu_to_le32(mapping);

	desc->size = cpu_to_le32(len);
	if (entry == (NUM_TX_DESC - 1))
		desc->size |= cpu_to_le32(RingEnd);

	/* All descriptor fields must be visible before OWNbit is set. */
	wmb();

	desc->status = cpu_to_le32(OWNbit | INTbit | DEFbit | CRCbit | PADbit);

	tp->cur_tx++;

	/* Pairs with the smp_rmb() in sis190_tx_interrupt(). */
	smp_wmb();

	SIS_W32(TxControl, 0x1a00 | CmdReset | CmdTxEnb);

	dev->trans_start = jiffies;

	/* Ring now full: stop the queue, then re-check dirty_tx to close
	 * the race with a concurrent sis190_tx_interrupt(). */
	dirty_tx = tp->dirty_tx;
	if ((tp->cur_tx - NUM_TX_DESC) == dirty_tx) {
		netif_stop_queue(dev);
		smp_rmb();
		if (dirty_tx != tp->dirty_tx)
			netif_wake_queue(dev);
	}
out:
	return NETDEV_TX_OK;
}

/* Free the probe-time list of detected PHYs. */
static void sis190_free_phy(struct list_head *first_phy)
{
	struct sis190_phy *cur, *next;

	list_for_each_entry_safe(cur, next, first_phy, list) {
		kfree(cur);
	}
}

/**
 *	sis190_default_phy - Select default PHY for sis190 mac.
 *	@dev: the net device to probe for
 *
 *	Select first detected PHY with link as default.
 *	If no one is link on, select PHY whose types is HOME as default.
 *	If HOME doesn't exist, select LAN.
*/
static u16 sis190_default_phy(struct net_device *dev)
{
	struct sis190_phy *phy, *phy_home, *phy_default, *phy_lan;
	struct sis190_private *tp = netdev_priv(dev);
	struct mii_if_info *mii_if = &tp->mii_if;
	void __iomem *ioaddr = tp->mmio_addr;
	u16 status;

	phy_home = phy_default = phy_lan = NULL;

	list_for_each_entry(phy, &tp->first_phy, list) {
		status = mdio_read_latched(ioaddr, phy->phy_id, MII_BMSR);

		// Link ON & Not select default PHY & not ghost PHY.
		if ((status & BMSR_LSTATUS) &&
		    !phy_default &&
		    (phy->type != UNKNOWN)) {
			phy_default = phy;
		} else {
			/* Not chosen: isolate the PHY but leave autoneg on,
			 * remembering it as a HOME/LAN fallback candidate. */
			status = mdio_read(ioaddr, phy->phy_id, MII_BMCR);
			mdio_write(ioaddr, phy->phy_id, MII_BMCR,
				   status | BMCR_ANENABLE | BMCR_ISOLATE);
			if (phy->type == HOME)
				phy_home = phy;
			else if (phy->type == LAN)
				phy_lan = phy;
		}
	}

	if (!phy_default) {
		if (phy_home)
			phy_default = phy_home;
		/* (Function truncated here — continues in the next chunk
		 * of the file.) */

/*
 * End of page 1 of 3.  The remaining lines of this capture were the code
 * viewer's keyboard-shortcut help (copy: Ctrl+C, search: Ctrl+F,
 * full screen: F11, larger font: Ctrl+=, smaller font: Ctrl+-,
 * show shortcuts: ?) and carry no source content.
 */