tsi108_eth.c

[Viewer metadata, not part of the source file: from the Linux kernel source tree · C · 1,704 lines total · part 1 of 4.]
	/* NOTE(review): tail of tsi108_poll() -- the function's opening lines
	 * are outside this view.  Acks the RX event/interrupt status sampled
	 * above, completes received packets, refills the RX ring, and either
	 * leaves polling mode (unmasking RX interrupts) or stays scheduled.
	 */
	TSI_WRITE(TSI108_EC_RXESTAT, estat);
	TSI_WRITE(TSI108_EC_INTSTAT, intstat);

	if (data->rxpending || (estat & TSI108_EC_RXESTAT_Q0_DESCINT))
		num_received = tsi108_complete_rx(dev, budget);

	/* This should normally fill no more slots than the number of
	 * packets received in tsi108_complete_rx().  The exception
	 * is when we previously ran out of memory for RX SKBs.  In that
	 * case, it's helpful to obey the budget, not only so that the
	 * CPU isn't hogged, but so that memory (which may still be low)
	 * is not hogged by one device.
	 *
	 * A work unit is considered to be two SKBs to allow us to catch
	 * up when the ring has shrunk due to out-of-memory but we're
	 * still removing the full budget's worth of packets each time.
	 */
	if (data->rxfree < TSI108_RXRING_LEN)
		num_filled = tsi108_refill_rx(dev, budget * 2);

	if (intstat & TSI108_INT_RXERROR) {
		/* Read-then-write-back clears the latched error bits. */
		u32 err = TSI_READ(TSI108_EC_RXERR);
		TSI_WRITE(TSI108_EC_RXERR, err);

		if (err) {
			if (net_ratelimit())
				printk(KERN_DEBUG "%s: RX error %x\n",
				       dev->name, err);

			/* Restart the RX queue only if the hardware has
			 * actually stopped it.
			 */
			if (!(TSI_READ(TSI108_EC_RXSTAT) &
			      TSI108_EC_RXSTAT_QUEUE0))
				tsi108_restart_rx(data, dev);
		}
	}

	if (intstat & TSI108_INT_RXOVERRUN) {
		/* misclock serializes access to the stats block */
		spin_lock_irq(&data->misclock);
		data->stats.rx_fifo_errors++;
		spin_unlock_irq(&data->misclock);
	}

	if (num_received < budget) {
		/* Budget not exhausted: exit polling mode and clear
		 * (unmask) the RX interrupt sources in INTMASK so the
		 * hardware can interrupt us again.
		 */
		data->rxpending = 0;
		netif_rx_complete(dev, napi);

		TSI_WRITE(TSI108_EC_INTMASK,
				     TSI_READ(TSI108_EC_INTMASK)
				     & ~(TSI108_INT_RXQUEUE0
					 | TSI108_INT_RXTHRESH |
					 TSI108_INT_RXOVERRUN |
					 TSI108_INT_RXERROR |
					 TSI108_INT_RXWAIT));
	} else {
		/* Used the whole budget: remember work is pending so the
		 * next poll completes RX even without a fresh DESCINT.
		 */
		data->rxpending = 1;
	}

	return num_received;
}

/* Schedule NAPI RX polling, masking (not acking) the RX interrupt
 * sources while the poll is pending.  Called from the hard IRQ path
 * (tsi108_irq) and from tsi108_check_rxring().
 */
static void tsi108_rx_int(struct net_device *dev)
{
	struct tsi108_prv_data *data = netdev_priv(dev);

	/* A race could cause dev to already be scheduled, so it's not an
	 * error if that happens (and interrupts shouldn't be re-masked,
	 * because that can cause harmful races, if poll has already
	 * unmasked them but not cleared LINK_STATE_SCHED).
	 *
	 * This can happen if this code races with tsi108_poll(), which masks
	 * the interrupts after tsi108_irq_one() read the mask, but before
	 * netif_rx_schedule is called.  It could also happen due to calls
	 * from tsi108_check_rxring().
	 */
	if (netif_rx_schedule_prep(dev, &data->napi)) {
		/* Mask, rather than ack, the receive interrupts.  The ack
		 * will happen in tsi108_poll().
		 */
		TSI_WRITE(TSI108_EC_INTMASK,
				     TSI_READ(TSI108_EC_INTMASK) |
				     TSI108_INT_RXQUEUE0
				     | TSI108_INT_RXTHRESH |
				     TSI108_INT_RXOVERRUN | TSI108_INT_RXERROR |
				     TSI108_INT_RXWAIT);
		__netif_rx_schedule(dev, &data->napi);
	} else {
		if (!netif_running(dev)) {
			/* This can happen if an interrupt occurs while the
			 * interface is being brought down, as the START
			 * bit is cleared before the stop function is called.
			 *
			 * In this case, the interrupts must be masked, or
			 * they will continue indefinitely.
			 *
			 * There's a race here if the interface is brought down
			 * and then up in rapid succession, as the device could
			 * be made running after the above check and before
			 * the masking below.  This will only happen if the IRQ
			 * thread has a lower priority than the task bringing
			 * up the interface.  Fixing this race would likely
			 * require changes in generic code.
			 */
			TSI_WRITE(TSI108_EC_INTMASK,
					     TSI_READ
					     (TSI108_EC_INTMASK) |
					     TSI108_INT_RXQUEUE0 |
					     TSI108_INT_RXTHRESH |
					     TSI108_INT_RXOVERRUN |
					     TSI108_INT_RXERROR |
					     TSI108_INT_RXWAIT);
		}
	}
}

/* If the RX ring has run out of memory, try periodically
 * to allocate some more, as otherwise poll would never
 * get called (apart from the initial end-of-queue condition).
 *
 * This is called once per second (by default) from the thread.
 */
static void tsi108_check_rxring(struct net_device *dev)
{
	struct tsi108_prv_data *data = netdev_priv(dev);

	/* A poll is scheduled, as opposed to calling tsi108_refill_rx
	 * directly, so as to keep the receive path single-threaded
	 * (and thus not needing a lock).
	 */
	if (netif_running(dev) && data->rxfree < TSI108_RXRING_LEN / 4)
		tsi108_rx_int(dev);
}

/* Handle TX-side interrupt causes: ack the event/interrupt status,
 * report latched TX errors (rate-limited), and reap completed
 * descriptors under txlock.
 */
static void tsi108_tx_int(struct net_device *dev)
{
	struct tsi108_prv_data *data = netdev_priv(dev);
	u32 estat = TSI_READ(TSI108_EC_TXESTAT);

	/* Write-back acks the sampled event status, then the TX causes
	 * in the global interrupt status register.
	 */
	TSI_WRITE(TSI108_EC_TXESTAT, estat);
	TSI_WRITE(TSI108_EC_INTSTAT, TSI108_INT_TXQUEUE0 |
			     TSI108_INT_TXIDLE | TSI108_INT_TXERROR);
	if (estat & TSI108_EC_TXESTAT_Q0_ERR) {
		/* Read-then-write-back clears the latched error bits. */
		u32 err = TSI_READ(TSI108_EC_TXERR);
		TSI_WRITE(TSI108_EC_TXERR, err);

		if (err && net_ratelimit())
			printk(KERN_ERR "%s: TX error %x\n", dev->name, err);
	}

	if (estat & (TSI108_EC_TXESTAT_Q0_DESCINT | TSI108_EC_TXESTAT_Q0_EOQ)) {
		spin_lock(&data->txlock);
		tsi108_complete_tx(dev);
		spin_unlock(&data->txlock);
	}
}

/* Top-half interrupt handler.  Filters out sources that are currently
 * masked, then dispatches TX, RX, SFN and statistics-carry causes.
 */
static irqreturn_t tsi108_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tsi108_prv_data *data = netdev_priv(dev);
	u32 stat = TSI_READ(TSI108_EC_INTSTAT);

	if (!(stat & TSI108_INT_ANY))
		return IRQ_NONE;	/* Not our interrupt */

	/* Ignore causes that are masked in INTMASK (set bit == masked). */
	stat &= ~TSI_READ(TSI108_EC_INTMASK);

	if (stat & (TSI108_INT_TXQUEUE0 | TSI108_INT_TXIDLE |
		    TSI108_INT_TXERROR))
		tsi108_tx_int(dev);
	if (stat & (TSI108_INT_RXQUEUE0 | TSI108_INT_RXTHRESH |
		    TSI108_INT_RXWAIT | TSI108_INT_RXOVERRUN |
		    TSI108_INT_RXERROR))
		tsi108_rx_int(dev);

	if (stat & TSI108_INT_SFN) {
		if (net_ratelimit())
			printk(KERN_DEBUG "%s: SFN error\n", dev->name);
		TSI_WRITE(TSI108_EC_INTSTAT, TSI108_INT_SFN);
	}

	if (stat & TSI108_INT_STATCARRY) {
		tsi108_stat_carry(dev);
		TSI_WRITE(TSI108_EC_INTSTAT, TSI108_INT_STATCARRY);
	}

	return IRQ_HANDLED;
}

/* Stop the TX and RX queues and wait (bounded) for them to go idle. */
static void tsi108_stop_ethernet(struct net_device *dev)
{
	struct tsi108_prv_data *data = netdev_priv(dev);
	int i = 1000;
	/* Disable all TX and RX queues ...
 */
	TSI_WRITE(TSI108_EC_TXCTRL, 0);
	TSI_WRITE(TSI108_EC_RXCTRL, 0);

	/* ...and wait for them to become idle (up to 1000 * 10us = 10ms
	 * each).
	 */
	while(i--) {
		if(!(TSI_READ(TSI108_EC_TXSTAT) & TSI108_EC_TXSTAT_ACTIVE))
			break;
		udelay(10);
	}
	i = 1000;
	/* NOTE(review): if only the TX wait above times out but RX goes
	 * idle, this returns without reporting the TX timeout -- confirm
	 * that is intentional.
	 */
	while(i--){
		if(!(TSI_READ(TSI108_EC_RXSTAT) & TSI108_EC_RXSTAT_ACTIVE))
			return;
		udelay(10);
	}
	printk(KERN_ERR "%s function time out \n", __FUNCTION__);
}

/* Soft-reset the MAC core, the statistics block, the TX and RX
 * datapaths, and the MII management interface, in that order, with a
 * 100us settle time after asserting each reset.
 */
static void tsi108_reset_ether(struct tsi108_prv_data * data)
{
	TSI_WRITE(TSI108_MAC_CFG1, TSI108_MAC_CFG1_SOFTRST);
	udelay(100);
	TSI_WRITE(TSI108_MAC_CFG1, 0);

	TSI_WRITE(TSI108_EC_PORTCTRL, TSI108_EC_PORTCTRL_STATRST);
	udelay(100);
	TSI_WRITE(TSI108_EC_PORTCTRL,
			     TSI_READ(TSI108_EC_PORTCTRL) &
			     ~TSI108_EC_PORTCTRL_STATRST);

	TSI_WRITE(TSI108_EC_TXCFG, TSI108_EC_TXCFG_RST);
	udelay(100);
	TSI_WRITE(TSI108_EC_TXCFG,
			     TSI_READ(TSI108_EC_TXCFG) &
			     ~TSI108_EC_TXCFG_RST);

	TSI_WRITE(TSI108_EC_RXCFG, TSI108_EC_RXCFG_RST);
	udelay(100);
	TSI_WRITE(TSI108_EC_RXCFG,
			     TSI_READ(TSI108_EC_RXCFG) &
			     ~TSI108_EC_RXCFG_RST);

	/* Deassert the MII management reset and set the low clock-config
	 * bits to 0x07 -- presumably the management clock divisor; TODO
	 * confirm against the TSI108 manual.
	 */
	TSI_WRITE(TSI108_MAC_MII_MGMT_CFG,
			     TSI_READ(TSI108_MAC_MII_MGMT_CFG) |
			     TSI108_MAC_MII_MGMT_RST);
	udelay(100);
	TSI_WRITE(TSI108_MAC_MII_MGMT_CFG,
			     (TSI_READ(TSI108_MAC_MII_MGMT_CFG) &
			     ~(TSI108_MAC_MII_MGMT_RST |
			       TSI108_MAC_MII_MGMT_CLK)) | 0x07);
}

/* Read the MAC address out of the hardware registers into
 * dev->dev_addr, synthesizing and programming a default one if the
 * registers are blank.  Returns 0 or -EINVAL on an invalid address.
 */
static int tsi108_get_mac(struct net_device *dev)
{
	struct tsi108_prv_data *data = netdev_priv(dev);
	u32 word1 = TSI_READ(TSI108_MAC_ADDR1);
	u32 word2 = TSI_READ(TSI108_MAC_ADDR2);

	/* Note that the octets are reversed from what the manual says,
	 * producing an even weirder ordering...
*/	if (word2 == 0 && word1 == 0) {		dev->dev_addr[0] = 0x00;		dev->dev_addr[1] = 0x06;		dev->dev_addr[2] = 0xd2;		dev->dev_addr[3] = 0x00;		dev->dev_addr[4] = 0x00;		if (0x8 == data->phy)			dev->dev_addr[5] = 0x01;		else			dev->dev_addr[5] = 0x02;		word2 = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 24);		word1 = (dev->dev_addr[2] << 0) | (dev->dev_addr[3] << 8) |		    (dev->dev_addr[4] << 16) | (dev->dev_addr[5] << 24);		TSI_WRITE(TSI108_MAC_ADDR1, word1);		TSI_WRITE(TSI108_MAC_ADDR2, word2);	} else {		dev->dev_addr[0] = (word2 >> 16) & 0xff;		dev->dev_addr[1] = (word2 >> 24) & 0xff;		dev->dev_addr[2] = (word1 >> 0) & 0xff;		dev->dev_addr[3] = (word1 >> 8) & 0xff;		dev->dev_addr[4] = (word1 >> 16) & 0xff;		dev->dev_addr[5] = (word1 >> 24) & 0xff;	}	if (!is_valid_ether_addr(dev->dev_addr)) {		printk("KERN_ERR: word1: %08x, word2: %08x\n", word1, word2);		return -EINVAL;	}	return 0;}static int tsi108_set_mac(struct net_device *dev, void *addr){	struct tsi108_prv_data *data = netdev_priv(dev);	u32 word1, word2;	int i;	if (!is_valid_ether_addr(addr))		return -EINVAL;	for (i = 0; i < 6; i++)		/* +2 is for the offset of the HW addr type */		dev->dev_addr[i] = ((unsigned char *)addr)[i + 2];	word2 = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 24);	word1 = (dev->dev_addr[2] << 0) | (dev->dev_addr[3] << 8) |	    (dev->dev_addr[4] << 16) | (dev->dev_addr[5] << 24);	spin_lock_irq(&data->misclock);	TSI_WRITE(TSI108_MAC_ADDR1, word1);	TSI_WRITE(TSI108_MAC_ADDR2, word2);	spin_lock(&data->txlock);	if (data->txfree && data->link_up)		netif_wake_queue(dev);	spin_unlock(&data->txlock);	spin_unlock_irq(&data->misclock);	return 0;}/* Protected by dev->xmit_lock. 
 */
static void tsi108_set_rx_mode(struct net_device *dev)
{
	struct tsi108_prv_data *data = netdev_priv(dev);
	u32 rxcfg = TSI_READ(TSI108_EC_RXCFG);

	/* Promiscuous: disable both hash filters and accept all
	 * unicast and multicast frames.
	 */
	if (dev->flags & IFF_PROMISC) {
		rxcfg &= ~(TSI108_EC_RXCFG_UC_HASH | TSI108_EC_RXCFG_MC_HASH);
		rxcfg |= TSI108_EC_RXCFG_UFE | TSI108_EC_RXCFG_MFE;
		goto out;
	}

	rxcfg &= ~(TSI108_EC_RXCFG_UFE | TSI108_EC_RXCFG_MFE);

	if (dev->flags & IFF_ALLMULTI || dev->mc_count) {
		int i;
		struct dev_mc_list *mc = dev->mc_list;
		rxcfg |= TSI108_EC_RXCFG_MFE | TSI108_EC_RXCFG_MC_HASH;

		/* Rebuild the 512-bit multicast hash table from the
		 * device's multicast list: one bit per CRC-derived
		 * 9-bit hash index.
		 */
		memset(data->mc_hash, 0, sizeof(data->mc_hash));

		while (mc) {
			u32 hash, crc;

			if (mc->dmi_addrlen == 6) {
				crc = ether_crc(6, mc->dmi_addr);
				/* Top 9 bits of the CRC index the table. */
				hash = crc >> 23;
				__set_bit(hash, &data->mc_hash[0]);
			} else {
				printk(KERN_ERR
				       "%s: got multicast address of length %d "
				       "instead of 6.\n", dev->name,
				       mc->dmi_addrlen);
			}

			mc = mc->next;
		}

		/* Point the auto-incrementing hash-address register at
		 * the start of the multicast table, then stream the 16
		 * words out below.
		 */
		TSI_WRITE(TSI108_EC_HASHADDR,
				     TSI108_EC_HASHADDR_AUTOINC |
				     TSI108_EC_HASHADDR_MCAST);

		for (i = 0; i < 16; i++) {
			/* The manual says that the hardware may drop
			 * back-to-back writes to the data register.
*/			udelay(1);			TSI_WRITE(TSI108_EC_HASHDATA,					     data->mc_hash[i]);		}	}      out:	TSI_WRITE(TSI108_EC_RXCFG, rxcfg);}static void tsi108_init_phy(struct net_device *dev){	struct tsi108_prv_data *data = netdev_priv(dev);	u32 i = 0;	u16 phyval = 0;	unsigned long flags;	spin_lock_irqsave(&phy_lock, flags);	tsi108_write_mii(data, MII_BMCR, BMCR_RESET);	while (i--){		if(!(tsi108_read_mii(data, MII_BMCR) & BMCR_RESET))			break;		udelay(10);	}	if (i == 0)		printk(KERN_ERR "%s function time out \n", __FUNCTION__);	if (data->phy_type == TSI108_PHY_BCM54XX) {		tsi108_write_mii(data, 0x09, 0x0300);		tsi108_write_mii(data, 0x10, 0x1020);		tsi108_write_mii(data, 0x1c, 0x8c00);	}	tsi108_write_mii(data,			 MII_BMCR,			 BMCR_ANENABLE | BMCR_ANRESTART);	while (tsi108_read_mii(data, MII_BMCR) & BMCR_ANRESTART)		cpu_relax();	/* Set G/MII mode and receive clock select in TBI control #2.  The	 * second port won't work if this isn't done, even though we don't	 * use TBI mode.	 */	tsi108_write_tbi(data, 0x11, 0x30);	/* FIXME: It seems to take more than 2 back-to-back reads to the	 * PHY_STAT register before the link up status bit is set.	 */	data->link_up = 1;

[Viewer chrome, not part of the source file — keyboard shortcuts: Copy Ctrl+C · Search Ctrl+F · Fullscreen F11 · Larger font Ctrl+= · Smaller font Ctrl+- · Show shortcuts ?]