
forcedeth.c

Linux kernel source code (C)
Page 1 of 5
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
		phy_reserved &= ~PHY_VITESSE_INIT_MSK2;
		phy_reserved |= PHY_VITESSE_INIT8;
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_REALTEK) {
		/* reset could have cleared these out, set them back */
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}

	/* some phys clear out pause advertisement on reset, set it back */
	mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);

	/* restart auto negotiation */
	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
	if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
		return PHY_ERROR;
	}

	return 0;
}
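/*
 * mii_rw() is defined earlier in this file (not shown on this page).
 * Judging from the call sites above: passing MII_READ as the value reads
 * the PHY register and returns its contents, while any other value writes
 * it and returns nonzero on failure -- hence the repeated
 * "if (mii_rw(...)) return PHY_ERROR;" pattern.
 */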
static void nv_start_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 rx_ctrl = readl(base + NvRegReceiverControl);

	dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name);
	/* Already running? Stop it. */
	if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) {
		rx_ctrl &= ~NVREG_RCVCTL_START;
		writel(rx_ctrl, base + NvRegReceiverControl);
		pci_push(base);
	}
	writel(np->linkspeed, base + NvRegLinkSpeed);
	pci_push(base);
	rx_ctrl |= NVREG_RCVCTL_START;
	if (np->mac_in_use)
		rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN;
	writel(rx_ctrl, base + NvRegReceiverControl);
	dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n",
				dev->name, np->duplex, np->linkspeed);
	pci_push(base);
}

static void nv_stop_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 rx_ctrl = readl(base + NvRegReceiverControl);

	dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name);
	if (!np->mac_in_use)
		rx_ctrl &= ~NVREG_RCVCTL_START;
	else
		rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN;
	writel(rx_ctrl, base + NvRegReceiverControl);
	reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
			NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX,
			KERN_INFO "nv_stop_rx: ReceiverStatus remained busy");

	udelay(NV_RXSTOP_DELAY2);
	if (!np->mac_in_use)
		writel(0, base + NvRegLinkSpeed);
}

static void nv_start_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 tx_ctrl = readl(base + NvRegTransmitterControl);

	dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name);
	tx_ctrl |= NVREG_XMITCTL_START;
	if (np->mac_in_use)
		tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN;
	writel(tx_ctrl, base + NvRegTransmitterControl);
	pci_push(base);
}

static void nv_stop_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 tx_ctrl = readl(base + NvRegTransmitterControl);

	dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name);
	if (!np->mac_in_use)
		tx_ctrl &= ~NVREG_XMITCTL_START;
	else
		tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN;
	writel(tx_ctrl, base + NvRegTransmitterControl);
	reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
			NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX,
			KERN_INFO "nv_stop_tx: TransmitterStatus remained busy");

	udelay(NV_TXSTOP_DELAY2);
	if (!np->mac_in_use)
		writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV,
		       base + NvRegTransmitPoll);
}

static void nv_txrx_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name);
	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
	udelay(NV_TXRX_RESET_DELAY);
	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}

static void nv_mac_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_mac_reset\n", dev->name);
	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
	writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);
	writel(0, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);
	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}
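/*
 * reg_delay(), used by nv_stop_rx/nv_stop_tx above, is also defined on an
 * earlier page. A plausible sketch, inferred from its call sites: poll the
 * register at 'offset' until (value & mask) == target, waiting 'delay'
 * usec per iteration up to 'delaymax' total, printing 'msg' and returning
 * nonzero on timeout:
 *
 *	static int reg_delay(struct net_device *dev, int offset, u32 mask,
 *			     u32 target, int delay, int delaymax,
 *			     const char *msg)
 *	{
 *		u8 __iomem *base = get_hwbase(dev);
 *
 *		pci_push(base);
 *		do {
 *			udelay(delay);
 *			delaymax -= delay;
 *			if (delaymax < 0) {
 *				if (msg)
 *					printk(msg);
 *				return 1;
 *			}
 *		} while ((readl(base + offset) & mask) != target);
 *		return 0;
 *	}
 */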
static void nv_get_hw_stats(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	np->estats.tx_bytes += readl(base + NvRegTxCnt);
	np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
	np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
	np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
	np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
	np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
	np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
	np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
	np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
	np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
	np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
	np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
	np->estats.rx_runt += readl(base + NvRegRxRunt);
	np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
	np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
	np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
	np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
	np->estats.rx_length_error += readl(base + NvRegRxLenErr);
	np->estats.rx_unicast += readl(base + NvRegRxUnicast);
	np->estats.rx_multicast += readl(base + NvRegRxMulticast);
	np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
	np->estats.rx_packets =
		np->estats.rx_unicast +
		np->estats.rx_multicast +
		np->estats.rx_broadcast;
	np->estats.rx_errors_total =
		np->estats.rx_crc_errors +
		np->estats.rx_over_errors +
		np->estats.rx_frame_error +
		(np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
		np->estats.rx_late_collision +
		np->estats.rx_runt +
		np->estats.rx_frame_too_long;
	np->estats.tx_errors_total =
		np->estats.tx_late_collision +
		np->estats.tx_fifo_errors +
		np->estats.tx_carrier_errors +
		np->estats.tx_excess_deferral +
		np->estats.tx_retry_error;

	if (np->driver_data & DEV_HAS_STATISTICS_V2) {
		np->estats.tx_deferral += readl(base + NvRegTxDef);
		np->estats.tx_packets += readl(base + NvRegTxFrame);
		np->estats.rx_bytes += readl(base + NvRegRxCnt);
		np->estats.tx_pause += readl(base + NvRegTxPause);
		np->estats.rx_pause += readl(base + NvRegRxPause);
		np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
	}
}

/*
 * nv_get_stats: dev->get_stats function
 * Get latest stats value from the nic.
 * Called with read_lock(&dev_base_lock) held for read -
 * only synchronized against unregister_netdevice.
 */
static struct net_device_stats *nv_get_stats(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	/* If the nic supports hw counters then retrieve latest values */
	if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2)) {
		nv_get_hw_stats(dev);

		/* copy to net_device stats */
		dev->stats.tx_bytes = np->estats.tx_bytes;
		dev->stats.tx_fifo_errors = np->estats.tx_fifo_errors;
		dev->stats.tx_carrier_errors = np->estats.tx_carrier_errors;
		dev->stats.rx_crc_errors = np->estats.rx_crc_errors;
		dev->stats.rx_over_errors = np->estats.rx_over_errors;
		dev->stats.rx_errors = np->estats.rx_errors_total;
		dev->stats.tx_errors = np->estats.tx_errors_total;
	}

	return &dev->stats;
}
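/*
 * nv_get_stats is presumably installed in the probe path (not shown on
 * this page) as the dev->get_stats hook. The '+=' accumulation in
 * nv_get_hw_stats() suggests the hardware counters are clear-on-read, so
 * each read folds the latest delta into the running software totals.
 */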
/*
 * nv_alloc_rx: fill rx ring entries.
 * Return 1 if the allocations for the skbs failed and the
 * rx engine is without available descriptors.
 */
static int nv_alloc_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	struct ring_desc *less_rx;

	less_rx = np->get_rx.orig;
	if (less_rx-- == np->first_rx.orig)
		less_rx = np->last_rx.orig;

	while (np->put_rx.orig != less_rx) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
		if (skb) {
			np->put_rx_ctx->skb = skb;
			np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
							     skb->data,
							     skb_tailroom(skb),
							     PCI_DMA_FROMDEVICE);
			np->put_rx_ctx->dma_len = skb_tailroom(skb);
			np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma);
			wmb();
			np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
			if (unlikely(np->put_rx.orig++ == np->last_rx.orig))
				np->put_rx.orig = np->first_rx.orig;
			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
				np->put_rx_ctx = np->first_rx_ctx;
		} else {
			return 1;
		}
	}
	return 0;
}

static int nv_alloc_rx_optimized(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	struct ring_desc_ex *less_rx;

	less_rx = np->get_rx.ex;
	if (less_rx-- == np->first_rx.ex)
		less_rx = np->last_rx.ex;

	while (np->put_rx.ex != less_rx) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
		if (skb) {
			np->put_rx_ctx->skb = skb;
			np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
							     skb->data,
							     skb_tailroom(skb),
							     PCI_DMA_FROMDEVICE);
			np->put_rx_ctx->dma_len = skb_tailroom(skb);
			np->put_rx.ex->bufhigh = cpu_to_le64(np->put_rx_ctx->dma) >> 32;
			np->put_rx.ex->buflow = cpu_to_le64(np->put_rx_ctx->dma) & 0x0FFFFFFFF;
			wmb();
			np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
			if (unlikely(np->put_rx.ex++ == np->last_rx.ex))
				np->put_rx.ex = np->first_rx.ex;
			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
				np->put_rx_ctx = np->first_rx_ctx;
		} else {
			return 1;
		}
	}
	return 0;
}

/* If rx bufs are exhausted called after 50ms to attempt to refresh */
#ifdef CONFIG_FORCEDETH_NAPI
static void nv_do_rx_refill(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);

	/* Just reschedule NAPI rx processing */
	netif_rx_schedule(dev, &np->napi);
}
#else
static void nv_do_rx_refill(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	int retcode;

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq(np->pci_dev->irq);
	} else {
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
	}
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		retcode = nv_alloc_rx(dev);
	else
		retcode = nv_alloc_rx_optimized(dev);
	if (retcode) {
		spin_lock_irq(&np->lock);
		if (!np->in_shutdown)
			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		spin_unlock_irq(&np->lock);
	}
	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq(np->pci_dev->irq);
	} else {
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
	}
}
#endif
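/*
 * The 50ms retry mentioned above presumably comes from the oom_kick timer
 * armed in the setup path (not shown on this page), along the lines of:
 *
 *	init_timer(&np->oom_kick);
 *	np->oom_kick.data = (unsigned long) dev;
 *	np->oom_kick.function = &nv_do_rx_refill;
 *
 * Also note the ring-full guard in both allocators: 'less_rx' trails the
 * consumer pointer by one slot, so the producer always stops one
 * descriptor short of get_rx and a full ring is never mistaken for an
 * empty one.
 */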
static void nv_init_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	np->get_rx = np->put_rx = np->first_rx = np->rx_ring;
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1];
	else
		np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1];
	np->get_rx_ctx = np->put_rx_ctx = np->first_rx_ctx = np->rx_skb;
	np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1];

	for (i = 0; i < np->rx_ring_size; i++) {
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
			np->rx_ring.orig[i].flaglen = 0;
			np->rx_ring.orig[i].buf = 0;
		} else {
			np->rx_ring.ex[i].flaglen = 0;
			np->rx_ring.ex[i].txvlan = 0;
			np->rx_ring.ex[i].bufhigh = 0;
			np->rx_ring.ex[i].buflow = 0;
		}
		np->rx_skb[i].skb = NULL;
		np->rx_skb[i].dma = 0;
	}
}

static void nv_init_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	np->get_tx = np->put_tx = np->first_tx = np->tx_ring;
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
	else
		np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
	np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb;
	np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];

	for (i = 0; i < np->tx_ring_size; i++) {
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
			np->tx_ring.orig[i].flaglen = 0;
			np->tx_ring.orig[i].buf = 0;
		} else {
			np->tx_ring.ex[i].flaglen = 0;
			np->tx_ring.ex[i].txvlan = 0;
