sc92031.c

来自「linux 内核源代码」· C语言 代码 · 共 1,620 行 · 第 1/3 页

C
1,620
字号
		/* (tail of _sc92031_check_media — function header is outside this
		 * chunk)  Link-up path: announce carrier and log the negotiated
		 * speed/duplex, rate-limited. */
		netif_carrier_on(dev);
		if (printk_ratelimit())
			printk(KERN_INFO "%s: link up, %sMbps, %s-duplex\n",
				dev->name,
				speed_100 ? "100" : "10",
				duplex_full ? "full" : "half");
		return true;
	} else {
		/* Link down: rescan the MII bus, drop carrier and stop the
		 * transceiver. */
		_sc92031_mii_scan(port_base);

		netif_carrier_off(dev);

		_sc92031_disable_tx_rx(dev);

		if (printk_ratelimit())
			printk(KERN_INFO "%s: link down\n", dev->name);

		return false;
	}
}

/*
 * Reset the PHY and program its speed/duplex bits from the driver-global
 * 'media' selector (autoselect or forced 10/100 half/full).
 * Autonegotiation (PhyCtrlAne) is left enabled in all cases.  On return the
 * carrier is marked off and the TX queue stopped; a later
 * _sc92031_check_media() call brings them back up once link is seen.
 */
static void _sc92031_phy_reset(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;
	u32 phy_ctrl;

	phy_ctrl = ioread32(port_base + PhyCtrl);
	/* Clear old duplex/speed selection, then assert reset with ANE on. */
	phy_ctrl &= ~(PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10);
	phy_ctrl |= PhyCtrlAne | PhyCtrlReset;

	switch (media) {
	default:
	case AUTOSELECT:
		/* Advertise everything: full duplex, 100 and 10 Mbps. */
		phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100 | PhyCtrlSpd10;
		break;
	case M10_HALF:
		phy_ctrl |= PhyCtrlSpd10;
		break;
	case M10_FULL:
		phy_ctrl |= PhyCtrlDux | PhyCtrlSpd10;
		break;
	case M100_HALF:
		phy_ctrl |= PhyCtrlSpd100;
		break;
	case M100_FULL:
		phy_ctrl |= PhyCtrlDux | PhyCtrlSpd100;
		break;
	}

	/* Pulse reset: write with PhyCtrlReset set, wait, then clear it.
	 * NOTE(review): the 10ms/1ms delays look like hardware settle times —
	 * confirm against the SC92031 datasheet. */
	iowrite32(phy_ctrl, port_base + PhyCtrl);
	mdelay(10);

	phy_ctrl &= ~PhyCtrlReset;
	iowrite32(phy_ctrl, port_base + PhyCtrl);
	mdelay(1);

	_sc92031_mii_write(port_base, MII_JAB,
			PHY_16_JAB_ENB | PHY_16_PORT_ENB);
	_sc92031_mii_scan(port_base);

	netif_carrier_off(dev);
	netif_stop_queue(dev);
}

/*
 * Full chip re-initialization: power management off, soft reset, interrupts
 * masked, multicast filter cleared, RX ring base reprogrammed and TX state
 * cleared.  (Continues past this chunk boundary with software-state reset.)
 */
static void _sc92031_reset(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;

	/* disable PM */
	iowrite32(0, port_base + PMConfig);

	/* soft reset the chip */
	iowrite32(Cfg0_Reset, port_base + Config0);
	mdelay(200);

	iowrite32(0, port_base + Config0);
	mdelay(10);

	/* disable interrupts */
	iowrite32(0, port_base + IntrMask);

	/* clear multicast address */
	iowrite32(0, port_base + MAR0);
	iowrite32(0, port_base + MAR0 + 4);

	/* init rx ring */
	iowrite32(priv->rx_ring_dma_addr, port_base + RxbufAddr);
	priv->rx_ring_tail = priv->rx_ring_dma_addr;

	/* init tx ring */
	_sc92031_tx_clear(dev);

	/* clear old register values */
	priv->intr_status = 0;
	atomic_set(&priv->intr_mask, 0);
	priv->rx_config = 0;
	priv->tx_config = 0;
	priv->mc_flags = 0;

	/* configure rx buffer size */
	/* NOTE: vendor driver had dead code here to enable early tx/rx */
	iowrite32(Cfg1_Rcv64K, port_base + Config1);

	_sc92031_phy_reset(dev);
	_sc92031_check_media(dev);

	/* calculate rx fifo overflow */
	priv->rx_value = 0;

	/* enable PM */
	iowrite32(priv->pm_config, port_base + PMConfig);

	/* clear intr register */
	ioread32(port_base + IntrStatus);
}

/*
 * Reclaim completed TX descriptors between tx_tail and tx_head: read each
 * descriptor's TxStatus register, stop at the first one the hardware has not
 * finished, and fold the per-descriptor status bits into the stats counters.
 * Wakes the queue if any descriptor was freed while the queue was stopped.
 * Runs from sc92031_tasklet() under priv->lock.
 */
static void _sc92031_tx_tasklet(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;

	unsigned old_tx_tail;
	unsigned entry;
	u32 tx_status;

	old_tx_tail = priv->tx_tail;
	while (priv->tx_head - priv->tx_tail > 0) {
		entry = priv->tx_tail % NUM_TX_DESC;
		tx_status = ioread32(port_base + TxStatus0 + entry * 4);

		/* Hardware still owns this descriptor — stop reclaiming. */
		if (!(tx_status & (TxStatOK | TxUnderrun | TxAborted)))
			break;

		priv->tx_tail++;

		if (tx_status & TxStatOK) {
			/* Low 13 bits carry the transmitted byte count,
			 * bits 22-25 the collision count. */
			priv->stats.tx_bytes += tx_status & 0x1fff;
			priv->stats.tx_packets++;
			/* Note: TxCarrierLost is always asserted at 100mbps. */
			priv->stats.collisions += (tx_status >> 22) & 0xf;
		}

		if (tx_status & (TxOutOfWindow | TxAborted)) {
			priv->stats.tx_errors++;

			if (tx_status & TxAborted)
				priv->stats.tx_aborted_errors++;

			if (tx_status & TxCarrierLost)
				priv->stats.tx_carrier_errors++;

			if (tx_status & TxOutOfWindow)
				priv->stats.tx_window_errors++;
		}

		if (tx_status & TxUnderrun)
			priv->stats.tx_fifo_errors++;
	}

	if (priv->tx_tail != old_tx_tail)
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);
}

/*
 * Account a bad received frame in the stats counters.
 * NOTE(review): a frame that fails both the size check and RxStatesOK bumps
 * rx_errors twice — verify this double count is intended.
 */
static void _sc92031_rx_tasklet_error(u32 rx_status,
		struct sc92031_priv *priv, unsigned rx_size)
{
	if(rx_size > (MAX_ETH_FRAME_SIZE + 4) || rx_size < 16) {
		priv->stats.rx_errors++;
		priv->stats.rx_length_errors++;
	}

	if (!(rx_status & RxStatesOK)) {
		priv->stats.rx_errors++;
		if (rx_status & (RxHugeFrame | RxSmallFrame))
			priv->stats.rx_length_errors++;

		if (rx_status & RxBadAlign)
			priv->stats.rx_frame_errors++;

		if (!(rx_status & RxCRCOK))
			priv->stats.rx_crc_errors++;
	} else
		priv->rx_loss++;
}

/*
 * Drain received frames from the circular DMA RX buffer (RX_BUF_LEN bytes):
 * compute how much data lies between our software tail and the hardware
 * write pointer, then walk it frame by frame.  Each frame is prefixed by a
 * little-endian status word (size in the top 12 bits); the payload may wrap
 * around the end of the ring and is copied into a fresh skb in one or two
 * memcpy()s.  Runs from sc92031_tasklet() under priv->lock.
 */
static void _sc92031_rx_tasklet(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;

	dma_addr_t rx_ring_head;
	unsigned rx_len;
	unsigned rx_ring_offset;
	void *rx_ring = priv->rx_ring;

	rx_ring_head = ioread32(port_base + RxBufWPtr);
	rmb();

	/* rx_ring_head is only 17 bits in the RxBufWPtr register.
	 * we need to change it to 32 bits physical address
	 */
	rx_ring_head &= (dma_addr_t)(RX_BUF_LEN - 1);
	rx_ring_head |= priv->rx_ring_dma_addr & ~(dma_addr_t)(RX_BUF_LEN - 1);

	if (rx_ring_head < priv->rx_ring_dma_addr)
		rx_ring_head += RX_BUF_LEN;

	/* Bytes pending between our tail and the hardware head, modulo the
	 * ring size. */
	if (rx_ring_head >= priv->rx_ring_tail)
		rx_len = rx_ring_head - priv->rx_ring_tail;
	else
		rx_len = RX_BUF_LEN - (priv->rx_ring_tail - rx_ring_head);

	if (!rx_len)
		return;

	if (unlikely(rx_len > RX_BUF_LEN)) {
		if (printk_ratelimit())
			printk(KERN_ERR "%s: rx packets length > rx buffer\n",
					dev->name);
		return;
	}

	rx_ring_offset = (priv->rx_ring_tail - priv->rx_ring_dma_addr) % RX_BUF_LEN;

	while (rx_len) {
		u32 rx_status;
		unsigned rx_size, rx_size_align, pkt_size;
		struct sk_buff *skb;

		/* Frame header: 32-bit little-endian status word. */
		rx_status = le32_to_cpup((__le32 *)(rx_ring + rx_ring_offset));
		rmb();

		rx_size = rx_status >> 20;
		rx_size_align = (rx_size + 3) & ~3;	// for 4 bytes aligned
		pkt_size = rx_size - 4;	// Omit the four octet CRC from the length.

		rx_ring_offset = (rx_ring_offset + 4) % RX_BUF_LEN;

		/* Sanity-check the frame before touching its payload; give up
		 * on the whole batch on the first bad one. */
		if (unlikely(rx_status == 0
				|| rx_size > (MAX_ETH_FRAME_SIZE + 4)
				|| rx_size < 16
				|| !(rx_status & RxStatesOK))) {
			_sc92031_rx_tasklet_error(rx_status, priv, rx_size);
			break;
		}

		if (unlikely(rx_size_align + 4 > rx_len)) {
			if (printk_ratelimit())
				printk(KERN_ERR "%s: rx_len is too small\n", dev->name);
			break;
		}

		rx_len -= rx_size_align + 4;

		skb = dev_alloc_skb(pkt_size + NET_IP_ALIGN);
		if (unlikely(!skb)) {
			if (printk_ratelimit())
				printk(KERN_ERR "%s: Couldn't allocate a skb_buff for a packet of size %u\n",
						dev->name, pkt_size);
			goto next;
		}

		skb_reserve(skb, NET_IP_ALIGN);

		/* Frame may wrap past the end of the ring: copy in two
		 * pieces if so. */
		if ((rx_ring_offset + pkt_size) > RX_BUF_LEN) {
			memcpy(skb_put(skb, RX_BUF_LEN - rx_ring_offset),
				rx_ring + rx_ring_offset, RX_BUF_LEN - rx_ring_offset);
			memcpy(skb_put(skb, pkt_size - (RX_BUF_LEN - rx_ring_offset)),
				rx_ring, pkt_size - (RX_BUF_LEN - rx_ring_offset));
		} else {
			memcpy(skb_put(skb, pkt_size), rx_ring + rx_ring_offset, pkt_size);
		}

		/* (tail of _sc92031_rx_tasklet) Hand the frame to the stack
		 * and update counters. */
		skb->protocol = eth_type_trans(skb, dev);
		dev->last_rx = jiffies;
		netif_rx(skb);

		priv->stats.rx_bytes += pkt_size;
		priv->stats.rx_packets++;

		if (rx_status & Rx_Multicast)
			priv->stats.multicast++;

	next:
		rx_ring_offset = (rx_ring_offset + rx_size_align) % RX_BUF_LEN;
	}
	mb();

	/* Tell the hardware how far we have consumed. */
	priv->rx_ring_tail = rx_ring_head;
	iowrite32(priv->rx_ring_tail, port_base + RxBufRPtr);
}

/*
 * Handle a link state change interrupt: re-check the media and wake or stop
 * the TX queue accordingly.  A lost link is counted as a carrier error.
 */
static void _sc92031_link_tasklet(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);

	if (_sc92031_check_media(dev))
		netif_wake_queue(dev);
	else {
		netif_stop_queue(dev);
		priv->stats.tx_carrier_errors++;
	}
}

/*
 * Bottom half of the interrupt handler.  Dispatches on the interrupt bits
 * latched into priv->intr_status by sc92031_interrupt(), then restores the
 * interrupt mask (which the IRQ handler zeroed) on the way out — this
 * happens even if the device is no longer running.
 */
static void sc92031_tasklet(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;
	u32 intr_status, intr_mask;

	intr_status = priv->intr_status;

	spin_lock(&priv->lock);

	if (unlikely(!netif_running(dev)))
		goto out;

	if (intr_status & TxOK)
		_sc92031_tx_tasklet(dev);

	if (intr_status & RxOK)
		_sc92031_rx_tasklet(dev);

	if (intr_status & RxOverflow)
		priv->stats.rx_errors++;

	if (intr_status & TimeOut) {
		priv->stats.rx_errors++;
		priv->stats.rx_length_errors++;
	}

	if (intr_status & (LinkFail | LinkOK))
		_sc92031_link_tasklet(dev);

out:
	/* Re-enable interrupts that the hard IRQ handler masked off. */
	intr_mask = atomic_read(&priv->intr_mask);
	rmb();

	iowrite32(intr_mask, port_base + IntrMask);
	mmiowb();

	spin_unlock(&priv->lock);
}

/*
 * Hard interrupt handler.  Masks all chip interrupts, reads (and thereby
 * clears) IntrStatus, and if any interesting bit is set latches it into
 * priv->intr_status and defers the real work to sc92031_tasklet().  The
 * tasklet is responsible for restoring the interrupt mask; on the
 * nothing-for-us path the mask is restored here (continues past this chunk
 * boundary).
 */
static irqreturn_t sc92031_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;
	u32 intr_status, intr_mask;

	/* mask interrupts before clearing IntrStatus */
	iowrite32(0, port_base + IntrMask);
	_sc92031_dummy_read(port_base);

	intr_status = ioread32(port_base + IntrStatus);
	if (unlikely(intr_status == 0xffffffff))
		return IRQ_NONE;	// hardware has gone missing

	intr_status &= IntrBits;
	if (!intr_status)
		goto out_none;

	priv->intr_status = intr_status;
	tasklet_schedule(&priv->tasklet);

	return IRQ_HANDLED;

out_none:
	/* (tail of sc92031_interrupt) Not our interrupt: restore the mask we
	 * zeroed above and report IRQ_NONE. */
	intr_mask = atomic_read(&priv->intr_mask);
	rmb();

	iowrite32(intr_mask, port_base + IntrMask);
	mmiowb();

	return IRQ_NONE;
}

/*
 * Return the device statistics, refreshing the RX FIFO overflow count from
 * the RxStatus0 register when the interface is up.  priv->rx_value
 * accumulates across 0xffff register saturations.
 */
static struct net_device_stats *sc92031_get_stats(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;

	// FIXME I do not understand what is this trying to do.
	if (netif_running(dev)) {
		int temp;

		spin_lock_bh(&priv->lock);

		/* Update the error count. */
		temp = (ioread32(port_base + RxStatus0) >> 16) & 0xffff;

		if (temp == 0xffff) {
			/* Counter saturated: fold it into the running total. */
			priv->rx_value += temp;
			priv->stats.rx_fifo_errors = priv->rx_value;
		} else {
			priv->stats.rx_fifo_errors = temp + priv->rx_value;
		}

		spin_unlock_bh(&priv->lock);
	}

	return &priv->stats;
}

/*
 * Transmit one skb: copy it (with checksum) into the next free TX bounce
 * buffer, pad to the minimum Ethernet frame length, and post the buffer
 * address and status word to the chip.  The skb is always consumed.
 * Oversized frames and frames sent with no carrier are dropped.
 */
static int sc92031_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int err = 0;
	struct sc92031_priv *priv = netdev_priv(dev);
	void __iomem *port_base = priv->port_base;

	unsigned len;
	unsigned entry;
	u32 tx_status;

	if (unlikely(skb->len > TX_BUF_SIZE)) {
		err = -EMSGSIZE;
		priv->stats.tx_dropped++;
		goto out;
	}

	spin_lock(&priv->lock);

	if (unlikely(!netif_carrier_ok(dev))) {
		err = -ENOLINK;
		priv->stats.tx_dropped++;
		goto out_unlock;
	}

	BUG_ON(priv->tx_head - priv->tx_tail >= NUM_TX_DESC);

	entry = priv->tx_head++ % NUM_TX_DESC;

	skb_copy_and_csum_dev(skb, priv->tx_bufs + entry * TX_BUF_SIZE);

	/* Zero-pad short frames up to the 60-byte Ethernet minimum. */
	len = skb->len;
	if (unlikely(len < ETH_ZLEN)) {
		memset(priv->tx_bufs + entry * TX_BUF_SIZE + len,
				0, ETH_ZLEN - len);
		len = ETH_ZLEN;
	}

	/* Make sure the buffer contents are visible before the descriptor
	 * write that hands it to hardware. */
	wmb();

	/* NOTE(review): the 0x30000/0x50000 flags selected by frame length
	 * look like chip-specific transmit thresholds — confirm against the
	 * SC92031 datasheet. */
	if (len < 100)
		tx_status = len;
	else if (len < 300)
		tx_status = 0x30000 | len;
	else
		tx_status = 0x50000 | len;

	iowrite32(priv->tx_bufs_dma_addr + entry * TX_BUF_SIZE,
			port_base + TxAddr0 + entry * 4);
	iowrite32(tx_status, port_base + TxStatus0 + entry * 4);
	mmiowb();

	dev->trans_start = jiffies;

	/* Ring full: stop the queue until _sc92031_tx_tasklet reclaims. */
	if (priv->tx_head - priv->tx_tail >= NUM_TX_DESC)
		netif_stop_queue(dev);

out_unlock:
	spin_unlock(&priv->lock);

out:
	dev_kfree_skb(skb);

	return err;
}

/*
 * Open the interface: allocate the coherent RX ring and TX bounce buffers,
 * request the (shared) IRQ, reset the chip and enable interrupts.  The queue
 * is only started if the carrier is already up.  Unwinds allocations with
 * goto-cleanup on any failure.
 */
static int sc92031_open(struct net_device *dev)
{
	int err;
	struct sc92031_priv *priv = netdev_priv(dev);
	struct pci_dev *pdev = priv->pdev;

	priv->rx_ring = pci_alloc_consistent(pdev, RX_BUF_LEN,
			&priv->rx_ring_dma_addr);
	if (unlikely(!priv->rx_ring)) {
		err = -ENOMEM;
		goto out_alloc_rx_ring;
	}

	priv->tx_bufs = pci_alloc_consistent(pdev, TX_BUF_TOT_LEN,
			&priv->tx_bufs_dma_addr);
	if (unlikely(!priv->tx_bufs)) {
		err = -ENOMEM;
		goto out_alloc_tx_bufs;
	}
	priv->tx_head = priv->tx_tail = 0;

	err = request_irq(pdev->irq, sc92031_interrupt,
			IRQF_SHARED, dev->name, dev);
	if (unlikely(err < 0))
		goto out_request_irq;

	priv->pm_config = 0;

	/* Interrupts already disabled by sc92031_stop or sc92031_probe */
	spin_lock_bh(&priv->lock);

	_sc92031_reset(dev);
	mmiowb();

	spin_unlock_bh(&priv->lock);
	sc92031_enable_interrupts(dev);

	if (netif_carrier_ok(dev))
		netif_start_queue(dev);
	else
		netif_tx_disable(dev);

	return 0;

out_request_irq:
	pci_free_consistent(pdev, TX_BUF_TOT_LEN, priv->tx_bufs,
			priv->tx_bufs_dma_addr);
out_alloc_tx_bufs:
	pci_free_consistent(pdev, RX_BUF_LEN, priv->rx_ring,
			priv->rx_ring_dma_addr);
out_alloc_rx_ring:
	return err;
}

/*
 * Close the interface: stop TX, disable chip interrupts, then (continues
 * past this chunk boundary) tear down under priv->lock.
 */
static int sc92031_stop(struct net_device *dev)
{
	struct sc92031_priv *priv = netdev_priv(dev);
	struct pci_dev *pdev = priv->pdev;

	netif_tx_disable(dev);

	/* Disable interrupts, stop Tx and Rx. */
	sc92031_disable_interrupts(dev);

	spin_lock_bh(&priv->lock);

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?