pcnet32.c

From "Linux kernel source code" · C code · 2,361 lines total · page 1/5

C
2,361
Font size
		/*
		 * NOTE(review): this chunk resumes in the middle of the probe
		 * routine -- its opening (including the loop that copies
		 * dev->dev_addr[] into the init block) is outside this view.
		 * Code below is unchanged; reformatting and comments only.
		 */
		lp->init_block->phys_addr[i] = dev->dev_addr[i];
	/* Clear both filter words and point the init block at the DMA rings. */
	lp->init_block->filter[0] = 0x00000000;
	lp->init_block->filter[1] = 0x00000000;
	lp->init_block->rx_ring = cpu_to_le32(lp->rx_ring_dma_addr);
	lp->init_block->tx_ring = cpu_to_le32(lp->tx_ring_dma_addr);

	/* switch pcnet32 to 32bit mode */
	a->write_bcr(ioaddr, 20, 2);

	/* Hand the chip the bus address of the init block, low then high 16 bits. */
	a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
	a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));

	if (pdev) {
		/* use the IRQ provided by PCI */
		dev->irq = pdev->irq;
		if (pcnet32_debug & NETIF_MSG_PROBE)
			printk(" assigned IRQ %d.\n", dev->irq);
	} else {
		unsigned long irq_mask = probe_irq_on();

		/*
		 * To auto-IRQ we enable the initialization-done and DMA error
		 * interrupts. For ISA boards we get a DMA error, but VLB and PCI
		 * boards will work.
		 */
		/* Trigger an initialization just for the interrupt. */
		a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_INIT);
		mdelay(1);

		dev->irq = probe_irq_off(irq_mask);
		if (!dev->irq) {
			if (pcnet32_debug & NETIF_MSG_PROBE)
				printk(", failed to detect IRQ line.\n");
			ret = -ENODEV;
			goto err_free_ring;
		}
		if (pcnet32_debug & NETIF_MSG_PROBE)
			printk(", probed IRQ %d.\n", dev->irq);
	}

	/* Set the mii phy_id so that we can query the link state */
	if (lp->mii) {
		/* lp->phycount and lp->phymask are set to 0 by memset above */
		lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
		/* scan for PHYs */
		for (i = 0; i < PCNET32_MAX_PHYS; i++) {
			unsigned short id1, id2;

			/* A PHY that answers 0xffff for both IDs is absent. */
			id1 = mdio_read(dev, i, MII_PHYSID1);
			if (id1 == 0xffff)
				continue;
			id2 = mdio_read(dev, i, MII_PHYSID2);
			if (id2 == 0xffff)
				continue;
			if (i == 31 && ((chip_version + 1) & 0xfffe) == 0x2624)
				continue;	/* 79C971 & 79C972 have phantom phy at id 31 */
			lp->phycount++;
			lp->phymask |= (1 << i);
			lp->mii_if.phy_id = i;
			if (pcnet32_debug & NETIF_MSG_PROBE)
				printk(KERN_INFO PFX
				       "Found PHY %04x:%04x at address %d.\n",
				       id1, id2, i);
		}
		/* Latch the selected PHY address into BCR33 for later MII access. */
		lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
		if (lp->phycount > 1) {
			lp->options |= PCNET32_PORT_MII;
		}
	}

	init_timer(&lp->watchdog_timer);
	lp->watchdog_timer.data = (unsigned long)dev;
	lp->watchdog_timer.function = (void *)&pcnet32_watchdog;

	/* The PCNET32-specific entries in the device structure. */
	dev->open = &pcnet32_open;
	dev->hard_start_xmit = &pcnet32_start_xmit;
	dev->stop = &pcnet32_close;
	dev->get_stats = &pcnet32_get_stats;
	dev->set_multicast_list = &pcnet32_set_multicast_list;
	dev->do_ioctl = &pcnet32_ioctl;
	dev->ethtool_ops = &pcnet32_ethtool_ops;
	dev->tx_timeout = pcnet32_tx_timeout;
	dev->watchdog_timeo = (5 * HZ);

#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = pcnet32_poll_controller;
#endif

	/* Fill in the generic fields of the device structure. */
	if (register_netdev(dev))
		goto err_free_ring;

	if (pdev) {
		pci_set_drvdata(pdev, dev);
	} else {
		/* Non-PCI devices are kept on a driver-private linked list. */
		lp->next = pcnet32_dev;
		pcnet32_dev = dev;
	}

	if (pcnet32_debug & NETIF_MSG_PROBE)
		printk(KERN_INFO "%s: registered as %s\n", dev->name, lp->name);
	cards_found++;

	/* enable LED writes */
	a->write_bcr(ioaddr, 2, a->read_bcr(ioaddr, 2) | 0x1000);

	return 0;

	/* Error unwind: release resources in reverse order of acquisition. */
      err_free_ring:
	pcnet32_free_ring(dev);
      err_free_consistent:
	pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block),
			    lp->init_block, lp->init_dma_addr);
      err_free_netdev:
	free_netdev(dev);
      err_release_region:
	release_region(ioaddr, PCNET32_TOTAL_SIZE);
	return ret;
}

/* if any allocation fails, caller must also call pcnet32_free_ring */
static int pcnet32_alloc_ring(struct net_device *dev, char *name)
{
	struct pcnet32_private *lp = netdev_priv(dev);

	lp->tx_ring = pci_alloc_consistent(lp->pci_dev,
					   sizeof(struct pcnet32_tx_head) *
					   lp->tx_ring_size,
					   &lp->tx_ring_dma_addr);
	if (lp->tx_ring == NULL) {
		if (netif_msg_drv(lp))
			printk("\n" KERN_ERR PFX
			       "%s: Consistent memory allocation failed.\n",
			       name);
		return -ENOMEM;
	}

	lp->rx_ring = pci_alloc_consistent(lp->pci_dev,
					   sizeof(struct pcnet32_rx_head) *
					   lp->rx_ring_size,
			   &lp->rx_ring_dma_addr);	if (lp->rx_ring == NULL) {		if (netif_msg_drv(lp))			printk("\n" KERN_ERR PFX			       "%s: Consistent memory allocation failed.\n",			       name);		return -ENOMEM;	}	lp->tx_dma_addr = kcalloc(lp->tx_ring_size, sizeof(dma_addr_t),				  GFP_ATOMIC);	if (!lp->tx_dma_addr) {		if (netif_msg_drv(lp))			printk("\n" KERN_ERR PFX			       "%s: Memory allocation failed.\n", name);		return -ENOMEM;	}	lp->rx_dma_addr = kcalloc(lp->rx_ring_size, sizeof(dma_addr_t),				  GFP_ATOMIC);	if (!lp->rx_dma_addr) {		if (netif_msg_drv(lp))			printk("\n" KERN_ERR PFX			       "%s: Memory allocation failed.\n", name);		return -ENOMEM;	}	lp->tx_skbuff = kcalloc(lp->tx_ring_size, sizeof(struct sk_buff *),				GFP_ATOMIC);	if (!lp->tx_skbuff) {		if (netif_msg_drv(lp))			printk("\n" KERN_ERR PFX			       "%s: Memory allocation failed.\n", name);		return -ENOMEM;	}	lp->rx_skbuff = kcalloc(lp->rx_ring_size, sizeof(struct sk_buff *),				GFP_ATOMIC);	if (!lp->rx_skbuff) {		if (netif_msg_drv(lp))			printk("\n" KERN_ERR PFX			       "%s: Memory allocation failed.\n", name);		return -ENOMEM;	}	return 0;}static void pcnet32_free_ring(struct net_device *dev){	struct pcnet32_private *lp = netdev_priv(dev);	kfree(lp->tx_skbuff);	lp->tx_skbuff = NULL;	kfree(lp->rx_skbuff);	lp->rx_skbuff = NULL;	kfree(lp->tx_dma_addr);	lp->tx_dma_addr = NULL;	kfree(lp->rx_dma_addr);	lp->rx_dma_addr = NULL;	if (lp->tx_ring) {		pci_free_consistent(lp->pci_dev,				    sizeof(struct pcnet32_tx_head) *				    lp->tx_ring_size, lp->tx_ring,				    lp->tx_ring_dma_addr);		lp->tx_ring = NULL;	}	if (lp->rx_ring) {		pci_free_consistent(lp->pci_dev,				    sizeof(struct pcnet32_rx_head) *				    lp->rx_ring_size, lp->rx_ring,				    lp->rx_ring_dma_addr);		lp->rx_ring = NULL;	}}static int pcnet32_open(struct net_device *dev){	struct pcnet32_private *lp = netdev_priv(dev);	unsigned long ioaddr = dev->base_addr;	u16 val;	int i;	int rc;	unsigned long flags;	if (request_irq(dev->irq, &pcnet32_interrupt,			
lp->shared_irq ? IRQF_SHARED : 0, dev->name,			(void *)dev)) {		return -EAGAIN;	}	spin_lock_irqsave(&lp->lock, flags);	/* Check for a valid station address */	if (!is_valid_ether_addr(dev->dev_addr)) {		rc = -EINVAL;		goto err_free_irq;	}	/* Reset the PCNET32 */	lp->a.reset(ioaddr);	/* switch pcnet32 to 32bit mode */	lp->a.write_bcr(ioaddr, 20, 2);	if (netif_msg_ifup(lp))		printk(KERN_DEBUG		       "%s: pcnet32_open() irq %d tx/rx rings %#x/%#x init %#x.\n",		       dev->name, dev->irq, (u32) (lp->tx_ring_dma_addr),		       (u32) (lp->rx_ring_dma_addr),		       (u32) (lp->init_dma_addr));	/* set/reset autoselect bit */	val = lp->a.read_bcr(ioaddr, 2) & ~2;	if (lp->options & PCNET32_PORT_ASEL)		val |= 2;	lp->a.write_bcr(ioaddr, 2, val);	/* handle full duplex setting */	if (lp->mii_if.full_duplex) {		val = lp->a.read_bcr(ioaddr, 9) & ~3;		if (lp->options & PCNET32_PORT_FD) {			val |= 1;			if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))				val |= 2;		} else if (lp->options & PCNET32_PORT_ASEL) {			/* workaround of xSeries250, turn on for 79C975 only */			if (lp->chip_version == 0x2627)				val |= 3;		}		lp->a.write_bcr(ioaddr, 9, val);	}	/* set/reset GPSI bit in test register */	val = lp->a.read_csr(ioaddr, 124) & ~0x10;	if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)		val |= 0x10;	lp->a.write_csr(ioaddr, 124, val);	/* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */	if (lp->pci_dev->subsystem_vendor == PCI_VENDOR_ID_AT &&	    (lp->pci_dev->subsystem_device == PCI_SUBDEVICE_ID_AT_2700FX ||	     lp->pci_dev->subsystem_device == PCI_SUBDEVICE_ID_AT_2701FX)) {		if (lp->options & PCNET32_PORT_ASEL) {			lp->options = PCNET32_PORT_FD | PCNET32_PORT_100;			if (netif_msg_link(lp))				printk(KERN_DEBUG				       "%s: Setting 100Mb-Full Duplex.\n",				       dev->name);		}	}	if (lp->phycount < 2) {		/*		 * 24 Jun 2004 according AMD, in order to change the PHY,		 * DANAS (or DISPM for 79C976) must be set; then select the speed,		 * 
duplex, and/or enable auto negotiation, and clear DANAS		 */		if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {			lp->a.write_bcr(ioaddr, 32,					lp->a.read_bcr(ioaddr, 32) | 0x0080);			/* disable Auto Negotiation, set 10Mpbs, HD */			val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;			if (lp->options & PCNET32_PORT_FD)				val |= 0x10;			if (lp->options & PCNET32_PORT_100)				val |= 0x08;			lp->a.write_bcr(ioaddr, 32, val);		} else {			if (lp->options & PCNET32_PORT_ASEL) {				lp->a.write_bcr(ioaddr, 32,						lp->a.read_bcr(ioaddr,							       32) | 0x0080);				/* enable auto negotiate, setup, disable fd */				val = lp->a.read_bcr(ioaddr, 32) & ~0x98;				val |= 0x20;				lp->a.write_bcr(ioaddr, 32, val);			}		}	} else {		int first_phy = -1;		u16 bmcr;		u32 bcr9;		struct ethtool_cmd ecmd;		/*		 * There is really no good other way to handle multiple PHYs		 * other than turning off all automatics		 */		val = lp->a.read_bcr(ioaddr, 2);		lp->a.write_bcr(ioaddr, 2, val & ~2);		val = lp->a.read_bcr(ioaddr, 32);		lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7));	/* stop MII manager */		if (!(lp->options & PCNET32_PORT_ASEL)) {			/* setup ecmd */			ecmd.port = PORT_MII;			ecmd.transceiver = XCVR_INTERNAL;			ecmd.autoneg = AUTONEG_DISABLE;			ecmd.speed =			    lp->			    options & PCNET32_PORT_100 ? 
SPEED_100 : SPEED_10;			bcr9 = lp->a.read_bcr(ioaddr, 9);			if (lp->options & PCNET32_PORT_FD) {				ecmd.duplex = DUPLEX_FULL;				bcr9 |= (1 << 0);			} else {				ecmd.duplex = DUPLEX_HALF;				bcr9 |= ~(1 << 0);			}			lp->a.write_bcr(ioaddr, 9, bcr9);		}		for (i = 0; i < PCNET32_MAX_PHYS; i++) {			if (lp->phymask & (1 << i)) {				/* isolate all but the first PHY */				bmcr = mdio_read(dev, i, MII_BMCR);				if (first_phy == -1) {					first_phy = i;					mdio_write(dev, i, MII_BMCR,						   bmcr & ~BMCR_ISOLATE);				} else {					mdio_write(dev, i, MII_BMCR,						   bmcr | BMCR_ISOLATE);				}				/* use mii_ethtool_sset to setup PHY */				lp->mii_if.phy_id = i;				ecmd.phy_address = i;				if (lp->options & PCNET32_PORT_ASEL) {					mii_ethtool_gset(&lp->mii_if, &ecmd);					ecmd.autoneg = AUTONEG_ENABLE;				}				mii_ethtool_sset(&lp->mii_if, &ecmd);			}		}		lp->mii_if.phy_id = first_phy;		if (netif_msg_link(lp))			printk(KERN_INFO "%s: Using PHY number %d.\n",			       dev->name, first_phy);	}#ifdef DO_DXSUFLO	if (lp->dxsuflo) {	/* Disable transmit stop on underflow */		val = lp->a.read_csr(ioaddr, CSR3);		val |= 0x40;		lp->a.write_csr(ioaddr, CSR3, val);	}#endif	lp->init_block->mode =	    cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);	pcnet32_load_multicast(dev);	if (pcnet32_init_ring(dev)) {		rc = -ENOMEM;		goto err_free_ring;	}#ifdef CONFIG_PCNET32_NAPI	napi_enable(&lp->napi);#endif	/* Re-initialize the PCNET32, and start it when done. 
*/	lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));	lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));	lp->a.write_csr(ioaddr, CSR4, 0x0915);	/* auto tx pad */	lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);	netif_start_queue(dev);	if (lp->chip_version >= PCNET32_79C970A) {		/* Print the link status and start the watchdog */		pcnet32_check_media(dev, 1);		mod_timer(&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT);	}	i = 0;	while (i++ < 100)		if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)			break;	/*	 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton	 * reports that doing so triggers a bug in the '974.	 */	lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);	if (netif_msg_ifup(lp))		printk(KERN_DEBUG		       "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n",		       dev->name, i,		       (u32) (lp->init_dma_addr),		       lp->a.read_csr(ioaddr, CSR0));	spin_unlock_irqrestore(&lp->lock, flags);	return 0;		/* Always succeed */      err_free_ring:	/* free any allocated skbuffs */	pcnet32_purge_rx_ring(dev);	/*	 * Switch back to 16bit mode to avoid problems with dumb	 * DOS packet driver after a warm reboot	 */	lp->a.write_bcr(ioaddr, 20, 4);      err_free_irq:	spin_unlock_irqrestore(&lp->lock, flags);	free_irq(dev->irq, dev);	return rc;}/* * The LANCE has been halted for one reason or another (busmaster memory * arbitration error, Tx FIFO underflow, driver stopped it to reconfigure, * etc.).  Modern LANCE variants always reload their ring-buffer * configuration when restarted, so we must reinitialize our ring * context before restarting.  As part of this reinitialization, * find all packets still on the Tx ring and pretend that they had been * sent (in effect, drop the packets on the floor) - the higher-level * protocols will time out and retransmit.  It'd be better to shuffle * these skbs t

⌨️ Keyboard shortcuts

Copy code: Ctrl + C
Search code: Ctrl + F
Full-screen mode: F11
Increase font size: Ctrl + =
Decrease font size: Ctrl + -
Show shortcuts: ?