📄 via-rhine.c
字号:
/* Forward declarations for handlers referenced before their definitions. */
static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
static void rhine_tx(struct net_device *dev);
static void rhine_rx(struct net_device *dev);
static void rhine_error(struct net_device *dev, int intr_status);
static void rhine_set_rx_mode(struct net_device *dev);
static struct net_device_stats *rhine_get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static struct ethtool_ops netdev_ethtool_ops;
static int rhine_close(struct net_device *dev);
static void rhine_shutdown (struct device *gdev);

/*
 * Busy-wait (bounded at 1024 iterations) until `condition` becomes true.
 * If more than half the budget was consumed and debugging is enabled,
 * log how many cycles were used and where.
 */
#define RHINE_WAIT_FOR(condition) do { \
	int i=1024; \
	while (!(condition) && --i) \
		; \
	if (debug > 1 && i < 512) \
		printk(KERN_INFO "%s: %4d cycles used @ %s:%d\n", \
			DRV_NAME, 1024-i, __func__, __LINE__); \
} while(0)

/*
 * Read the chip's interrupt status as a single 32-bit value.
 * The low 16 bits come from IntrStatus; on chips with the
 * write-back-race quirk (Rhine-II) the extra IntrStatus2 byte is
 * folded into bits 16-23.
 */
static inline u32 get_intr_status(struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	struct rhine_private *rp = netdev_priv(dev);
	u32 intr_status;

	intr_status = readw(ioaddr + IntrStatus);
	/* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
	if (rp->quirks & rqStatusWBRace)
		intr_status |= readb(ioaddr + IntrStatus2) << 16;
	return intr_status;
}

/*
 * Get power related registers into sane state.
 * Notify user about past WOL event.
 */
static void rhine_power_init(struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	struct rhine_private *rp = netdev_priv(dev);
	u16 wolstat;

	if (rp->quirks & rqWOL) {
		/* Make sure chip is in power state D0 */
		writeb(readb(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);

		/* Disable "force PME-enable" */
		writeb(0x80, ioaddr + WOLcgClr);

		/* Clear power-event config bits (WOL) */
		writeb(0xFF, ioaddr + WOLcrClr);
		/* More recent cards can manage two additional patterns */
		if (rp->quirks & rq6patterns)
			writeb(0x03, ioaddr + WOLcrClr1);

		/* Save power-event status bits */
		wolstat = readb(ioaddr + PwrcsrSet);
		if (rp->quirks & rq6patterns)
			wolstat |= (readb(ioaddr + PwrcsrSet1) & 0x03) << 8;

		/* Clear power-event status bits */
		writeb(0xFF, ioaddr + PwrcsrClr);
		if (rp->quirks & rq6patterns)
			writeb(0x03, ioaddr + PwrcsrClr1);

		/* Tell the user why the machine was woken up, if it was */
		if (wolstat) {
			char *reason;
			switch (wolstat) {
			case WOLmagic:
				reason = "Magic packet";
				break;
			case WOLlnkon:
				reason = "Link went up";
				break;
			case WOLlnkoff:
				reason = "Link went down";
				break;
			case WOLucast:
				reason = "Unicast packet";
				break;
			case WOLbmcast:
				reason = "Multicast/broadcast packet";
				break;
			default:
				reason = "Unknown";
			}
			printk(KERN_INFO "%s: Woke system up. Reason: %s.\n",
			       DRV_NAME, reason);
		}
	}
}

/*
 * Soft-reset the chip via ChipCmd1 and wait for completion,
 * forcing a harder reset (MiscCmd) on chips that support it if
 * the first attempt has not completed.
 */
static void rhine_chip_reset(struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	struct rhine_private *rp = netdev_priv(dev);

	writeb(Cmd1Reset, ioaddr + ChipCmd1);
	IOSYNC;

	if (readb(ioaddr + ChipCmd1) & Cmd1Reset) {
		printk(KERN_INFO "%s: Reset not complete yet. "
			"Trying harder.\n", DRV_NAME);

		/* Force reset */
		if (rp->quirks & rqForceReset)
			writeb(0x40, ioaddr + MiscCmd);

		/* Reset can take somewhat longer (rare) */
		RHINE_WAIT_FOR(!(readb(ioaddr + ChipCmd1) & Cmd1Reset));
	}

	if (debug > 1)
		printk(KERN_INFO "%s: Reset %s.\n", dev->name,
			(readb(ioaddr + ChipCmd1) & Cmd1Reset) ?
			"failed" : "succeeded");
}

#ifdef USE_MMIO
/*
 * Enable memory-mapped I/O access to the chip registers by setting
 * the MMIO-enable bit in ConfigA (Rhine-I) or ConfigD (later chips)
 * through PIO.
 */
static void __devinit enable_mmio(long pioaddr, u32 quirks)
{
	int n;
	if (quirks & rqRhineI) {
		/* More recent docs say that this bit is reserved ... */
		n = inb(pioaddr + ConfigA) | 0x20;
		outb(n, pioaddr + ConfigA);
	} else {
		n = inb(pioaddr + ConfigD) | 0x80;
		outb(n, pioaddr + ConfigD);
	}
}
#endif

/*
 * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
 * (plus 0x6C for Rhine-I/II)
 */
static void __devinit rhine_reload_eeprom(long pioaddr, struct net_device *dev)
{
	long ioaddr = dev->base_addr;
	struct rhine_private *rp = netdev_priv(dev);

	/* Kick off the autoload and wait for the chip to clear the bit */
	outb(0x20, pioaddr + MACRegEEcsr);
	RHINE_WAIT_FOR(!(inb(pioaddr + MACRegEEcsr) & 0x20));

#ifdef USE_MMIO
	/*
	 * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
	 * MMIO. If reloading EEPROM was done first this could be avoided, but
	 * it is not known if that still works with the "win98-reboot" problem.
	 */
	enable_mmio(pioaddr, rp->quirks);
#endif

	/* Turn off EEPROM-controlled wake-up (magic packet) */
	if (rp->quirks & rqWOL)
		writeb(readb(ioaddr + ConfigA) & 0xFE, ioaddr + ConfigA);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Netpoll hook: run the interrupt handler with the device's IRQ
 * disabled so it cannot race against a real interrupt.
 */
static void rhine_poll(struct net_device *dev)
{
	disable_irq(dev->irq);
	rhine_interrupt(dev->irq, (void *)dev, NULL);
	enable_irq(dev->irq);
}
#endif

/*
 * Bring the hardware to a known state: soft reset, then reload the
 * EEPROM-controlled configuration bytes that the reset cleared.
 */
static void rhine_hw_init(struct net_device *dev, long pioaddr)
{
	struct rhine_private *rp = netdev_priv(dev);

	/* Reset the chip to erase previous misconfiguration. */
	rhine_chip_reset(dev);

	/* Rhine-I needs extra time to recuperate before EEPROM reload */
	if (rp->quirks & rqRhineI)
		msleep(5);

	/* Reload EEPROM controlled bytes cleared by soft reset */
	rhine_reload_eeprom(pioaddr, dev);
}

/*
 * PCI probe routine: identify the chip revision (which selects the
 * quirks, PHY id and register window size), map its registers,
 * initialize power/WOL state, read the station address, wire up the
 * net_device operations, register the interface and bring the MII
 * PHY out of isolation.
 *
 * Returns 0 on success or a negative errno; all partially acquired
 * resources are released on the error paths via the goto chain.
 */
static int __devinit rhine_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct rhine_private *rp;
	int i, rc;
	u8 pci_rev;
	u32 quirks;
	long pioaddr;
	long memaddr;
	long ioaddr;
	int io_size, phy_id;
	const char *name;

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk(version);
#endif

	pci_read_config_byte(pdev, PCI_REVISION_ID, &pci_rev);

	/* Map the PCI revision to chip generation, quirks and window size */
	io_size = 256;
	phy_id = 0;
	quirks = 0;
	name = "Rhine";
	if (pci_rev < VTunknown0) {
		quirks = rqRhineI;
		io_size = 128;
	}
	else if (pci_rev >= VT6102) {
		quirks = rqWOL | rqForceReset;
		if (pci_rev < VT6105) {
			name = "Rhine II";
			quirks |= rqStatusWBRace;	/* Rhine-II exclusive */
		}
		else {
			phy_id = 1;	/* Integrated PHY, phy_id fixed to 1 */
			if (pci_rev >= VT6105_B0)
				quirks |= rq6patterns;
			if (pci_rev < VT6105M)
				name = "Rhine III";
			else
				name = "Rhine III (Management Adapter)";
		}
	}

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_out;

	/* this should always be supported */
	rc = pci_set_dma_mask(pdev, 0xffffffff);
	if (rc) {
		printk(KERN_ERR "32-bit PCI DMA addresses not supported by "
		       "the card!?\n");
		goto err_out;
	}

	/* sanity check */
	if ((pci_resource_len(pdev, 0) < io_size) ||
	    (pci_resource_len(pdev, 1) < io_size)) {
		rc = -EIO;
		printk(KERN_ERR "Insufficient PCI resources, aborting\n");
		goto err_out;
	}

	/* BAR 0 is the PIO window, BAR 1 the MMIO window */
	pioaddr = pci_resource_start(pdev, 0);
	memaddr = pci_resource_start(pdev, 1);

	pci_set_master(pdev);

	dev = alloc_etherdev(sizeof(struct rhine_private));
	if (!dev) {
		rc = -ENOMEM;
		printk(KERN_ERR "alloc_etherdev failed\n");
		goto err_out;
	}
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	rp = netdev_priv(dev);
	rp->quirks = quirks;
	rp->pioaddr = pioaddr;
	rp->pdev = pdev;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out_free_netdev;

#ifdef USE_MMIO
	enable_mmio(pioaddr, quirks);

	ioaddr = (long) ioremap(memaddr, io_size);
	if (!ioaddr) {
		rc = -EIO;
		printk(KERN_ERR "ioremap failed for device %s, region 0x%X "
		       "@ 0x%lX\n", pci_name(pdev), io_size, memaddr);
		goto err_out_free_res;
	}

	/* Check that selected MMIO registers match the PIO ones */
	i = 0;
	while (mmio_verify_registers[i]) {
		int reg = mmio_verify_registers[i++];
		unsigned char a = inb(pioaddr+reg);
		unsigned char b = readb(ioaddr+reg);
		if (a != b) {
			rc = -EIO;
			printk(KERN_ERR "MMIO do not match PIO [%02x] "
			       "(%02x != %02x)\n", reg, a, b);
			goto err_out_unmap;
		}
	}
#else
	ioaddr = pioaddr;
#endif /* USE_MMIO */

	dev->base_addr = ioaddr;

	/* Get chip registers into a sane state */
	rhine_power_init(dev);
	rhine_hw_init(dev, pioaddr);

	/* Station address lives in 6 consecutive registers */
	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = readb(ioaddr + StationAddr + i);

	if (!is_valid_ether_addr(dev->dev_addr)) {
		rc = -EIO;
		printk(KERN_ERR "Invalid MAC address\n");
		goto err_out_unmap;
	}

	/* For Rhine-I/II, phy_id is loaded from EEPROM */
	if (!phy_id)
		phy_id = readb(ioaddr + 0x6C);

	dev->irq = pdev->irq;

	spin_lock_init(&rp->lock);
	rp->mii_if.dev = dev;
	rp->mii_if.mdio_read = mdio_read;
	rp->mii_if.mdio_write = mdio_write;
	rp->mii_if.phy_id_mask = 0x1f;
	rp->mii_if.reg_num_mask = 0x1f;

	/* The chip-specific entries in the device structure. */
	dev->open = rhine_open;
	dev->hard_start_xmit = rhine_start_tx;
	dev->stop = rhine_close;
	dev->get_stats = rhine_get_stats;
	dev->set_multicast_list = rhine_set_rx_mode;
	dev->do_ioctl = netdev_ioctl;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->tx_timeout = rhine_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = rhine_poll;
#endif
	if (rp->quirks & rqRhineI)
		dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;

	/* dev->name not defined before register_netdev()! */
	rc = register_netdev(dev);
	if (rc)
		goto err_out_unmap;

	printk(KERN_INFO "%s: VIA %s at 0x%lx, ",
	       dev->name, name,
#ifdef USE_MMIO
	       memaddr
#else
	       ioaddr
#endif
	       );

	/* Print the MAC address; i == 5 after the loop for the last octet */
	for (i = 0; i < 5; i++)
		printk("%2.2x:", dev->dev_addr[i]);
	printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], pdev->irq);

	pci_set_drvdata(pdev, dev);

	/* Take the PHY out of isolation and report its status */
	{
		u16 mii_cmd;
		int mii_status = mdio_read(dev, phy_id, 1);
		mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
		mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
		if (mii_status != 0xffff && mii_status != 0x0000) {
			rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
			printk(KERN_INFO "%s: MII PHY found at address "
			       "%d, status 0x%4.4x advertising %4.4x "
			       "Link %4.4x.\n", dev->name, phy_id,
			       mii_status, rp->mii_if.advertising,
			       mdio_read(dev, phy_id, 5));

			/* set IFF_RUNNING */
			if (mii_status & BMSR_LSTATUS)
				netif_carrier_on(dev);
			else
				netif_carrier_off(dev);
		}
	}
	rp->mii_if.phy_id = phy_id;

	return 0;

err_out_unmap:
#ifdef USE_MMIO
	iounmap((void *)ioaddr);
err_out_free_res:
#endif
	pci_release_regions(pdev);
err_out_free_netdev:
	free_netdev(dev);
err_out:
	return rc;
}

/*
 * Allocate the Rx and Tx descriptor rings in one coherent DMA block
 * (Rx descriptors first, Tx descriptors immediately after), plus the
 * bounce-buffer area that Rhine-I chips need for transmission.
 * Returns 0 on success or -ENOMEM.
 */
static int alloc_ring(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void *ring;
	dma_addr_t ring_dma;

	ring = pci_alloc_consistent(rp->pdev,
				    RX_RING_SIZE * sizeof(struct rx_desc) +
				    TX_RING_SIZE * sizeof(struct tx_desc),
				    &ring_dma);
	if (!ring) {
		printk(KERN_ERR "Could not allocate DMA memory.\n");
		return -ENOMEM;
	}
	if (rp->quirks & rqRhineI) {
		rp->tx_bufs = pci_alloc_consistent(rp->pdev,
						   PKT_BUF_SZ * TX_RING_SIZE,
						   &rp->tx_bufs_dma);
		if (rp->tx_bufs == NULL) {
			/* Undo the ring allocation before bailing out */
			pci_free_consistent(rp->pdev,
				    RX_RING_SIZE * sizeof(struct rx_desc) +
				    TX_RING_SIZE * sizeof(struct tx_desc),
				    ring, ring_dma);
			return -ENOMEM;
		}
	}

	rp->rx_ring = ring;
	rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
	rp->rx_ring_dma = ring_dma;
	rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);

	return 0;
}

/*
 * Release the descriptor rings and, if present, the Rhine-I Tx
 * bounce buffers allocated by alloc_ring().
 */
void free_ring(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);

	pci_free_consistent(rp->pdev,
			    RX_RING_SIZE * sizeof(struct rx_desc) +
			    TX_RING_SIZE * sizeof(struct tx_desc),
			    rp->rx_ring, rp->rx_ring_dma);
	rp->tx_ring = NULL;

	if (rp->tx_bufs)
		pci_free_consistent(rp->pdev, PKT_BUF_SZ * TX_RING_SIZE,
				    rp->tx_bufs, rp->tx_bufs_dma);

	rp->tx_bufs = NULL;
}

/*
 * Initialize the Rx descriptor ring (linked into a circle via
 * next_desc) and populate each slot with a freshly allocated,
 * DMA-mapped sk_buff. Allocation failure is tolerated: remaining
 * slots stay empty and dirty_rx records the shortfall.
 */
static void alloc_rbufs(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	dma_addr_t next;
	int i;

	rp->dirty_rx = rp->cur_rx = 0;

	rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
	rp->rx_head_desc = &rp->rx_ring[0];
	next = rp->rx_ring_dma;

	/* Init the ring entries */
	for (i = 0; i < RX_RING_SIZE; i++) {
		rp->rx_ring[i].rx_status = 0;
		rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
		next += sizeof(struct rx_desc);
		rp->rx_ring[i].next_desc = cpu_to_le32(next);
		rp->rx_skbuff[i] = NULL;
	}
	/* Mark the last entry as wrapping the ring. */
	rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(rp->rx_buf_sz);
		rp->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb->dev = dev;	/* Mark as being used by this device. */

		rp->rx_skbuff_dma[i] =
			pci_map_single(rp->pdev, skb->tail, rp->rx_buf_sz,
				       PCI_DMA_FROMDEVICE);

		/* Hand the descriptor to the chip */
		rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
		rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
	}
	rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
}

/*
 * NOTE(review): the visible source is truncated here — free_rbufs()
 * is cut off mid-body; the remainder of the function lies outside
 * this chunk.
 */
static void free_rbufs(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	int i;

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		rp->rx_ring[i].rx_status = 0;
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -