via-rhine.c
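/*
 * Excerpt: chip reset and EEPROM-reload helpers, the PCI probe routine
 * rhine_init_one(), descriptor-ring setup/teardown, and link management
 * for the VIA Rhine Fast Ethernet family.
 */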
	iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
	IOSYNC;

	if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
		printk(KERN_INFO "%s: Reset not complete yet. "
			"Trying harder.\n", DRV_NAME);

		/* Force reset */
		if (rp->quirks & rqForceReset)
			iowrite8(0x40, ioaddr + MiscCmd);

		/* Reset can take somewhat longer (rare) */
		RHINE_WAIT_FOR(!(ioread8(ioaddr + ChipCmd1) & Cmd1Reset));
	}

	if (debug > 1)
		printk(KERN_INFO "%s: Reset %s.\n", dev->name,
			(ioread8(ioaddr + ChipCmd1) & Cmd1Reset) ?
			"failed" : "succeeded");
}
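/*
 * RHINE_WAIT_FOR() (defined earlier in this file) bounds the busy-waits
 * used above and in rhine_reload_eeprom() below.  A minimal sketch of
 * such a macro, with an illustrative (assumed) iteration limit:
 *
 *	#define RHINE_WAIT_FOR(condition) do {		\
 *		int i = 1024;				\
 *		while (!(condition) && --i)		\
 *			;				\
 *	} while (0)
 */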
#ifdef USE_MMIO
static void enable_mmio(long pioaddr, u32 quirks)
{
	int n;

	if (quirks & rqRhineI) {
		/* More recent docs say that this bit is reserved ... */
		n = inb(pioaddr + ConfigA) | 0x20;
		outb(n, pioaddr + ConfigA);
	} else {
		n = inb(pioaddr + ConfigD) | 0x80;
		outb(n, pioaddr + ConfigD);
	}
}
#endif

/*
 * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
 * (plus 0x6C for Rhine-I/II)
 */
static void __devinit rhine_reload_eeprom(long pioaddr, struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	outb(0x20, pioaddr + MACRegEEcsr);
	RHINE_WAIT_FOR(!(inb(pioaddr + MACRegEEcsr) & 0x20));

#ifdef USE_MMIO
	/*
	 * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
	 * MMIO. If reloading EEPROM was done first this could be avoided, but
	 * it is not known if that still works with the "win98-reboot" problem.
	 */
	enable_mmio(pioaddr, rp->quirks);
#endif

	/* Turn off EEPROM-controlled wake-up (magic packet) */
	if (rp->quirks & rqWOL)
		iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void rhine_poll(struct net_device *dev)
{
	disable_irq(dev->irq);
	rhine_interrupt(dev->irq, (void *)dev);
	enable_irq(dev->irq);
}
#endif

#ifdef CONFIG_VIA_RHINE_NAPI
static int rhine_napipoll(struct napi_struct *napi, int budget)
{
	struct rhine_private *rp = container_of(napi, struct rhine_private, napi);
	struct net_device *dev = rp->dev;
	void __iomem *ioaddr = rp->base;
	int work_done;

	work_done = rhine_rx(dev, budget);

	if (work_done < budget) {
		netif_rx_complete(dev, napi);

		iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty | IntrRxOverflow |
			  IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
			  IntrTxDone | IntrTxError | IntrTxUnderrun |
			  IntrPCIErr | IntrStatsMax | IntrLinkChange,
			  ioaddr + IntrEnable);
	}
	return work_done;
}
#endif

static void rhine_hw_init(struct net_device *dev, long pioaddr)
{
	struct rhine_private *rp = netdev_priv(dev);

	/* Reset the chip to erase previous misconfiguration. */
	rhine_chip_reset(dev);

	/* Rhine-I needs extra time to recuperate before EEPROM reload */
	if (rp->quirks & rqRhineI)
		msleep(5);

	/* Reload EEPROM controlled bytes cleared by soft reset */
	rhine_reload_eeprom(pioaddr, dev);
}

static int __devinit rhine_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct rhine_private *rp;
	int i, rc;
	u32 quirks;
	long pioaddr;
	long memaddr;
	void __iomem *ioaddr;
	int io_size, phy_id;
	const char *name;
#ifdef USE_MMIO
	int bar = 1;
#else
	int bar = 0;
#endif
	DECLARE_MAC_BUF(mac);

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk(version);
#endif

	io_size = 256;
	phy_id = 0;
	quirks = 0;
	name = "Rhine";
	if (pdev->revision < VTunknown0) {
		quirks = rqRhineI;
		io_size = 128;
	} else if (pdev->revision >= VT6102) {
		quirks = rqWOL | rqForceReset;
		if (pdev->revision < VT6105) {
			name = "Rhine II";
			quirks |= rqStatusWBRace;	/* Rhine-II exclusive */
		} else {
			phy_id = 1;	/* Integrated PHY, phy_id fixed to 1 */
			if (pdev->revision >= VT6105_B0)
				quirks |= rq6patterns;
			if (pdev->revision < VT6105M)
				name = "Rhine III";
			else
				name = "Rhine III (Management Adapter)";
		}
	}

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_out;

	/* this should always be supported */
	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (rc) {
		printk(KERN_ERR "32-bit PCI DMA addresses not supported by "
		       "the card!?\n");
		goto err_out;
	}

	/* sanity check */
	if ((pci_resource_len(pdev, 0) < io_size) ||
	    (pci_resource_len(pdev, 1) < io_size)) {
		rc = -EIO;
		printk(KERN_ERR "Insufficient PCI resources, aborting\n");
		goto err_out;
	}

	pioaddr = pci_resource_start(pdev, 0);
	memaddr = pci_resource_start(pdev, 1);

	pci_set_master(pdev);

	dev = alloc_etherdev(sizeof(struct rhine_private));
	if (!dev) {
		rc = -ENOMEM;
		printk(KERN_ERR "alloc_etherdev failed\n");
		goto err_out;
	}
	SET_NETDEV_DEV(dev, &pdev->dev);

	rp = netdev_priv(dev);
	rp->dev = dev;
	rp->quirks = quirks;
	rp->pioaddr = pioaddr;
	rp->pdev = pdev;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out_free_netdev;

	ioaddr = pci_iomap(pdev, bar, io_size);
	if (!ioaddr) {
		rc = -EIO;
		printk(KERN_ERR "ioremap failed for device %s, region 0x%X "
		       "@ 0x%lX\n", pci_name(pdev), io_size, memaddr);
		goto err_out_free_res;
	}

#ifdef USE_MMIO
	enable_mmio(pioaddr, quirks);

	/* Check that selected MMIO registers match the PIO ones */
	i = 0;
	while (mmio_verify_registers[i]) {
		int reg = mmio_verify_registers[i++];
		unsigned char a = inb(pioaddr+reg);
		unsigned char b = readb(ioaddr+reg);
		if (a != b) {
			rc = -EIO;
			printk(KERN_ERR "MMIO do not match PIO [%02x] "
			       "(%02x != %02x)\n", reg, a, b);
			goto err_out_unmap;
		}
	}
#endif /* USE_MMIO */

	dev->base_addr = (unsigned long)ioaddr;
	rp->base = ioaddr;

	/* Get chip registers into a sane state */
	rhine_power_init(dev);
	rhine_hw_init(dev, pioaddr);

	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	if (!is_valid_ether_addr(dev->perm_addr)) {
		rc = -EIO;
		printk(KERN_ERR "Invalid MAC address\n");
		goto err_out_unmap;
	}

	/* For Rhine-I/II, phy_id is loaded from EEPROM */
	if (!phy_id)
		phy_id = ioread8(ioaddr + 0x6C);

	dev->irq = pdev->irq;

	spin_lock_init(&rp->lock);
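	/* Hook the generic MII library (struct mii_if_info) up to this
	 * driver's MDIO accessors; PHY addresses and register numbers are
	 * 5-bit fields, hence the 0x1f masks below. */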
	rp->mii_if.dev = dev;
	rp->mii_if.mdio_read = mdio_read;
	rp->mii_if.mdio_write = mdio_write;
	rp->mii_if.phy_id_mask = 0x1f;
	rp->mii_if.reg_num_mask = 0x1f;

	/* The chip-specific entries in the device structure. */
	dev->open = rhine_open;
	dev->hard_start_xmit = rhine_start_tx;
	dev->stop = rhine_close;
	dev->get_stats = rhine_get_stats;
	dev->set_multicast_list = rhine_set_rx_mode;
	dev->do_ioctl = netdev_ioctl;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->tx_timeout = rhine_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = rhine_poll;
#endif
#ifdef CONFIG_VIA_RHINE_NAPI
	netif_napi_add(dev, &rp->napi, rhine_napipoll, 64);
#endif
	if (rp->quirks & rqRhineI)
		dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;

	/* dev->name not defined before register_netdev()! */
	rc = register_netdev(dev);
	if (rc)
		goto err_out_unmap;

	printk(KERN_INFO "%s: VIA %s at 0x%lx, %s, IRQ %d.\n",
	       dev->name, name,
#ifdef USE_MMIO
	       memaddr,
#else
	       (long)ioaddr,
#endif
	       print_mac(mac, dev->dev_addr), pdev->irq);

	pci_set_drvdata(pdev, dev);

	{
		u16 mii_cmd;
		int mii_status = mdio_read(dev, phy_id, 1);
		mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
		mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
		if (mii_status != 0xffff && mii_status != 0x0000) {
			rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
			printk(KERN_INFO "%s: MII PHY found at address "
			       "%d, status 0x%4.4x advertising %4.4x "
			       "Link %4.4x.\n", dev->name, phy_id,
			       mii_status, rp->mii_if.advertising,
			       mdio_read(dev, phy_id, 5));

			/* set IFF_RUNNING */
			if (mii_status & BMSR_LSTATUS)
				netif_carrier_on(dev);
			else
				netif_carrier_off(dev);
		}
	}
	rp->mii_if.phy_id = phy_id;
	if (debug > 1 && avoid_D3)
		printk(KERN_INFO "%s: No D3 power state at shutdown.\n",
		       dev->name);

	return 0;

err_out_unmap:
	pci_iounmap(pdev, ioaddr);
err_out_free_res:
	pci_release_regions(pdev);
err_out_free_netdev:
	free_netdev(dev);
err_out:
	return rc;
}

static int alloc_ring(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void *ring;
	dma_addr_t ring_dma;

	ring = pci_alloc_consistent(rp->pdev,
				    RX_RING_SIZE * sizeof(struct rx_desc) +
				    TX_RING_SIZE * sizeof(struct tx_desc),
				    &ring_dma);
	if (!ring) {
		printk(KERN_ERR "Could not allocate DMA memory.\n");
		return -ENOMEM;
	}
	if (rp->quirks & rqRhineI) {
		rp->tx_bufs = pci_alloc_consistent(rp->pdev,
						   PKT_BUF_SZ * TX_RING_SIZE,
						   &rp->tx_bufs_dma);
		if (rp->tx_bufs == NULL) {
			pci_free_consistent(rp->pdev,
				    RX_RING_SIZE * sizeof(struct rx_desc) +
				    TX_RING_SIZE * sizeof(struct tx_desc),
				    ring, ring_dma);
			return -ENOMEM;
		}
	}

	rp->rx_ring = ring;
	rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
	rp->rx_ring_dma = ring_dma;
	rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);

	return 0;
}

static void free_ring(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);

	pci_free_consistent(rp->pdev,
			    RX_RING_SIZE * sizeof(struct rx_desc) +
			    TX_RING_SIZE * sizeof(struct tx_desc),
			    rp->rx_ring, rp->rx_ring_dma);
	rp->tx_ring = NULL;

	if (rp->tx_bufs)
		pci_free_consistent(rp->pdev, PKT_BUF_SZ * TX_RING_SIZE,
				    rp->tx_bufs, rp->tx_bufs_dma);

	rp->tx_bufs = NULL;
}
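/*
 * Rx ring conventions: a descriptor whose rx_status has DescOwn set
 * belongs to the NIC; the driver reclaims it once the chip clears the
 * bit.  dirty_rx trails cur_rx, and the (i - RX_RING_SIZE) assignment
 * at the end of alloc_rbufs() records how many buffers failed to
 * allocate so that later refill logic can catch up.
 */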
static void alloc_rbufs(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	dma_addr_t next;
	int i;

	rp->dirty_rx = rp->cur_rx = 0;

	rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
	rp->rx_head_desc = &rp->rx_ring[0];
	next = rp->rx_ring_dma;

	/* Init the ring entries */
	for (i = 0; i < RX_RING_SIZE; i++) {
		rp->rx_ring[i].rx_status = 0;
		rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
		next += sizeof(struct rx_desc);
		rp->rx_ring[i].next_desc = cpu_to_le32(next);
		rp->rx_skbuff[i] = NULL;
	}
	/* Mark the last entry as wrapping the ring. */
	rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(rp->rx_buf_sz);
		rp->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb->dev = dev;	/* Mark as being used by this device. */

		rp->rx_skbuff_dma[i] =
			pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz,
				       PCI_DMA_FROMDEVICE);

		rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
		rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
	}
	rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
}

static void free_rbufs(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	int i;

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		rp->rx_ring[i].rx_status = 0;
		rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
		if (rp->rx_skbuff[i]) {
			pci_unmap_single(rp->pdev,
					 rp->rx_skbuff_dma[i],
					 rp->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(rp->rx_skbuff[i]);
		}
		rp->rx_skbuff[i] = NULL;
	}
}

static void alloc_tbufs(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	dma_addr_t next;
	int i;

	rp->dirty_tx = rp->cur_tx = 0;
	next = rp->tx_ring_dma;
	for (i = 0; i < TX_RING_SIZE; i++) {
		rp->tx_skbuff[i] = NULL;
		rp->tx_ring[i].tx_status = 0;
		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
		next += sizeof(struct tx_desc);
		rp->tx_ring[i].next_desc = cpu_to_le32(next);
		if (rp->quirks & rqRhineI)
			rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
	}
	rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);
}

static void free_tbufs(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	int i;

	for (i = 0; i < TX_RING_SIZE; i++) {
		rp->tx_ring[i].tx_status = 0;
		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
		rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
		if (rp->tx_skbuff[i]) {
			if (rp->tx_skbuff_dma[i]) {
				pci_unmap_single(rp->pdev,
						 rp->tx_skbuff_dma[i],
						 rp->tx_skbuff[i]->len,
						 PCI_DMA_TODEVICE);
			}
			dev_kfree_skb(rp->tx_skbuff[i]);
		}
		rp->tx_skbuff[i] = NULL;
		rp->tx_buf[i] = NULL;
	}
}

static void rhine_check_media(struct net_device *dev, unsigned int init_media)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	mii_check_media(&rp->mii_if, debug, init_media);

	if (rp->mii_if.full_duplex)
		iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
			 ioaddr + ChipCmd1);
	else
		iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
			 ioaddr + ChipCmd1);
	if (debug > 1)
		printk(KERN_INFO "%s: force_media %d, carrier %d\n", dev->name,
		       rp->mii_if.force_media, netif_carrier_ok(dev));
}

/* Called after status of force_media possibly changed */
static void rhine_set_carrier(struct mii_if_info *mii)
{
	if (mii->force_media) {
		/* autoneg is off: Link is always assumed to be up */
		if (!netif_carrier_ok(mii->dev))
			netif_carrier_on(mii->dev);
	} else	/* Let MII library update carrier status */
		rhine_check_media(mii->dev, 0);
}
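/*
 * Presumably (the callers are not visible in this excerpt),
 * rhine_set_carrier() runs after operations that can change
 * mii_if.force_media, such as the driver's ethtool set_settings and
 * nway_reset handlers.
 */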