via-velocity.c
/**
 * velocity_rx_reset - handle a receive reset
 * @vptr: velocity we are resetting
 *
 * Reset the ownership and status for the receive ring side.
 * Hand all the receive queue to the NIC.
 */

static void velocity_rx_reset(struct velocity_info *vptr)
{
	struct mac_regs *regs = vptr->mac_regs;
	int i;

	vptr->rd_dirty = vptr->rd_filled = vptr->rd_curr = 0;

	/*
	 * Init state: all RD entries belong to the NIC.
	 */
	for (i = 0; i < vptr->options.numrx; ++i)
		vptr->rd_ring[i].rdesc0.owner = OWNED_BY_NIC;

	writew(vptr->options.numrx, &regs->RBRDU);
	writel(vptr->rd_pool_dma, &regs->RDBaseLo);
	writew(0, &regs->RDIdx);
	writew(vptr->options.numrx - 1, &regs->RDCSize);
}

/**
 * velocity_init_registers - initialise MAC registers
 * @vptr: velocity to init
 * @type: type of initialisation (hot or cold)
 *
 * Initialise the MAC on a reset or on first set up on the
 * hardware.
 */

static void velocity_init_registers(struct velocity_info *vptr,
				    enum velocity_init_type type)
{
	struct mac_regs *regs = vptr->mac_regs;
	int i, mii_status;

	mac_wol_reset(regs);

	switch (type) {
	case VELOCITY_INIT_RESET:
	case VELOCITY_INIT_WOL:

		netif_stop_queue(vptr->dev);

		/*
		 * Reset RX to keep the RX pointer on a 4X boundary.
		 */
		velocity_rx_reset(vptr);
		mac_rx_queue_run(regs);
		mac_rx_queue_wake(regs);

		mii_status = velocity_get_opt_media_mode(vptr);
		if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
			velocity_print_link_status(vptr);
			if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
				netif_wake_queue(vptr->dev);
		}

		enable_flow_control_ability(vptr);

		mac_clear_isr(regs);
		writel(CR0_STOP, &regs->CR0Clr);
		writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT),
		       &regs->CR0Set);

		break;

	case VELOCITY_INIT_COLD:
	default:
		/*
		 * Do reset.
		 */
		velocity_soft_reset(vptr);
		mdelay(5);

		mac_eeprom_reload(regs);
		for (i = 0; i < 6; i++) {
			writeb(vptr->dev->dev_addr[i], &(regs->PAR[i]));
		}
		/*
		 * Clear the Pre_ACPI bit.
		 */
		BYTE_REG_BITS_OFF(CFGA_PACPI, &(regs->CFGA));
		mac_set_rx_thresh(regs, vptr->options.rx_thresh);
		mac_set_dma_length(regs, vptr->options.DMA_length);

		writeb(WOLCFG_SAM | WOLCFG_SAB, &regs->WOLCFGSet);

		/*
		 * Back-off algorithm: use the original IEEE standard.
		 */
		BYTE_REG_BITS_SET(CFGB_OFSET,
				  (CFGB_CRANDOM | CFGB_CAP | CFGB_MBA | CFGB_BAKOPT),
				  &regs->CFGB);

		/*
		 * Init CAM filter.
		 */
		velocity_init_cam_filter(vptr);

		/*
		 * Set packet filter: receive directed and broadcast addresses.
		 */
		velocity_set_multi(vptr->dev);

		/*
		 * Enable MII auto-polling.
		 */
		enable_mii_autopoll(regs);

		vptr->int_mask = INT_MASK_DEF;

		writel(cpu_to_le32(vptr->rd_pool_dma), &regs->RDBaseLo);
		writew(vptr->options.numrx - 1, &regs->RDCSize);
		mac_rx_queue_run(regs);
		mac_rx_queue_wake(regs);

		writew(vptr->options.numtx - 1, &regs->TDCSize);

		for (i = 0; i < vptr->num_txq; i++) {
			writel(cpu_to_le32(vptr->td_pool_dma[i]), &(regs->TDBaseLo[i]));
			mac_tx_queue_run(regs, i);
		}

		init_flow_control_register(vptr);

		writel(CR0_STOP, &regs->CR0Clr);
		writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT), &regs->CR0Set);

		mii_status = velocity_get_opt_media_mode(vptr);
		netif_stop_queue(vptr->dev);

		mii_init(vptr, mii_status);

		if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
			velocity_print_link_status(vptr);
			if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
				netif_wake_queue(vptr->dev);
		}

		enable_flow_control_ability(vptr);
		mac_hw_mibs_init(regs);
		mac_write_int_mask(vptr->int_mask, regs);
		mac_clear_isr(regs);
	}
}
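/*
 * velocity_rx_reset() above works by flipping every receive descriptor's
 * owner bit back to the NIC and rewinding the driver's ring indices. A
 * minimal stand-alone sketch of that ownership model follows (toy types
 * and sizes, not the driver's real rx_desc layout):
 */
#if 0
#include <stdio.h>

#define RING_SIZE     8
#define OWNED_BY_NIC  1
#define OWNED_BY_HOST 0

struct toy_rx_desc {
	int owner;	/* who may touch this slot next */
	int data;	/* stand-in for the real DMA buffer state */
};

/* Hand every slot back to the hardware, as velocity_rx_reset() does. */
static void toy_rx_reset(struct toy_rx_desc *ring, int *curr, int *dirty)
{
	int i;

	*curr = *dirty = 0;
	for (i = 0; i < RING_SIZE; i++)
		ring[i].owner = OWNED_BY_NIC;
}

int main(void)
{
	struct toy_rx_desc ring[RING_SIZE] = { { OWNED_BY_HOST, 42 } };
	int curr = 3, dirty = 5;

	toy_rx_reset(ring, &curr, &dirty);
	printf("slot 0 owner=%d curr=%d dirty=%d\n",
	       ring[0].owner, curr, dirty);
	return 0;
}
#endif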
/**
 * velocity_soft_reset - soft reset
 * @vptr: velocity to reset
 *
 * Kick off a soft reset of the velocity adapter and then poll
 * until the reset sequence has completed before returning.
 */

static int velocity_soft_reset(struct velocity_info *vptr)
{
	struct mac_regs *regs = vptr->mac_regs;
	int i = 0;

	writel(CR0_SFRST, &regs->CR0Set);

	for (i = 0; i < W_MAX_TIMEOUT; i++) {
		udelay(5);
		if (!DWORD_REG_BITS_IS_ON(CR0_SFRST, &regs->CR0Set))
			break;
	}

	if (i == W_MAX_TIMEOUT) {
		writel(CR0_FORSRST, &regs->CR0Set);
		/* FIXME: PCI POSTING */
		/* delay 2ms */
		mdelay(2);
	}
	return 0;
}
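/*
 * velocity_soft_reset() uses a common MMIO pattern: set a self-clearing
 * reset bit, poll it with a bounded delay loop, and only escalate to a
 * forced reset if the timeout expires. A stand-alone sketch of that
 * pattern follows (toy register and timings, not the real CR0 semantics):
 */
#if 0
#include <stdio.h>

#define TOY_SFRST       0x01
#define TOY_FORSRST     0x02
#define TOY_MAX_TIMEOUT 20

static unsigned int toy_cr0;

/* Pretend the hardware clears the reset bit after a few polls. */
static unsigned int toy_read(void)
{
	static int polls;

	if (++polls >= 3)
		toy_cr0 &= ~TOY_SFRST;
	return toy_cr0;
}

static int toy_soft_reset(void)
{
	int i;

	toy_cr0 |= TOY_SFRST;			/* kick the reset */
	for (i = 0; i < TOY_MAX_TIMEOUT; i++) {
		/* udelay(5) would go here in the real driver */
		if (!(toy_read() & TOY_SFRST))
			break;			/* hardware finished */
	}
	if (i == TOY_MAX_TIMEOUT)
		toy_cr0 |= TOY_FORSRST;		/* escalate: force reset */
	return 0;
}

int main(void)
{
	toy_soft_reset();
	printf("cr0=%#x\n", toy_cr0);
	return 0;
}
#endif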
/**
 * velocity_found1 - set up discovered velocity card
 * @pdev: PCI device
 * @ent: PCI device table entry that matched
 *
 * Configure a discovered adapter from scratch. Return a negative
 * errno error code on failure paths.
 */

static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int first = 1;
	struct net_device *dev;
	int i;
	struct velocity_info_tbl *info = (struct velocity_info_tbl *) ent->driver_data;
	struct velocity_info *vptr;
	struct mac_regs *regs;
	int ret = -ENOMEM;

	if (velocity_nics >= MAX_UNITS) {
		printk(KERN_NOTICE VELOCITY_NAME ": already found %d NICs.\n",
		       velocity_nics);
		return -ENODEV;
	}

	dev = alloc_etherdev(sizeof(struct velocity_info));

	if (dev == NULL) {
		printk(KERN_ERR VELOCITY_NAME ": allocate net device failed.\n");
		goto out;
	}

	/* Chain it all together */

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);
	vptr = dev->priv;

	if (first) {
		printk(KERN_INFO "%s Ver. %s\n",
		       VELOCITY_FULL_DRV_NAM, VELOCITY_VERSION);
		printk(KERN_INFO "Copyright (c) 2002, 2003 VIA Networking Technologies, Inc.\n");
		printk(KERN_INFO "Copyright (c) 2004 Red Hat Inc.\n");
		first = 0;
	}

	velocity_init_info(pdev, vptr, info);

	vptr->dev = dev;

	dev->irq = pdev->irq;

	ret = pci_enable_device(pdev);
	if (ret < 0)
		goto err_free_dev;

	ret = velocity_get_pci_info(vptr, pdev);
	if (ret < 0) {
		printk(KERN_ERR VELOCITY_NAME ": Failed to find PCI device.\n");
		goto err_disable;
	}

	ret = pci_request_regions(pdev, VELOCITY_NAME);
	if (ret < 0) {
		printk(KERN_ERR VELOCITY_NAME ": Failed to request PCI regions.\n");
		goto err_disable;
	}

	regs = ioremap(vptr->memaddr, vptr->io_size);
	if (regs == NULL) {
		ret = -EIO;
		goto err_release_res;
	}

	vptr->mac_regs = regs;

	mac_wol_reset(regs);

	dev->base_addr = vptr->ioaddr;

	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = readb(&regs->PAR[i]);

	velocity_get_options(&vptr->options, velocity_nics, dev->name);

	/*
	 * Mask out the options that cannot be set on this chip.
	 */

	vptr->options.flags &= info->flags;

	/*
	 * Enable the chip-specified capabilities.
	 */

	vptr->flags = vptr->options.flags | (info->flags & 0xFF000000UL);

	vptr->wol_opts = vptr->options.wol_opts;
	vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;

	vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs);

	dev->irq = pdev->irq;
	dev->open = velocity_open;
	dev->hard_start_xmit = velocity_xmit;
	dev->stop = velocity_close;
	dev->get_stats = velocity_get_stats;
	dev->set_multicast_list = velocity_set_multi;
	dev->do_ioctl = velocity_ioctl;
	dev->ethtool_ops = &velocity_ethtool_ops;
	dev->change_mtu = velocity_change_mtu;

#ifdef VELOCITY_ZERO_COPY_SUPPORT
	dev->features |= NETIF_F_SG;
#endif

	if (vptr->flags & VELOCITY_FLAGS_TX_CSUM) {
		dev->features |= NETIF_F_HW_CSUM;
	}

	ret = register_netdev(dev);
	if (ret < 0)
		goto err_iounmap;

	velocity_print_info(vptr);
	pci_set_drvdata(pdev, dev);

	/* and leave the chip powered down */

	pci_set_power_state(pdev, 3);
#ifdef CONFIG_PM
	{
		unsigned long flags;

		spin_lock_irqsave(&velocity_dev_list_lock, flags);
		list_add(&vptr->list, &velocity_dev_list);
		spin_unlock_irqrestore(&velocity_dev_list_lock, flags);
	}
#endif
	velocity_nics++;
out:
	return ret;

err_iounmap:
	iounmap(regs);
err_release_res:
	pci_release_regions(pdev);
err_disable:
	pci_disable_device(pdev);
err_free_dev:
	free_netdev(dev);
	goto out;
}

/**
 * velocity_print_info - per driver data
 * @vptr: velocity
 *
 * Print per driver data as the kernel driver finds Velocity
 * hardware.
 */

static void __devinit velocity_print_info(struct velocity_info *vptr)
{
	struct net_device *dev = vptr->dev;

	printk(KERN_INFO "%s: %s\n", dev->name, get_chip_name(vptr->chip_id));
	printk(KERN_INFO "%s: Ethernet Address: %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
	       dev->name,
	       dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
	       dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
}

/**
 * velocity_init_info - init private data
 * @pdev: PCI device
 * @vptr: Velocity info
 * @info: Board type
 *
 * Set up the initial velocity_info struct for the device that has been
 * discovered.
 */

static void __devinit velocity_init_info(struct pci_dev *pdev, struct velocity_info *vptr, struct velocity_info_tbl *info)
{
	memset(vptr, 0, sizeof(struct velocity_info));

	vptr->pdev = pdev;
	vptr->chip_id = info->chip_id;
	vptr->io_size = info->io_size;
	vptr->num_txq = info->txqueue;
	vptr->multicast_limit = MCAM_SIZE;

	spin_lock_init(&vptr->lock);
	INIT_LIST_HEAD(&vptr->list);
}

/**
 * velocity_get_pci_info - retrieve PCI info for device
 * @vptr: velocity device
 * @pdev: PCI device it matches
 *
 * Retrieve the PCI configuration space data that interests us from
 * the kernel PCI layer.
 */

static int __devinit velocity_get_pci_info(struct velocity_info *vptr, struct pci_dev *pdev)
{
	if (pci_read_config_byte(pdev, PCI_REVISION_ID, &vptr->rev_id) < 0)
		return -EIO;

	pci_set_master(pdev);

	vptr->ioaddr = pci_resource_start(pdev, 0);
	vptr->memaddr = pci_resource_start(pdev, 1);

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
		printk(KERN_ERR
		       "%s: region #0 is not an I/O resource, aborting.\n",
		       pci_name(pdev));
		return -EINVAL;
	}

	if ((pci_resource_flags(pdev, 1) & IORESOURCE_IO)) {
		printk(KERN_ERR
		       "%s: region #1 is an I/O resource, aborting.\n",
		       pci_name(pdev));
		return -EINVAL;
	}

	if (pci_resource_len(pdev, 1) < 256) {
		printk(KERN_ERR "%s: region #1 is too small.\n",
		       pci_name(pdev));
		return -EINVAL;
	}
	vptr->pdev = pdev;

	return 0;
}
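/*
 * velocity_found1() above is a textbook example of the kernel's goto-based
 * error unwinding: resources are acquired in order, and each failure jumps
 * to a label that releases exactly what was already acquired, in reverse.
 * A stand-alone sketch of that shape follows (toy acquire/release helpers,
 * not the driver's real resources):
 */
#if 0
#include <stdio.h>

static int acquire(const char *what) { printf("acquire %s\n", what); return 0; }
static void release(const char *what) { printf("release %s\n", what); }

static int toy_probe(void)
{
	int ret = -1;

	if (acquire("netdev") < 0)
		goto out;
	if (acquire("pci") < 0)
		goto err_free_netdev;
	if (acquire("regions") < 0)
		goto err_disable_pci;
	if (acquire("mmio") < 0)
		goto err_release_regions;

	ret = 0;			/* success: keep everything */
out:
	return ret;

	/* failure paths fall through, releasing in reverse order */
err_release_regions:
	release("regions");
err_disable_pci:
	release("pci");
err_free_netdev:
	release("netdev");
	goto out;
}

int main(void) { return toy_probe(); }
#endif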
/**
 * velocity_init_rings - set up DMA rings
 * @vptr: Velocity to set up
 *
 * Allocate PCI mapped DMA rings for the receive and transmit layer
 * to use.
 */

static int velocity_init_rings(struct velocity_info *vptr)
{
	int i;
	unsigned int psize;
	unsigned int tsize;
	dma_addr_t pool_dma;
	u8 *pool;

	/*
	 * Allocate all RD/TD rings in a single pool.
	 */
	psize = vptr->options.numrx * sizeof(struct rx_desc) +
		vptr->options.numtx * sizeof(struct tx_desc) * vptr->num_txq;

	/*
	 * pci_alloc_consistent() fulfills the requirement for 64 byte
	 * alignment.
	 */
	pool = pci_alloc_consistent(vptr->pdev, psize, &pool_dma);

	if (pool == NULL) {
		printk(KERN_ERR "%s: DMA memory allocation failed.\n",
		       vptr->dev->name);
		return -ENOMEM;
	}

	memset(pool, 0, psize);

	vptr->rd_ring = (struct rx_desc *) pool;

	vptr->rd_pool_dma = pool_dma;

	tsize = vptr->options.numtx * PKT_BUF_SZ * vptr->num_txq;
	vptr->tx_bufs = pci_alloc_consistent(vptr->pdev, tsize,
					     &vptr->tx_bufs_dma);

	if (vptr->tx_bufs == NULL) {
		printk(KERN_ERR "%s: DMA memory allocation failed.\n",
		       vptr->dev->name);
		pci_free_consistent(vptr->pdev, psize, pool, pool_dma);
		return -ENOMEM;
	}

	memset(vptr->tx_bufs, 0, vptr->options.numtx * PKT_BUF_SZ * vptr->num_txq);

	i = vptr->options.numrx * sizeof(struct rx_desc);
	pool += i;
	pool_dma += i;
	for (i = 0; i < vptr->num_txq; i++) {
		int offset = vptr->options.numtx * sizeof(struct tx_desc);

		vptr->td_pool_dma[i] = pool_dma;
		vptr->td_rings[i] = (struct tx_desc *) pool;
		pool += offset;
		pool_dma += offset;
	}
	return 0;
}
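/*
 * velocity_init_rings() carves one DMA allocation into an RX ring followed
 * by num_txq TX rings, advancing the CPU pointer and the matching bus
 * address in lock-step. A stand-alone sketch of that carve-up arithmetic
 * follows (toy descriptor types and sizes, and a plain calloc() standing
 * in for pci_alloc_consistent()):
 */
#if 0
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define NUMRX   4
#define NUMTX   4
#define NUM_TXQ 2

struct toy_rx_desc { uint64_t w[2]; };
struct toy_tx_desc { uint64_t w[4]; };

int main(void)
{
	size_t psize = NUMRX * sizeof(struct toy_rx_desc) +
		       NUMTX * sizeof(struct toy_tx_desc) * NUM_TXQ;
	uint8_t *pool = calloc(1, psize);
	uintptr_t pool_dma;			/* stand-in for the bus address */
	struct toy_rx_desc *rd_ring;
	struct toy_tx_desc *td_rings[NUM_TXQ];
	uintptr_t td_pool_dma[NUM_TXQ];
	int i;

	if (pool == NULL)
		return 1;
	pool_dma = (uintptr_t)pool;

	rd_ring = (struct toy_rx_desc *)pool;	/* RX ring sits first */

	/* Skip past the RX ring, keeping pointer and "dma" cursor in step. */
	pool += NUMRX * sizeof(struct toy_rx_desc);
	pool_dma += NUMRX * sizeof(struct toy_rx_desc);

	for (i = 0; i < NUM_TXQ; i++) {
		size_t offset = NUMTX * sizeof(struct toy_tx_desc);

		td_pool_dma[i] = pool_dma;
		td_rings[i] = (struct toy_tx_desc *)pool;
		pool += offset;
		pool_dma += offset;
	}

	printf("rx at %p, txq1 at %p (dma %#lx)\n",
	       (void *)rd_ring, (void *)td_rings[1],
	       (unsigned long)td_pool_dma[1]);
	free(rd_ring);				/* free the original base pointer */
	return 0;
}
#endif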