via-velocity.c
			goto out_unlock;

		ret = velocity_init_td_ring(vptr);
		if (ret < 0)
			goto out_unlock;

		velocity_init_registers(vptr, VELOCITY_INIT_COLD);

		mac_enable_int(vptr->mac_regs);
		netif_start_queue(dev);

out_unlock:
		spin_unlock_irqrestore(&vptr->lock, flags);
	}

	return ret;
}

/**
 * velocity_shutdown - shut down the chip
 * @vptr: velocity to deactivate
 *
 * Shuts down the internal operations of the velocity and
 * disables interrupts, autopolling, transmit and receive
 */
static void velocity_shutdown(struct velocity_info *vptr)
{
	struct mac_regs __iomem *regs = vptr->mac_regs;

	mac_disable_int(regs);
	writel(CR0_STOP, &regs->CR0Set);
	writew(0xFFFF, &regs->TDCSRClr);
	writeb(0xFF, &regs->RDCSRClr);
	safe_disable_mii_autopoll(regs);
	mac_clear_isr(regs);
}

/**
 * velocity_close - close adapter callback
 * @dev: network device
 *
 * Callback from the network layer when the velocity is being
 * deactivated by the network layer
 */
static int velocity_close(struct net_device *dev)
{
	struct velocity_info *vptr = netdev_priv(dev);

	netif_stop_queue(dev);
	velocity_shutdown(vptr);

	if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED)
		velocity_get_ip(vptr);
	if (dev->irq != 0)
		free_irq(dev->irq, dev);

	/* Power down the chip */
	pci_set_power_state(vptr->pdev, PCI_D3hot);

	/* Free the resources */
	velocity_free_td_ring(vptr);
	velocity_free_rd_ring(vptr);
	velocity_free_rings(vptr);

	vptr->flags &= (~VELOCITY_FLAGS_OPENED);
	return 0;
}
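/*
 * Aside, not part of this file: the free_irq() in velocity_close() pairs
 * with the request made in velocity_open() earlier in the driver, along
 * these lines (a sketch of the 2.6-era shared-IRQ API; the exact flags
 * are an assumption, check velocity_open() for the authoritative call):
 *
 *	ret = request_irq(vptr->pdev->irq, &velocity_intr, IRQF_SHARED,
 *			  dev->name, dev);
 */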
/**
 * velocity_xmit - transmit packet callback
 * @skb: buffer to transmit
 * @dev: network device
 *
 * Called by the network layer to request that a packet is queued to
 * the velocity. Returns zero on success.
 */
static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct velocity_info *vptr = netdev_priv(dev);
	int qnum = 0;
	struct tx_desc *td_ptr;
	struct velocity_td_info *tdinfo;
	unsigned long flags;
	int index;
	int pktlen = skb->len;

#ifdef VELOCITY_ZERO_COPY_SUPPORT
	if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {
		kfree_skb(skb);
		return 0;
	}
#endif

	spin_lock_irqsave(&vptr->lock, flags);

	index = vptr->td_curr[qnum];
	td_ptr = &(vptr->td_rings[qnum][index]);
	tdinfo = &(vptr->td_infos[qnum][index]);

	td_ptr->tdesc1.TCPLS = TCPLS_NORMAL;
	td_ptr->tdesc1.TCR = TCR0_TIC;
	td_ptr->td_buf[0].queue = 0;

	/*
	 * Pad short frames.
	 */
	if (pktlen < ETH_ZLEN) {
		/* Cannot occur until ZC support */
		pktlen = ETH_ZLEN;
		skb_copy_from_linear_data(skb, tdinfo->buf, skb->len);
		memset(tdinfo->buf + skb->len, 0, ETH_ZLEN - skb->len);
		tdinfo->skb = skb;
		tdinfo->skb_dma[0] = tdinfo->buf_dma;
		td_ptr->tdesc0.pktsize = pktlen;
		td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
		td_ptr->td_buf[0].pa_high = 0;
		td_ptr->td_buf[0].bufsize = td_ptr->tdesc0.pktsize;
		tdinfo->nskb_dma = 1;
		td_ptr->tdesc1.CMDZ = 2;
	} else
#ifdef VELOCITY_ZERO_COPY_SUPPORT
	if (skb_shinfo(skb)->nr_frags > 0) {
		int nfrags = skb_shinfo(skb)->nr_frags;
		tdinfo->skb = skb;
		if (nfrags > 6) {
			/* Too many fragments: fall back to the bounce buffer */
			skb_copy_from_linear_data(skb, tdinfo->buf, skb->len);
			tdinfo->skb_dma[0] = tdinfo->buf_dma;
			td_ptr->tdesc0.pktsize = pktlen;
			td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
			td_ptr->td_buf[0].pa_high = 0;
			td_ptr->td_buf[0].bufsize = td_ptr->tdesc0.pktsize;
			tdinfo->nskb_dma = 1;
			td_ptr->tdesc1.CMDZ = 2;
		} else {
			int i = 0;

			tdinfo->nskb_dma = 0;
			tdinfo->skb_dma[i] = pci_map_single(vptr->pdev, skb->data,
					skb->len - skb->data_len, PCI_DMA_TODEVICE);
			td_ptr->tdesc0.pktsize = pktlen;
			/* FIXME: support 48bit DMA later */
			td_ptr->td_buf[i].pa_low = cpu_to_le32(tdinfo->skb_dma[i]);
			td_ptr->td_buf[i].pa_high = 0;
			td_ptr->td_buf[i].bufsize = skb->len - skb->data_len;

			for (i = 0; i < nfrags; i++) {
				skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
				void *addr = page_address(frag->page) + frag->page_offset;

				tdinfo->skb_dma[i + 1] = pci_map_single(vptr->pdev, addr,
						frag->size, PCI_DMA_TODEVICE);
				td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]);
				td_ptr->td_buf[i + 1].pa_high = 0;
				td_ptr->td_buf[i + 1].bufsize = frag->size;
			}
			/* Linear buffer plus i fragments were mapped; CMDZ is
			   one more than the buffer count, as in the paths above */
			tdinfo->nskb_dma = i + 1;
			td_ptr->tdesc1.CMDZ = i + 2;
		}
	} else
#endif
	{
		/*
		 * Map the linear network buffer into PCI space and
		 * add it to the transmit ring.
		 */
		tdinfo->skb = skb;
		tdinfo->skb_dma[0] = pci_map_single(vptr->pdev, skb->data, pktlen,
					PCI_DMA_TODEVICE);
		td_ptr->tdesc0.pktsize = pktlen;
		td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
		td_ptr->td_buf[0].pa_high = 0;
		td_ptr->td_buf[0].bufsize = td_ptr->tdesc0.pktsize;
		tdinfo->nskb_dma = 1;
		td_ptr->tdesc1.CMDZ = 2;
	}

	if (vptr->vlgrp && vlan_tx_tag_present(skb)) {
		td_ptr->tdesc1.pqinf.VID = vlan_tx_tag_get(skb);
		td_ptr->tdesc1.pqinf.priority = 0;
		td_ptr->tdesc1.pqinf.CFI = 0;
		td_ptr->tdesc1.TCR |= TCR0_VETAG;
	}

	/*
	 * Handle hardware checksum
	 */
	if ((vptr->flags & VELOCITY_FLAGS_TX_CSUM)
	    && (skb->ip_summed == CHECKSUM_PARTIAL)) {
		const struct iphdr *ip = ip_hdr(skb);
		if (ip->protocol == IPPROTO_TCP)
			td_ptr->tdesc1.TCR |= TCR0_TCPCK;
		else if (ip->protocol == IPPROTO_UDP)
			td_ptr->tdesc1.TCR |= (TCR0_UDPCK);
		td_ptr->tdesc1.TCR |= TCR0_IPCK;
	}

	/* Hand the descriptor to the NIC and chain it onto the previous one */
	{
		int prev = index - 1;

		if (prev < 0)
			prev = vptr->options.numtx - 1;
		td_ptr->tdesc0.owner = OWNED_BY_NIC;
		vptr->td_used[qnum]++;
		vptr->td_curr[qnum] = (index + 1) % vptr->options.numtx;

		if (AVAIL_TD(vptr, qnum) < 1)
			netif_stop_queue(dev);

		td_ptr = &(vptr->td_rings[qnum][prev]);
		td_ptr->td_buf[0].queue = 1;
		mac_tx_queue_wake(vptr->mac_regs, qnum);
	}
	dev->trans_start = jiffies;
	spin_unlock_irqrestore(&vptr->lock, flags);
	return 0;
}
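/*
 * For reference while reading velocity_xmit(): AVAIL_TD() lives in
 * via-velocity.h. A sketch of its expected shape (an assumption; check
 * the header for the authoritative definition): free descriptors on
 * queue q are the ring size minus the in-flight count,
 *
 *	#define AVAIL_TD(vptr, q) \
 *		((vptr)->options.numtx - ((vptr)->td_used[(q)]))
 *
 * which is why the queue is stopped above as soon as fewer than one
 * descriptor remains free.
 */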
/**
 * velocity_intr - interrupt callback
 * @irq: interrupt number
 * @dev_instance: interrupting device
 *
 * Called whenever an interrupt is generated by the velocity
 * adapter IRQ line. We may not be the source of the interrupt
 * and need to identify initially if we are, and if not exit as
 * efficiently as possible.
 */
static int velocity_intr(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct velocity_info *vptr = netdev_priv(dev);
	u32 isr_status;
	int max_count = 0;

	spin_lock(&vptr->lock);
	isr_status = mac_read_isr(vptr->mac_regs);

	/* Not us ? */
	if (isr_status == 0) {
		spin_unlock(&vptr->lock);
		return IRQ_NONE;
	}

	mac_disable_int(vptr->mac_regs);

	/*
	 * Keep processing the ISR until we have completed
	 * processing and the isr_status becomes zero
	 */
	while (isr_status != 0) {
		mac_write_isr(vptr->mac_regs, isr_status);
		if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI)))
			velocity_error(vptr, isr_status);
		if (isr_status & (ISR_PRXI | ISR_PPRXI))
			max_count += velocity_rx_srv(vptr, isr_status);
		if (isr_status & (ISR_PTXI | ISR_PPTXI))
			max_count += velocity_tx_srv(vptr, isr_status);
		isr_status = mac_read_isr(vptr->mac_regs);
		if (max_count > vptr->options.int_works) {
			printk(KERN_WARNING "%s: excessive work at interrupt.\n",
				dev->name);
			max_count = 0;
		}
	}
	spin_unlock(&vptr->lock);
	mac_enable_int(vptr->mac_regs);

	return IRQ_HANDLED;
}

/**
 * velocity_set_multi - filter list change callback
 * @dev: network device
 *
 * Called by the network layer when the filter lists need to change
 * for a velocity adapter. Reload the CAMs with the new address
 * filter ruleset.
 */
static void velocity_set_multi(struct net_device *dev)
{
	struct velocity_info *vptr = netdev_priv(dev);
	struct mac_regs __iomem *regs = vptr->mac_regs;
	u8 rx_mode;
	int i;
	struct dev_mc_list *mclist;

	if (dev->flags & IFF_PROMISC) {	/* Set promiscuous. */
		writel(0xffffffff, &regs->MARCAM[0]);
		writel(0xffffffff, &regs->MARCAM[4]);
		rx_mode = (RCR_AM | RCR_AB | RCR_PROM);
	} else if ((dev->mc_count > vptr->multicast_limit)
		   || (dev->flags & IFF_ALLMULTI)) {
		writel(0xffffffff, &regs->MARCAM[0]);
		writel(0xffffffff, &regs->MARCAM[4]);
		rx_mode = (RCR_AM | RCR_AB);
	} else {
		int offset = MCAM_SIZE - vptr->multicast_limit;
		mac_get_cam_mask(regs, vptr->mCAMmask);

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {
			mac_set_cam(regs, i + offset, mclist->dmi_addr);
			/* one mask bit per CAM entry */
			vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7);
		}

		mac_set_cam_mask(regs, vptr->mCAMmask);
		rx_mode = (RCR_AM | RCR_AB);
	}
	if (dev->mtu > 1500)
		rx_mode |= RCR_AL;

	BYTE_REG_BITS_ON(rx_mode, &regs->RCR);
}
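/*
 * Illustration (guarded out of the build, not part of the driver):
 * velocity_set_multi() runs whenever the core reloads the interface
 * filter list, e.g. after a user-space program joins a multicast group.
 * A minimal sketch, assuming the interface is already up; the group
 * address below is made up.
 */
#if 0
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

static int join_example_group(void)
{
	struct ip_mreq mreq;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;
	memset(&mreq, 0, sizeof(mreq));
	mreq.imr_multiaddr.s_addr = inet_addr("239.1.2.3");	/* example group */
	mreq.imr_interface.s_addr = htonl(INADDR_ANY);

	/* The join propagates via dev_mc_add() into velocity_set_multi() */
	if (setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq)) < 0) {
		close(fd);
		return -1;
	}
	return fd;	/* keep open; closing drops the membership */
}
#endif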
/**
 * velocity_get_stats - statistics callback
 * @dev: network device
 *
 * Callback from the network layer to allow driver statistics
 * to be resynchronized with hardware collected state. In the
 * case of the velocity we need to pull the MIB counters from
 * the hardware into the counters before letting the network
 * layer display them.
 */
static struct net_device_stats *velocity_get_stats(struct net_device *dev)
{
	struct velocity_info *vptr = netdev_priv(dev);

	/* If the hardware is down, don't touch MII */
	if (!netif_running(dev))
		return &vptr->stats;

	spin_lock_irq(&vptr->lock);
	velocity_update_hw_mibs(vptr);
	spin_unlock_irq(&vptr->lock);

	vptr->stats.rx_packets = vptr->mib_counter[HW_MIB_ifRxAllPkts];
	vptr->stats.rx_errors = vptr->mib_counter[HW_MIB_ifRxErrorPkts];
	vptr->stats.rx_length_errors = vptr->mib_counter[HW_MIB_ifInRangeLengthErrors];

//	unsigned long rx_dropped;	/* no space in linux buffers */
	vptr->stats.collisions = vptr->mib_counter[HW_MIB_ifTxEtherCollisions];
	/* detailed rx_errors: */
//	unsigned long rx_length_errors;
//	unsigned long rx_over_errors;	/* receiver ring buff overflow */
	vptr->stats.rx_crc_errors = vptr->mib_counter[HW_MIB_ifRxPktCRCE];
//	unsigned long rx_frame_errors;	/* recv'd frame alignment error */
//	unsigned long rx_fifo_errors;	/* recv'r fifo overrun */
//	unsigned long rx_missed_errors;	/* receiver missed packet */

	/* detailed tx_errors */
//	unsigned long tx_fifo_errors;

	return &vptr->stats;
}

/**
 * velocity_ioctl - ioctl entry point
 * @dev: network device
 * @rq: interface request ioctl
 * @cmd: command code
 *
 * Called when the user issues an ioctl request to the network
 * device in question. The velocity interface supports MII.
 */
static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct velocity_info *vptr = netdev_priv(dev);
	int ret;

	/* If we are asked for information and the device is power
	   saving then we need to bring the device back up to talk to it */
	if (!netif_running(dev))
		pci_set_power_state(vptr->pdev, PCI_D0);

	switch (cmd) {
	case SIOCGMIIPHY:	/* Get address of MII PHY in use. */
	case SIOCGMIIREG:	/* Read MII PHY register. */
	case SIOCSMIIREG:	/* Write to MII PHY register. */
		ret = velocity_mii_ioctl(dev, rq, cmd);
		break;
	default:
		ret = -EOPNOTSUPP;
	}
	if (!netif_running(dev))
		pci_set_power_state(vptr->pdev, PCI_D3hot);

	return ret;
}

/*
 * Definition for our device driver. The PCI layer interface
 * uses this to handle all our card discovery and plugging
 */
static struct pci_driver velocity_driver = {
	.name		= VELOCITY_NAME,
	.id_table	= velocity_id_table,
	.probe		= velocity_found1,
	.remove		= __devexit_p(velocity_remove1),
#ifdef CONFIG_PM
	.suspend	= velocity_suspend,
	.resume		= velocity_resume,
#endif
};

/**
 * velocity_init_module - load time function
 *
 * Called when the velocity module is loaded. The PCI driver
 * is registered with the PCI layer, and in turn will call
 * the probe functions for each velocity adapter installed
 * in the system.
 */
static int __init velocity_init_module(void)
{
	int ret;

	velocity_register_notifier();
	ret = pci_register_driver(&velocity_driver);
	if (ret < 0)
		velocity_unregister_notifier();
	return ret;
}

/**
 * velocity_cleanup_module - module unload
 *
 * Called when the velocity driver is unloaded. It cleans up the
 * notifiers and unregisters the PCI driver interface for this
 * hardware. This in turn cleans up all discovered interfaces
 * before returning from the function.
 */
static void __exit velocity_cleanup_module(void)
{
	velocity_unregister_notifier();
	pci_unregister_driver(&velocity_driver);
}

module_init(velocity_init_module);
module_exit(velocity_cleanup_module);

/*
 * MII access, media link mode setting functions
 */
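/*
 * Illustration (guarded out of the build, not part of the driver): how
 * user space reaches velocity_ioctl() through the standard MII ioctls,
 * using the classic mii-tool overlay of mii_ioctl_data on ifr_data.
 * "eth0"-style interface names are placeholders supplied by the caller.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/mii.h>
#include <linux/sockios.h>

static int read_phy_bmsr(const char *ifname)
{
	struct ifreq ifr;
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);

	if (ioctl(fd, SIOCGMIIPHY, &ifr) < 0)	/* fills in mii->phy_id */
		return -1;
	mii->reg_num = MII_BMSR;		/* basic mode status register */
	if (ioctl(fd, SIOCGMIIREG, &ifr) < 0)
		return -1;
	printf("%s: BMSR = 0x%04x\n", ifname, mii->val_out);
	return 0;
}
#endif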
/**
 * mii_init - set up MII
 * @vptr: velocity adapter
 * @mii_status: link status
 *
 * Set up the PHY for the current link state.
 */
static void mii_init(struct velocity_info *vptr, u32 mii_status)
{
	u16 BMCR;

	switch (PHYID_GET_PHY_ID(vptr->phy_id)) {
	case PHYID_CICADA_CS8201:
		/*
		 * Reset to hardware default
		 */
		MII_REG_BITS_OFF((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR,
				 vptr->mac_regs);
		/*
		 * Turn on ECHODIS bit in NWay-forced full mode and turn it
		 * off in NWay-forced half mode for the NWay-forced vs.
		 * legacy-forced issue.
		 */
		if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
			MII_REG_BITS_ON(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
		else
			MII_REG_BITS_OFF(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
		/*
		 * Turn on Link/Activity LED