r8169.c
 */
	RTL_W16(IntrMitigate, 0x0000);

	rtl_set_rx_tx_desc_registers(tp, ioaddr);

	if ((tp->mac_version != RTL_GIGA_MAC_VER_01) &&
	    (tp->mac_version != RTL_GIGA_MAC_VER_02) &&
	    (tp->mac_version != RTL_GIGA_MAC_VER_03) &&
	    (tp->mac_version != RTL_GIGA_MAC_VER_04)) {
		RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
		rtl_set_rx_tx_config_registers(tp);
	}

	RTL_W8(Cfg9346, Cfg9346_Lock);

	/* Initially a 10 us delay. Turned it into a PCI commit. - FR */
	RTL_R8(IntrMask);

	RTL_W32(RxMissed, 0);

	rtl_set_rx_mode(dev);

	/* no early-rx interrupts */
	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);

	/* Enable all known interrupts by setting the interrupt mask. */
	RTL_W16(IntrMask, tp->intr_event);
}

static void rtl_hw_start_8168(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;
	u8 ctl;

	RTL_W8(Cfg9346, Cfg9346_Unlock);

	RTL_W8(EarlyTxThres, EarlyTxThld);

	rtl_set_rx_max_size(ioaddr);

	rtl_set_rx_tx_config_registers(tp);

	tp->cp_cmd |= RTL_R16(CPlusCmd) | PktCntrDisable | INTT_1;

	RTL_W16(CPlusCmd, tp->cp_cmd);

	/* Tx performance tweak. */
	pci_read_config_byte(pdev, 0x69, &ctl);
	ctl = (ctl & ~0x70) | 0x50;
	pci_write_config_byte(pdev, 0x69, ctl);

	RTL_W16(IntrMitigate, 0x5151);

	/* Work around for RxFIFO overflow. */
	if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
		tp->intr_event |= RxFIFOOver | PCSTimeout;
		tp->intr_event &= ~RxOverflow;
	}

	rtl_set_rx_tx_desc_registers(tp, ioaddr);

	RTL_W8(Cfg9346, Cfg9346_Lock);

	RTL_R8(IntrMask);

	RTL_W32(RxMissed, 0);

	rtl_set_rx_mode(dev);

	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);

	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);

	RTL_W16(IntrMask, tp->intr_event);
}

static void rtl_hw_start_8101(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	struct pci_dev *pdev = tp->pci_dev;

	if ((tp->mac_version == RTL_GIGA_MAC_VER_13) ||
	    (tp->mac_version == RTL_GIGA_MAC_VER_16)) {
		pci_write_config_word(pdev, 0x68, 0x00);
		pci_write_config_word(pdev, 0x69, 0x08);
	}

	RTL_W8(Cfg9346, Cfg9346_Unlock);

	RTL_W8(EarlyTxThres, EarlyTxThld);

	rtl_set_rx_max_size(ioaddr);

	tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;

	RTL_W16(CPlusCmd, tp->cp_cmd);

	RTL_W16(IntrMitigate, 0x0000);

	rtl_set_rx_tx_desc_registers(tp, ioaddr);

	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
	rtl_set_rx_tx_config_registers(tp);

	RTL_W8(Cfg9346, Cfg9346_Lock);

	RTL_R8(IntrMask);

	RTL_W32(RxMissed, 0);

	rtl_set_rx_mode(dev);

	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);

	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);

	RTL_W16(IntrMask, tp->intr_event);
}

static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	int ret = 0;

	if (new_mtu < ETH_ZLEN || new_mtu > SafeMtu)
		return -EINVAL;

	dev->mtu = new_mtu;

	if (!netif_running(dev))
		goto out;

	rtl8169_down(dev);

	rtl8169_set_rxbufsize(tp, dev);

	ret = rtl8169_init_ring(dev);
	if (ret < 0)
		goto out;

#ifdef CONFIG_R8169_NAPI
	napi_enable(&tp->napi);
#endif

	rtl_hw_start(dev);

	rtl8169_request_timer(dev);

out:
	return ret;
}

static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc)
{
	desc->addr = cpu_to_le64(0x0badbadbadbadbadull);
	desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask);
}

static void rtl8169_free_rx_skb(struct rtl8169_private *tp,
				struct sk_buff **sk_buff, struct RxDesc *desc)
{
	struct pci_dev *pdev = tp->pci_dev;

	pci_unmap_single(pdev, le64_to_cpu(desc->addr), tp->rx_buf_sz,
			 PCI_DMA_FROMDEVICE);
	dev_kfree_skb(*sk_buff);
	*sk_buff = NULL;
	rtl8169_make_unusable_by_asic(desc);
}

static inline void rtl8169_mark_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
{
	u32 eor = le32_to_cpu(desc->opts1) & RingEnd;

	desc->opts1 = cpu_to_le32(DescOwn | eor | rx_buf_sz);
}

static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
				       u32 rx_buf_sz)
{
	desc->addr = cpu_to_le64(mapping);
	wmb();
	rtl8169_mark_to_asic(desc, rx_buf_sz);
}

static struct sk_buff *rtl8169_alloc_rx_skb(struct pci_dev *pdev,
					    struct net_device *dev,
					    struct RxDesc *desc, int rx_buf_sz,
					    unsigned int align)
{
	struct sk_buff *skb;
	dma_addr_t mapping;
	unsigned int pad;

	pad = align ? align : NET_IP_ALIGN;

	skb = netdev_alloc_skb(dev, rx_buf_sz + pad);
	if (!skb)
		goto err_out;

	skb_reserve(skb, align ? ((pad - 1) & (unsigned long)skb->data) : pad);

	mapping = pci_map_single(pdev, skb->data, rx_buf_sz,
				 PCI_DMA_FROMDEVICE);

	rtl8169_map_to_asic(desc, mapping, rx_buf_sz);
out:
	return skb;

err_out:
	rtl8169_make_unusable_by_asic(desc);
	goto out;
}

static void rtl8169_rx_clear(struct rtl8169_private *tp)
{
	unsigned int i;

	for (i = 0; i < NUM_RX_DESC; i++) {
		if (tp->Rx_skbuff[i]) {
			rtl8169_free_rx_skb(tp, tp->Rx_skbuff + i,
					    tp->RxDescArray + i);
		}
	}
}

static u32 rtl8169_rx_fill(struct rtl8169_private *tp, struct net_device *dev,
			   u32 start, u32 end)
{
	u32 cur;

	for (cur = start; end - cur != 0; cur++) {
		struct sk_buff *skb;
		unsigned int i = cur % NUM_RX_DESC;

		WARN_ON((s32)(end - cur) < 0);

		if (tp->Rx_skbuff[i])
			continue;

		skb = rtl8169_alloc_rx_skb(tp->pci_dev, dev,
					   tp->RxDescArray + i,
					   tp->rx_buf_sz, tp->align);
		if (!skb)
			break;

		tp->Rx_skbuff[i] = skb;
	}
	return cur - start;
}

static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc)
{
	desc->opts1 |= cpu_to_le32(RingEnd);
}

static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
{
	tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
}

static int rtl8169_init_ring(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_init_ring_indexes(tp);

	memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info));
	memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));

	if (rtl8169_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
		goto err_out;

	rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);

	return 0;

err_out:
	rtl8169_rx_clear(tp);
	return -ENOMEM;
}

static void rtl8169_unmap_tx_skb(struct pci_dev *pdev, struct ring_info *tx_skb,
				 struct TxDesc *desc)
{
	unsigned int len = tx_skb->len;

	pci_unmap_single(pdev, le64_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);
	desc->opts1 = 0x00;
	desc->opts2 = 0x00;
	desc->addr = 0x00;
	tx_skb->len = 0;
}

static void rtl8169_tx_clear(struct rtl8169_private *tp)
{
	unsigned int i;

	for (i = tp->dirty_tx; i < tp->dirty_tx + NUM_TX_DESC; i++) {
		unsigned int entry = i % NUM_TX_DESC;
		struct ring_info *tx_skb = tp->tx_skb + entry;
		unsigned int len = tx_skb->len;

		if (len) {
			struct sk_buff *skb = tx_skb->skb;

			rtl8169_unmap_tx_skb(tp->pci_dev, tx_skb,
					     tp->TxDescArray + entry);
			if (skb) {
				dev_kfree_skb(skb);
				tx_skb->skb = NULL;
			}
			tp->dev->stats.tx_dropped++;
		}
	}
	tp->cur_tx = tp->dirty_tx = 0;
}

static void rtl8169_schedule_work(struct net_device *dev, work_func_t task)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	PREPARE_DELAYED_WORK(&tp->task, task);
	schedule_delayed_work(&tp->task, 4);
}

static void rtl8169_wait_for_quiescence(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	synchronize_irq(dev->irq);

	/* Wait for any pending NAPI task to complete */
#ifdef CONFIG_R8169_NAPI
	napi_disable(&tp->napi);
#endif

	rtl8169_irq_mask_and_ack(ioaddr);

#ifdef CONFIG_R8169_NAPI
	tp->intr_mask = 0xffff;
	RTL_W16(IntrMask, tp->intr_event);
	napi_enable(&tp->napi);
#endif
}

static void rtl8169_reinit_task(struct work_struct *work)
{
	struct rtl8169_private *tp =
		container_of(work, struct rtl8169_private, task.work);
	struct net_device *dev = tp->dev;
	int ret;

	rtnl_lock();

	if (!netif_running(dev))
		goto out_unlock;

	rtl8169_wait_for_quiescence(dev);
	rtl8169_close(dev);

	ret = rtl8169_open(dev);
	if (unlikely(ret < 0)) {
		if (net_ratelimit() && netif_msg_drv(tp)) {
			printk(KERN_ERR PFX "%s: reinit failure (status = %d)."
			       " Rescheduling.\n", dev->name, ret);
		}
		rtl8169_schedule_work(dev, rtl8169_reinit_task);
	}

out_unlock:
	rtnl_unlock();
}

static void rtl8169_reset_task(struct work_struct *work)
{
	struct rtl8169_private *tp =
		container_of(work, struct rtl8169_private, task.work);
	struct net_device *dev = tp->dev;

	rtnl_lock();

	if (!netif_running(dev))
		goto out_unlock;

	rtl8169_wait_for_quiescence(dev);

	rtl8169_rx_interrupt(dev, tp, tp->mmio_addr, ~(u32)0);
	rtl8169_tx_clear(tp);

	if (tp->dirty_rx == tp->cur_rx) {
		rtl8169_init_ring_indexes(tp);
		rtl_hw_start(dev);
		netif_wake_queue(dev);
		rtl8169_check_link_status(dev, tp, tp->mmio_addr);
	} else {
		if (net_ratelimit() && netif_msg_intr(tp)) {
			printk(KERN_EMERG PFX "%s: Rx buffers shortage\n",
			       dev->name);
		}
		rtl8169_schedule_work(dev, rtl8169_reset_task);
	}

out_unlock:
	rtnl_unlock();
}

static void rtl8169_tx_timeout(struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);

	rtl8169_hw_reset(tp->mmio_addr);

	/* Let's wait a bit while any (async) irq lands on */
	rtl8169_schedule_work(dev, rtl8169_reset_task);
}

static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
			      u32 opts1)
{
	struct skb_shared_info *info = skb_shinfo(skb);
	unsigned int cur_frag, entry;
	struct TxDesc * uninitialized_var(txd);

	entry = tp->cur_tx;
	for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
		skb_frag_t *frag = info->frags + cur_frag;
		dma_addr_t mapping;
		u32 status, len;
		void *addr;

		entry = (entry + 1) % NUM_TX_DESC;

		txd = tp->TxDescArray + entry;
		len = frag->size;
		addr = ((void *) page_address(frag->page)) + frag->page_offset;
		mapping = pci_map_single(tp->pci_dev, addr, len,
					 PCI_DMA_TODEVICE);

		/* anti gcc 2.95.3 bugware (sic) */
		status = opts1 | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));

		txd->opts1 = cpu_to_le32(status);
		txd->addr = cpu_to_le64(mapping);

		tp->tx_skb[entry].len = len;
	}

	if (cur_frag) {
		tp->tx_skb[entry].skb = skb;
		txd->opts1 |= cpu_to_le32(LastFrag);
	}

	return cur_frag;
}

static inline u32 rtl8169_tso_csum(struct sk_buff *skb, struct net_device *dev)
{
	if (dev->features & NETIF_F_TSO) {
		u32 mss = skb_shinfo(skb)->gso_size;

		if (mss)
			return LargeSend | ((mss & MSSMask) << MSSShift);
	}
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		const struct iphdr *ip = ip_hdr(skb);

		if (ip->protocol == IPPROTO_TCP)
			return IPCS | TCPCS;
		else if (ip->protocol == IPPROTO_UDP)
			return IPCS | UDPCS;
		WARN_ON(1);	/* we need a WARN() */
	}
	return 0;
}

static int rtl8169_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rtl8169_private *tp = netdev_priv(dev);
	unsigned int frags, entry = tp->cur_tx % NUM_TX_DESC;
	struct TxDesc *txd = tp->TxDescArray + entry;
	void __iomem *ioaddr = tp->mmio_addr;
	dma_addr_t mapping;
	u32 status, len;
	u32 opts1;
	int ret = NETDEV_TX_OK;

	if (unlikely(TX_BUFFS_AVAIL(tp) < skb_shinfo(skb)->nr_frags)) {
		if (netif_msg_drv(tp)) {
			printk(KERN_ERR
			       "%s: BUG! Tx Ring full when queue awake!\n",
			       dev->name);
		}
		goto err_stop;
	}

	if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
		goto err_stop;

	opts1 = DescOwn | rtl8169_tso_csum(skb, dev);

	frags = rtl8169_xmit_frags(tp, skb, opts1);
	if (frags) {
		len = skb_headlen(skb);
		opts1 |= FirstFrag;
	} else {
		len = skb->len;
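
Throughout this listing the driver hands buffers to the NIC by setting DescOwn in a descriptor's opts1 word and marks the ring's final slot with RingEnd (see rtl8169_mark_to_asic and rtl8169_mark_as_last_descriptor above). The following is a minimal standalone sketch of that ownership handshake; the struct layout and the DESC_OWN/RING_END bit positions are illustrative assumptions redefined locally, not authoritative hardware values, and only the usage pattern mirrors the driver.

/*
 * Standalone sketch of the rtl8169 descriptor ownership handshake.
 * DESC_OWN/RING_END bit positions and the struct layout are
 * illustrative assumptions, not the hardware's actual values.
 */
#include <stdint.h>
#include <stdio.h>

#define DESC_OWN (1u << 31)	/* descriptor currently belongs to the NIC */
#define RING_END (1u << 30)	/* last slot: NIC wraps back to ring start */
#define NUM_DESC 4

struct demo_desc {
	uint32_t opts1;		/* own/eor flags + buffer length */
	uint64_t addr;		/* DMA address of the data buffer */
};

/*
 * Hand a descriptor back to the NIC while preserving its end-of-ring
 * bit, the way rtl8169_mark_to_asic() does (minus the le32 accessors).
 */
static void mark_to_asic(struct demo_desc *d, uint32_t buf_len)
{
	uint32_t eor = d->opts1 & RING_END;

	d->opts1 = DESC_OWN | eor | buf_len;
}

int main(void)
{
	static struct demo_desc ring[NUM_DESC];	/* zero-initialized */
	unsigned int i;

	/* equivalent of rtl8169_mark_as_last_descriptor() */
	ring[NUM_DESC - 1].opts1 |= RING_END;

	for (i = 0; i < NUM_DESC; i++)
		mark_to_asic(&ring[i], 1536);

	for (i = 0; i < NUM_DESC; i++)
		printf("desc %u: own=%d eor=%d len=%u\n", i,
		       !!(ring[i].opts1 & DESC_OWN),
		       !!(ring[i].opts1 & RING_END),
		       (unsigned int)(ring[i].opts1 & 0xFFFFu));
	return 0;
}

In the driver proper the stores additionally go through cpu_to_le64()/cpu_to_le32(), and a wmb() in rtl8169_map_to_asic() orders the address write before the ownership bit is set, because the NIC reads the descriptor over DMA.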
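A subtler detail is the branch-free end-of-ring computation in rtl8169_xmit_frags: RingEnd * !((entry + 1) % NUM_TX_DESC) multiplies the flag by a 0/1 truth value instead of branching (the source comments it as gcc 2.95.3 bugware). A short demonstration with locally redefined, illustrative constants:

/*
 * Demo of the branch-free RingEnd trick from rtl8169_xmit_frags().
 * NUM_TX_DESC and RING_END are redefined here purely for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define NUM_TX_DESC 8
#define RING_END (1u << 30)

int main(void)
{
	unsigned int entry;

	for (entry = 0; entry < NUM_TX_DESC; entry++) {
		/* !(...) is 1 only on the ring's final slot, 0 elsewhere */
		uint32_t eor = RING_END * !((entry + 1) % NUM_TX_DESC);

		printf("entry %u -> eor=0x%08x\n", entry, (unsigned int)eor);
	}
	return 0;
}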