niu.c
/* Drop a packet: consume all of its RCR entries without building an skb,
 * freeing each RX page once its last buffer chunk has been consumed. */
static int niu_rx_pkt_ignore(struct niu *np, struct rx_ring_info *rp)
{
    unsigned int index = rp->rcr_index;
    int num_rcr = 0;

    rp->rx_dropped++;
    while (1) {
        struct page *page, **link;
        u64 addr, val;
        u32 rcr_size;

        num_rcr++;

        val = le64_to_cpup(&rp->rcr[index]);
        addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
            RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
        page = niu_find_rxpage(rp, addr, &link);

        rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >>
                                 RCR_ENTRY_PKTBUFSZ_SHIFT];
        if ((page->index + PAGE_SIZE) - rcr_size == addr) {
            *link = (struct page *) page->mapping;
            np->ops->unmap_page(np->device, page->index,
                                PAGE_SIZE, DMA_FROM_DEVICE);
            page->index = 0;
            page->mapping = NULL;
            __free_page(page);
            rp->rbr_refill_pending++;
        }

        index = NEXT_RCR(rp, index);
        if (!(val & RCR_ENTRY_MULTI))
            break;
    }
    rp->rcr_index = index;

    return num_rcr;
}

/* Build an skb for one received packet, attaching its RX buffer pages as
 * page fragments and recycling pages that have been fully consumed. */
static int niu_process_rx_pkt(struct niu *np, struct rx_ring_info *rp)
{
    unsigned int index = rp->rcr_index;
    struct sk_buff *skb;
    int len, num_rcr;

    skb = netdev_alloc_skb(np->dev, RX_SKB_ALLOC_SIZE);
    if (unlikely(!skb))
        return niu_rx_pkt_ignore(np, rp);

    num_rcr = 0;
    while (1) {
        struct page *page, **link;
        u32 rcr_size, append_size;
        u64 addr, val, off;

        num_rcr++;

        val = le64_to_cpup(&rp->rcr[index]);

        len = (val & RCR_ENTRY_L2_LEN) >> RCR_ENTRY_L2_LEN_SHIFT;
        len -= ETH_FCS_LEN;

        addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
            RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
        page = niu_find_rxpage(rp, addr, &link);

        rcr_size = rp->rbr_sizes[(val & RCR_ENTRY_PKTBUFSZ) >>
                                 RCR_ENTRY_PKTBUFSZ_SHIFT];

        off = addr & ~PAGE_MASK;
        append_size = rcr_size;
        if (num_rcr == 1) {
            int ptype;

            off += 2;
            append_size -= 2;

            ptype = (val >> RCR_ENTRY_PKT_TYPE_SHIFT);
            if ((ptype == RCR_PKT_TYPE_TCP ||
                 ptype == RCR_PKT_TYPE_UDP) &&
                !(val & (RCR_ENTRY_NOPORT |
                         RCR_ENTRY_ERROR)))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
            else
                skb->ip_summed = CHECKSUM_NONE;
        }
        if (!(val & RCR_ENTRY_MULTI))
            append_size = len - skb->len;

        niu_rx_skb_append(skb, page, off, append_size);
        if ((page->index + rp->rbr_block_size) - rcr_size == addr) {
            *link = (struct page *) page->mapping;
            np->ops->unmap_page(np->device, page->index,
                                PAGE_SIZE, DMA_FROM_DEVICE);
            page->index = 0;
            page->mapping = NULL;
            rp->rbr_refill_pending++;
        } else
            get_page(page);

        index = NEXT_RCR(rp, index);
        if (!(val & RCR_ENTRY_MULTI))
            break;
    }
    rp->rcr_index = index;

    skb_reserve(skb, NET_IP_ALIGN);
    __pskb_pull_tail(skb, min(len, NIU_RXPULL_MAX));

    rp->rx_packets++;
    rp->rx_bytes += skb->len;

    skb->protocol = eth_type_trans(skb, np->dev);
    netif_receive_skb(skb);

    np->dev->last_rx = jiffies;

    return num_rcr;
}

/* Populate the RX block ring (RBR) with freshly allocated pages, one
 * niu_rbr_add_page() call per page worth of ring entries. */
static int niu_rbr_fill(struct niu *np, struct rx_ring_info *rp, gfp_t mask)
{
    int blocks_per_page = rp->rbr_blocks_per_page;
    int err, index = rp->rbr_index;

    err = 0;
    while (index < (rp->rbr_table_size - blocks_per_page)) {
        err = niu_rbr_add_page(np, rp, mask, index);
        if (err)
            break;

        index += blocks_per_page;
    }

    rp->rbr_index = index;
    return err;
}

/* Unmap and free every page chained into rp->rxhash[], then clear the
 * RBR descriptors. */
static void niu_rbr_free(struct niu *np, struct rx_ring_info *rp)
{
    int i;

    for (i = 0; i < MAX_RBR_RING_SIZE; i++) {
        struct page *page;

        page = rp->rxhash[i];
        while (page) {
            struct page *next = (struct page *) page->mapping;
            u64 base = page->index;

            np->ops->unmap_page(np->device, base, PAGE_SIZE,
                                DMA_FROM_DEVICE);
            page->index = 0;
            page->mapping = NULL;

            __free_page(page);

            page = next;
        }
    }

    for (i = 0; i < rp->rbr_table_size; i++)
        rp->rbr[i] = cpu_to_le32(0);
    rp->rbr_index = 0;
}
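/*
 * The RX path above relies on niu_hash_rxaddr() and niu_find_rxpage(),
 * which are outside this excerpt.  The helpers below are a minimal
 * illustrative sketch, not the driver's exact code, assuming rxhash[]
 * chains pages through page->mapping and keys them by the page-aligned
 * DMA address stored in page->index (consistent with how niu_rbr_free()
 * and niu_rx_pkt_ignore() walk and unlink those chains).
 */
static unsigned int niu_hash_rxaddr(struct rx_ring_info *rp, u64 a)
{
    /* fold the page-aligned DMA address into a small bucket index */
    a >>= PAGE_SHIFT;
    a ^= (a >> ilog2(MAX_RBR_RING_SIZE));

    return (a & (MAX_RBR_RING_SIZE - 1));
}

static struct page *niu_find_rxpage(struct rx_ring_info *rp, u64 addr,
                                    struct page ***link)
{
    unsigned int h = niu_hash_rxaddr(rp, addr);
    struct page *p, **pp;

    addr &= PAGE_MASK;
    pp = &rp->rxhash[h];
    for (; (p = *pp) != NULL; pp = (struct page **) &p->mapping) {
        if (p->index == addr) {
            /* hand back the link slot so the caller can unchain the page */
            *link = pp;
            break;
        }
    }

    return p;
}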
/* Unmap and free one completed TX skb, stepping idx past all of its
 * descriptors; returns the next descriptor index. */
static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx)
{
    struct tx_buff_info *tb = &rp->tx_buffs[idx];
    struct sk_buff *skb = tb->skb;
    struct tx_pkt_hdr *tp;
    u64 tx_flags;
    int i, len;

    tp = (struct tx_pkt_hdr *) skb->data;
    tx_flags = le64_to_cpup(&tp->flags);

    rp->tx_packets++;
    rp->tx_bytes += (((tx_flags & TXHDR_LEN) >> TXHDR_LEN_SHIFT) -
                     ((tx_flags & TXHDR_PAD) / 2));

    len = skb_headlen(skb);
    np->ops->unmap_single(np->device, tb->mapping,
                          len, DMA_TO_DEVICE);

    if (le64_to_cpu(rp->descr[idx]) & TX_DESC_MARK)
        rp->mark_pending--;

    tb->skb = NULL;
    do {
        idx = NEXT_TX(rp, idx);
        len -= MAX_TX_DESC_LEN;
    } while (len > 0);

    for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
        tb = &rp->tx_buffs[idx];
        BUG_ON(tb->skb != NULL);
        np->ops->unmap_page(np->device, tb->mapping,
                            skb_shinfo(skb)->frags[i].size,
                            DMA_TO_DEVICE);
        idx = NEXT_TX(rp, idx);
    }

    dev_kfree_skb(skb);

    return idx;
}

#define NIU_TX_WAKEUP_THRESH(rp)    ((rp)->pending / 4)

/* TX completion: derive the number of newly completed packets from the
 * TX_CS packet counter, release them, and wake the queue if it was
 * stopped and enough ring space has been freed. */
static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
{
    u16 pkt_cnt, tmp;
    int cons;
    u64 cs;

    cs = rp->tx_cs;
    if (unlikely(!(cs & (TX_CS_MK | TX_CS_MMK))))
        goto out;

    tmp = pkt_cnt = (cs & TX_CS_PKT_CNT) >> TX_CS_PKT_CNT_SHIFT;
    pkt_cnt = (pkt_cnt - rp->last_pkt_cnt) &
        (TX_CS_PKT_CNT >> TX_CS_PKT_CNT_SHIFT);

    rp->last_pkt_cnt = tmp;

    cons = rp->cons;

    niudbg(TX_DONE, "%s: niu_tx_work() pkt_cnt[%u] cons[%d]\n",
           np->dev->name, pkt_cnt, cons);

    while (pkt_cnt--)
        cons = release_tx_packet(np, rp, cons);

    rp->cons = cons;
    smp_mb();

out:
    if (unlikely(netif_queue_stopped(np->dev) &&
                 (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) {
        netif_tx_lock(np->dev);
        if (netif_queue_stopped(np->dev) &&
            (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))
            netif_wake_queue(np->dev);
        netif_tx_unlock(np->dev);
    }
}

/* Service one RX channel for up to 'budget' packets, refill the RBR once
 * enough buffers have been consumed, and ack the work back to the chip. */
static int niu_rx_work(struct niu *np, struct rx_ring_info *rp, int budget)
{
    int qlen, rcr_done = 0, work_done = 0;
    struct rxdma_mailbox *mbox = rp->mbox;
    u64 stat;

#if 1
    stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel));
    qlen = nr64(RCRSTAT_A(rp->rx_channel)) & RCRSTAT_A_QLEN;
#else
    stat = le64_to_cpup(&mbox->rx_dma_ctl_stat);
    qlen = (le64_to_cpup(&mbox->rcrstat_a) & RCRSTAT_A_QLEN);
#endif
    mbox->rx_dma_ctl_stat = 0;
    mbox->rcrstat_a = 0;

    niudbg(RX_STATUS, "%s: niu_rx_work(chan[%d]), stat[%llx] qlen=%d\n",
           np->dev->name, rp->rx_channel, (unsigned long long) stat, qlen);

    rcr_done = work_done = 0;
    qlen = min(qlen, budget);
    while (work_done < qlen) {
        rcr_done += niu_process_rx_pkt(np, rp);
        work_done++;
    }

    if (rp->rbr_refill_pending >= rp->rbr_kick_thresh) {
        unsigned int i;

        for (i = 0; i < rp->rbr_refill_pending; i++)
            niu_rbr_refill(np, rp, GFP_ATOMIC);
        rp->rbr_refill_pending = 0;
    }

    stat = (RX_DMA_CTL_STAT_MEX |
            ((u64)work_done << RX_DMA_CTL_STAT_PKTREAD_SHIFT) |
            ((u64)rcr_done << RX_DMA_CTL_STAT_PTRREAD_SHIFT));

    nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat);

    return work_done;
}

/* Split the LDG interrupt vector (TX channels in the upper 32 bits, RX in
 * the lower 32) and service every ring that raised an event. */
static int niu_poll_core(struct niu *np, struct niu_ldg *lp, int budget)
{
    u64 v0 = lp->v0;
    u32 tx_vec = (v0 >> 32);
    u32 rx_vec = (v0 & 0xffffffff);
    int i, work_done = 0;

    niudbg(INTR, "%s: niu_poll_core() v0[%016llx]\n",
           np->dev->name, (unsigned long long) v0);

    for (i = 0; i < np->num_tx_rings; i++) {
        struct tx_ring_info *rp = &np->tx_rings[i];
        if (tx_vec & (1 << rp->tx_channel))
            niu_tx_work(np, rp);
        nw64(LD_IM0(LDN_TXDMA(rp->tx_channel)), 0);
    }

    for (i = 0; i < np->num_rx_rings; i++) {
        struct rx_ring_info *rp = &np->rx_rings[i];

        if (rx_vec & (1 << rp->rx_channel)) {
            int this_work_done;

            this_work_done = niu_rx_work(np, rp, budget);

            budget -= this_work_done;
            work_done += this_work_done;
        }
        nw64(LD_IM0(LDN_RXDMA(rp->rx_channel)), 0);
    }

    return work_done;
}
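/*
 * niu_tx_work() above wakes a stopped queue once niu_tx_avail() climbs past
 * NIU_TX_WAKEUP_THRESH (a quarter of the ring).  niu_tx_avail() is not part
 * of this excerpt; the version below is a plausible sketch only, assuming
 * prod/cons ring indices and a power-of-two MAX_TX_RING_SIZE.
 */
static inline int niu_tx_avail(struct tx_ring_info *rp)
{
    /* free descriptors = ring capacity minus entries still in flight */
    return (rp->pending -
            ((rp->prod - rp->cons) & (MAX_TX_RING_SIZE - 1)));
}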
/* NAPI poll handler: run the core work loop, then complete NAPI and re-arm
 * the logical device group when the budget was not exhausted. */
static int niu_poll(struct napi_struct *napi, int budget)
{
    struct niu_ldg *lp = container_of(napi, struct niu_ldg, napi);
    struct niu *np = lp->np;
    int work_done;

    work_done = niu_poll_core(np, lp, budget);

    if (work_done < budget) {
        netif_rx_complete(np->dev, napi);
        niu_ldg_rearm(np, lp, 1);
    }
    return work_done;
}

static void niu_log_rxchan_errors(struct niu *np, struct rx_ring_info *rp,
                                  u64 stat)
{
    dev_err(np->device, PFX "%s: RX channel %u errors ( ",
            np->dev->name, rp->rx_channel);

    if (stat & RX_DMA_CTL_STAT_RBR_TMOUT)
        printk("RBR_TMOUT ");
    if (stat & RX_DMA_CTL_STAT_RSP_CNT_ERR)
        printk("RSP_CNT ");
    if (stat & RX_DMA_CTL_STAT_BYTE_EN_BUS)
        printk("BYTE_EN_BUS ");
    if (stat & RX_DMA_CTL_STAT_RSP_DAT_ERR)
        printk("RSP_DAT ");
    if (stat & RX_DMA_CTL_STAT_RCR_ACK_ERR)
        printk("RCR_ACK ");
    if (stat & RX_DMA_CTL_STAT_RCR_SHA_PAR)
        printk("RCR_SHA_PAR ");
    if (stat & RX_DMA_CTL_STAT_RBR_PRE_PAR)
        printk("RBR_PRE_PAR ");
    if (stat & RX_DMA_CTL_STAT_CONFIG_ERR)
        printk("CONFIG ");
    if (stat & RX_DMA_CTL_STAT_RCRINCON)
        printk("RCRINCON ");
    if (stat & RX_DMA_CTL_STAT_RCRFULL)
        printk("RCRFULL ");
    if (stat & RX_DMA_CTL_STAT_RBRFULL)
        printk("RBRFULL ");
    if (stat & RX_DMA_CTL_STAT_RBRLOGPAGE)
        printk("RBRLOGPAGE ");
    if (stat & RX_DMA_CTL_STAT_CFIGLOGPAGE)
        printk("CFIGLOGPAGE ");
    if (stat & RX_DMA_CTL_STAT_DC_FIFO_ERR)
        printk("DC_FIFO ");

    printk(")\n");
}

/* Report fatal RX channel errors and write-1-clear the error bits. */
static int niu_rx_error(struct niu *np, struct rx_ring_info *rp)
{
    u64 stat = nr64(RX_DMA_CTL_STAT(rp->rx_channel));
    int err = 0;

    if (stat & (RX_DMA_CTL_STAT_CHAN_FATAL |
                RX_DMA_CTL_STAT_PORT_FATAL))
        err = -EINVAL;

    if (err) {
        dev_err(np->device, PFX "%s: RX channel %u error, stat[%llx]\n",
                np->dev->name, rp->rx_channel,
                (unsigned long long) stat);

        niu_log_rxchan_errors(np, rp, stat);
    }

    nw64(RX_DMA_CTL_STAT(rp->rx_channel),
         stat & RX_DMA_CTL_WRITE_CLEAR_ERRS);

    return err;
}

static void niu_log_txchan_errors(struct niu *np, struct tx_ring_info *rp,
                                  u64 cs)
{
    dev_err(np->device, PFX "%s: TX channel %u errors ( ",
            np->dev->name, rp->tx_channel);

    if (cs & TX_CS_MBOX_ERR)
        printk("MBOX ");
    if (cs & TX_CS_PKT_SIZE_ERR)
        printk("PKT_SIZE ");
    if (cs & TX_CS_TX_RING_OFLOW)
        printk("TX_RING_OFLOW ");
    if (cs & TX_CS_PREF_BUF_PAR_ERR)
        printk("PREF_BUF_PAR ");
    if (cs & TX_CS_NACK_PREF)
        printk("NACK_PREF ");
    if (cs & TX_CS_NACK_PKT_RD)
        printk("NACK_PKT_RD ");
    if (cs & TX_CS_CONF_PART_ERR)
        printk("CONF_PART ");
    if (cs & TX_CS_PKT_PRT_ERR)
        printk("PKT_PTR ");

    printk(")\n");
}

static int niu_tx_error(struct niu *np, struct tx_ring_info *rp)
{
    u64 cs, logh, logl;

    cs = nr64(TX_CS(rp->tx_channel));
    logh = nr64(TX_RNG_ERR_LOGH(rp->tx_channel));
    logl = nr64(TX_RNG_ERR_LOGL(rp->tx_channel));

    dev_err(np->device, PFX "%s: TX channel %u error, "
            "cs[%llx] logh[%llx] logl[%llx]\n",
            np->dev->name, rp->tx_channel,
            (unsigned long long) cs,
            (unsigned long long) logh,
            (unsigned long long) logl);

    niu_log_txchan_errors(np, rp, cs);

    return -ENODEV;
}

static int niu_mif_interrupt(struct niu *np)
{
    u64 mif_status = nr64(MIF_STATUS);
    int phy_mdint = 0;

    if (np->flags & NIU_FLAGS_XMAC) {
        u64 xrxmac_stat = nr64_mac(XRXMAC_STATUS);

        if (xrxmac_stat & XRXMAC_STATUS_PHY_MDINT)
            phy_mdint = 1;
    }

    dev_err(np->device, PFX "%s: MIF interrupt, "
            "stat[%llx] phy_mdint(%d)\n",
            np->dev->name, (unsigned long long) mif_status, phy_mdint);

    return -ENODEV;
}
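/*
 * niu_poll() above re-arms the logical device group via niu_ldg_rearm(),
 * which is outside this excerpt.  A hedged sketch of that helper, assuming
 * the LDG_IMGMT register takes the group's interrupt timer value plus an
 * ARM bit; register and field names here are assumptions for illustration.
 */
static void niu_ldg_rearm(struct niu *np, struct niu_ldg *lp, int on)
{
    u64 val = (u64) lp->timer;

    if (on)
        val |= LDG_IMGMT_ARM;

    /* write timer + arm bit so the LDG can raise the next interrupt */
    nw64(LDG_IMGMT(lp->ldg_num), val);
}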
/* Fold the XMAC's saturating hardware counters into the software MAC stats
 * whenever their 'count expired' status bits are set. */
static void niu_xmac_interrupt(struct niu *np)
{
    struct niu_xmac_stats *mp = &np->mac_stats.xmac;
    u64 val;

    val = nr64_mac(XTXMAC_STATUS);
    if (val & XTXMAC_STATUS_FRAME_CNT_EXP)
        mp->tx_frames += TXMAC_FRM_CNT_COUNT;
    if (val & XTXMAC_STATUS_BYTE_CNT_EXP)
        mp->tx_bytes += TXMAC_BYTE_CNT_COUNT;
    if (val & XTXMAC_STATUS_TXFIFO_XFR_ERR)
        mp->tx_fifo_errors++;
    if (val & XTXMAC_STATUS_TXMAC_OFLOW)
        mp->tx_overflow_errors++;
    if (val & XTXMAC_STATUS_MAX_PSIZE_ERR)
        mp->tx_max_pkt_size_errors++;
    if (val & XTXMAC_STATUS_TXMAC_UFLOW)
        mp->tx_underflow_errors++;

    val = nr64_mac(XRXMAC_STATUS);
    if (val & XRXMAC_STATUS_LCL_FLT_STATUS)
        mp->rx_local_faults++;
    if (val & XRXMAC_STATUS_RFLT_DET)
        mp->rx_remote_faults++;
    if (val & XRXMAC_STATUS_LFLT_CNT_EXP)
        mp->rx_link_faults += LINK_FAULT_CNT_COUNT;
    if (val & XRXMAC_STATUS_ALIGNERR_CNT_EXP)
        mp->rx_align_errors += RXMAC_ALIGN_ERR_CNT_COUNT;
    if (val & XRXMAC_STATUS_RXFRAG_CNT_EXP)
        mp->rx_frags += RXMAC_FRAG_CNT_COUNT;
    if (val & XRXMAC_STATUS_RXMULTF_CNT_EXP)
        mp->rx_mcasts += RXMAC_MC_FRM_CNT_COUNT;
    if (val & XRXMAC_STATUS_RXBCAST_CNT_EXP)
        mp->rx_bcasts += RXMAC_BC_FRM_CNT_COUNT;
    if (val & XRXMAC_STATUS_RXHIST1_CNT_EXP)
        mp->rx_hist_cnt1 += RXMAC_HIST_CNT1_COUNT;
    if (val & XRXMAC_STATUS_RXHIST2_CNT_EXP)
        mp->rx_hist_cnt2 += RXMAC_HIST_CNT2_COUNT;
    if (val & XRXMAC_STATUS_RXHIST3_CNT_EXP)
        mp->rx_hist_cnt3 += RXMAC_HIST_CNT3_COUNT;
    if (val & XRXMAC_STATUS_RXHIST4_CNT_EXP)
        mp->rx_hist_cnt4 += RXMAC_HIST_CNT4_COUNT;
    if (val & XRXMAC_STATUS_RXHIST5_CNT_EXP)
        mp->rx_hist_cnt5 += RXMAC_HIST_CNT5_COUNT;
    if (val & XRXMAC_STATUS_RXHIST6_CNT_EXP)
        mp->rx_hist_cnt6 += RXMAC_HIST_CNT6_COUNT;
    if (val & XRXMAC_STATUS_RXHIST7_CNT_EXP)
        mp->rx_hist_cnt7 += RXMAC_HIST_CNT7_COUNT;
    if (val & XRXMAC_STAT_MSK_RXOCTET_CNT_EXP)
        mp->rx_octets += RXMAC_BT_CNT_COUNT;
    if (val & XRXMAC_STATUS_CVIOLERR_CNT_EXP)
        mp->rx_code_violations += RXMAC_CD_VIO_CNT_COUNT;
    if (val & XRXMAC_STATUS_LENERR_CNT_EXP)
        mp->rx_len_errors += RXMAC_MPSZER_CNT_COUNT;
    if (val & XRXMAC_STATUS_CRCERR_CNT_EXP)
        mp->rx_crc_errors += RXMAC_CRC_ER_CNT_COUNT;
    if (val & XRXMAC_STATUS_RXUFLOW)
        mp->rx_underflows++;
    if (val & XRXMAC_STATUS_RXOFLOW)
        mp->rx_overflows++;

    val = nr64_mac(XMAC_FC_STAT);
    if (val & XMAC_FC_STAT_TX_MAC_NPAUSE)
        mp->pause_off_state++;
    if (val & XMAC_FC_STAT_TX_MAC_PAUSE)
        mp->pause_on_state++;
    if (val & XMAC_FC_STAT_RX_MAC_RPAUSE)
        mp->pause_received++;
}

/* Same bookkeeping for the BMAC variant. */
static void niu_bmac_interrupt(struct niu *np)
{
    struct niu_bmac_stats *mp = &np->mac_stats.bmac;
    u64 val;

    val = nr64_mac(BTXMAC_STATUS);
    if (val & BTXMAC_STATUS_UNDERRUN)
        mp->tx_underflow_errors++;
    if (val & BTXMAC_STATUS_MAX_PKT_ERR)
        mp->tx_max_pkt_size_errors++;
    if (val & BTXMAC_STATUS_BYTE_CNT_EXP)
        mp->tx_bytes += BTXMAC_BYTE_CNT_COUNT;
    if (val & BTXMAC_STATUS_FRAME_CNT_EXP)
        mp->tx_frames += BTXMAC_FRM_CNT_COUNT;

    val = nr64_mac(BRXMAC_STATUS);
    if (val & BRXMAC_STATUS_OVERFLOW)
        mp->rx_overflows++;
    if (val & BRXMAC_STATUS_FRAME_CNT_EXP)
        mp->rx_frames += BRXMAC_FRAME_CNT_COUNT;
    if (val & BRXMAC_STATUS_ALIGN_ERR_EXP)
        mp->rx_align_errors += BRXMAC_ALIGN_ERR_CNT_COUNT;
    if (val & BRXMAC_STATUS_CRC_ERR_EXP)
        mp->rx_crc_errors += BRXMAC_ALIGN_ERR_CNT_COUNT;
    if (val & BRXMAC_STATUS_LEN_ERR_EXP)
        mp->rx_len_errors += BRXMAC_CODE_VIOL_ERR_CNT_COUNT;

    val = nr64_mac(BMAC_CTRL_STATUS);
    if (val & BMAC_CTRL_STATUS_NOPAUSE)
        mp->pause_off_state++;
    if (val & BMAC_CTRL_ST