bnx2.c
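/*
 * Excerpt from the Linux bnx2 (Broadcom NetXtreme II) Ethernet driver.
 * The listing starts mid-function: the block below is the body of the
 * driver's context-initialization routine.  For each of the 96 context
 * IDs it maps the backing physical page through the context window,
 * zeroes the context memory one 32-bit word at a time, and then
 * installs the virtual-to-physical mapping.  The 5706 A0 special case
 * remaps certain context IDs, presumably to work around an issue in
 * that chip revision.
 */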
{
        u32 vcid;

        vcid = 96;
        while (vcid) {
                u32 vcid_addr, pcid_addr, offset;
                int i;

                vcid--;

                if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
                        u32 new_vcid;

                        vcid_addr = GET_PCID_ADDR(vcid);
                        if (vcid & 0x8) {
                                new_vcid = 0x60 + (vcid & 0xf0) +
                                           (vcid & 0x7);
                        }
                        else {
                                new_vcid = vcid;
                        }
                        pcid_addr = GET_PCID_ADDR(new_vcid);
                }
                else {
                        vcid_addr = GET_CID_ADDR(vcid);
                        pcid_addr = vcid_addr;
                }

                for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
                        vcid_addr += (i << PHY_CTX_SHIFT);
                        pcid_addr += (i << PHY_CTX_SHIFT);

                        REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
                        REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

                        /* Zero out the context. */
                        for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
                                CTX_WR(bp, 0x00, offset, 0);

                        REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
                        REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
                }
        }
}

static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
        u16 *good_mbuf;
        u32 good_mbuf_cnt;
        u32 val;

        good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
        if (good_mbuf == NULL) {
                printk(KERN_ERR PFX "Failed to allocate memory in "
                                    "bnx2_alloc_bad_rbuf\n");
                return -ENOMEM;
        }

        REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
                BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

        good_mbuf_cnt = 0;

        /* Allocate a bunch of mbufs and save the good ones in an array. */
        val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
        while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
                REG_WR_IND(bp, BNX2_RBUF_COMMAND,
                        BNX2_RBUF_COMMAND_ALLOC_REQ);

                val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

                val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

                /* The addresses with Bit 9 set are bad memory blocks. */
                if (!(val & (1 << 9))) {
                        good_mbuf[good_mbuf_cnt] = (u16) val;
                        good_mbuf_cnt++;
                }

                val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
        }

        /* Free the good ones back to the mbuf pool thus discarding
         * all the bad ones.
         */
        while (good_mbuf_cnt) {
                good_mbuf_cnt--;

                val = good_mbuf[good_mbuf_cnt];

                val = (val << 9) | val | 1;

                REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
        }
        kfree(good_mbuf);
        return 0;
}

static void
bnx2_set_mac_addr(struct bnx2 *bp)
{
        u32 val;
        u8 *mac_addr = bp->dev->dev_addr;

        val = (mac_addr[0] << 8) | mac_addr[1];

        REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);

        val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
                (mac_addr[4] << 8) | mac_addr[5];

        REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
}

static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
{
        struct sk_buff *skb;
        struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
        dma_addr_t mapping;
        struct rx_bd *rxbd =
                &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
        unsigned long align;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (skb == NULL) {
                return -ENOMEM;
        }

        if (unlikely((align = (unsigned long) skb->data &
                      (BNX2_RX_ALIGN - 1))))
                skb_reserve(skb, BNX2_RX_ALIGN - align);

        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
                PCI_DMA_FROMDEVICE);

        rx_buf->skb = skb;
        pci_unmap_addr_set(rx_buf, mapping, mapping);

        rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
        rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

        bp->rx_prod_bseq += bp->rx_buf_use_size;

        return 0;
}

static int
bnx2_phy_event_is_set(struct bnx2 *bp, u32 event)
{
        struct status_block *sblk = bp->status_blk;
        u32 new_link_state, old_link_state;
        int is_set = 1;

        new_link_state = sblk->status_attn_bits & event;
        old_link_state = sblk->status_attn_bits_ack & event;
        if (new_link_state != old_link_state) {
                if (new_link_state)
                        REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
                else
                        REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
        } else
                is_set = 0;

        return is_set;
}

static void
bnx2_phy_int(struct bnx2 *bp)
{
        if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_LINK_STATE)) {
                spin_lock(&bp->phy_lock);
                bnx2_set_link(bp);
                spin_unlock(&bp->phy_lock);
        }
        if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_TIMER_ABORT))
                bnx2_set_remote_link(bp);
}
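/*
 * TX completion: walk the software ring from tx_cons toward the
 * hardware consumer index in the status block, unmapping each buffer
 * and freeing its skb.  Because the chip can report partial
 * buffer-descriptor completions for TSO packets, a GSO skb is only
 * reclaimed once every one of its BDs is behind the hardware consumer
 * (the skb_is_gso() check below).  The smp_mb() near the end pairs
 * with bnx2_start_xmit() so a stopped queue is reliably woken.
 */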
static void
bnx2_tx_int(struct bnx2 *bp)
{
        struct status_block *sblk = bp->status_blk;
        u16 hw_cons, sw_cons, sw_ring_cons;
        int tx_free_bd = 0;

        hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
        if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
                hw_cons++;
        }
        sw_cons = bp->tx_cons;
        while (sw_cons != hw_cons) {
                struct sw_bd *tx_buf;
                struct sk_buff *skb;
                int i, last;

                sw_ring_cons = TX_RING_IDX(sw_cons);

                tx_buf = &bp->tx_buf_ring[sw_ring_cons];
                skb = tx_buf->skb;

                /* partial BD completions possible with TSO packets */
                if (skb_is_gso(skb)) {
                        u16 last_idx, last_ring_idx;

                        last_idx = sw_cons +
                                skb_shinfo(skb)->nr_frags + 1;
                        last_ring_idx = sw_ring_cons +
                                skb_shinfo(skb)->nr_frags + 1;
                        if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
                                last_idx++;
                        }
                        if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
                                break;
                        }
                }

                pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
                        skb_headlen(skb), PCI_DMA_TODEVICE);

                tx_buf->skb = NULL;
                last = skb_shinfo(skb)->nr_frags;

                for (i = 0; i < last; i++) {
                        sw_cons = NEXT_TX_BD(sw_cons);

                        pci_unmap_page(bp->pdev,
                                pci_unmap_addr(
                                        &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
                                        mapping),
                                skb_shinfo(skb)->frags[i].size,
                                PCI_DMA_TODEVICE);
                }

                sw_cons = NEXT_TX_BD(sw_cons);

                tx_free_bd += last + 1;

                dev_kfree_skb(skb);

                hw_cons = bp->hw_tx_cons =
                        sblk->status_tx_quick_consumer_index0;

                if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
                        hw_cons++;
                }
        }

        bp->tx_cons = sw_cons;

        /* Need to make the tx_cons update visible to bnx2_start_xmit()
         * before checking for netif_queue_stopped().  Without the
         * memory barrier, there is a small possibility that
         * bnx2_start_xmit() will miss it and cause the queue to be
         * stopped forever.
         */
        smp_mb();

        if (unlikely(netif_queue_stopped(bp->dev)) &&
            (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
                netif_tx_lock(bp->dev);
                if ((netif_queue_stopped(bp->dev)) &&
                    (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
                        netif_wake_queue(bp->dev);
                netif_tx_unlock(bp->dev);
        }
}

static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
        u16 cons, u16 prod)
{
        struct sw_bd *cons_rx_buf, *prod_rx_buf;
        struct rx_bd *cons_bd, *prod_bd;

        cons_rx_buf = &bp->rx_buf_ring[cons];
        prod_rx_buf = &bp->rx_buf_ring[prod];

        pci_dma_sync_single_for_device(bp->pdev,
                pci_unmap_addr(cons_rx_buf, mapping),
                bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

        bp->rx_prod_bseq += bp->rx_buf_use_size;

        prod_rx_buf->skb = skb;

        if (cons == prod)
                return;

        pci_unmap_addr_set(prod_rx_buf, mapping,
                        pci_unmap_addr(cons_rx_buf, mapping));

        cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
        prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
        prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
        prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}

static inline u16
bnx2_get_hw_rx_cons(struct bnx2 *bp)
{
        u16 cons = bp->status_blk->status_rx_quick_consumer_index0;

        if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
                cons++;
        return cons;
}
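/*
 * RX completion: process packets between the software and hardware
 * consumer indices, up to the NAPI budget.  Packets with errors are
 * recycled in place via bnx2_reuse_rx_skb().  When the MTU exceeds
 * 1500, small packets are copied into a freshly allocated skb so the
 * original ring buffer can be reused; otherwise the buffer itself is
 * handed up the stack and a replacement is allocated for the ring.
 */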
static int
bnx2_rx_int(struct bnx2 *bp, int budget)
{
        u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
        struct l2_fhdr *rx_hdr;
        int rx_pkt = 0;

        hw_cons = bnx2_get_hw_rx_cons(bp);
        sw_cons = bp->rx_cons;
        sw_prod = bp->rx_prod;

        /* Memory barrier necessary as speculative reads of the rx
         * buffer can be ahead of the index in the status block
         */
        rmb();
        while (sw_cons != hw_cons) {
                unsigned int len;
                u32 status;
                struct sw_bd *rx_buf;
                struct sk_buff *skb;
                dma_addr_t dma_addr;

                sw_ring_cons = RX_RING_IDX(sw_cons);
                sw_ring_prod = RX_RING_IDX(sw_prod);

                rx_buf = &bp->rx_buf_ring[sw_ring_cons];
                skb = rx_buf->skb;

                rx_buf->skb = NULL;

                dma_addr = pci_unmap_addr(rx_buf, mapping);

                pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
                        bp->rx_offset + RX_COPY_THRESH,
                        PCI_DMA_FROMDEVICE);

                rx_hdr = (struct l2_fhdr *) skb->data;
                len = rx_hdr->l2_fhdr_pkt_len - 4;

                if ((status = rx_hdr->l2_fhdr_status) &
                        (L2_FHDR_ERRORS_BAD_CRC |
                        L2_FHDR_ERRORS_PHY_DECODE |
                        L2_FHDR_ERRORS_ALIGNMENT |
                        L2_FHDR_ERRORS_TOO_SHORT |
                        L2_FHDR_ERRORS_GIANT_FRAME)) {

                        goto reuse_rx;
                }

                /* Since we don't have a jumbo ring, copy small packets
                 * if mtu > 1500
                 */
                if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
                        struct sk_buff *new_skb;

                        new_skb = netdev_alloc_skb(bp->dev, len + 2);
                        if (new_skb == NULL)
                                goto reuse_rx;

                        /* aligned copy */
                        skb_copy_from_linear_data_offset(skb,
                                bp->rx_offset - 2,
                                new_skb->data, len + 2);
                        skb_reserve(new_skb, 2);
                        skb_put(new_skb, len);

                        bnx2_reuse_rx_skb(bp, skb,
                                sw_ring_cons, sw_ring_prod);

                        skb = new_skb;
                }
                else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
                        pci_unmap_single(bp->pdev, dma_addr,
                                bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

                        skb_reserve(skb, bp->rx_offset);
                        skb_put(skb, len);
                }
                else {
reuse_rx:
                        bnx2_reuse_rx_skb(bp, skb,
                                sw_ring_cons, sw_ring_prod);
                        goto next_rx;
                }

                skb->protocol = eth_type_trans(skb, bp->dev);

                if ((len > (bp->dev->mtu + ETH_HLEN)) &&
                        (ntohs(skb->protocol) != 0x8100)) {

                        dev_kfree_skb(skb);
                        goto next_rx;
                }

                skb->ip_summed = CHECKSUM_NONE;
                if (bp->rx_csum &&
                        (status & (L2_FHDR_STATUS_TCP_SEGMENT |
                        L2_FHDR_STATUS_UDP_DATAGRAM))) {

                        if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
                                              L2_FHDR_ERRORS_UDP_XSUM)) == 0))
                                skb->ip_summed = CHECKSUM_UNNECESSARY;
                }

#ifdef BCM_VLAN
                if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
                    (bp->vlgrp != 0)) {
                        vlan_hwaccel_receive_skb(skb, bp->vlgrp,
                                rx_hdr->l2_fhdr_vlan_tag);
                }
                else
#endif
                        netif_receive_skb(skb);

                bp->dev->last_rx = jiffies;
                rx_pkt++;

next_rx:
                sw_cons = NEXT_RX_BD(sw_cons);
                sw_prod = NEXT_RX_BD(sw_prod);

                if (rx_pkt == budget)
                        break;

                /* Refresh hw_cons to see if there is new work */
                if (sw_cons == hw_cons) {
                        hw_cons = bnx2_get_hw_rx_cons(bp);
                        rmb();
                }
        }
        bp->rx_cons = sw_cons;
        bp->rx_prod = sw_prod;

        REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

        REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);

        mmiowb();

        return rx_pkt;
}

/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
        struct net_device *dev = dev_instance;
        struct bnx2 *bp = netdev_priv(dev);

        prefetch(bp->status_blk);
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
                BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

        /* Return here if interrupt is disabled. */
        if (unlikely(atomic_read(&bp->intr_sem) != 0))
                return IRQ_HANDLED;

        netif_rx_schedule(dev, &bp->napi);

        return IRQ_HANDLED;
}

static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
        struct net_device *dev = dev_instance;
        struct bnx2 *bp = netdev_priv(dev);

        prefetch(bp->status_blk);

        /* Return here if interrupt is disabled. */
        if (unlikely(atomic_read(&bp->intr_sem) != 0))
                return IRQ_HANDLED;

        netif_rx_schedule(dev, &bp->napi);

        return IRQ_HANDLED;
}
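/* INTx ISR - unlike the MSI handlers above, the interrupt line may be
 * shared and may fire before the status block write is visible, so the
 * handler must first check whether this device actually asserted the
 * interrupt, and must read back the ack register to deassert the line.
 */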
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
        struct net_device *dev = dev_instance;
        struct bnx2 *bp = netdev_priv(dev);
        struct status_block *sblk = bp->status_blk;

        /* When using INTx, it is possible for the interrupt to arrive
         * at the CPU before the status block posted prior to the
         * interrupt. Reading a register will flush the status block.
         * When using MSI, the MSI message will always complete after
         * the status block write.
         */
        if ((sblk->status_idx == bp->last_status_idx) &&
            (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
             BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
                return IRQ_NONE;

        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
                BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

        /* Read back to deassert IRQ immediately to avoid too many
         * spurious interrupts.
         */
        REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

        /* Return here if interrupt is shared and is disabled. */
        if (unlikely(atomic_read(&bp->intr_sem) != 0))
                return IRQ_HANDLED;

        if (netif_rx_schedule_prep(dev, &bp->napi)) {
                bp->last_status_idx = sblk->status_idx;
                __netif_rx_schedule(dev, &bp->napi);
        }

        return IRQ_HANDLED;
}

#define STATUS_ATTN_EVENTS (