sge.c
    unsigned int size, i;

    for (i = 0; i < SGE_CMDQ_N; i++) {
        struct cmdQ *q = &sge->cmdQ[i];

        if (q->centries) {
            if (q->in_use)
                free_cmdQ_buffers(sge, q, q->in_use);
            kfree(q->centries);
        }
        if (q->entries) {
            size = sizeof(struct cmdQ_e) * q->size;
            pci_free_consistent(pdev, size, q->entries, q->dma_addr);
        }
    }
}

/*
 * Allocates basic TX resources, consisting of memory mapped command Qs.
 */
static int alloc_tx_resources(struct sge *sge, struct sge_params *p)
{
    struct pci_dev *pdev = sge->adapter->pdev;
    unsigned int size, i;

    for (i = 0; i < SGE_CMDQ_N; i++) {
        struct cmdQ *q = &sge->cmdQ[i];

        q->genbit = 1;
        q->sop = 1;
        q->size = p->cmdQ_size[i];
        q->in_use = 0;
        q->status = 0;
        q->processed = q->cleaned = 0;
        q->stop_thres = 0;
        spin_lock_init(&q->lock);
        size = sizeof(struct cmdQ_e) * q->size;
        q->entries = (struct cmdQ_e *)
            pci_alloc_consistent(pdev, size, &q->dma_addr);
        if (!q->entries)
            goto err_no_mem;
        memset(q->entries, 0, size);
        size = sizeof(struct cmdQ_ce) * q->size;
        q->centries = kmalloc(size, GFP_KERNEL);
        if (!q->centries)
            goto err_no_mem;
        memset(q->centries, 0, size);
    }

    /*
     * CommandQ 0 handles Ethernet and TOE packets, while queue 1 is TOE
     * only. For queue 0 set the stop threshold so we can handle one more
     * packet from each port, plus reserve an additional 24 entries for
     * Ethernet packets only. Queue 1 never suspends nor do we reserve
     * space for Ethernet packets.
     */
    sge->cmdQ[0].stop_thres = sge->adapter->params.nports *
        (MAX_SKB_FRAGS + 1);
    return 0;

err_no_mem:
    free_tx_resources(sge);
    return -ENOMEM;
}
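/*
 * Illustrative sketch (not part of the original driver): the stop threshold
 * set above reserves enough command queue 0 descriptors for one worst-case
 * Ethernet packet per port, since a single sk_buff can consume up to
 * MAX_SKB_FRAGS fragment descriptors plus one for the linear header.
 * The hypothetical helper below merely restates that arithmetic for a given
 * port count.
 */
static inline unsigned int example_cmdq0_stop_thres(unsigned int nports)
{
    /* one descriptor per page fragment plus one for the skb head, per port */
    return nports * (MAX_SKB_FRAGS + 1);
}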
static inline void setup_ring_params(struct adapter *adapter, u64 addr,
                                     u32 size, int base_reg_lo,
                                     int base_reg_hi, int size_reg)
{
    writel((u32)addr, adapter->regs + base_reg_lo);
    writel(addr >> 32, adapter->regs + base_reg_hi);
    writel(size, adapter->regs + size_reg);
}

/*
 * Enable/disable VLAN acceleration.
 */
void t1_set_vlan_accel(struct adapter *adapter, int on_off)
{
    struct sge *sge = adapter->sge;

    sge->sge_control &= ~F_VLAN_XTRACT;
    if (on_off)
        sge->sge_control |= F_VLAN_XTRACT;
    if (adapter->open_device_map) {
        writel(sge->sge_control, adapter->regs + A_SG_CONTROL);
        readl(adapter->regs + A_SG_CONTROL); /* flush */
    }
}

/*
 * Programs the various SGE registers. However, the engine is not yet enabled,
 * but sge->sge_control is setup and ready to go.
 */
static void configure_sge(struct sge *sge, struct sge_params *p)
{
    struct adapter *ap = sge->adapter;

    writel(0, ap->regs + A_SG_CONTROL);
    setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].size,
                      A_SG_CMD0BASELWR, A_SG_CMD0BASEUPR, A_SG_CMD0SIZE);
    setup_ring_params(ap, sge->cmdQ[1].dma_addr, sge->cmdQ[1].size,
                      A_SG_CMD1BASELWR, A_SG_CMD1BASEUPR, A_SG_CMD1SIZE);
    setup_ring_params(ap, sge->freelQ[0].dma_addr, sge->freelQ[0].size,
                      A_SG_FL0BASELWR, A_SG_FL0BASEUPR, A_SG_FL0SIZE);
    setup_ring_params(ap, sge->freelQ[1].dma_addr, sge->freelQ[1].size,
                      A_SG_FL1BASELWR, A_SG_FL1BASEUPR, A_SG_FL1SIZE);

    /* The threshold comparison uses <. */
    writel(SGE_RX_SM_BUF_SIZE + 1, ap->regs + A_SG_FLTHRESHOLD);

    setup_ring_params(ap, sge->respQ.dma_addr, sge->respQ.size,
                      A_SG_RSPBASELWR, A_SG_RSPBASEUPR, A_SG_RSPSIZE);
    writel((u32)sge->respQ.size - 1, ap->regs + A_SG_RSPQUEUECREDIT);

    sge->sge_control = F_CMDQ0_ENABLE | F_CMDQ1_ENABLE | F_FL0_ENABLE |
        F_FL1_ENABLE | F_CPL_ENABLE | F_RESPONSE_QUEUE_ENABLE |
        V_CMDQ_PRIORITY(2) | F_DISABLE_CMDQ1_GTS | F_ISCSI_COALESCE |
        F_DISABLE_FL0_GTS | F_DISABLE_FL1_GTS |
        V_RX_PKT_OFFSET(sge->rx_pkt_pad);

#if defined(__BIG_ENDIAN_BITFIELD)
    sge->sge_control |= F_ENABLE_BIG_ENDIAN;
#endif

    /* Initialize no-resource timer */
    sge->intrtimer_nres = SGE_INTRTIMER_NRES * core_ticks_per_usec(ap);

    t1_sge_set_coalesce_params(sge, p);
}

/*
 * Return the payload capacity of the jumbo free-list buffers.
 */
static inline unsigned int jumbo_payload_capacity(const struct sge *sge)
{
    return sge->freelQ[sge->jumbo_fl].rx_buffer_size -
        sge->freelQ[sge->jumbo_fl].dma_offset -
        sizeof(struct cpl_rx_data);
}

/*
 * Frees all SGE related resources and the sge structure itself
 */
void t1_sge_destroy(struct sge *sge)
{
    if (sge->espibug_skb)
        kfree_skb(sge->espibug_skb);

    free_tx_resources(sge);
    free_rx_resources(sge);
    kfree(sge);
}

/*
 * Allocates new RX buffers on the freelist Q (and tracks them on the freelist
 * context Q) until the Q is full or alloc_skb fails.
 *
 * It is possible that the generation bits already match, indicating that the
 * buffer is already valid and nothing needs to be done. This happens when we
 * copied a received buffer into a new sk_buff during the interrupt processing.
 *
 * If the SGE doesn't automatically align packets properly (!sge->rx_pkt_pad),
 * we specify a RX_OFFSET in order to make sure that the IP header is 4B
 * aligned.
 */
static void refill_free_list(struct sge *sge, struct freelQ *q)
{
    struct pci_dev *pdev = sge->adapter->pdev;
    struct freelQ_ce *ce = &q->centries[q->pidx];
    struct freelQ_e *e = &q->entries[q->pidx];
    unsigned int dma_len = q->rx_buffer_size - q->dma_offset;

    while (q->credits < q->size) {
        struct sk_buff *skb;
        dma_addr_t mapping;

        skb = alloc_skb(q->rx_buffer_size, GFP_ATOMIC);
        if (!skb)
            break;

        skb_reserve(skb, q->dma_offset);
        mapping = pci_map_single(pdev, skb->data, dma_len,
                                 PCI_DMA_FROMDEVICE);
        ce->skb = skb;
        pci_unmap_addr_set(ce, dma_addr, mapping);
        pci_unmap_len_set(ce, dma_len, dma_len);
        e->addr_lo = (u32)mapping;
        e->addr_hi = (u64)mapping >> 32;
        e->len_gen = V_CMD_LEN(dma_len) | V_CMD_GEN1(q->genbit);
        wmb();
        e->gen2 = V_CMD_GEN2(q->genbit);

        e++;
        ce++;
        if (++q->pidx == q->size) {
            q->pidx = 0;
            q->genbit ^= 1;
            ce = q->centries;
            e = q->entries;
        }
        q->credits++;
    }
}
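/*
 * Illustrative sketch (not part of the original driver): the free lists and
 * command queues above all rely on the same producer-index/generation-bit
 * idiom. The hypothetical helper below shows that idiom in isolation: the
 * producer index advances descriptor by descriptor, and the generation bit is
 * flipped each time the ring wraps so the hardware can distinguish freshly
 * written descriptors from stale ones left over from the previous lap.
 */
struct example_ring {
    unsigned int pidx;      /* producer index */
    unsigned int size;      /* number of descriptors in the ring */
    unsigned int genbit;    /* generation value written into new descriptors */
};

static inline void example_ring_advance(struct example_ring *r)
{
    if (++r->pidx == r->size) {
        r->pidx = 0;        /* wrap back to the start of the ring */
        r->genbit ^= 1;     /* new lap, new generation value */
    }
}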
/*
 * Calls refill_free_list for both free lists. If we cannot fill at least 1/4
 * of both rings, we go into 'few interrupt mode' in order to give the system
 * time to free up resources.
 */
static void freelQs_empty(struct sge *sge)
{
    struct adapter *adapter = sge->adapter;
    u32 irq_reg = readl(adapter->regs + A_SG_INT_ENABLE);
    u32 irqholdoff_reg;

    refill_free_list(sge, &sge->freelQ[0]);
    refill_free_list(sge, &sge->freelQ[1]);

    if (sge->freelQ[0].credits > (sge->freelQ[0].size >> 2) &&
        sge->freelQ[1].credits > (sge->freelQ[1].size >> 2)) {
        irq_reg |= F_FL_EXHAUSTED;
        irqholdoff_reg = sge->fixed_intrtimer;
    } else {
        /* Clear the F_FL_EXHAUSTED interrupts for now */
        irq_reg &= ~F_FL_EXHAUSTED;
        irqholdoff_reg = sge->intrtimer_nres;
    }
    writel(irqholdoff_reg, adapter->regs + A_SG_INTRTIMER);
    writel(irq_reg, adapter->regs + A_SG_INT_ENABLE);

    /* We reenable the Qs to force a freelist GTS interrupt later */
    doorbell_pio(adapter, F_FL0_ENABLE | F_FL1_ENABLE);
}

#define SGE_PL_INTR_MASK (F_PL_INTR_SGE_ERR | F_PL_INTR_SGE_DATA)
#define SGE_INT_FATAL (F_RESPQ_OVERFLOW | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)
#define SGE_INT_ENABLE (F_RESPQ_EXHAUSTED | F_RESPQ_OVERFLOW | \
                        F_FL_EXHAUSTED | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)

/*
 * Disable SGE Interrupts
 */
void t1_sge_intr_disable(struct sge *sge)
{
    u32 val = readl(sge->adapter->regs + A_PL_ENABLE);

    writel(val & ~SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
    writel(0, sge->adapter->regs + A_SG_INT_ENABLE);
}

/*
 * Enable SGE interrupts.
 */
void t1_sge_intr_enable(struct sge *sge)
{
    u32 en = SGE_INT_ENABLE;
    u32 val = readl(sge->adapter->regs + A_PL_ENABLE);

    if (sge->adapter->flags & TSO_CAPABLE)
        en &= ~F_PACKET_TOO_BIG;
    writel(en, sge->adapter->regs + A_SG_INT_ENABLE);
    writel(val | SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
}

/*
 * Clear SGE interrupts.
 */
void t1_sge_intr_clear(struct sge *sge)
{
    writel(SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_CAUSE);
    writel(0xffffffff, sge->adapter->regs + A_SG_INT_CAUSE);
}

/*
 * SGE 'Error' interrupt handler
 */
int t1_sge_intr_error_handler(struct sge *sge)
{
    struct adapter *adapter = sge->adapter;
    u32 cause = readl(adapter->regs + A_SG_INT_CAUSE);

    if (adapter->flags & TSO_CAPABLE)
        cause &= ~F_PACKET_TOO_BIG;
    if (cause & F_RESPQ_EXHAUSTED)
        sge->stats.respQ_empty++;
    if (cause & F_RESPQ_OVERFLOW) {
        sge->stats.respQ_overflow++;
        CH_ALERT("%s: SGE response queue overflow\n", adapter->name);
    }
    if (cause & F_FL_EXHAUSTED) {
        sge->stats.freelistQ_empty++;
        freelQs_empty(sge);
    }
    if (cause & F_PACKET_TOO_BIG) {
        sge->stats.pkt_too_big++;
        CH_ALERT("%s: SGE max packet size exceeded\n", adapter->name);
    }
    if (cause & F_PACKET_MISMATCH) {
        sge->stats.pkt_mismatch++;
        CH_ALERT("%s: SGE packet mismatch\n", adapter->name);
    }
    if (cause & SGE_INT_FATAL)
        t1_fatal_err(adapter);

    writel(cause, adapter->regs + A_SG_INT_CAUSE);
    return 0;
}

const struct sge_intr_counts *t1_sge_get_intr_counts(struct sge *sge)
{
    return &sge->stats;
}

const struct sge_port_stats *t1_sge_get_port_stats(struct sge *sge, int port)
{
    return &sge->port_stats[port];
}
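/*
 * Illustrative sketch (not part of the original driver): freelQs_empty() only
 * re-arms the F_FL_EXHAUSTED interrupt once a free list is strictly more than
 * a quarter full; otherwise it stretches the interrupt holdoff timer to give
 * the host time to replenish buffers. The hypothetical helper below restates
 * that quarter-full test on its own.
 */
static inline int example_fl_recovered(unsigned int credits, unsigned int size)
{
    /* "recovered" means strictly more than size/4 buffers are posted */
    return credits > (size >> 2);
}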
/**
 * recycle_fl_buf - recycle a free list buffer
 * @fl: the free list
 * @idx: index of buffer to recycle
 *
 * Recycles the specified buffer on the given free list by adding it at
 * the next available slot on the list.
 */
static void recycle_fl_buf(struct freelQ *fl, int idx)
{
    struct freelQ_e *from = &fl->entries[idx];
    struct freelQ_e *to = &fl->entries[fl->pidx];

    fl->centries[fl->pidx] = fl->centries[idx];
    to->addr_lo = from->addr_lo;
    to->addr_hi = from->addr_hi;
    to->len_gen = G_CMD_LEN(from->len_gen) | V_CMD_GEN1(fl->genbit);
    wmb();
    to->gen2 = V_CMD_GEN2(fl->genbit);
    fl->credits++;

    if (++fl->pidx == fl->size) {
        fl->pidx = 0;
        fl->genbit ^= 1;
    }
}

/**
 * get_packet - return the next ingress packet buffer
 * @pdev: the PCI device that received the packet
 * @fl: the SGE free list holding the packet
 * @len: the actual packet length, excluding any SGE padding
 * @dma_pad: padding at beginning of buffer left by SGE DMA
 * @skb_pad: padding to be used if the packet is copied
 * @copy_thres: length threshold under which a packet should be copied
 * @drop_thres: # of remaining buffers before we start dropping packets
 *
 * Get the next packet from a free list and complete setup of the
 * sk_buff. If the packet is small we make a copy and recycle the
 * original buffer, otherwise we use the original buffer itself. If a
 * positive drop threshold is supplied packets are dropped and their
 * buffers recycled if (a) the number of remaining buffers is under the
 * threshold and the packet is too big to copy, or (b) the packet should
 * be copied but there is no memory for the copy.
 */
static inline struct sk_buff *get_packet(struct pci_dev *pdev,
                                         struct freelQ *fl, unsigned int len,
                                         int dma_pad, int skb_pad,
                                         unsigned int copy_thres,
                                         unsigned int drop_thres)
{
    struct sk_buff *skb;
    struct freelQ_ce *ce = &fl->centries[fl->cidx];

    if (len < copy_thres) {
        skb = alloc_skb(len + skb_pad, GFP_ATOMIC);
        if (likely(skb != NULL)) {
            skb_reserve(skb, skb_pad);
            skb_put(skb, len);
            pci_dma_sync_single_for_cpu(pdev,
                    pci_unmap_addr(ce, dma_addr),
                    pci_unmap_len(ce, dma_len),
                    PCI_DMA_FROMDEVICE);
            memcpy(skb->data, ce->skb->data + dma_pad, len);
            pci_dma_sync_single_for_device(pdev,
                    pci_unmap_addr(ce, dma_addr),
                    pci_unmap_len(ce, dma_len),
                    PCI_DMA_FROMDEVICE);
        } else if (!drop_thres)
            goto use_orig_buf;

        recycle_fl_buf(fl, fl->cidx);
        return skb;
    }

    if (fl->credits < drop_thres) {
        recycle_fl_buf(fl, fl->cidx);
        return NULL;
    }

use_orig_buf:
    pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
                     pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
    skb = ce->skb;
    skb_reserve(skb, dma_pad);
    skb_put(skb, len);
    return skb;
}

/**
 * unexpected_offload - handle an unexpected offload packet
 * @adapter: the adapter
 * @fl: the free list that received the packet
 *
 * Called when we receive an unexpected offload packet (e.g., the TOE
 * function is disabled or the card is a NIC). Prints a message and