sge.c
From "Linux kernel source" · C code · 2,293 lines total · page 1/5
C
2,293 lines
/*
 * Copyright (c) 2005-2007 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/dma-mapping.h>
#include "common.h"
#include "regs.h"
#include "sge_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

/* 0 disables the SGE "generation timestamp" doorbell coalescing paths below */
#define USE_GTS 0

/* Buffer size used for the small-buffer free list (FL0) when using sk_buffs */
#define SGE_RX_SM_BUF_SIZE 1536

/* Rx packets at most this long are copied into a fresh skb on receive */
#define SGE_RX_COPY_THRES  256

/* Number of payload bytes pulled into the skb header area from page buffers */
#define SGE_RX_PULL_LEN    128

/*
 * Page chunk size for FL0 buffers if FL0 is to be populated with page chunks.
 * It must be a divisor of PAGE_SIZE.  If set to 0 FL0 will use sk_buffs
 * directly.
 */
#define FL0_PG_CHUNK_SIZE  2048

/* Free-list credit level below which incoming packets are dropped */
#define SGE_RX_DROP_THRES 16

/*
 * Period of the Tx buffer reclaim timer.
 * This timer does not need to run frequently as Tx buffers are usually
 * reclaimed by new Tx packets.
 */
#define TX_RECLAIM_PERIOD (HZ / 4)

/* WR size in bytes */
#define WR_LEN (WR_FLITS * 8)

/*
 * Types of Tx queues in each queue set.  Order here matters, do not change.
 */
enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };

/* Values for sge_txq.flags */
enum {
	TXQ_RUNNING = 1 << 0,	/* fetch engine is running */
	TXQ_LAST_PKT_DB = 1 << 1,	/* last packet rang the doorbell */
};

/* HW Tx descriptor: an opaque array of 64-bit flits filled in by the senders */
struct tx_desc {
	__be64 flit[TX_DESC_FLITS];
};

/* HW Rx (free-list) descriptor: buffer DMA address split across two words */
struct rx_desc {
	__be32 addr_lo;
	__be32 len_gen;		/* buffer length plus generation bit 1 */
	__be32 gen2;		/* generation bit 2 */
	__be32 addr_hi;
};

struct tx_sw_desc {		/* SW state per Tx descriptor */
	struct sk_buff *skb;
};

struct rx_sw_desc {		/* SW state per Rx descriptor */
	union {
		struct sk_buff *skb;		/* used when !fl->use_pages */
		struct fl_pg_chunk pg_chunk;	/* used when fl->use_pages */
	};
	DECLARE_PCI_UNMAP_ADDR(dma_addr);	/* empty on coherent platforms */
};

struct rsp_desc {		/* response queue descriptor */
	struct rss_header rss_hdr;
	__be32 flags;
	__be32 len_cq;
	u8 imm_data[47];	/* immediate payload for small packets */
	u8 intr_gen;
};

struct unmap_info {		/* packet unmapping info, overlays skb->cb */
	int sflit;		/* start flit of first SGL entry in Tx descriptor */
	u16 fragidx;		/* first page fragment in current Tx descriptor */
	u16 addr_idx;		/* buffer index of first SGL entry in descriptor */
	u32 len;		/* mapped length of skb main body */
};

/*
 * Holds unmapping information for Tx packets that need deferred unmapping.
 * This structure lives at skb->head and must be allocated by callers.
 */
struct deferred_unmap_info {
	struct pci_dev *pdev;
	dma_addr_t addr[MAX_SKB_FRAGS + 1];	/* main body + one per fragment */
};

/*
 * Maps a number of flits to the number of Tx descriptors that can hold them.
 * The formula is
 *
 *	desc = 1 + (flits - 2) / (WR_FLITS - 1).
 *
 * HW allows up to 4 descriptors to be combined into a WR.
 */
static u8 flit_desc_map[] = {
	0,
#if SGE_NUM_GENBITS == 1
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
#elif SGE_NUM_GENBITS == 2
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
#else
# error "SGE_NUM_GENBITS must be 1 or 2"
#endif
};

/* Map a free list back to its containing queue set. */
static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx)
{
	return container_of(q, struct sge_qset, fl[qidx]);
}

/* Map a response queue back to its containing queue set. */
static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
{
	return container_of(q, struct sge_qset, rspq);
}

/* Map a Tx queue back to its containing queue set. */
static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
{
	return container_of(q, struct sge_qset, txq[qidx]);
}

/**
 *	refill_rspq - replenish an SGE response queue
 *	@adapter: the adapter
 *	@q: the response queue to replenish
 *	@credits: how many new responses to make available
 *
 *	Replenishes a response queue by making the supplied number of responses
 *	available to HW via a single credit-return register write.
 */
static inline void refill_rspq(struct adapter *adapter,
			       const struct sge_rspq *q, unsigned int credits)
{
	t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN,
		     V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
}

/**
 *	need_skb_unmap - does the platform need unmapping of sk_buffs?
 *
 *	Returns true if the platform needs sk_buff unmapping.  On platforms
 *	where DMA unmapping is a no-op this evaluates to a compile-time 0,
 *	letting the compiler optimize away the unmapping code entirely.
 */
static inline int need_skb_unmap(void)
{
	/*
	 * This structure is used to tell if the platform needs buffer
	 * unmapping by checking if DECLARE_PCI_UNMAP_ADDR defines anything.
	 */
	struct dummy {
		DECLARE_PCI_UNMAP_ADDR(addr);
	};

	/* sizeof is 0 when the macro expands to nothing (GNU C extension) */
	return sizeof(struct dummy) != 0;
}

/**
 *	unmap_skb - unmap a packet main body and its page fragments
 *	@skb: the packet
 *	@q: the Tx queue containing Tx descriptors for the packet
 *	@cidx: index of Tx descriptor
 *	@pdev: the PCI device
 *
 *	Unmap the main body of an sk_buff and its page fragments, if any.
 *	Because of the fairly complicated structure of our SGLs and the desire
 *	to conserve space for metadata, we keep the information necessary to
 *	unmap an sk_buff partly in the sk_buff itself (in its cb), and partly
 *	in the Tx descriptors (the physical addresses of the various data
 *	buffers).  The send functions initialize the state in skb->cb so we
 *	can unmap the buffers held in the first Tx descriptor here, and we
 *	have enough information at this point to update the state for the next
 *	Tx descriptor.
 */
static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
			     unsigned int cidx, struct pci_dev *pdev)
{
	const struct sg_ent *sgp;
	struct unmap_info *ui = (struct unmap_info *)skb->cb;
	int nfrags, frag_idx, curflit, j = ui->addr_idx;

	/* first SGL entry of this packet within the descriptor's flit array */
	sgp = (struct sg_ent *)&q->desc[cidx].flit[ui->sflit];

	if (ui->len) {
		/* the skb main body is always addr[0] of the first SGL entry */
		pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]), ui->len,
				 PCI_DMA_TODEVICE);
		ui->len = 0;	/* so we know for next descriptor for this skb */
		j = 1;
	}

	frag_idx = ui->fragidx;
	curflit = ui->sflit + 1 + j;
	nfrags = skb_shinfo(skb)->nr_frags;

	/* walk fragments until we run out or hit the end of this descriptor */
	while (frag_idx < nfrags && curflit < WR_FLITS) {
		pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]),
			       skb_shinfo(skb)->frags[frag_idx].size,
			       PCI_DMA_TODEVICE);
		j ^= 1;			/* each sg_ent holds two addresses */
		if (j == 0) {
			sgp++;		/* consumed both; advance to next entry */
			curflit++;
		}
		curflit++;
		frag_idx++;
	}

	if (frag_idx < nfrags) {	/* SGL continues into next Tx descriptor */
		ui->fragidx = frag_idx;
		ui->addr_idx = j;
		ui->sflit = curflit - WR_FLITS - j;	/* sflit can be -1 */
	}
}

/**
 *	free_tx_desc - reclaims Tx descriptors and their buffers
 *	@adapter: the adapter
 *	@q: the Tx queue to reclaim descriptors from
 *	@n: the number of descriptors to reclaim
 *
 *
 *	Reclaims Tx descriptors from an SGE Tx queue and frees the associated
 *	Tx buffers.  Called with the Tx queue lock held.
 */
static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
			 unsigned int n)
{
	struct tx_sw_desc *d;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int cidx = q->cidx;

	/* only tunnel (Ethernet) queues carry DMA-mapped SGLs needing unmap */
	const int need_unmap = need_skb_unmap() &&
			       q->cntxt_id >= FW_TUNNEL_SGEEC_START;

	d = &q->sdesc[cidx];
	while (n--) {
		if (d->skb) {	/* an SGL is present */
			if (need_unmap)
				unmap_skb(d->skb, q, cidx, pdev);
			/*
			 * NOTE(review): skb->priority appears to hold the
			 * index of the packet's final descriptor, so the skb
			 * is freed only once even when it spans several
			 * descriptors — confirm against the send path.
			 */
			if (d->skb->priority == cidx)
				kfree_skb(d->skb);
		}
		++d;
		if (++cidx == q->size) {	/* wrap around the ring */
			cidx = 0;
			d = q->sdesc;
		}
	}
	q->cidx = cidx;
}

/**
 *	reclaim_completed_tx - reclaims completed Tx descriptors
 *	@adapter: the adapter
 *	@q: the Tx queue to reclaim completed descriptors from
 *
 *	Reclaims Tx descriptors that the SGE has indicated it has processed,
 *	and frees the associated buffers if possible.  Called with the Tx
 *	queue's lock held.
 */
static inline void reclaim_completed_tx(struct adapter *adapter,
					struct sge_txq *q)
{
	unsigned int reclaim = q->processed - q->cleaned;

	if (reclaim) {
		free_tx_desc(adapter, q, reclaim);
		q->cleaned += reclaim;
		q->in_use -= reclaim;
	}
}

/**
 *	should_restart_tx - are there enough resources to restart a Tx queue?
 *	@q: the Tx queue
 *
 *	Checks if there are enough descriptors to restart a suspended Tx queue:
 *	true once reclaimable completions would drop usage below half capacity.
 */
static inline int should_restart_tx(const struct sge_txq *q)
{
	unsigned int r = q->processed - q->cleaned;

	return q->in_use - r < (q->size >> 1);
}

/**
 *	free_rx_bufs - free the Rx buffers on an SGE free list
 *	@pdev: the PCI device associated with the adapter
 *	@rxq: the SGE free list to clean up
 *
 *	Release the buffers on an SGE free-buffer Rx queue.  HW fetching from
 *	this queue should be stopped before calling this function.
 */
static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
{
	unsigned int cidx = q->cidx;

	/* drain every posted buffer, consuming the credit count as we go */
	while (q->credits--) {
		struct rx_sw_desc *d = &q->sdesc[cidx];

		pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
				 q->buf_size, PCI_DMA_FROMDEVICE);
		if (q->use_pages) {
			put_page(d->pg_chunk.page);
			d->pg_chunk.page = NULL;
		} else {
			kfree_skb(d->skb);
			d->skb = NULL;
		}
		if (++cidx == q->size)
			cidx = 0;
	}

	/* also drop the partially-consumed chunk cached on the queue itself */
	if (q->pg_chunk.page) {
		__free_page(q->pg_chunk.page);
		q->pg_chunk.page = NULL;
	}
}

/**
 *	add_one_rx_buf - add a packet buffer to a free-buffer list
 *	@va: buffer start VA
 *	@len: the buffer length
 *	@d: the HW Rx descriptor to write
 *	@sd: the SW Rx descriptor to write
 *	@gen: the generation bit value
 *	@pdev: the PCI device associated with the adapter
 *
 *	Add a buffer of the given length to the supplied HW and SW Rx
 *	descriptors.
 */
static inline void add_one_rx_buf(void *va, unsigned int len,
				  struct rx_desc *d, struct rx_sw_desc *sd,
				  unsigned int gen, struct pci_dev *pdev)
{
	dma_addr_t mapping;

	mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(sd, dma_addr, mapping);

	d->addr_lo = cpu_to_be32(mapping);
	d->addr_hi = cpu_to_be32((u64) mapping >> 32);
	/* address must be visible before the gen bits mark the desc valid */
	wmb();
	d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
	d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
}

/*
 * Carve the next buf_size-sized chunk out of the queue's cached page,
 * allocating a fresh page when the cache is empty.  Returns 0 on success,
 * -ENOMEM if a new page could not be allocated.
 */
static int alloc_pg_chunk(struct sge_fl *q, struct rx_sw_desc *sd, gfp_t gfp)
{
	if (!q->pg_chunk.page) {
		q->pg_chunk.page = alloc_page(gfp);
		if (unlikely(!q->pg_chunk.page))
			return -ENOMEM;
		q->pg_chunk.va = page_address(q->pg_chunk.page);
		q->pg_chunk.offset = 0;
	}
	sd->pg_chunk = q->pg_chunk;

	q->pg_chunk.offset += q->buf_size;
	if (q->pg_chunk.offset == PAGE_SIZE)
		q->pg_chunk.page = NULL;	/* page fully consumed */
	else {
		q->pg_chunk.va += q->buf_size;
		/* extra reference for the chunk just handed to the desc */
		get_page(q->pg_chunk.page);
	}
	return 0;
}

/**
 *	refill_fl - refill an SGE free-buffer list
 *	@adapter: the adapter
 *	@q: the free-list to refill
 *	@n: the number of new buffers to allocate
 *	@gfp: the gfp flags for allocating new buffers
 *
 *	(Re)populate an SGE free-buffer list with up to @n new
packet buffers, * allocated with the supplied gfp flags. The caller must assure that * @n does not exceed the queue's capacity. */static void refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp){ void *buf_start; struct rx_sw_desc *sd = &q->sdesc[q->pidx]; struct rx_desc *d = &q->desc[q->pidx]; while (n--) { if (q->use_pages) { if (unlikely(alloc_pg_chunk(q, sd, gfp))) {nomem: q->alloc_failed++; break; } buf_start = sd->pg_chunk.va; } else { struct sk_buff *skb = alloc_skb(q->buf_size, gfp); if (!skb) goto nomem; sd->skb = skb; buf_start = skb->data; } add_one_rx_buf(buf_start, q->buf_size, d, sd, q->gen, adap->pdev); d++; sd++; if (++q->pidx == q->size) { q->pidx = 0; q->gen ^= 1; sd = q->sdesc; d = q->desc; } q->credits++; }
⌨️ Keyboard shortcuts
Copy code: Ctrl + C
Search code: Ctrl + F
Full-screen mode: F11
Increase font size: Ctrl + =
Decrease font size: Ctrl + -
Show shortcuts: ?