📄 cxio_wr.h
字号:
/* Flit (64-bit word) indices of software cookies / CQE inside a WR slot. */
#define T3_SQ_COOKIE_FLIT 14

#define T3_RQ_COOKIE_FLIT 13
#define T3_RQ_CQE_FLIT 14

/* Extract the WR opcode field from a firmware RI work-request header. */
static inline enum t3_wr_opcode fw_riwrh_opcode(struct fw_riwrh *wqe)
{
    return G_FW_RIWR_OP(be32_to_cpu(wqe->op_seop_flags));
}

/*
 * Fill in a firmware RI work-request header in a DMA-visible WQE.
 *
 * @wqe:    work-request header to build (hardware-shared memory)
 * @op:     WR opcode
 * @flags:  WR flags
 * @genbit: generation bit for this pass over the ring
 * @tid:    connection tid
 * @len:    WR length in flits
 *
 * The write barrier between the two header words orders the writes so
 * that op_seop_flags is globally visible before gen_tid_len — the
 * hardware presumably keys on the second word / gen bit to decide the
 * WR is complete (NOTE(review): protocol inferred from the barrier
 * placement; confirm against the T3 firmware interface spec).
 */
static inline void build_fw_riwrh(struct fw_riwrh *wqe, enum t3_wr_opcode op,
                                  enum t3_wr_flags flags, u8 genbit, u32 tid,
                                  u8 len)
{
    wqe->op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(op) |
                                     V_FW_RIWR_SOPEOP(M_FW_RIWR_SOPEOP) |
                                     V_FW_RIWR_FLAGS(flags));
    wmb();
    wqe->gen_tid_len = cpu_to_be32(V_FW_RIWR_GEN(genbit) |
                                   V_FW_RIWR_TID(tid) |
                                   V_FW_RIWR_LEN(len));
    /* 2nd gen bit... */
    ((union t3_wr *)wqe)->genbit.genbit = cpu_to_be64(genbit);
}

/*
 * T3 ULP2_TX commands
 */
enum t3_utx_mem_op {
    T3_UTX_MEM_READ = 2,
    T3_UTX_MEM_WRITE = 3
};

/* T3 MC7 RDMA TPT entry format */
enum tpt_mem_type {
    TPT_NON_SHARED_MR = 0x0,
    TPT_SHARED_MR = 0x1,
    TPT_MW = 0x2,
    TPT_MW_RELAXED_PROTECTION = 0x3
};

enum tpt_addr_type {
    TPT_ZBTO = 0,              /* zero-based offset addressing */
    TPT_VATO = 1               /* virtual-address based addressing */
};

/* Access permission bits (combinable). */
enum tpt_mem_perm {
    TPT_LOCAL_READ = 0x8,
    TPT_LOCAL_WRITE = 0x4,
    TPT_REMOTE_READ = 0x2,
    TPT_REMOTE_WRITE = 0x1
};

/* On-adapter TPT (translation/protection table) entry — wire format,
 * all fields big-endian. Field sub-layouts are given by the S_/M_/V_/G_
 * macros below. */
struct tpt_entry {
    __be32 valid_stag_pdid;
    __be32 flags_pagesize_qpid;
    __be32 rsvd_pbl_addr;
    __be32 len;
    __be32 va_hi;
    __be32 va_low_or_fbo;
    __be32 rsvd_bind_cnt_or_pstag;
    __be32 rsvd_pbl_size;
};

/*
 * TPT field accessors. Convention: S_ = shift, M_ = mask (post-shift),
 * V_ = place value into field, G_ = extract field, F_ = single-bit flag.
 */
#define S_TPT_VALID 31
#define V_TPT_VALID(x) ((x) << S_TPT_VALID)
#define F_TPT_VALID V_TPT_VALID(1U)

#define S_TPT_STAG_KEY 23
#define M_TPT_STAG_KEY 0xFF
#define V_TPT_STAG_KEY(x) ((x) << S_TPT_STAG_KEY)
#define G_TPT_STAG_KEY(x) (((x) >> S_TPT_STAG_KEY) & M_TPT_STAG_KEY)

#define S_TPT_STAG_STATE 22
#define V_TPT_STAG_STATE(x) ((x) << S_TPT_STAG_STATE)
#define F_TPT_STAG_STATE V_TPT_STAG_STATE(1U)

#define S_TPT_STAG_TYPE 20
#define M_TPT_STAG_TYPE 0x3
#define V_TPT_STAG_TYPE(x) ((x) << S_TPT_STAG_TYPE)
#define G_TPT_STAG_TYPE(x) (((x) >> S_TPT_STAG_TYPE) & M_TPT_STAG_TYPE)

#define S_TPT_PDID 0
#define M_TPT_PDID 0xFFFFF
#define V_TPT_PDID(x) ((x) << S_TPT_PDID)
#define G_TPT_PDID(x) (((x) >> S_TPT_PDID) & M_TPT_PDID)

#define S_TPT_PERM 28
#define M_TPT_PERM 0xF
#define V_TPT_PERM(x) ((x) << S_TPT_PERM)
#define G_TPT_PERM(x) (((x) >> S_TPT_PERM) & M_TPT_PERM)

#define S_TPT_REM_INV_DIS 27
#define V_TPT_REM_INV_DIS(x) ((x) << S_TPT_REM_INV_DIS)
#define F_TPT_REM_INV_DIS V_TPT_REM_INV_DIS(1U)

#define S_TPT_ADDR_TYPE 26
#define V_TPT_ADDR_TYPE(x) ((x) << S_TPT_ADDR_TYPE)
#define F_TPT_ADDR_TYPE V_TPT_ADDR_TYPE(1U)

#define S_TPT_MW_BIND_ENABLE 25
#define V_TPT_MW_BIND_ENABLE(x) ((x) << S_TPT_MW_BIND_ENABLE)
#define F_TPT_MW_BIND_ENABLE V_TPT_MW_BIND_ENABLE(1U)

#define S_TPT_PAGE_SIZE 20
#define M_TPT_PAGE_SIZE 0x1F
#define V_TPT_PAGE_SIZE(x) ((x) << S_TPT_PAGE_SIZE)
#define G_TPT_PAGE_SIZE(x) (((x) >> S_TPT_PAGE_SIZE) & M_TPT_PAGE_SIZE)

#define S_TPT_PBL_ADDR 0
#define M_TPT_PBL_ADDR 0x1FFFFFFF
#define V_TPT_PBL_ADDR(x) ((x) << S_TPT_PBL_ADDR)
#define G_TPT_PBL_ADDR(x) (((x) >> S_TPT_PBL_ADDR) & M_TPT_PBL_ADDR)

#define S_TPT_QPID 0
#define M_TPT_QPID 0xFFFFF
#define V_TPT_QPID(x) ((x) << S_TPT_QPID)
#define G_TPT_QPID(x) (((x) >> S_TPT_QPID) & M_TPT_QPID)

#define S_TPT_PSTAG 0
#define M_TPT_PSTAG 0xFFFFFF
#define V_TPT_PSTAG(x) ((x) << S_TPT_PSTAG)
#define G_TPT_PSTAG(x) (((x) >> S_TPT_PSTAG) & M_TPT_PSTAG)

#define S_TPT_PBL_SIZE 0
#define M_TPT_PBL_SIZE 0xFFFFF
#define V_TPT_PBL_SIZE(x) ((x) << S_TPT_PBL_SIZE)
#define G_TPT_PBL_SIZE(x) (((x) >> S_TPT_PBL_SIZE) & M_TPT_PBL_SIZE)

/*
 * CQE defs
 */
/* Completion-queue entry — wire format. The union is keyed on the CQE
 * type bit: rcqe for RQ completions, scqe for SQ completions. */
struct t3_cqe {
    __be32 header;
    __be32 len;
    union {
        struct {
            __be32 stag;
            __be32 msn;
        } rcqe;
        struct {
            u32 wrid_hi;
            u32 wrid_low;
        } scqe;
    } u;
};

#define S_CQE_OOO 31
#define M_CQE_OOO 0x1
#define G_CQE_OOO(x) ((((x) >> S_CQE_OOO)) & M_CQE_OOO)
/* NOTE(review): "CEQ" below is presumably a historical typo for
 * V_CQE_OOO; kept as-is since external callers may reference it. */
#define V_CEQ_OOO(x) ((x)<<S_CQE_OOO)

#define S_CQE_QPID 12
#define M_CQE_QPID 0x7FFFF
#define G_CQE_QPID(x) ((((x) >> S_CQE_QPID)) & M_CQE_QPID)
#define V_CQE_QPID(x) ((x)<<S_CQE_QPID)

#define S_CQE_SWCQE 11
#define M_CQE_SWCQE 0x1
#define G_CQE_SWCQE(x) ((((x) >> S_CQE_SWCQE)) & M_CQE_SWCQE)
#define V_CQE_SWCQE(x) ((x)<<S_CQE_SWCQE)

#define S_CQE_GENBIT 10
#define M_CQE_GENBIT 0x1
#define G_CQE_GENBIT(x) (((x) >> S_CQE_GENBIT) & M_CQE_GENBIT)
#define V_CQE_GENBIT(x) ((x)<<S_CQE_GENBIT)

#define S_CQE_STATUS 5
#define M_CQE_STATUS 0x1F
#define G_CQE_STATUS(x) ((((x) >> S_CQE_STATUS)) & M_CQE_STATUS)
#define V_CQE_STATUS(x) ((x)<<S_CQE_STATUS)

#define S_CQE_TYPE 4
#define M_CQE_TYPE 0x1
#define G_CQE_TYPE(x) ((((x) >> S_CQE_TYPE)) & M_CQE_TYPE)
#define V_CQE_TYPE(x) ((x)<<S_CQE_TYPE)

#define S_CQE_OPCODE 0
#define M_CQE_OPCODE 0xF
#define G_CQE_OPCODE(x) ((((x) >> S_CQE_OPCODE)) & M_CQE_OPCODE)
#define V_CQE_OPCODE(x) ((x)<<S_CQE_OPCODE)

/* Convenience accessors taking a struct t3_cqe (by value/lvalue, not
 * pointer); they byte-swap the big-endian header before extracting. */
#define SW_CQE(x)         (G_CQE_SWCQE(be32_to_cpu((x).header)))
#define CQE_OOO(x)        (G_CQE_OOO(be32_to_cpu((x).header)))
#define CQE_QPID(x)       (G_CQE_QPID(be32_to_cpu((x).header)))
#define CQE_GENBIT(x)     (G_CQE_GENBIT(be32_to_cpu((x).header)))
#define CQE_TYPE(x)       (G_CQE_TYPE(be32_to_cpu((x).header)))
#define SQ_TYPE(x)        (CQE_TYPE((x)))
#define RQ_TYPE(x)        (!CQE_TYPE((x)))
#define CQE_STATUS(x)     (G_CQE_STATUS(be32_to_cpu((x).header)))
#define CQE_OPCODE(x)     (G_CQE_OPCODE(be32_to_cpu((x).header)))

#define CQE_LEN(x)        (be32_to_cpu((x).len))

/* used for RQ completion processing */
#define CQE_WRID_STAG(x)  (be32_to_cpu((x).u.rcqe.stag))
#define CQE_WRID_MSN(x)   (be32_to_cpu((x).u.rcqe.msn))

/* used for SQ completion processing */
#define CQE_WRID_SQ_WPTR(x) ((x).u.scqe.wrid_hi)
#define CQE_WRID_WPTR(x)    ((x).u.scqe.wrid_low)

/* generic accessor macros */
#define CQE_WRID_HI(x)    ((x).u.scqe.wrid_hi)
#define CQE_WRID_LOW(x)   ((x).u.scqe.wrid_low)

/* TPT / CQE completion status codes. */
#define TPT_ERR_SUCCESS                     0x0
#define TPT_ERR_STAG                        0x1  /* STAG invalid: off limit,  */
                                                 /* zero, or STAG_key         */
                                                 /* mismatch                  */
#define TPT_ERR_PDID                        0x2  /* PDID mismatch             */
#define TPT_ERR_QPID                        0x3  /* QPID mismatch             */
#define TPT_ERR_ACCESS                      0x4  /* Invalid access right      */
#define TPT_ERR_WRAP                        0x5  /* Wrap error                */
#define TPT_ERR_BOUND                       0x6  /* base and bounds violation */
#define TPT_ERR_INVALIDATE_SHARED_MR        0x7  /* attempt to invalidate a   */
                                                 /* shared memory region      */
#define TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND 0x8  /* attempt to invalidate an  */
                                                 /* MR with an MW still bound */
#define TPT_ERR_ECC                         0x9  /* ECC error detected        */
#define TPT_ERR_ECC_PSTAG                   0xA  /* ECC error detected when   */
                                                 /* reading PSTAG for a MW    */
                                                 /* Invalidate                */
#define TPT_ERR_PBL_ADDR_BOUND              0xB  /* pbl addr out of bounds:   */
                                                 /* software error            */
#define TPT_ERR_SWFLUSH                     0xC  /* SW FLUSHED                */
#define TPT_ERR_CRC                         0x10 /* CRC error                 */
#define TPT_ERR_MARKER                      0x11 /* Marker error              */
#define TPT_ERR_PDU_LEN_ERR                 0x12 /* invalid PDU length        */
#define TPT_ERR_OUT_OF_RQE                  0x13 /* out of RQE                */
#define TPT_ERR_DDP_VERSION                 0x14 /* wrong DDP version         */
#define TPT_ERR_RDMA_VERSION                0x15 /* wrong RDMA version        */
#define TPT_ERR_OPCODE                      0x16 /* invalid rdma opcode       */
#define TPT_ERR_DDP_QUEUE_NUM               0x17 /* invalid ddp queue number  */
#define TPT_ERR_MSN                         0x18 /* MSN error                 */
#define TPT_ERR_TBIT                        0x19 /* tag bit not set correctly */
#define TPT_ERR_MO                          0x1A /* MO not 0 for TERMINATE    */
                                                 /* or READ_REQ               */
#define TPT_ERR_MSN_GAP                     0x1B
#define TPT_ERR_MSN_RANGE                   0x1C
#define TPT_ERR_IRD_OVERFLOW                0x1D
#define TPT_ERR_RQE_ADDR_BOUND              0x1E /* RQE addr out of bounds:   */
                                                 /* software error            */
#define TPT_ERR_INTERNAL_ERR                0x1F /* internal error (opcode    */
                                                 /* mismatch)                 */

/* Software-side bookkeeping for one pending SQ work request. */
struct t3_swsq {
    __u64            wr_id;      /* caller's work-request cookie */
    struct t3_cqe    cqe;        /* completion cached by poll path */
    __u32            sq_wptr;    /* SQ write pointer at post time */
    __be32           read_len;
    int              opcode;
    int              complete;
    int              signaled;
};

/*
 * A T3 WQ implements both the SQ and RQ.
 */
struct t3_wq {
    union t3_wr *queue;             /* DMA accessible memory */
    dma_addr_t dma_addr;            /* DMA address for HW */
    DECLARE_PCI_UNMAP_ADDR(mapping) /* unmap cruft */
    u32 error;                      /* 1 once we go to ERROR */
    u32 qpid;
    u32 wptr;                       /* idx to next available WR slot */
    u32 size_log2;                  /* total wq size */
    struct t3_swsq *sq;             /* SW SQ */
    struct t3_swsq *oldest_read;    /* tracks oldest pending read */
    u32 sq_wptr;                    /* sq_wptr - sq_rptr == count of */
    u32 sq_rptr;                    /* pending wrs */
    u32 sq_size_log2;               /* sq size */
    u64 *rq;                        /* SW RQ (holds consumer wr_ids */
    u32 rq_wptr;                    /* rq_wptr - rq_rptr == count of */
    u32 rq_rptr;                    /* pending wrs */
    u64 *rq_oldest_wr;              /* oldest wr on the SW RQ */
    u32 rq_size_log2;               /* rq size */
    u32 rq_addr;                    /* rq adapter address */
    void __iomem *doorbell;         /* kernel db */
    u64 udb;                        /* user db if any */
};

/* Completion queue: a HW ring plus a SW overflow/flush queue. */
struct t3_cq {
    u32 cqid;
    u32 rptr;
    u32 wptr;
    u32 size_log2;
    dma_addr_t dma_addr;
    DECLARE_PCI_UNMAP_ADDR(mapping)
    struct t3_cqe *queue;           /* HW CQE ring */
    struct t3_cqe *sw_queue;        /* SW-generated CQEs (e.g. flushes) */
    u32 sw_rptr;
    u32 sw_wptr;
};

/* A HW CQE at 'ptr' is valid when its gen bit matches the expected
 * generation for this pass over the ring. */
#define CQ_VLD_ENTRY(ptr,size_log2,cqe) (Q_GENBIT(ptr,size_log2) == \
                                         CQE_GENBIT(*cqe))

/* Mark the WQ as in error by setting flit 13 of the first WR slot
 * (NOTE(review): flit 13 matches T3_RQ_COOKIE_FLIT — presumably the
 * error cookie location polled elsewhere; confirm with the poll path). */
static inline void cxio_set_wq_in_error(struct t3_wq *wq)
{
    wq->queue->flit[13] = 1;
}

/* Return the next valid HW CQE at the current read pointer, or NULL if
 * the entry's gen bit says the ring is empty. Does not advance rptr. */
static inline struct t3_cqe *cxio_next_hw_cqe(struct t3_cq *cq)
{
    struct t3_cqe *cqe;

    cqe = cq->queue + (Q_PTR2IDX(cq->rptr, cq->size_log2));
    if (CQ_VLD_ENTRY(cq->rptr, cq->size_log2, cqe))
        return cqe;
    return NULL;
}

/* Return the next SW-queue CQE, or NULL if the SW queue is empty.
 * Does not advance sw_rptr. */
static inline struct t3_cqe *cxio_next_sw_cqe(struct t3_cq *cq)
{
    struct t3_cqe *cqe;

    if (!Q_EMPTY(cq->sw_rptr, cq->sw_wptr)) {
        cqe = cq->sw_queue + (Q_PTR2IDX(cq->sw_rptr, cq->size_log2));
        return cqe;
    }
    return NULL;
}

/* Return the next CQE to consume, preferring the SW queue over the HW
 * ring; NULL when both are empty. Does not advance either pointer. */
static inline struct t3_cqe *cxio_next_cqe(struct t3_cq *cq)
{
    struct t3_cqe *cqe;

    if (!Q_EMPTY(cq->sw_rptr, cq->sw_wptr)) {
        cqe = cq->sw_queue + (Q_PTR2IDX(cq->sw_rptr, cq->size_log2));
        return cqe;
    }
    cqe = cq->queue + (Q_PTR2IDX(cq->rptr, cq->size_log2));
    if (CQ_VLD_ENTRY(cq->rptr, cq->size_log2, cqe))
        return cqe;
    return NULL;
}

#endif /* include guard; opening #ifndef is outside this chunk */
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -