/* c2_wr.h */
union c2wr_pd_alloc {
	struct c2wr_pd_alloc_req req;
	struct c2wr_pd_alloc_rep rep;
} __attribute__((packed));

struct c2wr_pd_dealloc_req {
	struct c2wr_hdr hdr;
	u32 rnic_handle;
	u32 pd_id;
} __attribute__((packed));

struct c2wr_pd_dealloc_rep {
	struct c2wr_hdr hdr;
} __attribute__((packed));

union c2wr_pd_dealloc {
	struct c2wr_pd_dealloc_req req;
	struct c2wr_pd_dealloc_rep rep;
} __attribute__((packed));

/*
 *------------------------ SRQ ------------------------
 */

struct c2wr_srq_create_req {
	struct c2wr_hdr hdr;
	u64 shared_ht;
	u64 user_context;
	u32 rnic_handle;
	u32 srq_depth;
	u32 srq_limit;
	u32 sgl_depth;
	u32 pd_id;
} __attribute__((packed));

struct c2wr_srq_create_rep {
	struct c2wr_hdr hdr;
	u32 srq_depth;
	u32 sgl_depth;
	u32 msg_size;
	u32 mq_index;
	u32 mq_start;
	u32 srq_handle;
} __attribute__((packed));

union c2wr_srq_create {
	struct c2wr_srq_create_req req;
	struct c2wr_srq_create_rep rep;
} __attribute__((packed));

struct c2wr_srq_destroy_req {
	struct c2wr_hdr hdr;
	u32 rnic_handle;
	u32 srq_handle;
} __attribute__((packed));

struct c2wr_srq_destroy_rep {
	struct c2wr_hdr hdr;
} __attribute__((packed));

union c2wr_srq_destroy {
	struct c2wr_srq_destroy_req req;
	struct c2wr_srq_destroy_rep rep;
} __attribute__((packed));

/*
 *------------------------ QP ------------------------
 */

enum c2wr_qp_flags {
	QP_RDMA_READ = 0x00000001,		/* RDMA read enabled? */
	QP_RDMA_WRITE = 0x00000002,		/* RDMA write enabled? */
	QP_MW_BIND = 0x00000004,		/* MWs enabled */
	QP_ZERO_STAG = 0x00000008,		/* enabled? */
	QP_REMOTE_TERMINATION = 0x00000010,	/* remote end terminated */
	QP_RDMA_READ_RESPONSE = 0x00000020	/* Remote RDMA read enabled? */
};

struct c2wr_qp_create_req {
	struct c2wr_hdr hdr;
	u64 shared_sq_ht;
	u64 shared_rq_ht;
	u64 user_context;
	u32 rnic_handle;
	u32 sq_cq_handle;
	u32 rq_cq_handle;
	u32 sq_depth;
	u32 rq_depth;
	u32 srq_handle;
	u32 srq_limit;
	u32 flags;		/* see enum c2wr_qp_flags */
	u32 send_sgl_depth;
	u32 recv_sgl_depth;
	u32 rdma_write_sgl_depth;
	u32 ord;
	u32 ird;
	u32 pd_id;
} __attribute__((packed));

struct c2wr_qp_create_rep {
	struct c2wr_hdr hdr;
	u32 sq_depth;
	u32 rq_depth;
	u32 send_sgl_depth;
	u32 recv_sgl_depth;
	u32 rdma_write_sgl_depth;
	u32 ord;
	u32 ird;
	u32 sq_msg_size;
	u32 sq_mq_index;
	u32 sq_mq_start;
	u32 rq_msg_size;
	u32 rq_mq_index;
	u32 rq_mq_start;
	u32 qp_handle;
} __attribute__((packed));

union c2wr_qp_create {
	struct c2wr_qp_create_req req;
	struct c2wr_qp_create_rep rep;
} __attribute__((packed));

struct c2wr_qp_query_req {
	struct c2wr_hdr hdr;
	u32 rnic_handle;
	u32 qp_handle;
} __attribute__((packed));

struct c2wr_qp_query_rep {
	struct c2wr_hdr hdr;
	u64 user_context;
	u32 rnic_handle;
	u32 sq_depth;
	u32 rq_depth;
	u32 send_sgl_depth;
	u32 rdma_write_sgl_depth;
	u32 recv_sgl_depth;
	u32 ord;
	u32 ird;
	u16 qp_state;
	u16 flags;			/* see c2wr_qp_flags_t */
	u32 qp_id;
	u32 local_addr;
	u32 remote_addr;
	u16 local_port;
	u16 remote_port;
	u32 terminate_msg_length;	/* 0 if not present */
	u8 data[0];			/* Terminate Message in-line here. */
} __attribute__((packed));
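/*
 * Illustrative sketch (not part of the original header): one plausible way a
 * host driver could populate the QP create request defined above.  How the
 * embedded struct c2wr_hdr is initialized, any byte-order conversion, and the
 * actual posting of the WR are driver-specific and deliberately omitted; the
 * function name and all numeric values are hypothetical examples only.
 */
static inline void c2wr_example_fill_qp_create(struct c2wr_qp_create_req *wr,
					       u32 rnic_handle, u32 pd_id,
					       u32 sq_cq, u32 rq_cq)
{
	wr->rnic_handle = rnic_handle;
	wr->pd_id = pd_id;
	wr->sq_cq_handle = sq_cq;
	wr->rq_cq_handle = rq_cq;
	wr->sq_depth = 128;		/* example work-queue depths */
	wr->rq_depth = 128;
	wr->send_sgl_depth = 4;		/* example SGE limits */
	wr->recv_sgl_depth = 4;
	wr->rdma_write_sgl_depth = 4;
	wr->ord = 1;			/* example RDMA read depths */
	wr->ird = 1;
	wr->srq_handle = 0;		/* no SRQ in this example */
	wr->srq_limit = 0;
	wr->flags = QP_RDMA_READ | QP_RDMA_WRITE;	/* see enum c2wr_qp_flags */
	/* user_context and the shared host-table fields are left to the caller. */
}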
union c2wr_qp_query {
	struct c2wr_qp_query_req req;
	struct c2wr_qp_query_rep rep;
} __attribute__((packed));

struct c2wr_qp_modify_req {
	struct c2wr_hdr hdr;
	u64 stream_msg;
	u32 stream_msg_length;
	u32 rnic_handle;
	u32 qp_handle;
	u32 next_qp_state;
	u32 ord;
	u32 ird;
	u32 sq_depth;
	u32 rq_depth;
	u32 llp_ep_handle;
} __attribute__((packed));

struct c2wr_qp_modify_rep {
	struct c2wr_hdr hdr;
	u32 ord;
	u32 ird;
	u32 sq_depth;
	u32 rq_depth;
	u32 sq_msg_size;
	u32 sq_mq_index;
	u32 sq_mq_start;
	u32 rq_msg_size;
	u32 rq_mq_index;
	u32 rq_mq_start;
} __attribute__((packed));

union c2wr_qp_modify {
	struct c2wr_qp_modify_req req;
	struct c2wr_qp_modify_rep rep;
} __attribute__((packed));

struct c2wr_qp_destroy_req {
	struct c2wr_hdr hdr;
	u32 rnic_handle;
	u32 qp_handle;
} __attribute__((packed));

struct c2wr_qp_destroy_rep {
	struct c2wr_hdr hdr;
} __attribute__((packed));

union c2wr_qp_destroy {
	struct c2wr_qp_destroy_req req;
	struct c2wr_qp_destroy_rep rep;
} __attribute__((packed));

/*
 * The CCWR_QP_CONNECT msg is posted on the verbs request queue.  It can
 * only be posted when a QP is in IDLE state.  After the connect request is
 * submitted to the LLP, the adapter moves the QP to CONNECT_PENDING state.
 * No synchronous reply from adapter to this WR.  The results of the
 * connection are passed back in an async event CCAE_ACTIVE_CONNECT_RESULTS.
 * See c2wr_ae_active_connect_results_t.
 */
struct c2wr_qp_connect_req {
	struct c2wr_hdr hdr;
	u32 rnic_handle;
	u32 qp_handle;
	u32 remote_addr;
	u16 remote_port;
	u16 pad;
	u32 private_data_length;
	u8 private_data[0];	/* Private data in-line. */
} __attribute__((packed));

struct c2wr_qp_connect {
	struct c2wr_qp_connect_req req;
	/* no synchronous reply. */
} __attribute__((packed));

/*
 *------------------------ MM ------------------------
 */

struct c2wr_nsmr_stag_alloc_req {
	struct c2wr_hdr hdr;
	u32 rnic_handle;
	u32 pbl_depth;
	u32 pd_id;
	u32 flags;
} __attribute__((packed));

struct c2wr_nsmr_stag_alloc_rep {
	struct c2wr_hdr hdr;
	u32 pbl_depth;
	u32 stag_index;
} __attribute__((packed));

union c2wr_nsmr_stag_alloc {
	struct c2wr_nsmr_stag_alloc_req req;
	struct c2wr_nsmr_stag_alloc_rep rep;
} __attribute__((packed));

struct c2wr_nsmr_register_req {
	struct c2wr_hdr hdr;
	u64 va;
	u32 rnic_handle;
	u16 flags;
	u8 stag_key;
	u8 pad;
	u32 pd_id;
	u32 pbl_depth;
	u32 pbe_size;
	u32 fbo;
	u32 length;
	u32 addrs_length;
	/* array of paddrs (must be aligned on a 64bit boundary) */
	u64 paddrs[0];
} __attribute__((packed));

struct c2wr_nsmr_register_rep {
	struct c2wr_hdr hdr;
	u32 pbl_depth;
	u32 stag_index;
} __attribute__((packed));

union c2wr_nsmr_register {
	struct c2wr_nsmr_register_req req;
	struct c2wr_nsmr_register_rep rep;
} __attribute__((packed));

struct c2wr_nsmr_pbl_req {
	struct c2wr_hdr hdr;
	u32 rnic_handle;
	u32 flags;
	u32 stag_index;
	u32 addrs_length;
	/* array of paddrs (must be aligned on a 64bit boundary) */
	u64 paddrs[0];
} __attribute__((packed));

struct c2wr_nsmr_pbl_rep {
	struct c2wr_hdr hdr;
} __attribute__((packed));

union c2wr_nsmr_pbl {
	struct c2wr_nsmr_pbl_req req;
	struct c2wr_nsmr_pbl_rep rep;
} __attribute__((packed));

struct c2wr_mr_query_req {
	struct c2wr_hdr hdr;
	u32 rnic_handle;
	u32 stag_index;
} __attribute__((packed));

struct c2wr_mr_query_rep {
	struct c2wr_hdr hdr;
	u8 stag_key;
	u8 pad[3];
	u32 pd_id;
	u32 flags;
	u32 pbl_depth;
} __attribute__((packed));

union c2wr_mr_query {
	struct c2wr_mr_query_req req;
	struct c2wr_mr_query_rep rep;
} __attribute__((packed));
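/*
 * Illustrative sketch (not part of the original header): the NSMR register
 * request above ends in a zero-length paddrs[] array, so the message that is
 * actually sent is the fixed part of the struct plus addrs_length 64-bit
 * physical addresses.  A size calculation along these lines is an assumed
 * usage pattern, not driver code; per-message size limits and any follow-up
 * CCWR_NSMR_PBL messages for large page-buffer lists are not shown.
 */
static inline u32 c2wr_example_nsmr_register_len(u32 addrs_length)
{
	return (u32) (sizeof(struct c2wr_nsmr_register_req) +
		      addrs_length * sizeof(u64));
}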
struct c2wr_mw_query_req {
	struct c2wr_hdr hdr;
	u32 rnic_handle;
	u32 stag_index;
} __attribute__((packed));

struct c2wr_mw_query_rep {
	struct c2wr_hdr hdr;
	u8 stag_key;
	u8 pad[3];
	u32 pd_id;
	u32 flags;
} __attribute__((packed));

union c2wr_mw_query {
	struct c2wr_mw_query_req req;
	struct c2wr_mw_query_rep rep;
} __attribute__((packed));

struct c2wr_stag_dealloc_req {
	struct c2wr_hdr hdr;
	u32 rnic_handle;
	u32 stag_index;
} __attribute__((packed));

struct c2wr_stag_dealloc_rep {
	struct c2wr_hdr hdr;
} __attribute__((packed));

union c2wr_stag_dealloc {
	struct c2wr_stag_dealloc_req req;
	struct c2wr_stag_dealloc_rep rep;
} __attribute__((packed));

struct c2wr_nsmr_reregister_req {
	struct c2wr_hdr hdr;
	u64 va;
	u32 rnic_handle;
	u16 flags;
	u8 stag_key;
	u8 pad;
	u32 stag_index;
	u32 pd_id;
	u32 pbl_depth;
	u32 pbe_size;
	u32 fbo;
	u32 length;
	u32 addrs_length;
	u32 pad1;
	/* array of paddrs (must be aligned on a 64bit boundary) */
	u64 paddrs[0];
} __attribute__((packed));

struct c2wr_nsmr_reregister_rep {
	struct c2wr_hdr hdr;
	u32 pbl_depth;
	u32 stag_index;
} __attribute__((packed));

union c2wr_nsmr_reregister {
	struct c2wr_nsmr_reregister_req req;
	struct c2wr_nsmr_reregister_rep rep;
} __attribute__((packed));

struct c2wr_smr_register_req {
	struct c2wr_hdr hdr;
	u64 va;
	u32 rnic_handle;
	u16 flags;
	u8 stag_key;
	u8 pad;
	u32 stag_index;
	u32 pd_id;
} __attribute__((packed));

struct c2wr_smr_register_rep {
	struct c2wr_hdr hdr;
	u32 stag_index;
} __attribute__((packed));

union c2wr_smr_register {
	struct c2wr_smr_register_req req;
	struct c2wr_smr_register_rep rep;
} __attribute__((packed));

struct c2wr_mw_alloc_req {
	struct c2wr_hdr hdr;
	u32 rnic_handle;
	u32 pd_id;
} __attribute__((packed));

struct c2wr_mw_alloc_rep {
	struct c2wr_hdr hdr;
	u32 stag_index;
} __attribute__((packed));

union c2wr_mw_alloc {
	struct c2wr_mw_alloc_req req;
	struct c2wr_mw_alloc_rep rep;
} __attribute__((packed));

/*
 *------------------------ WRs -----------------------
 */

struct c2wr_user_hdr {
	struct c2wr_hdr hdr;	/* Has status and WR Type */
} __attribute__((packed));

enum c2_qp_state {
	C2_QP_STATE_IDLE = 0x01,
	C2_QP_STATE_CONNECTING = 0x02,
	C2_QP_STATE_RTS = 0x04,
	C2_QP_STATE_CLOSING = 0x08,
	C2_QP_STATE_TERMINATE = 0x10,
	C2_QP_STATE_ERROR = 0x20,
};

/* Completion queue entry. */
struct c2wr_ce {
	struct c2wr_hdr hdr;	/* Has status and WR Type */
	u64 qp_user_context;	/* c2_user_qp_t * */
	u32 qp_state;		/* Current QP State */
	u32 handle;		/* QPID or EP Handle */
	u32 bytes_rcvd;		/* valid for RECV WCs */
	u32 stag;
} __attribute__((packed));

/*
 * Flags used for all post-sq WRs.  These must fit in the flags
 * field of the struct c2wr_hdr (eight bits).
 */
enum {
	SQ_SIGNALED = 0x01,
	SQ_READ_FENCE = 0x02,
	SQ_FENCE = 0x04,
};

/*
 * Common fields for all post-sq WRs.  Namely the standard header and a
 * secondary header with fields common to all post-sq WRs.
 */
struct c2_sq_hdr {
	struct c2wr_user_hdr user_hdr;
} __attribute__((packed));

/*
 * Same as above but for post-rq WRs.
 */
struct c2_rq_hdr {
	struct c2wr_user_hdr user_hdr;
} __attribute__((packed));

/*
 * use the same struct for all sends.
 */
struct c2wr_send_req {
	struct c2_sq_hdr sq_hdr;
	u32 sge_len;
	u32 remote_stag;
	u8 data[0];		/* SGE array */
} __attribute__((packed));

union c2wr_send {
	struct c2wr_send_req req;
	struct c2wr_ce rep;
} __attribute__((packed));

struct c2wr_rdma_write_req {
	struct c2_sq_hdr sq_hdr;
	u64 remote_to;
	u32 remote_stag;
	u32 sge_len;
	u8 data[0];		/* SGE array */
} __attribute__((packed));

union c2wr_rdma_write {
	struct c2wr_rdma_write_req req;
	struct c2wr_ce rep;
} __attribute__((packed));

struct c2wr_rdma_read_req {
	struct c2_sq_hdr sq_hdr;
	u64 local_to;
	u64 remote_to;
	u32 local_stag;
	u32 remote_stag;
	u32 length;
} __attribute__((packed));
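/*
 * Illustrative sketch (not part of the original header): minimal handling of
 * a completion entry as laid out by struct c2wr_ce above.  Recovering the
 * host-side QP object from qp_user_context and extracting the status/WR type
 * from the embedded c2wr_hdr are driver-specific; the c2_example_qp type and
 * the function itself are hypothetical and exist only for illustration.
 */
struct c2_example_qp;			/* hypothetical host-side QP object */

static inline void c2wr_example_poll_one(const struct c2wr_ce *ce)
{
	struct c2_example_qp *qp =
		(struct c2_example_qp *) (unsigned long) ce->qp_user_context;

	if (ce->qp_state == C2_QP_STATE_ERROR) {
		/* QP has moved to ERROR; flush or tear down as appropriate. */
		(void) qp;
		return;
	}

	/* For receive completions, bytes_rcvd holds the payload length. */
	(void) ce->bytes_rcvd;
	(void) ce->handle;		/* QP ID or endpoint handle */
}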