/*
 * ipath_verbs.h — PathScale InfiniPath InfiniBand verbs definitions.
 * Source: Linux 2.6.17.4.  C header, 694 lines total; this capture is
 * page 1 of 2 from a code-viewer export (viewer chrome translated).
 */
/*
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef IPATH_VERBS_H
#define IPATH_VERBS_H

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <rdma/ib_pack.h>

#include "ipath_layer.h"
#include "verbs_debug.h"

/*
 * QPNs are 24 bits.  QPNMAP_ENTRIES is the number of PAGE_SIZE chunks
 * needed for a QPN_MAX-bit map (a QPN allocation bitmap, presumably —
 * the user of the map is not visible in this header).
 */
#define QPN_MAX                 (1 << 24)
#define QPNMAP_ENTRIES          (QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)

/*
 * Increment this value if any changes that break userspace ABI
 * compatibility are made.
 */
#define IPATH_UVERBS_ABI_VERSION 1

/*
 * Define an ib_cq_notify value that is not valid so we know when CQ
 * notifications are armed.
 */
#define IB_CQ_NONE	(IB_CQ_NEXT_COMP + 1)

/* RNR NAK and NAK error codes (see the IB spec AETH syndrome values). */
#define IB_RNR_NAK			0x20
#define IB_NAK_PSN_ERROR		0x60
#define IB_NAK_INVALID_REQUEST		0x61
#define IB_NAK_REMOTE_ACCESS_ERROR	0x62
#define IB_NAK_REMOTE_OPERATIONAL_ERROR 0x63
#define IB_NAK_INVALID_RD_REQUEST	0x64

/* Bitmask values (powers of two) gating which QP operations are allowed. */
#define IPATH_POST_SEND_OK		0x01
#define IPATH_POST_RECV_OK		0x02
#define IPATH_PROCESS_RECV_OK		0x04
#define IPATH_PROCESS_SEND_OK		0x08

/* IB Performance Manager status values */
#define IB_PMA_SAMPLE_STATUS_DONE	0x00
#define IB_PMA_SAMPLE_STATUS_STARTED	0x01
#define IB_PMA_SAMPLE_STATUS_RUNNING	0x02

/* Mandatory IB performance counter select values. */
#define IB_PMA_PORT_XMIT_DATA	__constant_htons(0x0001)
#define IB_PMA_PORT_RCV_DATA	__constant_htons(0x0002)
#define IB_PMA_PORT_XMIT_PKTS	__constant_htons(0x0003)
#define IB_PMA_PORT_RCV_PKTS	__constant_htons(0x0004)
#define IB_PMA_PORT_XMIT_WAIT	__constant_htons(0x0005)

/*
 * On-the-wire header fragments: big-endian (__be*) fields, packed so
 * the struct layout matches the wire format exactly.
 */
struct ib_reth {
	__be64 vaddr;
	__be32 rkey;
	__be32 length;
} __attribute__ ((packed));

struct ib_atomic_eth {
	__be64 vaddr;
	__be32 rkey;
	__be64 swap_data;
	__be64 compare_data;
} __attribute__ ((packed));

/*
 * The transport headers following the LRH/GRH: a 12-byte BTH plus the
 * per-opcode extension headers, overlaid in a union.
 */
struct ipath_other_headers {
	__be32 bth[3];
	union {
		struct {
			__be32 deth[2];
			__be32 imm_data;
		} ud;
		struct {
			struct ib_reth reth;
			__be32 imm_data;
		} rc;
		struct {
			__be32 aeth;
			__be64 atomic_ack_eth;
		} at;
		__be32 imm_data;
		__be32 aeth;
		struct ib_atomic_eth atomic_eth;
	} u;
} __attribute__ ((packed));

/*
 * Note that UD packets with a GRH header are 8+40+12+8 = 68 bytes
 * long (72 w/ imm_data).  Only the first 56 bytes of the IB header
 * will be in the eager header buffer.  The remaining 12 or 16 bytes
 * are in the data buffer.
 */
struct ipath_ib_header {
	__be16 lrh[4];
	union {
		struct {
			struct ib_grh grh;
			struct ipath_other_headers oth;
		} l;
		struct ipath_other_headers oth;
	} u;
} __attribute__ ((packed));

/*
 * There is one struct ipath_mcast for each multicast GID.
 * All attached QPs are then stored as a list of
 * struct ipath_mcast_qp.
 */
struct ipath_mcast_qp {
	struct list_head list;	/* entry on ipath_mcast.qp_list */
	struct ipath_qp *qp;	/* the attached QP */
};

struct ipath_mcast {
	struct rb_node rb_node;		/* node in the per-device mcast tree */
	union ib_gid mgid;		/* the multicast GID */
	struct list_head qp_list;	/* list of struct ipath_mcast_qp */
	wait_queue_head_t wait;		/* sleepers waiting on refcount */
	atomic_t refcount;
};

/* Memory region */
struct ipath_mr {
	struct ib_mr ibmr;
	struct ipath_mregion mr;	/* must be last */
};

/* Fast memory region */
struct ipath_fmr {
	struct ib_fmr ibfmr;
	u8 page_shift;
	struct ipath_mregion mr;	/* must be last */
};

/* Protection domain */
struct ipath_pd {
	struct ib_pd ibpd;
	int user;		/* non-zero if created from user space */
};

/* Address Handle */
struct ipath_ah {
	struct ib_ah ibah;
	struct ib_ah_attr attr;
};

/*
 * Quick description of our CQ/QP locking scheme:
 *
 * We have one global lock that protects dev->cq/qp_table.  Each
 * struct ipath_cq/qp also has its own lock.  An individual qp lock
 * may be taken inside of an individual cq lock.  Both cqs attached to
 * a qp may be locked, with the send cq locked first.  No other
 * nesting should be done.
 *
 * Each struct ipath_cq/qp also has an atomic_t ref count.  The
 * pointer from the cq/qp_table to the struct counts as one reference.
 * This reference also is good for access through the consumer API, so
 * modifying the CQ/QP etc doesn't need to take another reference.
 * Access because of a completion being polled does need a reference.
 *
 * Finally, each struct ipath_cq/qp has a wait_queue_head_t for the
 * destroy function to sleep on.
 *
 * This means that access from the consumer API requires nothing but
 * taking the struct's lock.
* * Access because of a completion event should go as follows: * - lock cq/qp_table and look up struct * - increment ref count in struct * - drop cq/qp_table lock * - lock struct, do your thing, and unlock struct * - decrement ref count; if zero, wake up waiters * * To destroy a CQ/QP, we can do the following: * - lock cq/qp_table, remove pointer, unlock cq/qp_table lock * - decrement ref count * - wait_event until ref count is zero * * It is the consumer's responsibilty to make sure that no QP * operations (WQE posting or state modification) are pending when the * QP is destroyed. Also, the consumer must make sure that calls to * qp_modify are serialized. * * Possible optimizations (wait for profile data to see if/where we * have locks bouncing between CPUs): * - split cq/qp table lock into n separate (cache-aligned) locks, * indexed (say) by the page in the table */struct ipath_cq { struct ib_cq ibcq; struct tasklet_struct comptask; spinlock_t lock; u8 notify; u8 triggered; u32 head; /* new records added to the head */ u32 tail; /* poll_cq() reads from here. */ struct ib_wc *queue; /* this is actually ibcq.cqe + 1 */};/* * Send work request queue entry. * The size of the sg_list is determined when the QP is created and stored * in qp->s_max_sge. */struct ipath_swqe { struct ib_send_wr wr; /* don't use wr.sg_list */ u32 psn; /* first packet sequence number */ u32 lpsn; /* last packet sequence number */ u32 ssn; /* send sequence number */ u32 length; /* total length of data in sg_list */ struct ipath_sge sg_list[0];};/* * Receive work request queue entry. * The size of the sg_list is determined when the QP is created and stored * in qp->r_max_sge. */struct ipath_rwqe { u64 wr_id; u32 length; /* total length of data in sg_list */ u8 num_sge; struct ipath_sge sg_list[0];};struct ipath_rq { spinlock_t lock; u32 head; /* new work requests posted to the head */ u32 tail; /* receives pull requests from here. 
*/ u32 size; /* size of RWQE array */ u8 max_sge; struct ipath_rwqe *wq; /* RWQE array */};struct ipath_srq { struct ib_srq ibsrq; struct ipath_rq rq; /* send signal when number of RWQEs < limit */ u32 limit;};/* * Variables prefixed with s_ are for the requester (sender). * Variables prefixed with r_ are for the responder (receiver). * Variables prefixed with ack_ are for responder replies. * * Common variables are protected by both r_rq.lock and s_lock in that order * which only happens in modify_qp() or changing the QP 'state'. */struct ipath_qp { struct ib_qp ibqp; struct ipath_qp *next; /* link list for QPN hash table */ struct ipath_qp *timer_next; /* link list for ipath_ib_timer() */ struct list_head piowait; /* link for wait PIO buf */ struct list_head timerwait; /* link for waiting for timeouts */ struct ib_ah_attr remote_ah_attr; struct ipath_ib_header s_hdr; /* next packet header to send */ atomic_t refcount; wait_queue_head_t wait; struct tasklet_struct s_task; struct ipath_sge_state *s_cur_sge; struct ipath_sge_state s_sge; /* current send request data */ /* current RDMA read send data */ struct ipath_sge_state s_rdma_sge; struct ipath_sge_state r_sge; /* current receive data */ spinlock_t s_lock; unsigned long s_flags; u32 s_hdrwords; /* size of s_hdr in 32 bit words */ u32 s_cur_size; /* size of send packet in bytes */ u32 s_len; /* total length of s_sge */ u32 s_rdma_len; /* total length of s_rdma_sge */ u32 s_next_psn; /* PSN for next request */ u32 s_last_psn; /* last response PSN processed */ u32 s_psn; /* current packet sequence number */ u32 s_rnr_timeout; /* number of milliseconds for RNR timeout */ u32 s_ack_psn; /* PSN for next ACK or RDMA_READ */ u64 s_ack_atomic; /* data for atomic ACK */ u64 r_wr_id; /* ID for current receive WQE */ u64 r_atomic_data; /* data for last atomic op */ u32 r_atomic_psn; /* PSN of last atomic op */ u32 r_len; /* total length of r_sge */ u32 r_rcv_len; /* receive data len processed */ u32 r_psn; /* expected rcv 
packet sequence number */ u8 state; /* QP state */ u8 s_state; /* opcode of last packet sent */ u8 s_ack_state; /* opcode of packet to ACK */ u8 s_nak_state; /* non-zero if NAK is pending */ u8 r_state; /* opcode of last packet received */ u8 r_reuse_sge; /* for UC receive errors */ u8 r_sge_inx; /* current index into sg_list */ u8 s_max_sge; /* size of s_wq->sg_list */ u8 qp_access_flags; u8 s_retry_cnt; /* number of times to retry */ u8 s_rnr_retry_cnt; u8 s_min_rnr_timer; u8 s_retry; /* requester retry counter */ u8 s_rnr_retry; /* requester RNR retry counter */ u8 s_pkey_index; /* PKEY index to use */ enum ib_mtu path_mtu; atomic_t msn; /* message sequence number */ u32 remote_qpn; u32 qkey; /* QKEY for this QP (for UD or RD) */ u32 s_size; /* send work queue size */ u32 s_head; /* new entries added here */ u32 s_tail; /* next entry to process */ u32 s_cur; /* current work queue entry */ u32 s_last; /* last un-ACK'ed entry */ u32 s_ssn; /* SSN of tail entry */ u32 s_lsn; /* limit sequence number (credit) */ struct ipath_swqe *s_wq; /* send work queue */ struct ipath_rq r_rq; /* receive work queue */};/*
 * (Code-viewer chrome, translated from Chinese — not part of the source:)
 * Keyboard shortcuts:
 *   Copy code:          Ctrl + C
 *   Search code:        Ctrl + F
 *   Full-screen mode:   F11
 *   Increase font size: Ctrl + =
 *   Decrease font size: Ctrl + -
 *   Show shortcuts:     ?