c2_qp.c
/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/delay.h>

#include "c2.h"
#include "c2_vq.h"
#include "c2_status.h"

#define C2_MAX_ORD_PER_QP 128
#define C2_MAX_IRD_PER_QP 128

#define C2_HINT_MAKE(q_index, hint_count) (((q_index) << 16) | hint_count)
#define C2_HINT_GET_INDEX(hint) (((hint) & 0x7FFF0000) >> 16)
#define C2_HINT_GET_COUNT(hint) ((hint) & 0x0000FFFF)

#define NO_SUPPORT -1
static const u8 c2_opcode[] = {
	[IB_WR_SEND] = C2_WR_TYPE_SEND,
	[IB_WR_SEND_WITH_IMM] = NO_SUPPORT,
	[IB_WR_RDMA_WRITE] = C2_WR_TYPE_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM] = NO_SUPPORT,
	[IB_WR_RDMA_READ] = C2_WR_TYPE_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP] = NO_SUPPORT,
	[IB_WR_ATOMIC_FETCH_AND_ADD] = NO_SUPPORT,
};

static int to_c2_state(enum ib_qp_state ib_state)
{
	switch (ib_state) {
	case IB_QPS_RESET:
		return C2_QP_STATE_IDLE;
	case IB_QPS_RTS:
		return C2_QP_STATE_RTS;
	case IB_QPS_SQD:
		return C2_QP_STATE_CLOSING;
	case IB_QPS_SQE:
		return C2_QP_STATE_CLOSING;
	case IB_QPS_ERR:
		return C2_QP_STATE_ERROR;
	default:
		return -1;
	}
}

static int to_ib_state(enum c2_qp_state c2_state)
{
	switch (c2_state) {
	case C2_QP_STATE_IDLE:
		return IB_QPS_RESET;
	case C2_QP_STATE_CONNECTING:
		return IB_QPS_RTR;
	case C2_QP_STATE_RTS:
		return IB_QPS_RTS;
	case C2_QP_STATE_CLOSING:
		return IB_QPS_SQD;
	case C2_QP_STATE_ERROR:
		return IB_QPS_ERR;
	case C2_QP_STATE_TERMINATE:
		return IB_QPS_SQE;
	default:
		return -1;
	}
}

static const char *to_ib_state_str(int ib_state)
{
	static const char *state_str[] = {
		"IB_QPS_RESET",
		"IB_QPS_INIT",
		"IB_QPS_RTR",
		"IB_QPS_RTS",
		"IB_QPS_SQD",
		"IB_QPS_SQE",
		"IB_QPS_ERR"
	};
	if (ib_state < IB_QPS_RESET ||
	    ib_state > IB_QPS_ERR)
		return "<invalid IB QP state>";

	ib_state -= IB_QPS_RESET;
	return state_str[ib_state];
}

void c2_set_qp_state(struct c2_qp *qp, int c2_state)
{
	int new_state = to_ib_state(c2_state);

	pr_debug("%s: qp[%p] state modify %s --> %s\n",
		 __FUNCTION__,
		 qp,
		 to_ib_state_str(qp->state),
		 to_ib_state_str(new_state));
	qp->state = new_state;
}

#define C2_QP_NO_ATTR_CHANGE 0xFFFFFFFF
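/*
 * c2_qp_modify() maps the requested IB QP state transition onto a
 * CCWR_QP_MODIFY verbs request, posts it to the adapter, and sleeps
 * until the reply arrives.  When forcing an RTS connection into the
 * ERROR state, it arms the verbs request to generate an
 * IW_CM_EVENT_CLOSE, since the adapter will not emit a close event
 * for a QP moved to ERROR by the host.
 */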
int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp,
		 struct ib_qp_attr *attr, int attr_mask)
{
	struct c2wr_qp_modify_req wr;
	struct c2wr_qp_modify_rep *reply;
	struct c2_vq_req *vq_req;
	unsigned long flags;
	u8 next_state;
	int err;

	pr_debug("%s:%d qp=%p, %s --> %s\n",
		 __FUNCTION__, __LINE__,
		 qp,
		 to_ib_state_str(qp->state),
		 to_ib_state_str(attr->qp_state));

	vq_req = vq_req_alloc(c2dev);
	if (!vq_req)
		return -ENOMEM;

	c2_wr_set_id(&wr, CCWR_QP_MODIFY);
	wr.hdr.context = (unsigned long) vq_req;
	wr.rnic_handle = c2dev->adapter_handle;
	wr.qp_handle = qp->adapter_handle;
	wr.ord = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
	wr.ird = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
	wr.sq_depth = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
	wr.rq_depth = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);

	if (attr_mask & IB_QP_STATE) {
		/* Ensure the state is valid */
		if (attr->qp_state < 0 || attr->qp_state > IB_QPS_ERR) {
			err = -EINVAL;
			goto bail0;
		}

		wr.next_qp_state = cpu_to_be32(to_c2_state(attr->qp_state));

		if (attr->qp_state == IB_QPS_ERR) {
			spin_lock_irqsave(&qp->lock, flags);
			if (qp->cm_id && qp->state == IB_QPS_RTS) {
				pr_debug("Generating CLOSE event for QP-->ERR, "
					 "qp=%p, cm_id=%p\n", qp, qp->cm_id);
				/* Generate a CLOSE event */
				vq_req->cm_id = qp->cm_id;
				vq_req->event = IW_CM_EVENT_CLOSE;
			}
			spin_unlock_irqrestore(&qp->lock, flags);
		}
		next_state = attr->qp_state;

	} else if (attr_mask & IB_QP_CUR_STATE) {

		if (attr->cur_qp_state != IB_QPS_RTR &&
		    attr->cur_qp_state != IB_QPS_RTS &&
		    attr->cur_qp_state != IB_QPS_SQD &&
		    attr->cur_qp_state != IB_QPS_SQE) {
			err = -EINVAL;
			goto bail0;
		} else
			wr.next_qp_state =
			    cpu_to_be32(to_c2_state(attr->cur_qp_state));

		next_state = attr->cur_qp_state;

	} else {
		err = 0;
		goto bail0;
	}

	/* reference the request struct */
	vq_req_get(c2dev, vq_req);

	err = vq_send_wr(c2dev, (union c2wr *) &wr);
	if (err) {
		vq_req_put(c2dev, vq_req);
		goto bail0;
	}

	err = vq_wait_for_reply(c2dev, vq_req);
	if (err)
		goto bail0;

	reply = (struct c2wr_qp_modify_rep *) (unsigned long) vq_req->reply_msg;
	if (!reply) {
		err = -ENOMEM;
		goto bail0;
	}

	err = c2_errno(reply);
	if (!err)
		qp->state = next_state;
#ifdef DEBUG
	else
		pr_debug("%s: c2_errno=%d\n", __FUNCTION__, err);
#endif
	/*
	 * If we're going to error and generating the event here, then
	 * we need to remove the reference because there will be no
	 * close event generated by the adapter
	 */
	spin_lock_irqsave(&qp->lock, flags);
	if (vq_req->event == IW_CM_EVENT_CLOSE && qp->cm_id) {
		qp->cm_id->rem_ref(qp->cm_id);
		qp->cm_id = NULL;
	}
	spin_unlock_irqrestore(&qp->lock, flags);

	vq_repbuf_free(c2dev, reply);
bail0:
	vq_req_free(c2dev, vq_req);

	pr_debug("%s:%d qp=%p, cur_state=%s\n",
		 __FUNCTION__, __LINE__,
		 qp,
		 to_ib_state_str(qp->state));
	return err;
}
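/*
 * c2_qp_set_read_limits() changes only the QP's outbound (ord) and
 * inbound (ird) RDMA read limits; every other attribute in the modify
 * request is set to C2_QP_NO_ATTR_CHANGE so the adapter leaves it
 * untouched.
 */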
int c2_qp_set_read_limits(struct c2_dev *c2dev, struct c2_qp *qp,
			  int ord, int ird)
{
	struct c2wr_qp_modify_req wr;
	struct c2wr_qp_modify_rep *reply;
	struct c2_vq_req *vq_req;
	int err;

	vq_req = vq_req_alloc(c2dev);
	if (!vq_req)
		return -ENOMEM;

	c2_wr_set_id(&wr, CCWR_QP_MODIFY);
	wr.hdr.context = (unsigned long) vq_req;
	wr.rnic_handle = c2dev->adapter_handle;
	wr.qp_handle = qp->adapter_handle;
	wr.ord = cpu_to_be32(ord);
	wr.ird = cpu_to_be32(ird);
	wr.sq_depth = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
	wr.rq_depth = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
	wr.next_qp_state = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);

	/* reference the request struct */
	vq_req_get(c2dev, vq_req);

	err = vq_send_wr(c2dev, (union c2wr *) &wr);
	if (err) {
		vq_req_put(c2dev, vq_req);
		goto bail0;
	}

	err = vq_wait_for_reply(c2dev, vq_req);
	if (err)
		goto bail0;

	reply = (struct c2wr_qp_modify_rep *) (unsigned long)
		vq_req->reply_msg;
	if (!reply) {
		err = -ENOMEM;
		goto bail0;
	}

	err = c2_errno(reply);
	vq_repbuf_free(c2dev, reply);

bail0:
	vq_req_free(c2dev, vq_req);
	return err;
}

static int destroy_qp(struct c2_dev *c2dev, struct c2_qp *qp)
{
	struct c2_vq_req *vq_req;
	struct c2wr_qp_destroy_req wr;
	struct c2wr_qp_destroy_rep *reply;
	unsigned long flags;
	int err;

	/*
	 * Allocate a verb request message
	 */
	vq_req = vq_req_alloc(c2dev);
	if (!vq_req) {
		return -ENOMEM;
	}

	/*
	 * Initialize the WR
	 */
	c2_wr_set_id(&wr, CCWR_QP_DESTROY);
	wr.hdr.context = (unsigned long) vq_req;
	wr.rnic_handle = c2dev->adapter_handle;
	wr.qp_handle = qp->adapter_handle;

	/*
	 * reference the request struct.  dereferenced in the interrupt handler.
	 */
	vq_req_get(c2dev, vq_req);

	spin_lock_irqsave(&qp->lock, flags);
	if (qp->cm_id && qp->state == IB_QPS_RTS) {
		pr_debug("destroy_qp: generating CLOSE event for QP-->ERR, "
			 "qp=%p, cm_id=%p\n", qp, qp->cm_id);
		/* Generate a CLOSE event */
		vq_req->qp = qp;
		vq_req->cm_id = qp->cm_id;
		vq_req->event = IW_CM_EVENT_CLOSE;
	}
	spin_unlock_irqrestore(&qp->lock, flags);

	/*
	 * Send WR to adapter
	 */
	err = vq_send_wr(c2dev, (union c2wr *) &wr);
	if (err) {
		vq_req_put(c2dev, vq_req);
		goto bail0;
	}

	/*
	 * Wait for reply from adapter
	 */
	err = vq_wait_for_reply(c2dev, vq_req);
	if (err) {
		goto bail0;
	}

	/*
	 * Process reply
	 */
	reply = (struct c2wr_qp_destroy_rep *) (unsigned long) (vq_req->reply_msg);
	if (!reply) {
		err = -ENOMEM;
		goto bail0;
	}

	spin_lock_irqsave(&qp->lock, flags);
	if (qp->cm_id) {
		qp->cm_id->rem_ref(qp->cm_id);
		qp->cm_id = NULL;
	}
	spin_unlock_irqrestore(&qp->lock, flags);

	vq_repbuf_free(c2dev, reply);
bail0:
	vq_req_free(c2dev, vq_req);
	return err;
}

/*
 * Reserve a QP number from the idr table; idr_pre_get() refills the
 * free list and the allocation is retried if idr_get_new_above()
 * returns -EAGAIN.
 */
static int c2_alloc_qpn(struct c2_dev *c2dev, struct c2_qp *qp)
{
	int ret;

	do {
		spin_lock_irq(&c2dev->qp_table.lock);
		ret = idr_get_new_above(&c2dev->qp_table.idr, qp,
					c2dev->qp_table.last++, &qp->qpn);
		spin_unlock_irq(&c2dev->qp_table.lock);
	} while ((ret == -EAGAIN) &&
		 idr_pre_get(&c2dev->qp_table.idr, GFP_KERNEL));
	return ret;
}

static void c2_free_qpn(struct c2_dev *c2dev, int qpn)
{
	spin_lock_irq(&c2dev->qp_table.lock);
	idr_remove(&c2dev->qp_table.idr, qpn);
	spin_unlock_irq(&c2dev->qp_table.lock);
}

/* Look up a QP by its QP number. */
struct c2_qp *c2_find_qpn(struct c2_dev *c2dev, int qpn)
{
	unsigned long flags;
	struct c2_qp *qp;

	spin_lock_irqsave(&c2dev->qp_table.lock, flags);
	qp = idr_find(&c2dev->qp_table.idr, qpn);
	spin_unlock_irqrestore(&c2dev->qp_table.lock, flags);
	return qp;
}
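/*
 * c2_alloc_qp() builds a kernel QP: it reserves a QP number, allocates
 * the SQ/RQ shared-pointer entries from the kernel MQSP pool, and
 * issues a CCWR_QP_CREATE verbs request describing the queue depths,
 * SGL limits, and ORD/IRD maximums to the adapter.
 */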
int c2_alloc_qp(struct c2_dev *c2dev,
		struct c2_pd *pd,
		struct ib_qp_init_attr *qp_attrs, struct c2_qp *qp)
{
	struct c2wr_qp_create_req wr;
	struct c2wr_qp_create_rep *reply;
	struct c2_vq_req *vq_req;
	struct c2_cq *send_cq = to_c2cq(qp_attrs->send_cq);
	struct c2_cq *recv_cq = to_c2cq(qp_attrs->recv_cq);
	unsigned long peer_pa;
	u32 q_size, msg_size, mmap_size;
	void __iomem *mmap;
	int err;

	err = c2_alloc_qpn(c2dev, qp);
	if (err)
		return err;
	qp->ibqp.qp_num = qp->qpn;
	qp->ibqp.qp_type = IB_QPT_RC;

	/* Allocate the SQ and RQ shared pointers */
	qp->sq_mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
					 &qp->sq_mq.shared_dma, GFP_KERNEL);
	if (!qp->sq_mq.shared) {
		err = -ENOMEM;
		goto bail0;
	}

	qp->rq_mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
					 &qp->rq_mq.shared_dma, GFP_KERNEL);
	if (!qp->rq_mq.shared) {
		err = -ENOMEM;
		goto bail1;
	}

	/* Allocate the verbs request */
	vq_req = vq_req_alloc(c2dev);
	if (vq_req == NULL) {
		err = -ENOMEM;
		goto bail2;
	}

	/* Initialize the work request */
	memset(&wr, 0, sizeof(wr));
	c2_wr_set_id(&wr, CCWR_QP_CREATE);
	wr.hdr.context = (unsigned long) vq_req;
	wr.rnic_handle = c2dev->adapter_handle;
	wr.sq_cq_handle = send_cq->adapter_handle;
	wr.rq_cq_handle = recv_cq->adapter_handle;
	wr.sq_depth = cpu_to_be32(qp_attrs->cap.max_send_wr + 1);
	wr.rq_depth = cpu_to_be32(qp_attrs->cap.max_recv_wr + 1);
	wr.srq_handle = 0;
	wr.flags = cpu_to_be32(QP_RDMA_READ | QP_RDMA_WRITE | QP_MW_BIND |
			       QP_ZERO_STAG | QP_RDMA_READ_RESPONSE);
	wr.send_sgl_depth = cpu_to_be32(qp_attrs->cap.max_send_sge);
	wr.recv_sgl_depth = cpu_to_be32(qp_attrs->cap.max_recv_sge);
	wr.rdma_write_sgl_depth = cpu_to_be32(qp_attrs->cap.max_send_sge);
	wr.shared_sq_ht = cpu_to_be64(qp->sq_mq.shared_dma);
	wr.shared_rq_ht = cpu_to_be64(qp->rq_mq.shared_dma);
	wr.ord = cpu_to_be32(C2_MAX_ORD_PER_QP);
	wr.ird = cpu_to_be32(C2_MAX_IRD_PER_QP);
	wr.pd_id = pd->pd_id;
	wr.user_context = (unsigned long) qp;

	vq_req_get(c2dev, vq_req);

	/* Send the WR to the adapter */
	err = vq_send_wr(c2dev, (union c2wr *) &wr);
	if (err) {
		vq_req_put(c2dev, vq_req);
		goto bail3;
	}

	/* Wait for the verb reply */
	err = vq_wait_for_reply(c2dev, vq_req);
	if (err) {
		goto bail3;
	}

	/* Process the reply */
	reply = (struct c2wr_qp_create_rep *) (unsigned long) (vq_req->reply_msg);
	if (!reply) {
		err = -ENOMEM;
		goto bail3;
	}

	if ((err = c2_wr_get_result(reply)) != 0) {
		goto bail4;
	}

	/* Fill in the kernel QP struct */
	atomic_set(&qp->refcount, 1);
	qp->adapter_handle = reply->qp_handle;