
📄 iwch_qp.c

📁 Linux kernel source
💻 C
📖 Page 1 of 2
/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "iwch_provider.h"
#include "iwch.h"
#include "iwch_cm.h"
#include "cxio_hal.h"

#define NO_SUPPORT -1

static int iwch_build_rdma_send(union t3_wr *wqe, struct ib_send_wr *wr,
				u8 * flit_cnt)
{
	int i;
	u32 plen;

	switch (wr->opcode) {
	case IB_WR_SEND:
	case IB_WR_SEND_WITH_IMM:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.rdmaop = T3_SEND_WITH_SE;
		else
			wqe->send.rdmaop = T3_SEND;
		wqe->send.rem_stag = 0;
		break;
#if 0				/* Not currently supported */
	case TYPE_SEND_INVALIDATE:
	case TYPE_SEND_INVALIDATE_IMMEDIATE:
		wqe->send.rdmaop = T3_SEND_WITH_INV;
		wqe->send.rem_stag = cpu_to_be32(wr->wr.rdma.rkey);
		break;
	case TYPE_SEND_SE_INVALIDATE:
		wqe->send.rdmaop = T3_SEND_WITH_SE_INV;
		wqe->send.rem_stag = cpu_to_be32(wr->wr.rdma.rkey);
		break;
#endif
	default:
		break;
	}
	if (wr->num_sge > T3_MAX_SGE)
		return -EINVAL;
	wqe->send.reserved[0] = 0;
	wqe->send.reserved[1] = 0;
	wqe->send.reserved[2] = 0;
	if (wr->opcode == IB_WR_SEND_WITH_IMM) {
		plen = 4;
		wqe->send.sgl[0].stag = wr->imm_data;
		wqe->send.sgl[0].len = __constant_cpu_to_be32(0);
		wqe->send.num_sgle = __constant_cpu_to_be32(0);
		*flit_cnt = 5;
	} else {
		plen = 0;
		for (i = 0; i < wr->num_sge; i++) {
			if ((plen + wr->sg_list[i].length) < plen) {
				return -EMSGSIZE;
			}
			plen += wr->sg_list[i].length;
			wqe->send.sgl[i].stag =
			    cpu_to_be32(wr->sg_list[i].lkey);
			wqe->send.sgl[i].len =
			    cpu_to_be32(wr->sg_list[i].length);
			wqe->send.sgl[i].to = cpu_to_be64(wr->sg_list[i].addr);
		}
		wqe->send.num_sgle = cpu_to_be32(wr->num_sge);
		*flit_cnt = 4 + ((wr->num_sge) << 1);
	}
	wqe->send.plen = cpu_to_be32(plen);
	return 0;
}

static int iwch_build_rdma_write(union t3_wr *wqe, struct ib_send_wr *wr,
				 u8 *flit_cnt)
{
	int i;
	u32 plen;

	if (wr->num_sge > T3_MAX_SGE)
		return -EINVAL;
	wqe->write.rdmaop = T3_RDMA_WRITE;
	wqe->write.reserved[0] = 0;
	wqe->write.reserved[1] = 0;
	wqe->write.reserved[2] = 0;
	wqe->write.stag_sink = cpu_to_be32(wr->wr.rdma.rkey);
	wqe->write.to_sink = cpu_to_be64(wr->wr.rdma.remote_addr);

	if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
		plen = 4;
		wqe->write.sgl[0].stag = wr->imm_data;
		wqe->write.sgl[0].len = __constant_cpu_to_be32(0);
		wqe->write.num_sgle = __constant_cpu_to_be32(0);
		*flit_cnt = 6;
	} else {
		plen = 0;
		for (i = 0; i < wr->num_sge; i++) {
			if ((plen + wr->sg_list[i].length) < plen) {
				return -EMSGSIZE;
			}
			plen += wr->sg_list[i].length;
			wqe->write.sgl[i].stag =
			    cpu_to_be32(wr->sg_list[i].lkey);
			wqe->write.sgl[i].len =
			    cpu_to_be32(wr->sg_list[i].length);
			wqe->write.sgl[i].to =
			    cpu_to_be64(wr->sg_list[i].addr);
		}
		wqe->write.num_sgle = cpu_to_be32(wr->num_sge);
		*flit_cnt = 5 + ((wr->num_sge) << 1);
	}
	wqe->write.plen = cpu_to_be32(plen);
	return 0;
}

static int iwch_build_rdma_read(union t3_wr *wqe, struct ib_send_wr *wr,
				u8 *flit_cnt)
{
	if (wr->num_sge > 1)
		return -EINVAL;
	wqe->read.rdmaop = T3_READ_REQ;
	wqe->read.reserved[0] = 0;
	wqe->read.reserved[1] = 0;
	wqe->read.reserved[2] = 0;
	wqe->read.rem_stag = cpu_to_be32(wr->wr.rdma.rkey);
	wqe->read.rem_to = cpu_to_be64(wr->wr.rdma.remote_addr);
	wqe->read.local_stag = cpu_to_be32(wr->sg_list[0].lkey);
	wqe->read.local_len = cpu_to_be32(wr->sg_list[0].length);
	wqe->read.local_to = cpu_to_be64(wr->sg_list[0].addr);
	*flit_cnt = sizeof(struct t3_rdma_read_wr) >> 3;
	return 0;
}

/*
 * TBD: this is going to be moved to firmware. Missing pdid/qpid check for now.
 */
static int iwch_sgl2pbl_map(struct iwch_dev *rhp, struct ib_sge *sg_list,
			    u32 num_sgle, u32 * pbl_addr, u8 * page_size)
{
	int i;
	struct iwch_mr *mhp;
	u32 offset;

	for (i = 0; i < num_sgle; i++) {
		mhp = get_mhp(rhp, (sg_list[i].lkey) >> 8);
		if (!mhp) {
			PDBG("%s %d\n", __FUNCTION__, __LINE__);
			return -EIO;
		}
		if (!mhp->attr.state) {
			PDBG("%s %d\n", __FUNCTION__, __LINE__);
			return -EIO;
		}
		if (mhp->attr.zbva) {
			PDBG("%s %d\n", __FUNCTION__, __LINE__);
			return -EIO;
		}
		if (sg_list[i].addr < mhp->attr.va_fbo) {
			PDBG("%s %d\n", __FUNCTION__, __LINE__);
			return -EINVAL;
		}
		if (sg_list[i].addr + ((u64) sg_list[i].length) <
		    sg_list[i].addr) {
			PDBG("%s %d\n", __FUNCTION__, __LINE__);
			return -EINVAL;
		}
		if (sg_list[i].addr + ((u64) sg_list[i].length) >
		    mhp->attr.va_fbo + ((u64) mhp->attr.len)) {
			PDBG("%s %d\n", __FUNCTION__, __LINE__);
			return -EINVAL;
		}
		offset = sg_list[i].addr - mhp->attr.va_fbo;
		offset += ((u32) mhp->attr.va_fbo) %
		          (1UL << (12 + mhp->attr.page_size));
		pbl_addr[i] = ((mhp->attr.pbl_addr -
			        rhp->rdev.rnic_info.pbl_base) >> 3) +
			      (offset >> (12 + mhp->attr.page_size));
		page_size[i] = mhp->attr.page_size;
	}
	return 0;
}

static int iwch_build_rdma_recv(struct iwch_dev *rhp, union t3_wr *wqe,
				struct ib_recv_wr *wr)
{
	int i, err = 0;
	u32 pbl_addr[4];
	u8 page_size[4];

	if (wr->num_sge > T3_MAX_SGE)
		return -EINVAL;
	err = iwch_sgl2pbl_map(rhp, wr->sg_list, wr->num_sge, pbl_addr,
			       page_size);
	if (err)
		return err;
	wqe->recv.pagesz[0] = page_size[0];
	wqe->recv.pagesz[1] = page_size[1];
	wqe->recv.pagesz[2] = page_size[2];
	wqe->recv.pagesz[3] = page_size[3];
	wqe->recv.num_sgle = cpu_to_be32(wr->num_sge);
	for (i = 0; i < wr->num_sge; i++) {
		wqe->recv.sgl[i].stag = cpu_to_be32(wr->sg_list[i].lkey);
		wqe->recv.sgl[i].len = cpu_to_be32(wr->sg_list[i].length);

		/* to in the WQE == the offset into the page */
		wqe->recv.sgl[i].to = cpu_to_be64(((u32) wr->sg_list[i].addr) %
				(1UL << (12 + page_size[i])));

		/* pbl_addr is the adapters address in the PBL */
		wqe->recv.pbl_addr[i] = cpu_to_be32(pbl_addr[i]);
	}
	for (; i < T3_MAX_SGE; i++) {
		wqe->recv.sgl[i].stag = 0;
		wqe->recv.sgl[i].len = 0;
		wqe->recv.sgl[i].to = 0;
		wqe->recv.pbl_addr[i] = 0;
	}
	return 0;
}

int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		   struct ib_send_wr **bad_wr)
{
	int err = 0;
	u8 t3_wr_flit_cnt;
	enum t3_wr_opcode t3_wr_opcode = 0;
	enum t3_wr_flags t3_wr_flags;
	struct iwch_qp *qhp;
	u32 idx;
	union t3_wr *wqe;
	u32 num_wrs;
	unsigned long flag;
	struct t3_swsq *sqp;

	qhp = to_iwch_qp(ibqp);
	spin_lock_irqsave(&qhp->lock, flag);
	if (qhp->attr.state > IWCH_QP_STATE_RTS) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -EINVAL;
	}
	num_wrs = Q_FREECNT(qhp->wq.sq_rptr, qhp->wq.sq_wptr,
		  qhp->wq.sq_size_log2);
	if (num_wrs <= 0) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -ENOMEM;
	}
	while (wr) {
		if (num_wrs == 0) {
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}
		idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
		wqe = (union t3_wr *) (qhp->wq.queue + idx);
		t3_wr_flags = 0;
		if (wr->send_flags & IB_SEND_SOLICITED)
			t3_wr_flags |= T3_SOLICITED_EVENT_FLAG;
		if (wr->send_flags & IB_SEND_FENCE)
			t3_wr_flags |= T3_READ_FENCE_FLAG;
		if (wr->send_flags & IB_SEND_SIGNALED)
			t3_wr_flags |= T3_COMPLETION_FLAG;
		sqp = qhp->wq.sq +
		      Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2);
		switch (wr->opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_IMM:
			t3_wr_opcode = T3_WR_SEND;
			err = iwch_build_rdma_send(wqe, wr, &t3_wr_flit_cnt);
			break;
		case IB_WR_RDMA_WRITE:
		case IB_WR_RDMA_WRITE_WITH_IMM:
			t3_wr_opcode = T3_WR_WRITE;
			err = iwch_build_rdma_write(wqe, wr, &t3_wr_flit_cnt);
			break;
		case IB_WR_RDMA_READ:
			t3_wr_opcode = T3_WR_READ;
			t3_wr_flags = 0; /* T3 reads are always signaled */
			err = iwch_build_rdma_read(wqe, wr, &t3_wr_flit_cnt);
			if (err)
				break;
			sqp->read_len = wqe->read.local_len;
			if (!qhp->wq.oldest_read)
				qhp->wq.oldest_read = sqp;
			break;
		default:
			PDBG("%s post of type=%d TBD!\n", __FUNCTION__,
			     wr->opcode);
			err = -EINVAL;
		}
		if (err) {
			*bad_wr = wr;
			break;
		}
		wqe->send.wrid.id0.hi = qhp->wq.sq_wptr;
		sqp->wr_id = wr->wr_id;
		sqp->opcode = wr2opcode(t3_wr_opcode);
		sqp->sq_wptr = qhp->wq.sq_wptr;
		sqp->complete = 0;
		sqp->signaled = (wr->send_flags & IB_SEND_SIGNALED);

		build_fw_riwrh((void *) wqe, t3_wr_opcode, t3_wr_flags,
			       Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
			       0, t3_wr_flit_cnt);
		PDBG("%s cookie 0x%llx wq idx 0x%x swsq idx %ld opcode %d\n",
		     __FUNCTION__, (unsigned long long) wr->wr_id, idx,
		     Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2),
		     sqp->opcode);
		wr = wr->next;
		num_wrs--;
		++(qhp->wq.wptr);
		++(qhp->wq.sq_wptr);
	}
	spin_unlock_irqrestore(&qhp->lock, flag);
	ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
	return err;
}

int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct iwch_qp *qhp;
	u32 idx;
	union t3_wr *wqe;
	u32 num_wrs;
	unsigned long flag;

	qhp = to_iwch_qp(ibqp);
	spin_lock_irqsave(&qhp->lock, flag);
	if (qhp->attr.state > IWCH_QP_STATE_RTS) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -EINVAL;
	}
	num_wrs = Q_FREECNT(qhp->wq.rq_rptr, qhp->wq.rq_wptr,
			    qhp->wq.rq_size_log2) - 1;
	if (!wr) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -EINVAL;
	}
	while (wr) {
		idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
		wqe = (union t3_wr *) (qhp->wq.queue + idx);
		if (num_wrs)
			err = iwch_build_rdma_recv(qhp->rhp, wqe, wr);
		else
			err = -ENOMEM;
		if (err) {
			*bad_wr = wr;
			break;
		}
		qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr, qhp->wq.rq_size_log2)] =
			wr->wr_id;
		build_fw_riwrh((void *) wqe, T3_WR_RCV, T3_COMPLETION_FLAG,
			       Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
			       0, sizeof(struct t3_receive_wr) >> 3);
		PDBG("%s cookie 0x%llx idx 0x%x rq_wptr 0x%x rw_rptr 0x%x "
		     "wqe %p \n", __FUNCTION__, (unsigned long long) wr->wr_id,
		     idx, qhp->wq.rq_wptr, qhp->wq.rq_rptr, wqe);
		++(qhp->wq.rq_wptr);
		++(qhp->wq.wptr);
		wr = wr->next;
		num_wrs--;
	}
	spin_unlock_irqrestore(&qhp->lock, flag);
	ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
	return err;
}

int iwch_bind_mw(struct ib_qp *qp,
		 struct ib_mw *mw,
		 struct ib_mw_bind *mw_bind)
{
	struct iwch_dev *rhp;
	struct iwch_mw *mhp;
	struct iwch_qp *qhp;
	union t3_wr *wqe;
	u32 pbl_addr;
	u8 page_size;
	u32 num_wrs;
	unsigned long flag;
	struct ib_sge sgl;
	int err=0;
	enum t3_wr_flags t3_wr_flags;
	u32 idx;
	struct t3_swsq *sqp;

	qhp = to_iwch_qp(qp);
	mhp = to_iwch_mw(mw);
	rhp = qhp->rhp;

	spin_lock_irqsave(&qhp->lock, flag);
	if (qhp->attr.state > IWCH_QP_STATE_RTS) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -EINVAL;
	}
	num_wrs = Q_FREECNT(qhp->wq.sq_rptr, qhp->wq.sq_wptr,
			    qhp->wq.sq_size_log2);
	if ((num_wrs) <= 0) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -ENOMEM;
	}
	idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
	PDBG("%s: idx 0x%0x, mw 0x%p, mw_bind 0x%p\n", __FUNCTION__, idx,
	     mw, mw_bind);
	wqe = (union t3_wr *) (qhp->wq.queue + idx);

	t3_wr_flags = 0;
	if (mw_bind->send_flags & IB_SEND_SIGNALED)
		t3_wr_flags = T3_COMPLETION_FLAG;

	sgl.addr = mw_bind->addr;
	sgl.lkey = mw_bind->mr->lkey;
	sgl.length = mw_bind->length;
	wqe->bind.reserved = 0;
	wqe->bind.type = T3_VA_BASED_TO;

	/* TBD: check perms */
	wqe->bind.perms = iwch_ib_to_mwbind_access(mw_bind->mw_access_flags);
	wqe->bind.mr_stag = cpu_to_be32(mw_bind->mr->lkey);
	wqe->bind.mw_stag = cpu_to_be32(mw->rkey);
	wqe->bind.mw_len = cpu_to_be32(mw_bind->length);
	wqe->bind.mw_va = cpu_to_be64(mw_bind->addr);
	err = iwch_sgl2pbl_map(rhp, &sgl, 1, &pbl_addr, &page_size);
	if (err) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return err;
	}
	wqe->send.wrid.id0.hi = qhp->wq.sq_wptr;
	sqp = qhp->wq.sq + Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2);
	sqp->wr_id = mw_bind->wr_id;
	sqp->opcode = T3_BIND_MW;
	sqp->sq_wptr = qhp->wq.sq_wptr;
	sqp->complete = 0;
	sqp->signaled = (mw_bind->send_flags & IB_SEND_SIGNALED);
	wqe->bind.mr_pbl_addr = cpu_to_be32(pbl_addr);
	wqe->bind.mr_pagesz = page_size;
	wqe->flit[T3_SQ_COOKIE_FLIT] = mw_bind->wr_id;
	build_fw_riwrh((void *)wqe, T3_WR_BIND, t3_wr_flags,
		       Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2), 0,
		       sizeof(struct t3_bind_mw_wr) >> 3);
	++(qhp->wq.wptr);
	++(qhp->wq.sq_wptr);
	spin_unlock_irqrestore(&qhp->lock, flag);

	ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);

	return err;
}

static inline void build_term_codes(struct respQ_msg_t *rsp_msg,
				    u8 *layer_type, u8 *ecode)
{
	int status = TPT_ERR_INTERNAL_ERR;
	int tagged = 0;
	int opcode = -1;
	int rqtype = 0;
	int send_inv = 0;

	if (rsp_msg) {
		status = CQE_STATUS(rsp_msg->cqe);
		opcode = CQE_OPCODE(rsp_msg->cqe);
		rqtype = RQ_TYPE(rsp_msg->cqe);
		send_inv = (opcode == T3_SEND_WITH_INV) ||
		           (opcode == T3_SEND_WITH_SE_INV);
		tagged = (opcode == T3_RDMA_WRITE) ||
			 (rqtype && (opcode == T3_READ_RESP));
	}

	switch (status) {
	case TPT_ERR_STAG:
		if (send_inv) {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
			*ecode = RDMAP_CANT_INV_STAG;
		} else {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
			*ecode = RDMAP_INV_STAG;
		}
		break;
	case TPT_ERR_PDID:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		if ((opcode == T3_SEND_WITH_INV) ||
		    (opcode == T3_SEND_WITH_SE_INV))
			*ecode = RDMAP_CANT_INV_STAG;
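A note on the length arithmetic above: iwch_build_rdma_send() and iwch_build_rdma_write() accumulate the scatter/gather lengths into a u32 payload length and guard against wraparound with "if ((plen + wr->sg_list[i].length) < plen) return -EMSGSIZE;". Unsigned addition in C wraps modulo 2^32, so an overflowed sum compares strictly smaller than the running total. The standalone sketch below restates that idiom outside the kernel; the function and variable names are illustrative, not part of the driver:

#include <stdint.h>
#include <stdio.h>

/* Illustrative restatement of the driver's overflow guard: sum a list
 * of SGE lengths into a 32-bit total, failing (as the driver does with
 * -EMSGSIZE) if the total would wrap modulo 2^32. */
static int sum_sge_lengths(const uint32_t *lengths, int num_sge,
                           uint32_t *total)
{
        uint32_t plen = 0;
        int i;

        for (i = 0; i < num_sge; i++) {
                /* Unsigned addition wraps, so a wrapped sum is
                 * strictly smaller than the running total. */
                if (plen + lengths[i] < plen)
                        return -1;
                plen += lengths[i];
        }
        *total = plen;
        return 0;
}

int main(void)
{
        uint32_t ok[]  = { 100, 200 };
        uint32_t bad[] = { 0xFFFFFFF0u, 0x20u };
        uint32_t total;

        printf("%d\n", sum_sge_lengths(ok, 2, &total));  /* 0: total = 300 */
        printf("%d\n", sum_sge_lengths(bad, 2, &total)); /* -1: would wrap */
        return 0;
}

The same idea appears with 64-bit arithmetic in iwch_sgl2pbl_map(), where "sg_list[i].addr + ((u64) sg_list[i].length) < sg_list[i].addr" rejects an SGE that would wrap the 64-bit address space before it is range-checked against the memory region.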
