/*
 * mthca_qp.c — excerpt from the Linux 2.6.17.4 source tree
 * (drivers/infiniband/hw/mthca).  C source, 2,252 lines in total;
 * this is page 1 of 5 as captured from a code-viewer page.
 */
/*
 * NOTE(review): tail end of mthca_free_qp().  The function's opening
 * lines are on an earlier page of this excerpt, so the fragment below
 * is kept token-for-token and only reformatted/commented.
 */
			  qp->qpn & (dev->limits.num_qps - 1));
	--qp->refcount;

	spin_unlock(&dev->qp_table.lock);

	if (send_cq != recv_cq)
		spin_unlock(&recv_cq->lock);
	spin_unlock_irq(&send_cq->lock);

	/* Block until every outstanding reference to the QP is dropped. */
	wait_event(qp->wait, !get_qp_refcount(dev, qp));

	/* Return the hardware QP to RESET before freeing its resources. */
	if (qp->state != IB_QPS_RESET)
		mthca_MODIFY_QP(dev, qp->state, IB_QPS_RESET, qp->qpn, 0,
				NULL, 0, &status);

	/*
	 * If this is a userspace QP, the buffers, MR, CQs and so on
	 * will be cleaned up in userspace, so all we have to do is
	 * unref the mem-free tables and free the QPN in our table.
	 */
	if (!qp->ibqp.uobject) {
		/* Purge completions for this QPN from both CQs. */
		mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn,
			       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
		if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
			mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn,
				       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);

		mthca_free_memfree(dev, qp);
		mthca_free_wqe_buf(dev, qp);
	}

	mthca_unmap_memfree(dev, qp);

	if (is_sqp(dev, qp)) {
		/* Special QPs own a coherent DMA buffer for UD headers. */
		atomic_dec(&(to_mpd(qp->ibqp.pd)->sqp_count));
		dma_free_coherent(&dev->pdev->dev,
				  to_msqp(qp)->header_buf_size,
				  to_msqp(qp)->header_buf,
				  to_msqp(qp)->header_dma);
	} else
		mthca_free(&dev->qp_table.alloc, qp->qpn);
}

/* Create UD header for an MLX send and build a data segment for it */
static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
			    int ind, struct ib_send_wr *wr,
			    struct mthca_mlx_seg *mlx,
			    struct mthca_data_seg *data)
{
	int header_size;
	int err;
	u16 pkey;

	ib_ud_header_init(256, /* assume a MAD */
			  mthca_ah_grh_present(to_mah(wr->wr.ud.ah)),
			  &sqp->ud_header);

	/* Fill in the LRH/GRH fields from the address handle. */
	err = mthca_read_ah(dev, to_mah(wr->wr.ud.ah), &sqp->ud_header);
	if (err)
		return err;
	mlx->flags &= ~cpu_to_be32(MTHCA_NEXT_SOLICIT | 1);
	/* QP0 traffic always goes out on VL15. */
	mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MTHCA_MLX_VL15 : 0) |
				  (sqp->ud_header.lrh.destination_lid ==
				   IB_LID_PERMISSIVE ? MTHCA_MLX_SLR : 0) |
				  (sqp->ud_header.lrh.service_level << 8));
	mlx->rlid = sqp->ud_header.lrh.destination_lid;
	mlx->vcrc = 0;

	/* Only plain and immediate-data sends are valid on MLX QPs. */
	switch (wr->opcode) {
	case IB_WR_SEND:
		sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
		sqp->ud_header.immediate_present = 0;
		break;
	case IB_WR_SEND_WITH_IMM:
		sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
		sqp->ud_header.immediate_present = 1;
		sqp->ud_header.immediate_data = wr->imm_data;
		break;
	default:
		return -EINVAL;
	}

	sqp->ud_header.lrh.virtual_lane    = !sqp->qp.ibqp.qp_num ? 15 : 0;
	if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
		sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
	sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
	/* QP0 uses its own pkey index; QP1 takes the index from the WR. */
	if (!sqp->qp.ibqp.qp_num)
		ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port,
				   sqp->pkey_index, &pkey);
	else
		ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port,
				   wr->wr.ud.pkey_index, &pkey);
	sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
	sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
	/* PSN is a 24-bit counter. */
	sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
	/* High bit set in the remote qkey means: use the local qkey. */
	sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ?
					       sqp->qkey : wr->wr.ud.remote_qkey);
	sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);

	/* Pack the header into this WQE's slot of the header buffer... */
	header_size = ib_ud_header_pack(&sqp->ud_header,
					sqp->header_buf +
					ind * MTHCA_UD_HEADER_SIZE);

	/* ...and describe that slot with the caller's data segment. */
	data->byte_count = cpu_to_be32(header_size);
	data->lkey       = cpu_to_be32(to_mpd(sqp->qp.ibqp.pd)->ntmr.ibmr.lkey);
	data->addr       = cpu_to_be64(sqp->header_dma +
				       ind * MTHCA_UD_HEADER_SIZE);

	return 0;
}

/*
 * Return nonzero if posting nreq more WQEs would overflow the work
 * queue.  The fast path is an unlocked read; on apparent overflow the
 * head/tail are re-read under the CQ lock before declaring it full.
 */
static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq,
				    struct ib_cq *ib_cq)
{
	unsigned cur;
	struct mthca_cq *cq;

	cur = wq->head - wq->tail;
	if (likely(cur + nreq < wq->max))
		return 0;

	cq = to_mcq(ib_cq);
	spin_lock(&cq->lock);
	cur = wq->head - wq->tail;
	spin_unlock(&cq->lock);

	return cur + nreq >= wq->max;
}

/* Post a chain of send work requests on a Tavor-mode QP. */
int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			  struct ib_send_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	void *wqe;
	void *prev_wqe;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	int size;
	int size0 = 0;	/* size of the first WQE posted, for the doorbell */
	u32 f0 = 0;
	int ind;
	u8 op0 = 0;	/* opcode of the first WQE posted, for the doorbell */

	spin_lock_irqsave(&qp->sq.lock, flags);

	/* XXX check that state is OK to post send */

	ind = qp->sq.next_ind;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			mthca_err(dev, "SQ %06x full (%u head, %u tail,"
					" %d max, %d nreq)\n", qp->qpn,
					qp->sq.head, qp->sq.tail,
					qp->sq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_send_wqe(qp, ind);
		prev_wqe = qp->sq.last;
		qp->sq.last = wqe;

		/* Build the "next" segment at the head of the WQE. */
		((struct mthca_next_seg *) wqe)->nda_op = 0;
		((struct mthca_next_seg *) wqe)->ee_nds = 0;
		((struct mthca_next_seg *) wqe)->flags =
			((wr->send_flags & IB_SEND_SIGNALED) ?
			 cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) |
			((wr->send_flags & IB_SEND_SOLICITED) ?
/* (continuation of mthca_tavor_post_send(): flags expression and loop body) */
			 cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0)   |
			cpu_to_be32(1);
		if (wr->opcode == IB_WR_SEND_WITH_IMM ||
		    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
			((struct mthca_next_seg *) wqe)->imm = wr->imm_data;

		wqe += sizeof (struct mthca_next_seg);
		size = sizeof (struct mthca_next_seg) / 16;

		/* Transport-specific segments follow the "next" segment. */
		switch (qp->transport) {
		case RC:
			switch (wr->opcode) {
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.atomic.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.atomic.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;

				wqe += sizeof (struct mthca_raddr_seg);

				if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
					((struct mthca_atomic_seg *) wqe)->swap_add =
						cpu_to_be64(wr->wr.atomic.swap);
					((struct mthca_atomic_seg *) wqe)->compare =
						cpu_to_be64(wr->wr.atomic.compare_add);
				} else {
					((struct mthca_atomic_seg *) wqe)->swap_add =
						cpu_to_be64(wr->wr.atomic.compare_add);
					((struct mthca_atomic_seg *) wqe)->compare = 0;
				}

				wqe += sizeof (struct mthca_atomic_seg);
				size += (sizeof (struct mthca_raddr_seg) +
					 sizeof (struct mthca_atomic_seg)) / 16;
				break;

			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
			case IB_WR_RDMA_READ:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.rdma.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.rdma.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;
				wqe += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UC:
			switch (wr->opcode) {
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.rdma.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.rdma.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;
				wqe += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UD:
			/* UD sends reference the address vector by DMA address. */
			((struct mthca_tavor_ud_seg *) wqe)->lkey =
				cpu_to_be32(to_mah(wr->wr.ud.ah)->key);
			((struct mthca_tavor_ud_seg *) wqe)->av_addr =
				cpu_to_be64(to_mah(wr->wr.ud.ah)->avdma);
			((struct mthca_tavor_ud_seg *) wqe)->dqpn =
				cpu_to_be32(wr->wr.ud.remote_qpn);
			((struct mthca_tavor_ud_seg *) wqe)->qkey =
				cpu_to_be32(wr->wr.ud.remote_qkey);

			wqe += sizeof (struct mthca_tavor_ud_seg);
			size += sizeof (struct mthca_tavor_ud_seg) / 16;
			break;

		case MLX:
			/* Build the UD header in software for QP0/QP1. */
			err = build_mlx_header(dev, to_msqp(qp), ind, wr,
					       wqe - sizeof (struct mthca_next_seg),
					       wqe);
			if (err) {
				*bad_wr = wr;
				goto out;
			}
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
			break;
		}

		if (wr->num_sge > qp->sq.max_gs) {
			mthca_err(dev, "too many gathers\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		/* Gather list: one data segment per SGE. */
		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				cpu_to_be32(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				cpu_to_be64(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		/* Add one more inline data segment for ICRC */
		if (qp->transport == MLX) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32((1 << 31) | 4);
			((u32 *) wqe)[1] = 0;
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		/* Send wrids live after the receive wrids in qp->wrid[]. */
		qp->wrid[ind + qp->rq.max] = wr->wr_id;

		if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) {
			mthca_err(dev, "opcode invalid\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		/* Link the previous WQE to this one... */
		((struct mthca_next_seg *) prev_wqe)->nda_op =
			cpu_to_be32(((ind << qp->sq.wqe_shift) +
				     qp->send_wqe_offset) |
				    mthca_opcode[wr->opcode]);
		wmb();
		/* ...then mark it valid; wmb() orders the two stores. */
		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32((size0 ? 0 : MTHCA_NEXT_DBD) | size |
				    ((wr->send_flags & IB_SEND_FENCE) ?
				    MTHCA_NEXT_FENCE : 0));

		if (!size0) {
			size0 = size;
			op0   = mthca_opcode[wr->opcode];
		}

		++ind;
		if (unlikely(ind >= qp->sq.max))
			ind -= qp->sq.max;
	}

out:
	/* Ring the send doorbell once for everything queued. */
	if (likely(nreq)) {
		__be32 doorbell[2];

		doorbell[0] = cpu_to_be32(((qp->sq.next_ind << qp->sq.wqe_shift) +
					   qp->send_wqe_offset) | f0 | op0);
		doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0);

		wmb();

		mthca_write64(doorbell,
			      dev->kar + MTHCA_SEND_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	qp->sq.next_ind = ind;
	qp->sq.head    += nreq;

	spin_unlock_irqrestore(&qp->sq.lock, flags);
	return err;
}

/* Post a chain of receive work requests on a Tavor-mode QP. */
int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			     struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	__be32 doorbell[2];
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	int size;
	int size0 = 0;
	int ind;
	void *wqe;
	void *prev_wqe;

	spin_lock_irqsave(&qp->rq.lock, flags);

	/* XXX check that state is OK to post receive */

	ind = qp->rq.next_ind;

	for (nreq = 0; wr; wr = wr->next) {
		if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
			mthca_err(dev, "RQ %06x full (%u head, %u tail,"
					" %d max, %d nreq)\n", qp->qpn,
					qp->rq.head, qp->rq.tail,
					qp->rq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_recv_wqe(qp, ind);
		prev_wqe = qp->rq.last;
		qp->rq.last = wqe;

		((struct mthca_next_seg *) wqe)->nda_op = 0;
		((struct mthca_next_seg *) wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD);
		((struct mthca_next_seg *) wqe)->flags = 0;

		wqe += sizeof (struct mthca_next_seg);
		size = sizeof (struct mthca_next_seg) / 16;

		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		/* Scatter list: one data segment per SGE. */
		for (i = 0; i < wr->num_sge; ++i) {
			((struct
mthca_data_seg *) wqe)->byte_count =				cpu_to_be32(wr->sg_list[i].length);			((struct mthca_data_seg *) wqe)->lkey =				cpu_to_be32(wr->sg_list[i].lkey);			((struct mthca_data_seg *) wqe)->addr =				cpu_to_be64(wr->sg_list[i].addr);			wqe += sizeof (struct mthca_data_seg);			size += sizeof (struct mthca_data_seg) / 16;		}		qp->wrid[ind] = wr->wr_id;		((struct mthca_next_seg *) prev_wqe)->nda_op =			cpu_to_be32((ind << qp->rq.wqe_shift) | 1);		wmb();		((struct mthca_next_seg *) prev_wqe)->ee_nds =			cpu_to_be32(MTHCA_NEXT_DBD | size);		if (!size0)			size0 = size;		++ind;		if (unlikely(ind >= qp->rq.max))			ind -= qp->rq.max;		++nreq;		if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {			nreq = 0;			doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0);			doorbell[1] = cpu_to_be32(qp->qpn << 8);			wmb();			mthca_write64(doorbell,				      dev->kar + MTHCA_RECEIVE_DOORBELL,				      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));			qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB;			size0 = 0;		}	}out:	if (likely(nreq)) {

/*
 * (review note) The captured page ended here with code-viewer UI text
 * (keyboard-shortcut help: copy, search, full screen, font size); it
 * was never part of the source file and carries no code content.
 */