
📄 mthca_qp.c

📁 Linux kernel source code
💻 C
📖 Page 1 of 5
	return 0;

 err_out_free:
	/*
	 * Lock CQs here, so that CQ polling code can do QP lookup
	 * without taking a lock.
	 */
	mthca_lock_cqs(send_cq, recv_cq);

	spin_lock(&dev->qp_table.lock);
	mthca_array_clear(&dev->qp_table.qp, mqpn);
	spin_unlock(&dev->qp_table.lock);

	mthca_unlock_cqs(send_cq, recv_cq);

 err_out:
	dma_free_coherent(&dev->pdev->dev, sqp->header_buf_size,
			  sqp->header_buf, sqp->header_dma);

	return err;
}

static inline int get_qp_refcount(struct mthca_dev *dev, struct mthca_qp *qp)
{
	int c;

	spin_lock_irq(&dev->qp_table.lock);
	c = qp->refcount;
	spin_unlock_irq(&dev->qp_table.lock);

	return c;
}

void mthca_free_qp(struct mthca_dev *dev,
		   struct mthca_qp *qp)
{
	u8 status;
	struct mthca_cq *send_cq;
	struct mthca_cq *recv_cq;

	send_cq = to_mcq(qp->ibqp.send_cq);
	recv_cq = to_mcq(qp->ibqp.recv_cq);

	/*
	 * Lock CQs here, so that CQ polling code can do QP lookup
	 * without taking a lock.
	 */
	mthca_lock_cqs(send_cq, recv_cq);

	spin_lock(&dev->qp_table.lock);
	mthca_array_clear(&dev->qp_table.qp,
			  qp->qpn & (dev->limits.num_qps - 1));
	--qp->refcount;
	spin_unlock(&dev->qp_table.lock);

	mthca_unlock_cqs(send_cq, recv_cq);

	wait_event(qp->wait, !get_qp_refcount(dev, qp));

	if (qp->state != IB_QPS_RESET)
		mthca_MODIFY_QP(dev, qp->state, IB_QPS_RESET, qp->qpn, 0,
				NULL, 0, &status);

	/*
	 * If this is a userspace QP, the buffers, MR, CQs and so on
	 * will be cleaned up in userspace, so all we have to do is
	 * unref the mem-free tables and free the QPN in our table.
	 */
	if (!qp->ibqp.uobject) {
		mthca_cq_clean(dev, recv_cq, qp->qpn,
			       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
		if (send_cq != recv_cq)
			mthca_cq_clean(dev, send_cq, qp->qpn, NULL);

		mthca_free_memfree(dev, qp);
		mthca_free_wqe_buf(dev, qp);
	}

	mthca_unmap_memfree(dev, qp);

	if (is_sqp(dev, qp)) {
		atomic_dec(&(to_mpd(qp->ibqp.pd)->sqp_count));
		dma_free_coherent(&dev->pdev->dev,
				  to_msqp(qp)->header_buf_size,
				  to_msqp(qp)->header_buf,
				  to_msqp(qp)->header_dma);
	} else
		mthca_free(&dev->qp_table.alloc, qp->qpn);
}

/* Create UD header for an MLX send and build a data segment for it */
static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
			    int ind, struct ib_send_wr *wr,
			    struct mthca_mlx_seg *mlx,
			    struct mthca_data_seg *data)
{
	int header_size;
	int err;
	u16 pkey;

	ib_ud_header_init(256, /* assume a MAD */
			  mthca_ah_grh_present(to_mah(wr->wr.ud.ah)),
			  &sqp->ud_header);

	err = mthca_read_ah(dev, to_mah(wr->wr.ud.ah), &sqp->ud_header);
	if (err)
		return err;
	mlx->flags &= ~cpu_to_be32(MTHCA_NEXT_SOLICIT | 1);
	mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MTHCA_MLX_VL15 : 0) |
				  (sqp->ud_header.lrh.destination_lid ==
				   IB_LID_PERMISSIVE ? MTHCA_MLX_SLR : 0) |
				  (sqp->ud_header.lrh.service_level << 8));
	mlx->rlid = sqp->ud_header.lrh.destination_lid;
	mlx->vcrc = 0;

	switch (wr->opcode) {
	case IB_WR_SEND:
		sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
		sqp->ud_header.immediate_present = 0;
		break;
	case IB_WR_SEND_WITH_IMM:
		sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
		sqp->ud_header.immediate_present = 1;
		sqp->ud_header.immediate_data = wr->imm_data;
		break;
	default:
		return -EINVAL;
	}

	sqp->ud_header.lrh.virtual_lane    = !sqp->qp.ibqp.qp_num ? 15 : 0;
	if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
		sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
	sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
	if (!sqp->qp.ibqp.qp_num)
		ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port,
				   sqp->pkey_index, &pkey);
	else
		ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port,
				   wr->wr.ud.pkey_index, &pkey);
	sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
	sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
	sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
	sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ?
					       sqp->qkey : wr->wr.ud.remote_qkey);
	sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);

	header_size = ib_ud_header_pack(&sqp->ud_header,
					sqp->header_buf +
					ind * MTHCA_UD_HEADER_SIZE);

	data->byte_count = cpu_to_be32(header_size);
	data->lkey       = cpu_to_be32(to_mpd(sqp->qp.ibqp.pd)->ntmr.ibmr.lkey);
	data->addr       = cpu_to_be64(sqp->header_dma +
				       ind * MTHCA_UD_HEADER_SIZE);

	return 0;
}

static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq,
				    struct ib_cq *ib_cq)
{
	unsigned cur;
	struct mthca_cq *cq;

	cur = wq->head - wq->tail;
	if (likely(cur + nreq < wq->max))
		return 0;

	cq = to_mcq(ib_cq);
	spin_lock(&cq->lock);
	cur = wq->head - wq->tail;
	spin_unlock(&cq->lock);

	return cur + nreq >= wq->max;
}

static __always_inline void set_raddr_seg(struct mthca_raddr_seg *rseg,
					  u64 remote_addr, u32 rkey)
{
	rseg->raddr    = cpu_to_be64(remote_addr);
	rseg->rkey     = cpu_to_be32(rkey);
	rseg->reserved = 0;
}

static __always_inline void set_atomic_seg(struct mthca_atomic_seg *aseg,
					   struct ib_send_wr *wr)
{
	if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
		aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
		aseg->compare  = cpu_to_be64(wr->wr.atomic.compare_add);
	} else {
		aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
		aseg->compare  = 0;
	}
}

static void set_tavor_ud_seg(struct mthca_tavor_ud_seg *useg,
			     struct ib_send_wr *wr)
{
	useg->lkey    = cpu_to_be32(to_mah(wr->wr.ud.ah)->key);
	useg->av_addr = cpu_to_be64(to_mah(wr->wr.ud.ah)->avdma);
	useg->dqpn    = cpu_to_be32(wr->wr.ud.remote_qpn);
	useg->qkey    = cpu_to_be32(wr->wr.ud.remote_qkey);
}

static void set_arbel_ud_seg(struct mthca_arbel_ud_seg *useg,
			     struct ib_send_wr *wr)
{
	memcpy(useg->av, to_mah(wr->wr.ud.ah)->av, MTHCA_AV_SIZE);
	useg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn);
	useg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
}

int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			  struct ib_send_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	void *wqe;
	void *prev_wqe;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	int size;
	/*
	 * f0 and size0 are only used if nreq != 0, and they will
	 * always be initialized the first time through the main loop
	 * before nreq is incremented.  So nreq cannot become non-zero
	 * without initializing f0 and size0, and they are in fact
	 * never used uninitialized.
	 */
	int uninitialized_var(size0);
	u32 uninitialized_var(f0);
	int ind;
	u8 op0 = 0;

	spin_lock_irqsave(&qp->sq.lock, flags);

	/* XXX check that state is OK to post send */

	ind = qp->sq.next_ind;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			mthca_err(dev, "SQ %06x full (%u head, %u tail,"
					" %d max, %d nreq)\n", qp->qpn,
					qp->sq.head, qp->sq.tail,
					qp->sq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_send_wqe(qp, ind);
		prev_wqe = qp->sq.last;
		qp->sq.last = wqe;

		((struct mthca_next_seg *) wqe)->nda_op = 0;
		((struct mthca_next_seg *) wqe)->ee_nds = 0;
		((struct mthca_next_seg *) wqe)->flags =
			((wr->send_flags & IB_SEND_SIGNALED) ?
			 cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) |
			((wr->send_flags & IB_SEND_SOLICITED) ?
			 cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0)   |
			cpu_to_be32(1);
		if (wr->opcode == IB_WR_SEND_WITH_IMM ||
		    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
			((struct mthca_next_seg *) wqe)->imm = wr->imm_data;

		wqe += sizeof (struct mthca_next_seg);
		size = sizeof (struct mthca_next_seg) / 16;

		switch (qp->transport) {
		case RC:
			switch (wr->opcode) {
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
					      wr->wr.atomic.rkey);
				wqe += sizeof (struct mthca_raddr_seg);

				set_atomic_seg(wqe, wr);
				wqe += sizeof (struct mthca_atomic_seg);
				size += (sizeof (struct mthca_raddr_seg) +
					 sizeof (struct mthca_atomic_seg)) / 16;
				break;

			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
			case IB_WR_RDMA_READ:
				set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
					      wr->wr.rdma.rkey);
				wqe  += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UC:
			switch (wr->opcode) {
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
					      wr->wr.rdma.rkey);
				wqe  += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UD:
			set_tavor_ud_seg(wqe, wr);
			wqe  += sizeof (struct mthca_tavor_ud_seg);
			size += sizeof (struct mthca_tavor_ud_seg) / 16;
			break;

		case MLX:
			err = build_mlx_header(dev, to_msqp(qp), ind, wr,
					       wqe - sizeof (struct mthca_next_seg),
					       wqe);
			if (err) {
				*bad_wr = wr;
				goto out;
			}
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
			break;
		}

		if (wr->num_sge > qp->sq.max_gs) {
			mthca_err(dev, "too many gathers\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			mthca_set_data_seg(wqe, wr->sg_list + i);
			wqe  += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		/* Add one more inline data segment for ICRC */
		if (qp->transport == MLX) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32((1 << 31) | 4);
			((u32 *) wqe)[1] = 0;
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		qp->wrid[ind + qp->rq.max] = wr->wr_id;

		if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) {
			mthca_err(dev, "opcode invalid\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		((struct mthca_next_seg *) prev_wqe)->nda_op =
			cpu_to_be32(((ind << qp->sq.wqe_shift) +
				     qp->send_wqe_offset) |
				    mthca_opcode[wr->opcode]);
		wmb();
		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32((nreq ? 0 : MTHCA_NEXT_DBD) | size |
				    ((wr->send_flags & IB_SEND_FENCE) ?
				    MTHCA_NEXT_FENCE : 0));

		if (!nreq) {
			size0 = size;
			op0   = mthca_opcode[wr->opcode];
			f0    = wr->send_flags & IB_SEND_FENCE ?
				MTHCA_SEND_DOORBELL_FENCE : 0;
		}

		++ind;
		if (unlikely(ind >= qp->sq.max))
			ind -= qp->sq.max;
	}

out:
	if (likely(nreq)) {
		wmb();

		mthca_write64(((qp->sq.next_ind << qp->sq.wqe_shift) +
			       qp->send_wqe_offset) | f0 | op0,
			      (qp->qpn << 8) | size0,
			      dev->kar + MTHCA_SEND_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));

		/*
		 * Make sure doorbells don't leak out of SQ spinlock
		 * and reach the HCA out of order:
		 */
		mmiowb();
	}

	qp->sq.next_ind = ind;
	qp->sq.head    += nreq;

	spin_unlock_irqrestore(&qp->sq.lock, flags);
	return err;
}

int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			     struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	int size;
	/*
	 * size0 is only used if nreq != 0, and it will always be
	 * initialized the first time through the main loop before
	 * nreq is incremented.  So nreq cannot become non-zero
	 * without initializing size0, and it is in fact never used
	 * uninitialized.
	 */
	int uninitialized_var(size0);
	int ind;
	void *wqe;
	void *prev_wqe;

	spin_lock_irqsave(&qp->rq.lock, flags);

	/* XXX check that state is OK to post receive */

	ind = qp->rq.next_ind;

	for (nreq = 0; wr; wr = wr->next) {
		if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
			mthca_err(dev, "RQ %06x full (%u head, %u tail,"
					" %d max, %d nreq)\n", qp->qpn,
					qp->rq.head, qp->rq.tail,
					qp->rq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_recv_wqe(qp, ind);
		prev_wqe = qp->rq.last;
		qp->rq.last = wqe;

		((struct mthca_next_seg *) wqe)->nda_op = 0;
		((struct mthca_next_seg *) wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD);
		((struct mthca_next_seg *) wqe)->flags = 0;

		wqe += sizeof (struct mthca_next_seg);
		size = sizeof (struct mthca_next_seg) / 16;
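
The listing above (continued on the following pages) implements the kernel verbs entry points for Mellanox Tavor hardware; mthca_tavor_post_send() is what ib_post_send() ultimately dispatches to on such a device. Below is a minimal, hypothetical sketch, not part of mthca_qp.c, of how a kernel consumer of this era's verbs API might post a single signaled RDMA write; the queue pair, memory registration keys, and DMA addresses are assumed to have been set up elsewhere.

#include <linux/string.h>
#include <rdma/ib_verbs.h>

/* Hypothetical helper: post one signaled RDMA write on an RC QP. */
static int example_post_rdma_write(struct ib_qp *qp, u64 local_addr, u32 lkey,
				   u64 remote_addr, u32 rkey, u32 length)
{
	struct ib_sge sge;
	struct ib_send_wr wr;
	struct ib_send_wr *bad_wr;

	memset(&sge, 0, sizeof sge);
	sge.addr   = local_addr;	/* DMA address of the registered local buffer */
	sge.length = length;
	sge.lkey   = lkey;

	memset(&wr, 0, sizeof wr);
	wr.wr_id      = 1;			/* opaque cookie returned in the CQE */
	wr.sg_list    = &sge;
	wr.num_sge    = 1;
	wr.opcode     = IB_WR_RDMA_WRITE;
	wr.send_flags = IB_SEND_SIGNALED;	/* request a completion for this WR */
	wr.wr.rdma.remote_addr = remote_addr;
	wr.wr.rdma.rkey        = rkey;

	/* The IB core hands this to the device driver, e.g. mthca_tavor_post_send(). */
	return ib_post_send(qp, &wr, &bad_wr);
}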
