
📄 mthca_qp.c

📁 Kernel
💻 C
📖 Page 1 of 3
		goto err_out_free;

	atomic_inc(&pd->sqp_count);

	return 0;

 err_out_free:
	spin_lock_irq(&dev->qp_table.lock);
	mthca_array_clear(&dev->qp_table.qp, mqpn);
	spin_unlock_irq(&dev->qp_table.lock);

 err_out:
	dma_free_coherent(&dev->pdev->dev, sqp->header_buf_size,
			  sqp->header_buf, sqp->header_dma);

	return err;
}

void mthca_free_qp(struct mthca_dev *dev,
		   struct mthca_qp *qp)
{
	u8 status;
	int size;
	int i;

	spin_lock_irq(&dev->qp_table.lock);
	mthca_array_clear(&dev->qp_table.qp,
			  qp->qpn & (dev->limits.num_qps - 1));
	spin_unlock_irq(&dev->qp_table.lock);

	atomic_dec(&qp->refcount);
	wait_event(qp->wait, !atomic_read(&qp->refcount));

	if (qp->state != IB_QPS_RESET)
		mthca_MODIFY_QP(dev, MTHCA_TRANS_ANY2RST, qp->qpn, 0, NULL, 0, &status);

	mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn);
	if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
		mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn);

	mthca_free_mr(dev, &qp->mr);

	size = PAGE_ALIGN(qp->send_wqe_offset +
			  (qp->sq.max << qp->sq.wqe_shift));

	if (qp->is_direct) {
		pci_free_consistent(dev->pdev, size,
				    qp->queue.direct.buf,
				    pci_unmap_addr(&qp->queue.direct, mapping));
	} else {
		for (i = 0; i < size / PAGE_SIZE; ++i) {
			pci_free_consistent(dev->pdev, PAGE_SIZE,
					    qp->queue.page_list[i].buf,
					    pci_unmap_addr(&qp->queue.page_list[i],
							   mapping));
		}
	}

	kfree(qp->wrid);

	if (is_sqp(dev, qp)) {
		atomic_dec(&(to_mpd(qp->ibqp.pd)->sqp_count));
		dma_free_coherent(&dev->pdev->dev,
				  to_msqp(qp)->header_buf_size,
				  to_msqp(qp)->header_buf,
				  to_msqp(qp)->header_dma);
	} else
		mthca_free(&dev->qp_table.alloc, qp->qpn);
}

/* Create UD header for an MLX send and build a data segment for it */
static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
			    int ind, struct ib_send_wr *wr,
			    struct mthca_mlx_seg *mlx,
			    struct mthca_data_seg *data)
{
	int header_size;
	int err;

	ib_ud_header_init(256, /* assume a MAD */
			  sqp->ud_header.grh_present,
			  &sqp->ud_header);

	err = mthca_read_ah(dev, to_mah(wr->wr.ud.ah), &sqp->ud_header);
	if (err)
		return err;
	mlx->flags &= ~cpu_to_be32(MTHCA_NEXT_SOLICIT | 1);
	mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MTHCA_MLX_VL15 : 0) |
				  (sqp->ud_header.lrh.destination_lid == 0xffff ?
				   MTHCA_MLX_SLR : 0) |
				  (sqp->ud_header.lrh.service_level << 8));
	mlx->rlid = sqp->ud_header.lrh.destination_lid;
	mlx->vcrc = 0;

	switch (wr->opcode) {
	case IB_WR_SEND:
		sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
		sqp->ud_header.immediate_present = 0;
		break;
	case IB_WR_SEND_WITH_IMM:
		sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
		sqp->ud_header.immediate_present = 1;
		sqp->ud_header.immediate_data = wr->imm_data;
		break;
	default:
		return -EINVAL;
	}

	sqp->ud_header.lrh.virtual_lane    = !sqp->qp.ibqp.qp_num ? 15 : 0;
	if (sqp->ud_header.lrh.destination_lid == 0xffff)
		sqp->ud_header.lrh.source_lid = 0xffff;
	sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
	if (!sqp->qp.ibqp.qp_num)
		ib_get_cached_pkey(&dev->ib_dev, sqp->port,
				   sqp->pkey_index,
				   &sqp->ud_header.bth.pkey);
	else
		ib_get_cached_pkey(&dev->ib_dev, sqp->port,
				   wr->wr.ud.pkey_index,
				   &sqp->ud_header.bth.pkey);
	cpu_to_be16s(&sqp->ud_header.bth.pkey);
	sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
	sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
	sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ?
					       sqp->qkey : wr->wr.ud.remote_qkey);
	sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);

	header_size = ib_ud_header_pack(&sqp->ud_header,
					sqp->header_buf +
					ind * MTHCA_UD_HEADER_SIZE);

	data->byte_count = cpu_to_be32(header_size);
	data->lkey       = cpu_to_be32(to_mpd(sqp->qp.ibqp.pd)->ntmr.ibmr.lkey);
	data->addr       = cpu_to_be64(sqp->header_dma +
				       ind * MTHCA_UD_HEADER_SIZE);

	return 0;
}
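
/*
 * Illustrative sketch (not part of the driver): one hedged example of
 * how a consumer such as the MAD layer might reach the MLX path above
 * through the ordinary verbs entry point.  The function name and the
 * qp/ah/buf_dma/lkey objects are hypothetical placeholders; only
 * ib_post_send() and the ib_send_wr layout this file already relies on
 * are assumed real.
 */
static int example_post_mad(struct ib_qp *qp, struct ib_ah *ah,
			    u64 buf_dma, u32 lkey)
{
	struct ib_sge sge = {
		.addr   = buf_dma,
		.length = 256,	/* a MAD, matching the assumption in ib_ud_header_init() */
		.lkey   = lkey,
	};
	struct ib_send_wr wr = {
		.wr_id      = 1,
		.sg_list    = &sge,
		.num_sge    = 1,
		.opcode     = IB_WR_SEND,
		.send_flags = IB_SEND_SIGNALED,
	};
	struct ib_send_wr *bad_wr;

	wr.wr.ud.ah          = ah;
	wr.wr.ud.remote_qpn  = 0;	/* QP0 for an SMP */
	wr.wr.ud.remote_qkey = 0;
	wr.wr.ud.pkey_index  = 0;

	/* Lands in mthca_post_send() below, which calls build_mlx_header(). */
	return ib_post_send(qp, &wr, &bad_wr);
}
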
int mthca_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		    struct ib_send_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	void *wqe;
	void *prev_wqe;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	int size;
	int size0 = 0;
	u32 f0 = 0;
	int ind;
	u8 op0 = 0;

	static const u8 opcode[] = {
		[IB_WR_SEND]                 = MTHCA_OPCODE_SEND,
		[IB_WR_SEND_WITH_IMM]        = MTHCA_OPCODE_SEND_IMM,
		[IB_WR_RDMA_WRITE]           = MTHCA_OPCODE_RDMA_WRITE,
		[IB_WR_RDMA_WRITE_WITH_IMM]  = MTHCA_OPCODE_RDMA_WRITE_IMM,
		[IB_WR_RDMA_READ]            = MTHCA_OPCODE_RDMA_READ,
		[IB_WR_ATOMIC_CMP_AND_SWP]   = MTHCA_OPCODE_ATOMIC_CS,
		[IB_WR_ATOMIC_FETCH_AND_ADD] = MTHCA_OPCODE_ATOMIC_FA,
	};

	spin_lock_irqsave(&qp->lock, flags);

	/* XXX check that state is OK to post send */

	ind = qp->sq.next;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (qp->sq.cur + nreq >= qp->sq.max) {
			mthca_err(dev, "SQ full (%d posted, %d max, %d nreq)\n",
				  qp->sq.cur, qp->sq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_send_wqe(qp, ind);
		prev_wqe = qp->sq.last;
		qp->sq.last = wqe;

		((struct mthca_next_seg *) wqe)->nda_op = 0;
		((struct mthca_next_seg *) wqe)->ee_nds = 0;
		((struct mthca_next_seg *) wqe)->flags =
			((wr->send_flags & IB_SEND_SIGNALED) ?
			 cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) |
			((wr->send_flags & IB_SEND_SOLICITED) ?
			 cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0)   |
			cpu_to_be32(1);
		if (wr->opcode == IB_WR_SEND_WITH_IMM ||
		    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
			((struct mthca_next_seg *) wqe)->imm = wr->imm_data;

		wqe += sizeof (struct mthca_next_seg);
		size = sizeof (struct mthca_next_seg) / 16;

		switch (qp->transport) {
		case RC:
			switch (wr->opcode) {
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.atomic.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.atomic.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;

				wqe += sizeof (struct mthca_raddr_seg);

				if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
					((struct mthca_atomic_seg *) wqe)->swap_add =
						cpu_to_be64(wr->wr.atomic.swap);
					((struct mthca_atomic_seg *) wqe)->compare =
						cpu_to_be64(wr->wr.atomic.compare_add);
				} else {
					((struct mthca_atomic_seg *) wqe)->swap_add =
						cpu_to_be64(wr->wr.atomic.compare_add);
					((struct mthca_atomic_seg *) wqe)->compare = 0;
				}

				wqe += sizeof (struct mthca_atomic_seg);
				size += sizeof (struct mthca_raddr_seg) / 16 +
					sizeof (struct mthca_atomic_seg);
				break;

			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
			case IB_WR_RDMA_READ:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.rdma.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.rdma.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;

				wqe += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UD:
			((struct mthca_ud_seg *) wqe)->lkey =
				cpu_to_be32(to_mah(wr->wr.ud.ah)->key);
			((struct mthca_ud_seg *) wqe)->av_addr =
				cpu_to_be64(to_mah(wr->wr.ud.ah)->avdma);
			((struct mthca_ud_seg *) wqe)->dqpn =
				cpu_to_be32(wr->wr.ud.remote_qpn);
			((struct mthca_ud_seg *) wqe)->qkey =
				cpu_to_be32(wr->wr.ud.remote_qkey);

			wqe += sizeof (struct mthca_ud_seg);
			size += sizeof (struct mthca_ud_seg) / 16;
			break;

		case MLX:
			err = build_mlx_header(dev, to_msqp(qp), ind, wr,
					       wqe - sizeof (struct mthca_next_seg),
					       wqe);
			if (err) {
				*bad_wr = wr;
				goto out;
			}
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
			break;
		}

		if (wr->num_sge > qp->sq.max_gs) {
			mthca_err(dev, "too many gathers\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				cpu_to_be32(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				cpu_to_be64(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		/* Add one more inline data segment for ICRC */
		if (qp->transport == MLX) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32((1 << 31) | 4);
			((u32 *) wqe)[1] = 0;
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		qp->wrid[ind + qp->rq.max] = wr->wr_id;

		if (wr->opcode >= ARRAY_SIZE(opcode)) {
			mthca_err(dev, "opcode invalid\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		if (prev_wqe) {
			((struct mthca_next_seg *) prev_wqe)->nda_op =
				cpu_to_be32(((ind << qp->sq.wqe_shift) +
					     qp->send_wqe_offset) |
					    opcode[wr->opcode]);
			smp_wmb();
			((struct mthca_next_seg *) prev_wqe)->ee_nds =
				cpu_to_be32((size0 ? 0 : MTHCA_NEXT_DBD) | size);
		}

		if (!size0) {
			size0 = size;
			op0   = opcode[wr->opcode];
		}

		++ind;
		if (unlikely(ind >= qp->sq.max))
			ind -= qp->sq.max;
	}

out:
	if (nreq) {
		u32 doorbell[2];

		doorbell[0] = cpu_to_be32(((qp->sq.next << qp->sq.wqe_shift) +
					   qp->send_wqe_offset) | f0 | op0);
		doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0);

		wmb();

		mthca_write64(doorbell,
			      dev->kar + MTHCA_SEND_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	qp->sq.cur += nreq;
	qp->sq.next = ind;

	spin_unlock_irqrestore(&qp->lock, flags);
	return err;
}
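
/*
 * A hedged reading of the send doorbell rung above, with made-up
 * numbers: assume wqe_shift = 6 (64-byte stride), sq.next = 3,
 * send_wqe_offset = 0x400, op0 = MTHCA_OPCODE_SEND, and a first WQE of
 * one mthca_next_seg plus one mthca_data_seg (32 bytes = 2 chunks of
 * 16 bytes, so size0 = 2).  Then:
 *
 *	doorbell[0] = be32(((3 << 6) + 0x400) | f0 | MTHCA_OPCODE_SEND)
 *	doorbell[1] = be32((qpn << 8) | 2)
 *
 * i.e. word 0 carries the byte offset of the first new WQE plus the
 * fence bit and opcode, and word 1 carries the QP number and the first
 * WQE's size in 16-byte chunks; the wmb() makes the WQE contents
 * visible before the HCA is told to fetch them.
 */
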
int mthca_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		       struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	int size;
	int size0 = 0;
	int ind;
	void *wqe;
	void *prev_wqe;

	spin_lock_irqsave(&qp->lock, flags);

	/* XXX check that state is OK to post receive */

	ind = qp->rq.next;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (qp->rq.cur + nreq >= qp->rq.max) {
			mthca_err(dev, "RQ %06x full\n", qp->qpn);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_recv_wqe(qp, ind);
		prev_wqe = qp->rq.last;
		qp->rq.last = wqe;

		((struct mthca_next_seg *) wqe)->nda_op = 0;
		((struct mthca_next_seg *) wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD);
		((struct mthca_next_seg *) wqe)->flags =
			(wr->recv_flags & IB_RECV_SIGNALED) ?
			cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0;

		wqe += sizeof (struct mthca_next_seg);
		size = sizeof (struct mthca_next_seg) / 16;

		if (wr->num_sge > qp->rq.max_gs) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				cpu_to_be32(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				cpu_to_be64(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		qp->wrid[ind] = wr->wr_id;

		if (prev_wqe) {
			((struct mthca_next_seg *) prev_wqe)->nda_op =
				cpu_to_be32((ind << qp->rq.wqe_shift) | 1);
			smp_wmb();
			((struct mthca_next_seg *) prev_wqe)->ee_nds =
				cpu_to_be32(MTHCA_NEXT_DBD | size);
		}

		if (!size0)
			size0 = size;

		++ind;
		if (unlikely(ind >= qp->rq.max))
			ind -= qp->rq.max;
	}

out:
	if (nreq) {
		u32 doorbell[2];

		doorbell[0] = cpu_to_be32((qp->rq.next << qp->rq.wqe_shift) | size0);
		doorbell[1] = cpu_to_be32((qp->qpn << 8) | nreq);

		wmb();

		mthca_write64(doorbell,
			      dev->kar + MTHCA_RECEIVE_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	qp->rq.cur += nreq;
	qp->rq.next = ind;

	spin_unlock_irqrestore(&qp->lock, flags);
	return err;
}
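
/*
 * Illustrative sketch (not part of the driver): replenishing one
 * receive buffer through the matching verbs entry point.  The function
 * name and buf_dma/lkey are hypothetical placeholders; ib_post_recv()
 * and the ib_recv_wr fields used here are the same ones
 * mthca_post_receive() above consumes.
 */
static int example_post_recv(struct ib_qp *qp, u64 buf_dma, u32 lkey)
{
	struct ib_sge sge = {
		.addr   = buf_dma,
		.length = 2048,	/* assumed buffer size */
		.lkey   = lkey,
	};
	struct ib_recv_wr wr = {
		.wr_id      = 2,
		.sg_list    = &sge,
		.num_sge    = 1,
		.recv_flags = IB_RECV_SIGNALED,
	};
	struct ib_recv_wr *bad_wr;

	return ib_post_recv(qp, &wr, &bad_wr);
}
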
int mthca_free_err_wqe(struct mthca_qp *qp, int is_send,
		       int index, int *dbd, u32 *new_wqe)
{
	struct mthca_next_seg *next;

	if (is_send)
		next = get_send_wqe(qp, index);
	else
		next = get_recv_wqe(qp, index);

	*dbd = !!(next->ee_nds & cpu_to_be32(MTHCA_NEXT_DBD));
	if (next->ee_nds & cpu_to_be32(0x3f))
		*new_wqe = (next->nda_op & cpu_to_be32(~0x3f)) |
			(next->ee_nds & cpu_to_be32(0x3f));
	else
		*new_wqe = 0;

	return 0;
}

int __devinit mthca_init_qp_table(struct mthca_dev *dev)
{
	int err;
	u8 status;
	int i;

	spin_lock_init(&dev->qp_table.lock);

	/*
	 * We reserve 2 extra QPs per port for the special QPs.  The
	 * special QP for port 1 has to be even, so round up.
	 */
	dev->qp_table.sqp_start = (dev->limits.reserved_qps + 1) & ~1UL;
	err = mthca_alloc_init(&dev->qp_table.alloc,
			       dev->limits.num_qps,
			       (1 << 24) - 1,
			       dev->qp_table.sqp_start +
			       MTHCA_MAX_PORTS * 2);
	if (err)
		return err;

	err = mthca_array_init(&dev->qp_table.qp,
			       dev->limits.num_qps);
	if (err) {
		mthca_alloc_cleanup(&dev->qp_table.alloc);
		return err;
	}

	for (i = 0; i < 2; ++i) {
		err = mthca_CONF_SPECIAL_QP(dev, i ? IB_QPT_GSI : IB_QPT_SMI,
					    dev->qp_table.sqp_start + i * 2,
					    &status);
		if (err)
			goto err_out;
		if (status) {
			mthca_warn(dev, "CONF_SPECIAL_QP returned "
				   "status %02x, aborting.\n",
				   status);
			err = -EINVAL;
			goto err_out;
		}
	}
	return 0;

 err_out:
	for (i = 0; i < 2; ++i)
		mthca_CONF_SPECIAL_QP(dev, i, 0, &status);

	mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
	mthca_alloc_cleanup(&dev->qp_table.alloc);

	return err;
}

void __devexit mthca_cleanup_qp_table(struct mthca_dev *dev)
{
	int i;
	u8 status;

	for (i = 0; i < 2; ++i)
		mthca_CONF_SPECIAL_QP(dev, i, 0, &status);

	mthca_alloc_cleanup(&dev->qp_table.alloc);
}
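
/*
 * A hedged worked example of the special-QP numbering set up in
 * mthca_init_qp_table() above (values made up):
 *
 *	reserved_qps == 16:  sqp_start = (16 + 1) & ~1UL = 16
 *	reserved_qps == 17:  sqp_start = (17 + 1) & ~1UL = 18
 *
 * so sqp_start is always even, and the CONF_SPECIAL_QP loop places the
 * per-port SMI QPs at sqp_start and sqp_start + 1 and the GSI QPs at
 * sqp_start + 2 and sqp_start + 3.
 */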
