
📄 mthca_qp.c

📁 Linux kernel source
💻 C
📖 Page 1 of 5
}

static int mthca_max_data_size(struct mthca_dev *dev, struct mthca_qp *qp, int desc_sz)
{
	/*
	 * Calculate the maximum size of WQE s/g segments, excluding
	 * the next segment and other non-data segments.
	 */
	int max_data_size = desc_sz - sizeof (struct mthca_next_seg);

	switch (qp->transport) {
	case MLX:
		max_data_size -= 2 * sizeof (struct mthca_data_seg);
		break;

	case UD:
		if (mthca_is_memfree(dev))
			max_data_size -= sizeof (struct mthca_arbel_ud_seg);
		else
			max_data_size -= sizeof (struct mthca_tavor_ud_seg);
		break;

	default:
		max_data_size -= sizeof (struct mthca_raddr_seg);
		break;
	}

	return max_data_size;
}

static inline int mthca_max_inline_data(struct mthca_pd *pd, int max_data_size)
{
	/* We don't support inline data for kernel QPs (yet). */
	return pd->ibpd.uobject ? max_data_size - MTHCA_INLINE_HEADER_SIZE : 0;
}

static void mthca_adjust_qp_caps(struct mthca_dev *dev,
				 struct mthca_pd *pd,
				 struct mthca_qp *qp)
{
	int max_data_size = mthca_max_data_size(dev, qp,
						min(dev->limits.max_desc_sz,
						    1 << qp->sq.wqe_shift));

	qp->max_inline_data = mthca_max_inline_data(pd, max_data_size);

	qp->sq.max_gs = min_t(int, dev->limits.max_sg,
			      max_data_size / sizeof (struct mthca_data_seg));
	qp->rq.max_gs = min_t(int, dev->limits.max_sg,
			      (min(dev->limits.max_desc_sz, 1 << qp->rq.wqe_shift) -
			       sizeof (struct mthca_next_seg)) /
			      sizeof (struct mthca_data_seg));
}

/*
 * Allocate and register buffer for WQEs.  qp->rq.max, sq.max,
 * rq.max_gs and sq.max_gs must all be assigned.
 * mthca_alloc_wqe_buf will calculate rq.wqe_shift and
 * sq.wqe_shift (as well as send_wqe_offset, is_direct, and
 * queue)
 */
static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
			       struct mthca_pd *pd,
			       struct mthca_qp *qp)
{
	int size;
	int err = -ENOMEM;

	size = sizeof (struct mthca_next_seg) +
		qp->rq.max_gs * sizeof (struct mthca_data_seg);

	if (size > dev->limits.max_desc_sz)
		return -EINVAL;

	for (qp->rq.wqe_shift = 6; 1 << qp->rq.wqe_shift < size;
	     qp->rq.wqe_shift++)
		; /* nothing */

	size = qp->sq.max_gs * sizeof (struct mthca_data_seg);
	switch (qp->transport) {
	case MLX:
		size += 2 * sizeof (struct mthca_data_seg);
		break;

	case UD:
		size += mthca_is_memfree(dev) ?
			sizeof (struct mthca_arbel_ud_seg) :
			sizeof (struct mthca_tavor_ud_seg);
		break;

	case UC:
		size += sizeof (struct mthca_raddr_seg);
		break;

	case RC:
		size += sizeof (struct mthca_raddr_seg);
		/*
		 * An atomic op will require an atomic segment, a
		 * remote address segment and one scatter entry.
		 */
		size = max_t(int, size,
			     sizeof (struct mthca_atomic_seg) +
			     sizeof (struct mthca_raddr_seg) +
			     sizeof (struct mthca_data_seg));
		break;

	default:
		break;
	}

	/* Make sure that we have enough space for a bind request */
	size = max_t(int, size, sizeof (struct mthca_bind_seg));

	size += sizeof (struct mthca_next_seg);

	if (size > dev->limits.max_desc_sz)
		return -EINVAL;

	for (qp->sq.wqe_shift = 6; 1 << qp->sq.wqe_shift < size;
	     qp->sq.wqe_shift++)
		; /* nothing */

	qp->send_wqe_offset = ALIGN(qp->rq.max << qp->rq.wqe_shift,
				    1 << qp->sq.wqe_shift);

	/*
	 * If this is a userspace QP, we don't actually have to
	 * allocate anything.  All we need is to calculate the WQE
	 * sizes and the send_wqe_offset, so we're done now.
	 */
	if (pd->ibpd.uobject)
		return 0;

	size = PAGE_ALIGN(qp->send_wqe_offset +
			  (qp->sq.max << qp->sq.wqe_shift));

	qp->wrid = kmalloc((qp->rq.max + qp->sq.max) * sizeof (u64),
			   GFP_KERNEL);
	if (!qp->wrid)
		goto err_out;

	err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_QP_SIZE,
			      &qp->queue, &qp->is_direct, pd, 0, &qp->mr);
	if (err)
		goto err_out;

	return 0;

err_out:
	kfree(qp->wrid);
	return err;
}

static void mthca_free_wqe_buf(struct mthca_dev *dev,
			       struct mthca_qp *qp)
{
	mthca_buf_free(dev, PAGE_ALIGN(qp->send_wqe_offset +
				       (qp->sq.max << qp->sq.wqe_shift)),
		       &qp->queue, qp->is_direct, &qp->mr);
	kfree(qp->wrid);
}

static int mthca_map_memfree(struct mthca_dev *dev,
			     struct mthca_qp *qp)
{
	int ret;

	if (mthca_is_memfree(dev)) {
		ret = mthca_table_get(dev, dev->qp_table.qp_table, qp->qpn);
		if (ret)
			return ret;

		ret = mthca_table_get(dev, dev->qp_table.eqp_table, qp->qpn);
		if (ret)
			goto err_qpc;

		ret = mthca_table_get(dev, dev->qp_table.rdb_table,
				      qp->qpn << dev->qp_table.rdb_shift);
		if (ret)
			goto err_eqpc;
	}

	return 0;

err_eqpc:
	mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);

err_qpc:
	mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);

	return ret;
}

static void mthca_unmap_memfree(struct mthca_dev *dev,
				struct mthca_qp *qp)
{
	mthca_table_put(dev, dev->qp_table.rdb_table,
			qp->qpn << dev->qp_table.rdb_shift);
	mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);
	mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);
}

static int mthca_alloc_memfree(struct mthca_dev *dev,
			       struct mthca_qp *qp)
{
	if (mthca_is_memfree(dev)) {
		qp->rq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_RQ,
						 qp->qpn, &qp->rq.db);
		if (qp->rq.db_index < 0)
			return -ENOMEM;

		qp->sq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SQ,
						 qp->qpn, &qp->sq.db);
		if (qp->sq.db_index < 0) {
			mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
			return -ENOMEM;
		}
	}

	return 0;
}

static void mthca_free_memfree(struct mthca_dev *dev,
			       struct mthca_qp *qp)
{
	if (mthca_is_memfree(dev)) {
		mthca_free_db(dev, MTHCA_DB_TYPE_SQ, qp->sq.db_index);
		mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
	}
}

static int mthca_alloc_qp_common(struct mthca_dev *dev,
				 struct mthca_pd *pd,
				 struct mthca_cq *send_cq,
				 struct mthca_cq *recv_cq,
				 enum ib_sig_type send_policy,
				 struct mthca_qp *qp)
{
	int ret;
	int i;

	qp->refcount = 1;
	init_waitqueue_head(&qp->wait);
	mutex_init(&qp->mutex);
	qp->state        = IB_QPS_RESET;
	qp->atomic_rd_en = 0;
	qp->resp_depth   = 0;
	qp->sq_policy    = send_policy;
	mthca_wq_reset(&qp->sq);
	mthca_wq_reset(&qp->rq);

	spin_lock_init(&qp->sq.lock);
	spin_lock_init(&qp->rq.lock);

	ret = mthca_map_memfree(dev, qp);
	if (ret)
		return ret;

	ret = mthca_alloc_wqe_buf(dev, pd, qp);
	if (ret) {
		mthca_unmap_memfree(dev, qp);
		return ret;
	}

	mthca_adjust_qp_caps(dev, pd, qp);

	/*
	 * If this is a userspace QP, we're done now.  The doorbells
	 * will be allocated and buffers will be initialized in
	 * userspace.
	 */
	if (pd->ibpd.uobject)
		return 0;

	ret = mthca_alloc_memfree(dev, qp);
	if (ret) {
		mthca_free_wqe_buf(dev, qp);
		mthca_unmap_memfree(dev, qp);
		return ret;
	}

	if (mthca_is_memfree(dev)) {
		struct mthca_next_seg *next;
		struct mthca_data_seg *scatter;
		int size = (sizeof (struct mthca_next_seg) +
			    qp->rq.max_gs * sizeof (struct mthca_data_seg)) / 16;

		for (i = 0; i < qp->rq.max; ++i) {
			next = get_recv_wqe(qp, i);
			next->nda_op = cpu_to_be32(((i + 1) & (qp->rq.max - 1)) <<
						   qp->rq.wqe_shift);
			next->ee_nds = cpu_to_be32(size);

			for (scatter = (void *) (next + 1);
			     (void *) scatter < (void *) next + (1 << qp->rq.wqe_shift);
			     ++scatter)
				scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
		}

		for (i = 0; i < qp->sq.max; ++i) {
			next = get_send_wqe(qp, i);
			next->nda_op = cpu_to_be32((((i + 1) & (qp->sq.max - 1)) <<
						    qp->sq.wqe_shift) +
						   qp->send_wqe_offset);
		}
	}

	qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);
	qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);

	return 0;
}

static int mthca_set_qp_size(struct mthca_dev *dev, struct ib_qp_cap *cap,
			     struct mthca_pd *pd, struct mthca_qp *qp)
{
	int max_data_size = mthca_max_data_size(dev, qp, dev->limits.max_desc_sz);

	/* Sanity check QP size before proceeding */
	if (cap->max_send_wr     > dev->limits.max_wqes ||
	    cap->max_recv_wr     > dev->limits.max_wqes ||
	    cap->max_send_sge    > dev->limits.max_sg   ||
	    cap->max_recv_sge    > dev->limits.max_sg   ||
	    cap->max_inline_data > mthca_max_inline_data(pd, max_data_size))
		return -EINVAL;

	/*
	 * For MLX transport we need 2 extra S/G entries:
	 * one for the header and one for the checksum at the end
	 */
	if (qp->transport == MLX && cap->max_recv_sge + 2 > dev->limits.max_sg)
		return -EINVAL;

	if (mthca_is_memfree(dev)) {
		qp->rq.max = cap->max_recv_wr ?
			roundup_pow_of_two(cap->max_recv_wr) : 0;
		qp->sq.max = cap->max_send_wr ?
			roundup_pow_of_two(cap->max_send_wr) : 0;
	} else {
		qp->rq.max = cap->max_recv_wr;
		qp->sq.max = cap->max_send_wr;
	}

	qp->rq.max_gs = cap->max_recv_sge;
	qp->sq.max_gs = max_t(int, cap->max_send_sge,
			      ALIGN(cap->max_inline_data + MTHCA_INLINE_HEADER_SIZE,
				    MTHCA_INLINE_CHUNK_SIZE) /
			      sizeof (struct mthca_data_seg));

	return 0;
}

int mthca_alloc_qp(struct mthca_dev *dev,
		   struct mthca_pd *pd,
		   struct mthca_cq *send_cq,
		   struct mthca_cq *recv_cq,
		   enum ib_qp_type type,
		   enum ib_sig_type send_policy,
		   struct ib_qp_cap *cap,
		   struct mthca_qp *qp)
{
	int err;

	switch (type) {
	case IB_QPT_RC: qp->transport = RC; break;
	case IB_QPT_UC: qp->transport = UC; break;
	case IB_QPT_UD: qp->transport = UD; break;
	default: return -EINVAL;
	}

	err = mthca_set_qp_size(dev, cap, pd, qp);
	if (err)
		return err;

	qp->qpn = mthca_alloc(&dev->qp_table.alloc);
	if (qp->qpn == -1)
		return -ENOMEM;

	/* initialize port to zero for error-catching. */
	qp->port = 0;

	err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
				    send_policy, qp);
	if (err) {
		mthca_free(&dev->qp_table.alloc, qp->qpn);
		return err;
	}

	spin_lock_irq(&dev->qp_table.lock);
	mthca_array_set(&dev->qp_table.qp,
			qp->qpn & (dev->limits.num_qps - 1), qp);
	spin_unlock_irq(&dev->qp_table.lock);

	return 0;
}

static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
{
	if (send_cq == recv_cq)
		spin_lock_irq(&send_cq->lock);
	else if (send_cq->cqn < recv_cq->cqn) {
		spin_lock_irq(&send_cq->lock);
		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irq(&recv_cq->lock);
		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
	}
}

static void mthca_unlock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
{
	if (send_cq == recv_cq)
		spin_unlock_irq(&send_cq->lock);
	else if (send_cq->cqn < recv_cq->cqn) {
		spin_unlock(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else {
		spin_unlock(&send_cq->lock);
		spin_unlock_irq(&recv_cq->lock);
	}
}

int mthca_alloc_sqp(struct mthca_dev *dev,
		    struct mthca_pd *pd,
		    struct mthca_cq *send_cq,
		    struct mthca_cq *recv_cq,
		    enum ib_sig_type send_policy,
		    struct ib_qp_cap *cap,
		    int qpn,
		    int port,
		    struct mthca_sqp *sqp)
{
	u32 mqpn = qpn * 2 + dev->qp_table.sqp_start + port - 1;
	int err;

	sqp->qp.transport = MLX;
	err = mthca_set_qp_size(dev, cap, pd, &sqp->qp);
	if (err)
		return err;

	sqp->header_buf_size = sqp->qp.sq.max * MTHCA_UD_HEADER_SIZE;
	sqp->header_buf = dma_alloc_coherent(&dev->pdev->dev, sqp->header_buf_size,
					     &sqp->header_dma, GFP_KERNEL);
	if (!sqp->header_buf)
		return -ENOMEM;

	spin_lock_irq(&dev->qp_table.lock);
	if (mthca_array_get(&dev->qp_table.qp, mqpn))
		err = -EBUSY;
	else
		mthca_array_set(&dev->qp_table.qp, mqpn, sqp);
	spin_unlock_irq(&dev->qp_table.lock);

	if (err)
		goto err_out;

	sqp->qp.port      = port;
	sqp->qp.qpn       = mqpn;
	sqp->qp.transport = MLX;

	err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
				    send_policy, &sqp->qp);
	if (err)
		goto err_out_free;

	atomic_inc(&pd->sqp_count);
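
Note on the wqe_shift loops in mthca_alloc_wqe_buf() above: starting the shift at 6 (a 64-byte minimum stride) and incrementing until 1 << wqe_shift covers the descriptor size simply rounds the WQE stride up to the next power of two, with a 64-byte floor. The standalone userspace sketch below illustrates that behaviour only; it is not part of mthca_qp.c, and the helper name is hypothetical.

/*
 * Illustrative sketch of the stride calculation used for rq.wqe_shift and
 * sq.wqe_shift above.  Compile and run as ordinary userspace C.
 */
#include <stdio.h>

static int example_wqe_shift(int size)
{
	int shift;

	/* same loop shape as in mthca_alloc_wqe_buf() */
	for (shift = 6; 1 << shift < size; shift++)
		; /* nothing */

	return shift;
}

int main(void)
{
	/* e.g. a 72-byte descriptor needs a 128-byte (1 << 7) stride */
	printf("shift for 72-byte descriptor:  %d\n", example_wqe_shift(72));
	printf("shift for 128-byte descriptor: %d\n", example_wqe_shift(128));
	return 0;
}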
