
📄 mthca_qp.c

📁 linux-2.6.15.6
💻 C
📖 Page 1 of 4
	req_param = state_table[cur_state][new_state].req_param[qp->transport];
	opt_param = state_table[cur_state][new_state].opt_param[qp->transport];

	if ((req_param & attr_mask) != req_param) {
		mthca_dbg(dev, "QP transition "
			  "%d->%d missing req attr 0x%08x\n",
			  cur_state, new_state,
			  req_param & ~attr_mask);
		return -EINVAL;
	}

	if (attr_mask & ~(req_param | opt_param | IB_QP_STATE)) {
		mthca_dbg(dev, "QP transition (transport %d) "
			  "%d->%d has extra attr 0x%08x\n",
			  qp->transport,
			  cur_state, new_state,
			  attr_mask & ~(req_param | opt_param |
					IB_QP_STATE));
		return -EINVAL;
	}

	if ((attr_mask & IB_QP_PKEY_INDEX) &&
	     attr->pkey_index >= dev->limits.pkey_table_len) {
		mthca_dbg(dev, "PKey index (%u) too large. max is %d\n",
			  attr->pkey_index, dev->limits.pkey_table_len-1);
		return -EINVAL;
	}

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	qp_param = mailbox->buf;
	qp_context = &qp_param->context;
	memset(qp_param, 0, sizeof *qp_param);

	qp_context->flags      = cpu_to_be32((to_mthca_state(new_state) << 28) |
					     (to_mthca_st(qp->transport) << 16));
	qp_context->flags     |= cpu_to_be32(MTHCA_QP_BIT_DE);
	if (!(attr_mask & IB_QP_PATH_MIG_STATE))
		qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11);
	else {
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PM_STATE);
		switch (attr->path_mig_state) {
		case IB_MIG_MIGRATED:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11);
			break;
		case IB_MIG_REARM:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_REARM << 11);
			break;
		case IB_MIG_ARMED:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_ARMED << 11);
			break;
		}
	}

	/* leave tavor_sched_queue as 0 */

	if (qp->transport == MLX || qp->transport == UD)
		qp_context->mtu_msgmax = (IB_MTU_2048 << 5) | 11;
	else if (attr_mask & IB_QP_PATH_MTU)
		qp_context->mtu_msgmax = (attr->path_mtu << 5) | 31;

	if (mthca_is_memfree(dev)) {
		if (qp->rq.max)
			qp_context->rq_size_stride = long_log2(qp->rq.max) << 3;
		qp_context->rq_size_stride |= qp->rq.wqe_shift - 4;

		if (qp->sq.max)
			qp_context->sq_size_stride = long_log2(qp->sq.max) << 3;
		qp_context->sq_size_stride |= qp->sq.wqe_shift - 4;
	}

	/* leave arbel_sched_queue as 0 */

	if (qp->ibqp.uobject)
		qp_context->usr_page =
			cpu_to_be32(to_mucontext(qp->ibqp.uobject->context)->uar.index);
	else
		qp_context->usr_page = cpu_to_be32(dev->driver_uar.index);
	qp_context->local_qpn  = cpu_to_be32(qp->qpn);
	if (attr_mask & IB_QP_DEST_QPN) {
		qp_context->remote_qpn = cpu_to_be32(attr->dest_qp_num);
	}

	if (qp->transport == MLX)
		qp_context->pri_path.port_pkey |=
			cpu_to_be32(to_msqp(qp)->port << 24);
	else {
		if (attr_mask & IB_QP_PORT) {
			qp_context->pri_path.port_pkey |=
				cpu_to_be32(attr->port_num << 24);
			qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PORT_NUM);
		}
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		qp_context->pri_path.port_pkey |=
			cpu_to_be32(attr->pkey_index);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PKEY_INDEX);
	}

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp_context->pri_path.rnr_retry = attr->rnr_retry << 5;
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_RETRY);
	}

	if (attr_mask & IB_QP_AV) {
		qp_context->pri_path.g_mylmc     = attr->ah_attr.src_path_bits & 0x7f;
		qp_context->pri_path.rlid        = cpu_to_be16(attr->ah_attr.dlid);
		qp_context->pri_path.static_rate = !!attr->ah_attr.static_rate;
		if (attr->ah_attr.ah_flags & IB_AH_GRH) {
			qp_context->pri_path.g_mylmc |= 1 << 7;
			qp_context->pri_path.mgid_index = attr->ah_attr.grh.sgid_index;
			qp_context->pri_path.hop_limit = attr->ah_attr.grh.hop_limit;
			qp_context->pri_path.sl_tclass_flowlabel =
				cpu_to_be32((attr->ah_attr.sl << 28)                |
					    (attr->ah_attr.grh.traffic_class << 20) |
					    (attr->ah_attr.grh.flow_label));
			memcpy(qp_context->pri_path.rgid,
			       attr->ah_attr.grh.dgid.raw, 16);
		} else {
			qp_context->pri_path.sl_tclass_flowlabel =
				cpu_to_be32(attr->ah_attr.sl << 28);
		}
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH);
	}

	if (attr_mask & IB_QP_TIMEOUT) {
		qp_context->pri_path.ackto = attr->timeout << 3;
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ACK_TIMEOUT);
	}

	/* XXX alt_path */

	/* leave rdd as 0 */
	qp_context->pd         = cpu_to_be32(to_mpd(ibqp->pd)->pd_num);
	/* leave wqe_base as 0 (we always create an MR based at 0 for WQs) */
	qp_context->wqe_lkey   = cpu_to_be32(qp->mr.ibmr.lkey);
	qp_context->params1    = cpu_to_be32((MTHCA_ACK_REQ_FREQ << 28) |
					     (MTHCA_FLIGHT_LIMIT << 24) |
					     MTHCA_QP_BIT_SRE           |
					     MTHCA_QP_BIT_SWE           |
					     MTHCA_QP_BIT_SAE);
	if (qp->sq_policy == IB_SIGNAL_ALL_WR)
		qp_context->params1 |= cpu_to_be32(MTHCA_QP_BIT_SSC);
	if (attr_mask & IB_QP_RETRY_CNT) {
		qp_context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RETRY_COUNT);
	}
	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		qp_context->params1 |= cpu_to_be32(min(attr->max_rd_atomic ?
						       ffs(attr->max_rd_atomic) - 1 : 0,
						       7) << 21);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_SRA_MAX);
	}
	if (attr_mask & IB_QP_SQ_PSN)
		qp_context->next_send_psn = cpu_to_be32(attr->sq_psn);
	qp_context->cqn_snd = cpu_to_be32(to_mcq(ibqp->send_cq)->cqn);

	if (mthca_is_memfree(dev)) {
		qp_context->snd_wqe_base_l = cpu_to_be32(qp->send_wqe_offset);
		qp_context->snd_db_index   = cpu_to_be32(qp->sq.db_index);
	}

	if (attr_mask & IB_QP_ACCESS_FLAGS) {
		qp_context->params2 |=
			cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE ?
				    MTHCA_QP_BIT_RWE : 0);

		/*
		 * Only enable RDMA reads and atomics if we have
		 * responder resources set to a non-zero value.
		 */
		if (qp->resp_depth) {
			qp_context->params2 |=
				cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_READ ?
					    MTHCA_QP_BIT_RRE : 0);
			qp_context->params2 |=
				cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC ?
					    MTHCA_QP_BIT_RAE : 0);
		}

		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
							MTHCA_QP_OPTPAR_RRE |
							MTHCA_QP_OPTPAR_RAE);

		qp->atomic_rd_en = attr->qp_access_flags;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		u8 rra_max;

		if (qp->resp_depth && !attr->max_dest_rd_atomic) {
			/*
			 * Lowering our responder resources to zero.
			 * Turn off reads RDMA and atomics as responder.
			 * (RRE/RAE in params2 already zero)
			 */
			qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRE |
								MTHCA_QP_OPTPAR_RAE);
		}

		if (!qp->resp_depth && attr->max_dest_rd_atomic) {
			/*
			 * Increasing our responder resources from
			 * zero.  Turn on RDMA reads and atomics as
			 * appropriate.
			 */
			qp_context->params2 |=
				cpu_to_be32(qp->atomic_rd_en & IB_ACCESS_REMOTE_READ ?
					    MTHCA_QP_BIT_RRE : 0);
			qp_context->params2 |=
				cpu_to_be32(qp->atomic_rd_en & IB_ACCESS_REMOTE_ATOMIC ?
					    MTHCA_QP_BIT_RAE : 0);

			qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRE |
								MTHCA_QP_OPTPAR_RAE);
		}

		for (rra_max = 0;
		     1 << rra_max < attr->max_dest_rd_atomic &&
			     rra_max < dev->qp_table.rdb_shift;
		     ++rra_max)
			; /* nothing */

		qp_context->params2      |= cpu_to_be32(rra_max << 21);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRA_MAX);

		qp->resp_depth = attr->max_dest_rd_atomic;
	}

	qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RSC);

	if (ibqp->srq)
		qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RIC);

	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
		qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_TIMEOUT);
	}
	if (attr_mask & IB_QP_RQ_PSN)
		qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);

	qp_context->ra_buff_indx =
		cpu_to_be32(dev->qp_table.rdb_base +
			    ((qp->qpn & (dev->limits.num_qps - 1)) * MTHCA_RDB_ENTRY_SIZE <<
			     dev->qp_table.rdb_shift));

	qp_context->cqn_rcv = cpu_to_be32(to_mcq(ibqp->recv_cq)->cqn);

	if (mthca_is_memfree(dev))
		qp_context->rcv_db_index   = cpu_to_be32(qp->rq.db_index);

	if (attr_mask & IB_QP_QKEY) {
		qp_context->qkey = cpu_to_be32(attr->qkey);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_Q_KEY);
	}

	if (ibqp->srq)
		qp_context->srqn = cpu_to_be32(1 << 24 |
					       to_msrq(ibqp->srq)->srqn);

	err = mthca_MODIFY_QP(dev, state_table[cur_state][new_state].trans,
			      qp->qpn, 0, mailbox, 0, &status);
	if (status) {
		mthca_warn(dev, "modify QP %d returned status %02x.\n",
			   state_table[cur_state][new_state].trans, status);
		err = -EINVAL;
	}

	if (!err)
		qp->state = new_state;

	mthca_free_mailbox(dev, mailbox);

	if (is_sqp(dev, qp))
		store_attrs(to_msqp(qp), attr, attr_mask);

	/*
	 * If we moved QP0 to RTR, bring the IB link up; if we moved
	 * QP0 to RESET or ERROR, bring the link back down.
	 */
	if (is_qp0(dev, qp)) {
		if (cur_state != IB_QPS_RTR &&
		    new_state == IB_QPS_RTR)
			init_port(dev, to_msqp(qp)->port);

		if (cur_state != IB_QPS_RESET &&
		    cur_state != IB_QPS_ERR &&
		    (new_state == IB_QPS_RESET ||
		     new_state == IB_QPS_ERR))
			mthca_CLOSE_IB(dev, to_msqp(qp)->port, &status);
	}

	/*
	 * If we moved a kernel QP to RESET, clean up all old CQ
	 * entries and reinitialize the QP.
	 */
	if (!err && new_state == IB_QPS_RESET && !qp->ibqp.uobject) {
		mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn,
			       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
		if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
			mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn,
				       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);

		mthca_wq_init(&qp->sq);
		qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);

		mthca_wq_init(&qp->rq);
		qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);

		if (mthca_is_memfree(dev)) {
			*qp->sq.db = 0;
			*qp->rq.db = 0;
		}
	}

	return err;
}

static int mthca_max_data_size(struct mthca_dev *dev, struct mthca_qp *qp, int desc_sz)
{
	/*
	 * Calculate the maximum size of WQE s/g segments, excluding
	 * the next segment and other non-data segments.
	 */
	int max_data_size = desc_sz - sizeof (struct mthca_next_seg);

	switch (qp->transport) {
	case MLX:
		max_data_size -= 2 * sizeof (struct mthca_data_seg);
		break;

	case UD:
		if (mthca_is_memfree(dev))
			max_data_size -= sizeof (struct mthca_arbel_ud_seg);
		else
			max_data_size -= sizeof (struct mthca_tavor_ud_seg);
		break;

	default:
		max_data_size -= sizeof (struct mthca_raddr_seg);
		break;
	}

	return max_data_size;
}

static inline int mthca_max_inline_data(struct mthca_pd *pd, int max_data_size)
{
	/* We don't support inline data for kernel QPs (yet). */
	return pd->ibpd.uobject ? max_data_size - MTHCA_INLINE_HEADER_SIZE : 0;
}

static void mthca_adjust_qp_caps(struct mthca_dev *dev,
				 struct mthca_pd *pd,
				 struct mthca_qp *qp)
{
	int max_data_size = mthca_max_data_size(dev, qp,
						min(dev->limits.max_desc_sz,
						    1 << qp->sq.wqe_shift));

	qp->max_inline_data = mthca_max_inline_data(pd, max_data_size);

	qp->sq.max_gs = min_t(int, dev->limits.max_sg,
			      max_data_size / sizeof (struct mthca_data_seg));
	qp->rq.max_gs = min_t(int, dev->limits.max_sg,
			       (min(dev->limits.max_desc_sz, 1 << qp->rq.wqe_shift) -
				sizeof (struct mthca_next_seg)) /
			       sizeof (struct mthca_data_seg));
}

/*
 * Allocate and register buffer for WQEs.  qp->rq.max, sq.max,
 * rq.max_gs and sq.max_gs must all be assigned.
 * mthca_alloc_wqe_buf will calculate rq.wqe_shift and
 * sq.wqe_shift (as well as send_wqe_offset, is_direct, and
 * queue)
 */
static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
			       struct mthca_pd *pd,
			       struct mthca_qp *qp)
{
	int size;
	int err = -ENOMEM;

	size = sizeof (struct mthca_next_seg) +
		qp->rq.max_gs * sizeof (struct mthca_data_seg);

	if (size > dev->limits.max_desc_sz)
		return -EINVAL;

	for (qp->rq.wqe_shift = 6; 1 << qp->rq.wqe_shift < size;
	     qp->rq.wqe_shift++)
		; /* nothing */

	size = qp->sq.max_gs * sizeof (struct mthca_data_seg);
	switch (qp->transport) {
	case MLX:
		size += 2 * sizeof (struct mthca_data_seg);
		break;

	case UD:
		size += mthca_is_memfree(dev) ?
			sizeof (struct mthca_arbel_ud_seg) :
			sizeof (struct mthca_tavor_ud_seg);
		break;

	case UC:
		size += sizeof (struct mthca_raddr_seg);
		break;

	case RC:
		size += sizeof (struct mthca_raddr_seg);
		/*
		 * An atomic op will require an atomic segment, a
		 * remote address segment and one scatter entry.
		 */
		size = max_t(int, size,
			     sizeof (struct mthca_atomic_seg) +
			     sizeof (struct mthca_raddr_seg) +
			     sizeof (struct mthca_data_seg));
		break;

	default:
		break;
	}

	/* Make sure that we have enough space for a bind request */
	size = max_t(int, size, sizeof (struct mthca_bind_seg));

	size += sizeof (struct mthca_next_seg);

	if (size > dev->limits.max_desc_sz)
		return -EINVAL;

	for (qp->sq.wqe_shift = 6; 1 << qp->sq.wqe_shift < size;
	     qp->sq.wqe_shift++)
		; /* nothing */

	qp->send_wqe_offset = ALIGN(qp->rq.max << qp->rq.wqe_shift,
				    1 << qp->sq.wqe_shift);

	/*
	 * If this is a userspace QP, we don't actually have to
	 * allocate anything.  All we need is to calculate the WQE
	 * sizes and the send_wqe_offset, so we're done now.
	 */
	if (pd->ibpd.uobject)
		return 0;

	size = PAGE_ALIGN(qp->send_wqe_offset +
			  (qp->sq.max << qp->sq.wqe_shift));

	qp->wrid = kmalloc((qp->rq.max + qp->sq.max) * sizeof (u64),
			   GFP_KERNEL);
	if (!qp->wrid)
		goto err_out;

	err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_QP_SIZE,
			      &qp->queue, &qp->is_direct, pd, 0, &qp->mr);
	if (err)
		goto err_out;

	return 0;

err_out:
	kfree(qp->wrid);
	return err;
}

static void mthca_free_wqe_buf(struct mthca_dev *dev,
			       struct mthca_qp *qp)
{
	mthca_buf_free(dev, PAGE_ALIGN(qp->send_wqe_offset +
				       (qp->sq.max << qp->sq.wqe_shift)),
		       &qp->queue, qp->is_direct, &qp->mr);
	kfree(qp->wrid);
}

static int mthca_map_memfree(struct mthca_dev *dev,
			     struct mthca_qp *qp)
{
	int ret;

	if (mthca_is_memfree(dev)) {
		ret = mthca_table_get(dev, dev->qp_table.qp_table, qp->qpn);
		if (ret)
			return ret;

		ret = mthca_table_get(dev, dev->qp_table.eqp_table, qp->qpn);
		if (ret)
			goto err_qpc;

		ret = mthca_table_get(dev, dev->qp_table.rdb_table,
				      qp->qpn << dev->qp_table.rdb_shift);
		if (ret)
			goto err_eqpc;
	}

	return 0;

err_eqpc:
	mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);

err_qpc:
	mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);

	return ret;
}

static void mthca_unmap_memfree(struct mthca_dev *dev,
				struct mthca_qp *qp)
{
	mthca_table_put(dev, dev->qp_table.rdb_table,
			qp->qpn << dev->qp_table.rdb_shift);
	mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);
	mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);
}

static int mthca_alloc_memfree(struct mthca_dev *dev,
			       struct mthca_qp *qp)
{
	int ret = 0;

	if (mthca_is_memfree(dev)) {
		qp->rq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_RQ,
						 qp->qpn, &qp->rq.db);
		if (qp->rq.db_index < 0)
			return ret;

		qp->sq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SQ,
						 qp->qpn, &qp->sq.db);
		if (qp->sq.db_index < 0)
			mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
	}

	return ret;
}

static void mthca_free_memfree(struct mthca_dev *dev,
			       struct mthca_qp *qp)
{
	if (mthca_is_memfree(dev)) {
		mthca_free_db(dev, MTHCA_DB_TYPE_SQ, qp->sq.db_index);
		mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
	}
}

static int mthca_alloc_qp_common(struct mthca_dev *dev,
				 struct mthca_pd *pd,
				 struct mthca_cq *send_cq,
				 struct mthca_cq *recv_cq,
				 enum ib_sig_type send_policy,
				 struct mthca_qp *qp)
{
	int ret;
	int i;

	atomic_set(&qp->refcount, 1);
	init_waitqueue_head(&qp->wait);
	qp->state    	 = IB_QPS_RESET;
