
mthca_qp.c

Linux kernel source (drivers/infiniband/hw/mthca)
C
Page 1 of 5
        qp_attr->dest_qp_num         = be32_to_cpu(context->remote_qpn) & 0xffffff;
        qp_attr->qp_access_flags     =
                to_ib_qp_access_flags(be32_to_cpu(context->params2));

        if (qp->transport == RC || qp->transport == UC) {
                to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
                to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
                qp_attr->alt_pkey_index =
                        be32_to_cpu(context->alt_path.port_pkey) & 0x7f;
                qp_attr->alt_port_num   = qp_attr->alt_ah_attr.port_num;
        }

        qp_attr->pkey_index = be32_to_cpu(context->pri_path.port_pkey) & 0x7f;
        qp_attr->port_num   =
                (be32_to_cpu(context->pri_path.port_pkey) >> 24) & 0x3;

        /* qp_attr->en_sqd_async_notify is only applicable in modify qp */
        qp_attr->sq_draining = mthca_state == MTHCA_QP_STATE_DRAINING;

        qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7);

        qp_attr->max_dest_rd_atomic =
                1 << ((be32_to_cpu(context->params2) >> 21) & 0x7);

        qp_attr->min_rnr_timer      =
                (be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f;
        qp_attr->timeout            = context->pri_path.ackto >> 3;
        qp_attr->retry_cnt          = (be32_to_cpu(context->params1) >> 16) & 0x7;
        qp_attr->rnr_retry          = context->pri_path.rnr_retry >> 5;
        qp_attr->alt_timeout        = context->alt_path.ackto >> 3;

done:
        qp_attr->cur_qp_state        = qp_attr->qp_state;
        qp_attr->cap.max_send_wr     = qp->sq.max;
        qp_attr->cap.max_recv_wr     = qp->rq.max;
        qp_attr->cap.max_send_sge    = qp->sq.max_gs;
        qp_attr->cap.max_recv_sge    = qp->rq.max_gs;
        qp_attr->cap.max_inline_data = qp->max_inline_data;

        qp_init_attr->cap            = qp_attr->cap;

out:
        mthca_free_mailbox(dev, mailbox);
        return err;
}
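
/*
 * Layout sketch (added note, inferred from the shifts used in this file,
 * not from a hardware spec) of the big-endian port_pkey context word
 * decoded in mthca_query_qp() above and built in __mthca_modify_qp() below:
 *
 *      bits 31:24  port number   ((w >> 24) & 0x3 after be32_to_cpu)
 *      bits  6:0   P_Key index   (w & 0x7f)
 *
 * For example, port 1 with P_Key index 5 would be stored as
 * cpu_to_be32((1 << 24) | 5) = cpu_to_be32(0x01000005).
 */
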
static int mthca_path_set(struct mthca_dev *dev, const struct ib_ah_attr *ah,
                          struct mthca_qp_path *path, u8 port)
{
        path->g_mylmc     = ah->src_path_bits & 0x7f;
        path->rlid        = cpu_to_be16(ah->dlid);
        path->static_rate = mthca_get_rate(dev, ah->static_rate, port);

        if (ah->ah_flags & IB_AH_GRH) {
                if (ah->grh.sgid_index >= dev->limits.gid_table_len) {
                        mthca_dbg(dev, "sgid_index (%u) too large. max is %d\n",
                                  ah->grh.sgid_index, dev->limits.gid_table_len-1);
                        return -1;
                }

                path->g_mylmc   |= 1 << 7;
                path->mgid_index = ah->grh.sgid_index;
                path->hop_limit  = ah->grh.hop_limit;
                path->sl_tclass_flowlabel =
                        cpu_to_be32((ah->sl << 28)                |
                                    (ah->grh.traffic_class << 20) |
                                    (ah->grh.flow_label));
                memcpy(path->rgid, ah->grh.dgid.raw, 16);
        } else
                path->sl_tclass_flowlabel = cpu_to_be32(ah->sl << 28);

        return 0;
}
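
/*
 * Worked example (added illustration, not in the original file) of the
 * sl_tclass_flowlabel packing in mthca_path_set() above: for sl = 3,
 * traffic_class = 0x80 and flow_label = 0x12345,
 *
 *      (3 << 28) | (0x80 << 20) | 0x12345 = 0x38012345
 *
 * i.e. the SL occupies bits 31:28, the traffic class bits 27:20, and the
 * 20-bit flow label bits 19:0 of the big-endian word.
 */
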
static int __mthca_modify_qp(struct ib_qp *ibqp,
                             const struct ib_qp_attr *attr, int attr_mask,
                             enum ib_qp_state cur_state, enum ib_qp_state new_state)
{
        struct mthca_dev *dev = to_mdev(ibqp->device);
        struct mthca_qp *qp = to_mqp(ibqp);
        struct mthca_mailbox *mailbox;
        struct mthca_qp_param *qp_param;
        struct mthca_qp_context *qp_context;
        u32 sqd_event = 0;
        u8 status;
        int err = -EINVAL;

        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox)) {
                err = PTR_ERR(mailbox);
                goto out;
        }
        qp_param = mailbox->buf;
        qp_context = &qp_param->context;
        memset(qp_param, 0, sizeof *qp_param);

        qp_context->flags      = cpu_to_be32((to_mthca_state(new_state) << 28) |
                                             (to_mthca_st(qp->transport) << 16));
        qp_context->flags     |= cpu_to_be32(MTHCA_QP_BIT_DE);
        if (!(attr_mask & IB_QP_PATH_MIG_STATE))
                qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11);
        else {
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PM_STATE);
                switch (attr->path_mig_state) {
                case IB_MIG_MIGRATED:
                        qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11);
                        break;
                case IB_MIG_REARM:
                        qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_REARM << 11);
                        break;
                case IB_MIG_ARMED:
                        qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_ARMED << 11);
                        break;
                }
        }

        /* leave tavor_sched_queue as 0 */

        if (qp->transport == MLX || qp->transport == UD)
                qp_context->mtu_msgmax = (IB_MTU_2048 << 5) | 11;
        else if (attr_mask & IB_QP_PATH_MTU) {
                if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_2048) {
                        mthca_dbg(dev, "path MTU (%u) is invalid\n",
                                  attr->path_mtu);
                        goto out_mailbox;
                }
                qp_context->mtu_msgmax = (attr->path_mtu << 5) | 31;
        }

        if (mthca_is_memfree(dev)) {
                if (qp->rq.max)
                        qp_context->rq_size_stride = ilog2(qp->rq.max) << 3;
                qp_context->rq_size_stride |= qp->rq.wqe_shift - 4;

                if (qp->sq.max)
                        qp_context->sq_size_stride = ilog2(qp->sq.max) << 3;
                qp_context->sq_size_stride |= qp->sq.wqe_shift - 4;
        }

        /* leave arbel_sched_queue as 0 */

        if (qp->ibqp.uobject)
                qp_context->usr_page =
                        cpu_to_be32(to_mucontext(qp->ibqp.uobject->context)->uar.index);
        else
                qp_context->usr_page = cpu_to_be32(dev->driver_uar.index);
        qp_context->local_qpn  = cpu_to_be32(qp->qpn);
        if (attr_mask & IB_QP_DEST_QPN) {
                qp_context->remote_qpn = cpu_to_be32(attr->dest_qp_num);
        }

        if (qp->transport == MLX)
                qp_context->pri_path.port_pkey |=
                        cpu_to_be32(qp->port << 24);
        else {
                if (attr_mask & IB_QP_PORT) {
                        qp_context->pri_path.port_pkey |=
                                cpu_to_be32(attr->port_num << 24);
                        qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PORT_NUM);
                }
        }

        if (attr_mask & IB_QP_PKEY_INDEX) {
                qp_context->pri_path.port_pkey |=
                        cpu_to_be32(attr->pkey_index);
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PKEY_INDEX);
        }

        if (attr_mask & IB_QP_RNR_RETRY) {
                qp_context->alt_path.rnr_retry = qp_context->pri_path.rnr_retry =
                        attr->rnr_retry << 5;
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_RETRY |
                                                        MTHCA_QP_OPTPAR_ALT_RNR_RETRY);
        }

        if (attr_mask & IB_QP_AV) {
                if (mthca_path_set(dev, &attr->ah_attr, &qp_context->pri_path,
                                   attr_mask & IB_QP_PORT ? attr->port_num : qp->port))
                        goto out_mailbox;

                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH);
        }

        if (ibqp->qp_type == IB_QPT_RC &&
            cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
                u8 sched_queue = ibqp->uobject ? 0x2 : 0x1;

                if (mthca_is_memfree(dev))
                        qp_context->rlkey_arbel_sched_queue |= sched_queue;
                else
                        qp_context->tavor_sched_queue |= cpu_to_be32(sched_queue);

                qp_param->opt_param_mask |=
                        cpu_to_be32(MTHCA_QP_OPTPAR_SCHED_QUEUE);
        }

        if (attr_mask & IB_QP_TIMEOUT) {
                qp_context->pri_path.ackto = attr->timeout << 3;
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ACK_TIMEOUT);
        }

        if (attr_mask & IB_QP_ALT_PATH) {
                if (attr->alt_pkey_index >= dev->limits.pkey_table_len) {
                        mthca_dbg(dev, "Alternate P_Key index (%u) too large. max is %d\n",
                                  attr->alt_pkey_index, dev->limits.pkey_table_len-1);
                        goto out_mailbox;
                }

                if (attr->alt_port_num == 0 || attr->alt_port_num > dev->limits.num_ports) {
                        mthca_dbg(dev, "Alternate port number (%u) is invalid\n",
                                attr->alt_port_num);
                        goto out_mailbox;
                }

                if (mthca_path_set(dev, &attr->alt_ah_attr, &qp_context->alt_path,
                                   attr->alt_ah_attr.port_num))
                        goto out_mailbox;

                qp_context->alt_path.port_pkey |= cpu_to_be32(attr->alt_pkey_index |
                                                              attr->alt_port_num << 24);
                qp_context->alt_path.ackto = attr->alt_timeout << 3;
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ALT_ADDR_PATH);
        }

        /* leave rdd as 0 */
        qp_context->pd         = cpu_to_be32(to_mpd(ibqp->pd)->pd_num);
        /* leave wqe_base as 0 (we always create an MR based at 0 for WQs) */
        qp_context->wqe_lkey   = cpu_to_be32(qp->mr.ibmr.lkey);
        qp_context->params1    = cpu_to_be32((MTHCA_ACK_REQ_FREQ << 28) |
                                             (MTHCA_FLIGHT_LIMIT << 24) |
                                             MTHCA_QP_BIT_SWE);
        if (qp->sq_policy == IB_SIGNAL_ALL_WR)
                qp_context->params1 |= cpu_to_be32(MTHCA_QP_BIT_SSC);
        if (attr_mask & IB_QP_RETRY_CNT) {
                qp_context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RETRY_COUNT);
        }
        if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
                if (attr->max_rd_atomic) {
                        qp_context->params1 |=
                                cpu_to_be32(MTHCA_QP_BIT_SRE |
                                            MTHCA_QP_BIT_SAE);
                        qp_context->params1 |=
                                cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
                }
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_SRA_MAX);
        }
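
        /*
         * Added note, not in the original file: the fls() expression above
         * (and the matching one for max_dest_rd_atomic in params2 below)
         * stores log2 of the rd_atomic depth rounded up to a power of two.
         * E.g. max_rd_atomic = 5 gives fls(4) = 3, which mthca_query_qp()
         * reads back as 1 << 3 = 8.
         */
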
        if (attr_mask & IB_QP_SQ_PSN)
                qp_context->next_send_psn = cpu_to_be32(attr->sq_psn);
        qp_context->cqn_snd = cpu_to_be32(to_mcq(ibqp->send_cq)->cqn);

        if (mthca_is_memfree(dev)) {
                qp_context->snd_wqe_base_l = cpu_to_be32(qp->send_wqe_offset);
                qp_context->snd_db_index   = cpu_to_be32(qp->sq.db_index);
        }

        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
                if (attr->max_dest_rd_atomic)
                        qp_context->params2 |=
                                cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);

                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRA_MAX);
        }

        if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
                qp_context->params2      |= get_hw_access_flags(qp, attr, attr_mask);
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
                                                        MTHCA_QP_OPTPAR_RRE |
                                                        MTHCA_QP_OPTPAR_RAE);
        }

        qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RSC);

        if (ibqp->srq)
                qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RIC);

        if (attr_mask & IB_QP_MIN_RNR_TIMER) {
                qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_TIMEOUT);
        }
        if (attr_mask & IB_QP_RQ_PSN)
                qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);

        qp_context->ra_buff_indx =
                cpu_to_be32(dev->qp_table.rdb_base +
                            ((qp->qpn & (dev->limits.num_qps - 1)) * MTHCA_RDB_ENTRY_SIZE <<
                             dev->qp_table.rdb_shift));

        qp_context->cqn_rcv = cpu_to_be32(to_mcq(ibqp->recv_cq)->cqn);

        if (mthca_is_memfree(dev))
                qp_context->rcv_db_index   = cpu_to_be32(qp->rq.db_index);

        if (attr_mask & IB_QP_QKEY) {
                qp_context->qkey = cpu_to_be32(attr->qkey);
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_Q_KEY);
        }

        if (ibqp->srq)
                qp_context->srqn = cpu_to_be32(1 << 24 |
                                               to_msrq(ibqp->srq)->srqn);

        if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD  &&
            attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY               &&
            attr->en_sqd_async_notify)
                sqd_event = 1 << 31;

        err = mthca_MODIFY_QP(dev, cur_state, new_state, qp->qpn, 0,
                              mailbox, sqd_event, &status);
        if (err)
                goto out_mailbox;
        if (status) {
                mthca_warn(dev, "modify QP %d->%d returned status %02x.\n",
                           cur_state, new_state, status);
                err = -EINVAL;
                goto out_mailbox;
        }

        qp->state = new_state;
        if (attr_mask & IB_QP_ACCESS_FLAGS)
                qp->atomic_rd_en = attr->qp_access_flags;
        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
                qp->resp_depth = attr->max_dest_rd_atomic;
        if (attr_mask & IB_QP_PORT)
                qp->port = attr->port_num;
        if (attr_mask & IB_QP_ALT_PATH)
                qp->alt_port = attr->alt_port_num;

        if (is_sqp(dev, qp))
                store_attrs(to_msqp(qp), attr, attr_mask);

        /*
         * If we moved QP0 to RTR, bring the IB link up; if we moved
         * QP0 to RESET or ERROR, bring the link back down.
         */
        if (is_qp0(dev, qp)) {
                if (cur_state != IB_QPS_RTR &&
                    new_state == IB_QPS_RTR)
                        init_port(dev, qp->port);

                if (cur_state != IB_QPS_RESET &&
                    cur_state != IB_QPS_ERR &&
                    (new_state == IB_QPS_RESET ||
                     new_state == IB_QPS_ERR))
                        mthca_CLOSE_IB(dev, qp->port, &status);
        }

        /*
         * If we moved a kernel QP to RESET, clean up all old CQ
         * entries and reinitialize the QP.
         */
        if (new_state == IB_QPS_RESET && !qp->ibqp.uobject) {
                mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn,
                               qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
                if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
                        mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn, NULL);

                mthca_wq_reset(&qp->sq);
                qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);

                mthca_wq_reset(&qp->rq);
                qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);

                if (mthca_is_memfree(dev)) {
                        *qp->sq.db = 0;
                        *qp->rq.db = 0;
                }
        }

out_mailbox:
        mthca_free_mailbox(dev, mailbox);

out:
        return err;
}
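
/*
 * Added note, not in the original file: the firmware cannot take a QP
 * directly from RESET to ERR, so mthca_modify_qp() below first passes
 * through INIT using the minimal attributes here; dummy_init_attr_mask
 * lists, per QP type, the attributes that RESET->INIT requires.
 */
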
static const struct ib_qp_attr dummy_init_attr = { .port_num = 1 };

static const int dummy_init_attr_mask[] = {
        [IB_QPT_UD]  = (IB_QP_PKEY_INDEX                |
                        IB_QP_PORT                      |
                        IB_QP_QKEY),
        [IB_QPT_UC]  = (IB_QP_PKEY_INDEX                |
                        IB_QP_PORT                      |
                        IB_QP_ACCESS_FLAGS),
        [IB_QPT_RC]  = (IB_QP_PKEY_INDEX                |
                        IB_QP_PORT                      |
                        IB_QP_ACCESS_FLAGS),
        [IB_QPT_SMI] = (IB_QP_PKEY_INDEX                |
                        IB_QP_QKEY),
        [IB_QPT_GSI] = (IB_QP_PKEY_INDEX                |
                        IB_QP_QKEY),
};

int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
                    struct ib_udata *udata)
{
        struct mthca_dev *dev = to_mdev(ibqp->device);
        struct mthca_qp *qp = to_mqp(ibqp);
        enum ib_qp_state cur_state, new_state;
        int err = -EINVAL;

        mutex_lock(&qp->mutex);
        if (attr_mask & IB_QP_CUR_STATE) {
                cur_state = attr->cur_qp_state;
        } else {
                spin_lock_irq(&qp->sq.lock);
                spin_lock(&qp->rq.lock);
                cur_state = qp->state;
                spin_unlock(&qp->rq.lock);
                spin_unlock_irq(&qp->sq.lock);
        }

        new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

        if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) {
                mthca_dbg(dev, "Bad QP transition (transport %d) "
                          "%d->%d with attr 0x%08x\n",
                          qp->transport, cur_state, new_state,
                          attr_mask);
                goto out;
        }

        if ((attr_mask & IB_QP_PKEY_INDEX) &&
             attr->pkey_index >= dev->limits.pkey_table_len) {
                mthca_dbg(dev, "P_Key index (%u) too large. max is %d\n",
                          attr->pkey_index, dev->limits.pkey_table_len-1);
                goto out;
        }

        if ((attr_mask & IB_QP_PORT) &&
            (attr->port_num == 0 || attr->port_num > dev->limits.num_ports)) {
                mthca_dbg(dev, "Port number (%u) is invalid\n", attr->port_num);
                goto out;
        }

        if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
            attr->max_rd_atomic > dev->limits.max_qp_init_rdma) {
                mthca_dbg(dev, "Max rdma_atomic as initiator %u too large (max is %d)\n",
                          attr->max_rd_atomic, dev->limits.max_qp_init_rdma);
                goto out;
        }

        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
            attr->max_dest_rd_atomic > 1 << dev->qp_table.rdb_shift) {
                mthca_dbg(dev, "Max rdma_atomic as responder %u too large (max %d)\n",
                          attr->max_dest_rd_atomic, 1 << dev->qp_table.rdb_shift);
                goto out;
        }

        if (cur_state == new_state && cur_state == IB_QPS_RESET) {
                err = 0;
                goto out;
        }

        if (cur_state == IB_QPS_RESET && new_state == IB_QPS_ERR) {
                err = __mthca_modify_qp(ibqp, &dummy_init_attr,
                                        dummy_init_attr_mask[ibqp->qp_type],
                                        IB_QPS_RESET, IB_QPS_INIT);
                if (err)
                        goto out;
                cur_state = IB_QPS_INIT;
        }

        err = __mthca_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);

out:
        mutex_unlock(&qp->mutex);
        return err;
}
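
/*
 * Usage sketch (hypothetical, not part of mthca_qp.c): a kernel consumer
 * never calls mthca_modify_qp() directly; it goes through ib_modify_qp()
 * from <rdma/ib_verbs.h>, which dispatches to this driver. The port
 * number (1), P_Key index (0) and access flags below are illustrative
 * assumptions, and the helper name is invented for this example.
 */
static int example_rc_qp_to_init(struct ib_qp *qp)
{
        struct ib_qp_attr attr = {
                .qp_state        = IB_QPS_INIT,
                .pkey_index      = 0,
                .port_num        = 1,
                .qp_access_flags = IB_ACCESS_REMOTE_WRITE,
        };

        /* RESET->INIT on an RC QP requires exactly these four attributes. */
        return ib_modify_qp(qp, &attr,
                            IB_QP_STATE | IB_QP_PKEY_INDEX |
                            IB_QP_PORT | IB_QP_ACCESS_FLAGS);
}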
