mthca_qp.c

From the Linux 2.6.17.4 kernel source · C · 2,252 lines total · page 1/5

        qp_attr->qp_state            = to_ib_qp_state(mthca_state);
        qp_attr->cur_qp_state        = qp_attr->qp_state;
        qp_attr->path_mtu            = context->mtu_msgmax >> 5;
        qp_attr->path_mig_state      =
                to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
        qp_attr->qkey                = be32_to_cpu(context->qkey);
        qp_attr->rq_psn              = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff;
        qp_attr->sq_psn              = be32_to_cpu(context->next_send_psn) & 0xffffff;
        qp_attr->dest_qp_num         = be32_to_cpu(context->remote_qpn) & 0xffffff;
        qp_attr->qp_access_flags     =
                to_ib_qp_access_flags(be32_to_cpu(context->params2));
        qp_attr->cap.max_send_wr     = qp->sq.max;
        qp_attr->cap.max_recv_wr     = qp->rq.max;
        qp_attr->cap.max_send_sge    = qp->sq.max_gs;
        qp_attr->cap.max_recv_sge    = qp->rq.max_gs;
        qp_attr->cap.max_inline_data = qp->max_inline_data;

        if (qp->transport == RC || qp->transport == UC) {
                to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
                to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
        }

        qp_attr->pkey_index     = be32_to_cpu(context->pri_path.port_pkey) & 0x7f;
        qp_attr->alt_pkey_index = be32_to_cpu(context->alt_path.port_pkey) & 0x7f;

        /* qp_attr->en_sqd_async_notify is only applicable in modify qp */
        qp_attr->sq_draining = mthca_state == MTHCA_QP_STATE_DRAINING;

        qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7);

        qp_attr->max_dest_rd_atomic =
                1 << ((be32_to_cpu(context->params2) >> 21) & 0x7);

        qp_attr->min_rnr_timer      =
                (be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f;
        qp_attr->port_num           = qp_attr->ah_attr.port_num;
        qp_attr->timeout            = context->pri_path.ackto >> 3;
        qp_attr->retry_cnt          = (be32_to_cpu(context->params1) >> 16) & 0x7;
        qp_attr->rnr_retry          = context->pri_path.rnr_retry >> 5;
        qp_attr->alt_port_num       = qp_attr->alt_ah_attr.port_num;
        qp_attr->alt_timeout        = context->alt_path.ackto >> 3;
        qp_init_attr->cap           = qp_attr->cap;

out:
        mthca_free_mailbox(dev, mailbox);
        return err;
}

static int mthca_path_set(struct mthca_dev *dev, struct ib_ah_attr *ah,
                          struct mthca_qp_path *path, u8 port)
{
        path->g_mylmc     = ah->src_path_bits & 0x7f;
        path->rlid        = cpu_to_be16(ah->dlid);
        path->static_rate = mthca_get_rate(dev, ah->static_rate, port);

        if (ah->ah_flags & IB_AH_GRH) {
                if (ah->grh.sgid_index >= dev->limits.gid_table_len) {
                        mthca_dbg(dev, "sgid_index (%u) too large. max is %d\n",
                                  ah->grh.sgid_index, dev->limits.gid_table_len-1);
                        return -1;
                }

                path->g_mylmc   |= 1 << 7;
                path->mgid_index = ah->grh.sgid_index;
                path->hop_limit  = ah->grh.hop_limit;
                path->sl_tclass_flowlabel =
                        cpu_to_be32((ah->sl << 28)                |
                                    (ah->grh.traffic_class << 20) |
                                    (ah->grh.flow_label));
                memcpy(path->rgid, ah->grh.dgid.raw, 16);
        } else
                path->sl_tclass_flowlabel = cpu_to_be32(ah->sl << 28);

        return 0;
}

int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
{
        struct mthca_dev *dev = to_mdev(ibqp->device);
        struct mthca_qp *qp = to_mqp(ibqp);
        enum ib_qp_state cur_state, new_state;
        struct mthca_mailbox *mailbox;
        struct mthca_qp_param *qp_param;
        struct mthca_qp_context *qp_context;
        u32 sqd_event = 0;
        u8 status;
        int err;

        if (attr_mask & IB_QP_CUR_STATE) {
                cur_state = attr->cur_qp_state;
        } else {
                spin_lock_irq(&qp->sq.lock);
                spin_lock(&qp->rq.lock);
                cur_state = qp->state;
                spin_unlock(&qp->rq.lock);
                spin_unlock_irq(&qp->sq.lock);
        }

        new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

        if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) {
                mthca_dbg(dev, "Bad QP transition (transport %d) "
                          "%d->%d with attr 0x%08x\n",
                          qp->transport, cur_state, new_state,
                          attr_mask);
                return -EINVAL;
        }

        if ((attr_mask & IB_QP_PKEY_INDEX) &&
             attr->pkey_index >= dev->limits.pkey_table_len) {
                mthca_dbg(dev, "P_Key index (%u) too large. max is %d\n",
                          attr->pkey_index, dev->limits.pkey_table_len-1);
                return -EINVAL;
        }

        if ((attr_mask & IB_QP_PORT) &&
            (attr->port_num == 0 || attr->port_num > dev->limits.num_ports)) {
                mthca_dbg(dev, "Port number (%u) is invalid\n", attr->port_num);
                return -EINVAL;
        }

        if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
            attr->max_rd_atomic > dev->limits.max_qp_init_rdma) {
                mthca_dbg(dev, "Max rdma_atomic as initiator %u too large (max is %d)\n",
                          attr->max_rd_atomic, dev->limits.max_qp_init_rdma);
                return -EINVAL;
        }

        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
            attr->max_dest_rd_atomic > 1 << dev->qp_table.rdb_shift) {
                mthca_dbg(dev, "Max rdma_atomic as responder %u too large (max %d)\n",
                          attr->max_dest_rd_atomic, 1 << dev->qp_table.rdb_shift);
                return -EINVAL;
        }

        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);
        qp_param = mailbox->buf;
        qp_context = &qp_param->context;
        memset(qp_param, 0, sizeof *qp_param);

        qp_context->flags      = cpu_to_be32((to_mthca_state(new_state) << 28) |
                                             (to_mthca_st(qp->transport) << 16));
        qp_context->flags     |= cpu_to_be32(MTHCA_QP_BIT_DE);
        if (!(attr_mask & IB_QP_PATH_MIG_STATE))
                qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11);
        else {
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PM_STATE);
                switch (attr->path_mig_state) {
                case IB_MIG_MIGRATED:
                        qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11);
                        break;
                case IB_MIG_REARM:
                        qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_REARM << 11);
                        break;
                case IB_MIG_ARMED:
                        qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_ARMED << 11);
                        break;
                }
        }

        /* leave tavor_sched_queue as 0 */

        if (qp->transport == MLX || qp->transport == UD)
                qp_context->mtu_msgmax = (IB_MTU_2048 << 5) | 11;
        else if (attr_mask & IB_QP_PATH_MTU) {
                if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_2048) {
                        mthca_dbg(dev, "path MTU (%u) is invalid\n",
                                  attr->path_mtu);
                        return -EINVAL;
                }
                qp_context->mtu_msgmax = (attr->path_mtu << 5) | 31;
        }

        if (mthca_is_memfree(dev)) {
                if (qp->rq.max)
                        qp_context->rq_size_stride = long_log2(qp->rq.max) << 3;
                qp_context->rq_size_stride |= qp->rq.wqe_shift - 4;

                if (qp->sq.max)
                        qp_context->sq_size_stride = long_log2(qp->sq.max) << 3;
                qp_context->sq_size_stride |= qp->sq.wqe_shift - 4;
        }

        /* leave arbel_sched_queue as 0 */

        if (qp->ibqp.uobject)
                qp_context->usr_page =
                        cpu_to_be32(to_mucontext(qp->ibqp.uobject->context)->uar.index);
        else
                qp_context->usr_page = cpu_to_be32(dev->driver_uar.index);
        qp_context->local_qpn  = cpu_to_be32(qp->qpn);
        if (attr_mask & IB_QP_DEST_QPN) {
                qp_context->remote_qpn = cpu_to_be32(attr->dest_qp_num);
        }

        if (qp->transport == MLX)
                qp_context->pri_path.port_pkey |=
                        cpu_to_be32(qp->port << 24);
        else {
                if (attr_mask & IB_QP_PORT) {
                        qp_context->pri_path.port_pkey |=
                                cpu_to_be32(attr->port_num << 24);
                        qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PORT_NUM);
                }
        }

        if (attr_mask & IB_QP_PKEY_INDEX) {
                qp_context->pri_path.port_pkey |=
                        cpu_to_be32(attr->pkey_index);
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PKEY_INDEX);
        }

        if (attr_mask & IB_QP_RNR_RETRY) {
                qp_context->alt_path.rnr_retry = qp_context->pri_path.rnr_retry =
                        attr->rnr_retry << 5;
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_RETRY |
                                                        MTHCA_QP_OPTPAR_ALT_RNR_RETRY);
        }

        if (attr_mask & IB_QP_AV) {
                if (mthca_path_set(dev, &attr->ah_attr, &qp_context->pri_path,
                                   attr_mask & IB_QP_PORT ? attr->port_num : qp->port))
                        return -EINVAL;

                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH);
        }

        if (attr_mask & IB_QP_TIMEOUT) {
                qp_context->pri_path.ackto = attr->timeout << 3;
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ACK_TIMEOUT);
        }

        if (attr_mask & IB_QP_ALT_PATH) {
                if (attr->alt_pkey_index >= dev->limits.pkey_table_len) {
                        mthca_dbg(dev, "Alternate P_Key index (%u) too large. max is %d\n",
                                  attr->alt_pkey_index, dev->limits.pkey_table_len-1);
                        return -EINVAL;
                }

                if (attr->alt_port_num == 0 || attr->alt_port_num > dev->limits.num_ports) {
                        mthca_dbg(dev, "Alternate port number (%u) is invalid\n",
                                attr->alt_port_num);
                        return -EINVAL;
                }

                if (mthca_path_set(dev, &attr->alt_ah_attr, &qp_context->alt_path,
                                   attr->alt_ah_attr.port_num))
                        return -EINVAL;

                qp_context->alt_path.port_pkey |= cpu_to_be32(attr->alt_pkey_index |
                                                              attr->alt_port_num << 24);
                qp_context->alt_path.ackto = attr->alt_timeout << 3;
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ALT_ADDR_PATH);
        }

        /* leave rdd as 0 */
        qp_context->pd         = cpu_to_be32(to_mpd(ibqp->pd)->pd_num);
        /* leave wqe_base as 0 (we always create an MR based at 0 for WQs) */
        qp_context->wqe_lkey   = cpu_to_be32(qp->mr.ibmr.lkey);
        qp_context->params1    = cpu_to_be32((MTHCA_ACK_REQ_FREQ << 28) |
                                             (MTHCA_FLIGHT_LIMIT << 24) |
                                             MTHCA_QP_BIT_SWE);
        if (qp->sq_policy == IB_SIGNAL_ALL_WR)
                qp_context->params1 |= cpu_to_be32(MTHCA_QP_BIT_SSC);
        if (attr_mask & IB_QP_RETRY_CNT) {
                qp_context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RETRY_COUNT);
        }

        if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
                if (attr->max_rd_atomic) {
                        qp_context->params1 |=
                                cpu_to_be32(MTHCA_QP_BIT_SRE |
                                            MTHCA_QP_BIT_SAE);
                        qp_context->params1 |=
                                cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
                }
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_SRA_MAX);
        }

        if (attr_mask & IB_QP_SQ_PSN)
                qp_context->next_send_psn = cpu_to_be32(attr->sq_psn);
        qp_context->cqn_snd = cpu_to_be32(to_mcq(ibqp->send_cq)->cqn);

        if (mthca_is_memfree(dev)) {
                qp_context->snd_wqe_base_l = cpu_to_be32(qp->send_wqe_offset);
                qp_context->snd_db_index   = cpu_to_be32(qp->sq.db_index);
        }

        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
                if (attr->max_dest_rd_atomic)
                        qp_context->params2 |=
                                cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);

                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRA_MAX);
        }

        if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
                qp_context->params2      |= get_hw_access_flags(qp, attr, attr_mask);
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
                                                        MTHCA_QP_OPTPAR_RRE |
                                                        MTHCA_QP_OPTPAR_RAE);
        }

        qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RSC);

        if (ibqp->srq)
                qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RIC);

        if (attr_mask & IB_QP_MIN_RNR_TIMER) {
                qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_TIMEOUT);
        }
        if (attr_mask & IB_QP_RQ_PSN)
                qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);

        qp_context->ra_buff_indx =
                cpu_to_be32(dev->qp_table.rdb_base +
                            ((qp->qpn & (dev->limits.num_qps - 1)) * MTHCA_RDB_ENTRY_SIZE <<
                             dev->qp_table.rdb_shift));

        qp_context->cqn_rcv = cpu_to_be32(to_mcq(ibqp->recv_cq)->cqn);

        if (mthca_is_memfree(dev))
                qp_context->rcv_db_index   = cpu_to_be32(qp->rq.db_index);

        if (attr_mask & IB_QP_QKEY) {
                qp_context->qkey = cpu_to_be32(attr->qkey);
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_Q_KEY);
        }

        if (ibqp->srq)
                qp_context->srqn = cpu_to_be32(1 << 24 |
                                               to_msrq(ibqp->srq)->srqn);

        if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
            attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY &&
            attr->en_sqd_async_notify)
                sqd_event = 1 << 31;

        err = mthca_MODIFY_QP(dev, cur_state, new_state, qp->qpn, 0,
                              mailbox, sqd_event, &status);
        if (err)
                goto out;
        if (status) {
                mthca_warn(dev, "modify QP %d->%d returned status %02x.\n",
                           cur_state, new_state, status);
                err = -EINVAL;
                goto out;
        }

        qp->state = new_state;
        if (attr_mask & IB_QP_ACCESS_FLAGS)
                qp->atomic_rd_en = attr->qp_access_flags;
        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
                qp->resp_depth = attr->max_dest_rd_atomic;
        if (attr_mask & IB_QP_PORT)
                qp->port = attr->port_num;
        if (attr_mask & IB_QP_ALT_PATH)
                qp->alt_port = attr->alt_port_num;

        if (is_sqp(dev, qp))
                store_attrs(to_msqp(qp), attr, attr_mask);

        /*
         * If we moved QP0 to RTR, bring the IB link up; if we moved
         * QP0 to RESET or ERROR, bring the link back down.
         */
        if (is_qp0(dev, qp)) {
                if (cur_state != IB_QPS_RTR &&
                    new_state == IB_QPS_RTR)
                        init_port(dev, qp->port);

                if (cur_state != IB_QPS_RESET &&
                    cur_state != IB_QPS_ERR &&
                    (new_state == IB_QPS_RESET ||
                     new_state == IB_QPS_ERR))
                        mthca_CLOSE_IB(dev, qp->port, &status);
        }

        /*
         * If we moved a kernel QP to RESET, clean up all old CQ
         * entries and reinitialize the QP.
         */
        if (new_state == IB_QPS_RESET && !qp->ibqp.uobject) {
                mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn,
                               qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
                if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
                        mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn,
                                       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);

                mthca_wq_init(&qp->sq);
                qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);

                mthca_wq_init(&qp->rq);
                qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);

                if (mthca_is_memfree(dev)) {
                        *qp->sq.db = 0;
                        *qp->rq.db = 0;
                }
        }

out:
        mthca_free_mailbox(dev, mailbox);
        return err;
}

static int mthca_max_data_size(struct mthca_dev *dev, struct mthca_qp *qp, int desc_sz)
{
        /*
         * Calculate the maximum size of WQE s/g segments, excluding
         * the next segment and other non-data segments.
         */
        int max_data_size = desc_sz - sizeof (struct mthca_next_seg);

        switch (qp->transport) {
        case MLX:
                max_data_size -= 2 * sizeof (struct mthca_data_seg);
                break;

        case UD:
                if (mthca_is_memfree(dev))
                        max_data_size -= sizeof (struct mthca_arbel_ud_seg);
                else
                        max_data_size -= sizeof (struct mthca_tavor_ud_seg);
                break;

        default:
                max_data_size -= sizeof (struct mthca_raddr_seg);
                break;
        }

        return max_data_size;
}

static inline int mthca_max_inline_data(struct mthca_pd *pd, int max_data_size)
{
        /* We don't support inline data for kernel QPs (yet). */
        return pd->ibpd.uobject ? max_data_size - MTHCA_INLINE_HEADER_SIZE : 0;
}

static void mthca_adjust_qp_caps(struct mthca_dev *dev,
                                 struct mthca_pd *pd,
                                 struct mthca_qp *qp)
{
        int max_data_size = mthca_max_data_size(dev, qp,
                                                min(dev->limits.max_desc_sz,
                                                    1 << qp->sq.wqe_shift));
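For context on how this entry point gets used: mthca_modify_qp() is the mthca backend for the ib_modify_qp() verb. The sketch below shows how a consumer of that verb might drive an RC QP through the RESET -> INIT -> RTR -> RTS transitions that the code above validates (via ib_modify_qp_is_ok()) and translates into MODIFY_QP firmware commands. This is a minimal sketch against the 2.6.17-era rdma/ib_verbs.h API; the helper name bring_up_rc_qp and all concrete values (port 1, MTU, PSNs, DLID, retry counts) are illustrative placeholders, not anything defined in this file.

#include <linux/types.h>
#include <linux/string.h>
#include <rdma/ib_verbs.h>

/*
 * Hypothetical caller sketch (not from mthca_qp.c): each ib_modify_qp()
 * call below lands in mthca_modify_qp() above for an mthca device.
 */
static int bring_up_rc_qp(struct ib_qp *qp, u16 dlid, u32 remote_qpn,
                          u32 rq_psn, u32 sq_psn)
{
        struct ib_qp_attr attr;
        int err;

        /* RESET -> INIT: pick port/P_Key and enable remote access */
        memset(&attr, 0, sizeof attr);
        attr.qp_state        = IB_QPS_INIT;
        attr.pkey_index      = 0;
        attr.port_num        = 1;
        attr.qp_access_flags = IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ;
        err = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
                           IB_QP_PORT | IB_QP_ACCESS_FLAGS);
        if (err)
                return err;

        /* INIT -> RTR: describe the remote end and responder resources */
        memset(&attr, 0, sizeof attr);
        attr.qp_state           = IB_QPS_RTR;
        attr.path_mtu           = IB_MTU_1024;
        attr.dest_qp_num        = remote_qpn;
        attr.rq_psn             = rq_psn;
        attr.max_dest_rd_atomic = 4;    /* stored as fls(4 - 1) = 2 in params2 */
        attr.min_rnr_timer      = 12;
        attr.ah_attr.dlid       = dlid;
        attr.ah_attr.sl         = 0;
        attr.ah_attr.port_num   = 1;
        err = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_AV |
                           IB_QP_PATH_MTU | IB_QP_DEST_QPN | IB_QP_RQ_PSN |
                           IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
        if (err)
                return err;

        /* RTR -> RTS: timeouts, retries, and initiator depth */
        memset(&attr, 0, sizeof attr);
        attr.qp_state      = IB_QPS_RTS;
        attr.timeout       = 14;
        attr.retry_cnt     = 7;
        attr.rnr_retry     = 7;
        attr.sq_psn        = sq_psn;
        attr.max_rd_atomic = 4;
        return ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_TIMEOUT |
                            IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
                            IB_QP_SQ_PSN | IB_QP_MAX_QP_RD_ATOMIC);
}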

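A detail worth calling out in the code above: max_rd_atomic and max_dest_rd_atomic are stored in the QP context as 3-bit log2 fields. mthca_modify_qp() writes fls(n - 1) into bits 21..23 of params1/params2, which rounds n up to the next power of two, and mthca_query_qp() decodes the field back as 1 << field. The standalone userspace demo below round-trips that encoding; fls_equiv() is a stand-in for the kernel's fls() (highest set bit, 1-based, with fls(0) == 0), and the whole program is illustrative, not part of the driver.

#include <stdio.h>

/* Userspace equivalent of the kernel's fls(): 1-based highest set bit. */
static int fls_equiv(unsigned int x)
{
        int r = 0;

        while (x) {
                x >>= 1;
                r++;
        }
        return r;
}

int main(void)
{
        unsigned int n;

        for (n = 1; n <= 8; n++) {
                unsigned int field   = fls_equiv(n - 1); /* stored in params1/2 */
                unsigned int decoded = 1u << field;      /* what query_qp reports */

                printf("max_rd_atomic %u -> field %u -> decoded %u\n",
                       n, field, decoded);
        }
        return 0;
}

Running it shows the round-up behavior, e.g. a requested depth of 3 is stored as field 2 and reported back as 4, while exact powers of two (1, 2, 4, 8) survive the round trip unchanged.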