
📄 qp.c

📁 Linux kernel source
💻 C
📖 Page 1 of 4
	qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);

	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
		qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);
	else
		qp->sq_signal_bits = 0;

	qp->mqp.event = mlx4_ib_qp_event;

	return 0;

err_wrid:
	if (pd->uobject) {
		if (!init_attr->srq)
			mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context),
					      &qp->db);
	} else {
		kfree(qp->sq.wrid);
		kfree(qp->rq.wrid);
	}

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &qp->mtt);

err_buf:
	if (pd->uobject)
		ib_umem_release(qp->umem);
	else
		mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);

err_db:
	if (!pd->uobject && !init_attr->srq)
		mlx4_ib_db_free(dev, &qp->db);

err:
	return err;
}

static enum mlx4_qp_state to_mlx4_state(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:	return MLX4_QP_STATE_RST;
	case IB_QPS_INIT:	return MLX4_QP_STATE_INIT;
	case IB_QPS_RTR:	return MLX4_QP_STATE_RTR;
	case IB_QPS_RTS:	return MLX4_QP_STATE_RTS;
	case IB_QPS_SQD:	return MLX4_QP_STATE_SQD;
	case IB_QPS_SQE:	return MLX4_QP_STATE_SQER;
	case IB_QPS_ERR:	return MLX4_QP_STATE_ERR;
	default:		return -1;
	}
}

static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
{
	if (send_cq == recv_cq)
		spin_lock_irq(&send_cq->lock);
	else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
		spin_lock_irq(&send_cq->lock);
		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irq(&recv_cq->lock);
		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
	}
}

static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
{
	if (send_cq == recv_cq)
		spin_unlock_irq(&send_cq->lock);
	else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
		spin_unlock(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else {
		spin_unlock(&send_cq->lock);
		spin_unlock_irq(&recv_cq->lock);
	}
}

static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
			      int is_user)
{
	struct mlx4_ib_cq *send_cq, *recv_cq;

	if (qp->state != IB_QPS_RESET)
		if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state),
				   MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp))
			printk(KERN_WARNING "mlx4_ib: modify QP %06x to RESET failed.\n",
			       qp->mqp.qpn);

	send_cq = to_mcq(qp->ibqp.send_cq);
	recv_cq = to_mcq(qp->ibqp.recv_cq);

	mlx4_ib_lock_cqs(send_cq, recv_cq);

	if (!is_user) {
		__mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
				   qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
		if (send_cq != recv_cq)
			__mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
	}

	mlx4_qp_remove(dev->dev, &qp->mqp);

	mlx4_ib_unlock_cqs(send_cq, recv_cq);

	mlx4_qp_free(dev->dev, &qp->mqp);
	mlx4_mtt_cleanup(dev->dev, &qp->mtt);

	if (is_user) {
		if (!qp->ibqp.srq)
			mlx4_ib_db_unmap_user(to_mucontext(qp->ibqp.uobject->context),
					      &qp->db);
		ib_umem_release(qp->umem);
	} else {
		kfree(qp->sq.wrid);
		kfree(qp->rq.wrid);
		mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
		if (!qp->ibqp.srq)
			mlx4_ib_db_free(dev, &qp->db);
	}
}

struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(pd->device);
	struct mlx4_ib_sqp *sqp;
	struct mlx4_ib_qp *qp;
	int err;

	switch (init_attr->qp_type) {
	case IB_QPT_RC:
	case IB_QPT_UC:
	case IB_QPT_UD:
	{
		qp = kmalloc(sizeof *qp, GFP_KERNEL);
		if (!qp)
			return ERR_PTR(-ENOMEM);

		err = create_qp_common(dev, pd, init_attr, udata, 0, qp);
		if (err) {
			kfree(qp);
			return ERR_PTR(err);
		}

		qp->ibqp.qp_num = qp->mqp.qpn;

		break;
	}
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	{
		/* Userspace is not allowed to create special QPs: */
		if (pd->uobject)
			return ERR_PTR(-EINVAL);

		sqp = kmalloc(sizeof *sqp, GFP_KERNEL);
		if (!sqp)
			return ERR_PTR(-ENOMEM);

		qp = &sqp->qp;

		err = create_qp_common(dev, pd, init_attr, udata,
				       dev->dev->caps.sqp_start +
				       (init_attr->qp_type == IB_QPT_SMI ? 0 : 2) +
				       init_attr->port_num - 1,
				       qp);
		if (err) {
			kfree(sqp);
			return ERR_PTR(err);
		}

		qp->port	= init_attr->port_num;
		qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1;

		break;
	}
	default:
		/* Don't support raw QPs */
		return ERR_PTR(-EINVAL);
	}

	return &qp->ibqp;
}

int mlx4_ib_destroy_qp(struct ib_qp *qp)
{
	struct mlx4_ib_dev *dev = to_mdev(qp->device);
	struct mlx4_ib_qp *mqp = to_mqp(qp);

	if (is_qp0(dev, mqp))
		mlx4_CLOSE_PORT(dev->dev, mqp->port);

	destroy_qp_common(dev, mqp, !!qp->pd->uobject);

	if (is_sqp(dev, mqp))
		kfree(to_msqp(mqp));
	else
		kfree(mqp);

	return 0;
}

static int to_mlx4_st(enum ib_qp_type type)
{
	switch (type) {
	case IB_QPT_RC:		return MLX4_QP_ST_RC;
	case IB_QPT_UC:		return MLX4_QP_ST_UC;
	case IB_QPT_UD:		return MLX4_QP_ST_UD;
	case IB_QPT_SMI:
	case IB_QPT_GSI:	return MLX4_QP_ST_MLX;
	default:		return -1;
	}
}

static __be32 to_mlx4_access_flags(struct mlx4_ib_qp *qp, const struct ib_qp_attr *attr,
				   int attr_mask)
{
	u8 dest_rd_atomic;
	u32 access_flags;
	u32 hw_access_flags = 0;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		dest_rd_atomic = attr->max_dest_rd_atomic;
	else
		dest_rd_atomic = qp->resp_depth;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		access_flags = attr->qp_access_flags;
	else
		access_flags = qp->atomic_rd_en;

	if (!dest_rd_atomic)
		access_flags &= IB_ACCESS_REMOTE_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_READ)
		hw_access_flags |= MLX4_QP_BIT_RRE;
	if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
		hw_access_flags |= MLX4_QP_BIT_RAE;
	if (access_flags & IB_ACCESS_REMOTE_WRITE)
		hw_access_flags |= MLX4_QP_BIT_RWE;

	return cpu_to_be32(hw_access_flags);
}

static void store_sqp_attrs(struct mlx4_ib_sqp *sqp, const struct ib_qp_attr *attr,
			    int attr_mask)
{
	if (attr_mask & IB_QP_PKEY_INDEX)
		sqp->pkey_index = attr->pkey_index;
	if (attr_mask & IB_QP_QKEY)
		sqp->qkey = attr->qkey;
	if (attr_mask & IB_QP_SQ_PSN)
		sqp->send_psn = attr->sq_psn;
}

static void mlx4_set_sched(struct mlx4_qp_path *path, u8 port)
{
	path->sched_queue = (path->sched_queue & 0xbf) | ((port - 1) << 6);
}

static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
			 struct mlx4_qp_path *path, u8 port)
{
	path->grh_mylmc     = ah->src_path_bits & 0x7f;
	path->rlid	    = cpu_to_be16(ah->dlid);
	if (ah->static_rate) {
		path->static_rate = ah->static_rate + MLX4_STAT_RATE_OFFSET;
		while (path->static_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET &&
		       !(1 << path->static_rate & dev->dev->caps.stat_rate_support))
			--path->static_rate;
	} else
		path->static_rate = 0;
	path->counter_index = 0xff;

	if (ah->ah_flags & IB_AH_GRH) {
		if (ah->grh.sgid_index >= dev->dev->caps.gid_table_len[port]) {
			printk(KERN_ERR "sgid_index (%u) too large. max is %d\n",
			       ah->grh.sgid_index, dev->dev->caps.gid_table_len[port] - 1);
			return -1;
		}

		path->grh_mylmc |= 1 << 7;
		path->mgid_index = ah->grh.sgid_index;
		path->hop_limit  = ah->grh.hop_limit;
		path->tclass_flowlabel =
			cpu_to_be32((ah->grh.traffic_class << 20) |
				    (ah->grh.flow_label));
		memcpy(path->rgid, ah->grh.dgid.raw, 16);
	}

	path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
		((port - 1) << 6) | ((ah->sl & 0xf) << 2);

	return 0;
}

static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
			       const struct ib_qp_attr *attr, int attr_mask,
			       enum ib_qp_state cur_state, enum ib_qp_state new_state)
{
	struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx4_ib_qp *qp = to_mqp(ibqp);
	struct mlx4_qp_context *context;
	enum mlx4_qp_optpar optpar = 0;
	int sqd_event;
	int err = -EINVAL;

	context = kzalloc(sizeof *context, GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	context->flags = cpu_to_be32((to_mlx4_state(new_state) << 28) |
				     (to_mlx4_st(ibqp->qp_type) << 16));
	context->flags     |= cpu_to_be32(1 << 8); /* DE? */

	if (!(attr_mask & IB_QP_PATH_MIG_STATE))
		context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11);
	else {
		optpar |= MLX4_QP_OPTPAR_PM_STATE;
		switch (attr->path_mig_state) {
		case IB_MIG_MIGRATED:
			context->flags |= cpu_to_be32(MLX4_QP_PM_MIGRATED << 11);
			break;
		case IB_MIG_REARM:
			context->flags |= cpu_to_be32(MLX4_QP_PM_REARM << 11);
			break;
		case IB_MIG_ARMED:
			context->flags |= cpu_to_be32(MLX4_QP_PM_ARMED << 11);
			break;
		}
	}

	if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI ||
	    ibqp->qp_type == IB_QPT_UD)
		context->mtu_msgmax = (IB_MTU_4096 << 5) | 11;
	else if (attr_mask & IB_QP_PATH_MTU) {
		if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) {
			printk(KERN_ERR "path MTU (%u) is invalid\n",
			       attr->path_mtu);
			goto out;
		}
		context->mtu_msgmax = (attr->path_mtu << 5) | 31;
	}

	if (qp->rq.wqe_cnt)
		context->rq_size_stride = ilog2(qp->rq.wqe_cnt) << 3;
	context->rq_size_stride |= qp->rq.wqe_shift - 4;

	if (qp->sq.wqe_cnt)
		context->sq_size_stride = ilog2(qp->sq.wqe_cnt) << 3;
	context->sq_size_stride |= qp->sq.wqe_shift - 4;

	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		context->sq_size_stride |= !!qp->sq_no_prefetch << 7;

	if (qp->ibqp.uobject)
		context->usr_page = cpu_to_be32(to_mucontext(ibqp->uobject->context)->uar.index);
	else
		context->usr_page = cpu_to_be32(dev->priv_uar.index);

	if (attr_mask & IB_QP_DEST_QPN)
		context->remote_qpn = cpu_to_be32(attr->dest_qp_num);

	if (attr_mask & IB_QP_PORT) {
		if (cur_state == IB_QPS_SQD && new_state == IB_QPS_SQD &&
		    !(attr_mask & IB_QP_AV)) {
			mlx4_set_sched(&context->pri_path, attr->port_num);
			optpar |= MLX4_QP_OPTPAR_SCHED_QUEUE;
		}
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		context->pri_path.pkey_index = attr->pkey_index;
		optpar |= MLX4_QP_OPTPAR_PKEY_INDEX;
	}

	if (attr_mask & IB_QP_AV) {
		if (mlx4_set_path(dev, &attr->ah_attr, &context->pri_path,
				  attr_mask & IB_QP_PORT ? attr->port_num : qp->port))
			goto out;

		optpar |= (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH |
			   MLX4_QP_OPTPAR_SCHED_QUEUE);
	}

	if (attr_mask & IB_QP_TIMEOUT) {
		context->pri_path.ackto = attr->timeout << 3;
		optpar |= MLX4_QP_OPTPAR_ACK_TIMEOUT;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		if (attr->alt_port_num == 0 ||
		    attr->alt_port_num > dev->dev->caps.num_ports)
			goto out;

		if (attr->alt_pkey_index >=
		    dev->dev->caps.pkey_table_len[attr->alt_port_num])
			goto out;

		if (mlx4_set_path(dev, &attr->alt_ah_attr, &context->alt_path,
				  attr->alt_port_num))
			goto out;

		context->alt_path.pkey_index = attr->alt_pkey_index;
		context->alt_path.ackto = attr->alt_timeout << 3;
		optpar |= MLX4_QP_OPTPAR_ALT_ADDR_PATH;
	}

	context->pd	    = cpu_to_be32(to_mpd(ibqp->pd)->pdn);
	context->params1    = cpu_to_be32(MLX4_IB_ACK_REQ_FREQ << 28);

	if (attr_mask & IB_QP_RNR_RETRY) {
		context->params1 |= cpu_to_be32(attr->rnr_retry << 13);
		optpar |= MLX4_QP_OPTPAR_RNR_RETRY;
	}
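
Consumers never call the functions above directly; the IB core dispatches to them through the device's verbs table, so ib_create_qp() lands in mlx4_ib_create_qp(), ib_modify_qp() in __mlx4_ib_modify_qp(), and ib_destroy_qp() in mlx4_ib_destroy_qp(). Below is a minimal sketch of that path, assuming a PD and CQ created earlier with ib_alloc_pd()/ib_create_cq(); the function name example_create_rc_qp and the port/P_Key values are hypothetical placeholders, and error handling is trimmed.

#include <linux/err.h>
#include <linux/string.h>
#include <rdma/ib_verbs.h>

/*
 * Sketch only: create an RC QP and drive it RESET -> INIT through the
 * verbs layer.  "my_pd" and "my_cq" are assumed to exist already.
 */
static struct ib_qp *example_create_rc_qp(struct ib_pd *my_pd,
					  struct ib_cq *my_cq)
{
	struct ib_qp_init_attr init_attr = {
		.send_cq     = my_cq,
		.recv_cq     = my_cq,		 /* send and recv may share a CQ */
		.qp_type     = IB_QPT_RC,
		.sq_sig_type = IB_SIGNAL_ALL_WR, /* sets MLX4_WQE_CTRL_CQ_UPDATE above */
		.cap = {
			.max_send_wr  = 16,
			.max_recv_wr  = 16,
			.max_send_sge = 1,
			.max_recv_sge = 1,
		},
	};
	struct ib_qp_attr attr;
	struct ib_qp *qp;
	int ret;

	qp = ib_create_qp(my_pd, &init_attr);	/* dispatches to mlx4_ib_create_qp() */
	if (IS_ERR(qp))
		return qp;

	/*
	 * RESET -> INIT: every attribute supplied is flagged in the mask,
	 * mirroring the attr_mask tests in __mlx4_ib_modify_qp().
	 */
	memset(&attr, 0, sizeof attr);
	attr.qp_state        = IB_QPS_INIT;
	attr.pkey_index      = 0;		/* hypothetical P_Key index */
	attr.port_num        = 1;		/* hypothetical port */
	attr.qp_access_flags = IB_ACCESS_REMOTE_WRITE;

	ret = ib_modify_qp(qp, &attr,
			   IB_QP_STATE | IB_QP_PKEY_INDEX |
			   IB_QP_PORT | IB_QP_ACCESS_FLAGS);
	if (ret) {
		ib_destroy_qp(qp);		/* reaches mlx4_ib_destroy_qp() */
		return ERR_PTR(ret);
	}

	return qp;
}

The later INIT -> RTR -> RTS transitions would add address-vector, destination-QPN, and path-MTU attributes to the mask, which is what routes control into mlx4_set_path() and the path-MTU validation shown above.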
