📄 qp.c | 📁 Linux kernel source | 💻 C | 📖 Page 1 of 4
         * Need a barrier here before writing the byte_count field to
         * make sure that all the data is visible before the
         * byte_count field is set.  Otherwise, if the segment begins
         * a new cacheline, the HCA prefetcher could grab the 64-byte
         * chunk and get a valid (!= * 0xffffffff) byte count but
         * stale data, and end up sending the wrong data.
         */
        wmb();

        dseg->byte_count = cpu_to_be32(sg->length);
}

static void __set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
{
        dseg->byte_count = cpu_to_be32(sg->length);
        dseg->lkey       = cpu_to_be32(sg->lkey);
        dseg->addr       = cpu_to_be64(sg->addr);
}

int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                      struct ib_send_wr **bad_wr)
{
        struct mlx4_ib_qp *qp = to_mqp(ibqp);
        void *wqe;
        struct mlx4_wqe_ctrl_seg *ctrl;
        struct mlx4_wqe_data_seg *dseg;
        unsigned long flags;
        int nreq;
        int err = 0;
        int ind;
        int size;
        int i;

        spin_lock_irqsave(&qp->sq.lock, flags);

        ind = qp->sq.head;

        for (nreq = 0; wr; ++nreq, wr = wr->next) {
                if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
                        err = -ENOMEM;
                        *bad_wr = wr;
                        goto out;
                }

                if (unlikely(wr->num_sge > qp->sq.max_gs)) {
                        err = -EINVAL;
                        *bad_wr = wr;
                        goto out;
                }

                ctrl = wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
                qp->sq.wrid[ind & (qp->sq.wqe_cnt - 1)] = wr->wr_id;

                ctrl->srcrb_flags =
                        (wr->send_flags & IB_SEND_SIGNALED ?
                         cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE) : 0) |
                        (wr->send_flags & IB_SEND_SOLICITED ?
                         cpu_to_be32(MLX4_WQE_CTRL_SOLICITED) : 0) |
                        qp->sq_signal_bits;

                if (wr->opcode == IB_WR_SEND_WITH_IMM ||
                    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
                        ctrl->imm = wr->imm_data;
                else
                        ctrl->imm = 0;

                wqe += sizeof *ctrl;
                size = sizeof *ctrl / 16;

                switch (ibqp->qp_type) {
                case IB_QPT_RC:
                case IB_QPT_UC:
                        switch (wr->opcode) {
                        case IB_WR_ATOMIC_CMP_AND_SWP:
                        case IB_WR_ATOMIC_FETCH_AND_ADD:
                                set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
                                              wr->wr.atomic.rkey);
                                wqe  += sizeof (struct mlx4_wqe_raddr_seg);

                                set_atomic_seg(wqe, wr);
                                wqe  += sizeof (struct mlx4_wqe_atomic_seg);

                                size += (sizeof (struct mlx4_wqe_raddr_seg) +
                                         sizeof (struct mlx4_wqe_atomic_seg)) / 16;

                                break;

                        case IB_WR_RDMA_READ:
                        case IB_WR_RDMA_WRITE:
                        case IB_WR_RDMA_WRITE_WITH_IMM:
                                set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
                                              wr->wr.rdma.rkey);
                                wqe  += sizeof (struct mlx4_wqe_raddr_seg);
                                size += sizeof (struct mlx4_wqe_raddr_seg) / 16;
                                break;

                        default:
                                /* No extra segments required for sends */
                                break;
                        }
                        break;

                case IB_QPT_UD:
                        set_datagram_seg(wqe, wr);
                        wqe  += sizeof (struct mlx4_wqe_datagram_seg);
                        size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
                        break;

                case IB_QPT_SMI:
                case IB_QPT_GSI:
                        err = build_mlx_header(to_msqp(qp), wr, ctrl);
                        if (err < 0) {
                                *bad_wr = wr;
                                goto out;
                        }
                        wqe  += err;
                        size += err / 16;

                        err = 0;
                        break;

                default:
                        break;
                }

                /*
                 * Write data segments in reverse order, so as to
                 * overwrite cacheline stamp last within each
                 * cacheline.  This avoids issues with WQE
                 * prefetching.
                 */
                dseg = wqe;
                dseg += wr->num_sge - 1;
                size += wr->num_sge * (sizeof (struct mlx4_wqe_data_seg) / 16);

                /* Add one more inline data segment for ICRC for MLX sends */
                if (unlikely(qp->ibqp.qp_type == IB_QPT_SMI ||
                             qp->ibqp.qp_type == IB_QPT_GSI)) {
                        set_mlx_icrc_seg(dseg + 1);
                        size += sizeof (struct mlx4_wqe_data_seg) / 16;
                }

                for (i = wr->num_sge - 1; i >= 0; --i, --dseg)
                        set_data_seg(dseg, wr->sg_list + i);

                ctrl->fence_size = (wr->send_flags & IB_SEND_FENCE ?
                                    MLX4_WQE_CTRL_FENCE : 0) | size;

                /*
                 * Make sure descriptor is fully written before
                 * setting ownership bit (because HW can start
                 * executing as soon as we do).
                 */
                wmb();

                if (wr->opcode < 0 || wr->opcode >= ARRAY_SIZE(mlx4_ib_opcode)) {
                        err = -EINVAL;
                        goto out;
                }

                ctrl->owner_opcode = mlx4_ib_opcode[wr->opcode] |
                        (ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0);

                /*
                 * We can improve latency by not stamping the last
                 * send queue WQE until after ringing the doorbell, so
                 * only stamp here if there are still more WQEs to post.
                 */
                if (wr->next)
                        stamp_send_wqe(qp, (ind + qp->sq_spare_wqes) &
                                       (qp->sq.wqe_cnt - 1));

                ++ind;
        }

out:
        if (likely(nreq)) {
                qp->sq.head += nreq;

                /*
                 * Make sure that descriptors are written before
                 * doorbell record.
                 */
                wmb();

                writel(qp->doorbell_qpn,
                       to_mdev(ibqp->device)->uar_map + MLX4_SEND_DOORBELL);

                /*
                 * Make sure doorbells don't leak out of SQ spinlock
                 * and reach the HCA out of order.
                 */
                mmiowb();

                stamp_send_wqe(qp, (ind + qp->sq_spare_wqes - 1) &
                               (qp->sq.wqe_cnt - 1));
        }

        spin_unlock_irqrestore(&qp->sq.lock, flags);

        return err;
}

int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
                      struct ib_recv_wr **bad_wr)
{
        struct mlx4_ib_qp *qp = to_mqp(ibqp);
        struct mlx4_wqe_data_seg *scat;
        unsigned long flags;
        int err = 0;
        int nreq;
        int ind;
        int i;

        spin_lock_irqsave(&qp->rq.lock, flags);

        ind = qp->rq.head & (qp->rq.wqe_cnt - 1);

        for (nreq = 0; wr; ++nreq, wr = wr->next) {
                if (mlx4_wq_overflow(&qp->rq, nreq, qp->ibqp.send_cq)) {
                        err = -ENOMEM;
                        *bad_wr = wr;
                        goto out;
                }

                if (unlikely(wr->num_sge > qp->rq.max_gs)) {
                        err = -EINVAL;
                        *bad_wr = wr;
                        goto out;
                }

                scat = get_recv_wqe(qp, ind);

                for (i = 0; i < wr->num_sge; ++i)
                        __set_data_seg(scat + i, wr->sg_list + i);

                if (i < qp->rq.max_gs) {
                        scat[i].byte_count = 0;
                        scat[i].lkey       = cpu_to_be32(MLX4_INVALID_LKEY);
                        scat[i].addr       = 0;
                }

                qp->rq.wrid[ind] = wr->wr_id;

                ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
        }

out:
        if (likely(nreq)) {
                qp->rq.head += nreq;

                /*
                 * Make sure that descriptors are written before
                 * doorbell record.
                 */
                wmb();

                *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
        }

        spin_unlock_irqrestore(&qp->rq.lock, flags);

        return err;
}

static inline enum ib_qp_state to_ib_qp_state(enum mlx4_qp_state mlx4_state)
{
        switch (mlx4_state) {
        case MLX4_QP_STATE_RST:      return IB_QPS_RESET;
        case MLX4_QP_STATE_INIT:     return IB_QPS_INIT;
        case MLX4_QP_STATE_RTR:      return IB_QPS_RTR;
        case MLX4_QP_STATE_RTS:      return IB_QPS_RTS;
        case MLX4_QP_STATE_SQ_DRAINING:
        case MLX4_QP_STATE_SQD:      return IB_QPS_SQD;
        case MLX4_QP_STATE_SQER:     return IB_QPS_SQE;
        case MLX4_QP_STATE_ERR:      return IB_QPS_ERR;
        default:                     return -1;
        }
}

static inline enum ib_mig_state to_ib_mig_state(int mlx4_mig_state)
{
        switch (mlx4_mig_state) {
        case MLX4_QP_PM_ARMED:          return IB_MIG_ARMED;
        case MLX4_QP_PM_REARM:          return IB_MIG_REARM;
        case MLX4_QP_PM_MIGRATED:       return IB_MIG_MIGRATED;
        default: return -1;
        }
}

static int to_ib_qp_access_flags(int mlx4_flags)
{
        int ib_flags = 0;

        if (mlx4_flags & MLX4_QP_BIT_RRE)
                ib_flags |= IB_ACCESS_REMOTE_READ;
        if (mlx4_flags & MLX4_QP_BIT_RWE)
                ib_flags |= IB_ACCESS_REMOTE_WRITE;
        if (mlx4_flags & MLX4_QP_BIT_RAE)
                ib_flags |= IB_ACCESS_REMOTE_ATOMIC;

        return ib_flags;
}

static void to_ib_ah_attr(struct mlx4_dev *dev, struct ib_ah_attr *ib_ah_attr,
                          struct mlx4_qp_path *path)
{
        memset(ib_ah_attr, 0, sizeof *ib_ah_attr);
        ib_ah_attr->port_num      = path->sched_queue & 0x40 ? 2 : 1;

        if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->caps.num_ports)
                return;

        ib_ah_attr->dlid          = be16_to_cpu(path->rlid);
        ib_ah_attr->sl            = (path->sched_queue >> 2) & 0xf;
        ib_ah_attr->src_path_bits = path->grh_mylmc & 0x7f;
        ib_ah_attr->static_rate   = path->static_rate ? path->static_rate - 5 : 0;
        ib_ah_attr->ah_flags      = (path->grh_mylmc & (1 << 7)) ? IB_AH_GRH : 0;
        if (ib_ah_attr->ah_flags) {
                ib_ah_attr->grh.sgid_index = path->mgid_index;
                ib_ah_attr->grh.hop_limit  = path->hop_limit;
                ib_ah_attr->grh.traffic_class =
                        (be32_to_cpu(path->tclass_flowlabel) >> 20) & 0xff;
                ib_ah_attr->grh.flow_label =
                        be32_to_cpu(path->tclass_flowlabel) & 0xfffff;
                memcpy(ib_ah_attr->grh.dgid.raw,
                        path->rgid, sizeof ib_ah_attr->grh.dgid.raw);
        }
}

int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
                     struct ib_qp_init_attr *qp_init_attr)
{
        struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
        struct mlx4_ib_qp *qp = to_mqp(ibqp);
        struct mlx4_qp_context context;
        int mlx4_state;
        int err;

        if (qp->state == IB_QPS_RESET) {
                qp_attr->qp_state = IB_QPS_RESET;
                goto done;
        }

        err = mlx4_qp_query(dev->dev, &qp->mqp, &context);
        if (err)
                return -EINVAL;

        mlx4_state = be32_to_cpu(context.flags) >> 28;

        qp_attr->qp_state            = to_ib_qp_state(mlx4_state);
        qp_attr->path_mtu            = context.mtu_msgmax >> 5;
        qp_attr->path_mig_state      =
                to_ib_mig_state((be32_to_cpu(context.flags) >> 11) & 0x3);
        qp_attr->qkey                = be32_to_cpu(context.qkey);
        qp_attr->rq_psn              = be32_to_cpu(context.rnr_nextrecvpsn) & 0xffffff;
        qp_attr->sq_psn              = be32_to_cpu(context.next_send_psn) & 0xffffff;
        qp_attr->dest_qp_num         = be32_to_cpu(context.remote_qpn) & 0xffffff;
        qp_attr->qp_access_flags     =
                to_ib_qp_access_flags(be32_to_cpu(context.params2));

        if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
                to_ib_ah_attr(dev->dev, &qp_attr->ah_attr, &context.pri_path);
                to_ib_ah_attr(dev->dev, &qp_attr->alt_ah_attr, &context.alt_path);
                qp_attr->alt_pkey_index = context.alt_path.pkey_index & 0x7f;
                qp_attr->alt_port_num   = qp_attr->alt_ah_attr.port_num;
        }

        qp_attr->pkey_index = context.pri_path.pkey_index & 0x7f;
        if (qp_attr->qp_state == IB_QPS_INIT)
                qp_attr->port_num = qp->port;
        else
                qp_attr->port_num = context.pri_path.sched_queue & 0x40 ? 2 : 1;

        /* qp_attr->en_sqd_async_notify is only applicable in modify qp */
        qp_attr->sq_draining = mlx4_state == MLX4_QP_STATE_SQ_DRAINING;

        qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context.params1) >> 21) & 0x7);

        qp_attr->max_dest_rd_atomic =
                1 << ((be32_to_cpu(context.params2) >> 21) & 0x7);
        qp_attr->min_rnr_timer      =
                (be32_to_cpu(context.rnr_nextrecvpsn) >> 24) & 0x1f;
        qp_attr->timeout            = context.pri_path.ackto >> 3;
        qp_attr->retry_cnt          = (be32_to_cpu(context.params1) >> 16) & 0x7;
        qp_attr->rnr_retry          = (be32_to_cpu(context.params1) >> 13) & 0x7;
        qp_attr->alt_timeout        = context.alt_path.ackto >> 3;

done:
        qp_attr->cur_qp_state        = qp_attr->qp_state;
        qp_attr->cap.max_recv_wr     = qp->rq.wqe_cnt;
        qp_attr->cap.max_recv_sge    = qp->rq.max_gs;

        if (!ibqp->uobject) {
                qp_attr->cap.max_send_wr  = qp->sq.wqe_cnt;
                qp_attr->cap.max_send_sge = qp->sq.max_gs;
        } else {
                qp_attr->cap.max_send_wr  = 0;
                qp_attr->cap.max_send_sge = 0;
        }

        /*
         * We don't support inline sends for kernel QPs (yet), and we
         * don't know what userspace's value should be.
         */
        qp_attr->cap.max_inline_data = 0;

        qp_init_attr->cap            = qp_attr->cap;

        return 0;
}
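
Consumers never call mlx4_ib_post_send() or mlx4_ib_post_recv() directly; they go through the generic ib_post_send()/ib_post_recv() verbs, which dispatch to the device driver's handlers shown above. The fragment below is a minimal sketch of posting one signaled SEND on an already-connected QP, assuming the same kernel generation as this file (the pre-4.x struct ib_send_wr layout); the QP handle, DMA address, length, and lkey are hypothetical placeholders that would come from the caller's own QP setup and memory registration.

#include <rdma/ib_verbs.h>

/*
 * Hedged sketch, not part of qp.c: post a single signaled SEND.
 * "qp", "dma_addr", "len" and "lkey" are assumed to have been obtained
 * elsewhere (QP creation, DMA mapping, memory registration).
 */
static int example_post_one_send(struct ib_qp *qp, u64 dma_addr,
                                 u32 len, u32 lkey)
{
        struct ib_sge sge = {
                .addr   = dma_addr,     /* DMA address of the registered buffer */
                .length = len,
                .lkey   = lkey,
        };
        struct ib_send_wr wr = {
                .wr_id      = 1,                /* echoed back in the completion */
                .sg_list    = &sge,
                .num_sge    = 1,
                .opcode     = IB_WR_SEND,
                .send_flags = IB_SEND_SIGNALED, /* request a CQE for this WR */
        };
        struct ib_send_wr *bad_wr;

        /* On an mlx4 device this resolves to mlx4_ib_post_send() above. */
        return ib_post_send(qp, &wr, &bad_wr);
}

The single work request here exercises only the IB_QPT_RC/IB_QPT_UC "send" path of the switch statement in mlx4_ib_post_send(): no remote-address or atomic segment is added, just the control segment followed by one data segment.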
