
mthca_qp.c

Page 1 of 3
	struct mthca_qp_param *qp_param;
	struct mthca_qp_context *qp_context;
	u32 req_param, opt_param;
	u8 status;
	int err;

	if (attr_mask & IB_QP_CUR_STATE) {
		if (attr->cur_qp_state != IB_QPS_RTR &&
		    attr->cur_qp_state != IB_QPS_RTS &&
		    attr->cur_qp_state != IB_QPS_SQD &&
		    attr->cur_qp_state != IB_QPS_SQE)
			return -EINVAL;
		else
			cur_state = attr->cur_qp_state;
	} else {
		spin_lock_irq(&qp->lock);
		cur_state = qp->state;
		spin_unlock_irq(&qp->lock);
	}

	if (attr_mask & IB_QP_STATE) {
		if (attr->qp_state < 0 || attr->qp_state > IB_QPS_ERR)
			return -EINVAL;
		new_state = attr->qp_state;
	} else
		new_state = cur_state;

	if (state_table[cur_state][new_state].trans == MTHCA_TRANS_INVALID) {
		mthca_dbg(dev, "Illegal QP transition "
			  "%d->%d\n", cur_state, new_state);
		return -EINVAL;
	}

	req_param = state_table[cur_state][new_state].req_param[qp->transport];
	opt_param = state_table[cur_state][new_state].opt_param[qp->transport];

	if ((req_param & attr_mask) != req_param) {
		mthca_dbg(dev, "QP transition "
			  "%d->%d missing req attr 0x%08x\n",
			  cur_state, new_state,
			  req_param & ~attr_mask);
		return -EINVAL;
	}

	if (attr_mask & ~(req_param | opt_param | IB_QP_STATE)) {
		mthca_dbg(dev, "QP transition (transport %d) "
			  "%d->%d has extra attr 0x%08x\n",
			  qp->transport,
			  cur_state, new_state,
			  attr_mask & ~(req_param | opt_param |
					IB_QP_STATE));
		return -EINVAL;
	}

	mailbox = kmalloc(sizeof (*qp_param) + MTHCA_CMD_MAILBOX_EXTRA, GFP_KERNEL);
	if (!mailbox)
		return -ENOMEM;
	qp_param = MAILBOX_ALIGN(mailbox);
	qp_context = &qp_param->context;
	memset(qp_param, 0, sizeof *qp_param);

	qp_context->flags      = cpu_to_be32((to_mthca_state(new_state) << 28) |
					     (to_mthca_st(qp->transport) << 16));
	qp_context->flags     |= cpu_to_be32(MTHCA_QP_BIT_DE);
	if (!(attr_mask & IB_QP_PATH_MIG_STATE))
		qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11);
	else {
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PM_STATE);
		switch (attr->path_mig_state) {
		case IB_MIG_MIGRATED:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11);
			break;
		case IB_MIG_REARM:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_REARM << 11);
			break;
		case IB_MIG_ARMED:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_ARMED << 11);
			break;
		}
	}

	/* leave sched_queue as 0 */

	if (qp->transport == MLX || qp->transport == UD)
		qp_context->mtu_msgmax = cpu_to_be32((IB_MTU_2048 << 29) |
						     (11 << 24));
	else if (attr_mask & IB_QP_PATH_MTU) {
		qp_context->mtu_msgmax = cpu_to_be32((attr->path_mtu << 29) |
						     (31 << 24));
	}

	qp_context->usr_page   = cpu_to_be32(MTHCA_KAR_PAGE);
	qp_context->local_qpn  = cpu_to_be32(qp->qpn);
	if (attr_mask & IB_QP_DEST_QPN) {
		qp_context->remote_qpn = cpu_to_be32(attr->dest_qp_num);
	}

	if (qp->transport == MLX)
		qp_context->pri_path.port_pkey |=
			cpu_to_be32(to_msqp(qp)->port << 24);
	else {
		if (attr_mask & IB_QP_PORT) {
			qp_context->pri_path.port_pkey |=
				cpu_to_be32(attr->port_num << 24);
			qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PORT_NUM);
		}
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		qp_context->pri_path.port_pkey |=
			cpu_to_be32(attr->pkey_index);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PKEY_INDEX);
	}

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp_context->pri_path.rnr_retry = attr->rnr_retry << 5;
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_RETRY);
	}

	if (attr_mask & IB_QP_AV) {
		qp_context->pri_path.g_mylmc     = attr->ah_attr.src_path_bits & 0x7f;
		qp_context->pri_path.rlid        = cpu_to_be16(attr->ah_attr.dlid);
		qp_context->pri_path.static_rate = (!!attr->ah_attr.static_rate) << 3;
		if (attr->ah_attr.ah_flags & IB_AH_GRH) {
			qp_context->pri_path.g_mylmc |= 1 << 7;
			qp_context->pri_path.mgid_index = attr->ah_attr.grh.sgid_index;
			qp_context->pri_path.hop_limit = attr->ah_attr.grh.hop_limit;
			qp_context->pri_path.sl_tclass_flowlabel =
				cpu_to_be32((attr->ah_attr.sl << 28)                |
					    (attr->ah_attr.grh.traffic_class << 20) |
					    (attr->ah_attr.grh.flow_label));
			memcpy(qp_context->pri_path.rgid,
			       attr->ah_attr.grh.dgid.raw, 16);
		} else {
			qp_context->pri_path.sl_tclass_flowlabel =
				cpu_to_be32(attr->ah_attr.sl << 28);
		}
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH);
	}

	if (attr_mask & IB_QP_TIMEOUT) {
		qp_context->pri_path.ackto = attr->timeout;
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ACK_TIMEOUT);
	}

	/* XXX alt_path */

	/* leave rdd as 0 */
	qp_context->pd         = cpu_to_be32(to_mpd(ibqp->pd)->pd_num);
	/* leave wqe_base as 0 (we always create an MR based at 0 for WQs) */
	qp_context->wqe_lkey   = cpu_to_be32(qp->mr.ibmr.lkey);
	qp_context->params1    = cpu_to_be32((MTHCA_ACK_REQ_FREQ << 28) |
					     (MTHCA_FLIGHT_LIMIT << 24) |
					     MTHCA_QP_BIT_SRE           |
					     MTHCA_QP_BIT_SWE           |
					     MTHCA_QP_BIT_SAE);
	if (qp->sq.policy == IB_SIGNAL_ALL_WR)
		qp_context->params1 |= cpu_to_be32(MTHCA_QP_BIT_SSC);
	if (attr_mask & IB_QP_RETRY_CNT) {
		qp_context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RETRY_COUNT);
	}
	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		qp_context->params1 |= cpu_to_be32(min(attr->max_dest_rd_atomic ?
						       ffs(attr->max_dest_rd_atomic) - 1 : 0,
						       7) << 21);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_SRA_MAX);
	}

	if (attr_mask & IB_QP_SQ_PSN)
		qp_context->next_send_psn = cpu_to_be32(attr->sq_psn);
	qp_context->cqn_snd = cpu_to_be32(to_mcq(ibqp->send_cq)->cqn);

	if (attr_mask & IB_QP_ACCESS_FLAGS) {
		/*
		 * Only enable RDMA/atomics if we have responder
		 * resources set to a non-zero value.
		 */
		if (qp->resp_depth) {
			qp_context->params2 |=
				cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE ?
					    MTHCA_QP_BIT_RWE : 0);
			qp_context->params2 |=
				cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_READ ?
					    MTHCA_QP_BIT_RRE : 0);
			qp_context->params2 |=
				cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC ?
					    MTHCA_QP_BIT_RAE : 0);
		}

		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
							MTHCA_QP_OPTPAR_RRE |
							MTHCA_QP_OPTPAR_RAE);

		qp->atomic_rd_en = attr->qp_access_flags;
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		u8 rra_max;

		if (qp->resp_depth && !attr->max_rd_atomic) {
			/*
			 * Lowering our responder resources to zero.
			 * Turn off RDMA/atomics as responder.
			 * (RWE/RRE/RAE in params2 already zero)
			 */
			qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
								MTHCA_QP_OPTPAR_RRE |
								MTHCA_QP_OPTPAR_RAE);
		}

		if (!qp->resp_depth && attr->max_rd_atomic) {
			/*
			 * Increasing our responder resources from
			 * zero.  Turn on RDMA/atomics as appropriate.
			 */
			qp_context->params2 |=
				cpu_to_be32(qp->atomic_rd_en & IB_ACCESS_REMOTE_WRITE ?
					    MTHCA_QP_BIT_RWE : 0);
			qp_context->params2 |=
				cpu_to_be32(qp->atomic_rd_en & IB_ACCESS_REMOTE_READ ?
					    MTHCA_QP_BIT_RRE : 0);
			qp_context->params2 |=
				cpu_to_be32(qp->atomic_rd_en & IB_ACCESS_REMOTE_ATOMIC ?
					    MTHCA_QP_BIT_RAE : 0);
			qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
								MTHCA_QP_OPTPAR_RRE |
								MTHCA_QP_OPTPAR_RAE);
		}

		for (rra_max = 0;
		     1 << rra_max < attr->max_rd_atomic &&
			     rra_max < dev->qp_table.rdb_shift;
		     ++rra_max)
			; /* nothing */
		qp_context->params2      |= cpu_to_be32(rra_max << 21);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRA_MAX);

		qp->resp_depth = attr->max_rd_atomic;
	}

	if (qp->rq.policy == IB_SIGNAL_ALL_WR)
		qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RSC);

	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
		qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_TIMEOUT);
	}
	if (attr_mask & IB_QP_RQ_PSN)
		qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);

	qp_context->ra_buff_indx = dev->qp_table.rdb_base +
		((qp->qpn & (dev->limits.num_qps - 1)) * MTHCA_RDB_ENTRY_SIZE <<
		 dev->qp_table.rdb_shift);
	qp_context->cqn_rcv = cpu_to_be32(to_mcq(ibqp->recv_cq)->cqn);

	if (attr_mask & IB_QP_QKEY) {
		qp_context->qkey = cpu_to_be32(attr->qkey);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_Q_KEY);
	}

	err = mthca_MODIFY_QP(dev, state_table[cur_state][new_state].trans,
			      qp->qpn, 0, qp_param, 0, &status);
	if (status) {
		mthca_warn(dev, "modify QP %d returned status %02x.\n",
			   state_table[cur_state][new_state].trans, status);
		err = -EINVAL;
	}

	if (!err)
		qp->state = new_state;

	kfree(mailbox);

	if (is_sqp(dev, qp))
		store_attrs(to_msqp(qp), attr, attr_mask);

	/*
	 * If we are moving QP0 to RTR, bring the IB link up; if we
	 * are moving QP0 to RESET or ERROR, bring the link back down.
	 */
	if (is_qp0(dev, qp)) {
		if (cur_state != IB_QPS_RTR &&
		    new_state == IB_QPS_RTR)
			init_port(dev, to_msqp(qp)->port);

		if (cur_state != IB_QPS_RESET &&
		    cur_state != IB_QPS_ERR &&
		    (new_state == IB_QPS_RESET ||
		     new_state == IB_QPS_ERR))
			mthca_CLOSE_IB(dev, to_msqp(qp)->port, &status);
	}

	return err;
}

/*
 * Allocate and register buffer for WQEs.  qp->rq.max, sq.max,
 * rq.max_gs and sq.max_gs must all be assigned.
 * mthca_alloc_wqe_buf will calculate rq.wqe_shift and
 * sq.wqe_shift (as well as send_wqe_offset, is_direct, and
 * queue)
 */
static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
			       struct mthca_pd *pd,
			       struct mthca_qp *qp)
{
	int size;
	int i;
	int npages, shift;
	dma_addr_t t;
	u64 *dma_list = NULL;
	int err = -ENOMEM;

	size = sizeof (struct mthca_next_seg) +
		qp->rq.max_gs * sizeof (struct mthca_data_seg);

	for (qp->rq.wqe_shift = 6; 1 << qp->rq.wqe_shift < size;
	     qp->rq.wqe_shift++)
		; /* nothing */

	size = sizeof (struct mthca_next_seg) +
		qp->sq.max_gs * sizeof (struct mthca_data_seg);
	if (qp->transport == MLX)
		size += 2 * sizeof (struct mthca_data_seg);
	else if (qp->transport == UD)
		size += sizeof (struct mthca_ud_seg);
	else /* bind seg is as big as atomic + raddr segs */
		size += sizeof (struct mthca_bind_seg);

	for (qp->sq.wqe_shift = 6; 1 << qp->sq.wqe_shift < size;
	     qp->sq.wqe_shift++)
		; /* nothing */

	qp->send_wqe_offset = ALIGN(qp->rq.max << qp->rq.wqe_shift,
				    1 << qp->sq.wqe_shift);
	size = PAGE_ALIGN(qp->send_wqe_offset +
			  (qp->sq.max << qp->sq.wqe_shift));

	qp->wrid = kmalloc((qp->rq.max + qp->sq.max) * sizeof (u64),
			   GFP_KERNEL);
	if (!qp->wrid)
		goto err_out;

	if (size <= MTHCA_MAX_DIRECT_QP_SIZE) {
		qp->is_direct = 1;
		npages = 1;
		shift = get_order(size) + PAGE_SHIFT;

		if (0)
			mthca_dbg(dev, "Creating direct QP of size %d (shift %d)\n",
				  size, shift);

		qp->queue.direct.buf = pci_alloc_consistent(dev->pdev, size, &t);
		if (!qp->queue.direct.buf)
			goto err_out;

		pci_unmap_addr_set(&qp->queue.direct, mapping, t);

		memset(qp->queue.direct.buf, 0, size);

		while (t & ((1 << shift) - 1)) {
			--shift;
			npages *= 2;
		}

		dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
		if (!dma_list)
			goto err_out_free;

		for (i = 0; i < npages; ++i)
			dma_list[i] = t + i * (1 << shift);
	} else {
		qp->is_direct = 0;
		npages = size / PAGE_SIZE;
		shift = PAGE_SHIFT;

		if (0)
			mthca_dbg(dev, "Creating indirect QP with %d pages\n", npages);

		dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
		if (!dma_list)
			goto err_out;

		qp->queue.page_list = kmalloc(npages *
					      sizeof *qp->queue.page_list,
					      GFP_KERNEL);
		if (!qp->queue.page_list)
			goto err_out;

		for (i = 0; i < npages; ++i) {
			qp->queue.page_list[i].buf =
				pci_alloc_consistent(dev->pdev, PAGE_SIZE, &t);
			if (!qp->queue.page_list[i].buf)
				goto err_out_free;

			memset(qp->queue.page_list[i].buf, 0, PAGE_SIZE);

			pci_unmap_addr_set(&qp->queue.page_list[i], mapping, t);
			dma_list[i] = t;
		}
	}

	err = mthca_mr_alloc_phys(dev, pd->pd_num, dma_list, shift,
				  npages, 0, size,
				  MTHCA_MPT_FLAG_LOCAL_WRITE |
				  MTHCA_MPT_FLAG_LOCAL_READ,
				  &qp->mr);
	if (err)
		goto err_out_free;

	kfree(dma_list);
	return 0;

 err_out_free:
	if (qp->is_direct) {
		pci_free_consistent(dev->pdev, size,
				    qp->queue.direct.buf,
				    pci_unmap_addr(&qp->queue.direct, mapping));
	} else
		for (i = 0; i < npages; ++i) {
			if (qp->queue.page_list[i].buf)
				pci_free_consistent(dev->pdev, PAGE_SIZE,
						    qp->queue.page_list[i].buf,
						    pci_unmap_addr(&qp->queue.page_list[i],
								   mapping));
		}

 err_out:
	kfree(qp->wrid);
	kfree(dma_list);
	return err;
}

static int mthca_alloc_qp_common(struct mthca_dev *dev,
				 struct mthca_pd *pd,
				 struct mthca_cq *send_cq,
				 struct mthca_cq *recv_cq,
				 enum ib_sig_type send_policy,
				 enum ib_sig_type recv_policy,
				 struct mthca_qp *qp)
{
	int err;

	spin_lock_init(&qp->lock);
	atomic_set(&qp->refcount, 1);
	qp->state        = IB_QPS_RESET;
	qp->atomic_rd_en = 0;
	qp->resp_depth   = 0;
	qp->sq.policy    = send_policy;
	qp->rq.policy    = recv_policy;
	qp->rq.cur       = 0;
	qp->sq.cur       = 0;
	qp->rq.next      = 0;
	qp->sq.next      = 0;
	qp->rq.last_comp = qp->rq.max - 1;
	qp->sq.last_comp = qp->sq.max - 1;
	qp->rq.last      = NULL;
	qp->sq.last      = NULL;

	err = mthca_alloc_wqe_buf(dev, pd, qp);
	return err;
}

int mthca_alloc_qp(struct mthca_dev *dev,
		   struct mthca_pd *pd,
		   struct mthca_cq *send_cq,
		   struct mthca_cq *recv_cq,
		   enum ib_qp_type type,
		   enum ib_sig_type send_policy,
		   enum ib_sig_type recv_policy,
		   struct mthca_qp *qp)
{
	int err;

	switch (type) {
	case IB_QPT_RC: qp->transport = RC; break;
	case IB_QPT_UC: qp->transport = UC; break;
	case IB_QPT_UD: qp->transport = UD; break;
	default: return -EINVAL;
	}

	qp->qpn = mthca_alloc(&dev->qp_table.alloc);
	if (qp->qpn == -1)
		return -ENOMEM;

	err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
				    send_policy, recv_policy, qp);
	if (err) {
		mthca_free(&dev->qp_table.alloc, qp->qpn);
		return err;
	}

	spin_lock_irq(&dev->qp_table.lock);
	mthca_array_set(&dev->qp_table.qp,
			qp->qpn & (dev->limits.num_qps - 1), qp);
	spin_unlock_irq(&dev->qp_table.lock);

	return 0;
}

int mthca_alloc_sqp(struct mthca_dev *dev,
		    struct mthca_pd *pd,
		    struct mthca_cq *send_cq,
		    struct mthca_cq *recv_cq,
		    enum ib_sig_type send_policy,
		    enum ib_sig_type recv_policy,
		    int qpn,
		    int port,
		    struct mthca_sqp *sqp)
{
	int err = 0;
	u32 mqpn = qpn * 2 + dev->qp_table.sqp_start + port - 1;

	sqp->header_buf_size = sqp->qp.sq.max * MTHCA_UD_HEADER_SIZE;
	sqp->header_buf = dma_alloc_coherent(&dev->pdev->dev, sqp->header_buf_size,
					     &sqp->header_dma, GFP_KERNEL);
	if (!sqp->header_buf)
		return -ENOMEM;

	spin_lock_irq(&dev->qp_table.lock);
	if (mthca_array_get(&dev->qp_table.qp, mqpn))
		err = -EBUSY;
	else
		mthca_array_set(&dev->qp_table.qp, mqpn, sqp);
	spin_unlock_irq(&dev->qp_table.lock);

	if (err)
		goto err_out;

	sqp->port = port;
	sqp->qp.qpn       = mqpn;
	sqp->qp.transport = MLX;

	err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
				    send_policy, recv_policy,
				    &sqp->qp);
	if (err)
