⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 mthca_cq.c

📁 linux-2.6.15.6
💻 C
📖 第 1 页 / 共 2 页
字号:
		mthca_dbg(dev, "%x/%d: CQE -> QPN %06x, WQE @ %08x\n",
			  cq->cqn, cq->cons_index, be32_to_cpu(cqe->my_qpn),
			  be32_to_cpu(cqe->wqe));
		dump_cqe(dev, cqe);
	}

	/*
	 * NOTE(review): this is the interior of mthca_poll_one(); the
	 * function header lies above this chunk and is not visible here.
	 *
	 * An error CQE is signalled by the error opcode mask; for error
	 * CQEs bit 0 of the opcode distinguishes send vs. receive, for
	 * normal CQEs the dedicated is_send bit (0x80) does.
	 */
	is_error = (cqe->opcode & MTHCA_ERROR_CQE_OPCODE_MASK) ==
		MTHCA_ERROR_CQE_OPCODE_MASK;
	is_send  = is_error ? cqe->opcode & 0x01 : cqe->is_send & 0x80;

	/* Re-resolve the QP only when this CQE targets a different QPN
	 * than the cached *cur_qp (cheap fast path for bursts on one QP). */
	if (!*cur_qp || be32_to_cpu(cqe->my_qpn) != (*cur_qp)->qpn) {
		/*
		 * We do not have to take the QP table lock here,
		 * because CQs will be locked while QPs are removed
		 * from the table.
		 */
		*cur_qp = mthca_array_get(&dev->qp_table.qp,
					  be32_to_cpu(cqe->my_qpn) &
					  (dev->limits.num_qps - 1));
		if (!*cur_qp) {
			mthca_warn(dev, "CQ entry for unknown QP %06x\n",
				   be32_to_cpu(cqe->my_qpn) & 0xffffff);
			err = -EINVAL;
			goto out;
		}
	}

	entry->qp_num = (*cur_qp)->qpn;

	if (is_send) {
		/* Send completion: WQE address is relative to the send
		 * queue start; wrid for send entries is stored after the
		 * rq.max receive entries in the shared wrid array. */
		wq = &(*cur_qp)->sq;
		wqe_index = ((be32_to_cpu(cqe->wqe) - (*cur_qp)->send_wqe_offset)
			     >> wq->wqe_shift);
		entry->wr_id = (*cur_qp)->wrid[wqe_index +
					       (*cur_qp)->rq.max];
	} else if ((*cur_qp)->ibqp.srq) {
		/* Receive completion on a shared receive queue: the SRQ
		 * owns the wrid array and the WQE must be returned to the
		 * SRQ free list; wq stays NULL so no per-QP tail update. */
		struct mthca_srq *srq = to_msrq((*cur_qp)->ibqp.srq);
		u32 wqe = be32_to_cpu(cqe->wqe);
		wq = NULL;
		wqe_index = wqe >> srq->wqe_shift;
		entry->wr_id = srq->wrid[wqe_index];
		mthca_free_srq_wqe(srq, wqe);
	} else {
		/* Ordinary receive completion on the QP's own RQ. */
		wq = &(*cur_qp)->rq;
		wqe_index = be32_to_cpu(cqe->wqe) >> wq->wqe_shift;
		entry->wr_id = (*cur_qp)->wrid[wqe_index];
	}

	if (wq) {
		/* Advance the queue tail past all WQEs up to and including
		 * this one; the second branch handles index wrap-around. */
		if (wq->last_comp < wqe_index)
			wq->tail += wqe_index - wq->last_comp;
		else
			wq->tail += wqe_index + wq->max - wq->last_comp;

		wq->last_comp = wqe_index;
	}

	if (is_error) {
		/* handle_error_cqe() fills in the error status and decides
		 * via free_cqe whether the CQE may be returned to hardware. */
		err = handle_error_cqe(dev, cq, *cur_qp, wqe_index, is_send,
				       (struct mthca_err_cqe *) cqe,
				       entry, &free_cqe);
		goto out;
	}

	if (is_send) {
		/* Map the hardware send opcode to the verbs work
		 * completion opcode and flags. */
		entry->wc_flags = 0;
		switch (cqe->opcode) {
		case MTHCA_OPCODE_RDMA_WRITE:
			entry->opcode    = IB_WC_RDMA_WRITE;
			break;
		case MTHCA_OPCODE_RDMA_WRITE_IMM:
			entry->opcode    = IB_WC_RDMA_WRITE;
			entry->wc_flags |= IB_WC_WITH_IMM;
			break;
		case MTHCA_OPCODE_SEND:
			entry->opcode    = IB_WC_SEND;
			break;
		case MTHCA_OPCODE_SEND_IMM:
			entry->opcode    = IB_WC_SEND;
			entry->wc_flags |= IB_WC_WITH_IMM;
			break;
		case MTHCA_OPCODE_RDMA_READ:
			entry->opcode    = IB_WC_RDMA_READ;
			entry->byte_len  = be32_to_cpu(cqe->byte_cnt);
			break;
		case MTHCA_OPCODE_ATOMIC_CS:
			entry->opcode    = IB_WC_COMP_SWAP;
			entry->byte_len  = be32_to_cpu(cqe->byte_cnt);
			break;
		case MTHCA_OPCODE_ATOMIC_FA:
			entry->opcode    = IB_WC_FETCH_ADD;
			entry->byte_len  = be32_to_cpu(cqe->byte_cnt);
			break;
		case MTHCA_OPCODE_BIND_MW:
			entry->opcode    = IB_WC_BIND_MW;
			break;
		default:
			entry->opcode    = MTHCA_OPCODE_INVALID;
			break;
		}
	} else {
		/* Receive completion: low 5 bits of the opcode carry the
		 * IB base transport opcode, which tells us whether
		 * immediate data was delivered. */
		entry->byte_len = be32_to_cpu(cqe->byte_cnt);
		switch (cqe->opcode & 0x1f) {
		case IB_OPCODE_SEND_LAST_WITH_IMMEDIATE:
		case IB_OPCODE_SEND_ONLY_WITH_IMMEDIATE:
			entry->wc_flags = IB_WC_WITH_IMM;
			entry->imm_data = cqe->imm_etype_pkey_eec;
			entry->opcode = IB_WC_RECV;
			break;
		case IB_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE:
		case IB_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE:
			entry->wc_flags = IB_WC_WITH_IMM;
			entry->imm_data = cqe->imm_etype_pkey_eec;
			entry->opcode = IB_WC_RECV_RDMA_WITH_IMM;
			break;
		default:
			entry->wc_flags = 0;
			entry->opcode = IB_WC_RECV;
			break;
		}
		/* Unpack remote address/path information from the CQE. */
		entry->slid 	   = be16_to_cpu(cqe->rlid);
		entry->sl   	   = be16_to_cpu(cqe->sl_g_mlpath) >> 12;
		entry->src_qp 	   = be32_to_cpu(cqe->rqpn) & 0xffffff;
		entry->dlid_path_bits = be16_to_cpu(cqe->sl_g_mlpath) & 0x7f;
		entry->pkey_index  = be32_to_cpu(cqe->imm_etype_pkey_eec) >> 16;
		/* Bit 0x80 of sl_g_mlpath indicates a GRH was present. */
		entry->wc_flags   |= be16_to_cpu(cqe->sl_g_mlpath) & 0x80 ?
					IB_WC_GRH : 0;
	}

	entry->status = IB_WC_SUCCESS;

 out:
	/* Return the CQE to hardware ownership unless the error path
	 * asked us to keep it (free_cqe cleared). */
	if (likely(free_cqe)) {
		set_cqe_hw(cqe);
		++(*freed);
		++cq->cons_index;
	}

	return err;
}

/*
 * Poll up to num_entries completions from the CQ into entry[].
 * Returns the number of completions polled, or a negative error code.
 * Runs under cq->lock with interrupts disabled; the consumer index is
 * written back to hardware once per call, after a write barrier.
 */
int mthca_poll_cq(struct ib_cq *ibcq, int num_entries,
		  struct ib_wc *entry)
{
	struct mthca_dev *dev = to_mdev(ibcq->device);
	struct mthca_cq *cq = to_mcq(ibcq);
	struct mthca_qp *qp = NULL;
	unsigned long flags;
	int err = 0;
	int freed = 0;
	int npolled;

	spin_lock_irqsave(&cq->lock, flags);

	for (npolled = 0; npolled < num_entries; ++npolled) {
		err = mthca_poll_one(dev, cq, &qp,
				     &freed, entry + npolled);
		if (err)
			break;
	}

	if (freed) {
		/* Make sure CQE ownership updates are visible before the
		 * hardware sees the new consumer index. */
		wmb();
		update_cons_index(dev, cq, freed);
	}

	spin_unlock_irqrestore(&cq->lock, flags);

	/* -EAGAIN just means "CQ empty"; report what we got so far. */
	return err == 0 || err == -EAGAIN ? npolled : err;
}

/*
 * Arm a Tavor-mode CQ for the next (solicited or any) completion event
 * by ringing the CQ doorbell. Always succeeds.
 */
int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify)
{
	__be32 doorbell[2];

	doorbell[0] = cpu_to_be32((notify == IB_CQ_SOLICITED ?
				   MTHCA_TAVOR_CQ_DB_REQ_NOT_SOL :
				   MTHCA_TAVOR_CQ_DB_REQ_NOT)      |
				  to_mcq(cq)->cqn);
	doorbell[1] = (__force __be32) 0xffffffff;

	mthca_write64(doorbell,
		      to_mdev(cq->device)->kar + MTHCA_CQ_DOORBELL,
		      MTHCA_GET_DOORBELL_LOCK(&to_mdev(cq->device)->doorbell_lock));

	return 0;
}

/*
 * Arm a memfree (Arbel-mode) CQ: first update the arm doorbell record
 * in host memory (with the arm sequence number and consumer index),
 * then ring the MMIO doorbell. Always succeeds.
 */
int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
{
	struct mthca_cq *cq = to_mcq(ibcq);
	__be32 doorbell[2];
	u32 sn;
	__be32 ci;

	sn = cq->arm_sn & 3;
	ci = cpu_to_be32(cq->cons_index);

	doorbell[0] = ci;
	doorbell[1] = cpu_to_be32((cq->cqn << 8) | (2 << 5) | (sn << 3) |
				  (notify == IB_CQ_SOLICITED ? 1 : 2));

	mthca_write_db_rec(doorbell, cq->arm_db);

	/*
	 * Make sure that the doorbell record in host memory is
	 * written before ringing the doorbell via PCI MMIO.
	 */
	wmb();

	doorbell[0] = cpu_to_be32((sn << 28)                       |
				  (notify == IB_CQ_SOLICITED ?
				   MTHCA_ARBEL_CQ_DB_REQ_NOT_SOL :
				   MTHCA_ARBEL_CQ_DB_REQ_NOT)      |
				  cq->cqn);
	doorbell[1] = ci;

	mthca_write64(doorbell,
		      to_mdev(ibcq->device)->kar + MTHCA_CQ_DOORBELL,
		      MTHCA_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->doorbell_lock));

	return 0;
}

/* Free the kernel-allocated CQE buffer and its memory region. */
static void mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq *cq)
{
	mthca_buf_free(dev, (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE,
		       &cq->queue, cq->is_direct, &cq->mr);
}

/*
 * Allocate and initialize a CQ with nent entries (nent must be a power
 * of two; logsize below is derived with ffs). ctx is NULL for kernel
 * CQs, in which case the CQE buffer and doorbells are allocated here;
 * for userspace CQs the consumer owns the buffer. Returns 0 or a
 * negative error code; on failure all partially acquired resources are
 * released via the goto cleanup chain.
 */
int mthca_init_cq(struct mthca_dev *dev, int nent,
		  struct mthca_ucontext *ctx, u32 pdn,
		  struct mthca_cq *cq)
{
	int size = nent * MTHCA_CQ_ENTRY_SIZE;
	struct mthca_mailbox *mailbox;
	struct mthca_cq_context *cq_context;
	int err = -ENOMEM;
	u8 status;
	int i;

	might_sleep();

	cq->ibcq.cqe  = nent - 1;
	cq->is_kernel = !ctx;

	cq->cqn = mthca_alloc(&dev->cq_table.alloc);
	if (cq->cqn == -1)
		return -ENOMEM;

	if (mthca_is_memfree(dev)) {
		err = mthca_table_get(dev, dev->cq_table.table, cq->cqn);
		if (err)
			goto err_out;

		if (cq->is_kernel) {
			cq->arm_sn = 1;

			err = -ENOMEM;

			cq->set_ci_db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_CQ_SET_CI,
							     cq->cqn, &cq->set_ci_db);
			if (cq->set_ci_db_index < 0)
				goto err_out_icm;

			cq->arm_db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_CQ_ARM,
							  cq->cqn, &cq->arm_db);
			if (cq->arm_db_index < 0)
				goto err_out_ci;
		}
	}

	/* NOTE(review): for a user-context CQ on a memfree HCA, err is 0
	 * here (set by the successful mthca_table_get), so a mailbox
	 * allocation failure would return 0 — verify against later
	 * upstream fixes of this error path. */
	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		goto err_out_arm;

	cq_context = mailbox->buf;

	if (cq->is_kernel) {
		err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_CQ_SIZE,
				      &cq->queue, &cq->is_direct,
				      &dev->driver_pd, 1, &cq->mr);
		if (err)
			goto err_out_mailbox;

		/* Mark every CQE as hardware-owned before handing the
		 * buffer to the HCA. */
		for (i = 0; i < nent; ++i)
			set_cqe_hw(get_cqe(cq, i));
	}

	spin_lock_init(&cq->lock);
	atomic_set(&cq->refcount, 1);
	init_waitqueue_head(&cq->wait);

	memset(cq_context, 0, sizeof *cq_context);
	cq_context->flags           = cpu_to_be32(MTHCA_CQ_STATUS_OK      |
						  MTHCA_CQ_STATE_DISARMED |
						  MTHCA_CQ_FLAG_TR);
	cq_context->logsize_usrpage = cpu_to_be32((ffs(nent) - 1) << 24);
	if (ctx)
		cq_context->logsize_usrpage |= cpu_to_be32(ctx->uar.index);
	else
		cq_context->logsize_usrpage |= cpu_to_be32(dev->driver_uar.index);
	cq_context->error_eqn       = cpu_to_be32(dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn);
	cq_context->comp_eqn        = cpu_to_be32(dev->eq_table.eq[MTHCA_EQ_COMP].eqn);
	cq_context->pd              = cpu_to_be32(pdn);
	cq_context->lkey            = cpu_to_be32(cq->mr.ibmr.lkey);
	cq_context->cqn             = cpu_to_be32(cq->cqn);

	if (mthca_is_memfree(dev)) {
		cq_context->ci_db    = cpu_to_be32(cq->set_ci_db_index);
		cq_context->state_db = cpu_to_be32(cq->arm_db_index);
	}

	/* Hand the CQ context to firmware (software-to-hardware
	 * ownership transition). */
	err = mthca_SW2HW_CQ(dev, mailbox, cq->cqn, &status);
	if (err) {
		mthca_warn(dev, "SW2HW_CQ failed (%d)\n", err);
		goto err_out_free_mr;
	}

	if (status) {
		mthca_warn(dev, "SW2HW_CQ returned status 0x%02x\n",
			   status);
		err = -EINVAL;
		goto err_out_free_mr;
	}

	spin_lock_irq(&dev->cq_table.lock);
	/* NOTE(review): err is 0 at this point, so an mthca_array_set
	 * failure would return 0 from this function — confirm intended. */
	if (mthca_array_set(&dev->cq_table.cq,
			    cq->cqn & (dev->limits.num_cqs - 1),
			    cq)) {
		spin_unlock_irq(&dev->cq_table.lock);
		goto err_out_free_mr;
	}
	spin_unlock_irq(&dev->cq_table.lock);

	cq->cons_index = 0;

	mthca_free_mailbox(dev, mailbox);

	return 0;

err_out_free_mr:
	if (cq->is_kernel)
		mthca_free_cq_buf(dev, cq);

err_out_mailbox:
	mthca_free_mailbox(dev, mailbox);

err_out_arm:
	if (cq->is_kernel && mthca_is_memfree(dev))
		mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM, cq->arm_db_index);

err_out_ci:
	if (cq->is_kernel && mthca_is_memfree(dev))
		mthca_free_db(dev, MTHCA_DB_TYPE_CQ_SET_CI, cq->set_ci_db_index);

err_out_icm:
	mthca_table_put(dev, dev->cq_table.table, cq->cqn);

err_out:
	mthca_free(&dev->cq_table.alloc, cq->cqn);

	return err;
}

/*
 * Tear down a CQ: return it to software ownership (HW2SW), remove it
 * from the CQ table, wait for all in-flight references (interrupt
 * handlers) to drop, then free buffers, doorbells, ICM and the CQN.
 */
void mthca_free_cq(struct mthca_dev *dev,
		   struct mthca_cq *cq)
{
	struct mthca_mailbox *mailbox;
	int err;
	u8 status;

	might_sleep();

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		mthca_warn(dev, "No memory for mailbox to free CQ.\n");
		return;
	}

	err = mthca_HW2SW_CQ(dev, mailbox, cq->cqn, &status);
	if (err)
		mthca_warn(dev, "HW2SW_CQ failed (%d)\n", err);
	else if (status)
		mthca_warn(dev, "HW2SW_CQ returned status 0x%02x\n", status);

	/* Compiled-out debug dump of the CQ context returned by HW2SW_CQ;
	 * flip the condition to 1 to enable when debugging. */
	if (0) {
		__be32 *ctx = mailbox->buf;
		int j;

		printk(KERN_ERR "context for CQN %x (cons index %x, next sw %d)\n",
		       cq->cqn, cq->cons_index,
		       cq->is_kernel ? !!next_cqe_sw(cq) : 0);
		for (j = 0; j < 16; ++j)
			printk(KERN_ERR "[%2x] %08x\n", j * 4, be32_to_cpu(ctx[j]));
	}

	spin_lock_irq(&dev->cq_table.lock);
	mthca_array_clear(&dev->cq_table.cq,
			  cq->cqn & (dev->limits.num_cqs - 1));
	spin_unlock_irq(&dev->cq_table.lock);

	/* Make sure no completion interrupt handler can still be
	 * running with a reference to this CQ. */
	if (dev->mthca_flags & MTHCA_FLAG_MSI_X)
		synchronize_irq(dev->eq_table.eq[MTHCA_EQ_COMP].msi_x_vector);
	else
		synchronize_irq(dev->pdev->irq);

	atomic_dec(&cq->refcount);
	wait_event(cq->wait, !atomic_read(&cq->refcount));

	if (cq->is_kernel) {
		mthca_free_cq_buf(dev, cq);
		if (mthca_is_memfree(dev)) {
			mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM,    cq->arm_db_index);
			mthca_free_db(dev, MTHCA_DB_TYPE_CQ_SET_CI, cq->set_ci_db_index);
		}
	}

	mthca_table_put(dev, dev->cq_table.table, cq->cqn);
	mthca_free(&dev->cq_table.alloc, cq->cqn);
	mthca_free_mailbox(dev, mailbox);
}

/*
 * Initialize the device-wide CQ table: the CQN allocator (CQNs are
 * 24-bit, hence the (1 << 24) - 1 mask) and the CQN -> mthca_cq array.
 */
int __devinit mthca_init_cq_table(struct mthca_dev *dev)
{
	int err;

	spin_lock_init(&dev->cq_table.lock);

	err = mthca_alloc_init(&dev->cq_table.alloc,
			       dev->limits.num_cqs,
			       (1 << 24) - 1,
			       dev->limits.reserved_cqs);
	if (err)
		return err;

	err = mthca_array_init(&dev->cq_table.cq,
			       dev->limits.num_cqs);
	if (err)
		mthca_alloc_cleanup(&dev->cq_table.alloc);

	return err;
}

/* Release the CQ table resources allocated by mthca_init_cq_table(). */
void __devexit mthca_cleanup_cq_table(struct mthca_dev *dev)
{
	mthca_array_cleanup(&dev->cq_table.cq, dev->limits.num_cqs);
	mthca_alloc_cleanup(&dev->cq_table.alloc);
}

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -