
📄 mthca_cq.c

📁 kernel
💻 C
				*freed = 0;
			}

			spin_unlock(&(*cur_qp)->lock);
		}

		spin_lock(&dev->qp_table.lock);
		*cur_qp = mthca_array_get(&dev->qp_table.qp,
					  be32_to_cpu(cqe->my_qpn) &
					  (dev->limits.num_qps - 1));
		if (*cur_qp)
			atomic_inc(&(*cur_qp)->refcount);
		spin_unlock(&dev->qp_table.lock);

		if (!*cur_qp) {
			mthca_warn(dev, "CQ entry for unknown QP %06x\n",
				   be32_to_cpu(cqe->my_qpn) & 0xffffff);
			err = -EINVAL;
			goto out;
		}

		spin_lock(&(*cur_qp)->lock);
	}

	entry->qp_num = (*cur_qp)->qpn;

	if (is_send) {
		wq = &(*cur_qp)->sq;
		wqe_index = ((be32_to_cpu(cqe->wqe) - (*cur_qp)->send_wqe_offset)
			     >> wq->wqe_shift);
		entry->wr_id = (*cur_qp)->wrid[wqe_index +
					       (*cur_qp)->rq.max];
	} else {
		wq = &(*cur_qp)->rq;
		wqe_index = be32_to_cpu(cqe->wqe) >> wq->wqe_shift;
		entry->wr_id = (*cur_qp)->wrid[wqe_index];
	}

	if (wq->last_comp < wqe_index)
		wq->cur -= wqe_index - wq->last_comp;
	else
		wq->cur -= wq->max - wq->last_comp + wqe_index;

	wq->last_comp = wqe_index;

	if (0)
		mthca_dbg(dev, "%s completion for QP %06x, index %d (nr %d)\n",
			  is_send ? "Send" : "Receive",
			  (*cur_qp)->qpn, wqe_index, wq->max);

	if (is_error) {
		err = handle_error_cqe(dev, cq, *cur_qp, wqe_index, is_send,
				       (struct mthca_err_cqe *) cqe,
				       entry, &free_cqe);
		goto out;
	}

	if (is_send) {
		entry->opcode = IB_WC_SEND; /* XXX */
	} else {
		entry->byte_len = be32_to_cpu(cqe->byte_cnt);
		switch (cqe->opcode & 0x1f) {
		case IB_OPCODE_SEND_LAST_WITH_IMMEDIATE:
		case IB_OPCODE_SEND_ONLY_WITH_IMMEDIATE:
			entry->wc_flags = IB_WC_WITH_IMM;
			entry->imm_data = cqe->imm_etype_pkey_eec;
			entry->opcode = IB_WC_RECV;
			break;
		case IB_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE:
		case IB_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE:
			entry->wc_flags = IB_WC_WITH_IMM;
			entry->imm_data = cqe->imm_etype_pkey_eec;
			entry->opcode = IB_WC_RECV_RDMA_WITH_IMM;
			break;
		default:
			entry->wc_flags = 0;
			entry->opcode = IB_WC_RECV;
			break;
		}
		entry->slid           = be16_to_cpu(cqe->rlid);
		entry->sl             = be16_to_cpu(cqe->sl_g_mlpath) >> 12;
		entry->src_qp         = be32_to_cpu(cqe->rqpn) & 0xffffff;
		entry->dlid_path_bits = be16_to_cpu(cqe->sl_g_mlpath) & 0x7f;
		entry->pkey_index     = be32_to_cpu(cqe->imm_etype_pkey_eec) >> 16;
		entry->wc_flags      |= be16_to_cpu(cqe->sl_g_mlpath) & 0x80 ?
					IB_WC_GRH : 0;
	}

	entry->status = IB_WC_SUCCESS;

 out:
	if (free_cqe) {
		set_cqe_hw(cq, cq->cons_index);
		++(*freed);
		cq->cons_index = (cq->cons_index + 1) & cq->ibcq.cqe;
	}

	return err;
}

int mthca_poll_cq(struct ib_cq *ibcq, int num_entries,
		  struct ib_wc *entry)
{
	struct mthca_dev *dev = to_mdev(ibcq->device);
	struct mthca_cq *cq = to_mcq(ibcq);
	struct mthca_qp *qp = NULL;
	unsigned long flags;
	int err = 0;
	int freed = 0;
	int npolled;

	spin_lock_irqsave(&cq->lock, flags);

	for (npolled = 0; npolled < num_entries; ++npolled) {
		err = mthca_poll_one(dev, cq, &qp,
				     &freed, entry + npolled);
		if (err)
			break;
	}

	if (freed) {
		wmb();
		inc_cons_index(dev, cq, freed);
	}

	if (qp) {
		spin_unlock(&qp->lock);
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}

	spin_unlock_irqrestore(&cq->lock, flags);

	return err == 0 || err == -EAGAIN ? npolled : err;
}

void mthca_arm_cq(struct mthca_dev *dev, struct mthca_cq *cq,
		  int solicited)
{
	u32 doorbell[2];

	doorbell[0] = cpu_to_be32((solicited ?
				   MTHCA_CQ_DB_REQ_NOT_SOL :
				   MTHCA_CQ_DB_REQ_NOT)      |
				  cq->cqn);
	doorbell[1] = 0xffffffff;

	mthca_write64(doorbell,
		      dev->kar + MTHCA_CQ_DOORBELL,
		      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
}

int mthca_init_cq(struct mthca_dev *dev, int nent,
		  struct mthca_cq *cq)
{
	int size = nent * MTHCA_CQ_ENTRY_SIZE;
	dma_addr_t t;
	void *mailbox = NULL;
	int npages, shift;
	u64 *dma_list = NULL;
	struct mthca_cq_context *cq_context;
	int err = -ENOMEM;
	u8 status;
	int i;

	might_sleep();

	mailbox = kmalloc(sizeof (struct mthca_cq_context) + MTHCA_CMD_MAILBOX_EXTRA,
			  GFP_KERNEL);
	if (!mailbox)
		goto err_out;

	cq_context = MAILBOX_ALIGN(mailbox);

	if (size <= MTHCA_MAX_DIRECT_CQ_SIZE) {
		if (0)
			mthca_dbg(dev, "Creating direct CQ of size %d\n", size);

		cq->is_direct = 1;
		npages        = 1;
		shift         = get_order(size) + PAGE_SHIFT;

		cq->queue.direct.buf = pci_alloc_consistent(dev->pdev,
							    size, &t);
		if (!cq->queue.direct.buf)
			goto err_out;

		pci_unmap_addr_set(&cq->queue.direct, mapping, t);

		memset(cq->queue.direct.buf, 0, size);

		while (t & ((1 << shift) - 1)) {
			--shift;
			npages *= 2;
		}

		dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
		if (!dma_list)
			goto err_out_free;

		for (i = 0; i < npages; ++i)
			dma_list[i] = t + i * (1 << shift);
	} else {
		cq->is_direct = 0;
		npages        = (size + PAGE_SIZE - 1) / PAGE_SIZE;
		shift         = PAGE_SHIFT;

		if (0)
			mthca_dbg(dev, "Creating indirect CQ with %d pages\n", npages);

		dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
		if (!dma_list)
			goto err_out;

		cq->queue.page_list = kmalloc(npages * sizeof *cq->queue.page_list,
					      GFP_KERNEL);
		if (!cq->queue.page_list)
			goto err_out;

		for (i = 0; i < npages; ++i)
			cq->queue.page_list[i].buf = NULL;

		for (i = 0; i < npages; ++i) {
			cq->queue.page_list[i].buf =
				pci_alloc_consistent(dev->pdev, PAGE_SIZE, &t);
			if (!cq->queue.page_list[i].buf)
				goto err_out_free;

			dma_list[i] = t;
			pci_unmap_addr_set(&cq->queue.page_list[i], mapping, t);

			memset(cq->queue.page_list[i].buf, 0, PAGE_SIZE);
		}
	}

	for (i = 0; i < nent; ++i)
		set_cqe_hw(cq, i);

	cq->cqn = mthca_alloc(&dev->cq_table.alloc);
	if (cq->cqn == -1)
		goto err_out_free;

	err = mthca_mr_alloc_phys(dev, dev->driver_pd.pd_num,
				  dma_list, shift, npages,
				  0, size,
				  MTHCA_MPT_FLAG_LOCAL_WRITE |
				  MTHCA_MPT_FLAG_LOCAL_READ,
				  &cq->mr);
	if (err)
		goto err_out_free_cq;

	spin_lock_init(&cq->lock);
	atomic_set(&cq->refcount, 1);
	init_waitqueue_head(&cq->wait);

	memset(cq_context, 0, sizeof *cq_context);
	cq_context->flags           = cpu_to_be32(MTHCA_CQ_STATUS_OK      |
						  MTHCA_CQ_STATE_DISARMED |
						  MTHCA_CQ_FLAG_TR);
	cq_context->start           = cpu_to_be64(0);
	cq_context->logsize_usrpage = cpu_to_be32((ffs(nent) - 1) << 24 |
						  MTHCA_KAR_PAGE);
	cq_context->error_eqn       = cpu_to_be32(dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn);
	cq_context->comp_eqn        = cpu_to_be32(dev->eq_table.eq[MTHCA_EQ_COMP].eqn);
	cq_context->pd              = cpu_to_be32(dev->driver_pd.pd_num);
	cq_context->lkey            = cpu_to_be32(cq->mr.ibmr.lkey);
	cq_context->cqn             = cpu_to_be32(cq->cqn);

	err = mthca_SW2HW_CQ(dev, cq_context, cq->cqn, &status);
	if (err) {
		mthca_warn(dev, "SW2HW_CQ failed (%d)\n", err);
		goto err_out_free_mr;
	}

	if (status) {
		mthca_warn(dev, "SW2HW_CQ returned status 0x%02x\n",
			   status);
		err = -EINVAL;
		goto err_out_free_mr;
	}

	spin_lock_irq(&dev->cq_table.lock);
	if (mthca_array_set(&dev->cq_table.cq,
			    cq->cqn & (dev->limits.num_cqs - 1),
			    cq)) {
		spin_unlock_irq(&dev->cq_table.lock);
		goto err_out_free_mr;
	}
	spin_unlock_irq(&dev->cq_table.lock);

	cq->cons_index = 0;

	kfree(dma_list);
	kfree(mailbox);

	return 0;

 err_out_free_mr:
	mthca_free_mr(dev, &cq->mr);

 err_out_free_cq:
	mthca_free(&dev->cq_table.alloc, cq->cqn);

 err_out_free:
	if (cq->is_direct)
		pci_free_consistent(dev->pdev, size,
				    cq->queue.direct.buf,
				    pci_unmap_addr(&cq->queue.direct, mapping));
	else {
		for (i = 0; i < npages; ++i)
			if (cq->queue.page_list[i].buf)
				pci_free_consistent(dev->pdev, PAGE_SIZE,
						    cq->queue.page_list[i].buf,
						    pci_unmap_addr(&cq->queue.page_list[i],
								   mapping));
		kfree(cq->queue.page_list);
	}

 err_out:
	kfree(dma_list);
	kfree(mailbox);

	return err;
}

void mthca_free_cq(struct mthca_dev *dev,
		   struct mthca_cq *cq)
{
	void *mailbox;
	int err;
	u8 status;

	might_sleep();

	mailbox = kmalloc(sizeof (struct mthca_cq_context) + MTHCA_CMD_MAILBOX_EXTRA,
			  GFP_KERNEL);
	if (!mailbox) {
		mthca_warn(dev, "No memory for mailbox to free CQ.\n");
		return;
	}

	err = mthca_HW2SW_CQ(dev, MAILBOX_ALIGN(mailbox), cq->cqn, &status);
	if (err)
		mthca_warn(dev, "HW2SW_CQ failed (%d)\n", err);
	else if (status)
		mthca_warn(dev, "HW2SW_CQ returned status 0x%02x\n",
			   status);

	if (0) {
		u32 *ctx = MAILBOX_ALIGN(mailbox);
		int j;

		printk(KERN_ERR "context for CQN %x (cons index %x, next sw %d)\n",
		       cq->cqn, cq->cons_index, next_cqe_sw(cq));
		for (j = 0; j < 16; ++j)
			printk(KERN_ERR "[%2x] %08x\n", j * 4, be32_to_cpu(ctx[j]));
	}

	spin_lock_irq(&dev->cq_table.lock);
	mthca_array_clear(&dev->cq_table.cq,
			  cq->cqn & (dev->limits.num_cqs - 1));
	spin_unlock_irq(&dev->cq_table.lock);

	atomic_dec(&cq->refcount);
	wait_event(cq->wait, !atomic_read(&cq->refcount));

	mthca_free_mr(dev, &cq->mr);

	if (cq->is_direct)
		pci_free_consistent(dev->pdev,
				    (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE,
				    cq->queue.direct.buf,
				    pci_unmap_addr(&cq->queue.direct,
						   mapping));
	else {
		int i;

		for (i = 0;
		     i < ((cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE + PAGE_SIZE - 1) /
			     PAGE_SIZE;
		     ++i)
			pci_free_consistent(dev->pdev, PAGE_SIZE,
					    cq->queue.page_list[i].buf,
					    pci_unmap_addr(&cq->queue.page_list[i],
							   mapping));
		kfree(cq->queue.page_list);
	}

	mthca_free(&dev->cq_table.alloc, cq->cqn);

	kfree(mailbox);
}

int __devinit mthca_init_cq_table(struct mthca_dev *dev)
{
	int err;

	spin_lock_init(&dev->cq_table.lock);

	err = mthca_alloc_init(&dev->cq_table.alloc,
			       dev->limits.num_cqs,
			       (1 << 24) - 1,
			       dev->limits.reserved_cqs);
	if (err)
		return err;

	err = mthca_array_init(&dev->cq_table.cq,
			       dev->limits.num_cqs);
	if (err)
		mthca_alloc_cleanup(&dev->cq_table.alloc);

	return err;
}

void __devexit mthca_cleanup_cq_table(struct mthca_dev *dev)
{
	mthca_array_cleanup(&dev->cq_table.cq, dev->limits.num_cqs);
	mthca_alloc_cleanup(&dev->cq_table.alloc);
}
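
For context, here is a minimal usage sketch, not part of mthca_cq.c itself: a kernel consumer reaches mthca_poll_cq() and mthca_arm_cq() above through the generic verbs entry points ib_poll_cq() and ib_req_notify_cq(). The function name example_drain_cq and the one-entry batch size are illustrative assumptions, not anything defined by this driver.

#include <linux/kernel.h>
#include <ib_verbs.h>	/* <rdma/ib_verbs.h> in later kernel trees */

/* Illustrative sketch only: drain completions one at a time, then
 * re-arm the CQ for the next completion event. */
static void example_drain_cq(struct ib_cq *cq)
{
	struct ib_wc wc;

	/* ib_poll_cq() dispatches to mthca_poll_cq() above, which
	 * returns the number of work completions written into wc. */
	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (wc.status != IB_WC_SUCCESS)
			printk(KERN_WARNING "WR %llu failed with status %d\n",
			       (unsigned long long) wc.wr_id, wc.status);
		/* On success, wc.opcode, wc.byte_len and wc.wr_id
		 * identify the completed work request. */
	}

	/* Request the next completion event; this reaches
	 * mthca_arm_cq() via the provider's req_notify_cq method. */
	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
}

Note that mthca_poll_cq() returns the number of entries polled even when the queue drains early, so a return value of 0 from ib_poll_cq() simply means the CQ is empty.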
