
📄 mthca_eq.c

📁 Kernel
💻 C
📖 Page 1 of 2
		for (i = 0; i < MTHCA_NUM_EQ; ++i)
			if (ecr & dev->eq_table.eq[i].ecr_mask)
				mthca_eq_int(dev, &dev->eq_table.eq[i]);
	}

	return IRQ_RETVAL(work);
}

static irqreturn_t mthca_msi_x_interrupt(int irq, void *eq_ptr,
					 struct pt_regs *regs)
{
	struct mthca_eq  *eq  = eq_ptr;
	struct mthca_dev *dev = eq->dev;

	mthca_eq_int(dev, eq);

	/* MSI-X vectors always belong to us */
	return IRQ_HANDLED;
}

static int __devinit mthca_create_eq(struct mthca_dev *dev,
				     int nent,
				     u8 intr,
				     struct mthca_eq *eq)
{
	int npages = (nent * MTHCA_EQ_ENTRY_SIZE + PAGE_SIZE - 1) /
		PAGE_SIZE;
	u64 *dma_list = NULL;
	dma_addr_t t;
	void *mailbox = NULL;
	struct mthca_eq_context *eq_context;
	int err = -ENOMEM;
	int i;
	u8 status;

	/* Make sure EQ size is aligned to a power of 2 size. */
	for (i = 1; i < nent; i <<= 1)
		; /* nothing */
	nent = i;

	eq->dev = dev;

	eq->page_list = kmalloc(npages * sizeof *eq->page_list,
				GFP_KERNEL);
	if (!eq->page_list)
		goto err_out;

	for (i = 0; i < npages; ++i)
		eq->page_list[i].buf = NULL;

	dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
	if (!dma_list)
		goto err_out_free;

	mailbox = kmalloc(sizeof *eq_context + MTHCA_CMD_MAILBOX_EXTRA,
			  GFP_KERNEL);
	if (!mailbox)
		goto err_out_free;
	eq_context = MAILBOX_ALIGN(mailbox);

	for (i = 0; i < npages; ++i) {
		eq->page_list[i].buf = pci_alloc_consistent(dev->pdev,
							    PAGE_SIZE, &t);
		if (!eq->page_list[i].buf)
			goto err_out_free;

		dma_list[i] = t;
		pci_unmap_addr_set(&eq->page_list[i], mapping, t);

		memset(eq->page_list[i].buf, 0, PAGE_SIZE);
	}

	for (i = 0; i < nent; ++i)
		set_eqe_hw(get_eqe(eq, i));

	eq->eqn = mthca_alloc(&dev->eq_table.alloc);
	if (eq->eqn == -1)
		goto err_out_free;

	err = mthca_mr_alloc_phys(dev, dev->driver_pd.pd_num,
				  dma_list, PAGE_SHIFT, npages,
				  0, npages * PAGE_SIZE,
				  MTHCA_MPT_FLAG_LOCAL_WRITE |
				  MTHCA_MPT_FLAG_LOCAL_READ,
				  &eq->mr);
	if (err)
		goto err_out_free_eq;

	eq->nent = nent;

	memset(eq_context, 0, sizeof *eq_context);
	eq_context->flags           = cpu_to_be32(MTHCA_EQ_STATUS_OK   |
						  MTHCA_EQ_OWNER_HW    |
						  MTHCA_EQ_STATE_ARMED |
						  MTHCA_EQ_FLAG_TR);
	eq_context->start           = cpu_to_be64(0);
	eq_context->logsize_usrpage = cpu_to_be32((ffs(nent) - 1) << 24 |
						  MTHCA_KAR_PAGE);
	eq_context->pd              = cpu_to_be32(dev->driver_pd.pd_num);
	eq_context->intr            = intr;
	eq_context->lkey            = cpu_to_be32(eq->mr.ibmr.lkey);

	err = mthca_SW2HW_EQ(dev, eq_context, eq->eqn, &status);
	if (err) {
		mthca_warn(dev, "SW2HW_EQ failed (%d)\n", err);
		goto err_out_free_mr;
	}
	if (status) {
		mthca_warn(dev, "SW2HW_EQ returned status 0x%02x\n",
			   status);
		err = -EINVAL;
		goto err_out_free_mr;
	}

	kfree(dma_list);
	kfree(mailbox);

	eq->ecr_mask   = swab32(1 << eq->eqn);
	eq->cons_index = 0;

	eq_req_not(dev, eq->eqn);

	mthca_dbg(dev, "Allocated EQ %d with %d entries\n",
		  eq->eqn, nent);

	return err;

 err_out_free_mr:
	mthca_free_mr(dev, &eq->mr);

 err_out_free_eq:
	mthca_free(&dev->eq_table.alloc, eq->eqn);

 err_out_free:
	for (i = 0; i < npages; ++i)
		if (eq->page_list[i].buf)
			pci_free_consistent(dev->pdev, PAGE_SIZE,
					    eq->page_list[i].buf,
					    pci_unmap_addr(&eq->page_list[i],
							   mapping));

	kfree(eq->page_list);
	kfree(dma_list);
	kfree(mailbox);

 err_out:
	return err;
}

static void mthca_free_eq(struct mthca_dev *dev,
			  struct mthca_eq *eq)
{
	void *mailbox = NULL;
	int err;
	u8 status;
	int npages = (eq->nent * MTHCA_EQ_ENTRY_SIZE + PAGE_SIZE - 1) /
		PAGE_SIZE;
	int i;

	mailbox = kmalloc(sizeof (struct mthca_eq_context) + MTHCA_CMD_MAILBOX_EXTRA,
			  GFP_KERNEL);
	if (!mailbox)
		return;

	err = mthca_HW2SW_EQ(dev, MAILBOX_ALIGN(mailbox),
			     eq->eqn, &status);
	if (err)
		mthca_warn(dev, "HW2SW_EQ failed (%d)\n", err);
	if (status)
		mthca_warn(dev, "HW2SW_EQ returned status 0x%02x\n",
			   status);

	if (0) {
		mthca_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn);
		for (i = 0; i < sizeof (struct mthca_eq_context) / 4; ++i) {
			if (i % 4 == 0)
				printk("[%02x] ", i * 4);
			printk(" %08x", be32_to_cpup(MAILBOX_ALIGN(mailbox) + i * 4));
			if ((i + 1) % 4 == 0)
				printk("\n");
		}
	}

	mthca_free_mr(dev, &eq->mr);
	for (i = 0; i < npages; ++i)
		pci_free_consistent(dev->pdev, PAGE_SIZE,
				    eq->page_list[i].buf,
				    pci_unmap_addr(&eq->page_list[i], mapping));

	kfree(eq->page_list);
	kfree(mailbox);
}

static void mthca_free_irqs(struct mthca_dev *dev)
{
	int i;

	if (dev->eq_table.have_irq)
		free_irq(dev->pdev->irq, dev);
	for (i = 0; i < MTHCA_NUM_EQ; ++i)
		if (dev->eq_table.eq[i].have_irq)
			free_irq(dev->eq_table.eq[i].msi_x_vector,
				 dev->eq_table.eq + i);
}

int __devinit mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt)
{
	int ret;
	u8 status;

	/*
	 * We assume that mapping one page is enough for the whole EQ
	 * context table.  This is fine with all current HCAs, because
	 * we only use 32 EQs and each EQ uses 32 bytes of context
	 * memory, or 1 KB total.
	 */
	dev->eq_table.icm_virt = icm_virt;
	dev->eq_table.icm_page = alloc_page(GFP_HIGHUSER);
	if (!dev->eq_table.icm_page)
		return -ENOMEM;
	dev->eq_table.icm_dma  = pci_map_page(dev->pdev, dev->eq_table.icm_page, 0,
					      PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(dev->eq_table.icm_dma)) {
		__free_page(dev->eq_table.icm_page);
		return -ENOMEM;
	}

	ret = mthca_MAP_ICM_page(dev, dev->eq_table.icm_dma, icm_virt, &status);
	if (!ret && status)
		ret = -EINVAL;
	if (ret) {
		pci_unmap_page(dev->pdev, dev->eq_table.icm_dma, PAGE_SIZE,
			       PCI_DMA_BIDIRECTIONAL);
		__free_page(dev->eq_table.icm_page);
	}

	return ret;
}

void __devexit mthca_unmap_eq_icm(struct mthca_dev *dev)
{
	u8 status;

	mthca_UNMAP_ICM(dev, dev->eq_table.icm_virt, PAGE_SIZE / 4096, &status);
	pci_unmap_page(dev->pdev, dev->eq_table.icm_dma, PAGE_SIZE,
		       PCI_DMA_BIDIRECTIONAL);
	__free_page(dev->eq_table.icm_page);
}

int __devinit mthca_init_eq_table(struct mthca_dev *dev)
{
	int err;
	u8 status;
	u8 intr;
	int i;

	err = mthca_alloc_init(&dev->eq_table.alloc,
			       dev->limits.num_eqs,
			       dev->limits.num_eqs - 1,
			       dev->limits.reserved_eqs);
	if (err)
		return err;

	if (dev->mthca_flags & MTHCA_FLAG_MSI ||
	    dev->mthca_flags & MTHCA_FLAG_MSI_X) {
		dev->eq_table.clr_mask = 0;
	} else {
		dev->eq_table.clr_mask =
			swab32(1 << (dev->eq_table.inta_pin & 31));
		dev->eq_table.clr_int  = dev->clr_base +
			(dev->eq_table.inta_pin < 31 ? 4 : 0);
	}

	intr = (dev->mthca_flags & MTHCA_FLAG_MSI) ?
		128 : dev->eq_table.inta_pin;

	err = mthca_create_eq(dev, dev->limits.num_cqs,
			      (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 128 : intr,
			      &dev->eq_table.eq[MTHCA_EQ_COMP]);
	if (err)
		goto err_out_free;

	err = mthca_create_eq(dev, MTHCA_NUM_ASYNC_EQE,
			      (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 129 : intr,
			      &dev->eq_table.eq[MTHCA_EQ_ASYNC]);
	if (err)
		goto err_out_comp;

	err = mthca_create_eq(dev, MTHCA_NUM_CMD_EQE,
			      (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 130 : intr,
			      &dev->eq_table.eq[MTHCA_EQ_CMD]);
	if (err)
		goto err_out_async;

	if (dev->mthca_flags & MTHCA_FLAG_MSI_X) {
		static const char *eq_name[] = {
			[MTHCA_EQ_COMP]  = DRV_NAME " (comp)",
			[MTHCA_EQ_ASYNC] = DRV_NAME " (async)",
			[MTHCA_EQ_CMD]   = DRV_NAME " (cmd)"
		};

		for (i = 0; i < MTHCA_NUM_EQ; ++i) {
			err = request_irq(dev->eq_table.eq[i].msi_x_vector,
					  mthca_msi_x_interrupt, 0,
					  eq_name[i], dev->eq_table.eq + i);
			if (err)
				goto err_out_cmd;
			dev->eq_table.eq[i].have_irq = 1;
		}
	} else {
		err = request_irq(dev->pdev->irq, mthca_interrupt, SA_SHIRQ,
				  DRV_NAME, dev);
		if (err)
			goto err_out_cmd;
		dev->eq_table.have_irq = 1;
	}

	err = mthca_MAP_EQ(dev, async_mask(dev),
			   0, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, &status);
	if (err)
		mthca_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
			   dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, err);
	if (status)
		mthca_warn(dev, "MAP_EQ for async EQ %d returned status 0x%02x\n",
			   dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, status);

	err = mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK,
			   0, dev->eq_table.eq[MTHCA_EQ_CMD].eqn, &status);
	if (err)
		mthca_warn(dev, "MAP_EQ for cmd EQ %d failed (%d)\n",
			   dev->eq_table.eq[MTHCA_EQ_CMD].eqn, err);
	if (status)
		mthca_warn(dev, "MAP_EQ for cmd EQ %d returned status 0x%02x\n",
			   dev->eq_table.eq[MTHCA_EQ_CMD].eqn, status);

	return 0;

err_out_cmd:
	mthca_free_irqs(dev);
	mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_CMD]);

err_out_async:
	mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_ASYNC]);

err_out_comp:
	mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_COMP]);

err_out_free:
	mthca_alloc_cleanup(&dev->eq_table.alloc);
	return err;
}

void __devexit mthca_cleanup_eq_table(struct mthca_dev *dev)
{
	u8 status;
	int i;

	mthca_free_irqs(dev);

	mthca_MAP_EQ(dev, async_mask(dev),
		     1, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, &status);
	mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK,
		     1, dev->eq_table.eq[MTHCA_EQ_CMD].eqn, &status);

	for (i = 0; i < MTHCA_NUM_EQ; ++i)
		mthca_free_eq(dev, &dev->eq_table.eq[i]);

	mthca_alloc_cleanup(&dev->eq_table.alloc);
}
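A subtle point in mthca_create_eq() above is how the EQ size is chosen: the requested entry count nent is rounded up to the next power of two with a shift loop, and ffs(nent) - 1 then recovers log2(nent) for the logsize_usrpage field of the EQ context. The standalone userspace sketch below is not driver code; round_up_pow2() and the sample numbers are illustrative only, but the arithmetic mirrors the loop and the ffs() expression in the listing.

/*
 * Userspace sketch of the EQ sizing done in mthca_create_eq():
 * round nent up to a power of two, then derive log2(nent) via ffs().
 */
#include <stdio.h>
#include <strings.h>	/* ffs() */

static int round_up_pow2(int nent)
{
	int i;

	/* same loop as in mthca_create_eq() */
	for (i = 1; i < nent; i <<= 1)
		; /* nothing */
	return i;
}

int main(void)
{
	int requested = 65537;			/* e.g. one more entry than 2^16 */
	int nent = round_up_pow2(requested);

	/* prints: requested 65537 -> nent 131072, log2 = 17 */
	printf("requested %d -> nent %d, log2 = %d\n",
	       requested, nent, ffs(nent) - 1);
	return 0;
}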
