⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 mthca_eq.c

📁 linux-2.6.15.6
💻 C
📖 第 1 页 / 共 2 页
字号:
		PAGE_SIZE;
	u64 *dma_list = NULL;
	dma_addr_t t;
	struct mthca_mailbox *mailbox;
	struct mthca_eq_context *eq_context;
	int err = -ENOMEM;
	int i;
	u8 status;

	eq->dev  = dev;
	/*
	 * Round the requested entry count up to a power of two, with a
	 * minimum of 2 entries.
	 *
	 * NOTE(review): npages (declared above this chunk) appears to be
	 * computed from the caller's nent *before* this round-up — confirm
	 * that the rounded EQ still fits in npages pages.
	 */
	eq->nent = roundup_pow_of_two(max(nent, 2));

	eq->page_list = kmalloc(npages * sizeof *eq->page_list,
				GFP_KERNEL);
	if (!eq->page_list)
		goto err_out;

	/* NULL every buf pointer so the error path below can tell which
	 * pages were actually allocated. */
	for (i = 0; i < npages; ++i)
		eq->page_list[i].buf = NULL;

	dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
	if (!dma_list)
		goto err_out_free;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		goto err_out_free;
	eq_context = mailbox->buf;

	/*
	 * Allocate the EQ buffer one coherent DMA page at a time,
	 * recording each bus address both for the MR registration below
	 * and (via pci_unmap_addr_set) for freeing later.
	 */
	for (i = 0; i < npages; ++i) {
		eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
							  PAGE_SIZE, &t, GFP_KERNEL);
		if (!eq->page_list[i].buf)
			goto err_out_free_pages;

		dma_list[i] = t;
		pci_unmap_addr_set(&eq->page_list[i], mapping, t);

		memset(eq->page_list[i].buf, 0, PAGE_SIZE);
	}

	/* Initialize every EQE via set_eqe_hw() before the EQ goes live. */
	for (i = 0; i < eq->nent; ++i)
		set_eqe_hw(get_eqe(eq, i));

	eq->eqn = mthca_alloc(&dev->eq_table.alloc);
	if (eq->eqn == -1)
		goto err_out_free_pages;

	/* Register the page list as a physical MR so the HCA can write
	 * event entries into it (local read/write access only). */
	err = mthca_mr_alloc_phys(dev, dev->driver_pd.pd_num,
				  dma_list, PAGE_SHIFT, npages,
				  0, npages * PAGE_SIZE,
				  MTHCA_MPT_FLAG_LOCAL_WRITE |
				  MTHCA_MPT_FLAG_LOCAL_READ,
				  &eq->mr);
	if (err)
		goto err_out_free_eq;

	/* Build the EQ context that SW2HW_EQ hands to firmware. */
	memset(eq_context, 0, sizeof *eq_context);
	eq_context->flags           = cpu_to_be32(MTHCA_EQ_STATUS_OK   |
						  MTHCA_EQ_OWNER_HW    |
						  MTHCA_EQ_STATE_ARMED |
						  MTHCA_EQ_FLAG_TR);
	if (mthca_is_memfree(dev))
		eq_context->flags  |= cpu_to_be32(MTHCA_EQ_STATE_ARBEL);

	/* log2 of the (power-of-two) entry count goes in bits 24+. */
	eq_context->logsize_usrpage = cpu_to_be32((ffs(eq->nent) - 1) << 24);
	if (mthca_is_memfree(dev)) {
		eq_context->arbel_pd = cpu_to_be32(dev->driver_pd.pd_num);
	} else {
		/* Tavor-style: UAR index shares the logsize word. */
		eq_context->logsize_usrpage |= cpu_to_be32(dev->driver_uar.index);
		eq_context->tavor_pd         = cpu_to_be32(dev->driver_pd.pd_num);
	}
	eq_context->intr            = intr;
	eq_context->lkey            = cpu_to_be32(eq->mr.ibmr.lkey);

	/* Hand the EQ over to hardware ownership. */
	err = mthca_SW2HW_EQ(dev, mailbox, eq->eqn, &status);
	if (err) {
		mthca_warn(dev, "SW2HW_EQ failed (%d)\n", err);
		goto err_out_free_mr;
	}
	if (status) {
		mthca_warn(dev, "SW2HW_EQ returned status 0x%02x\n",
			   status);
		err = -EINVAL;
		goto err_out_free_mr;
	}

	kfree(dma_list);
	mthca_free_mailbox(dev, mailbox);

	eq->eqn_mask   = swab32(1 << eq->eqn);
	eq->cons_index = 0;

	dev->eq_table.arm_mask |= eq->eqn_mask;

	mthca_dbg(dev, "Allocated EQ %d with %d entries\n",
		  eq->eqn, eq->nent);

	/* err is 0 here on success. */
	return err;

	/* Unwind in reverse order of acquisition. */
 err_out_free_mr:
	mthca_free_mr(dev, &eq->mr);

 err_out_free_eq:
	mthca_free(&dev->eq_table.alloc, eq->eqn);

 err_out_free_pages:
	for (i = 0; i < npages; ++i)
		if (eq->page_list[i].buf)
			dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
					  eq->page_list[i].buf,
					  pci_unmap_addr(&eq->page_list[i],
							 mapping));

	mthca_free_mailbox(dev, mailbox);

 err_out_free:
	kfree(eq->page_list);
	kfree(dma_list);

 err_out:
	return err;
}

/*
 * Tear an EQ back down: return it to software ownership via HW2SW_EQ,
 * then release its MR, DMA pages, page list, and mailbox.  (The EQ
 * number itself is reclaimed wholesale by mthca_alloc_cleanup().)
 */
static void mthca_free_eq(struct mthca_dev *dev,
			  struct mthca_eq *eq)
{
	struct mthca_mailbox *mailbox;
	int err;
	u8 status;
	int npages = (eq->nent * MTHCA_EQ_ENTRY_SIZE + PAGE_SIZE - 1) /
		PAGE_SIZE;
	int i;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return;

	err = mthca_HW2SW_EQ(dev, mailbox, eq->eqn, &status);
	if (err)
		mthca_warn(dev, "HW2SW_EQ failed (%d)\n", err);
	if (status)
		mthca_warn(dev, "HW2SW_EQ returned status 0x%02x\n", status);

	dev->eq_table.arm_mask &= ~eq->eqn_mask;

	/* Debug aid: flip to if (1) to dump the EQ context that
	 * HW2SW_EQ wrote back into the mailbox. */
	if (0) {
		mthca_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn);
		for (i = 0; i < sizeof (struct mthca_eq_context) / 4; ++i) {
			if (i % 4 == 0)
				printk("[%02x] ", i * 4);
			printk(" %08x", be32_to_cpup(mailbox->buf + i * 4));
			if ((i + 1) % 4 == 0)
				printk("\n");
		}
	}

	mthca_free_mr(dev, &eq->mr);
	/*
	 * NOTE(review): the pages were allocated with
	 * dma_alloc_coherent(&pdev->dev, ...); pci_free_consistent(pdev, ...)
	 * should be the equivalent wrapper — confirm the pairing.
	 */
	for (i = 0; i < npages; ++i)
		pci_free_consistent(dev->pdev, PAGE_SIZE,
				    eq->page_list[i].buf,
				    pci_unmap_addr(&eq->page_list[i], mapping));

	kfree(eq->page_list);
	mthca_free_mailbox(dev, mailbox);
}

/*
 * Release the shared INTx handler (if registered) and every per-EQ
 * MSI-X vector that was requested.
 */
static void mthca_free_irqs(struct mthca_dev *dev)
{
	int i;

	if (dev->eq_table.have_irq)
		free_irq(dev->pdev->irq, dev);
	for (i = 0; i < MTHCA_NUM_EQ; ++i)
		if (dev->eq_table.eq[i].have_irq)
			free_irq(dev->eq_table.eq[i].msi_x_vector,
				 dev->eq_table.eq + i);
}

/*
 * Reserve and ioremap a register window at @offset into BAR 0.
 * Returns 0 on success, -EBUSY if the region is taken, -ENOMEM if the
 * ioremap fails.
 */
static int __devinit mthca_map_reg(struct mthca_dev *dev,
				   unsigned long offset, unsigned long size,
				   void __iomem **map)
{
	unsigned long base = pci_resource_start(dev->pdev, 0);

	if (!request_mem_region(base + offset, size, DRV_NAME))
		return -EBUSY;

	*map = ioremap(base + offset, size);
	if (!*map) {
		release_mem_region(base + offset, size);
		return -ENOMEM;
	}

	return 0;
}

/* Undo mthca_map_reg(): release the memory region and unmap it. */
static void mthca_unmap_reg(struct mthca_dev *dev, unsigned long offset,
			    unsigned long size, void __iomem *map)
{
	unsigned long base = pci_resource_start(dev->pdev, 0);

	release_mem_region(base + offset, size);
	iounmap(map);
}

/*
 * Map the interrupt-clear and EQ doorbell registers.  The register
 * layout differs between mem-free (Arbel) HCAs, whose offsets come from
 * firmware, and Tavor-style HCAs, which use fixed offsets.  Each
 * firmware offset is masked with (BAR0 length - 1) to force it to an
 * offset within BAR 0 (see the comment below on why firmware addresses
 * cannot be trusted verbatim).
 */
static int __devinit mthca_map_eq_regs(struct mthca_dev *dev)
{
	/* NOTE(review): mthca_base is assigned but never used in this
	 * function — looks like dead code. */
	unsigned long mthca_base;

	mthca_base = pci_resource_start(dev->pdev, 0);

	if (mthca_is_memfree(dev)) {
		/*
		 * We assume that the EQ arm and EQ set CI registers
		 * fall within the first BAR.  We can't trust the
		 * values firmware gives us, since those addresses are
		 * valid on the HCA's side of the PCI bus but not
		 * necessarily the host side.
		 */
		if (mthca_map_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
				  dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE,
				  &dev->clr_base)) {
			mthca_err(dev, "Couldn't map interrupt clear register, "
				  "aborting.\n");
			return -ENOMEM;
		}

		/*
		 * Add 4 because we limit ourselves to EQs 0 ... 31,
		 * so we only need the low word of the register.
		 */
		if (mthca_map_reg(dev, ((pci_resource_len(dev->pdev, 0) - 1) &
					dev->fw.arbel.eq_arm_base) + 4, 4,
				  &dev->eq_regs.arbel.eq_arm)) {
			mthca_err(dev, "Couldn't map EQ arm register, aborting.\n");
			mthca_unmap_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
					dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE,
					dev->clr_base);
			return -ENOMEM;
		}

		if (mthca_map_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
				  dev->fw.arbel.eq_set_ci_base,
				  MTHCA_EQ_SET_CI_SIZE,
				  &dev->eq_regs.arbel.eq_set_ci_base)) {
			mthca_err(dev, "Couldn't map EQ CI register, aborting.\n");
			/* Unwind the two earlier mappings in reverse order. */
			mthca_unmap_reg(dev, ((pci_resource_len(dev->pdev, 0) - 1) &
					      dev->fw.arbel.eq_arm_base) + 4, 4,
					dev->eq_regs.arbel.eq_arm);
			mthca_unmap_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
					dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE,
					dev->clr_base);
			return -ENOMEM;
		}
	} else {
		if (mthca_map_reg(dev, MTHCA_CLR_INT_BASE, MTHCA_CLR_INT_SIZE,
				  &dev->clr_base)) {
			mthca_err(dev, "Couldn't map interrupt clear register, "
				  "aborting.\n");
			return -ENOMEM;
		}

		if (mthca_map_reg(dev, MTHCA_ECR_BASE,
				  MTHCA_ECR_SIZE + MTHCA_ECR_CLR_SIZE,
				  &dev->eq_regs.tavor.ecr_base)) {
			mthca_err(dev, "Couldn't map ecr register, "
				  "aborting.\n");
			mthca_unmap_reg(dev, MTHCA_CLR_INT_BASE, MTHCA_CLR_INT_SIZE,
					dev->clr_base);
			return -ENOMEM;
		}
	}

	return 0;
}

/* Unmap everything mapped by mthca_map_eq_regs(), in reverse order. */
static void __devexit mthca_unmap_eq_regs(struct mthca_dev *dev)
{
	if (mthca_is_memfree(dev)) {
		mthca_unmap_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
				dev->fw.arbel.eq_set_ci_base,
				MTHCA_EQ_SET_CI_SIZE,
				dev->eq_regs.arbel.eq_set_ci_base);
		mthca_unmap_reg(dev, ((pci_resource_len(dev->pdev, 0) - 1) &
				      dev->fw.arbel.eq_arm_base) + 4, 4,
				dev->eq_regs.arbel.eq_arm);
		mthca_unmap_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
				dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE,
				dev->clr_base);
	} else {
		mthca_unmap_reg(dev, MTHCA_ECR_BASE,
				MTHCA_ECR_SIZE + MTHCA_ECR_CLR_SIZE,
				dev->eq_regs.tavor.ecr_base);
		mthca_unmap_reg(dev, MTHCA_CLR_INT_BASE, MTHCA_CLR_INT_SIZE,
				dev->clr_base);
	}
}

/*
 * Map one highmem page of ICM for the EQ context table at @icm_virt.
 * Returns 0 on success or a negative errno; on any failure the page is
 * unmapped/freed before returning.
 */
int __devinit mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt)
{
	int ret;
	u8 status;

	/*
	 * We assume that mapping one page is enough for the whole EQ
	 * context table.  This is fine with all current HCAs, because
	 * we only use 32 EQs and each EQ uses 32 bytes of context
	 * memory, or 1 KB total.
	 */
	dev->eq_table.icm_virt = icm_virt;
	dev->eq_table.icm_page = alloc_page(GFP_HIGHUSER);
	if (!dev->eq_table.icm_page)
		return -ENOMEM;
	dev->eq_table.icm_dma  = pci_map_page(dev->pdev, dev->eq_table.icm_page, 0,
					      PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(dev->eq_table.icm_dma)) {
		__free_page(dev->eq_table.icm_page);
		return -ENOMEM;
	}

	ret = mthca_MAP_ICM_page(dev, dev->eq_table.icm_dma, icm_virt, &status);
	/* A nonzero firmware status with a zero return is still a failure. */
	if (!ret && status)
		ret = -EINVAL;
	if (ret) {
		pci_unmap_page(dev->pdev, dev->eq_table.icm_dma, PAGE_SIZE,
			       PCI_DMA_BIDIRECTIONAL);
		__free_page(dev->eq_table.icm_page);
	}

	return ret;
}

/*
 * Inverse of mthca_map_eq_icm(): unmap the ICM region (PAGE_SIZE / 4096
 * converts the page to the firmware's 4 KB ICM-page count), unmap the
 * DMA mapping, and free the page.
 */
void __devexit mthca_unmap_eq_icm(struct mthca_dev *dev)
{
	u8 status;

	mthca_UNMAP_ICM(dev, dev->eq_table.icm_virt, PAGE_SIZE / 4096, &status);
	pci_unmap_page(dev->pdev, dev->eq_table.icm_dma, PAGE_SIZE,
		       PCI_DMA_BIDIRECTIONAL);
	__free_page(dev->eq_table.icm_page);
}

/*
 * Set up the whole EQ table: EQN allocator, doorbell register mappings,
 * the completion/async/command EQs, interrupt handlers (MSI-X or a
 * shared INTx line), and the async/command event-to-EQ mappings.
 * Returns 0 on success; on failure everything acquired so far is torn
 * down via the goto chain at the bottom.
 */
int __devinit mthca_init_eq_table(struct mthca_dev *dev)
{
	int err;
	u8 status;
	u8 intr;
	int i;

	err = mthca_alloc_init(&dev->eq_table.alloc,
			       dev->limits.num_eqs,
			       dev->limits.num_eqs - 1,
			       dev->limits.reserved_eqs);
	if (err)
		return err;

	err = mthca_map_eq_regs(dev);
	if (err)
		goto err_out_free;

	/* With MSI/MSI-X no INTx clear is needed; otherwise derive the
	 * clear mask and clear-register address from the INTA pin. */
	if (dev->mthca_flags & MTHCA_FLAG_MSI ||
	    dev->mthca_flags & MTHCA_FLAG_MSI_X) {
		dev->eq_table.clr_mask = 0;
	} else {
		dev->eq_table.clr_mask =
			swab32(1 << (dev->eq_table.inta_pin & 31));
		dev->eq_table.clr_int  = dev->clr_base +
			(dev->eq_table.inta_pin < 32 ? 4 : 0);
	}

	dev->eq_table.arm_mask = 0;

	/* Interrupt "vector" programmed into each EQ context: 128 for
	 * plain MSI, the INTA pin otherwise (MSI-X overrides below with
	 * 128/129/130 per EQ). */
	intr = (dev->mthca_flags & MTHCA_FLAG_MSI) ?
		128 : dev->eq_table.inta_pin;

	err = mthca_create_eq(dev, dev->limits.num_cqs,
			      (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 128 : intr,
			      &dev->eq_table.eq[MTHCA_EQ_COMP]);
	if (err)
		goto err_out_unmap;

	err = mthca_create_eq(dev, MTHCA_NUM_ASYNC_EQE,
			      (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 129 : intr,
			      &dev->eq_table.eq[MTHCA_EQ_ASYNC]);
	if (err)
		goto err_out_comp;

	err = mthca_create_eq(dev, MTHCA_NUM_CMD_EQE,
			      (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 130 : intr,
			      &dev->eq_table.eq[MTHCA_EQ_CMD]);
	if (err)
		goto err_out_async;

	if (dev->mthca_flags & MTHCA_FLAG_MSI_X) {
		static const char *eq_name[] = {
			[MTHCA_EQ_COMP]  = DRV_NAME " (comp)",
			[MTHCA_EQ_ASYNC] = DRV_NAME " (async)",
			[MTHCA_EQ_CMD]   = DRV_NAME " (cmd)"
		};

		/* One dedicated vector per EQ. */
		for (i = 0; i < MTHCA_NUM_EQ; ++i) {
			err = request_irq(dev->eq_table.eq[i].msi_x_vector,
					  mthca_is_memfree(dev) ?
					  mthca_arbel_msi_x_interrupt :
					  mthca_tavor_msi_x_interrupt,
					  0, eq_name[i], dev->eq_table.eq + i);
			if (err)
				goto err_out_cmd;
			dev->eq_table.eq[i].have_irq = 1;
		}
	} else {
		/* SA_SHIRQ: legacy shared-interrupt flag (pre-IRQF_SHARED). */
		err = request_irq(dev->pdev->irq,
				  mthca_is_memfree(dev) ?
				  mthca_arbel_interrupt :
				  mthca_tavor_interrupt,
				  SA_SHIRQ, DRV_NAME, dev);
		if (err)
			goto err_out_cmd;
		dev->eq_table.have_irq = 1;
	}

	/* Route async and command-completion events to their EQs
	 * (unmap flag = 0 means map).  Failures here are only warned
	 * about, not treated as fatal. */
	err = mthca_MAP_EQ(dev, async_mask(dev),
			   0, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, &status);
	if (err)
		mthca_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
			   dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, err);
	if (status)
		mthca_warn(dev, "MAP_EQ for async EQ %d returned status 0x%02x\n",
			   dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, status);

	err = mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK,
			   0, dev->eq_table.eq[MTHCA_EQ_CMD].eqn, &status);
	if (err)
		mthca_warn(dev, "MAP_EQ for cmd EQ %d failed (%d)\n",
			   dev->eq_table.eq[MTHCA_EQ_CMD].eqn, err);
	if (status)
		mthca_warn(dev, "MAP_EQ for cmd EQ %d returned status 0x%02x\n",
			   dev->eq_table.eq[MTHCA_EQ_CMD].eqn, status);

	/*
	 * NOTE(review): this bound arms only EQ indices below
	 * MTHCA_EQ_CMD (i.e. not the command EQ itself); mainline
	 * kernels arm all MTHCA_NUM_EQ here — verify the bound is
	 * intentional in this tree.
	 */
	for (i = 0; i < MTHCA_EQ_CMD; ++i)
		if (mthca_is_memfree(dev))
			arbel_eq_req_not(dev, dev->eq_table.eq[i].eqn_mask);
		else
			tavor_eq_req_not(dev, dev->eq_table.eq[i].eqn);

	return 0;

err_out_cmd:
	mthca_free_irqs(dev);
	mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_CMD]);

err_out_async:
	mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_ASYNC]);

err_out_comp:
	mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_COMP]);

err_out_unmap:
	mthca_unmap_eq_regs(dev);

err_out_free:
	mthca_alloc_cleanup(&dev->eq_table.alloc);

	return err;
}

/*
 * Inverse of mthca_init_eq_table(): free the IRQs, unmap the async and
 * command event routing (unmap flag = 1), free every EQ, unmap the
 * doorbell registers, and destroy the EQN allocator.
 */
void __devexit mthca_cleanup_eq_table(struct mthca_dev *dev)
{
	u8 status;
	int i;

	mthca_free_irqs(dev);

	mthca_MAP_EQ(dev, async_mask(dev),
		     1, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, &status);
	mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK,
		     1, dev->eq_table.eq[MTHCA_EQ_CMD].eqn, &status);

	for (i = 0; i < MTHCA_NUM_EQ; ++i)
		mthca_free_eq(dev, &dev->eq_table.eq[i]);

	mthca_unmap_eq_regs(dev);

	mthca_alloc_cleanup(&dev->eq_table.alloc);
}

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -