iop-adma.c

From the "Linux kernel source code" · C code · 1,467 lines total · page 1 of 3

C
1,467
字号
		/* NOTE(review): tail of an error check whose condition lies
		 * before this chunk of iop_adma_xor_zero_sum_self_test(). */
		err = -ENODEV;
		goto out;
	}

	/* test xor: one descriptor XORs IOP_ADMA_NUM_SRC_TEST source pages
	 * into 'dest' */
	tx = iop_adma_prep_dma_xor(dma_chan, IOP_ADMA_NUM_SRC_TEST,
				PAGE_SIZE, 1);
	dest_dma = dma_map_page(dma_chan->device->dev, dest, 0,
				PAGE_SIZE, DMA_FROM_DEVICE);
	iop_adma_set_dest(dest_dma, tx, 0);
	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++) {
		dma_addr = dma_map_page(dma_chan->device->dev, xor_srcs[i], 0,
			PAGE_SIZE, DMA_TO_DEVICE);
		iop_adma_xor_set_src(dma_addr, tx, i);
	}

	cookie = iop_adma_tx_submit(tx);
	iop_adma_issue_pending(dma_chan);
	async_tx_ack(tx);
	/* completion is polled rather than interrupt driven; give the
	 * engine time to finish */
	msleep(8);

	if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
		DMA_SUCCESS) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			"Self-test xor timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	/* verify: every word of dest must equal cmp_word (the expected XOR,
	 * computed by setup code before this chunk); sync to CPU first */
	iop_chan = to_iop_adma_chan(dma_chan);
	dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma,
		PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i] != cmp_word) {
			dev_printk(KERN_ERR, dma_chan->device->dev,
				"Self-test xor failed compare, disabling\n");
			err = -ENODEV;
			goto free_resources;
		}
	}
	dma_sync_single_for_device(&iop_chan->device->pdev->dev, dest_dma,
		PAGE_SIZE, DMA_TO_DEVICE);

	/* skip zero sum if the capability is not present */
	if (!dma_has_cap(DMA_ZERO_SUM, dma_chan->device->cap_mask))
		goto free_resources;

	/* zero sum the sources with the destination page: the sources plus
	 * their XOR result must parity-sum to zero */
	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
		zero_sum_srcs[i] = xor_srcs[i];
	zero_sum_srcs[i] = dest;

	/* preset to the "wrong" value so we can tell the engine wrote it */
	zero_sum_result = 1;

	tx = iop_adma_prep_dma_zero_sum(dma_chan, IOP_ADMA_NUM_SRC_TEST + 1,
		PAGE_SIZE, &zero_sum_result, 1);
	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++) {
		dma_addr = dma_map_page(dma_chan->device->dev, zero_sum_srcs[i],
			0, PAGE_SIZE, DMA_TO_DEVICE);
		iop_adma_xor_zero_sum_set_src(dma_addr, tx, i);
	}

	cookie = iop_adma_tx_submit(tx);
	iop_adma_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(8);

	if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			"Self-test zero sum timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	if (zero_sum_result != 0) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			"Self-test zero sum failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	/* test memset: clear the destination page via the engine */
	tx = iop_adma_prep_dma_memset(dma_chan, 0, PAGE_SIZE, 1);
	dma_addr = dma_map_page(dma_chan->device->dev, dest, 0,
			PAGE_SIZE, DMA_FROM_DEVICE);
	iop_adma_set_dest(dma_addr, tx, 0);
	cookie = iop_adma_tx_submit(tx);
	iop_adma_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(8);

	if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			"Self-test memset timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	/* every word of dest must now read back as zero */
	for (i = 0; i < PAGE_SIZE/sizeof(u32); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i]) {
			dev_printk(KERN_ERR, dma_chan->device->dev,
				"Self-test memset failed compare, disabling\n");
			err = -ENODEV;
			goto free_resources;
		}
	}

	/* test for non-zero parity sum: dest was just cleared, so the same
	 * source list should no longer XOR to zero (assumes the test data
	 * set up before this chunk is non-zero) */
	zero_sum_result = 0;
	tx = iop_adma_prep_dma_zero_sum(dma_chan, IOP_ADMA_NUM_SRC_TEST + 1,
		PAGE_SIZE, &zero_sum_result, 1);
	for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++) {
		dma_addr = dma_map_page(dma_chan->device->dev, zero_sum_srcs[i],
			0, PAGE_SIZE, DMA_TO_DEVICE);
		iop_adma_xor_zero_sum_set_src(dma_addr, tx, i);
	}
	cookie = iop_adma_tx_submit(tx);
	iop_adma_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(8);

	if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			"Self-test non-zero sum timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	if (zero_sum_result != 1) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			"Self-test non-zero sum failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	iop_adma_free_chan_resources(dma_chan);
out:
	/* free the source pages on both the success and failure paths */
	src_idx = IOP_ADMA_NUM_SRC_TEST;
	while (src_idx--)
__free_page(xor_srcs[src_idx]);	__free_page(dest);	return err;}static int __devexit iop_adma_remove(struct platform_device *dev){	struct iop_adma_device *device = platform_get_drvdata(dev);	struct dma_chan *chan, *_chan;	struct iop_adma_chan *iop_chan;	int i;	struct iop_adma_platform_data *plat_data = dev->dev.platform_data;	dma_async_device_unregister(&device->common);	for (i = 0; i < 3; i++) {		unsigned int irq;		irq = platform_get_irq(dev, i);		free_irq(irq, device);	}	dma_free_coherent(&dev->dev, plat_data->pool_size,			device->dma_desc_pool_virt, device->dma_desc_pool);	do {		struct resource *res;		res = platform_get_resource(dev, IORESOURCE_MEM, 0);		release_mem_region(res->start, res->end - res->start);	} while (0);	list_for_each_entry_safe(chan, _chan, &device->common.channels,				device_node) {		iop_chan = to_iop_adma_chan(chan);		list_del(&chan->device_node);		kfree(iop_chan);	}	kfree(device);	return 0;}static int __devinit iop_adma_probe(struct platform_device *pdev){	struct resource *res;	int ret = 0, i;	struct iop_adma_device *adev;	struct iop_adma_chan *iop_chan;	struct dma_device *dma_dev;	struct iop_adma_platform_data *plat_data = pdev->dev.platform_data;	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);	if (!res)		return -ENODEV;	if (!devm_request_mem_region(&pdev->dev, res->start,				res->end - res->start, pdev->name))		return -EBUSY;	adev = kzalloc(sizeof(*adev), GFP_KERNEL);	if (!adev)		return -ENOMEM;	dma_dev = &adev->common;	/* allocate coherent memory for hardware descriptors	 * note: writecombine gives slightly better performance, but	 * requires that we explicitly flush the writes	 */	if ((adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev,					plat_data->pool_size,					&adev->dma_desc_pool,					GFP_KERNEL)) == NULL) {		ret = -ENOMEM;		goto err_free_adev;	}	dev_dbg(&pdev->dev, "%s: allocted descriptor pool virt %p phys %p\n",		__FUNCTION__, adev->dma_desc_pool_virt,		(void *) adev->dma_desc_pool);	adev->id = plat_data->hw_id;	
/* discover transaction capabilites from the platform data */	dma_dev->cap_mask = plat_data->cap_mask;	adev->pdev = pdev;	platform_set_drvdata(pdev, adev);	INIT_LIST_HEAD(&dma_dev->channels);	/* set base routines */	dma_dev->device_alloc_chan_resources = iop_adma_alloc_chan_resources;	dma_dev->device_free_chan_resources = iop_adma_free_chan_resources;	dma_dev->device_is_tx_complete = iop_adma_is_complete;	dma_dev->device_issue_pending = iop_adma_issue_pending;	dma_dev->device_dependency_added = iop_adma_dependency_added;	dma_dev->dev = &pdev->dev;	/* set prep routines based on capability */	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))		dma_dev->device_prep_dma_memcpy = iop_adma_prep_dma_memcpy;	if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask))		dma_dev->device_prep_dma_memset = iop_adma_prep_dma_memset;	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {		dma_dev->max_xor = iop_adma_get_max_xor();		dma_dev->device_prep_dma_xor = iop_adma_prep_dma_xor;	}	if (dma_has_cap(DMA_ZERO_SUM, dma_dev->cap_mask))		dma_dev->device_prep_dma_zero_sum =			iop_adma_prep_dma_zero_sum;	if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))		dma_dev->device_prep_dma_interrupt =			iop_adma_prep_dma_interrupt;	iop_chan = kzalloc(sizeof(*iop_chan), GFP_KERNEL);	if (!iop_chan) {		ret = -ENOMEM;		goto err_free_dma;	}	iop_chan->device = adev;	iop_chan->mmr_base = devm_ioremap(&pdev->dev, res->start,					res->end - res->start);	if (!iop_chan->mmr_base) {		ret = -ENOMEM;		goto err_free_iop_chan;	}	tasklet_init(&iop_chan->irq_tasklet, iop_adma_tasklet, (unsigned long)		iop_chan);	/* clear errors before enabling interrupts */	iop_adma_device_clear_err_status(iop_chan);	for (i = 0; i < 3; i++) {		irq_handler_t handler[] = { iop_adma_eot_handler,					iop_adma_eoc_handler,					iop_adma_err_handler };		int irq = platform_get_irq(pdev, i);		if (irq < 0) {			ret = -ENXIO;			goto err_free_iop_chan;		} else {			ret = devm_request_irq(&pdev->dev, irq,					handler[i], 0, pdev->name, iop_chan);			if (ret)				
goto err_free_iop_chan;		}	}	spin_lock_init(&iop_chan->lock);	init_timer(&iop_chan->cleanup_watchdog);	iop_chan->cleanup_watchdog.data = (unsigned long) iop_chan;	iop_chan->cleanup_watchdog.function = iop_adma_tasklet;	INIT_LIST_HEAD(&iop_chan->chain);	INIT_LIST_HEAD(&iop_chan->all_slots);	INIT_RCU_HEAD(&iop_chan->common.rcu);	iop_chan->common.device = dma_dev;	list_add_tail(&iop_chan->common.device_node, &dma_dev->channels);	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {		ret = iop_adma_memcpy_self_test(adev);		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);		if (ret)			goto err_free_iop_chan;	}	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask) ||		dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) {		ret = iop_adma_xor_zero_sum_self_test(adev);		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);		if (ret)			goto err_free_iop_chan;	}	dev_printk(KERN_INFO, &pdev->dev, "Intel(R) IOP: "	  "( %s%s%s%s%s%s%s%s%s%s)\n",	  dma_has_cap(DMA_PQ_XOR, dma_dev->cap_mask) ? "pq_xor " : "",	  dma_has_cap(DMA_PQ_UPDATE, dma_dev->cap_mask) ? "pq_update " : "",	  dma_has_cap(DMA_PQ_ZERO_SUM, dma_dev->cap_mask) ? "pq_zero_sum " : "",	  dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",	  dma_has_cap(DMA_DUAL_XOR, dma_dev->cap_mask) ? "dual_xor " : "",	  dma_has_cap(DMA_ZERO_SUM, dma_dev->cap_mask) ? "xor_zero_sum " : "",	  dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)  ? "fill " : "",	  dma_has_cap(DMA_MEMCPY_CRC32C, dma_dev->cap_mask) ? "cpy+crc " : "",	  dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",	  dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? 
"intr " : "");	dma_async_device_register(dma_dev);	goto out; err_free_iop_chan:	kfree(iop_chan); err_free_dma:	dma_free_coherent(&adev->pdev->dev, plat_data->pool_size,			adev->dma_desc_pool_virt, adev->dma_desc_pool); err_free_adev:	kfree(adev); out:	return ret;}static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan){	struct iop_adma_desc_slot *sw_desc, *grp_start;	dma_cookie_t cookie;	int slot_cnt, slots_per_op;	dev_dbg(iop_chan->device->common.dev, "%s\n", __FUNCTION__);	spin_lock_bh(&iop_chan->lock);	slot_cnt = iop_chan_memcpy_slot_count(0, &slots_per_op);	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);	if (sw_desc) {		grp_start = sw_desc->group_head;		list_splice_init(&sw_desc->async_tx.tx_list, &iop_chan->chain);		sw_desc->async_tx.ack = 1;		iop_desc_init_memcpy(grp_start, 0);		iop_desc_set_byte_count(grp_start, iop_chan, 0);		iop_desc_set_dest_addr(grp_start, iop_chan, 0);		iop_desc_set_memcpy_src_addr(grp_start, 0);		cookie = iop_chan->common.cookie;		cookie++;		if (cookie <= 1)			cookie = 2;		/* initialize the completed cookie to be less than		 * the most recently used cookie		 */		iop_chan->completed_cookie = cookie - 1;		iop_chan->common.cookie = sw_desc->async_tx.cookie = cookie;		/* channel should not be busy */		BUG_ON(iop_chan_is_busy(iop_chan));		/* clear any prior error-status bits */		iop_adma_device_clear_err_status(iop_chan);		/* disable operation */		iop_chan_disable(iop_chan);		/* set the descriptor address */		iop_chan_set_next_descriptor(iop_chan, sw_desc->async_tx.phys);		/* 1/ don't add pre-chained descriptors		 * 2/ dummy read to flush next_desc write		 */		BUG_ON(iop_desc_get_next_desc(sw_desc));		/* run the descriptor */		iop_chan_enable(iop_chan);	} else		dev_printk(KERN_ERR, iop_chan->device->common.dev,			 "failed to allocate null descriptor\n");	spin_unlock_bh(&iop_chan->lock);}static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan){	struct iop_adma_desc_slot *sw_desc, *grp_start;	
	dma_cookie_t cookie;
	int slot_cnt, slots_per_op;

	dev_dbg(iop_chan->device->common.dev, "%s\n", __FUNCTION__);

	spin_lock_bh(&iop_chan->lock);
	/* a zero-length two-source null XOR primes the descriptor chain */
	slot_cnt = iop_chan_xor_slot_count(0, 2, &slots_per_op);
	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
	if (sw_desc) {
		grp_start = sw_desc->group_head;

		list_splice_init(&sw_desc->async_tx.tx_list, &iop_chan->chain);
		/* pre-acked: no client will ever wait on this descriptor */
		sw_desc->async_tx.ack = 1;
		iop_desc_init_null_xor(grp_start, 2, 0);
		iop_desc_set_byte_count(grp_start, iop_chan, 0);
		iop_desc_set_dest_addr(grp_start, iop_chan, 0);
		iop_desc_set_xor_src_addr(grp_start, 0, 0);
		iop_desc_set_xor_src_addr(grp_start, 1, 0);

		/* cookies 0 and 1 are reserved sentinels; skip past them */
		cookie = iop_chan->common.cookie;
		cookie++;
		if (cookie <= 1)
			cookie = 2;

		/* initialize the completed cookie to be less than
		 * the most recently used cookie
		 */
		iop_chan->completed_cookie = cookie - 1;
		iop_chan->common.cookie = sw_desc->async_tx.cookie = cookie;

		/* channel should not be busy */
		BUG_ON(iop_chan_is_busy(iop_chan));

		/* clear any prior error-status bits */
		iop_adma_device_clear_err_status(iop_chan);

		/* disable operation */
		iop_chan_disable(iop_chan);

		/* set the descriptor address */
		iop_chan_set_next_descriptor(iop_chan, sw_desc->async_tx.phys);

		/* 1/ don't add pre-chained descriptors
		 * 2/ dummy read to flush next_desc write
		 */
		BUG_ON(iop_desc_get_next_desc(sw_desc));

		/* run the descriptor */
		iop_chan_enable(iop_chan);
	} else
		dev_printk(KERN_ERR, iop_chan->device->common.dev,
			"failed to allocate null descriptor\n");
	spin_unlock_bh(&iop_chan->lock);
}

/* platform glue: bind to devices named "iop-adma" */
static struct platform_driver iop_adma_driver = {
	.probe		= iop_adma_probe,
	.remove		= iop_adma_remove,
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= "iop-adma",
	},
};

static int __init iop_adma_init (void)
{
	return platform_driver_register(&iop_adma_driver);
}

/* it's currently unsafe to unload this module */
#if 0
static void __exit iop_adma_exit (void)
{
	platform_driver_unregister(&iop_adma_driver);
	return;
}
module_exit(iop_adma_exit);
#endif

module_init(iop_adma_init);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("IOP ADMA Engine Driver");
MODULE_LICENSE("GPL");

⌨️ Keyboard shortcuts

Copy code: Ctrl + C
Search code: Ctrl + F
Full-screen mode: F11
Increase font size: Ctrl + =
Decrease font size: Ctrl + -
Show shortcuts: ?