iop-adma.c

From "Linux kernel source code" · C code · 1,467 lines total · page 1 of 3

C
1,467
Font size
		INIT_LIST_HEAD(&slot->chain_node);		INIT_LIST_HEAD(&slot->slot_node);		INIT_LIST_HEAD(&slot->async_tx.tx_list);		hw_desc = (char *) iop_chan->device->dma_desc_pool;		slot->async_tx.phys =			(dma_addr_t) &hw_desc[idx * IOP_ADMA_SLOT_SIZE];		slot->idx = idx;		spin_lock_bh(&iop_chan->lock);		iop_chan->slots_allocated++;		list_add_tail(&slot->slot_node, &iop_chan->all_slots);		spin_unlock_bh(&iop_chan->lock);	} while (iop_chan->slots_allocated < num_descs_in_pool);	if (idx && !iop_chan->last_used)		iop_chan->last_used = list_entry(iop_chan->all_slots.next,					struct iop_adma_desc_slot,					slot_node);	dev_dbg(iop_chan->device->common.dev,		"allocated %d descriptor slots last_used: %p\n",		iop_chan->slots_allocated, iop_chan->last_used);	/* initialize the channel and the chain with a null operation */	if (init) {		if (dma_has_cap(DMA_MEMCPY,			iop_chan->device->common.cap_mask))			iop_chan_start_null_memcpy(iop_chan);		else if (dma_has_cap(DMA_XOR,			iop_chan->device->common.cap_mask))			iop_chan_start_null_xor(iop_chan);		else			BUG();	}	return (idx > 0) ? idx : -ENOMEM;}static struct dma_async_tx_descriptor *iop_adma_prep_dma_interrupt(struct dma_chan *chan){	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);	struct iop_adma_desc_slot *sw_desc, *grp_start;	int slot_cnt, slots_per_op;	dev_dbg(iop_chan->device->common.dev, "%s\n", __FUNCTION__);	spin_lock_bh(&iop_chan->lock);	slot_cnt = iop_chan_interrupt_slot_count(&slots_per_op, iop_chan);	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);	if (sw_desc) {		grp_start = sw_desc->group_head;		iop_desc_init_interrupt(grp_start, iop_chan);		grp_start->unmap_len = 0;	}	spin_unlock_bh(&iop_chan->lock);	return sw_desc ? 
&sw_desc->async_tx : NULL;
}

/* tx_set_src hook for memcpy descriptors: program the single source address */
static void
iop_adma_memcpy_set_src(dma_addr_t addr, struct dma_async_tx_descriptor *tx,
	int index)
{
	struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);
	struct iop_adma_desc_slot *grp_start = sw_desc->group_head;

	iop_desc_set_memcpy_src_addr(grp_start, addr);
}

/*
 * Prepare a hardware memcpy descriptor of @len bytes.  @len must be
 * non-zero and no larger than IOP_ADMA_MAX_BYTE_COUNT (BUG otherwise).
 * Returns NULL when len is zero or no descriptor slots are free.
 */
static struct dma_async_tx_descriptor *
iop_adma_prep_dma_memcpy(struct dma_chan *chan, size_t len, int int_en)
{
	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
	struct iop_adma_desc_slot *sw_desc, *grp_start;
	int slot_cnt, slots_per_op;

	if (unlikely(!len))
		return NULL;
	BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT));

	/* %zu: len is a size_t, %u is wrong on LP64 builds */
	dev_dbg(iop_chan->device->common.dev, "%s len: %zu\n",
		__FUNCTION__, len);

	spin_lock_bh(&iop_chan->lock);
	slot_cnt = iop_chan_memcpy_slot_count(len, &slots_per_op);
	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
	if (sw_desc) {
		grp_start = sw_desc->group_head;
		iop_desc_init_memcpy(grp_start, int_en);
		iop_desc_set_byte_count(grp_start, iop_chan, len);
		/* record unmap bookkeeping for the cleanup path */
		sw_desc->unmap_src_cnt = 1;
		sw_desc->unmap_len = len;
		sw_desc->async_tx.tx_set_src = iop_adma_memcpy_set_src;
	}
	spin_unlock_bh(&iop_chan->lock);

	return sw_desc ?
&sw_desc->async_tx : NULL;}static struct dma_async_tx_descriptor *iop_adma_prep_dma_memset(struct dma_chan *chan, int value, size_t len,	int int_en){	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);	struct iop_adma_desc_slot *sw_desc, *grp_start;	int slot_cnt, slots_per_op;	if (unlikely(!len))		return NULL;	BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT));	dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",		__FUNCTION__, len);	spin_lock_bh(&iop_chan->lock);	slot_cnt = iop_chan_memset_slot_count(len, &slots_per_op);	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);	if (sw_desc) {		grp_start = sw_desc->group_head;		iop_desc_init_memset(grp_start, int_en);		iop_desc_set_byte_count(grp_start, iop_chan, len);		iop_desc_set_block_fill_val(grp_start, value);		sw_desc->unmap_src_cnt = 1;		sw_desc->unmap_len = len;	}	spin_unlock_bh(&iop_chan->lock);	return sw_desc ? &sw_desc->async_tx : NULL;}static voidiop_adma_xor_set_src(dma_addr_t addr, struct dma_async_tx_descriptor *tx,	int index){	struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);	struct iop_adma_desc_slot *grp_start = sw_desc->group_head;	iop_desc_set_xor_src_addr(grp_start, index, addr);}static struct dma_async_tx_descriptor *iop_adma_prep_dma_xor(struct dma_chan *chan, unsigned int src_cnt, size_t len,	int int_en){	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);	struct iop_adma_desc_slot *sw_desc, *grp_start;	int slot_cnt, slots_per_op;	if (unlikely(!len))		return NULL;	BUG_ON(unlikely(len > IOP_ADMA_XOR_MAX_BYTE_COUNT));	dev_dbg(iop_chan->device->common.dev,		"%s src_cnt: %d len: %u int_en: %d\n",		__FUNCTION__, src_cnt, len, int_en);	spin_lock_bh(&iop_chan->lock);	slot_cnt = iop_chan_xor_slot_count(len, src_cnt, &slots_per_op);	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);	if (sw_desc) {		grp_start = sw_desc->group_head;		iop_desc_init_xor(grp_start, src_cnt, int_en);		iop_desc_set_byte_count(grp_start, iop_chan, len);		sw_desc->unmap_src_cnt = 
src_cnt;		sw_desc->unmap_len = len;		sw_desc->async_tx.tx_set_src = iop_adma_xor_set_src;	}	spin_unlock_bh(&iop_chan->lock);	return sw_desc ? &sw_desc->async_tx : NULL;}static voidiop_adma_xor_zero_sum_set_src(dma_addr_t addr,				struct dma_async_tx_descriptor *tx,				int index){	struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);	struct iop_adma_desc_slot *grp_start = sw_desc->group_head;	iop_desc_set_zero_sum_src_addr(grp_start, index, addr);}static struct dma_async_tx_descriptor *iop_adma_prep_dma_zero_sum(struct dma_chan *chan, unsigned int src_cnt,	size_t len, u32 *result, int int_en){	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);	struct iop_adma_desc_slot *sw_desc, *grp_start;	int slot_cnt, slots_per_op;	if (unlikely(!len))		return NULL;	dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n",		__FUNCTION__, src_cnt, len);	spin_lock_bh(&iop_chan->lock);	slot_cnt = iop_chan_zero_sum_slot_count(len, src_cnt, &slots_per_op);	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);	if (sw_desc) {		grp_start = sw_desc->group_head;		iop_desc_init_zero_sum(grp_start, src_cnt, int_en);		iop_desc_set_zero_sum_byte_count(grp_start, len);		grp_start->xor_check_result = result;		pr_debug("\t%s: grp_start->xor_check_result: %p\n",			__FUNCTION__, grp_start->xor_check_result);		sw_desc->unmap_src_cnt = src_cnt;		sw_desc->unmap_len = len;		sw_desc->async_tx.tx_set_src = iop_adma_xor_zero_sum_set_src;	}	spin_unlock_bh(&iop_chan->lock);	return sw_desc ? 
&sw_desc->async_tx : NULL;}static void iop_adma_dependency_added(struct dma_chan *chan){	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);	tasklet_schedule(&iop_chan->irq_tasklet);}static void iop_adma_free_chan_resources(struct dma_chan *chan){	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);	struct iop_adma_desc_slot *iter, *_iter;	int in_use_descs = 0;	iop_adma_slot_cleanup(iop_chan);	spin_lock_bh(&iop_chan->lock);	list_for_each_entry_safe(iter, _iter, &iop_chan->chain,					chain_node) {		in_use_descs++;		list_del(&iter->chain_node);	}	list_for_each_entry_safe_reverse(		iter, _iter, &iop_chan->all_slots, slot_node) {		list_del(&iter->slot_node);		kfree(iter);		iop_chan->slots_allocated--;	}	iop_chan->last_used = NULL;	dev_dbg(iop_chan->device->common.dev, "%s slots_allocated %d\n",		__FUNCTION__, iop_chan->slots_allocated);	spin_unlock_bh(&iop_chan->lock);	/* one is ok since we left it on there on purpose */	if (in_use_descs > 1)		printk(KERN_ERR "IOP: Freeing %d in use descriptors!\n",			in_use_descs - 1);}/** * iop_adma_is_complete - poll the status of an ADMA transaction * @chan: ADMA channel handle * @cookie: ADMA transaction identifier */static enum dma_status iop_adma_is_complete(struct dma_chan *chan,					dma_cookie_t cookie,					dma_cookie_t *done,					dma_cookie_t *used){	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);	dma_cookie_t last_used;	dma_cookie_t last_complete;	enum dma_status ret;	last_used = chan->cookie;	last_complete = iop_chan->completed_cookie;	if (done)		*done = last_complete;	if (used)		*used = last_used;	ret = dma_async_is_complete(cookie, last_complete, last_used);	if (ret == DMA_SUCCESS)		return ret;	iop_adma_slot_cleanup(iop_chan);	last_used = chan->cookie;	last_complete = iop_chan->completed_cookie;	if (done)		*done = last_complete;	if (used)		*used = last_used;	return dma_async_is_complete(cookie, last_complete, last_used);}static irqreturn_t iop_adma_eot_handler(int irq, void *data){	struct iop_adma_chan 
*chan = data;	dev_dbg(chan->device->common.dev, "%s\n", __FUNCTION__);	tasklet_schedule(&chan->irq_tasklet);	iop_adma_device_clear_eot_status(chan);	return IRQ_HANDLED;}static irqreturn_t iop_adma_eoc_handler(int irq, void *data){	struct iop_adma_chan *chan = data;	dev_dbg(chan->device->common.dev, "%s\n", __FUNCTION__);	tasklet_schedule(&chan->irq_tasklet);	iop_adma_device_clear_eoc_status(chan);	return IRQ_HANDLED;}static irqreturn_t iop_adma_err_handler(int irq, void *data){	struct iop_adma_chan *chan = data;	unsigned long status = iop_chan_get_status(chan);	dev_printk(KERN_ERR, chan->device->common.dev,		"error ( %s%s%s%s%s%s%s)\n",		iop_is_err_int_parity(status, chan) ? "int_parity " : "",		iop_is_err_mcu_abort(status, chan) ? "mcu_abort " : "",		iop_is_err_int_tabort(status, chan) ? "int_tabort " : "",		iop_is_err_int_mabort(status, chan) ? "int_mabort " : "",		iop_is_err_pci_tabort(status, chan) ? "pci_tabort " : "",		iop_is_err_pci_mabort(status, chan) ? "pci_mabort " : "",		iop_is_err_split_tx(status, chan) ? "split_tx " : "");	iop_adma_device_clear_err_status(chan);	BUG();	return IRQ_HANDLED;}static void iop_adma_issue_pending(struct dma_chan *chan){	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);	if (iop_chan->pending) {		iop_chan->pending = 0;		iop_chan_append(iop_chan);	}}/* * Perform a transaction to verify the HW works. 
*/#define IOP_ADMA_TEST_SIZE 2000static int __devinit iop_adma_memcpy_self_test(struct iop_adma_device *device){	int i;	void *src, *dest;	dma_addr_t src_dma, dest_dma;	struct dma_chan *dma_chan;	dma_cookie_t cookie;	struct dma_async_tx_descriptor *tx;	int err = 0;	struct iop_adma_chan *iop_chan;	dev_dbg(device->common.dev, "%s\n", __FUNCTION__);	src = kzalloc(sizeof(u8) * IOP_ADMA_TEST_SIZE, GFP_KERNEL);	if (!src)		return -ENOMEM;	dest = kzalloc(sizeof(u8) * IOP_ADMA_TEST_SIZE, GFP_KERNEL);	if (!dest) {		kfree(src);		return -ENOMEM;	}	/* Fill in src buffer */	for (i = 0; i < IOP_ADMA_TEST_SIZE; i++)		((u8 *) src)[i] = (u8)i;	memset(dest, 0, IOP_ADMA_TEST_SIZE);	/* Start copy, using first DMA channel */	dma_chan = container_of(device->common.channels.next,				struct dma_chan,				device_node);	if (iop_adma_alloc_chan_resources(dma_chan) < 1) {		err = -ENODEV;		goto out;	}	tx = iop_adma_prep_dma_memcpy(dma_chan, IOP_ADMA_TEST_SIZE, 1);	dest_dma = dma_map_single(dma_chan->device->dev, dest,				IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE);	iop_adma_set_dest(dest_dma, tx, 0);	src_dma = dma_map_single(dma_chan->device->dev, src,				IOP_ADMA_TEST_SIZE, DMA_TO_DEVICE);	iop_adma_memcpy_set_src(src_dma, tx, 0);	cookie = iop_adma_tx_submit(tx);	iop_adma_issue_pending(dma_chan);	async_tx_ack(tx);	msleep(1);	if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=			DMA_SUCCESS) {		dev_printk(KERN_ERR, dma_chan->device->dev,			"Self-test copy timed out, disabling\n");		err = -ENODEV;		goto free_resources;	}	iop_chan = to_iop_adma_chan(dma_chan);	dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma,		IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE);	if (memcmp(src, dest, IOP_ADMA_TEST_SIZE)) {		dev_printk(KERN_ERR, dma_chan->device->dev,			"Self-test copy failed compare, disabling\n");		err = -ENODEV;		goto free_resources;	}free_resources:	iop_adma_free_chan_resources(dma_chan);out:	kfree(src);	kfree(dest);	return err;}#define IOP_ADMA_NUM_SRC_TEST 4 /* must be <= 15 */static int 
__devinitiop_adma_xor_zero_sum_self_test(struct iop_adma_device *device){	int i, src_idx;	struct page *dest;	struct page *xor_srcs[IOP_ADMA_NUM_SRC_TEST];	struct page *zero_sum_srcs[IOP_ADMA_NUM_SRC_TEST + 1];	dma_addr_t dma_addr, dest_dma;	struct dma_async_tx_descriptor *tx;	struct dma_chan *dma_chan;	dma_cookie_t cookie;	u8 cmp_byte = 0;	u32 cmp_word;	u32 zero_sum_result;	int err = 0;	struct iop_adma_chan *iop_chan;	dev_dbg(device->common.dev, "%s\n", __FUNCTION__);	for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) {		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);		if (!xor_srcs[src_idx])			while (src_idx--) {				__free_page(xor_srcs[src_idx]);				return -ENOMEM;			}	}	dest = alloc_page(GFP_KERNEL);	if (!dest)		while (src_idx--) {			__free_page(xor_srcs[src_idx]);			return -ENOMEM;		}	/* Fill in src buffers */	for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) {		u8 *ptr = page_address(xor_srcs[src_idx]);		for (i = 0; i < PAGE_SIZE; i++)			ptr[i] = (1 << src_idx);	}	for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++)		cmp_byte ^= (u8) (1 << src_idx);	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |			(cmp_byte << 8) | cmp_byte;	memset(page_address(dest), 0, PAGE_SIZE);	dma_chan = container_of(device->common.channels.next,				struct dma_chan,				device_node);	if (iop_adma_alloc_chan_resources(dma_chan) < 1) {

⌨️ Keyboard shortcuts

Copy code: Ctrl + C
Search code: Ctrl + F
Full-screen mode: F11
Increase font size: Ctrl + =
Decrease font size: Ctrl + -
Show shortcuts: ?