
📄 dmabounce.c

📁 优龙2410 Linux 2.6.8 kernel source code
💻 C
📖 Page 1 of 2
		 * BUG_ON(buf->direction != dir);
		 */

		dev_dbg(dev,
			"%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
			__func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
			buf->safe, (void *) buf->safe_dma_addr);

		DO_STATS ( device_info->bounce_count++ );

		switch (dir) {
		case DMA_FROM_DEVICE:
			dev_dbg(dev,
				"%s: copy back safe %p to unsafe %p size %d\n",
				__func__, buf->safe, buf->ptr, size);
			memcpy(buf->ptr, buf->safe, size);
			break;
		case DMA_TO_DEVICE:
			dev_dbg(dev,
				"%s: copy out unsafe %p to safe %p, size %d\n",
				__func__, buf->ptr, buf->safe, size);
			memcpy(buf->safe, buf->ptr, size);
			break;
		case DMA_BIDIRECTIONAL:
			BUG();	/* is this allowed?  what does it mean? */
		default:
			BUG();
		}
		consistent_sync(buf->safe, size, dir);
	} else {
		consistent_sync(dma_to_virt(dev, dma_addr), size, dir);
	}
}

/* ************************************************** */

/*
 * see if a buffer address is in an 'unsafe' range.  if it is
 * allocate a 'safe' buffer and copy the unsafe buffer into it.
 * substitute the safe buffer for the unsafe one.
 * (basically move the buffer from an unsafe area to a safe one)
 */
dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	unsigned long flags;
	dma_addr_t dma_addr;

	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, ptr, size, dir);

	BUG_ON(dir == DMA_NONE);

	local_irq_save(flags);

	dma_addr = map_single(dev, ptr, size, dir);

	local_irq_restore(flags);

	return dma_addr;
}

/*
 * see if a mapped address was really a "safe" buffer and if so, copy
 * the data from the safe buffer back to the unsafe buffer and free up
 * the safe buffer.  (basically return things back to the way they
 * should be)
 */
void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
			enum dma_data_direction dir)
{
	unsigned long flags;

	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, (void *) dma_addr, size, dir);

	BUG_ON(dir == DMA_NONE);

	local_irq_save(flags);

	unmap_single(dev, dma_addr, size, dir);

	local_irq_restore(flags);
}

int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	unsigned long flags;
	int i;

	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
		__func__, sg, nents, dir);

	BUG_ON(dir == DMA_NONE);

	local_irq_save(flags);

	for (i = 0; i < nents; i++, sg++) {
		struct page *page = sg->page;
		unsigned int offset = sg->offset;
		unsigned int length = sg->length;
		void *ptr = page_address(page) + offset;

		sg->dma_address =
			map_single(dev, ptr, length, dir);
	}

	local_irq_restore(flags);

	return nents;
}

void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	unsigned long flags;
	int i;

	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
		__func__, sg, nents, dir);

	BUG_ON(dir == DMA_NONE);

	local_irq_save(flags);

	for (i = 0; i < nents; i++, sg++) {
		dma_addr_t dma_addr = sg->dma_address;
		unsigned int length = sg->length;

		unmap_single(dev, dma_addr, length, dir);
	}

	local_irq_restore(flags);
}

void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr, size_t size,
				enum dma_data_direction dir)
{
	unsigned long flags;

	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, (void *) dma_addr, size, dir);

	local_irq_save(flags);

	sync_single(dev, dma_addr, size, dir);

	local_irq_restore(flags);
}

void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_addr, size_t size,
				enum dma_data_direction dir)
{
	unsigned long flags;

	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, (void *) dma_addr, size, dir);

	local_irq_save(flags);

	sync_single(dev, dma_addr, size, dir);

	local_irq_restore(flags);
}

void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
			enum dma_data_direction dir)
{
	unsigned long flags;
	int i;

	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
		__func__, sg, nents, dir);

	BUG_ON(dir == DMA_NONE);

	local_irq_save(flags);

	for (i = 0; i < nents; i++, sg++) {
		dma_addr_t dma_addr = sg->dma_address;
		unsigned int length = sg->length;

		sync_single(dev, dma_addr, length, dir);
	}

	local_irq_restore(flags);
}

void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
			enum dma_data_direction dir)
{
	unsigned long flags;
	int i;

	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
		__func__, sg, nents, dir);

	BUG_ON(dir == DMA_NONE);

	local_irq_save(flags);

	for (i = 0; i < nents; i++, sg++) {
		dma_addr_t dma_addr = sg->dma_address;
		unsigned int length = sg->length;

		sync_single(dev, dma_addr, length, dir);
	}

	local_irq_restore(flags);
}

int
dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
			unsigned long large_buffer_size)
{
	struct dmabounce_device_info *device_info;

	device_info = kmalloc(sizeof(struct dmabounce_device_info), GFP_ATOMIC);
	if (!device_info) {
		printk(KERN_ERR
			"Could not allocate dmabounce_device_info for %s",
			dev->bus_id);
		return -ENOMEM;
	}

	device_info->small_buffer_pool =
		dma_pool_create("small_dmabounce_pool",
				dev,
				small_buffer_size,
				0 /* byte alignment */,
				0 /* no page-crossing issues */);
	if (!device_info->small_buffer_pool) {
		printk(KERN_ERR
			"dmabounce: could not allocate small DMA pool for %s\n",
			dev->bus_id);
		kfree(device_info);
		return -ENOMEM;
	}

	if (large_buffer_size) {
		device_info->large_buffer_pool =
			dma_pool_create("large_dmabounce_pool",
					dev,
					large_buffer_size,
					0 /* byte alignment */,
					0 /* no page-crossing issues */);
		if (!device_info->large_buffer_pool) {
			printk(KERN_ERR
				"dmabounce: could not allocate large DMA pool for %s\n",
				dev->bus_id);
			dma_pool_destroy(device_info->small_buffer_pool);
			kfree(device_info);
			return -ENOMEM;
		}
	}

	device_info->dev = dev;
	device_info->small_buffer_size = small_buffer_size;
	device_info->large_buffer_size = large_buffer_size;
	INIT_LIST_HEAD(&device_info->safe_buffers);

#ifdef STATS
	device_info->sbp_allocs = 0;
	device_info->lbp_allocs = 0;
	device_info->total_allocs = 0;
	device_info->map_op_count = 0;
	device_info->bounce_count = 0;
#endif

	list_add(&device_info->node, &dmabounce_devs);

	printk(KERN_INFO "dmabounce: registered device %s on %s bus\n",
		dev->bus_id, dev->bus->name);

	return 0;
}

void
dmabounce_unregister_dev(struct device *dev)
{
	struct dmabounce_device_info *device_info = find_dmabounce_dev(dev);

	if (!device_info) {
		printk(KERN_WARNING
			"%s: Never registered with dmabounce but attempting "
			"to unregister!\n", dev->bus_id);
		return;
	}

	if (!list_empty(&device_info->safe_buffers)) {
		printk(KERN_ERR
			"%s: Removing from dmabounce with pending buffers!\n",
			dev->bus_id);
		BUG();
	}

	if (device_info->small_buffer_pool)
		dma_pool_destroy(device_info->small_buffer_pool);
	if (device_info->large_buffer_pool)
		dma_pool_destroy(device_info->large_buffer_pool);

#ifdef STATS
	print_alloc_stats(device_info);
	print_map_stats(device_info);
#endif

	list_del(&device_info->node);

	kfree(device_info);

	printk(KERN_INFO "dmabounce: device %s on %s bus unregistered\n",
		dev->bus_id, dev->bus->name);
}

EXPORT_SYMBOL(dma_map_single);
EXPORT_SYMBOL(dma_unmap_single);
EXPORT_SYMBOL(dma_map_sg);
EXPORT_SYMBOL(dma_unmap_sg);
EXPORT_SYMBOL(dma_sync_single);
EXPORT_SYMBOL(dma_sync_sg);
EXPORT_SYMBOL(dmabounce_register_dev);
EXPORT_SYMBOL(dmabounce_unregister_dev);

MODULE_AUTHOR("Christopher Hoover <ch@hpl.hp.com>, Deepak Saxena <dsaxena@plexity.net>");
MODULE_DESCRIPTION("Special dma_{map/unmap/dma_sync}_* routines for systems with limited DMA windows");
MODULE_LICENSE("GPL");
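
For context, the sketch below shows how a driver might consume this API: register the device with dmabounce once at probe time, wrap each transfer in dma_map_single()/dma_unmap_single() so buffers outside the DMA window are bounced through the pools created above, and unregister on removal. It is a minimal, hypothetical example and not part of dmabounce.c: the helper names (my_probe, my_send, my_remove) and the pool sizes (2 KB small, 64 KB large) are assumptions chosen for illustration.

/*
 * Hypothetical usage sketch (not from dmabounce.c): a driver on a bus
 * with a limited DMA window registers with dmabounce, then uses the
 * ordinary streaming DMA calls, which transparently bounce
 * out-of-window buffers through the safe pools.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>

/* assumed to be declared by the platform's dmabounce header */
extern int dmabounce_register_dev(struct device *dev,
		unsigned long small_buffer_size,
		unsigned long large_buffer_size);
extern void dmabounce_unregister_dev(struct device *dev);

static int my_probe(struct device *dev)
{
	/* create a 2 KB small pool and a 64 KB large pool for this device */
	return dmabounce_register_dev(dev, 2048, 65536);
}

static int my_send(struct device *dev, void *buf, size_t len)
{
	dma_addr_t dma;

	/* copies buf into a safe bounce buffer if it lies outside the DMA window */
	dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/* ... start the hardware on 'dma' and wait for completion ... */

	/* releases the bounce buffer; no copy back is needed for DMA_TO_DEVICE */
	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
	return 0;
}

static void my_remove(struct device *dev)
{
	/* every mapping must have been unmapped before this point */
	dmabounce_unregister_dev(dev);
}

A receive path would use DMA_FROM_DEVICE instead, so that unmapping (or a sync-for-CPU call) copies the data back from the safe buffer into the original one, as the DMA_FROM_DEVICE case near the top of this listing does.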
