📄 dmabounce.c
/*
 * arch/arm/common/dmabounce.c
 *
 * Special dma_{map/unmap/dma_sync}_* routines for systems that have
 * limited DMA windows. These functions utilize bounce buffers to
 * copy data to/from buffers located outside the DMA region. This
 * only works for systems in which DMA memory is at the bottom of
 * RAM, the remainder of memory is at the top and the DMA memory
 * can be marked as ZONE_DMA. Anything beyond that such as discontiguous
 * DMA windows will require custom implementations that reserve memory
 * areas at early bootup.
 *
 * Original version by Brad Parker (brad@heeltoe.com)
 * Re-written by Christopher Hoover <ch@murgatroid.com>
 * Made generic by Deepak Saxena <dsaxena@plexity.net>
 *
 * Copyright (C) 2002 Hewlett Packard Company.
 * Copyright (C) 2004 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/list.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>

#undef STATS

#ifdef STATS
#define DO_STATS(X) do { X ; } while (0)
#else
#define DO_STATS(X) do { } while (0)
#endif

/* ************************************************** */

struct safe_buffer {
        struct list_head node;

        /* original request */
        void            *ptr;
        size_t          size;
        int             direction;

        /* safe buffer info */
        struct dmabounce_pool *pool;
        void            *safe;
        dma_addr_t      safe_dma_addr;
};

struct dmabounce_pool {
        unsigned long   size;
        struct dma_pool *pool;
#ifdef STATS
        unsigned long   allocs;
#endif
};

struct dmabounce_device_info {
        struct device *dev;
        struct list_head safe_buffers;
#ifdef STATS
        unsigned long total_allocs;
        unsigned long map_op_count;
        unsigned long bounce_count;
        int attr_res;
#endif
        struct dmabounce_pool   small;
        struct dmabounce_pool   large;

        rwlock_t lock;
};

#ifdef STATS
static ssize_t dmabounce_show(struct device *dev, struct device_attribute *attr,
                              char *buf)
{
        struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
        return sprintf(buf, "%lu %lu %lu %lu %lu %lu\n",
                device_info->small.allocs,
                device_info->large.allocs,
                device_info->total_allocs - device_info->small.allocs -
                        device_info->large.allocs,
                device_info->total_allocs,
                device_info->map_op_count,
                device_info->bounce_count);
}

static DEVICE_ATTR(dmabounce_stats, 0400, dmabounce_show, NULL);
#endif


/* allocate a 'safe' buffer and keep track of it */
static inline struct safe_buffer *
alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
                  size_t size, enum dma_data_direction dir)
{
        struct safe_buffer *buf;
        struct dmabounce_pool *pool;
        struct device *dev = device_info->dev;
        unsigned long flags;

        dev_dbg(dev, "%s(ptr=%p, size=%d, dir=%d)\n",
                __func__, ptr, size, dir);

        if (size <= device_info->small.size) {
                pool = &device_info->small;
        } else if (size <= device_info->large.size) {
                pool = &device_info->large;
        } else {
                pool = NULL;
        }

        buf = kmalloc(sizeof(struct safe_buffer), GFP_ATOMIC);
        if (buf == NULL) {
                dev_warn(dev, "%s: kmalloc failed\n", __func__);
                return NULL;
        }

        buf->ptr = ptr;
        buf->size = size;
        buf->direction = dir;
        buf->pool = pool;

        if (pool) {
                buf->safe = dma_pool_alloc(pool->pool, GFP_ATOMIC,
                                           &buf->safe_dma_addr);
        } else {
                buf->safe = dma_alloc_coherent(dev, size, &buf->safe_dma_addr,
                                               GFP_ATOMIC);
        }

        if (buf->safe == NULL) {
                dev_warn(dev,
                         "%s: could not alloc dma memory (size=%d)\n",
                         __func__, size);
                kfree(buf);
                return NULL;
        }

#ifdef STATS
        if (pool)
                pool->allocs++;
        device_info->total_allocs++;
#endif

        write_lock_irqsave(&device_info->lock, flags);

        list_add(&buf->node, &device_info->safe_buffers);

        write_unlock_irqrestore(&device_info->lock, flags);

        return buf;
}

/* determine if a buffer is from our "safe" pool */
static inline struct safe_buffer *
find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_addr)
{
        struct safe_buffer *b, *rb = NULL;
        unsigned long flags;

        read_lock_irqsave(&device_info->lock, flags);

        list_for_each_entry(b, &device_info->safe_buffers, node)
                if (b->safe_dma_addr == safe_dma_addr) {
                        rb = b;
                        break;
                }

        read_unlock_irqrestore(&device_info->lock, flags);
        return rb;
}

static inline void
free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *buf)
{
        unsigned long flags;

        dev_dbg(device_info->dev, "%s(buf=%p)\n", __func__, buf);

        write_lock_irqsave(&device_info->lock, flags);

        list_del(&buf->node);

        write_unlock_irqrestore(&device_info->lock, flags);

        if (buf->pool)
                dma_pool_free(buf->pool->pool, buf->safe, buf->safe_dma_addr);
        else
                dma_free_coherent(device_info->dev, buf->size, buf->safe,
                                  buf->safe_dma_addr);

        kfree(buf);
}

/* ************************************************** */

static inline dma_addr_t
map_single(struct device *dev, void *ptr, size_t size,
           enum dma_data_direction dir)
{
        struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
        dma_addr_t dma_addr;
        int needs_bounce = 0;

        if (device_info)
                DO_STATS ( device_info->map_op_count++ );

        dma_addr = virt_to_dma(dev, ptr);

        if (dev->dma_mask) {
                unsigned long mask = *dev->dma_mask;
                unsigned long limit;

                limit = (mask + 1) & ~mask;
                if (limit && size > limit) {
                        dev_err(dev, "DMA mapping too big (requested %#x "
                                "mask %#Lx)\n", size, *dev->dma_mask);
                        return ~0;
                }

                /*
                 * Figure out if we need to bounce from the DMA mask.
                 */
                needs_bounce = (dma_addr | (dma_addr + size - 1)) & ~mask;
        }

        if (device_info && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) {
                struct safe_buffer *buf;

                buf = alloc_safe_buffer(device_info, ptr, size, dir);
                if (buf == 0) {
                        dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
                                __func__, ptr);
                        return 0;
                }

                dev_dbg(dev,
                        "%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
                        __func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
                        buf->safe, (void *) buf->safe_dma_addr);

                if ((dir == DMA_TO_DEVICE) ||
                    (dir == DMA_BIDIRECTIONAL)) {
                        dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
                                __func__, ptr, buf->safe, size);
                        memcpy(buf->safe, ptr, size);
                }
                ptr = buf->safe;

                dma_addr = buf->safe_dma_addr;
        } else {
                /*
                 * We don't need to sync the DMA buffer since
                 * it was allocated via the coherent allocators.
                 */
                dma_cache_maint(ptr, size, dir);
        }

        return dma_addr;
}

static inline void
unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
             enum dma_data_direction dir)
{
        struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
        struct safe_buffer *buf = NULL;

        /*
         * Trying to unmap an invalid mapping
         */
        if (dma_mapping_error(dma_addr)) {
                dev_err(dev, "Trying to unmap invalid mapping\n");
                return;
        }

        if (device_info)
                buf = find_safe_buffer(device_info, dma_addr);

        if (buf) {
                BUG_ON(buf->size != size);

                dev_dbg(dev,
                        "%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
                        __func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
                        buf->safe, (void *) buf->safe_dma_addr);

                DO_STATS ( device_info->bounce_count++ );

                if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
                        void *ptr = buf->ptr;

                        dev_dbg(dev,
                                "%s: copy back safe %p to unsafe %p size %d\n",
                                __func__, buf->safe, ptr, size);
                        memcpy(ptr, buf->safe, size);

                        /*
                         * DMA buffers must have the same cache properties
                         * as if they were really used for DMA - which means
                         * data must be written back to RAM. Note that
                         * we don't use dmac_flush_range() here for the
                         * bidirectional case because we know the cache
                         * lines will be coherent with the data written.
                         */
                        dmac_clean_range(ptr, ptr + size);
                        outer_clean_range(__pa(ptr), __pa(ptr) + size);
                }
                free_safe_buffer(device_info, buf);
        }
}

static inline void
sync_single(struct device *dev, dma_addr_t dma_addr, size_t size,
            enum dma_data_direction dir)
{
        struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
        struct safe_buffer *buf = NULL;
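The listing above breaks off inside sync_single(). For context, drivers never call map_single()/unmap_single() directly; they use the generic streaming-DMA API, which on a platform registered with dmabounce ends up in the routines shown here. The fragment below is a minimal, hypothetical driver-side sketch, not part of dmabounce.c: example_start_tx() and its parameters are invented for illustration, while dma_map_single(), dma_mapping_error() (single-argument form, as used in this era's kernel) and dma_unmap_single() are the real entry points.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Hypothetical driver helper, for illustration only. */
static int example_start_tx(struct device *dev, void *data, size_t len)
{
        dma_addr_t handle;

        /*
         * On a dmabounce platform this lands in map_single() above: if
         * 'data' lies outside the device's dma_mask, a safe buffer is
         * allocated and 'handle' refers to the bounce copy, not 'data'.
         */
        handle = dma_map_single(dev, data, len, DMA_TO_DEVICE);
        if (dma_mapping_error(handle))
                return -ENOMEM;

        /* ... program the device with 'handle' and run the transfer ... */

        /*
         * unmap_single() releases the bounce buffer; for DMA_FROM_DEVICE
         * or DMA_BIDIRECTIONAL mappings it is also where the device's
         * data is copied back into the original buffer.
         */
        dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
        return 0;
}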