memalloc.c
/*
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *                   Takashi Iwai <tiwai@suse.de>
 *
 *  Generic memory allocators
 *
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <asm/uaccess.h>
#include <linux/dma-mapping.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <sound/memalloc.h>
#ifdef CONFIG_SBUS
#include <asm/sbus.h>
#endif


MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>, Jaroslav Kysela <perex@perex.cz>");
MODULE_DESCRIPTION("Memory allocator for ALSA system.");
MODULE_LICENSE("GPL");


/*
 */

void *snd_malloc_sgbuf_pages(struct device *device,
			     size_t size, struct snd_dma_buffer *dmab,
			     size_t *res_size);
int snd_free_sgbuf_pages(struct snd_dma_buffer *dmab);

/*
 */

static DEFINE_MUTEX(list_mutex);
static LIST_HEAD(mem_list_head);

/* buffer preservation list */
struct snd_mem_list {
	struct snd_dma_buffer buffer;
	unsigned int id;
	struct list_head list;
};

/* id for pre-allocated buffers */
#define SNDRV_DMA_DEVICE_UNUSED (unsigned int)-1

#ifdef CONFIG_SND_DEBUG
#define __ASTRING__(x) #x
#define snd_assert(expr, args...) do {\
	if (!(expr)) {\
		printk(KERN_ERR "snd-malloc: BUG? (%s) (called from %p)\n", __ASTRING__(expr), __builtin_return_address(0));\
		args;\
	}\
} while (0)
#else
#define snd_assert(expr, args...) /**/
#endif

/*
 *  Hacks
 */

#if defined(__i386__)
/*
 * A hack to allocate large buffers via dma_alloc_coherent()
 *
 * since dma_alloc_coherent always tries GFP_DMA when the requested
 * pci memory region is below 32bit, it happens quite often that even
 * an order-2 page allocation cannot be satisfied.
 *
 * so in the following, we allocate at first without dma_mask, so that
 * allocation will be done without GFP_DMA.  if the area doesn't match
 * with the requested region, then reallocate with the original dma_mask
 * again.
 *
 * Really, we want to move this type of thing into dma_alloc_coherent()
 * so dma_mask doesn't have to be messed with.
 */
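/*
 * Worked example of the range check in the function below (illustration
 * only, not part of the original source): with dma_mask = 0x00ffffff (a
 * 24-bit mask) and a 0x10000-byte buffer returned at *dma_handle =
 * 0x00ff8000, the last byte lies at 0x01007fff; 0x01007fff & ~0x00ffffff
 * is nonzero, so the buffer is freed and allocated again with the real
 * mask restored.
 */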
static void *snd_dma_hack_alloc_coherent(struct device *dev, size_t size,
					 dma_addr_t *dma_handle,
					 gfp_t flags)
{
	void *ret;
	u64 dma_mask, coherent_dma_mask;

	if (dev == NULL || !dev->dma_mask)
		return dma_alloc_coherent(dev, size, dma_handle, flags);
	dma_mask = *dev->dma_mask;
	coherent_dma_mask = dev->coherent_dma_mask;
	*dev->dma_mask = 0xffffffff;		/* do without masking */
	dev->coherent_dma_mask = 0xffffffff;	/* do without masking */
	ret = dma_alloc_coherent(dev, size, dma_handle, flags);
	*dev->dma_mask = dma_mask;			/* restore */
	dev->coherent_dma_mask = coherent_dma_mask;	/* restore */
	if (ret) {
		/* obtained address is out of range? */
		if (((unsigned long)*dma_handle + size - 1) & ~dma_mask) {
			/* reallocate with the proper mask */
			dma_free_coherent(dev, size, ret, *dma_handle);
			ret = dma_alloc_coherent(dev, size, dma_handle, flags);
		}
	} else {
		/* wish to succeed now with the proper mask... */
		if (dma_mask != 0xffffffffUL) {
			/* allocation with GFP_ATOMIC to avoid the long stall */
			flags &= ~GFP_KERNEL;
			flags |= GFP_ATOMIC;
			ret = dma_alloc_coherent(dev, size, dma_handle, flags);
		}
	}
	return ret;
}

/* redefine dma_alloc_coherent for some architectures */
#undef dma_alloc_coherent
#define dma_alloc_coherent snd_dma_hack_alloc_coherent

#endif /* arch */

/*
 *
 *  Generic memory allocators
 *
 */

static long snd_allocated_pages; /* holding the number of allocated pages */

static inline void inc_snd_pages(int order)
{
	snd_allocated_pages += 1 << order;
}

static inline void dec_snd_pages(int order)
{
	snd_allocated_pages -= 1 << order;
}

/**
 * snd_malloc_pages - allocate pages with the given size
 * @size: the size to allocate in bytes
 * @gfp_flags: the allocation conditions, GFP_XXX
 *
 * Allocates the physically contiguous pages with the given size.
 *
 * Returns the pointer of the buffer, or NULL if not enough memory.
 */
void *snd_malloc_pages(size_t size, gfp_t gfp_flags)
{
	int pg;
	void *res;

	snd_assert(size > 0, return NULL);
	snd_assert(gfp_flags != 0, return NULL);
	gfp_flags |= __GFP_COMP;	/* compound page lets parts be mapped */
	pg = get_order(size);
	if ((res = (void *) __get_free_pages(gfp_flags, pg)) != NULL)
		inc_snd_pages(pg);
	return res;
}

/**
 * snd_free_pages - release the pages
 * @ptr: the buffer pointer to release
 * @size: the allocated buffer size
 *
 * Releases the buffer allocated via snd_malloc_pages().
 */
void snd_free_pages(void *ptr, size_t size)
{
	int pg;

	if (ptr == NULL)
		return;
	pg = get_order(size);
	dec_snd_pages(pg);
	free_pages((unsigned long) ptr, pg);
}
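/*
 * Illustration (hypothetical caller, not part of the original file):
 * allocating a physically contiguous scratch buffer with the pair above.
 * snd_malloc_pages() rounds the request up to a whole power-of-two number
 * of pages via get_order(), so the same size must be passed back to
 * snd_free_pages() for the page accounting to balance.
 *
 *	void *buf = snd_malloc_pages(8192, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	snd_free_pages(buf, 8192);
 */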
/*
 *
 *  Bus-specific memory allocators
 *
 */

#ifdef CONFIG_HAS_DMA
/* allocate the coherent DMA pages */
static void *snd_malloc_dev_pages(struct device *dev, size_t size, dma_addr_t *dma)
{
	int pg;
	void *res;
	gfp_t gfp_flags;

	snd_assert(size > 0, return NULL);
	snd_assert(dma != NULL, return NULL);
	pg = get_order(size);
	gfp_flags = GFP_KERNEL
		| __GFP_COMP	/* compound page lets parts be mapped */
		| __GFP_NORETRY /* don't trigger OOM-killer */
		| __GFP_NOWARN; /* no stack trace print - this call is non-critical */
	res = dma_alloc_coherent(dev, PAGE_SIZE << pg, dma, gfp_flags);
	if (res != NULL)
		inc_snd_pages(pg);
	return res;
}

/* free the coherent DMA pages */
static void snd_free_dev_pages(struct device *dev, size_t size, void *ptr,
			       dma_addr_t dma)
{
	int pg;

	if (ptr == NULL)
		return;
	pg = get_order(size);
	dec_snd_pages(pg);
	dma_free_coherent(dev, PAGE_SIZE << pg, ptr, dma);
}
#endif /* CONFIG_HAS_DMA */

#ifdef CONFIG_SBUS

static void *snd_malloc_sbus_pages(struct device *dev, size_t size,
				   dma_addr_t *dma_addr)
{
	struct sbus_dev *sdev = (struct sbus_dev *)dev;
	int pg;
	void *res;

	snd_assert(size > 0, return NULL);
	snd_assert(dma_addr != NULL, return NULL);
	pg = get_order(size);
	res = sbus_alloc_consistent(sdev, PAGE_SIZE * (1 << pg), dma_addr);
	if (res != NULL)
		inc_snd_pages(pg);
	return res;
}

static void snd_free_sbus_pages(struct device *dev, size_t size,
				void *ptr, dma_addr_t dma_addr)
{
	struct sbus_dev *sdev = (struct sbus_dev *)dev;
	int pg;

	if (ptr == NULL)
		return;
	pg = get_order(size);
	dec_snd_pages(pg);
	sbus_free_consistent(sdev, PAGE_SIZE * (1 << pg), ptr, dma_addr);
}

#endif /* CONFIG_SBUS */

/*
 *
 *  ALSA generic memory management
 *
 */


/**
 * snd_dma_alloc_pages - allocate the buffer area according to the given type
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.
 *
 * Returns zero if the buffer with the given size is allocated successfully,
 * or a negative value on error.
 */
int snd_dma_alloc_pages(int type, struct device *device, size_t size,
			struct snd_dma_buffer *dmab)
{
	snd_assert(size > 0, return -ENXIO);
	snd_assert(dmab != NULL, return -ENXIO);

	dmab->dev.type = type;
	dmab->dev.dev = device;
	dmab->bytes = 0;
	switch (type) {
	case SNDRV_DMA_TYPE_CONTINUOUS:
		dmab->area = snd_malloc_pages(size, (unsigned long)device);
		dmab->addr = 0;
		break;
#ifdef CONFIG_SBUS
	case SNDRV_DMA_TYPE_SBUS:
		dmab->area = snd_malloc_sbus_pages(device, size, &dmab->addr);
		break;
#endif
#ifdef CONFIG_HAS_DMA
	case SNDRV_DMA_TYPE_DEV:
		dmab->area = snd_malloc_dev_pages(device, size, &dmab->addr);
		break;
	case SNDRV_DMA_TYPE_DEV_SG:
		snd_malloc_sgbuf_pages(device, size, dmab, NULL);
		break;
#endif
	default:
		printk(KERN_ERR "snd-malloc: invalid device type %d\n", type);
		dmab->area = NULL;
		dmab->addr = 0;
		return -ENXIO;
	}
	if (! dmab->area)
		return -ENOMEM;
	dmab->bytes = size;
	return 0;
}
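/*
 * Illustration (hypothetical driver code, not part of the original file):
 * allocating a 64 kB coherent DMA buffer for a PCI card and releasing it
 * with snd_dma_free_pages(), the matching release entry point of this API
 * (declared in <sound/memalloc.h>; its definition lies outside this
 * excerpt).  "pci" stands for the driver's struct pci_dev.
 *
 *	struct snd_dma_buffer dmab;
 *	int err;
 *
 *	err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, &pci->dev,
 *				  64 * 1024, &dmab);
 *	if (err < 0)
 *		return err;
 *	... use dmab.area (CPU address) and dmab.addr (DMA address) ...
 *	snd_dma_free_pages(&dmab);
 */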