📄 heap.c
/**
 * @file
 * This file is part of the Xenomai project.
 *
 * @note Copyright (C) 2004 Philippe Gerum <rpm@xenomai.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * \ingroup native_heap
 */

/*!
 * \ingroup native
 * \defgroup native_heap Memory heap services.
 *
 * Memory heaps are regions of memory used for dynamic memory
 * allocation in a time-bounded fashion. Blocks of memory are
 * allocated and freed in an arbitrary order, and the pattern of
 * allocation and size of blocks is not known until run time.
 *
 * The implementation of the memory allocator follows the algorithm
 * described in a USENIX 1988 paper called "Design of a General
 * Purpose Memory Allocator for the 4.3BSD Unix Kernel" by Marshall
 * K. McKusick and Michael J. Karels.
 *
 * Xenomai memory heaps are built over the nucleus's heap objects,
 * which in turn provide the needed support for sharing a memory area
 * between kernel and user-space using direct memory mapping.
 *
 *@{*/

#include <nucleus/pod.h>
#include <nucleus/registry.h>
#include <native/task.h>
#include <native/heap.h>

#ifdef CONFIG_XENO_EXPORT_REGISTRY

static int __heap_read_proc(char *page,
                            char **start,
                            off_t off,
                            int count,
                            int *eof,
                            void *data)
{
    RT_HEAP *heap = (RT_HEAP *)data;
    char *p = page;
    int len;
    spl_t s;

    p += sprintf(p, "type=%s:size=%lu:used=%lu\n",
                 (heap->mode & H_SHARED) == H_SHARED ? "shared" :
                 (heap->mode & H_MAPPABLE) ? "mappable" : "kernel",
                 (u_long)heap->csize,
                 xnheap_used_mem(&heap->heap_base));

    xnlock_get_irqsave(&nklock, s);

    if (xnsynch_nsleepers(&heap->synch_base) > 0) {
        xnpholder_t *holder;

        /* Pended heap -- dump waiters. */

        holder = getheadpq(xnsynch_wait_queue(&heap->synch_base));

        while (holder) {
            xnthread_t *sleeper = link2thread(holder, plink);
            RT_TASK *task = thread2rtask(sleeper);
            size_t size = task->wait_args.heap.size;
            p += sprintf(p, "+%s (size=%zd)\n",
                         xnthread_name(sleeper), size);
            holder = nextpq(xnsynch_wait_queue(&heap->synch_base), holder);
        }
    }

    xnlock_put_irqrestore(&nklock, s);

    len = (p - page) - off;
    if (len <= off + count)
        *eof = 1;
    *start = page + off;
    if (len > count)
        len = count;
    if (len < 0)
        len = 0;

    return len;
}
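/*
 * For reference, the handler above emits one summary line followed by
 * one line per waiting task, e.g. (values are illustrative only):
 *
 *    type=shared:size=16384:used=512
 *    +consumer (size=1024)
 */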
extern xnptree_t __native_ptree;

static xnpnode_t __heap_pnode = {
    .dir = NULL,
    .type = "heaps",
    .entries = 0,
    .read_proc = &__heap_read_proc,
    .write_proc = NULL,
    .root = &__native_ptree,
};

#elif defined(CONFIG_XENO_OPT_REGISTRY)

static xnpnode_t __heap_pnode = {
    .type = "heaps"
};

#endif /* CONFIG_XENO_EXPORT_REGISTRY */

static void __heap_flush_private(xnheap_t *heap,
                                 void *heapmem,
                                 u_long heapsize,
                                 void *cookie)
{
    xnarch_sysfree(heapmem, heapsize);
}

/*!
 * \fn int rt_heap_create(RT_HEAP *heap,const char *name,size_t heapsize,int mode);
 * \brief Create a memory heap or a shared memory segment.
 *
 * Initializes a memory heap suitable for time-bounded allocation
 * requests of dynamic memory. Memory heaps can be local to the kernel
 * address space, or mapped to user-space.
 *
 * In their simplest form, heaps are only accessible from kernel
 * space, and are merely usable as regular memory allocators.
 *
 * Heaps existing in kernel space can be mapped by user-space
 * processes to their own address space provided H_MAPPABLE has been
 * passed into the @a mode parameter.
 *
 * By default, heaps support allocation of multiple blocks of memory
 * in an arbitrary order. However, it is possible to ask for
 * single-block management by passing the H_SINGLE flag into the @a
 * mode parameter, in which case the entire memory space managed by
 * the heap is made available as a unique block. In this mode, all
 * allocation requests made through rt_heap_alloc() will then return
 * the same block address, pointing at the beginning of the heap
 * memory.
 *
 * H_SHARED is a shorthand for creating shared memory segments
 * transparently accessible from kernel and user-space contexts, which
 * are basically single-block, mappable heaps. By proper use of a
 * common @a name, all tasks can bind themselves to the same heap and
 * thus share the same memory space, whose start address should be
 * subsequently retrieved by a call to rt_heap_alloc().
 *
 * @param heap The address of a heap descriptor Xenomai will use to store
 * the heap-related data. This descriptor must always be valid while
 * the heap is active, therefore it must be allocated in permanent
 * memory.
 *
 * @param name An ASCII string standing for the symbolic name of the
 * heap. When non-NULL and non-empty, this string is copied to a safe
 * place into the descriptor, and passed to the registry package if
 * enabled for indexing the created heap. Mappable heaps must be given
 * a valid name.
 *
 * @param heapsize The size (in bytes) of the block pool which is
 * going to be pre-allocated to the heap. Memory blocks will be
 * claimed and released to this pool. The block pool is not
 * extensible, so this value must be compatible with the highest
 * memory pressure that could be expected.
 *
 * @param mode The heap creation mode. The following flags can be
 * OR'ed into this bitmask, each of them affecting the new heap:
 *
 * - H_FIFO makes tasks pend in FIFO order on the heap when waiting
 * for available blocks.
 *
 * - H_PRIO makes tasks pend in priority order on the heap when
 * waiting for available blocks.
 *
 * - H_MAPPABLE causes the heap to be sharable between kernel and
 * user-space contexts. Otherwise, the new heap is only available for
 * kernel-based usage. This flag is implicitly set when the caller is
 * running in user-space. This feature requires the real-time support
 * in user-space to be configured in (CONFIG_XENO_OPT_PERVASIVE).
 *
 * - H_SINGLE causes the entire heap space to be managed as a single
 * memory block.
 *
 * - H_SHARED is a shorthand for H_MAPPABLE|H_SINGLE, creating a
 * global shared memory segment accessible from both the kernel and
 * user-space contexts.
 *
 * - H_DMA causes the block pool associated with the heap to be
 * allocated in physically contiguous memory, suitable for DMA
 * operations with I/O devices. A 128 KB limit exists for @a heapsize
 * when this flag is passed.
 *
 * @return 0 is returned upon success. Otherwise:
 *
 * - -EEXIST is returned if the @a name is already in use by some
 * registered object.
 *
 * - -EINVAL is returned if @a heapsize is zero, greater than the
 * system limit, or if @a name is NULL or empty for a mappable heap.
 *
 * - -ENOMEM is returned if not enough system memory is available to
 * create or register the heap. Additionally, if H_MAPPABLE has been
 * passed in @a mode, errors while mapping the block pool in the
 * caller's address space may also lead to this return code.
 *
 * - -EPERM is returned if this service was called from an invalid
 * context.
 *
 * - -ENOSYS is returned if @a mode specifies H_MAPPABLE, but the
 * real-time support in user-space is unavailable.
 *
 * Environments:
 *
 * This service can be called from:
 *
 * - Kernel module initialization/cleanup code
 * - User-space task (switches to secondary mode)
 *
 * Rescheduling: possible.
 */

int rt_heap_create(RT_HEAP *heap, const char *name, size_t heapsize, int mode)
{
    int err;

    if (!xnpod_root_p())
        return -EPERM;

    if (heapsize == 0)
        return -EINVAL;

    /* Make sure we won't hit trivial argument errors when calling
       xnheap_init(). */

    if (heapsize < 2 * PAGE_SIZE)
        heapsize = 2 * PAGE_SIZE;

    heap->csize = heapsize; /* Record this for SBA management and inquiry. */

    /* Account for the overhead so that the actual free space is large
       enough to match the requested size. Using PAGE_SIZE for large
       single-block heaps might reserve way too much useless page map
       memory, but this should never get pathological anyway, since we
       are only consuming 1 byte per page. */

    heapsize += xnheap_overhead(heapsize, PAGE_SIZE);
    heapsize = PAGE_ALIGN(heapsize);

#ifdef __KERNEL__
    if (mode & H_MAPPABLE) {
        if (!name || !*name)
            return -EINVAL;

#ifdef CONFIG_XENO_OPT_PERVASIVE
        err = xnheap_init_mapped(&heap->heap_base,
                                 heapsize,
                                 (mode & H_DMA) ? GFP_DMA : 0);
        if (err)
            return err;

        heap->cpid = 0;
#else /* !CONFIG_XENO_OPT_PERVASIVE */
        return -ENOSYS;
#endif /* CONFIG_XENO_OPT_PERVASIVE */
    } else
#endif /* __KERNEL__ */
    {
        void *heapmem = xnarch_sysalloc(heapsize);

        if (!heapmem)
            return -ENOMEM;

        err = xnheap_init(&heap->heap_base,
                          heapmem,
                          heapsize,
                          PAGE_SIZE); /* Use natural page size. */
        if (err) {
            xnarch_sysfree(heapmem, heapsize);
            return err;
        }
    }

    xnsynch_init(&heap->synch_base, mode & (H_PRIO | H_FIFO));
    heap->handle = 0; /* i.e. (still) unregistered heap. */
    heap->magic = XENO_HEAP_MAGIC;
    heap->mode = mode;
    heap->sba = NULL;
    xnobject_copy_name(heap->name, name);

#ifdef CONFIG_XENO_OPT_REGISTRY
    /* <!> Since xnregistry_enter() may reschedule, only register
       complete objects, so that the registry cannot return handles to
       half-baked objects... */

    if (name) {
        xnpnode_t *pnode = &__heap_pnode;

        if (!*name) {
            /* Since this is an anonymous object (empty name on entry)
               from user-space, it gets registered under a unique
               internal name but is not exported through /proc. */
            xnobject_create_name(heap->name, sizeof(heap->name),
                                 (void *)heap);
            pnode = NULL;
        }

        err = xnregistry_enter(heap->name, heap, &heap->handle, pnode);

        if (err)
            rt_heap_delete(heap);
    }
#endif /* CONFIG_XENO_OPT_REGISTRY */

    return err;
}
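/*
 * A minimal usage sketch (illustrative only, error checking elided):
 * create a private kernel heap, allocate a block, then tear everything
 * down. rt_heap_alloc() is documented below; rt_heap_free() and the
 * TM_INFINITE timeout constant are assumed from the rest of the native
 * heap API, since they are not defined in this file.
 *
 *    RT_HEAP my_heap;
 *    void *block;
 *
 *    rt_heap_create(&my_heap, "MyHeap", 8192, H_PRIO);
 *    rt_heap_alloc(&my_heap, 256, TM_INFINITE, &block);
 *    ... use the 256-byte block ...
 *    rt_heap_free(&my_heap, block);
 *    rt_heap_delete(&my_heap);
 */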
/**
 * @fn int rt_heap_delete(RT_HEAP *heap)
 *
 * @brief Delete a real-time heap.
 *
 * Destroy a heap and release all the tasks currently pending on it.
 * A heap remains in the system from the time rt_heap_create() is
 * called until this service is called to destroy it.
 *
 * @param heap The descriptor address of the affected heap.
 *
 * @return 0 is returned upon success. Otherwise:
 *
 * - -EINVAL is returned if @a heap is not a heap descriptor.
 *
 * - -EIDRM is returned if @a heap is a deleted heap descriptor.
 *
 * - -EPERM is returned if this service was called from an
 * asynchronous context.
 *
 * Environments:
 *
 * This service can be called from:
 *
 * - Kernel module initialization/cleanup code
 * - User-space task (switches to secondary mode)
 *
 * Rescheduling: possible.
 */
int rt_heap_delete(RT_HEAP *heap)
{
    int err = 0, rc;
    spl_t s;

    if (xnpod_asynch_p())
        return -EPERM;

    xnlock_get_irqsave(&nklock, s);

    heap = xeno_h2obj_validate(heap, XENO_HEAP_MAGIC, RT_HEAP);

    if (!heap) {
        err = xeno_handle_error(heap, XENO_HEAP_MAGIC, RT_HEAP);
        xnlock_put_irqrestore(&nklock, s);
        return err;
    }

    rc = xnsynch_destroy(&heap->synch_base);

#ifdef CONFIG_XENO_OPT_REGISTRY
    if (heap->handle)
        xnregistry_remove(heap->handle);
#endif /* CONFIG_XENO_OPT_REGISTRY */

    xeno_mark_deleted(heap);

    /* Get out of the nklocked section before releasing the heap
       memory, since we are about to invoke Linux kernel services. */

    xnlock_put_irqrestore(&nklock, s);

    /* The heap descriptor has been marked as deleted before we
       released the superlock, thus preventing any successful
       subsequent calls of rt_heap_delete(), so now we can actually
       destroy it safely. */

#if defined(__KERNEL__) && defined(CONFIG_XENO_OPT_PERVASIVE)
    if (heap->mode & H_MAPPABLE)
        err = xnheap_destroy_mapped(&heap->heap_base);
    else
#endif /* __KERNEL__ && CONFIG_XENO_OPT_PERVASIVE */
        err = xnheap_destroy(&heap->heap_base, &__heap_flush_private, NULL);

    if (rc == XNSYNCH_RESCHED)
        /* Some task has been woken up as a result of the deletion:
           reschedule now. */
        xnpod_schedule();

    return err;
}

/**
 * @fn int rt_heap_alloc(RT_HEAP *heap,size_t size,RTIME timeout,void **blockp)