📄 heap.c
字号:
} /* No available free range in the existing extents so far. If we cannot extend the heap, we have failed and we are done with this request. */ if (!testbits(heap->flags,XNHEAP_EXTENDABLE)) return NULL; /* If the caller has to wait, it must be running on behalf of a regular thread context (i.e. not an interrupt context). */ if (!testbits(flags,XNHEAP_NOWAIT)) xnpod_check_context(XNPOD_THREAD_CONTEXT); /* Asynchronous code cannot wait -- Bail out. */ if (xnpod_asynch_p() || testbits(flags,XNHEAP_NOWAIT)) return NULL; /* Get a new extent. */ extent = (xnextent_t *)xnarch_sysalloc(heap->extentsize); if (extent == NULL) return NULL; init_extent(heap,extent); appendq(&heap->extents,&extent->link); goto searchrange; /* Always successful at the first try */splitpage: /* At this point, headpage is valid and points to the first page of a range of contiguous free pages larger or equal than 'bsize'. */ if (bsize < heap->pagesize) { /* If the allocation size is smaller than the standard page size, split the page in smaller blocks of this size, building a free list of free blocks. */ for (block = headpage, eblock = headpage + heap->pagesize - bsize; block < eblock; block += bsize) *((caddr_t *)block) = block + bsize; *((caddr_t *)eblock) = NULL; } else *((caddr_t *)headpage) = NULL; pagenum = (headpage - extent->membase) >> heap->pageshift; /* Update the extent's page map. If log2size is non-zero (i.e. bsize <= 2 * pagesize), store it in the first page's slot to record the exact block size (which is a power of two). Otherwise, store the special marker XNHEAP_PLIST, indicating the start of a block whose size is a multiple of the standard page size, but not necessarily a power of two. In any case, the following pages slots are marked as 'continued' (PCONT). */ extent->pagemap[pagenum] = log2size ? log2size : XNHEAP_PLIST; for (pagecont = bsize >> heap->pageshift; pagecont > 1; pagecont--) extent->pagemap[pagenum + pagecont - 1] = XNHEAP_PCONT; return headpage;}/*! 
* \fn void *xnheap_alloc(xnheap_t *heap, u_long size, xnflags_t flags); * \brief Allocate a memory block from a memory heap. * * Allocates a contiguous region of memory from an active memory heap. * Such allocation is guaranteed to be time-bounded if the heap is * non-extendable (see xnheap_init()). Otherwise, it might trigger a * dynamic extension of the storage area through an internal request * to the host operating environment. * * @param heap The descriptor address of the heap to get memory from. * * @param size The size in bytes of the requested block. Sizes lower * or equal to the page size are rounded either to the minimum * allocation size if lower than this value, or to the minimum * alignment size if greater or equal to this value. In the current * implementation, with MINALLOC = 8 and MINALIGN = 16, a 7 bytes * request will be rounded to 8 bytes, and a 17 bytes request will be * rounded to 32. * * @param flags A set of flags affecting the operation. If * XNHEAP_NOWAIT is passed, this service will return NULL without * attempting to extend the heap dynamically upon memory * starvation. This flag is not applicable to non-extendable heaps. * * @return The address of the allocated region upon success, or NULL * if no memory is available from the specified non-extendable heap, * or no memory can be obtained from the host operating environment to * extend the heap. * * Side-effect: This routine does not call the rescheduling procedure. * * Context: This routine can always be called on behalf of a thread * context. It can also be called on behalf of an IST context if the * heap storage area has been statically-defined at initialization * time (see xnheap_init()). 
*/void *xnheap_alloc (xnheap_t *heap, u_long size, xnflags_t flags){ caddr_t block; u_long bsize; int log2size; if (size == 0) return NULL; if (size <= heap->pagesize) /* Sizes lower or equal to the page size are rounded either to the minimum allocation size if lower than this value, or to the minimum alignment size if greater or equal to this value. In other words, with MINALLOC = 8 and MINALIGN = 16, a 7 bytes request will be rounded to 8 bytes, and a 17 bytes request will be rounded to 32. */ { if (size <= XNHEAP_MINALIGNSZ) size = (size + XNHEAP_MINALLOCSZ - 1) & ~(XNHEAP_MINALLOCSZ - 1); else size = (size + XNHEAP_MINALIGNSZ - 1) & ~(XNHEAP_MINALIGNSZ - 1); } else /* Sizes greater than the page size are rounded to a multiple of the page size. */ size = (size + heap->pagesize - 1) & ~(heap->pagesize - 1); /* It becomes more space efficient to directly allocate pages from the free page list whenever the requested size is greater than 2 times the page size. Otherwise, use the bucketed memory blocks. */ if (size <= heap->pagesize * 2) { /* Find the first power of two greater or equal to the rounded size. The log2 value of this size is also computed. */ for (bsize = (1 << XNHEAP_MINLOG2), log2size = XNHEAP_MINLOG2; bsize < size; bsize <<= 1, log2size++) ; /* Loop */ xnmutex_lock(&heap->mutex); block = heap->buckets[log2size - XNHEAP_MINLOG2]; if (block == NULL) { block = get_free_range(heap,bsize,log2size,flags); if (block == NULL) goto release_and_exit; } heap->buckets[log2size - XNHEAP_MINLOG2] = *((caddr_t *)block); heap->ubytes += bsize; } else { if (size > heap->maxcont) return NULL; xnmutex_lock(&heap->mutex); /* Directly request a free page range. */ block = get_free_range(heap,size,0,flags); if (block) heap->ubytes += size; }release_and_exit: xnmutex_unlock(&heap->mutex); return block;}/*! * \fn int xnheap_free(xnheap_t *heap, void *block); * \brief Release a memory block to a memory heap. 
* * Releases a memory region to the memory heap it was previously * allocated from. * * @param heap The descriptor address of the heap to release memory * to. * * @param block The address of the region to release returned by a * previous call to xnheap_alloc(). * * @return XN_OK is returned upon success, or XNERR_PARAM is returned * whenever the block is not a valid region of the specified heap. * * Side-effect: This routine does not call the rescheduling procedure. * * Context: This routine can be called on behalf of a thread or IST * context */int xnheap_free (xnheap_t *heap, void *block){ caddr_t freepage, lastpage, nextpage, tailpage; u_long pagenum, pagecont, boffset, bsize; xnextent_t *extent = NULL; int log2size, npages; xnholder_t *holder; xnmutex_lock(&heap->mutex); /* Find the extent from which the returned block is originating. If the heap is non-extendable, then a single extent is scanned at most. */ for (holder = getheadq(&heap->extents); holder != NULL; holder = nextq(&heap->extents,holder)) { extent = link2extent(holder); if ((caddr_t)block >= extent->membase && (caddr_t)block < extent->memlim) break; } if (!holder) goto unlock_and_fail; /* Compute the heading page number in the page map. */ pagenum = ((caddr_t)block - extent->membase) >> heap->pageshift; boffset = ((caddr_t)block - (extent->membase + (pagenum << heap->pageshift))); switch (extent->pagemap[pagenum]) { case XNHEAP_PFREE: /* Unallocated page? */ case XNHEAP_PCONT: /* Not a range heading page? */unlock_and_fail: xnmutex_unlock(&heap->mutex); return XNERR_PARAM; case XNHEAP_PLIST: npages = 1; while (npages < heap->npages && extent->pagemap[pagenum + npages] == XNHEAP_PCONT) npages++; bsize = npages * heap->pagesize; /* Link all freed pages in a single sub-list. 
*/ for (freepage = (caddr_t)block, tailpage = (caddr_t)block + bsize - heap->pagesize; freepage < tailpage; freepage += heap->pagesize) *((caddr_t *)freepage) = freepage + heap->pagesize; /* Mark the released pages as free in the extent's page map. */ for (pagecont = 0; pagecont < npages; pagecont++) extent->pagemap[pagenum + pagecont] = XNHEAP_PFREE; /* Return the sub-list to the free page list, keeping an increasing address order to favor coalescence. */ for (nextpage = extent->freelist, lastpage = NULL; nextpage != NULL && nextpage < (caddr_t)block; lastpage = nextpage, nextpage = *((caddr_t *)nextpage)) ; /* Loop */ *((caddr_t *)tailpage) = nextpage; if (lastpage) *((caddr_t *)lastpage) = (caddr_t)block; else extent->freelist = (caddr_t)block; break; default: log2size = extent->pagemap[pagenum]; bsize = (1 << log2size); if ((boffset & (bsize - 1)) != 0) /* Not a block start? */ goto unlock_and_fail; /* Return the block to the bucketed memory space. */ *((caddr_t *)block) = heap->buckets[log2size - XNHEAP_MINLOG2]; heap->buckets[log2size - XNHEAP_MINLOG2] = block; break; } heap->ubytes -= bsize; xnmutex_unlock(&heap->mutex); return XN_OK;}/* * IMPLEMENTATION NOTES: * * The implementation follows the algorithm described in a USENIX * 1988 paper called "Design of a General Purpose Memory Allocator for * the 4.3BSD Unix Kernel" by Marshall K. McKusick and Michael * J. Karels. You can find it at various locations on the net, * including http://docs.FreeBSD.org/44doc/papers/kernmalloc.pdf. * A minor variation allows this implementation to have 'extendable' * heaps when needed, with multiple memory extents providing autonomous * page address spaces. When the non-extendable form is used, the heap * management routines show bounded worst-case execution time. 
 *
 * The data structures hierarchy is as follows:
 *
 * HEAP {
 *      block_buckets[]
 *      extent_queue -------+
 * }                        |
 *                          V
 * EXTENT #1 {
 *      <static header>
 *      page_map[npages]
 *      page_array[npages][pagesize]
 * } -+
 *    |
 *    |
 *    V
 * EXTENT #n {
 *      <static header>
 *      page_map[npages]
 *      page_array[npages][pagesize]
 * }
 */

/*@}*/

/* Public entry points exported to kernel modules. */
EXPORT_SYMBOL(xnheap_alloc);
EXPORT_SYMBOL(xnheap_destroy);
EXPORT_SYMBOL(xnheap_free);
EXPORT_SYMBOL(xnheap_init);
EXPORT_SYMBOL(kheap);
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -