📄 heap.c
                    }

                    result = morecore( heap, blocks * BLOCKSIZE );
                    if (result == NULL)
                         return NULL;

                    block = BLOCK (result);
                    heap->heapinfo[block].busy.type = 0;
                    heap->heapinfo[block].busy.info.size = blocks;
                    heap->chunks_used++;
                    heap->bytes_used += blocks * BLOCKSIZE;

                    return result;
               }
          }

          /* At this point we have found a suitable free list entry.
             Figure out how to remove what we need from the list. */
          result = ADDRESS (block);
          if (heap->heapinfo[block].free.size > blocks) {
               /* The block we found has a bit left over,
                  so relink the tail end back into the free list. */
               heap->heapinfo[block + blocks].free.size = heap->heapinfo[block].free.size - blocks;
               heap->heapinfo[block + blocks].free.next = heap->heapinfo[block].free.next;
               heap->heapinfo[block + blocks].free.prev = heap->heapinfo[block].free.prev;
               heap->heapinfo[heap->heapinfo[block].free.prev].free.next =
                    heap->heapinfo[heap->heapinfo[block].free.next].free.prev =
                         heap->heapindex = block + blocks;
          }
          else {
               /* The block exactly matches our requirements,
                  so just remove it from the list. */
               heap->heapinfo[heap->heapinfo[block].free.next].free.prev = heap->heapinfo[block].free.prev;
               heap->heapinfo[heap->heapinfo[block].free.prev].free.next =
                    heap->heapindex = heap->heapinfo[block].free.next;
               heap->chunks_free--;
          }

          heap->heapinfo[block].busy.type = 0;
          heap->heapinfo[block].busy.info.size = blocks;
          heap->chunks_used++;
          heap->bytes_used += blocks * BLOCKSIZE;
          heap->bytes_free -= blocks * BLOCKSIZE;
     }

     return result;
}

/* Resize the given region to the new size, returning a pointer
   to the (possibly moved) region.  This is optimized for speed;
   some benchmarks seem to indicate that greater compactness is
   achieved by unconditionally allocating and copying to a new
   region.  This module has incestuous knowledge of the internals
   of both free and shmalloc. */
void *
_fusion_shrealloc( shmalloc_heap *heap, void *ptr, size_t size )
{
     void   *result;
     int     type;
     size_t  block, blocks, oldlimit;

     D_DEBUG_AT( Fusion_SHMHeap, "%s( %p, %p, %d )\n", __FUNCTION__, heap, ptr, size );

     D_MAGIC_ASSERT( heap, shmalloc_heap );

     if (ptr == NULL)
          return _fusion_shmalloc( heap, size );
     else if (size == 0) {
          _fusion_shfree( heap, ptr );
          return NULL;
     }

     block = BLOCK (ptr);

     type = heap->heapinfo[block].busy.type;
     switch (type) {
          case 0:
               /* Maybe reallocate a large block to a small fragment. */
               if (size <= BLOCKSIZE / 2) {
                    result = _fusion_shmalloc( heap, size );
                    if (result != NULL) {
                         direct_memcpy (result, ptr, size);
                         _fusion_shfree( heap, ptr );
                         return result;
                    }
               }

               /* The new size is a large allocation as well;
                  see if we can hold it in place. */
               blocks = BLOCKIFY (size);
               if (blocks < heap->heapinfo[block].busy.info.size) {
                    /* The new size is smaller; return excess memory
                       to the free list. */
                    heap->heapinfo[block + blocks].busy.type      = 0;
                    heap->heapinfo[block + blocks].busy.info.size =
                         heap->heapinfo[block].busy.info.size - blocks;
                    heap->heapinfo[block].busy.info.size = blocks;

                    _fusion_shfree( heap, ADDRESS (block + blocks) );

                    result = ptr;
               }
               else if (blocks == heap->heapinfo[block].busy.info.size)
                    /* No size change necessary. */
                    result = ptr;
               else {
                    /* Won't fit, so allocate a new region that will.
                       Free the old region first in case there is sufficient
                       adjacent free space to grow without moving. */
                    blocks = heap->heapinfo[block].busy.info.size;

                    /* Prevent free from actually returning memory to the system. */
                    oldlimit        = heap->heaplimit;
                    heap->heaplimit = 0;

                    _fusion_shfree( heap, ptr );

                    heap->heaplimit = oldlimit;

                    result = _fusion_shmalloc( heap, size );
                    if (result == NULL) {
                         /* Now we're really in trouble.  We have to unfree
                            the thing we just freed.  Unfortunately it might
                            have been coalesced with its neighbors. */
                         if (heap->heapindex == block)
                              (void) _fusion_shmalloc( heap, blocks * BLOCKSIZE );
                         else {
                              void *previous = _fusion_shmalloc( heap, (block - heap->heapindex) * BLOCKSIZE );

                              (void) _fusion_shmalloc( heap, blocks * BLOCKSIZE );

                              _fusion_shfree( heap, previous );
                         }

                         return NULL;
                    }

                    if (ptr != result)
                         direct_memmove (result, ptr, blocks * BLOCKSIZE);
               }
               break;

          default:
               /* Old size is a fragment; type is logarithm
                  to base two of the fragment size. */
               if (size > (size_t) (1 << (type - 1)) && size <= (size_t) (1 << type))
                    /* The new size is the same kind of fragment. */
                    result = ptr;
               else {
                    /* The new size is different; allocate a new space,
                       and copy the lesser of the new size and the old. */
                    result = _fusion_shmalloc( heap, size );
                    if (result == NULL)
                         return NULL;

                    direct_memcpy (result, ptr, MIN (size, (size_t) 1 << type));

                    _fusion_shfree( heap, ptr );
               }
               break;
     }

     return result;
}

/* Return memory to the heap. */
void
_fusion_shfree( shmalloc_heap *heap, void *ptr )
{
     int              type;
     size_t           block, blocks;
     register size_t  i;
     struct list     *prev, *next;

     D_DEBUG_AT( Fusion_SHMHeap, "%s( %p, %p )\n", __FUNCTION__, heap, ptr );

     D_MAGIC_ASSERT( heap, shmalloc_heap );

     if (ptr == NULL)
          return;

     block = BLOCK (ptr);

     type = heap->heapinfo[block].busy.type;
     switch (type) {
          case 0:
               /* Get as many statistics as early as we can. */
               heap->chunks_used--;
               heap->bytes_used -= heap->heapinfo[block].busy.info.size * BLOCKSIZE;
               heap->bytes_free += heap->heapinfo[block].busy.info.size * BLOCKSIZE;

               /* Find the free cluster previous to this one in the free list.
                  Start searching at the last block referenced; this may benefit
                  programs with locality of allocation. */
               i = heap->heapindex;
               if (i > block)
                    while (i > block)
                         i = heap->heapinfo[i].free.prev;
               else {
                    do
                         i = heap->heapinfo[i].free.next;
                    while (i > 0 && i < block);

                    i = heap->heapinfo[i].free.prev;
               }

               /* Determine how to link this block into the free list. */
               if (block == i + heap->heapinfo[i].free.size) {
                    /* Coalesce this block with its predecessor. */
                    heap->heapinfo[i].free.size += heap->heapinfo[block].busy.info.size;
                    block = i;
               }
               else {
                    /* Really link this block back into the free list. */
                    heap->heapinfo[block].free.size = heap->heapinfo[block].busy.info.size;
                    heap->heapinfo[block].free.next = heap->heapinfo[i].free.next;
                    heap->heapinfo[block].free.prev = i;
                    heap->heapinfo[i].free.next     = block;
                    heap->heapinfo[heap->heapinfo[block].free.next].free.prev = block;

                    heap->chunks_free++;
               }

               /* Now that the block is linked in, see if we can coalesce it
                  with its successor (by deleting its successor from the list
                  and adding in its size). */
               if (block + heap->heapinfo[block].free.size == heap->heapinfo[block].free.next) {
                    heap->heapinfo[block].free.size +=
                         heap->heapinfo[heap->heapinfo[block].free.next].free.size;
                    heap->heapinfo[block].free.next  =
                         heap->heapinfo[heap->heapinfo[block].free.next].free.next;
                    heap->heapinfo[heap->heapinfo[block].free.next].free.prev = block;

                    heap->chunks_free--;
               }

               blocks = heap->heapinfo[block].free.size;

/* FIXME: as this is used when kernel is detected as >= 2.6.19.2 only,
   this fallback definition should be ok for now */
#ifndef MADV_REMOVE
#define MADV_REMOVE 9
#endif
               /* Punch a hole into the tmpfs file to really free RAM. */
               if (fusion_config->madv_remove)
                    madvise( ADDRESS(block), blocks * BLOCKSIZE, MADV_REMOVE );

               /* Now see if we can truncate the end. */
               if (blocks >= FINAL_FREE_BLOCKS && block + blocks == heap->heaplimit &&
                   __shmalloc_brk( heap, 0 ) == ADDRESS (block + blocks))
               {
                    register size_t bytes = blocks * BLOCKSIZE;

                    heap->heaplimit -= blocks;

                    __shmalloc_brk( heap, -bytes );

                    heap->heapinfo[heap->heapinfo[block].free.prev].free.next =
                         heap->heapinfo[block].free.next;
                    heap->heapinfo[heap->heapinfo[block].free.next].free.prev =
                         heap->heapinfo[block].free.prev;

                    block = heap->heapinfo[block].free.prev;

                    heap->chunks_free--;
                    heap->bytes_free -= bytes;
               }

               /* Set the next search to begin at this block. */
               heap->heapindex = block;
               break;

          default:
               /* Do some of the statistics. */
               heap->chunks_used--;
               heap->bytes_used -= 1 << type;
               heap->chunks_free++;
               heap->bytes_free += 1 << type;

               /* Get the address of the first free fragment in this block. */
               prev = (struct list *) ((char *) ADDRESS (block) +
                                       (heap->heapinfo[block].busy.info.frag.first << type));

#if 1    /* Adapted from Mike */
               if ((int)heap->heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1 &&
                   heap->fragblocks[type] > 1)
#else
               if ((int)heap->heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
#endif
               {
                    /* If all fragments of this block are free, remove them
                       from the fragment list and free the whole block. */
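
The listing above covers the allocator side of Fusion's shared-memory heap. As a quick orientation, here is a minimal usage sketch of the three entry points it defines. It is an illustration only: it assumes a shmalloc_heap that has already been created and attached by the surrounding Fusion SHM pool code (not part of this listing), and the example_usage function name and the request sizes are made up for the example.

/* Minimal usage sketch (illustration only).  Assumes an already
 * initialized shmalloc_heap provided by the surrounding Fusion SHM
 * pool code, which is not part of this listing. */
static int
example_usage( shmalloc_heap *heap )
{
     void *buf;
     void *bigger;
     void *frag;

     /* A request well above BLOCKSIZE / 2 takes the whole-block path. */
     buf = _fusion_shmalloc( heap, 64 * 1024 );
     if (!buf)
          return -1;

     /* Grow the region; contents are preserved, but the pointer may move. */
     bigger = _fusion_shrealloc( heap, buf, 256 * 1024 );
     if (!bigger) {
          /* On failure the old region stays allocated, so release it here. */
          _fusion_shfree( heap, buf );
          return -1;
     }

     /* A small request (<= BLOCKSIZE / 2) is served from a power-of-two
        fragment inside a shared block. */
     frag = _fusion_shmalloc( heap, 40 );

     _fusion_shfree( heap, frag );
     _fusion_shfree( heap, bigger );

     return 0;
}

The two request sizes are chosen to exercise both switch cases seen above: the whole-block case (busy.type == 0) and the power-of-two fragment case, where type is the base-two logarithm of the fragment size.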