📄 gc-mem.c
字号:
info->free = 0; DBG(GCDIAG, memset(info->data, 0, sz)); GCBLOCK2FREE(info, 0)->next = 0; /* * XXX gc_large_block only called during a block allocation. * The following is just going to get overwritten. (Right?) */ GC_SET_COLOUR(info, 0, GC_COLOUR_FREE); GC_SET_STATE(info, 0, GC_STATE_NORMAL); return (info);}/* * Primitive block management: Allocating and freeing whole pages. * * Each primitive block of the heap consists of one or more contiguous * pages. Pages of unused primitive blocks are marked unreadable when * kaffe is compiled with debugging enabled. Whether a block is in use * can be determined by its nr field: when it's in use, its nr field * will be > 0. * * All primitive blocks are chained through their pnext / pprev fields, * no matter whether or not they are in use. This makes the necessary * check for mergable blocks as cheap as possible. Merging small blocks * is necessary so that single unused primitive blocks in the heap are * always as large as possible. The first block in the list is stored * in gc_block_base, the last block in the list is gc_last_block. * * In order to speed up the search for the primitive block that fits * a given allocation request best, small primitive blocks are stored * in several lists (one per size). If no primitive blocks of a given * size are left, a larger one is splitted instead. 
*/

/* Number of per-size freelists for small primitive blocks; the extra
 * slot (index GC_PRIM_LIST_COUNT) holds all larger free blocks. */
#define GC_PRIM_LIST_COUNT 20

/* Base address of the gc_block descriptor array (set up lazily by
 * gc_block_alloc). */
uintp gc_block_base;
/* Last block in the address-ordered chain of all primitive blocks. */
static gc_block *gc_last_block;
/* Size-indexed freelists: slot i holds free blocks of exactly i+1
 * pages; the final slot holds everything larger. */
static gc_block *gc_prim_freelist[GC_PRIM_LIST_COUNT+1];

/* Mark a primitive block as used */
static inline void gc_block_add(gc_block *b)
{
	b->nr = 1;
}

/* Mark a primitive block as unused */
static inline void gc_block_rm(gc_block *b)
{
	b->nr = 0;
}

/* Get the end of a gc block, i.e. the descriptor one past its pages.
 *
 * This is OK, gc_prim_(alloc|free) never assume GCBLOCKEND is really
 * a valid block */
static inline gc_block*
gc_block_end(gc_block *b)
{
	return b + ((b->size+gc_pgsize-1)>>gc_pgbits);
}

/* Return the primitive freelist blk belongs to, keyed by its size in
 * pages. */
static inline gc_block **
gc_get_prim_freelist (gc_block *blk)
{
	size_t sz = blk->size >> gc_pgbits;

	if (sz <= GC_PRIM_LIST_COUNT) {
		assert (sz > 0);
		return &gc_prim_freelist[sz-1];
	}

	return &gc_prim_freelist[GC_PRIM_LIST_COUNT];
}

/* Add a primitive block to the correct freelist.
 *
 * While a primitive block sits on a freelist, its `free' field is
 * reused as a back pointer to whatever points at it (a freelist head
 * or the previous list member's `next' field).  That back pointer is
 * what makes gc_remove_from_prim_freelist an O(1) unlink. */
static inline void
gc_add_to_prim_freelist(gc_block *blk)
{
	gc_block **list = gc_get_prim_freelist (blk);

	/* insert the block into the list, sorting by ascending addresses */
	while (*list && blk > *list) {
		list = &(*list)->next;
	}

	if (*list) {
		/* the follower's back pointer now targets our `next' field */
		(*list)->free = (gc_freeobj *)&blk->next;
	}

	blk->next = *list;
	*list = blk;
	blk->free = (gc_freeobj *)list;
}

/* Remove a primitive block from its freelist in O(1), using the back
 * pointer stashed in its `free' field by gc_add_to_prim_freelist. */
static inline void
gc_remove_from_prim_freelist(gc_block *blk)
{
	*( (gc_block **) blk->free ) = blk->next;

	if (blk->next) {
		blk->next->free = blk->free;
	}
}

/*
 * Allocate a block of memory from the free list or, failing that, the
 * system pool.
*/
static gc_block*
gc_primitive_alloc(size_t sz)
{
	size_t excess = 0;
	gc_block *chosen = NULL;
	size_t npg = sz >> gc_pgbits;

	assert(sz % gc_pgsize == 0);

	DBG(GCPRIM, dprintf("\ngc_primitive_alloc: got to allocate 0x%x bytes\n", (unsigned int)sz); )

	/* Small request: walk the per-size freelists, starting at the
	 * exact size and working upwards; the first non-empty list wins. */
	if (npg <= GC_PRIM_LIST_COUNT) {
		size_t idx;

		for (idx = npg - 1; idx < GC_PRIM_LIST_COUNT; idx++) {
			if (gc_prim_freelist[idx] != NULL) {
				chosen = gc_prim_freelist[idx];
				excess = chosen->size - sz;
				break;
			}
		}
	}

	/* No luck so far: best-fit scan of the big-block list. */
	if (chosen == NULL) {
		gc_block *cur;

		for (cur = gc_prim_freelist[GC_PRIM_LIST_COUNT]; cur != NULL; cur = cur->next) {
			if (sz == cur->size) {
				/* exact fit, stop searching */
				excess = 0;
				chosen = cur;
				break;
			}
			if (sz < cur->size) {
				size_t left = cur->size - sz;

				if (chosen == NULL || left < excess) {
					excess = left;
					chosen = cur;
				}
			}
		}
	}

	if (chosen == NULL) {
		DBG(GCPRIM, dprintf("gc_primitive_alloc: no suitable block found!\n"); )

		/* Nothing found on free list */
		return (0);
	}

	/* Unlink the winner, then split off any surplus pages as a new
	 * free primitive block. */
	gc_remove_from_prim_freelist (chosen);

	DBG(GCPRIM, dprintf ("gc_primitive_alloc: found best_fit %p diff 0x%x (0x%x - 0x%x)\n", chosen, (unsigned int)excess, chosen->size, (unsigned int)sz); )

	assert ( excess % gc_pgsize == 0 );

	if (excess > 0) {
		gc_block *rest;

		chosen->size = sz;
		rest = gc_block_end(chosen);
		rest->size = excess;
		gc_block_rm (rest);

		DBG(GCPRIM, dprintf ("gc_primitive_alloc: splitted remaining 0x%x bytes @ %p\n", (unsigned int)excess, rest); )
		DBG(GCDIAG, gc_set_magic_marker(rest));

		/* keep the doubly linked chain of primitive blocks intact */
		rest->pnext = chosen->pnext;
		rest->pprev = chosen;
		chosen->pnext = rest;
		if (rest->pnext) {
			rest->pnext->pprev = rest;
		} else {
			gc_last_block = rest;
		}

		/* and hand the remainder to its freelist */
		gc_add_to_prim_freelist (rest);
	}

	DBG(GCPRIM, dprintf("gc_primitive_alloc: 0x%x bytes from freelist @ %p\n", chosen->size, chosen); )

	gc_block_add(chosen);
	return (chosen);
}

/*
 * merge a
primitive block with its successor. */
static inline void
gc_merge_with_successor (gc_block *blk)
{
	gc_block *succ = blk->pnext;

	assert (succ);

	blk->size += succ->size;
	blk->pnext = succ->pnext;

	/*
	 * The successor has been swallowed; whatever followed it now
	 * follows the merged block.  If nothing follows, the merged
	 * block became the last block of the primitive chain.
	 */
	if (blk->pnext != NULL) {
		blk->pnext->pprev = blk;
	} else {
		gc_last_block = blk;
	}
}

/*
 * Return a block of memory to the free list.
 */
void
gc_primitive_free(gc_block* mem)
{
	gc_block *neigh;

	assert(mem->size % gc_pgsize == 0);

	/* Remove from object hash */
	gc_block_rm(mem);

	DBG(GCPRIM, dprintf ("\ngc_primitive_free: freeing block %p (%x bytes, %x)\n", mem, mem->size, mem->size >> gc_pgbits); )

	/*
	 * Try to coalesce with the following block.  The gc_block_end
	 * check is required because the heap need not be one continuous
	 * memory area, so two neighbours in the chain are not always
	 * adjacent in memory.
	 */
	neigh = mem->pnext;
	if (neigh != NULL && !GCBLOCKINUSE(neigh) && gc_block_end(mem) == neigh) {
		DBG(GCPRIM, dprintf ("gc_primitive_free: merging %p with its successor (%p, %u)\n", mem, neigh, neigh->size);)

		gc_remove_from_prim_freelist(neigh);
		gc_merge_with_successor (mem);
	}

	/* Same game with the preceding block. */
	neigh = mem->pprev;
	if (neigh != NULL && !GCBLOCKINUSE(neigh) && gc_block_end(neigh) == mem) {
		DBG(GCPRIM, dprintf ("gc_primitive_free: merging %p with its predecessor (%p, %u)\n", mem, neigh, neigh->size); )

		gc_remove_from_prim_freelist(neigh);
		mem = neigh;
		gc_merge_with_successor (mem);
	}

	gc_add_to_prim_freelist (mem);

	DBG(GCPRIM, dprintf ("gc_primitive_free: added 0x%x bytes @ %p to freelist %u @ %p\n", mem->size, mem, (unsigned int)(gc_get_prim_freelist(mem)-&gc_prim_freelist[0]), gc_get_prim_freelist(mem)); )
}

/*
 * Try to reserve some memory for OOM exception handling.  Gc once at
 * the beginning.  We start out looking for an arbitrary number of
 * pages (4), and cut our expectations in half until we are able to
 * meet them.
*/
gc_block *
gc_primitive_reserve(void)
{
	gc_block *r = 0;
	size_t size = 4 * gc_pgsize;

	/* Halve the request until a primitive block can be obtained,
	 * bottoming out at a single page. */
	while (size >= gc_pgsize && !(r = gc_primitive_alloc(size))) {
		if (size == gc_pgsize) {
			break;
		}
		size /= 2;
	}
	return r;
}

/*
 * System memory management: Obtaining additional memory from the
 * OS. This looks more complicated than it is, since it does not require
 * sbrk.
 */

/* Get some page-aligned memory from the system.
 *
 * Returns the page-aligned base address, or 0 on failure. */
static uintp
pagealloc(size_t size)
{
	void* ptr;

#define	CHECK_OUT_OF_MEMORY(P)	if ((P) == 0) return 0;

#if defined(HAVE_SBRK)
	/* Our primary choice for basic memory allocation is sbrk() which
	 * should avoid any unseen space overheads. */
	for (;;) {
		int missed;
		ptr = sbrk(size);
		if (ptr == (void*)-1) {
			ptr = 0;
			break;
		}
		if ((uintp)ptr % gc_pgsize == 0) {
			break;
		}
		/* The break was not page aligned: give the chunk back,
		 * keeping just enough (`missed' bytes) to push the next
		 * break onto a page boundary, then retry. */
		missed = gc_pgsize - ((uintp)ptr % gc_pgsize);
		DBG(GCSYSALLOC, dprintf("unaligned sbrk %p, missed %d bytes\n", ptr, missed));
		sbrk(-size + missed);
	}
	CHECK_OUT_OF_MEMORY(ptr);
#elif defined(HAVE_MEMALIGN)
	ptr = memalign(gc_pgsize, size);
	CHECK_OUT_OF_MEMORY(ptr);
#elif defined(HAVE_VALLOC)
	ptr = valloc(size);
	CHECK_OUT_OF_MEMORY(ptr);
#else
	/* Fallback ...
	 * Allocate memory using malloc and align by hand.
	 * NOTE(review): the aligned pointer returned here differs from
	 * the pointer malloc returned, so the non-sbrk branch of
	 * pagefree (plain free on the aligned base) looks incompatible
	 * with this path -- confirm which configurations actually pair
	 * these two branches. */
	size += gc_pgsize;
	ptr = malloc(size);
	CHECK_OUT_OF_MEMORY(ptr);
	ptr = (void*)((((uintp)ptr) + gc_pgsize - 1) & -gc_pgsize);
#endif
	addToCounter(&gcpages, "gcmem-system pages", 1, size);
	return ((uintp) ptr);
}

/* Free memory allocated with pagealloc.
 *
 * NOTE(review): the sbrk branch simply lowers the break by `size' and
 * ignores `base'; that only undoes the allocation if it is still the
 * topmost sbrk'd chunk -- callers appear to use this for immediate
 * rollback (see gc_block_alloc), confirm no other callers exist.
 * Also note the guard here is `#ifdef HAVE_SBRK' while pagealloc uses
 * `#if defined(HAVE_SBRK)'; equivalent, but inconsistent style. */
static void pagefree(uintp base, size_t size)
{
#ifdef HAVE_SBRK
	sbrk(-size);
#else
	/* it must have been allocated with memalign, valloc or malloc */
	free((void *)base);
#endif
}

/*
 * Allocate size bytes of heap memory, and return the corresponding
 * gc_block *.
*/static void *gc_block_alloc(size_t size){ int size_pg = (size>>gc_pgbits); static int n_live = 0; /* number of pages in java heap */ static int nblocks; /* number of gc_blocks in array */ uintp heap_addr; static uintp last_addr; if (!gc_block_base) { nblocks = (gc_heap_limit+gc_pgsize-1)>>gc_pgbits; gc_block_base = (uintp) malloc(nblocks * sizeof(gc_block)); if (!gc_block_base) return 0; memset((void *)gc_block_base, 0, nblocks * sizeof(gc_block)); } DBG(GCSYSALLOC, dprintf("pagealloc(%ld)", (long) size)); heap_addr = pagealloc(size); DBG(GCSYSALLOC, dprintf(" => %p\n", (void *) heap_addr)); if (!heap_addr) return 0; if (!gc_heap_base) { gc_heap_base = heap_addr; } if (GCMEM2BLOCK(heap_addr + size) > ((gc_block *)gc_block_base) + nblocks || heap_addr < gc_heap_base) { uintp old_blocks = gc_block_base; int onb = nblocks; int min_nb; /* minimum size of array to hold heap_addr */#if defined(KAFFE_STATS) static timespent growtime;#endif startTiming(&growtime, "gctime-blockrealloc"); /* Pick a new size for the gc_block array. Remember, malloc does not simply grow a memory segment. We can extrapolate how many gc_blocks we need for the entire heap based on how many heap pages currently fit in the gc_block array. 
But, we must also make sure to allocate enough blocks to cover the current allocation */ nblocks = (nblocks * (gc_heap_limit >> gc_pgbits)) / n_live; if (heap_addr < gc_heap_base) min_nb = nblocks + ((gc_heap_base - heap_addr) >> gc_pgbits); else min_nb = ((heap_addr + size) - gc_heap_base) >> gc_pgbits; nblocks = MAX(nblocks, min_nb); DBG(GCSYSALLOC, dprintf("growing block array from %d to %d elements\n", onb, nblocks)); jthread_spinon(0); gc_block_base = (uintp) realloc((void *) old_blocks, nblocks * sizeof(gc_block)); if (!gc_block_base) { /* roll back this call */ pagefree(heap_addr, size); gc_block_base = old_blocks; nblocks = onb; jthread_spinoff(0); return 0; } /* If the array's address has changed, we have to fix up the pointers in the gc_blocks, as well as all external pointers to the gc_blocks. We can only fix gc_prim_freelist and the size-freelist array. There should be no gc_block *'s on any stack now. */ if (gc_block_base != old_blocks) { int i; gc_block *b = (void *) gc_block_base; uintp delta = gc_block_base - old_blocks;#define R(X) if (X) ((uintp) (X)) += delta DBG(GCSYSALLOC, dprintf("relocating gc_block array\n")); for (i = 0; i < onb; i++) R(b[i].next); memset(b + onb, 0, (nblocks - onb) * sizeof(gc_block)); for (i = 0; i<=GC_PRIM_LIST_COUNT; i++) R(gc_prim_freelist[i]); for (i = 0; freelist[i].list != (void*)-1; i++) R(freelist[i].list);#undef R } jthread_spinoff(0); stopTiming(&growtime); } n_live += size_pg; last_addr = MAX(last_addr, heap_addr + size); gc_heap_range = last_addr - gc_heap_base; DBG(GCSYSALLOC, dprintf("%ld unused bytes in heap addr range\n", (long) (gc_heap_range - gc_heap_total))); return GCMEM2BLOCK(heap_addr);}/** * Grows the heap. * * @param sz minimum number of bytes to grow. 
* @return 0 in case of an error, otherwise != 0 */void *gc_heap_grow(size_t sz){ gc_block* blk; int iLockRoot; if (GC_SMALL_OBJECT(sz)) { sz = gc_pgsize; } else { sz = sz + 2 + ROUNDUPALIGN(1); sz = ROUNDUPPAGESIZE(sz); } if (sz < gc_heap_allocation_size) { sz = gc_heap_allocation_size; } assert(sz % gc_pgsize == 0); lockStaticMutex(&gc_heap_lock); if (gc_heap_total == gc_heap_limit) { unlockStaticMutex(&gc_heap_lock); return (0); } else if (gc_heap_total + sz > gc_heap_limit) { /* take as much memory as we can */ sz = gc_heap_limit - gc_heap_total; assert(sz % gc_pgsize == 0); DBG(GCSYSALLOC, dprintf("allocating up to limit\n")); }#ifdef KAFFE_VMDEBUG gc_system_alloc_cnt++;#endif blk = gc_block_alloc(sz); DBG(GCSYSALLOC, dprintf("gc_system_alloc: %ld byte at %p\n", (long) sz, blk); ) if (blk == 0) { unlockStaticMutex(&gc_heap_lock); return (0); } gc_heap_total += sz; assert(gc_heap_total <= gc_heap_limit); /* Place block into the freelist for subsequent use */ DBG(GCDIAG, gc_set_magic_marker(blk)); blk->size = sz; /* maintain list of primitive blocks */ if (gc_last_block) { gc_last_block->pnext = blk; blk->pprev = gc_last_block; } else { gc_last_block = blk; } /* Free block into the system */ gc_primitive_free(blk); unlockStaticMutex(&gc_heap_lock); return (blk);}
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -