gc-mem.c
 * each of 'sz' bytes. */
static
gc_block*
gc_small_block(size_t sz)
{
    gc_block* info;
    int i;
    int nr;

    info = gc_primitive_alloc(gc_pgsize);
    if (info == 0) {
        return (0);
    }

    /* Calculate number of objects in this block */
    nr = (gc_pgsize-GCBLOCK_OVH-ROUNDUPALIGN(1))/(sz+2);

    /* Setup the meta-data for the block */
    DBG(GCDIAG, info->magic = GC_MAGIC);
    info->size = sz;
    info->nr = nr;
    info->avail = nr;
    info->funcs = (uint8*)GCBLOCK2BASE(info);
    info->state = (uint8*)(info->funcs + nr);
    info->data = (uint8*)ROUNDUPALIGN(info->state + nr);

    DBG(GCDIAG, memset(info->data, 0, sz * nr));

    /* Build the objects into a free list */
    for (i = nr-1; i >= 0; i--) {
        GCBLOCK2FREE(info, i)->next = GCBLOCK2FREE(info, i+1);
        GC_SET_COLOUR(info, i, GC_COLOUR_FREE);
        GC_SET_STATE(info, i, GC_STATE_NORMAL);
    }
    GCBLOCK2FREE(info, nr-1)->next = 0;
    info->free = GCBLOCK2FREE(info, 0);

    DBG(SLACKANAL,
        int slack = ((void *)info)
            + gc_pgsize - (void *)(GCBLOCK2MEM(info, nr));
        totalslack += slack;
    )

    return (info);
}

/*
 * Allocate a new block of GC'ed memory.  The block will contain one object
 */
static
gc_block*
gc_large_block(size_t sz)
{
    gc_block* info;
    size_t msz;

    /* Add in management overhead */
    msz = sz+GCBLOCK_OVH+2+ROUNDUPALIGN(1);
    /* Round size up to a number of pages */
    msz = ROUNDUPPAGESIZE(msz);

    info = gc_primitive_alloc(msz);
    if (info == 0) {
        return (0);
    }

    /* Setup the meta-data for the block */
    DBG(GCDIAG, info->magic = GC_MAGIC);
    info->size = sz;
    info->nr = 1;
    info->avail = 1;
    info->funcs = (uint8*)GCBLOCK2BASE(info);
    info->state = (uint8*)(info->funcs + 1);
    info->data = (uint8*)ROUNDUPALIGN(info->state + 1);
    info->free = 0;

    DBG(GCDIAG, memset(info->data, 0, sz));

    GCBLOCK2FREE(info, 0)->next = 0;

    /*
     * XXX gc_large_block only called during a block allocation.
     * The following is just going to get overwritten.  (Right?)
     */
    GC_SET_COLOUR(info, 0, GC_COLOUR_FREE);
    GC_SET_STATE(info, 0, GC_STATE_NORMAL);

    return (info);
}

/*
 * Primitive block management:  Allocating and freeing whole pages.
 *
 * Unused pages may be marked unreadable.  This is only done when
 * compiled with DEBUG.
 */

#ifndef PROT_NONE
#define PROT_NONE 0
#endif

#if !defined(HAVE_MPROTECT) || !defined(DEBUG)
#define mprotect(A,L,P)
#define ALL_PROT
#define NO_PROT
#else
/* In a sense, this is backwards. */
#define ALL_PROT PROT_READ|PROT_WRITE|PROT_EXEC
#define NO_PROT  PROT_NONE
#endif

/* Mark this block as in-use */
static inline void
gc_block_add(gc_block *b)
{
    b->inuse = 1;
    mprotect(GCBLOCK2BASE(b), b->size, ALL_PROT);
}

/* Mark this block as free */
static inline void
gc_block_rm(gc_block *b)
{
    b->inuse = 0;
    mprotect(GCBLOCK2BASE(b), b->size, NO_PROT);
}

/*
 * Allocate a block of memory from the free list or, failing that, the
 * system pool.
 */
static
gc_block*
gc_primitive_alloc(size_t sz)
{
    gc_block* ptr;
    gc_block** pptr;

    assert(sz % gc_pgsize == 0);

    for (pptr = &gc_prim_freelist; *pptr != 0; pptr = &ptr->next) {
        ptr = *pptr;
        /* First fit */
        if (sz <= ptr->size) {
            size_t left;

            /* If there's more than a page left, split it */
            left = ptr->size - sz;
            if (left >= gc_pgsize) {
                gc_block* nptr;

                ptr->size = sz;
                nptr = GCBLOCKEND(ptr);
                nptr->size = left;
                DBG(GCDIAG, nptr->magic = GC_MAGIC);
                nptr->next = ptr->next;
                ptr->next = nptr;
            }
            *pptr = ptr->next;

            DBG(GCPRIM,
                dprintf("gc_primitive_alloc: %d bytes from freelist @ %p\n",
                        ptr->size, ptr); )

            gc_block_add(ptr);
            return (ptr);
        }
    }

    /* Nothing found on free list */
    return (0);
}
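/*
 * Note on the primitive freelist: gc_prim_freelist is kept in address
 * order.  gc_primitive_alloc() above takes the first block large enough
 * (first fit) and splits off any remainder of at least a page, while
 * gc_primitive_free() below reinserts blocks at their address-ordered
 * position and coalesces them with adjacent free neighbours.
 */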
/*
 * Return a block of memory to the free list.
 */
void
gc_primitive_free(gc_block* mem)
{
    gc_block* lptr;
    gc_block* nptr;

    assert(mem->size % gc_pgsize == 0);

    /* Remove from object hash */
    gc_block_rm(mem);
    mem->next = 0;

    if (mem < gc_prim_freelist || gc_prim_freelist == 0) {
        /* If this block is directly before the first block on the
         * freelist, merge it into that block.  Otherwise just
         * attach it to the beginning.
         */
        if (GCBLOCKEND(mem) == gc_prim_freelist) {
            DBG(GCPRIM,
                dprintf("gc_primitive_free: Merging (%d,%p) beginning of freelist\n",
                        mem->size, mem); )
            mem->size += gc_prim_freelist->size;
            mem->next = gc_prim_freelist->next;
        }
        else {
            DBG(GCPRIM,
                dprintf("gc_primitive_free: Prepending (%d,%p) beginning of freelist\n",
                        mem->size, mem); )
            mem->next = gc_prim_freelist;
        }
        gc_prim_freelist = mem;
        return;
    }

    /* Search the freelist for the logical place to put this block */
    lptr = gc_prim_freelist;
    while (lptr->next != 0) {
        nptr = lptr->next;
        if (mem > lptr && mem < nptr) {
            /* Block goes here in the logical scheme of things.
             * Work out how to merge it with those which come
             * before and after.
             */
            if (GCBLOCKEND(lptr) == mem) {
                if (GCBLOCKEND(mem) == nptr) {
                    /* Merge with last and next */
                    DBG(GCPRIM,
                        dprintf("gc_primitive_free: Merging (%d,%p) into list\n",
                                mem->size, mem); )
                    lptr->size += mem->size + nptr->size;
                    lptr->next = nptr->next;
                }
                else {
                    /* Merge with last but not next */
                    DBG(GCPRIM,
                        dprintf("gc_primitive_free: Merging (%d,%p) with last in list\n",
                                mem->size, mem); )
                    lptr->size += mem->size;
                }
            }
            else {
                if (GCBLOCKEND(mem) == nptr) {
                    /* Merge with next but not last */
                    DBG(GCPRIM,
                        dprintf("gc_primitive_free: Merging (%d,%p) with next in list\n",
                                mem->size, mem); )
                    mem->size += nptr->size;
                    mem->next = nptr->next;
                    lptr->next = mem;
                }
                else {
                    /* Won't merge with either */
                    DBG(GCPRIM,
                        dprintf("gc_primitive_free: Inserting (%d,%p) into list\n",
                                mem->size, mem); )
                    mem->next = nptr;
                    lptr->next = mem;
                }
            }
            return;
        }
        lptr = nptr;
    }

    /* If 'mem' goes directly after the last block, merge it in.
     * Otherwise, just add it onto the end of the list.
     */
    if (GCBLOCKEND(lptr) == mem) {
        DBG(GCPRIM,
            dprintf("gc_primitive_free: Merge (%d,%p) onto last in list\n",
                    mem->size, mem); )
        lptr->size += mem->size;
    }
    else {
        DBG(GCPRIM,
            dprintf("gc_primitive_free: Append (%d,%p) onto last in list\n",
                    mem->size, mem); )
        lptr->next = mem;
    }
}

/*
 * Try to reserve some memory for OOM exception handling.  Gc once at
 * the beginning.  We start out looking for an arbitrary number of
 * pages (4), and cut our expectations in half until we are able to
 * meet them.
 */
gc_block *
gc_primitive_reserve(void)
{
    gc_block *r = 0;
    size_t size = 4 * gc_pgsize;

    while (size >= gc_pgsize && !(r = gc_primitive_alloc(size))) {
        if (size == gc_pgsize) {
            break;
        }
        size /= 2;
    }
    return r;
}

/*
 * System memory management:  Obtaining additional memory from the
 * OS.  This looks more complicated than it is, since it does not require
 * sbrk.
 */

/* Get some page-aligned memory from the system. */
static uintp
pagealloc(size_t size)
{
    void* ptr;

#define CHECK_OUT_OF_MEMORY(P)  if ((P) == 0) return 0;

#if defined(HAVE_SBRK)
    /* Our primary choice for basic memory allocation is sbrk() which
     * should avoid any unseen space overheads.
     */
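    /* sbrk() gives no alignment guarantee.  If the break comes back
     * unaligned, the loop below releases everything except the bytes
     * needed to pad the break up to the next page boundary and then
     * retries, so the following sbrk(size) starts page-aligned.
     */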
    for (;;) {
        int missed;

        ptr = sbrk(size);
        if (ptr == (void*)-1) {
            ptr = 0;
            break;
        }
        if ((uintp)ptr % gc_pgsize == 0) {
            break;
        }
        missed = gc_pgsize - ((uintp)ptr % gc_pgsize);
        DBG(GCSYSALLOC,
            dprintf("unaligned sbrk %p, missed %d bytes\n", ptr, missed));
        sbrk(-size + missed);
    }
    CHECK_OUT_OF_MEMORY(ptr);
#elif defined(HAVE_MEMALIGN)
    ptr = memalign(gc_pgsize, size);
    CHECK_OUT_OF_MEMORY(ptr);
#elif defined(HAVE_VALLOC)
    ptr = valloc(size);
    CHECK_OUT_OF_MEMORY(ptr);
#else
    /* Fallback ...
     * Allocate memory using malloc and align by hand.
     */
    size += gc_pgsize;
    ptr = malloc(size);
    CHECK_OUT_OF_MEMORY(ptr);
    ptr = (void*)((((uintp)ptr) + gc_pgsize - 1) & -gc_pgsize);
#endif
    addToCounter(&gcpages, "gcmem-system pages", 1, size);
    return ((uintp) ptr);
}

/* Free memory allocated with pagealloc */
static void
pagefree(uintp base, size_t size)
{
#ifdef HAVE_SBRK
    sbrk(-size);
#else
    /* it must have been allocated with memalign, valloc or malloc */
    free((void *)base);
#endif
}

/*
 * Allocate size bytes of heap memory, and return the corresponding
 * gc_block *.
 */
static void *
gc_block_alloc(size_t size)
{
    int size_pg = (size>>gc_pgbits);
    static int n_live = 0;      /* number of pages in java heap */
    static int nblocks;         /* number of gc_blocks in array */
    uintp heap_addr;
    static uintp last_addr;

    if (!gc_block_base) {
        nblocks = (gc_heap_limit>>gc_pgbits);
        nblocks += nblocks/4;
        gc_block_base = (uintp) malloc(nblocks * sizeof(gc_block));
        if (!gc_block_base) return 0;
        memset((void *)gc_block_base, 0, nblocks * sizeof(gc_block));
    }

    DBG(GCSYSALLOC, dprintf("pagealloc(%d)", size));
    heap_addr = pagealloc(size);
    DBG(GCSYSALLOC, dprintf(" => %p\n", heap_addr));

    if (!heap_addr) return 0;

    if (!gc_heap_base) {
        gc_heap_base = heap_addr;
    }

    if (GCMEM2BLOCK(heap_addr + size)
        > ((gc_block *)gc_block_base) + nblocks
        || heap_addr < gc_heap_base) {
        uintp old_blocks = gc_block_base;
        int onb = nblocks;
        int min_nb;     /* minimum size of array to hold heap_addr */
#if defined(KAFFE_STATS)
        static timespent growtime;
#endif

        startTiming(&growtime, "gctime-blockrealloc");

        /* Pick a new size for the gc_block array.  Remember, malloc
         * does not simply grow a memory segment.
         *
         * We can extrapolate how many gc_blocks we need for the
         * entire heap based on how many heap pages currently fit in
         * the gc_block array.  But, we must also make sure to
         * allocate enough blocks to cover the current allocation.
         */
        nblocks = (nblocks * (gc_heap_limit >> gc_pgbits)) / n_live;
        if (heap_addr < gc_heap_base)
            min_nb = nblocks + ((gc_heap_base - heap_addr) >> gc_pgbits);
        else
            min_nb = ((heap_addr + size) - gc_heap_base) >> gc_pgbits;
        nblocks = MAX(nblocks, min_nb);

        DBG(GCSYSALLOC,
            dprintf("growing block array from %d to %d elements\n",
                    onb, nblocks));

        jthread_spinon(0);
        gc_block_base = (uintp) realloc((void *) old_blocks,
                                        nblocks * sizeof(gc_block));
        if (!gc_block_base) {
            /* roll back this call */
            pagefree(heap_addr, size);
            gc_block_base = old_blocks;
            nblocks = onb;
            jthread_spinoff(0);
            return 0;
        }

        /* If the array's address has changed, we have to fix up the
         * pointers in the gc_blocks, as well as all external pointers
         * to the gc_blocks.  We can only fix gc_prim_freelist and the
         * size-freelist array.  There should be no gc_block *'s on
         * any stack now.
         */
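        /* Each stored gc_block pointer is rebased by 'delta', the byte
         * displacement of the reallocated array.  Note that the R()
         * macro below assigns through a (uintp) cast, which treats a
         * cast as an lvalue (an old GCC extension, not standard C).
         */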
        if (gc_block_base != old_blocks) {
            extern gc_block *gc_prim_freelist;
            int i;
            gc_block *b = (void *) gc_block_base;
            uintp delta = gc_block_base - old_blocks;
#define R(X) if (X) ((uintp) (X)) += delta

            DBG(GCSYSALLOC, dprintf("relocating gc_block array\n"));
            for (i = 0; i < onb; i++) R(b[i].next);
            memset(b + onb, 0, (nblocks - onb) * sizeof(gc_block));
            R(gc_prim_freelist);
            for (i = 0; freelist[i].list != (void*)-1; i++)
                R(freelist[i].list);
#undef R
        }
        jthread_spinoff(0);
        stopTiming(&growtime);
    }

    n_live += size_pg;
    last_addr = MAX(last_addr, heap_addr + size);
    gc_heap_range = last_addr - gc_heap_base;
    DBG(GCSYSALLOC, dprintf("%d unused bytes in heap addr range\n",
                            gc_heap_range - gc_heap_total));
    mprotect((void *) heap_addr, size, NO_PROT);
    return GCMEM2BLOCK(heap_addr);
}

static
void*
gc_system_alloc(size_t sz)
{
    gc_block* blk;

    assert(sz % gc_pgsize == 0);

    if (gc_heap_total == gc_heap_limit) {
        return (0);
    } else if (gc_heap_total + sz > gc_heap_limit) {
        /* take as much memory as we can */
        sz = gc_heap_limit - gc_heap_total;
        assert(sz % gc_pgsize == 0);
        DBG(GCSYSALLOC, dprintf("allocating up to limit\n"));
    }
#ifdef DEBUG
    gc_system_alloc_cnt++;
#endif

    blk = gc_block_alloc(sz);

    DBG(GCSYSALLOC,
        dprintf("gc_system_alloc: %d bytes at %p\n", sz, blk); )

    if (blk == 0) {
        return (0);
    }

    gc_heap_total += sz;
    assert(gc_heap_total <= gc_heap_limit);

    /* Place block into the freelist for subsequent use */
    DBG(GCDIAG, blk->magic = GC_MAGIC);
    blk->size = sz;

    /* Attach block to object hash */
    gc_block_add(blk);

    /* Free block into the system */
    gc_primitive_free(blk);

    return (blk);
}
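/*
 * Illustrative sketch only, not part of the original gc-mem.c: how a
 * caller (for example the collector's heap-growth path) might combine
 * the primitives above.  The wrapper name and the use of gc_pgsize as
 * the growth step are assumptions made for the example.
 */
#if 0
static gc_block*
example_grow_and_alloc(size_t objsize)
{
    gc_block* blk;

    /* Try to carve a page of 'objsize' objects out of the freelist. */
    blk = gc_small_block(objsize);
    if (blk == 0) {
        /* Freelist empty: pull more pages from the OS.  gc_system_alloc()
         * also seeds gc_prim_freelist via gc_primitive_free(). */
        if (gc_system_alloc(gc_pgsize) == 0) {
            return (0);     /* heap limit reached */
        }
        blk = gc_small_block(objsize);
    }
    return (blk);
}
#endif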