📄 gc-mem.c
字号:
out:
	/* tail of gc_heap_alloc (its declaration is above this chunk) */
	stopTiming(&heap_alloc_time);
	unlockStaticMutex(&gc_heap_lock);
	return (mem);
}

/**
 * Free a piece of memory.
 *
 * The object's colour is reset to FREE and, for small objects, the
 * storage is pushed back onto its block's free list.  A block whose
 * sub-objects are all free — and any large-object block — is returned
 * to the primitive (page) allocator.
 */
void
gc_heap_free(void* mem)
{
	gc_block* info;
	gc_freeobj* obj;
	int lnr;
	int msz;
	int idx;
#if defined(KAFFE_STATS)
	static timespent heap_free_time;
#endif

	/* Locate the block header for this object and the object's
	 * index within that block. */
	info = gc_mem2block(mem);
	idx = GCMEM2IDX(info, mem);

	DBG(GCDIAG,
	    gc_heap_check();
	    assert(gc_check_magic_marker(info));
	    assert(KGC_GET_COLOUR(info, idx) != KGC_COLOUR_FREE));

	/* Mark the slot free in the colour map; this is what the
	 * collector consults. */
	KGC_SET_COLOUR(info, idx, KGC_COLOUR_FREE);

DBG(GCFREE,
	dprintf("gc_heap_free: memory %p size %d\n", mem, info->size); );

	lockStaticMutex(&gc_heap_lock);
	startTiming(&heap_free_time, "gc_heap_free");

	if (KGC_SMALL_OBJECT(info->size)) {
		/* Small object: push it back onto the block's free list. */
		lnr = sztable[info->size].list;
		info->avail++;
		DBG(GCDIAG,
		    /* write pattern in memory to see when live objects were
		     * freed - Note that (f4f4f4f4 == -185273100) */
		    memset(mem, 0xf4, info->size));
		obj = GCMEM2FREE(mem);
		obj->next = info->free;
		info->free = obj;

		ASSERT_ONBLOCK(obj, info);

		/* If we free all sub-blocks, free the block */
		assert(info->avail <= info->nr);
		if (info->avail == info->nr) {
			/*
			 * note that *finfo==0 is ok if we free a block
			 * whose small object is so large that it can
			 * only contain one object.
			 */
			gc_block** finfo = &freelist[lnr].list;

			/* unlink this block from its size-class freelist */
			for (;*finfo;) {
				if (*finfo == info) {
					(*finfo) = info->next;
					break;
				}
				finfo = &(*finfo)->next;
			}

			/* hand the whole page back to the primitive layer */
			info->size = gc_pgsize;
			gc_primitive_free(info);
		} else if (info->avail==1) {
			/*
			 * If this block contains no free sub-blocks yet, attach
			 * it to freelist.
			 */
			gc_block **finfo = &freelist[lnr].list;
			info->next = *finfo;
			*finfo = info;
		}
	}
	else {
		/* Calculate true size of block */
		msz = info->size + 2 + ROUNDUPALIGN(1);
		msz = ROUNDUPPAGESIZE(msz);
		info->size = msz;
		gc_primitive_free(info);
	}

	stopTiming(&heap_free_time);
	unlockStaticMutex(&gc_heap_lock);

DBG(GCDIAG,
	gc_heap_check(); );
}

/*
 * Allocate a new block of GC'ed memory.  The block will contain 'nr' objects
 * each of 'sz' bytes.
*/
static
gc_block*
gc_small_block(size_t sz)
{
	gc_block* info;
	int i;
	int nr;

	/* each free slot must be able to hold the free-list link */
	assert(sz >= sizeof(gc_block *));

	info = gc_primitive_alloc(gc_pgsize);
	if (info == 0) {
		return (NULL);
	}

	/* Calculate number of objects in this block
	 * (the +2 accounts for the per-object funcs and state bytes) */
	nr = (gc_pgsize-ROUNDUPALIGN(1))/(sz+2);

	/* Setup the meta-data for the block */
	DBG(GCDIAG, gc_set_magic_marker(info));
	info->size = sz;
	info->nr = nr;
	info->avail = nr;
	/* per-object funcs and state bytes sit at the base of the block;
	 * object data follows, rounded up to the alignment boundary */
	info->funcs = (uint8*)GCBLOCK2BASE(info);
	info->state = (uint8*)(info->funcs + nr);
	info->data = (uint8*)ROUNDUPALIGN(info->state + nr);

	DBG(GCDIAG, memset(info->data, 0, sz * nr));

	/* Build the objects into a free list
	 * (the loop covers 0..nr-2; the last object is terminated below) */
	for (i = nr-1; i-- > 0;) {
		GCBLOCK2FREE(info, i)->next = GCBLOCK2FREE(info, i+1);
		KGC_SET_COLOUR(info, i, KGC_COLOUR_FREE);
		KGC_SET_STATE(info, i, KGC_STATE_NORMAL);
	}
	GCBLOCK2FREE(info, nr-1)->next = NULL;
	KGC_SET_COLOUR(info, nr-1, KGC_COLOUR_FREE);
	KGC_SET_STATE(info, nr-1, KGC_STATE_NORMAL);
	info->free = GCBLOCK2FREE(info, 0);

DBG(SLACKANAL,
	/* account for the unused tail of the page */
	int slack = ((char *)info) + gc_pgsize - (char *)(GCBLOCK2MEM(info, nr));
	totalslack += slack;
	);

	return (info);
}

/*
 * Allocate a new block of GC'ed memory.
The block will contain one object */
static
gc_block*
gc_large_block(size_t sz)
{
	gc_block* info;
	size_t msz;
	size_t block_count;

	/* Add in management overhead */
	msz = sz+2+ROUNDUPALIGN(1);
	/* Round size up to a number of pages */
	msz = ROUNDUPPAGESIZE(msz);

	info = gc_primitive_alloc(msz);
	if (info == 0) {
		return (NULL);
	}

	/* number of pages allocated for block */
	block_count = msz >> gc_pgbits;

	DBG(GCPRIM, dprintf ("large block covers %zx pages\n", block_count); );

	/* Setup the meta-data for the block */
	DBG(GCDIAG, gc_set_magic_marker(info));
	info->size = sz;
	info->nr = 1;
	info->avail = 1;
	/* single funcs/state byte pair; object data follows, aligned */
	info->funcs = (uint8*)GCBLOCK2BASE(info);
	info->state = (uint8*)(info->funcs + 1);
	info->data = (uint8*)ROUNDUPALIGN(info->state + 1);
	info->free = NULL;

	DBG(GCDIAG, memset(info->data, 0, sz));

	GCBLOCK2FREE(info, 0)->next = NULL;

	/* now initialize the other blocks that were allocated
	 * (the per-page headers all describe the same single object;
	 * avail==0 marks them as continuation pages) */
	while (--block_count > 0) {
		info[block_count].size = sz;
		info[block_count].nr = 1;
		info[block_count].avail = 0;
		info[block_count].funcs = info[0].funcs;
		info[block_count].state = info[0].state;
		info[block_count].data = info[0].data;
		info[block_count].free = NULL;
	}

	/*
	 * XXX gc_large_block only called during a block allocation.
	 * The following is just going to get overwritten. (Right?)
	 */
	KGC_SET_COLOUR(info, 0, KGC_COLOUR_FREE);
	KGC_SET_STATE(info, 0, KGC_STATE_NORMAL);

	return (info);
}

/*
 * Primitive block management:  Allocating and freeing whole pages.
 *
 * Each primitive block of the heap consists of one or more contiguous
 * pages. Pages of unused primitive blocks are marked unreadable when
 * kaffe is compiled with debugging enabled. Whether a block is in use
 * can be determined by its nr field: when it's in use, its nr field
 * will be > 0.
 *
 * All primitive blocks are chained through their pnext / pprev fields,
 * no matter whether or not they are in use. This makes the necessary
 * check for mergable blocks as cheap as possible.
Merging small blocks
 * is necessary so that single unused primitive blocks in the heap are
 * always as large as possible. The first block in the list is stored
 * in gc_block_base, the last block in the list is gc_last_block.
 *
 * In order to speed up the search for the primitive block that fits
 * a given allocation request best, small primitive blocks are stored
 * in several lists (one per size). If no primitive blocks of a given
 * size are left, a larger one is splitted instead.
 */
#define KGC_PRIM_LIST_COUNT 20

/* one freelist per page count 1..KGC_PRIM_LIST_COUNT, plus a final
 * catch-all list for anything larger */
static gc_block *gc_prim_freelist[KGC_PRIM_LIST_COUNT+1];

#ifndef PROT_NONE
#define PROT_NONE 0
#endif

#if !defined(HAVE_MPROTECT) || !defined(HAVE_COMPATIBLE_MPROTECT)
#define mprotect(A,L,P)
#define ALL_PROT
#define NO_PROT
#else
/* In a sense, this is backwards. */
#define ALL_PROT PROT_READ|PROT_WRITE|PROT_EXEC
#define NO_PROT PROT_NONE
#endif

/* Mark a primitive block as used */
static inline void gc_block_add(gc_block *b)
{
	b->nr = 1;
#if defined(KAFFE_VMDEBUG)
	mprotect(GCBLOCK2BASE(b), b->size, ALL_PROT);
#endif
}

/* Mark a primitive block as unused */
static inline void gc_block_rm(gc_block *b)
{
	b->nr = 0;
#if defined(KAFFE_VMDEBUG)
	mprotect(GCBLOCK2BASE(b), b->size, NO_PROT);
#endif
}

/* Get the end a gc block
 *
 * This is OK, gc_prim_(alloc|free) never assume GCBLOCKEND is really
 * a valid block
 */
static inline gc_block*
gc_block_end(gc_block *b)
{
	/* round the byte size up to whole pages */
	return b + ((b->size+gc_pgsize-1)>>gc_pgbits);
}

/* return the prim list blk belongs to */
static inline gc_block **
gc_get_prim_freelist (gc_block *blk)
{
	size_t sz = blk->size >> gc_pgbits;

	if (sz <= KGC_PRIM_LIST_COUNT) {
		assert (sz > 0);
		return &gc_prim_freelist[sz-1];
	}

	return &gc_prim_freelist[KGC_PRIM_LIST_COUNT];
}

/* add a primitive block to the correct freelist */
static inline void
gc_add_to_prim_freelist(gc_block *blk)
{
	gc_block **list = gc_get_prim_freelist (blk);

	/* insert the block into the list, sorting by ascending addresses */
	while (*list && blk > *list) {
		list = &(*list)->next;
	}

	/* while on a prim freelist, a block's free field points back at
	 * the pointer that links to it (enables O(1) removal) */
	if (*list) {
		(*list)->free = (gc_freeobj
*)&blk->next;
	}
	blk->next = *list;
	*list = blk;
	blk->free = (gc_freeobj *)list;
}

/* remove a primitive block from its freelist */
static inline void
gc_remove_from_prim_freelist(gc_block *blk)
{
	/* blk->free points at the pointer that links to blk */
	*( (gc_block **) blk->free ) = blk->next;

	if (blk->next) {
		blk->next->free = blk->free;
	}
}

/*
 * Allocate a block of memory from the free list or, failing that, the
 * system pool.
 */
static
gc_block*
gc_primitive_alloc(size_t sz)
{
	size_t diff = 0;
	gc_block* best_fit = NULL;
	size_t i = sz >> gc_pgbits;

	assert(sz % gc_pgsize == 0);

	DBG(GCPRIM, dprintf("\ngc_primitive_alloc: got to allocate 0x%x bytes\n", (unsigned int)sz); );

	/* try freelists for small primitive blocks first */
	if (i <= KGC_PRIM_LIST_COUNT) {
		/* first-fit scan upward through the per-size lists */
		for (i-=1; i<KGC_PRIM_LIST_COUNT; i++) {
			if (gc_prim_freelist[i]) {
				best_fit = gc_prim_freelist[i];
				diff = gc_prim_freelist[i]->size - sz;
				break;
			}
		}
	}

	/* if that fails, try the big remaining list */
	if (!best_fit) {
		gc_block *ptr;
		for (ptr = gc_prim_freelist[KGC_PRIM_LIST_COUNT]; ptr != 0; ptr=ptr->next) {
			/* Best fit */
			if (sz == ptr->size) {
				diff = 0;
				best_fit = ptr;
				break;
			} else if (sz < ptr->size) {
				size_t left = ptr->size - sz;

				if (best_fit==NULL || left<diff) {
					diff = left;
					best_fit = ptr;
				}
			}
		}
	}

	/* if we found a block, remove it from the list and check if
	 * splitting is necessary */
	if (best_fit) {
		gc_remove_from_prim_freelist (best_fit);

		DBG(GCPRIM, dprintf ("gc_primitive_alloc: found best_fit %p diff 0x%x (0x%x - 0x%x)\n",
				     best_fit, (unsigned int)diff, best_fit->size, (unsigned int)sz); );
		assert ( diff % gc_pgsize == 0 );

		if (diff > 0) {
			gc_block *nptr;

			/* shrink the block to the requested size and carve
			 * the remainder off as a new free primitive block */
			best_fit->size = sz;
			nptr = gc_block_end(best_fit);
			nptr->size = diff;
			gc_block_rm (nptr);

			DBG(GCPRIM, dprintf ("gc_primitive_alloc: splitted remaining 0x%x bytes @ %p\n",
					     (unsigned int)diff, nptr); );

			DBG(GCDIAG, gc_set_magic_marker(nptr));

			/* maintain list of primitive blocks */
			nptr->pnext = best_fit->pnext;
			nptr->pprev = best_fit;
			best_fit->pnext = nptr;

			if (nptr->pnext) {
				nptr->pnext->pprev = nptr;
			} else {
gc_last_block = nptr;
			}

			/* and add nptr to one of the freelists */
			gc_add_to_prim_freelist (nptr);
		}

DBG(GCPRIM,	dprintf("gc_primitive_alloc: 0x%x bytes from freelist @ %p\n", best_fit->size, best_fit); );
		gc_block_add(best_fit);
		return (best_fit);
	}

DBG(GCPRIM,	dprintf("gc_primitive_alloc: no suitable block found!\n"); );

	/* Nothing found on free list */
	return (NULL);
}

/*
 * merge a primitive block with its successor.
 */
static inline void
gc_merge_with_successor (gc_block *blk)
{
	gc_block *next_blk = blk->pnext;

	assert (next_blk);

	/* absorb the successor's pages and splice it out of the chain */
	blk->size += next_blk->size;
	blk->pnext = next_blk->pnext;

	/*
	 * if the merged block has a successor, update its pprev field.
	 * otherwise, the merged block is the last block in the primitive
	 * chain.
	 */
	if (blk->pnext) {
		blk->pnext->pprev = blk;
	} else {
		gc_last_block = blk;
	}
}

/*
 * Return a block of memory to the free list.
 */
void
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -