jsgc.c
/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sw=4 et tw=78:
 *
 * ***** BEGIN LICENSE BLOCK *****
 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
 *
 * The contents of this file are subject to the Mozilla Public License Version
 * 1.1 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * http://www.mozilla.org/MPL/
 *
 * Software distributed under the License is distributed on an "AS IS" basis,
 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
 * for the specific language governing rights and limitations under the
 * License.
 *
 * The Original Code is Mozilla Communicator client code, released
 * March 31, 1998.
 *
 * The Initial Developer of the Original Code is
 * Netscape Communications Corporation.
 * Portions created by the Initial Developer are Copyright (C) 1998
 * the Initial Developer. All Rights Reserved.
 *
 * Contributor(s):
 *
 * Alternatively, the contents of this file may be used under the terms of
 * either of the GNU General Public License Version 2 or later (the "GPL"),
 * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
 * in which case the provisions of the GPL or the LGPL are applicable instead
 * of those above. If you wish to allow use of your version of this file only
 * under the terms of either the GPL or the LGPL, and not to allow others to
 * use your version of this file under the terms of the MPL, indicate your
 * decision by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL or the LGPL. If you do not delete
 * the provisions above, a recipient may use your version of this file under
 * the terms of any one of the MPL, the GPL or the LGPL.
 *
 * ***** END LICENSE BLOCK ***** */

/*
 * JS Mark-and-Sweep Garbage Collector.
 *
 * This GC allocates fixed-sized things with sizes up to GC_NBYTES_MAX (see
 * jsgc.h). It allocates from a special GC arena pool with each arena
 * allocated using malloc. It uses an ideally parallel array of flag bytes to
 * hold the mark bit, finalizer type index, etc.
 *
 * XXX swizzle page to freelist for better locality of reference
 */
#include "jsstddef.h"
#include <stdlib.h>     /* for free */
#include <string.h>     /* for memset used when DEBUG */
#include "jstypes.h"
#include "jsutil.h"     /* Added by JSIFY */
#include "jshash.h"     /* Added by JSIFY */
#include "jsapi.h"
#include "jsatom.h"
#include "jsbit.h"
#include "jsclist.h"
#include "jscntxt.h"
#include "jsconfig.h"
#include "jsdbgapi.h"
#include "jsexn.h"
#include "jsfun.h"
#include "jsgc.h"
#include "jsinterp.h"
#include "jsiter.h"
#include "jslock.h"
#include "jsnum.h"
#include "jsobj.h"
#include "jsscope.h"
#include "jsscript.h"
#include "jsstr.h"

#if JS_HAS_XML_SUPPORT
#include "jsxml.h"
#endif

/*
 * GC arena sizing depends on amortizing arena overhead using a large number
 * of things per arena, and on the thing/flags ratio of 8:1 on most platforms.
 *
 * On 64-bit platforms, we would have half as many things per arena because
 * pointers are twice as big, so we double the bytes for things per arena.
 * This preserves the 1024 byte flags sub-arena size, which relates to the
 * GC_PAGE_SIZE (see below for why).
 */
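/*
 * Editor's aside, not in the original file: the numerology above, spelled
 * out.  Assuming sizeof(JSGCThing) is 8 bytes on 32-bit and 16 bytes on
 * 64-bit platforms, either choice of GC_THINGS_SHIFT below leaves the same
 * 1024-byte flags sub-arena, matching GC_PAGE_SIZE.
 */
JS_STATIC_ASSERT(JS_BIT(13) / 8 == JS_BIT(10));   /* 32-bit: 8K things / 8   */
JS_STATIC_ASSERT(JS_BIT(14) / 16 == JS_BIT(10));  /* 64-bit: 16K things / 16 */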
#if JS_BYTES_PER_WORD == 8
# define GC_THINGS_SHIFT 14     /* 16KB for things on Alpha, etc. */
#else
# define GC_THINGS_SHIFT 13     /* 8KB for things on most platforms */
#endif
#define GC_THINGS_SIZE  JS_BIT(GC_THINGS_SHIFT)
#define GC_FLAGS_SIZE   (GC_THINGS_SIZE / sizeof(JSGCThing))

/*
 * A GC arena contains one flag byte for each thing in its heap, and supports
 * O(1) lookup of a flag given its thing's address.
 *
 * To implement this, we take advantage of the thing/flags numerology: given
 * the 8K bytes worth of GC-things, there are 1K flag bytes. Within each 9K
 * allocation for things+flags there are always 8 consecutive 1K pages, each
 * aligned on a 1K boundary. We use these pages to allocate things and the
 * remaining 1K of space before and after the aligned pages to store flags.
 * If we are really lucky and things+flags starts on a 1K boundary, then the
 * flags consist of a single 1K chunk that comes after 8K of things.
 * Otherwise there are 2 chunks of flags, one before and one after things.
 *
 * To be able to find the flag byte for a particular thing, we put a
 * JSGCPageInfo record at the beginning of each 1K-aligned page to hold that
 * page's offset from the beginning of the things+flags allocation, and we
 * allocate things after this record. Thus for each thing
 *     thing_address & ~1023
 * gives the address of a JSGCPageInfo record, from which we read page_offset.
 * Due to the page alignment,
 *     (page_offset & ~1023) + (thing_address & 1023)
 * gives thing_offset from the beginning of the 8K of paged things. We then
 * divide thing_offset by sizeof(JSGCThing) to get thing_index.
 *
 * Now page_address - page_offset is the things+flags arena_address, and
 * (page_offset & 1023) is the offset of the first page from the start of
 * the things+flags area. Thus if
 *     thing_index < (page_offset & 1023)
 * then
 *     allocation_start_address + thing_index < address_of_the_first_page
 * and we use
 *     allocation_start_address + thing_index
 * as the address to store the thing's flags. If
 *     thing_index >= (page_offset & 1023),
 * then we use the chunk of flags that comes after the pages with things
 * and calculate the address for the flag byte as
 *     address_of_the_first_page + 8K + (thing_index - (page_offset & 1023))
 * which is just
 *     allocation_start_address + thing_index + 8K.
 *
 * When we allocate things with size equal to sizeof(JSGCThing), the overhead
 * of this scheme for 32-bit platforms is (8+8*(8+1))/(8+9K), or 0.87%
 * (assuming 4 bytes for each JSGCArena header, and 8 bytes for each
 * JSGCThing and JSGCPageInfo). When thing_size > 8, the scheme wastes the
 * flag byte for each extra 8 bytes beyond sizeof(JSGCThing) in thing_size,
 * and the overhead is close to 1/8 or 12.5%.
 * FIXME: How can we avoid this overhead?
 *
 * Here's some ASCII art showing an arena:
 *
 *    split or the first 1-K aligned address.
 *    |
 *    V
 * +--+-------+-------+-------+-------+-------+-------+-------+-------+-----+
 * |fB|  tp0  |  tp1  |  tp2  |  tp3  |  tp4  |  tp5  |  tp6  |  tp7  | fA  |
 * +--+-------+-------+-------+-------+-------+-------+-------+-------+-----+
 *              ^                                 ^
 *  tI ---------+                                 |
 *  tJ --------------------------------------------+
 *
 *  - fB are the "before split" flags, fA are the "after split" flags
 *  - tp0-tp7 are the 8 thing pages
 *  - thing tI points into tp1, whose flags are below the split, in fB
 *  - thing tJ points into tp5, clearly above the split
 *
 * In general, one of the thing pages will have some of its things' flags on
 * the low side of the split, and the rest of its things' flags on the high
 * side. All the other pages have flags only below or only above.
 */
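/*
 * Editor's sketch of the flag lookup described above, compiled out because
 * it refers to JSGCPageInfo and the GC_PAGE_* macros defined just below.
 * The function name is hypothetical, for illustration only; the arithmetic
 * follows the comment step by step.
 */
#if 0
static uint8 *
ThingToFlagAddress(void *thing)
{
    JSGCPageInfo *pi;
    jsuword pageOffset, thingIndex;
    uint8 *allocationStart;

    /* thing_address & ~1023 gives the page's JSGCPageInfo record. */
    pi = THING_TO_PAGE(thing);
    pageOffset = pi->offsetInArena;

    /* thing_offset within the 8K of paged things, in JSGCThing units. */
    thingIndex = ((pageOffset & ~GC_PAGE_MASK) |
                  ((jsuword)thing & GC_PAGE_MASK)) / sizeof(JSGCThing);

    /* page_address - page_offset is the start of the things+flags area. */
    allocationStart = (uint8 *)pi - pageOffset;

    /*
     * Below the split, flags precede the first page; otherwise they come
     * after the GC_THINGS_SIZE bytes of things.
     */
    if (thingIndex < (pageOffset & GC_PAGE_MASK))
        return allocationStart + thingIndex;
    return allocationStart + thingIndex + GC_THINGS_SIZE;
}
#endif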
/*
 * (If we need to implement card-marking for an incremental GC write barrier,
 * we can replace the word-sized offsetInArena in JSGCPageInfo by a pair of
 * uint8 card_mark and uint16 offsetInArena fields, as the offset cannot
 * exceed GC_THINGS_SIZE. This would give an extremely efficient write
 * barrier: when mutating an object obj, just store a 1 byte at
 * (uint8 *) ((jsuword)obj & ~1023) on 32-bit platforms; see the sketch
 * following the assertions below.)
 */
#define GC_PAGE_SHIFT   10
#define GC_PAGE_MASK    ((jsuword) JS_BITMASK(GC_PAGE_SHIFT))
#define GC_PAGE_SIZE    JS_BIT(GC_PAGE_SHIFT)
#define GC_PAGE_COUNT   (1 << (GC_THINGS_SHIFT - GC_PAGE_SHIFT))

typedef struct JSGCPageInfo {
    jsuword     offsetInArena;      /* offset from the arena start */
    jsuword     unscannedBitmap;    /* bitset for fast search of marked
                                       but not yet scanned GC things */
} JSGCPageInfo;

struct JSGCArena {
    JSGCArenaList   *list;          /* allocation list for the arena */
    JSGCArena       *prev;          /* link field for allocation list */
    JSGCArena       *prevUnscanned; /* link field for the list of arenas
                                       with marked but not yet scanned
                                       things */
    jsuword         unscannedPages; /* bitset for fast search of pages
                                       with marked but not yet scanned
                                       things */
    uint8           base[1];        /* things+flags allocation area */
};

#define GC_ARENA_SIZE                                                         \
    (offsetof(JSGCArena, base) + GC_THINGS_SIZE + GC_FLAGS_SIZE)

#define FIRST_THING_PAGE(a)                                                   \
    (((jsuword)(a)->base + GC_FLAGS_SIZE - 1) & ~GC_PAGE_MASK)

#define PAGE_TO_ARENA(pi)                                                     \
    ((JSGCArena *)((jsuword)(pi) - (pi)->offsetInArena                        \
                   - offsetof(JSGCArena, base)))

#define PAGE_INDEX(pi)                                                        \
    ((size_t)((pi)->offsetInArena >> GC_PAGE_SHIFT))

#define THING_TO_PAGE(thing)                                                  \
    ((JSGCPageInfo *)((jsuword)(thing) & ~GC_PAGE_MASK))

/*
 * Given a thing size n, return the size of the gap from the page start before
 * the first thing. We know that any n not a power of two packs from the end
 * of the page, leaving at least enough room for one JSGCPageInfo, but not for
 * another thing, at the front of the page (the JS_ASSERTs below insist on
 * this).
 *
 * This works because all allocations are a multiple of
 * sizeof(JSGCThing) == sizeof(JSGCPageInfo) in size.
 */
#define PAGE_THING_GAP(n) (((n) & ((n) - 1)) ? (GC_PAGE_SIZE % (n)) : (n))

#ifdef JS_THREADSAFE
/*
 * The maximum number of things to put on the local free list by taking
 * several things from the global free list or from the tail of the last
 * allocated arena, to amortize the cost of rt->gcLock.
 *
 * We use the number 8 based on benchmarks from bug 312238.
 */
#define MAX_THREAD_LOCAL_THINGS 8
#endif

JS_STATIC_ASSERT(sizeof(JSGCThing) == sizeof(JSGCPageInfo));
JS_STATIC_ASSERT(sizeof(JSGCThing) >= sizeof(JSObject));
JS_STATIC_ASSERT(sizeof(JSGCThing) >= sizeof(JSString));
JS_STATIC_ASSERT(sizeof(JSGCThing) >= sizeof(jsdouble));
JS_STATIC_ASSERT(GC_FLAGS_SIZE >= GC_PAGE_SIZE);
JS_STATIC_ASSERT(sizeof(JSStackHeader) >= 2 * sizeof(jsval));
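/*
 * Editor's sketch of the card-marking write barrier suggested in the
 * comment above GC_PAGE_SHIFT; compiled out because it assumes the
 * *modified* JSGCPageInfo layout (uint8 card_mark followed by uint16
 * offsetInArena), which this version does not have.  With that layout,
 * marking the card for a mutated object is a single byte store:
 */
#if 0
static void
CardMarkingWriteBarrier(JSObject *obj)
{
    /* The page's hypothetical card_mark byte sits at the 1K boundary
       below obj, at offset 0 of its JSGCPageInfo record. */
    *(uint8 *)((jsuword)obj & ~GC_PAGE_MASK) = 1;
}
#endif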
/*
 * JSPtrTable capacity growth descriptor. The table grows by powers of two
 * starting from capacity JSPtrTableInfo.minCapacity, but switching to linear
 * growth when capacity reaches JSPtrTableInfo.linearGrowthThreshold.
 */
typedef struct JSPtrTableInfo {
    uint16      minCapacity;
    uint16      linearGrowthThreshold;
} JSPtrTableInfo;

#define GC_ITERATOR_TABLE_MIN     4
#define GC_ITERATOR_TABLE_LINEAR  1024

static const JSPtrTableInfo iteratorTableInfo = {
    GC_ITERATOR_TABLE_MIN,
    GC_ITERATOR_TABLE_LINEAR
};

/* Calculate table capacity based on the current value of JSPtrTable.count. */
static size_t
PtrTableCapacity(size_t count, const JSPtrTableInfo *info)
{
    size_t linear, log, capacity;

    linear = info->linearGrowthThreshold;
    JS_ASSERT(info->minCapacity <= linear);

    if (count == 0) {
        capacity = 0;
    } else if (count < linear) {
        log = JS_CEILING_LOG2W(count);
        JS_ASSERT(log != JS_BITS_PER_WORD);
        capacity = (size_t)1 << log;
        if (capacity < info->minCapacity)
            capacity = info->minCapacity;
    } else {
        capacity = JS_ROUNDUP(count, linear);
    }

    JS_ASSERT(capacity >= count);
    return capacity;
}

static void
FreePtrTable(JSPtrTable *table, const JSPtrTableInfo *info)
{
    if (table->array) {
        JS_ASSERT(table->count > 0);
        free(table->array);
        table->array = NULL;
        table->count = 0;
    }
    JS_ASSERT(table->count == 0);
}

static JSBool
AddToPtrTable(JSContext *cx, JSPtrTable *table, const JSPtrTableInfo *info,
              void *ptr)
{
    size_t count, capacity;
    void **array;

    count = table->count;
    capacity = PtrTableCapacity(count, info);

    if (count == capacity) {
        if (capacity < info->minCapacity) {
            JS_ASSERT(capacity == 0);
            JS_ASSERT(!table->array);
            capacity = info->minCapacity;
        } else {
            /*
             * Simplify the overflow detection assuming a pointer is bigger
             * than a byte.
             */
            JS_STATIC_ASSERT(2 <= sizeof table->array[0]);
            capacity = (capacity < info->linearGrowthThreshold)
                       ? 2 * capacity
                       : capacity + info->linearGrowthThreshold;
            if (capacity > (size_t)-1 / sizeof table->array[0])
                goto bad;
        }
        array = (void **) realloc(table->array,
                                  capacity * sizeof table->array[0]);
        if (!array)
            goto bad;
#ifdef DEBUG
        memset(array + count, JS_FREE_PATTERN,
               (capacity - count) * sizeof table->array[0]);
#endif
        table->array = array;
    }

    table->array[count] = ptr;
    table->count = count + 1;
    return JS_TRUE;

  bad:
    JS_ReportOutOfMemory(cx);
    return JS_FALSE;
}

static void
ShrinkPtrTable(JSPtrTable *table, const JSPtrTableInfo *info,
               size_t newCount)
{
    size_t oldCapacity, capacity;
    void **array;

    JS_ASSERT(newCount <= table->count);
    if (newCount == table->count)
        return;

    oldCapacity = PtrTableCapacity(table->count, info);
    table->count = newCount;
    capacity = PtrTableCapacity(newCount, info);

    if (oldCapacity != capacity) {
        array = table->array;
        JS_ASSERT(array);
        if (capacity == 0) {
            free(array);
            table->array = NULL;
            return;
        }
        array = (void **) realloc(array, capacity * sizeof array[0]);
        if (array)
            table->array = array;
    }
#ifdef DEBUG
    memset(table->array + newCount, JS_FREE_PATTERN,
           (capacity - newCount) * sizeof table->array[0]);
#endif
}

#ifdef JS_GCMETER
# define METER(x) x
#else
# define METER(x) ((void) 0)
#endif

static JSBool
NewGCArena(JSRuntime *rt, JSGCArenaList *arenaList)
{
    JSGCArena *a;
    jsuword offset;
    JSGCPageInfo *pi;
    uint32 *bytesptr;

    /* Check if we are allowed and can allocate a new arena. */
    if (rt->gcBytes >= rt->gcMaxBytes)
        return JS_FALSE;
    a = (JSGCArena *)malloc(GC_ARENA_SIZE);
    if (!a)
        return JS_FALSE;

    /* Initialize the JSGCPageInfo records at the start of every thing page. */
    offset = (GC_PAGE_SIZE - ((jsuword)a->base & GC_PAGE_MASK)) & GC_PAGE_MASK;
    JS_ASSERT((jsuword)a->base + offset == FIRST_THING_PAGE(a));
    do {
        pi = (JSGCPageInfo *) (a->base + offset);
        pi->offsetInArena = offset;
        pi->unscannedBitmap = 0;
        offset += GC_PAGE_SIZE;
    } while (offset < GC_THINGS_SIZE);

    METER(++arenaList->stats.narenas);
    METER(arenaList->stats.maxarenas
          = JS_MAX(arenaList->stats.maxarenas, arenaList->stats.narenas));

    a->list = arenaList;