
📄 gc.c

📁 A garbage collector implemented in C for unix-like systems
💻 C
📖 Page 1 of 2
/******************************************************************************
 * gc.c
 *
 * A fully recycling epoch-based garbage collector. Works by counting
 * threads in and out of critical regions, to work out when
 * garbage queues can be fully deleted.
 *
 * Copyright (c) 2001-2003, K A Fraser
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>
#include "portable_defns.h"
#include "gc.h"

/*#define MINIMAL_GC*/
/*#define YIELD_TO_HELP_PROGRESS*/
#define PROFILE_GC

/* Recycled nodes are filled with this value if WEAK_MEM_ORDER. */
#define INVALID_BYTE 0
#define INITIALISE_NODES(_p,_c) memset((_p), INVALID_BYTE, (_c));

/* Number of unique block sizes we can deal with. */
#define MAX_SIZES 20

#define MAX_HOOKS 4

/*
 * The initial number of allocation chunks for each per-blocksize list.
 * Popular allocation lists will steadily increase the allocation unit
 * in line with demand.
 */
#define ALLOC_CHUNKS_PER_LIST 10

/*
 * How many times should a thread call gc_enter(), seeing the same epoch
 * each time, before it makes a reclaim attempt?
 */
#define ENTRIES_PER_RECLAIM_ATTEMPT 100

/*
 *  0: current epoch -- threads are moving to this;
 * -1: some threads may still throw garbage into this epoch;
 * -2: no threads can see this epoch => we can zero garbage lists;
 * -3: all threads see zeros in these garbage lists => move to alloc lists.
 */
#ifdef WEAK_MEM_ORDER
#define NR_EPOCHS 4
#else
#define NR_EPOCHS 3
#endif

/*
 * A chunk amortises the cost of allocation from shared lists. It also
 * helps when zeroing nodes, as it increases per-cacheline pointer density
 * and means that node locations don't need to be brought into the cache
 * (most architectures have a non-temporal store instruction).
 */
#define BLKS_PER_CHUNK 100
typedef struct chunk_st chunk_t;
struct chunk_st
{
    chunk_t *next;             /* chunk chaining                 */
    unsigned int i;            /* the next entry in blk[] to use */
    void *blk[BLKS_PER_CHUNK];
};

static struct gc_global_st
{
    CACHE_PAD(0);

    /* The current epoch. */
    VOLATILE unsigned int current;
    CACHE_PAD(1);

    /* Exclusive access to gc_reclaim(). */
    VOLATILE unsigned int inreclaim;
    CACHE_PAD(2);

    /*
     * RUN-TIME CONSTANTS (to first approximation)
     */

    /* Memory page size, in bytes. */
    unsigned int page_size;

    /* Node sizes (run-time constants). */
    int nr_sizes;
    int blk_sizes[MAX_SIZES];

    /* Registered epoch hooks. */
    int nr_hooks;
    hook_fn_t hook_fns[MAX_HOOKS];
    CACHE_PAD(3);

    /*
     * DATA WE MAY HIT HARD
     */

    /* Chain of free, empty chunks. */
    chunk_t * VOLATILE free_chunks;

    /* Main allocation lists. */
    chunk_t * VOLATILE alloc[MAX_SIZES];
    VOLATILE unsigned int alloc_size[MAX_SIZES];
#ifdef PROFILE_GC
    VOLATILE unsigned int total_size;
    VOLATILE unsigned int allocations;
#endif
} gc_global;
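
/*
 * Illustrative sketch (not part of the original gc.c): the epoch-advance
 * rule described in the header comment, written against hypothetical types.
 * `thread_state_t', `threads', `nr_threads' and `global_epoch' are
 * assumptions made only for this example; the real code keeps this state in
 * gc_global and the per-thread gc_t/ptst structures, and gc_reclaim()
 * performs a scan in this spirit.
 */
typedef struct {
    int          in_critical; /* non-zero while inside a critical region */
    unsigned int seen_epoch;  /* epoch observed on entering the region   */
} thread_state_t;

/*
 * The epoch may advance only once every thread currently inside a critical
 * region has observed the current epoch; at that point garbage queued two
 * epochs ago can no longer be referenced and may be recycled.
 */
static int can_advance_epoch(thread_state_t *threads, int nr_threads,
                             unsigned int global_epoch)
{
    int i;
    for ( i = 0; i < nr_threads; i++ )
    {
        if ( threads[i].in_critical &&
             (threads[i].seen_epoch != global_epoch) )
            return 0; /* this thread may still touch old-epoch garbage */
    }
    return 1;
}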

/* Per-thread state. */
struct gc_st
{
    /* Epoch that this thread sees. */
    unsigned int epoch;

    /* Number of calls to gc_enter() since last gc_reclaim() attempt. */
    unsigned int entries_since_reclaim;

#ifdef YIELD_TO_HELP_PROGRESS
    /* Number of calls to gc_reclaim() since we last yielded. */
    unsigned int reclaim_attempts_since_yield;
#endif

    /* Used by gc_async_barrier(). */
    void *async_page;
    int   async_page_state;

    /* Garbage lists. */
    chunk_t *garbage[NR_EPOCHS][MAX_SIZES];
    chunk_t *garbage_tail[NR_EPOCHS][MAX_SIZES];
    chunk_t *chunk_cache;

    /* Local allocation lists. */
    chunk_t *alloc[MAX_SIZES];
    unsigned int alloc_chunks[MAX_SIZES];

    /* Hook pointer lists. */
    chunk_t *hook[NR_EPOCHS][MAX_HOOKS];
};


#define MEM_FAIL(_s)                                                         \
do {                                                                         \
    fprintf(stderr, "OUT OF MEMORY: %d bytes at line %d\n", (_s), __LINE__); \
    exit(1);                                                                 \
} while ( 0 )


/* Allocate more empty chunks from the heap. */
#define CHUNKS_PER_ALLOC 1000
static chunk_t *alloc_more_chunks(void)
{
    int i;
    chunk_t *h, *p;

    h = p = ALIGNED_ALLOC(CHUNKS_PER_ALLOC * sizeof(*h));
    if ( h == NULL ) MEM_FAIL(CHUNKS_PER_ALLOC * sizeof(*h));

    for ( i = 1; i < CHUNKS_PER_ALLOC; i++ )
    {
        p->next = p + 1;
        p++;
    }

    p->next = h;

    return(h);
}


/* Put a chain of chunks onto a list. */
static void add_chunks_to_list(chunk_t *ch, chunk_t *head)
{
    chunk_t *h_next, *new_h_next, *ch_next;

    ch_next    = ch->next;
    new_h_next = head->next;

    do { ch->next = h_next = new_h_next; WMB_NEAR_CAS(); }
    while ( (new_h_next = CASPO(&head->next, h_next, ch_next)) != h_next );
}


/* Allocate a chain of @n empty chunks. Pointers may be garbage. */
static chunk_t *get_empty_chunks(int n)
{
    int i;
    chunk_t *new_rh, *rh, *rt, *head;

 retry:
    head = gc_global.free_chunks;
    new_rh = head->next;
    do {
        rh = new_rh;
        rt = head;
        WEAK_DEP_ORDER_RMB();
        for ( i = 0; i < n; i++ )
        {
            if ( (rt = rt->next) == head )
            {
                /* Allocate some more chunks. */
                add_chunks_to_list(alloc_more_chunks(), head);
                goto retry;
            }
        }
    }
    while ( (new_rh = CASPO(&head->next, rh, rt->next)) != rh );

    rt->next = rh;

    return(rh);
}


/* Get @n filled chunks, pointing at blocks of @sz bytes each. */
static chunk_t *get_filled_chunks(int n, int sz)
{
    chunk_t *h, *p;
    char *node;
    int i;

#ifdef PROFILE_GC
    ADD_TO(gc_global.total_size, n * BLKS_PER_CHUNK * sz);
    ADD_TO(gc_global.allocations, 1);
#endif

    node = ALIGNED_ALLOC(n * BLKS_PER_CHUNK * sz);
    if ( node == NULL ) MEM_FAIL(n * BLKS_PER_CHUNK * sz);

#ifdef WEAK_MEM_ORDER
    INITIALISE_NODES(node, n * BLKS_PER_CHUNK * sz);
#endif

    h = p = get_empty_chunks(n);
    do {
        p->i = BLKS_PER_CHUNK;
        for ( i = 0; i < BLKS_PER_CHUNK; i++ )
        {
            p->blk[i] = node;
            node += sz;
        }
    }
    while ( (p = p->next) != h );

    return(h);
}
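
/*
 * Illustrative sketch (not part of the original gc.c): the CASPO() retry
 * idiom used by add_chunks_to_list() and get_empty_chunks() above, recast
 * with C11 <stdatomic.h>.  CASPO() from portable_defns.h is assumed to be a
 * compare-and-swap on a pointer that returns the value previously held at
 * the address, so the loops above retry until that returned value matches
 * the one they expected; C11's CAS instead reports success and rewrites the
 * expected value, but the retry structure is the same.  node_t and
 * push_node() are hypothetical names introduced only for this example.
 */
#include <stdatomic.h>

typedef struct node_st { struct node_st *next; } node_t;

/* Push one node onto a lock-free list headed by *head. */
static void push_node(_Atomic(node_t *) *head, node_t *n)
{
    node_t *old = atomic_load(head);
    do {
        n->next = old;  /* link against the head we last observed       */
    } while ( !atomic_compare_exchange_weak(head, &old, n) );
                        /* on failure, `old' is refreshed and we retry  */
}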

/*
 * gc_async_barrier: Cause an asynchronous barrier in all other threads. We do
 * this by causing a TLB shootdown to be propagated to all other processors.
 * Each time such an action is required, this function calls:
 *   mprotect(async_page, <page size>, <new flags>)
 * Each thread's state contains a memory page dedicated for this purpose.
 */
#ifdef WEAK_MEM_ORDER
static void gc_async_barrier(gc_t *gc)
{
    mprotect(gc->async_page, gc_global.page_size,
             gc->async_page_state ? PROT_READ : PROT_NONE);
    gc->async_page_state = !gc->async_page_state;
}
#else
#define gc_async_barrier(_g) ((void)0)
#endif


/* Grab a level @i allocation chunk from main chain. */
static chunk_t *get_alloc_chunk(gc_t *gc, int i)
{
    chunk_t *alloc, *p, *new_p, *nh;
    unsigned int sz;

    alloc = gc_global.alloc[i];
    new_p = alloc->next;

    do {
        p = new_p;
        while ( p == alloc )
        {
            sz = gc_global.alloc_size[i];
            nh = get_filled_chunks(sz, gc_global.blk_sizes[i]);
            ADD_TO(gc_global.alloc_size[i], sz >> 3);
            gc_async_barrier(gc);
            add_chunks_to_list(nh, alloc);
            p = alloc->next;
        }
        WEAK_DEP_ORDER_RMB();
    }
    while ( (new_p = CASPO(&alloc->next, p, p->next)) != p );

    p->next = p;

    assert(p->i == BLKS_PER_CHUNK);
    return(p);
}


#ifndef MINIMAL_GC
/*
 * gc_reclaim: Scans the list of struct gc_perthread looking for the lowest
 * maximum epoch number seen by a thread that's in the list code. If it's the
 * current epoch, the "nearly-free" lists from the previous epoch are
 * reclaimed, and the epoch is incremented.
 */
static void gc_reclaim(void)
{
    ptst_t       *ptst, *first_ptst, *our_ptst = NULL;
    gc_t         *gc = NULL;
    unsigned long curr_epoch;
    chunk_t      *ch, *t;
    int           two_ago, three_ago, i, j;
