os_dep.c
    result = (ptr_t) VirtualAlloc(result, bytes, MEM_COMMIT,
                                  PAGE_EXECUTE_READWRITE);
    if (result != NULL) {
        if (HBLKDISPL(result) != 0) ABORT("Bad VirtualAlloc result");
        GC_heap_lengths[i] += bytes;
    }
    return(result);
}
# endif

#ifdef USE_MUNMAP

/* For now, this only works on Win32/WinCE and some Unix-like   */
/* systems.  If you have something else, don't define           */
/* USE_MUNMAP.                                                  */
/* We assume ANSI C to support this feature.                    */

#if !defined(MSWIN32) && !defined(MSWINCE)

#include <unistd.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>

#endif

/* Compute a page aligned starting address for the unmap        */
/* operation on a block of size bytes starting at start.        */
/* Return 0 if the block is too small to make this feasible.    */
ptr_t GC_unmap_start(ptr_t start, word bytes)
{
    ptr_t result = start;
    /* Round start to next page boundary. */
    result += GC_page_size - 1;
    result = (ptr_t)((word)result & ~(GC_page_size - 1));
    if (result + GC_page_size > start + bytes) return 0;
    return result;
}

/* Compute end address for an unmap operation on the indicated  */
/* block.                                                       */
ptr_t GC_unmap_end(ptr_t start, word bytes)
{
    ptr_t end_addr = start + bytes;
    end_addr = (ptr_t)((word)end_addr & ~(GC_page_size - 1));
    return end_addr;
}
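/* Worked example (assuming GC_page_size == 0x1000): for a block with  */
/* start == 0x10010 and bytes == 0x3000, GC_unmap_start returns        */
/* 0x11000, the first page boundary inside the block, and GC_unmap_end */
/* returns 0x13000.  Only the fully contained pages                    */
/* [0x11000, 0x13000) are eligible for unmapping; the partial pages at */
/* either end stay mapped.                                             */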
/* Under Win32/WinCE we commit (map) and decommit (unmap)       */
/* memory using VirtualAlloc and VirtualFree.  These functions  */
/* work on individual allocations of virtual memory, made       */
/* previously using VirtualAlloc with the MEM_RESERVE flag.     */
/* The ranges we need to (de)commit may span several of these   */
/* allocations; therefore we use VirtualQuery to check          */
/* allocation lengths, and split up the range as necessary.     */

/* We assume that GC_remap is called on exactly the same range  */
/* as a previous call to GC_unmap.  It is safe to consistently  */
/* round the endpoints in both places.                          */
void GC_unmap(ptr_t start, word bytes)
{
    ptr_t start_addr = GC_unmap_start(start, bytes);
    ptr_t end_addr = GC_unmap_end(start, bytes);
    word len = end_addr - start_addr;

    if (0 == start_addr) return;
#   if defined(MSWIN32) || defined(MSWINCE)
      while (len != 0) {
          MEMORY_BASIC_INFORMATION mem_info;
          GC_word free_len;
          if (VirtualQuery(start_addr, &mem_info, sizeof(mem_info))
              != sizeof(mem_info))
              ABORT("Weird VirtualQuery result");
          free_len = (len < mem_info.RegionSize) ? len : mem_info.RegionSize;
          if (!VirtualFree(start_addr, free_len, MEM_DECOMMIT))
              ABORT("VirtualFree failed");
          GC_unmapped_bytes += free_len;
          start_addr += free_len;
          len -= free_len;
      }
#   else
      /* We immediately remap it to prevent an intervening mmap from   */
      /* accidentally grabbing the same address space.                 */
      {
        void * result;
        result = mmap(start_addr, len, PROT_NONE,
                      MAP_PRIVATE | MAP_FIXED | OPT_MAP_ANON,
                      zero_fd, 0/* offset */);
        if (result != (void *)start_addr)
            ABORT("mmap(...PROT_NONE...) failed");
      }
      GC_unmapped_bytes += len;
#   endif
}

void GC_remap(ptr_t start, word bytes)
{
    ptr_t start_addr = GC_unmap_start(start, bytes);
    ptr_t end_addr = GC_unmap_end(start, bytes);
    word len = end_addr - start_addr;

#   if defined(MSWIN32) || defined(MSWINCE)
      ptr_t result;

      if (0 == start_addr) return;
      while (len != 0) {
          MEMORY_BASIC_INFORMATION mem_info;
          GC_word alloc_len;
          if (VirtualQuery(start_addr, &mem_info, sizeof(mem_info))
              != sizeof(mem_info))
              ABORT("Weird VirtualQuery result");
          alloc_len = (len < mem_info.RegionSize) ? len : mem_info.RegionSize;
          result = VirtualAlloc(start_addr, alloc_len, MEM_COMMIT,
                                PAGE_EXECUTE_READWRITE);
          if (result != start_addr) {
              ABORT("VirtualAlloc remapping failed");
          }
          GC_unmapped_bytes -= alloc_len;
          start_addr += alloc_len;
          len -= alloc_len;
      }
#   else
      /* It was already remapped with PROT_NONE. */
      int result;

      if (0 == start_addr) return;
      result = mprotect(start_addr, len,
                        PROT_READ | PROT_WRITE | OPT_PROT_EXEC);
      if (result != 0) {
          GC_err_printf3(
              "Mprotect failed at 0x%lx (length %ld) with errno %ld\n",
              start_addr, len, errno);
          ABORT("Mprotect remapping failed");
      }
      GC_unmapped_bytes -= len;
#   endif
}

/* Two adjacent blocks have already been unmapped and are about to     */
/* be merged.  Unmap the whole block.  This typically requires         */
/* that we unmap a small section in the middle that was not previously */
/* unmapped due to alignment constraints.                              */
void GC_unmap_gap(ptr_t start1, word bytes1, ptr_t start2, word bytes2)
{
    ptr_t start1_addr = GC_unmap_start(start1, bytes1);
    ptr_t end1_addr = GC_unmap_end(start1, bytes1);
    ptr_t start2_addr = GC_unmap_start(start2, bytes2);
    ptr_t end2_addr = GC_unmap_end(start2, bytes2);
    ptr_t start_addr = end1_addr;
    ptr_t end_addr = start2_addr;
    word len;

    GC_ASSERT(start1 + bytes1 == start2);
    if (0 == start1_addr) start_addr = GC_unmap_start(start1, bytes1 + bytes2);
    if (0 == start2_addr) end_addr = GC_unmap_end(start1, bytes1 + bytes2);
    if (0 == start_addr) return;
    len = end_addr - start_addr;
#   if defined(MSWIN32) || defined(MSWINCE)
      while (len != 0) {
          MEMORY_BASIC_INFORMATION mem_info;
          GC_word free_len;
          if (VirtualQuery(start_addr, &mem_info, sizeof(mem_info))
              != sizeof(mem_info))
              ABORT("Weird VirtualQuery result");
          free_len = (len < mem_info.RegionSize) ? len : mem_info.RegionSize;
          if (!VirtualFree(start_addr, free_len, MEM_DECOMMIT))
              ABORT("VirtualFree failed");
          GC_unmapped_bytes += free_len;
          start_addr += free_len;
          len -= free_len;
      }
#   else
      if (len != 0 && munmap(start_addr, len) != 0) ABORT("munmap failed");
      GC_unmapped_bytes += len;
#   endif
}
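/* Illustration: block 1 occupies [start1, start2) and block 2         */
/* occupies [start2, start2 + bytes2).  The earlier unmaps could only  */
/* take whole pages, so [start1_addr, end1_addr) and [start2_addr,     */
/* end2_addr) are already gone, while the page-aligned gap             */
/* [end1_addr, start2_addr) straddling the block boundary is still     */
/* mapped.  That gap is what this routine removes (falling back to the */
/* merged block's own bounds when either block was too small to        */
/* contain a whole page).                                              */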
#endif /* USE_MUNMAP */

/* Routine for pushing any additional roots.  In THREADS        */
/* environment, this is also responsible for marking from       */
/* thread stacks.                                               */
#ifndef THREADS
void (*GC_push_other_roots)() = 0;
#else /* THREADS */

# ifdef PCR
PCR_ERes GC_push_thread_stack(PCR_Th_T *t, PCR_Any dummy)
{
    struct PCR_ThCtl_TInfoRep info;
    PCR_ERes result;

    info.ti_stkLow = info.ti_stkHi = 0;
    result = PCR_ThCtl_GetInfo(t, &info);
    GC_push_all_stack((ptr_t)(info.ti_stkLow), (ptr_t)(info.ti_stkHi));
    return(result);
}

/* Push the contents of an old object.  We treat this as stack  */
/* data only because that makes it robust against mark stack    */
/* overflow.                                                    */
PCR_ERes GC_push_old_obj(void *p, size_t size, PCR_Any data)
{
    GC_push_all_stack((ptr_t)p, (ptr_t)p + size);
    return(PCR_ERes_okay);
}

void GC_default_push_other_roots GC_PROTO((void))
{
    /* Traverse data allocated by previous memory managers. */
    {
        extern struct PCR_MM_ProcsRep * GC_old_allocator;

        if ((*(GC_old_allocator->mmp_enumerate))(PCR_Bool_false,
                                                 GC_push_old_obj, 0)
            != PCR_ERes_okay) {
            ABORT("Old object enumeration failed");
        }
    }
    /* Traverse all thread stacks. */
    if (PCR_ERes_IsErr(
            PCR_ThCtl_ApplyToAllOtherThreads(GC_push_thread_stack,0))
        || PCR_ERes_IsErr(GC_push_thread_stack(PCR_Th_CurrThread(), 0))) {
        ABORT("Thread stack marking failed\n");
    }
}

# endif /* PCR */

# ifdef SRC_M3

# ifdef ALL_INTERIOR_POINTERS
    --> misconfigured
# endif

void GC_push_thread_structures GC_PROTO((void))
{
    /* Not our responsibility. */
}

extern void ThreadF__ProcessStacks();

void GC_push_thread_stack(start, stop)
word start, stop;
{
    GC_push_all_stack((ptr_t)start, (ptr_t)stop + sizeof(word));
}

/* Push routine with M3 specific calling convention. */
GC_m3_push_root(dummy1, p, dummy2, dummy3)
word *p;
ptr_t dummy1, dummy2;
int dummy3;
{
    word q = *p;

    GC_PUSH_ONE_STACK(q, p);
}

/* M3 set equivalent to RTHeap.TracedRefTypes */
typedef struct { int elts[1]; } RefTypeSet;
RefTypeSet GC_TracedRefTypes = {{0x1}};

void GC_default_push_other_roots GC_PROTO((void))
{
    /* Use the M3 provided routine for finding static roots.    */
    /* This is a bit dubious, since it presumes no C roots.     */
    /* We handle the collector roots explicitly in GC_push_roots. */
    RTMain__GlobalMapProc(GC_m3_push_root, 0, GC_TracedRefTypes);
    if (GC_words_allocd > 0) {
        ThreadF__ProcessStacks(GC_push_thread_stack);
    }
    /* Otherwise this isn't absolutely necessary, and we have   */
    /* startup ordering problems.                               */
}

# endif /* SRC_M3 */

# if defined(GC_SOLARIS_THREADS) || defined(GC_PTHREADS) || \
     defined(GC_WIN32_THREADS)

extern void GC_push_all_stacks();

void GC_default_push_other_roots GC_PROTO((void))
{
    GC_push_all_stacks();
}

# endif /* GC_SOLARIS_THREADS || GC_PTHREADS || GC_WIN32_THREADS */

void (*GC_push_other_roots) GC_PROTO((void)) = GC_default_push_other_roots;

#endif /* THREADS */

/*
 * Routines for accessing dirty bits on virtual pages.
 * We plan to eventually implement four strategies for doing so:
 * DEFAULT_VDB: A simple dummy implementation that treats every page
 *              as possibly dirty.  This makes incremental collection
 *              useless, but the implementation is still correct.
 * PCR_VDB:     Use PCR's virtual dirty bit facility.
 * PROC_VDB:    Use the /proc facility for reading dirty bits.  Only
 *              works under some SVR4 variants.  Even then, it may be
 *              too slow to be entirely satisfactory.  Requires reading
 *              dirty bits for the entire address space.  Implementations
 *              tend to assume that the client is a (slow) debugger.
 * MPROTECT_VDB: Protect pages and then catch the faults to keep track of
 *              dirtied pages.  The implementation (and implementability)
 *              is highly system dependent.  This usually fails when system
 *              calls write to a protected page.  We prevent the read system
 *              call from doing so.  It is the client's responsibility to
 *              make sure that other system calls are similarly protected
 *              or write only to the stack.
 */

GC_bool GC_dirty_maintained = FALSE;

# ifdef DEFAULT_VDB

/* All of the following assume the allocation lock is held, and */
/* signals are disabled.                                        */

/* The client asserts that unallocated pages in the heap are    */
/* never written.                                               */

/* Initialize virtual dirty bit implementation. */
void GC_dirty_init()
{
#   ifdef PRINTSTATS
        GC_printf0("Initializing DEFAULT_VDB...\n");
#   endif
    GC_dirty_maintained = TRUE;
}

/* Retrieve system dirty bits for heap to a local buffer.       */
/* Restore the system's notion of which pages are dirty.        */
void GC_read_dirty()
{}

/* Is the HBLKSIZE sized page at h marked dirty in the local buffer?  */
/* If the actual page size is different, this returns TRUE if any     */
/* of the pages overlapping h are dirty.  This routine may err on the */
/* side of labelling pages as dirty (and this implementation does).   */
/*ARGSUSED*/
GC_bool GC_page_was_dirty(h)
struct hblk *h;
{
    return(TRUE);
}
/*
 * The following two routines are typically less crucial.  They matter
 * most with large dynamic libraries, or if we can't accurately identify
 * stacks, e.g. under Solaris 2.X.  Otherwise the following default
 * versions are adequate.
 */

/* Could any valid GC heap pointer ever have been written to this page? */
/*ARGSUSED*/
GC_bool GC_page_was_ever_dirty(h)
struct hblk *h;
{
    return(TRUE);
}

/* Reset the n pages starting at h to "was never dirty" status. */
void GC_is_fresh(h, n)
struct hblk *h;
word n;
{
}

/* A call that:                                         */
/* I) hints that [h, h+nblocks) is about to be written. */
/* II) guarantees that protection is removed.           */
/* (I) may speed up some dirty bit implementations.     */
/* (II) may be essential if we need to ensure that      */
/* pointer-free system call buffers in the heap are     */
/* not protected.                                       */
/*ARGSUSED*/
void GC_remove_protection(h, nblocks, is_ptrfree)
struct hblk *h;
word nblocks;
GC_bool is_ptrfree;
{
}

# endif /* DEFAULT_VDB */

# ifdef MPROTECT_VDB

/*
 * See DEFAULT_VDB for interface descriptions.
 */

/*
 * This implementation maintains dirty bits itself by catching write
 * faults and keeping track of them.  We assume nobody else catches
 * SIGBUS or SIGSEGV.  We assume no write faults occur in system calls.
 * This means that clients must ensure that system calls don't write
 * to the write-protected heap.  Probably the best way to do this is to
 * ensure that system calls write at most to POINTERFREE objects in the
 * heap, and do even that only if we are on a platform on which those
 * are not protected.  Another alternative is to wrap system calls
 * (see example for read below), but the current implementation holds
 * a lock across blocking calls, making it problematic for multithreaded
 * applications.
 * We assume the page size is a multiple of HBLKSIZE.
 * We prefer them to be the same.  We avoid protecting POINTERFREE
 * objects only if they are the same.
 */

# if !defined(MSWIN32) && !defined(MSWINCE) && !defined(DARWIN)

#   include <sys/mman.h>
#   include <signal.h>
#   include <sys/syscall.h>

#   define PROTECT(addr, len) \
        if (mprotect((caddr_t)(addr), (size_t)(len), \
                     PROT_READ | OPT_PROT_EXEC) < 0) { \
            ABORT("mprotect failed"); \
        }
#   define UNPROTECT(addr, len) \
        if (mprotect((caddr_t)(addr), (size_t)(len), \
                     PROT_WRITE | PROT_READ | OPT_PROT_EXEC) < 0) { \
            ABORT("un-mprotect failed"); \
        }

# else

#   ifdef DARWIN
        /* Using vm_protect (mach syscall) over mprotect (BSD syscall) */
        /* seems to decrease the likelihood of some of the problems    */
        /* described below.                                            */
#       include <mach/vm_map.h>
        static mach_port_t GC_task_self;
#       define PROTECT(addr,len) \
            if(vm_protect(GC_task_self,(vm_address_t)(addr),(vm_size_t)(len), \
                          FALSE,VM_PROT_READ) != KERN_SUCCESS) { \
                ABORT("vm_protect (PROTECT) failed"); \
            }
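/* A minimal, self-contained sketch of the write-fault technique that  */
/* MPROTECT_VDB relies on (illustration only -- not part of the        */
/* collector; the names region, dirty, write_fault and start_tracking  */
/* are invented for the example, and a POSIX system with SA_SIGINFO    */
/* and MAP_ANONYMOUS is assumed): map a region read-only so every      */
/* write traps, record the dirtied page in the handler, then           */
/* unprotect that page so the interrupted write restarts and succeeds. */
#if 0
#include <signal.h>
#include <stdint.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#define REGION_PAGES 16

static char *region;                    /* REGION_PAGES tracked pages  */
static size_t page_size;
static volatile sig_atomic_t dirty[REGION_PAGES];

static void write_fault(int sig, siginfo_t *si, void *ctx)
{
    uintptr_t addr = (uintptr_t)si->si_addr;
    uintptr_t base = (uintptr_t)region;
    size_t i;

    (void)sig; (void)ctx;
    if (addr < base || addr >= base + REGION_PAGES * page_size)
        _exit(1);       /* a genuine fault outside the tracked region  */
    i = (addr - base) / page_size;
    dirty[i] = 1;                       /* remember the dirtied page   */
    /* Unprotect just this page; the interrupted write then restarts.  */
    mprotect(region + i * page_size, page_size, PROT_READ | PROT_WRITE);
}

static void start_tracking(void)
{
    struct sigaction sa;

    page_size = (size_t)sysconf(_SC_PAGESIZE);
    /* Read-only mapping: the first write to each page raises a fault. */
    region = mmap(0, REGION_PAGES * page_size, PROT_READ,
                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    memset(&sa, 0, sizeof(sa));
    sa.sa_sigaction = write_fault;
    sa.sa_flags = SA_SIGINFO;
    sigaction(SIGSEGV, &sa, 0);         /* SIGBUS on some platforms    */
}
#endif
/* After start_tracking(), a collector-style client would read and     */
/* clear dirty[] at the start of each cycle and re-protect the clean   */
/* pages with mprotect(..., PROT_READ), mirroring the PROTECT /        */
/* UNPROTECT pair above.                                               */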