os_dep.c
ABORT("Seek to object table failed"); } for (nsegs = E32_OBJCNT(hdr386); nsegs > 0; nsegs--) { int flags; if (fread((char *)(&seg), 1, sizeof seg, myexefile) < sizeof seg) { GC_err_puts("Couldn't read obj table entry from "); GC_err_puts(path); GC_err_puts("\n"); ABORT("Couldn't read obj table entry"); } flags = O32_FLAGS(seg); if (!(flags & OBJWRITE)) continue; if (!(flags & OBJREAD)) continue; if (flags & OBJINVALID) { GC_err_printf("Object with invalid pages?\n"); continue; } GC_add_roots_inner(O32_BASE(seg), O32_BASE(seg)+O32_SIZE(seg), FALSE); }}# else /* !OS2 */# if defined(MSWIN32) || defined(MSWINCE)# ifdef MSWIN32 /* Unfortunately, we have to handle win32s very differently from NT, */ /* Since VirtualQuery has very different semantics. In particular, */ /* under win32s a VirtualQuery call on an unmapped page returns an */ /* invalid result. Under NT, GC_register_data_segments is a noop and */ /* all real work is done by GC_register_dynamic_libraries. Under */ /* win32s, we cannot find the data segments associated with dll's. */ /* We register the main data segment here. */ GC_bool GC_no_win32_dlls = FALSE; /* This used to be set for gcc, to avoid dealing with */ /* the structured exception handling issues. But we now have */ /* assembly code to do that right. */# if defined(GWW_VDB)# ifndef _BASETSD_H_ typedef ULONG * PULONG_PTR;# endif typedef UINT (WINAPI * GetWriteWatch_type)( DWORD, PVOID, SIZE_T, PVOID*, PULONG_PTR, PULONG); static GetWriteWatch_type GetWriteWatch_func; static DWORD GetWriteWatch_alloc_flag;# define GC_GWW_AVAILABLE() (GetWriteWatch_func != NULL) static void detect_GetWriteWatch(void) { static GC_bool done; if (done) return; GetWriteWatch_func = (GetWriteWatch_type) GetProcAddress(GetModuleHandle("kernel32.dll"), "GetWriteWatch"); if (GetWriteWatch_func != NULL) { /* Also check whether VirtualAlloc accepts MEM_WRITE_WATCH, */ /* as some versions of kernel32.dll have one but not the */ /* other, making the feature completely broken. */ void * page = VirtualAlloc(NULL, GC_page_size, MEM_WRITE_WATCH | MEM_RESERVE, PAGE_READWRITE); if (page != NULL) { PVOID pages[16]; ULONG_PTR count = 16; DWORD page_size; /* Check that it actually works. In spite of some */ /* documentation it actually seems to exist on W2K. */ /* This test may be unnecessary, but ... */ if (GetWriteWatch_func(WRITE_WATCH_FLAG_RESET, page, GC_page_size, pages, &count, &page_size) != 0) { /* GetWriteWatch always fails. */ GetWriteWatch_func = NULL; } else { GetWriteWatch_alloc_flag = MEM_WRITE_WATCH; } VirtualFree(page, GC_page_size, MEM_RELEASE); } else { /* GetWriteWatch will be useless. */ GetWriteWatch_func = NULL; } } if (GC_print_stats) { if (GetWriteWatch_func == NULL) { GC_log_printf("Did not find a usable GetWriteWatch()\n"); } else { GC_log_printf("Using GetWriteWatch()\n"); } } done = TRUE; }# endif /* GWW_VDB */ GC_bool GC_wnt = FALSE; /* This is a Windows NT derivative, i.e. NT, W2K, XP or later. */ void GC_init_win32(void) { /* Set GC_wnt. */ /* If we're running under win32s, assume that no DLLs will be loaded */ /* I doubt anyone still runs win32s, but ... */ DWORD v = GetVersion(); GC_wnt = !(v & 0x80000000); GC_no_win32_dlls |= ((!GC_wnt) && (v & 0xff) <= 3); } /* Return the smallest address a such that VirtualQuery */ /* returns correct results for all addresses between a and start. */ /* Assumes VirtualQuery returns correct information for start. 
  ptr_t GC_least_described_address(ptr_t start)
  {
    MEMORY_BASIC_INFORMATION buf;
    size_t result;
    LPVOID limit;
    ptr_t p;
    LPVOID q;

    limit = GC_sysinfo.lpMinimumApplicationAddress;
    p = (ptr_t)((word)start & ~(GC_page_size - 1));
    for (;;) {
        q = (LPVOID)(p - GC_page_size);
        if ((ptr_t)q > (ptr_t)p /* underflow */ || q < limit) break;
        result = VirtualQuery(q, &buf, sizeof(buf));
        if (result != sizeof(buf) || buf.AllocationBase == 0) break;
        p = (ptr_t)(buf.AllocationBase);
    }
    return p;
  }
# endif

# ifndef REDIRECT_MALLOC
  /* We maintain a linked list of AllocationBase values that we know    */
  /* correspond to malloc heap sections.  Currently this is only called */
  /* during a GC.  But there is some hope that for long running         */
  /* programs we will eventually see most heap sections.                */
  /* In the long run, it would be more reliable to occasionally walk    */
  /* the malloc heap with HeapWalk on the default heap.  But that       */
  /* apparently works only for NT-based Windows.                        */
  /* In the long run, a better data structure would also be nice ...    */
  struct GC_malloc_heap_list {
    void * allocation_base;
    struct GC_malloc_heap_list *next;
  } *GC_malloc_heap_l = 0;

  /* Is p the base of one of the malloc heap sections we already know   */
  /* about?                                                              */
  GC_bool GC_is_malloc_heap_base(ptr_t p)
  {
    struct GC_malloc_heap_list *q = GC_malloc_heap_l;

    while (0 != q) {
      if (q -> allocation_base == p) return TRUE;
      q = q -> next;
    }
    return FALSE;
  }

  void *GC_get_allocation_base(void *p)
  {
    MEMORY_BASIC_INFORMATION buf;
    size_t result = VirtualQuery(p, &buf, sizeof(buf));
    if (result != sizeof(buf)) {
      ABORT("Weird VirtualQuery result");
    }
    return buf.AllocationBase;
  }

  size_t GC_max_root_size = 100000;     /* Appr. largest root size.      */

  void GC_add_current_malloc_heap()
  {
    struct GC_malloc_heap_list *new_l =
                 malloc(sizeof(struct GC_malloc_heap_list));
    void * candidate = GC_get_allocation_base(new_l);

    if (new_l == 0) return;
    if (GC_is_malloc_heap_base(candidate)) {
        /* Try a little harder to find malloc heap.                      */
        size_t req_size = 10000;
        do {
          void *p = malloc(req_size);
          if (0 == p) { free(new_l); return; }
          candidate = GC_get_allocation_base(p);
          free(p);
          req_size *= 2;
        } while (GC_is_malloc_heap_base(candidate)
                 && req_size < GC_max_root_size/10 && req_size < 500000);
        if (GC_is_malloc_heap_base(candidate)) {
          free(new_l); return;
        }
    }
    if (GC_print_stats)
      GC_log_printf("Found new system malloc AllocationBase at %p\n",
                    candidate);
    new_l -> allocation_base = candidate;
    new_l -> next = GC_malloc_heap_l;
    GC_malloc_heap_l = new_l;
  }
# endif /* REDIRECT_MALLOC */

  /* Is p the start of either the malloc heap, or of one of our         */
  /* heap sections?                                                      */
  GC_bool GC_is_heap_base (ptr_t p)
  {
     unsigned i;

#    ifndef REDIRECT_MALLOC
       static word last_gc_no = (word)(-1);

       if (last_gc_no != GC_gc_no) {
         GC_add_current_malloc_heap();
         last_gc_no = GC_gc_no;
       }
       if (GC_root_size > GC_max_root_size) GC_max_root_size = GC_root_size;
       if (GC_is_malloc_heap_base(p)) return TRUE;
#    endif
     for (i = 0; i < GC_n_heap_bases; i++) {
         if (GC_heap_bases[i] == p) return TRUE;
     }
     return FALSE;
  }

# ifdef MSWIN32
  void GC_register_root_section(ptr_t static_root)
  {
      MEMORY_BASIC_INFORMATION buf;
      size_t result;
      DWORD protect;
      LPVOID p;
      char * base;
      char * limit, * new_limit;

      if (!GC_no_win32_dlls) return;
      p = base = limit = GC_least_described_address(static_root);
      while (p < GC_sysinfo.lpMaximumApplicationAddress) {
        result = VirtualQuery(p, &buf, sizeof(buf));
        if (result != sizeof(buf) || buf.AllocationBase == 0
            || GC_is_heap_base(buf.AllocationBase)) break;
        new_limit = (char *)p + buf.RegionSize;
        protect = buf.Protect;
        if (buf.State == MEM_COMMIT
            && is_writable(protect)) {
            if ((char *)p == limit) {
                limit = new_limit;
            } else {
                if (base != limit) GC_add_roots_inner(base, limit, FALSE);
                base = p;
                limit = new_limit;
            }
        }
        if (p > (LPVOID)new_limit /* overflow */) break;
        p = (LPVOID)new_limit;
      }
      if (base != limit) GC_add_roots_inner(base, limit, FALSE);
  }
#endif

  void GC_register_data_segments()
  {
#   ifdef MSWIN32
      static char dummy;
      GC_register_root_section((ptr_t)(&dummy));
#   endif
  }

# else /* !OS2 && !Windows */

# if (defined(SVR4) || defined(AUX) || defined(DGUX) \
      || (defined(LINUX) && defined(SPARC))) && !defined(PCR)
ptr_t GC_SysVGetDataStart(size_t max_page_size, ptr_t etext_addr)
{
    word text_end = ((word)(etext_addr) + sizeof(word) - 1)
                    & ~(sizeof(word) - 1);
        /* etext rounded to word boundary                                */
    word next_page = ((text_end + (word)max_page_size - 1)
                      & ~((word)max_page_size - 1));
    word page_offset = (text_end & ((word)max_page_size - 1));
    volatile char * result = (char *)(next_page + page_offset);
    /* Note that this isn't equivalent to just adding                    */
    /* max_page_size to &etext if &etext is at a page boundary           */

    GC_setup_temporary_fault_handler();
    if (SETJMP(GC_jmp_buf) == 0) {
        /* Try writing to the address.                                   */
        *result = *result;
        GC_reset_fault_handler();
    } else {
        GC_reset_fault_handler();
        /* We got here via a longjmp.  The address is not readable.      */
        /* This is known to happen under Solaris 2.4 + gcc, which place  */
        /* string constants in the text segment, but after etext.        */
        /* Use plan B.  Note that we now know there is a gap between     */
        /* text and data segments, so plan A bought us something.        */
        result = (char *)GC_find_limit((ptr_t)(DATAEND), FALSE);
    }
    return((ptr_t)result);
}
# endif

# if defined(FREEBSD) && (defined(I386) || defined(X86_64) \
                          || defined(powerpc) || defined(__powerpc__)) \
     && !defined(PCR)
/* It's unclear whether this should be identical to the above, or       */
/* whether it should apply to non-X86 architectures.                    */
/* For now we don't assume that there is always an empty page after     */
/* etext.  But in some cases there actually seems to be slightly more.  */
/* This also deals with holes between read-only data and writable data. */
ptr_t GC_FreeBSDGetDataStart(size_t max_page_size, ptr_t etext_addr)
{
    word text_end = ((word)(etext_addr) + sizeof(word) - 1)
                    & ~(sizeof(word) - 1);
        /* etext rounded to word boundary                                */
    volatile word next_page = (text_end + (word)max_page_size - 1)
                              & ~((word)max_page_size - 1);
    volatile ptr_t result = (ptr_t)text_end;

    GC_setup_temporary_fault_handler();
    if (SETJMP(GC_jmp_buf) == 0) {
        /* Try reading at the address.                                   */
        /* This should happen before there is another thread.            */
        for (; next_page < (word)(DATAEND); next_page += (word)max_page_size)
            *(volatile char *)next_page;
        GC_reset_fault_handler();
    } else {
        GC_reset_fault_handler();
        /* As above, we go to plan B                                      */
        result = GC_find_limit((ptr_t)(DATAEND), FALSE);
    }
    return(result);
}
# endif

#ifdef AMIGA
#   define GC_AMIGA_DS
#   include "AmigaOS.c"
#   undef GC_AMIGA_DS
#else /* !OS2 && !Windows && !AMIGA */

void GC_register_data_segments(void)
{
#   if !defined(PCR) && !defined(MACOS)
#     if defined(REDIRECT_MALLOC) && defined(GC_SOLARIS_THREADS)
        /* As of Solaris 2.3, the Solaris threads implementation         */
        /* allocates the data structure for the initial thread with      */
        /* sbrk at process startup.  It needs to be scanned, so that     */
        /* we don't lose some malloc allocated data structures           */
        /* hanging from it.  We're on thin ice here ...                  */
        extern caddr_t sbrk();

        GC_add_roots_inner(DATASTART, (ptr_t)sbrk(0), FALSE);
#     else
        GC_add_roots_inner(DATASTART, (ptr_t)(DATAEND), FALSE);
#       if defined(DATASTART2)
          GC_add_roots_inner(DATASTART2, (ptr_t)(DATAEND2), FALSE);
#       endif
#     endif
#   endif
#   if defined(MACOS)
    {
#     if defined(THINK_C)
        extern void* GC_MacGetDataStart(void);
        /* globals begin above stack and end at a5. */
        GC_add_roots_inner((ptr_t)GC_MacGetDataStart(),
                           (ptr_t)LMGetCurrentA5(), FALSE);
#     else
#       if defined(__MWERKS__)
#         if !__POWERPC__
            extern void* GC_MacGetDataStart(void);
            /* MATTHEW: Function to handle Far Globals (CW Pro 3) */
#           if __option(far_data)
              extern void* GC_MacGetDataEnd(void);
#           endif
            /* globals begin above stack and end at a5. */
            GC_add_roots_inner((ptr_t)GC_MacGetDataStart(),
                               (ptr_t)LMGetCurrentA5(), FALSE);
            /* MATTHEW: Handle Far Globals */
#           if __option(far_data)
              /* Far globals follow the QD globals: */
              GC_add_roots_inner((ptr_t)LMGetCurrentA5(),
                                 (ptr_t)GC_MacGetDataEnd(), FALSE);
#           endif
#         else
            extern char __data_start__[], __data_end__[];
            GC_add_roots_inner((ptr_t)&__data_start__,
                               (ptr_t)&__data_end__, FALSE);
#         endif /* __POWERPC__ */
#       endif /* __MWERKS__ */
#     endif /* !THINK_C */
    }
#   endif /* MACOS */

    /* Dynamic libraries are added at every collection, since they may   */
    /* change.                                                            */
}

# endif /* ! AMIGA */
# endif /* ! MSWIN32 && ! MSWINCE */
# endif /* ! OS2 */

/*
 * Auxiliary routines for obtaining memory from OS.
 */

# if !defined(OS2) && !defined(PCR) && !defined(AMIGA) \
        && !defined(MSWIN32) && !defined(MSWINCE) \
        && !defined(MACOS) && !defined(DOS4GW) && !defined(NONSTOP)

# define SBRK_ARG_T ptrdiff_t

#if defined(MMAP_SUPPORTED)

#ifdef USE_MMAP_FIXED
#   define GC_MMAP_FLAGS MAP_FIXED | MAP_PRIVATE
        /* Seems to yield better performance on Solaris 2, but can       */
        /* be unreliable if something is already mapped at the address.  */
#else
#   define GC_MMAP_FLAGS MAP_PRIVATE
#endif

#ifdef USE_MMAP_ANON
# define zero_fd -1
# if defined(MAP_ANONYMOUS)
#   define OPT_MAP_ANON MAP_ANONYMOUS
# else
#   define OPT_MAP_ANON MAP_ANON
# endif
#else
  static int zero_fd;