/*
 * os_dep.c -- OS-dependent root registration and memory-acquisition
 * routines (Boehm-Demers-Weiser conservative garbage collector).
 * NOTE(review): removed web-code-viewer banner text ("os_dep.c", font-size
 * control) that was not part of the original source file.
 */
  /* NOTE(review): SOURCE begins mid-function.  The code below is the tail  */
  /* of the Win32 GC_register_root_section VirtualQuery scan: it walks      */
  /* committed, writable regions and registers each maximal contiguous run  */
  /* as a root segment.  The loop header and declarations of p, buf,        */
  /* result, base, limit, protect are above this view -- do not assume      */
  /* their exact types from here.                                           */
      if (result != sizeof(buf) || buf.AllocationBase == 0
          || GC_is_heap_base(buf.AllocationBase)) break;
      new_limit = (char *)p + buf.RegionSize;
      protect = buf.Protect;
      if (buf.State == MEM_COMMIT && is_writable(protect)) {
          if ((char *)p == limit) {
              /* Contiguous with the current run: just extend it.       */
              limit = new_limit;
          } else {
              /* Gap: flush the previous run as roots, start a new one. */
              if (base != limit) GC_add_roots_inner(base, limit, FALSE);
              base = p;
              limit = new_limit;
          }
      }
      if (p > (LPVOID)new_limit /* overflow */) break;
      p = (LPVOID)new_limit;
  }
  /* Flush the final pending run, if any. */
  if (base != limit) GC_add_roots_inner(base, limit, FALSE);
}
#endif

/* Register the main program's static data as GC roots.  On MSWIN32 this */
/* registers the memory section containing a static dummy variable; on   */
/* other configurations this definition is empty.                        */
void GC_register_data_segments()
{
#   ifdef MSWIN32
      static char dummy;
      GC_register_root_section((ptr_t)(&dummy));
#   endif
}
# else /* !OS2 && !Windows */

# if (defined(SVR4) || defined(AUX) || defined(DGUX) \
      || (defined(LINUX) && defined(SPARC))) && !defined(PCR)
/* Find the start of the writable data segment on SysV-style systems.    */
/* max_page_size: maximum possible VM page size.                         */
/* etext_addr:    address of the linker's end-of-text symbol.            */
/* Returns the first provably-writable address at or after etext, or     */
/* falls back to probing backwards from DATAEND ("plan B").              */
ptr_t GC_SysVGetDataStart(max_page_size, etext_addr)
int max_page_size;
int * etext_addr;
{
    word text_end = ((word)(etext_addr) + sizeof(word) - 1)
                    & ~(sizeof(word) - 1);
        /* etext rounded to word boundary       */
    word next_page = ((text_end + (word)max_page_size - 1)
                      & ~((word)max_page_size - 1));
    word page_offset = (text_end & ((word)max_page_size - 1));
    VOLATILE char * result = (char *)(next_page + page_offset);
    /* Note that this isn't equivalent to just adding            */
    /* max_page_size to &etext if &etext is at a page boundary.  */

    GC_setup_temporary_fault_handler();
    if (SETJMP(GC_jmp_buf) == 0) {
        /* Try writing to the address.  */
        *result = *result;
        GC_reset_fault_handler();
    } else {
        GC_reset_fault_handler();
        /* We got here via a longjmp.  The address is not readable.     */
        /* This is known to happen under Solaris 2.4 + gcc, which place */
        /* string constants in the text segment, but after etext.       */
        /* Use plan B.  Note that we now know there is a gap between    */
        /* text and data segments, so plan A bought us something.       */
        result = (char *)GC_find_limit((ptr_t)(DATAEND), FALSE);
    }
    return((ptr_t)result);
}
# endif

# if defined(FREEBSD) && (defined(I386) || defined(powerpc) || defined(__powerpc__)) && !defined(PCR)
/* It's unclear whether this should be identical to the above, or        */
/* whether it should apply to non-X86 architectures.                     */
/* For now we don't assume that there is always an empty page after      */
/* etext.  But in some cases there actually seems to be slightly more.   */
/* This also deals with holes between read-only data and writable data.  */
ptr_t GC_FreeBSDGetDataStart(max_page_size, etext_addr)
int max_page_size;
int * etext_addr;
{
    word text_end = ((word)(etext_addr) + sizeof(word) - 1)
                    & ~(sizeof(word) - 1);
        /* etext rounded to word boundary       */
    VOLATILE word next_page = (text_end + (word)max_page_size - 1)
                              & ~((word)max_page_size - 1);
    VOLATILE ptr_t result = (ptr_t)text_end;

    GC_setup_temporary_fault_handler();
    if (SETJMP(GC_jmp_buf) == 0) {
        /* Try reading at the address.                          */
        /* This should happen before there is another thread.   */
        for (; next_page < (word)(DATAEND); next_page += (word)max_page_size)
            *(VOLATILE char *)next_page;
        GC_reset_fault_handler();
    } else {
        GC_reset_fault_handler();
        /* As above, we go to plan B    */
        result = GC_find_limit((ptr_t)(DATAEND), FALSE);
    }
    return(result);
}
# endif

#ifdef AMIGA
#   define GC_AMIGA_DS
#   include "AmigaOS.c"
#   undef GC_AMIGA_DS
#else /* !OS2 && !Windows && !AMIGA */

/* Register the static data segment(s) as GC roots -- generic Unix and   */
/* classic MacOS variant.  The exact roots depend on the configuration   */
/* macros; DATASTART/DATAEND (and DATASTART2/DATAEND2 where defined) are */
/* supplied by platform headers outside this view.                       */
void GC_register_data_segments()
{
#   if !defined(PCR) && !defined(SRC_M3) && !defined(MACOS)
#     if defined(REDIRECT_MALLOC) && defined(GC_SOLARIS_THREADS)
        /* As of Solaris 2.3, the Solaris threads implementation        */
        /* allocates the data structure for the initial thread with     */
        /* sbrk at process startup.  It needs to be scanned, so that    */
        /* we don't lose some malloc allocated data structures          */
        /* hanging from it.  We're on thin ice here ...                 */
        extern caddr_t sbrk();
        GC_add_roots_inner(DATASTART, (char *)sbrk(0), FALSE);
#     else
        GC_add_roots_inner(DATASTART, (char *)(DATAEND), FALSE);
#       if defined(DATASTART2)
          GC_add_roots_inner(DATASTART2, (char *)(DATAEND2), FALSE);
#       endif
#     endif
#   endif
#   if defined(MACOS)
    {
#     if defined(THINK_C)
        extern void* GC_MacGetDataStart(void);
        /* globals begin above stack and end at a5. */
        GC_add_roots_inner((ptr_t)GC_MacGetDataStart(),
                           (ptr_t)LMGetCurrentA5(), FALSE);
#     else
#       if defined(__MWERKS__)
#         if !__POWERPC__
            extern void* GC_MacGetDataStart(void);
            /* MATTHEW: Function to handle Far Globals (CW Pro 3) */
#           if __option(far_data)
              extern void* GC_MacGetDataEnd(void);
#           endif
            /* globals begin above stack and end at a5. */
            GC_add_roots_inner((ptr_t)GC_MacGetDataStart(),
                               (ptr_t)LMGetCurrentA5(), FALSE);
            /* MATTHEW: Handle Far Globals */
#           if __option(far_data)
              /* Far globals follow the QD globals: */
              GC_add_roots_inner((ptr_t)LMGetCurrentA5(),
                                 (ptr_t)GC_MacGetDataEnd(), FALSE);
#           endif
#         else
            extern char __data_start__[], __data_end__[];
            GC_add_roots_inner((ptr_t)&__data_start__,
                               (ptr_t)&__data_end__, FALSE);
#         endif /* __POWERPC__ */
#       endif /* __MWERKS__ */
#     endif /* !THINK_C */
    }
#   endif /* MACOS */

    /* Dynamic libraries are added at every collection, since they may  */
    /* change.                                                          */
}
# endif  /* ! AMIGA */
# endif  /* ! MSWIN32 && ! MSWINCE */
# endif  /* ! OS2 */

/*
 * Auxiliary routines for obtaining memory from OS.
 */

# if !defined(OS2) && !defined(PCR) && !defined(AMIGA) \
     && !defined(MSWIN32) && !defined(MSWINCE) \
     && !defined(MACOS) && !defined(DOS4GW)

# ifdef SUNOS4
    extern caddr_t sbrk();
# endif
# ifdef __STDC__
#   define SBRK_ARG_T ptrdiff_t
# else
#   define SBRK_ARG_T int
# endif

# if 0 && defined(RS6000)  /* We now use mmap */
/* The compiler seems to generate speculative reads one past the end of  */
/* an allocated object.  Hence we need to make sure that the page        */
/* following the last heap page is also mapped.                          */
/* NOTE(review): dead code -- guarded by "#if 0".  Kept for reference.   */
ptr_t GC_unix_get_mem(bytes)
word bytes;
{
    caddr_t cur_brk = (caddr_t)sbrk(0);
    caddr_t result;
    SBRK_ARG_T lsbs = (word)cur_brk & (GC_page_size-1);
    static caddr_t my_brk_val = 0;
        /* Break value recorded at the end of our previous call,        */
        /* including the extra guard page mapped then; if the break     */
        /* hasn't moved since, that guard page can be reused below.     */

    if ((SBRK_ARG_T)bytes < 0) return(0); /* too big */
    if (lsbs != 0) {
        /* Advance the break to the next page boundary first. */
        if((caddr_t)(sbrk(GC_page_size - lsbs)) == (caddr_t)(-1)) return(0);
    }
    if (cur_brk == my_brk_val) {
        /* Use the extra block we allocated last time. */
        result = (ptr_t)sbrk((SBRK_ARG_T)bytes);
        if (result == (caddr_t)(-1)) return(0);
        result -= GC_page_size;
    } else {
        result = (ptr_t)sbrk(GC_page_size + (SBRK_ARG_T)bytes);
        if (result == (caddr_t)(-1)) return(0);
    }
    my_brk_val = result + bytes + GC_page_size;  /* Always page aligned */
    return((ptr_t)result);
}
#else  /* Not RS6000 */

#if defined(USE_MMAP) || defined(USE_MUNMAP)

#ifdef USE_MMAP_FIXED
#   define GC_MMAP_FLAGS MAP_FIXED | MAP_PRIVATE
        /* Seems to yield better performance on Solaris 2, but can      */
        /* be unreliable if something is already mapped at the address. */
#else
#   define GC_MMAP_FLAGS MAP_PRIVATE
#endif

#ifdef USE_MMAP_ANON
#   define zero_fd -1
#   if defined(MAP_ANONYMOUS)
#     define OPT_MAP_ANON MAP_ANONYMOUS
#   else
#     define OPT_MAP_ANON MAP_ANON
#   endif
#else
    static int zero_fd;   /* fd for /dev/zero; opened lazily in GC_unix_get_mem. */
#   define OPT_MAP_ANON 0
#endif

#endif /* defined(USE_MMAP) || defined(USE_MUNMAP) */

#if defined(USE_MMAP)
/* Tested only under Linux, IRIX5 and Solaris 2 */

#ifndef HEAP_START
#   define HEAP_START 0
#endif

/* Obtain 'bytes' bytes (which must be a multiple of GC_page_size) from */
/* the OS via mmap.  Returns 0 on failure.  'last_addr' is only a hint  */
/* to mmap: the address just past (rounded up) the previous mapping.    */
ptr_t GC_unix_get_mem(bytes)
word bytes;
{
    void *result;
    static ptr_t last_addr = HEAP_START;

#   ifndef USE_MMAP_ANON
      static GC_bool initialized = FALSE;

      /* One-time lazy open of /dev/zero to back non-anonymous maps.    */
      /* NOTE(review): the open()/fcntl() results are not checked here. */
      if (!initialized) {
          zero_fd = open("/dev/zero", O_RDONLY);
          fcntl(zero_fd, F_SETFD, FD_CLOEXEC);
          initialized = TRUE;
      }
#   endif

    if (bytes & (GC_page_size -1)) ABORT("Bad GET_MEM arg");
    result = mmap(last_addr, bytes, PROT_READ | PROT_WRITE | OPT_PROT_EXEC,
                  GC_MMAP_FLAGS | OPT_MAP_ANON, zero_fd, 0/* offset */);
    if (result == MAP_FAILED) return(0);
    last_addr = (ptr_t)result + bytes + GC_page_size - 1;
    last_addr = (ptr_t)((word)last_addr & ~(GC_page_size - 1));
#   if !defined(LINUX)
      if (last_addr == 0) {
        /* Oops.  We got the end of the address space.  This isn't      */
        /* usable by arbitrary C code, since one-past-end pointers      */
        /* don't work, so we discard it and try again.                  */
        munmap(result, (size_t)(-GC_page_size) - (size_t)result);
                        /* Leave last page mapped, so we can't repeat. */
        return GC_unix_get_mem(bytes);
      }
#   else
      GC_ASSERT(last_addr != 0);
#   endif
    return((ptr_t)result);
}

#else /* Not RS6000, not USE_MMAP */

/* Obtain 'bytes' bytes from the OS via sbrk, first aligning the break  */
/* to a page boundary.  Returns 0 on failure.                           */
ptr_t GC_unix_get_mem(bytes)
word bytes;
{
  ptr_t result;
# ifdef IRIX5
    /* Bare sbrk isn't thread safe.  Play by malloc rules.      */
    /* The equivalent may be needed on other systems as well.   */
    __LOCK_MALLOC();
# endif
  {
    ptr_t cur_brk = (ptr_t)sbrk(0);
    SBRK_ARG_T lsbs = (word)cur_brk & (GC_page_size-1);

    if ((SBRK_ARG_T)bytes < 0) return(0); /* too big */
        /* NOTE(review): these early returns skip __UNLOCK_MALLOC() on  */
        /* IRIX5 -- looks like a lock leak on the error paths; confirm. */
    if (lsbs != 0) {
        if((ptr_t)sbrk(GC_page_size - lsbs) == (ptr_t)(-1)) return(0);
    }
    result = (ptr_t)sbrk((SBRK_ARG_T)bytes);
    if (result == (ptr_t)(-1)) result = 0;
  }
# ifdef IRIX5
    __UNLOCK_MALLOC();
# endif
  return(result);
}

#endif /* Not USE_MMAP */
#endif /* Not RS6000 */

# endif /* UN*X */

# ifdef OS2

/* Allocate 'bytes' of committed read/write/execute memory via          */
/* DosAllocMem.  Retries once per 0 result; returns 0 on API error.     */
void * os2_alloc(size_t bytes)
{
    void * result;

    if (DosAllocMem(&result, bytes,
                    PAG_EXECUTE | PAG_READ | PAG_WRITE | PAG_COMMIT)
        != NO_ERROR) {
        return(0);
    }
    if (result == 0) return(os2_alloc(bytes));
    return(result);
}

# endif /* OS2 */

# if defined(MSWIN32) || defined(MSWINCE)
  SYSTEM_INFO GC_sysinfo;
# endif

# ifdef MSWIN32

# ifdef USE_GLOBAL_ALLOC
#   define GLOBAL_ALLOC_TEST 1
# else
#   define GLOBAL_ALLOC_TEST GC_no_win32_dlls
# endif

word GC_n_heap_bases = 0;   /* Number of entries used in GC_heap_bases. */

/* Obtain an HBLKSIZE-aligned chunk of 'bytes' bytes on Win32 via       */
/* GlobalAlloc or VirtualAlloc, recording the base in GC_heap_bases.    */
/* Aborts (rather than returning 0) on alignment or table overflow.     */
ptr_t GC_win32_get_mem(bytes)
word bytes;
{
    ptr_t result;

    if (GLOBAL_ALLOC_TEST) {
        /* VirtualAlloc doesn't like PAGE_EXECUTE_READWRITE.    */
        /* There are also unconfirmed rumors of other           */
        /* problems, so we dodge the issue.                     */
        result = (ptr_t) GlobalAlloc(0, bytes + HBLKSIZE);
        result = (ptr_t)(((word)result + HBLKSIZE) & ~(HBLKSIZE-1));
    } else {
        /* VirtualProtect only works on regions returned by a   */
        /* single VirtualAlloc call.  Thus we allocate one      */
        /* extra page, which will prevent merging of blocks     */
        /* in separate regions, and eliminate any temptation    */
        /* to call VirtualProtect on a range spanning regions.  */
        /* This wastes a small amount of memory, and risks      */
        /* increased fragmentation.  But better alternatives    */
        /* would require effort.                                */
        result = (ptr_t) VirtualAlloc(NULL, bytes + 1,
                                      MEM_COMMIT | MEM_RESERVE,
                                      PAGE_EXECUTE_READWRITE);
    }
    if (HBLKDISPL(result) != 0) ABORT("Bad VirtualAlloc result");
        /* If I read the documentation correctly, this can      */
        /* only happen if HBLKSIZE > 64k or not a power of 2.   */
    if (GC_n_heap_bases >= MAX_HEAP_SECTS) ABORT("Too many heap sections");
    GC_heap_bases[GC_n_heap_bases++] = result;
    return(result);
}

/* Free every recorded heap section.  Only meaningful in the            */
/* GC_no_win32_dlls (GlobalAlloc) configuration, mirroring the          */
/* GLOBAL_ALLOC_TEST branch taken at allocation time.                   */
void GC_win32_free_heap ()
{
    if (GC_no_win32_dlls) {
        while (GC_n_heap_bases > 0) {
            GlobalFree (GC_heap_bases[--GC_n_heap_bases]);
            GC_heap_bases[GC_n_heap_bases] = 0;
        }
    }
}

# endif

#ifdef AMIGA
#   define GC_AMIGA_AM
#   include "AmigaOS.c"
#   undef GC_AMIGA_AM
#endif

# ifdef MSWINCE
word GC_n_heap_bases = 0;   /* Number of entries used in GC_heap_bases. */

/* WinCE allocator: first try to satisfy the request from already-      */
/* reserved but uncommitted pages of an existing heap section;          */
/* otherwise reserve a new section rounded to allocation granularity.   */
/* NOTE(review): SOURCE is truncated inside this function, just before  */
/* the "Commit pages" step -- the remainder is outside this view.       */
ptr_t GC_wince_get_mem(bytes)
word bytes;
{
    ptr_t result;
    word i;

    /* Round up allocation size to multiple of page size        */
    bytes = (bytes + GC_page_size-1) & ~(GC_page_size-1);

    /* Try to find reserved, uncommitted pages */
    for (i = 0; i < GC_n_heap_bases; i++) {
        if (((word)(-(signed_word)GC_heap_lengths[i])
             & (GC_sysinfo.dwAllocationGranularity-1)) >= bytes) {
            result = GC_heap_bases[i] + GC_heap_lengths[i];
            break;
        }
    }

    if (i == GC_n_heap_bases) {
        /* Reserve more pages */
        word res_bytes = (bytes + GC_sysinfo.dwAllocationGranularity-1)
                         & ~(GC_sysinfo.dwAllocationGranularity-1);
        /* If we ever support MPROTECT_VDB here, we will probably need to    */
        /* ensure that res_bytes is strictly > bytes, so that VirtualProtect */
        /* never spans regions.  It seems to be OK for a VirtualFree         */
        /* argument to span regions, so we should be OK for now.             */
        result = (ptr_t) VirtualAlloc(NULL, res_bytes,
                                      MEM_RESERVE | MEM_TOP_DOWN,
                                      PAGE_EXECUTE_READWRITE);
        if (HBLKDISPL(result) != 0) ABORT("Bad VirtualAlloc result");
            /* If I read the documentation correctly, this can      */
            /* only happen if HBLKSIZE > 64k or not a power of 2.   */
        if (GC_n_heap_bases >= MAX_HEAP_SECTS) ABORT("Too many heap sections");
        GC_heap_bases[GC_n_heap_bases] = result;
        GC_heap_lengths[GC_n_heap_bases] = 0;
        GC_n_heap_bases++;
    }

    /* Commit pages */
/*
 * NOTE(review): removed trailing web-code-viewer UI text (keyboard-shortcut
 * help: copy, search, fullscreen, theme, font size) that was not part of the
 * original source.  The file is truncated here, mid GC_wince_get_mem, just
 * before the page-commit step; recover the remainder from the upstream
 * Boehm GC os_dep.c before attempting to compile.
 */