os_dep.c
# define OPT_MAP_ANON 0
#endif

#ifndef HEAP_START
# define HEAP_START 0
#endif

ptr_t GC_unix_mmap_get_mem(word bytes)
{
    void *result;
    static ptr_t last_addr = HEAP_START;

#   ifndef USE_MMAP_ANON
      static GC_bool initialized = FALSE;

      if (!initialized) {
          zero_fd = open("/dev/zero", O_RDONLY);
          fcntl(zero_fd, F_SETFD, FD_CLOEXEC);
          initialized = TRUE;
      }
#   endif

    if (bytes & (GC_page_size - 1)) ABORT("Bad GET_MEM arg");
    result = mmap(last_addr, bytes, PROT_READ | PROT_WRITE | OPT_PROT_EXEC,
                  GC_MMAP_FLAGS | OPT_MAP_ANON, zero_fd, 0/* offset */);
    if (result == MAP_FAILED) return(0);
    last_addr = (ptr_t)result + bytes + GC_page_size - 1;
    last_addr = (ptr_t)((word)last_addr & ~(GC_page_size - 1));
#   if !defined(LINUX)
      if (last_addr == 0) {
        /* Oops.  We got the end of the address space.  This isn't  */
        /* usable by arbitrary C code, since one-past-end pointers  */
        /* don't work, so we discard it and try again.              */
        munmap(result, (size_t)(-GC_page_size) - (size_t)result);
                        /* Leave last page mapped, so we can't repeat. */
        return GC_unix_mmap_get_mem(bytes);
      }
#   else
      GC_ASSERT(last_addr != 0);
#   endif
    return((ptr_t)result);
}

# endif /* MMAP_SUPPORTED */

#if defined(USE_MMAP)

ptr_t GC_unix_get_mem(word bytes)
{
    return GC_unix_mmap_get_mem(bytes);
}

#else /* Not USE_MMAP */

ptr_t GC_unix_sbrk_get_mem(word bytes)
{
  ptr_t result;
# ifdef IRIX5
    /* Bare sbrk isn't thread safe.  Play by malloc rules.       */
    /* The equivalent may be needed on other systems as well.    */
    __LOCK_MALLOC();
# endif
  {
    ptr_t cur_brk = (ptr_t)sbrk(0);
    SBRK_ARG_T lsbs = (word)cur_brk & (GC_page_size-1);

    if ((SBRK_ARG_T)bytes < 0) {
        result = 0; /* too big */
        goto out;
    }
    if (lsbs != 0) {
        if ((ptr_t)sbrk(GC_page_size - lsbs) == (ptr_t)(-1)) {
            result = 0;
            goto out;
        }
    }
#   ifdef ADD_HEAP_GUARD_PAGES
      /* This is useful for catching severe memory overwrite problems that */
      /* span heap sections.  It shouldn't otherwise be turned on.         */
      {
        ptr_t guard = (ptr_t)sbrk((SBRK_ARG_T)GC_page_size);

        if (mprotect(guard, GC_page_size, PROT_NONE) != 0)
            ABORT("ADD_HEAP_GUARD_PAGES: mprotect failed");
      }
#   endif /* ADD_HEAP_GUARD_PAGES */
    result = (ptr_t)sbrk((SBRK_ARG_T)bytes);
    if (result == (ptr_t)(-1)) result = 0;
  }
 out:
# ifdef IRIX5
    __UNLOCK_MALLOC();
# endif
  return(result);
}

#if defined(MMAP_SUPPORTED)

/* By default, we try both sbrk and mmap, in that order. */
ptr_t GC_unix_get_mem(word bytes)
{
    static GC_bool sbrk_failed = FALSE;
    ptr_t result = 0;

    if (!sbrk_failed) result = GC_unix_sbrk_get_mem(bytes);
    if (0 == result) {
        sbrk_failed = TRUE;
        result = GC_unix_mmap_get_mem(bytes);
    }
    if (0 == result) {
        /* Try sbrk again, in case sbrk memory became available. */
        result = GC_unix_sbrk_get_mem(bytes);
    }
    return result;
}

#else /* !MMAP_SUPPORTED */

ptr_t GC_unix_get_mem(word bytes)
{
    return GC_unix_sbrk_get_mem(bytes);
}

#endif

#endif /* Not USE_MMAP */

# endif /* UN*X */
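/* A minimal standalone sketch (not collector code) of the page-size  */
/* arithmetic used above: GET_MEM requires page-multiple requests,    */
/* and last_addr is rounded with the same mask trick.  For            */
/* illustration this rounds an odd request up rather than aborting.   */
#if 0
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    uintptr_t page_size = (uintptr_t)sysconf(_SC_PAGESIZE);
    uintptr_t bytes = 10000;   /* hypothetical, non-page-multiple request */

    if (bytes & (page_size - 1))       /* the check GET_MEM aborts on */
        bytes = (bytes + page_size - 1) & ~(page_size - 1);
    printf("page size %lu, rounded request %lu\n",
           (unsigned long)page_size, (unsigned long)bytes);
    return 0;
}
#endif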
# ifdef OS2

void * os2_alloc(size_t bytes)
{
    void * result;

    if (DosAllocMem(&result, bytes, PAG_EXECUTE | PAG_READ |
                                    PAG_WRITE | PAG_COMMIT)
                    != NO_ERROR) {
        return(0);
    }
    if (result == 0) return(os2_alloc(bytes));
    return(result);
}

# endif /* OS2 */

# if defined(MSWIN32) || defined(MSWINCE)
SYSTEM_INFO GC_sysinfo;
# endif

# ifdef MSWIN32

# ifdef USE_GLOBAL_ALLOC
#   define GLOBAL_ALLOC_TEST 1
# else
#   define GLOBAL_ALLOC_TEST GC_no_win32_dlls
# endif

word GC_n_heap_bases = 0;

word GC_mem_top_down = 0; /* Change to MEM_TOP_DOWN for better 64-bit  */
                          /* testing.  Otherwise all addresses tend to */
                          /* end up in first 4GB, hiding bugs.         */

ptr_t GC_win32_get_mem(word bytes)
{
    ptr_t result;

    if (GLOBAL_ALLOC_TEST) {
        /* VirtualAlloc doesn't like PAGE_EXECUTE_READWRITE.    */
        /* There are also unconfirmed rumors of other           */
        /* problems, so we dodge the issue.                     */
        result = (ptr_t) GlobalAlloc(0, bytes + HBLKSIZE);
        result = (ptr_t)(((word)result + HBLKSIZE - 1) & ~(HBLKSIZE-1));
    } else {
        /* VirtualProtect only works on regions returned by a   */
        /* single VirtualAlloc call.  Thus we allocate one      */
        /* extra page, which will prevent merging of blocks     */
        /* in separate regions, and eliminate any temptation    */
        /* to call VirtualProtect on a range spanning regions.  */
        /* This wastes a small amount of memory, and risks      */
        /* increased fragmentation.  But better alternatives    */
        /* would require effort.                                */
        /* Pass MEM_WRITE_WATCH only if GetWriteWatch-based     */
        /* VDBs are enabled and the GetWriteWatch function is   */
        /* available.  Otherwise we waste resources or possibly */
        /* cause VirtualAlloc to fail (observed in Windows 2000 */
        /* SP2).                                                */
        result = (ptr_t) VirtualAlloc(NULL, bytes + 1,
#                                     ifdef GWW_VDB
                                        GetWriteWatch_alloc_flag |
#                                     endif
                                      MEM_COMMIT | MEM_RESERVE
                                        | GC_mem_top_down,
                                      PAGE_EXECUTE_READWRITE);
    }
    if (HBLKDISPL(result) != 0) ABORT("Bad VirtualAlloc result");
        /* If I read the documentation correctly, this can      */
        /* only happen if HBLKSIZE > 64k or not a power of 2.   */
    if (GC_n_heap_bases >= MAX_HEAP_SECTS) ABORT("Too many heap sections");
    GC_heap_bases[GC_n_heap_bases++] = result;
    return(result);
}

void GC_win32_free_heap(void)
{
    if (GC_no_win32_dlls) {
        while (GC_n_heap_bases > 0) {
            GlobalFree(GC_heap_bases[--GC_n_heap_bases]);
            GC_heap_bases[GC_n_heap_bases] = 0;
        }
    }
}

# endif /* MSWIN32 */

#ifdef AMIGA
# define GC_AMIGA_AM
# include "AmigaOS.c"
# undef GC_AMIGA_AM
#endif

# ifdef MSWINCE
word GC_n_heap_bases = 0;

ptr_t GC_wince_get_mem(word bytes)
{
    ptr_t result;
    word i;

    /* Round up allocation size to multiple of page size. */
    bytes = (bytes + GC_page_size-1) & ~(GC_page_size-1);

    /* Try to find reserved, uncommitted pages. */
    for (i = 0; i < GC_n_heap_bases; i++) {
        if (((word)(-(signed_word)GC_heap_lengths[i])
             & (GC_sysinfo.dwAllocationGranularity-1))
            >= bytes) {
            result = GC_heap_bases[i] + GC_heap_lengths[i];
            break;
        }
    }

    if (i == GC_n_heap_bases) {
        /* Reserve more pages. */
        word res_bytes = (bytes + GC_sysinfo.dwAllocationGranularity-1)
                         & ~(GC_sysinfo.dwAllocationGranularity-1);
        /* If we ever support MPROTECT_VDB here, we will probably need to    */
        /* ensure that res_bytes is strictly > bytes, so that VirtualProtect */
        /* never spans regions.  It seems to be OK for a VirtualFree         */
        /* argument to span regions, so we should be OK for now.             */
        result = (ptr_t) VirtualAlloc(NULL, res_bytes,
                                      MEM_RESERVE | MEM_TOP_DOWN,
                                      PAGE_EXECUTE_READWRITE);
        if (HBLKDISPL(result) != 0) ABORT("Bad VirtualAlloc result");
            /* If I read the documentation correctly, this can    */
            /* only happen if HBLKSIZE > 64k or not a power of 2. */
        if (GC_n_heap_bases >= MAX_HEAP_SECTS) ABORT("Too many heap sections");
        GC_heap_bases[GC_n_heap_bases] = result;
        GC_heap_lengths[GC_n_heap_bases] = 0;
        GC_n_heap_bases++;
    }

    /* Commit pages. */
    result = (ptr_t) VirtualAlloc(result, bytes, MEM_COMMIT,
                                  PAGE_EXECUTE_READWRITE);
    if (result != NULL) {
        if (HBLKDISPL(result) != 0) ABORT("Bad VirtualAlloc result");
        GC_heap_lengths[i] += bytes;
    }

    return(result);
}

# endif /* MSWINCE */
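/* A minimal standalone Win32 sketch (not collector code) of the       */
/* reserve-then-commit pattern GC_wince_get_mem relies on: reserve a   */
/* granule of address space once, then commit pages from it on demand. */
#if 0
#include <windows.h>
#include <stdio.h>

int main(void)
{
    SYSTEM_INFO si;
    char *base;

    GetSystemInfo(&si);
    /* Reserve address space only; no backing store is allocated yet. */
    base = VirtualAlloc(NULL, si.dwAllocationGranularity,
                        MEM_RESERVE, PAGE_NOACCESS);
    if (base == NULL) return 1;
    /* Commit just the first page; later requests commit further pages. */
    if (VirtualAlloc(base, si.dwPageSize, MEM_COMMIT, PAGE_READWRITE) == NULL)
        return 1;
    base[0] = 1;        /* the committed page is now usable */
    printf("reserved %lu bytes, committed %lu\n",
           (unsigned long)si.dwAllocationGranularity,
           (unsigned long)si.dwPageSize);
    VirtualFree(base, 0, MEM_RELEASE);
    return 0;
}
#endif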
#ifdef USE_MUNMAP

/* For now, this only works on Win32/WinCE and some Unix-like  */
/* systems.  If you have something else, don't define          */
/* USE_MUNMAP.                                                 */
/* We assume ANSI C to support this feature.                   */

#if !defined(MSWIN32) && !defined(MSWINCE)

#include <unistd.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>

#endif

/* Compute a page aligned starting address for the unmap       */
/* operation on a block of size bytes starting at start.       */
/* Return 0 if the block is too small to make this feasible.   */
ptr_t GC_unmap_start(ptr_t start, size_t bytes)
{
    ptr_t result = start;

    /* Round start to next page boundary. */
    result += GC_page_size - 1;
    result = (ptr_t)((word)result & ~(GC_page_size - 1));
    if (result + GC_page_size > start + bytes) return 0;
    return result;
}

/* Compute end address for an unmap operation on the indicated */
/* block.                                                      */
ptr_t GC_unmap_end(ptr_t start, size_t bytes)
{
    ptr_t end_addr = start + bytes;

    end_addr = (ptr_t)((word)end_addr & ~(GC_page_size - 1));
    return end_addr;
}
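/* A minimal standalone sketch (not collector code, hypothetical     */
/* numbers) of the rounding above: the unmap start is rounded up and */
/* the end rounded down, so only pages lying wholly inside the block */
/* are ever unmapped.                                                */
#if 0
#include <stdint.h>
#include <stdio.h>

#define PAGE 4096u              /* assumed page size */

int main(void)
{
    uintptr_t start = 0x10000100u;       /* unaligned block start */
    size_t bytes = 3 * PAGE + 0x200;     /* block length          */
    uintptr_t ustart = (start + PAGE - 1) & ~(uintptr_t)(PAGE - 1);
    uintptr_t uend = (start + bytes) & ~(uintptr_t)(PAGE - 1);

    if (ustart + PAGE > start + bytes) { /* same test as GC_unmap_start */
        puts("block too small; no whole page to unmap");
    } else {
        printf("unmap [%#lx, %#lx): %lu whole page(s)\n",
               (unsigned long)ustart, (unsigned long)uend,
               (unsigned long)((uend - ustart) / PAGE));
    }
    return 0;
}
#endif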
/* Under Win32/WinCE we commit (map) and decommit (unmap)      */
/* memory using VirtualAlloc and VirtualFree.  These functions */
/* work on individual allocations of virtual memory, made      */
/* previously using VirtualAlloc with the MEM_RESERVE flag.    */
/* The ranges we need to (de)commit may span several of these  */
/* allocations; therefore we use VirtualQuery to check         */
/* allocation lengths, and split up the range as necessary.    */

/* We assume that GC_remap is called on exactly the same range */
/* as a previous call to GC_unmap.  It is safe to consistently */
/* round the endpoints in both places.                         */
void GC_unmap(ptr_t start, size_t bytes)
{
    ptr_t start_addr = GC_unmap_start(start, bytes);
    ptr_t end_addr = GC_unmap_end(start, bytes);
    word len = end_addr - start_addr;

    if (0 == start_addr) return;
#   if defined(MSWIN32) || defined(MSWINCE)
      while (len != 0) {
          MEMORY_BASIC_INFORMATION mem_info;
          GC_word free_len;

          if (VirtualQuery(start_addr, &mem_info, sizeof(mem_info))
              != sizeof(mem_info))
              ABORT("Weird VirtualQuery result");
          free_len = (len < mem_info.RegionSize) ? len : mem_info.RegionSize;
          if (!VirtualFree(start_addr, free_len, MEM_DECOMMIT))
              ABORT("VirtualFree failed");
          GC_unmapped_bytes += free_len;
          start_addr += free_len;
          len -= free_len;
      }
#   else
      /* We immediately remap it to prevent an intervening mmap from */
      /* accidentally grabbing the same address space.               */
      {
        void * result;

        result = mmap(start_addr, len, PROT_NONE,
                      MAP_PRIVATE | MAP_FIXED | OPT_MAP_ANON,
                      zero_fd, 0/* offset */);
        if (result != (void *)start_addr)
            ABORT("mmap(...PROT_NONE...) failed");
      }
      GC_unmapped_bytes += len;
#   endif
}

void GC_remap(ptr_t start, size_t bytes)
{
    ptr_t start_addr = GC_unmap_start(start, bytes);
    ptr_t end_addr = GC_unmap_end(start, bytes);
    word len = end_addr - start_addr;

#   if defined(MSWIN32) || defined(MSWINCE)
      ptr_t result;

      if (0 == start_addr) return;
      while (len != 0) {
          MEMORY_BASIC_INFORMATION mem_info;
          GC_word alloc_len;

          if (VirtualQuery(start_addr, &mem_info, sizeof(mem_info))
              != sizeof(mem_info))
              ABORT("Weird VirtualQuery result");
          alloc_len = (len < mem_info.RegionSize) ? len : mem_info.RegionSize;
          result = VirtualAlloc(start_addr, alloc_len, MEM_COMMIT,
                                PAGE_EXECUTE_READWRITE);
          if (result != start_addr) {
              ABORT("VirtualAlloc remapping failed");
          }
          GC_unmapped_bytes -= alloc_len;
          start_addr += alloc_len;
          len -= alloc_len;
      }
#   else
      /* It was already remapped with PROT_NONE. */
      int result;

      if (0 == start_addr) return;
      result = mprotect(start_addr, len,
                        PROT_READ | PROT_WRITE | OPT_PROT_EXEC);
      if (result != 0) {
          GC_err_printf(
              "Mprotect failed at %p (length %lu) with errno %d\n",
              start_addr, (unsigned long)len, errno);
          ABORT("Mprotect remapping failed");
      }
      GC_unmapped_bytes -= len;
#   endif
}

/* Two adjacent blocks have already been unmapped and are about to     */
/* be merged.  Unmap the whole block.  This typically requires         */
/* that we unmap a small section in the middle that was not previously */
/* unmapped due to alignment constraints.                              */
void GC_unmap_gap(ptr_t start1, size_t bytes1, ptr_t start2, size_t bytes2)
{
    ptr_t start1_addr = GC_unmap_start(start1, bytes1);
    ptr_t end1_addr = GC_unmap_end(start1, bytes1);
    ptr_t start2_addr = GC_unmap_start(start2, bytes2);
    ptr_t end2_addr = GC_unmap_end(start2, bytes2);
    ptr_t start_addr = end1_addr;
    ptr_t end_addr = start2_addr;
    size_t len;

    GC_ASSERT(start1 + bytes1 == start2);
    if (0 == start1_addr) start_addr = GC_unmap_start(start1, bytes1 + bytes2);
    if (0 == start2_addr) end_addr = GC_unmap_end(start1, bytes1 + bytes2);
    if (0 == start_addr) return;
    len = end_addr - start_addr;
#   if defined(MSWIN32) || defined(MSWINCE)
      while (len != 0) {
          MEMORY_BASIC_INFORMATION mem_info;
          GC_word free_len;

          if (VirtualQuery(start_addr, &mem_info, sizeof(mem_info))
              != sizeof(mem_info))
              ABORT("Weird VirtualQuery result");
          free_len = (len < mem_info.RegionSize) ? len : mem_info.RegionSize;
          if (!VirtualFree(start_addr, free_len, MEM_DECOMMIT))
              ABORT("VirtualFree failed");
          GC_unmapped_bytes += free_len;
          start_addr += free_len;
          len -= free_len;
      }
#   else
      if (len != 0 && munmap(start_addr, len) != 0) ABORT("munmap failed");
#   endif
}

#endif /* USE_MUNMAP */
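/* A minimal standalone POSIX sketch (not collector code; assumes a    */
/* system with MAP_ANONYMOUS, e.g. Linux or the BSDs) of the idiom     */
/* above: "unmapping" by re-mapping PROT_NONE keeps the address range  */
/* reserved, and a GC_remap-style mprotect restores access in place.   */
#if 0
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    size_t page = (size_t)sysconf(_SC_PAGESIZE);
    size_t len = 4 * page;
    char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    if (p == MAP_FAILED) return 1;
    memset(p, 0xAB, len);
    /* "Unmap" the middle two pages: drop access, keep the reservation */
    /* (the non-Windows branch of GC_unmap).                           */
    if (mmap(p + page, 2 * page, PROT_NONE,
             MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS, -1, 0) == MAP_FAILED)
        return 1;
    /* "Remap": restore access in place (the non-Windows GC_remap). */
    if (mprotect(p + page, 2 * page, PROT_READ | PROT_WRITE) != 0)
        return 1;
    printf("middle pages were zero-filled by the re-map: %d\n", p[page] == 0);
    munmap(p, len);
    return 0;
}
#endif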