📄 elfload.c
    bswaptls(&shdr->sh_flags);
    bswaptls(&shdr->sh_addr);
    bswaptls(&shdr->sh_offset);
    bswaptls(&shdr->sh_size);
    bswap32s(&shdr->sh_link);
    bswap32s(&shdr->sh_info);
    bswaptls(&shdr->sh_addralign);
    bswaptls(&shdr->sh_entsize);
}

static void bswap_sym(Elf32_Sym *sym)
{
    bswap32s(&sym->st_name);
    bswap32s(&sym->st_value);
    bswap32s(&sym->st_size);
    bswap16s(&sym->st_shndx);
}
#endif

/*
 * 'copy_elf_strings()' copies argument/environment strings from user
 * memory to free pages in kernel mem. These are in a format ready
 * to be put directly into the top of new user memory.
 *
 */
static unsigned long copy_elf_strings(int argc, char **argv, void **page,
                                      unsigned long p)
{
    char *tmp, *tmp1, *pag = NULL;
    int len, offset = 0;

    if (!p) {
        return 0;       /* bullet-proofing */
    }
    while (argc-- > 0) {
        tmp = argv[argc];
        if (!tmp) {
            fprintf(stderr, "VFS: argc is wrong\n");
            exit(-1);
        }
        tmp1 = tmp;
        while (*tmp++);
        len = tmp - tmp1;
        if (p < len) {  /* this shouldn't happen - 128kB */
            return 0;
        }
        while (len) {
            --p; --tmp; --len;
            if (--offset < 0) {
                offset = p % TARGET_PAGE_SIZE;
                pag = (char *)page[p/TARGET_PAGE_SIZE];
                if (!pag) {
                    pag = (char *)malloc(TARGET_PAGE_SIZE);
                    page[p/TARGET_PAGE_SIZE] = pag;
                    if (!pag)
                        return 0;
                }
            }
            if (len == 0 || offset == 0) {
                *(pag + offset) = *tmp;
            } else {
                int bytes_to_copy = (len > offset) ? offset : len;
                tmp -= bytes_to_copy;
                p -= bytes_to_copy;
                offset -= bytes_to_copy;
                len -= bytes_to_copy;
                memcpy_fromfs(pag + offset, tmp, bytes_to_copy + 1);
            }
        }
    }
    return p;
}

unsigned long setup_arg_pages(target_ulong p, struct linux_binprm *bprm,
                              struct image_info *info)
{
    target_ulong stack_base, size, error;
    int i;

    /* Create enough stack to hold everything.  If we don't use
     * it for args, we'll use it for something else...
     */
    size = x86_stack_size;
    if (size < MAX_ARG_PAGES*TARGET_PAGE_SIZE)
        size = MAX_ARG_PAGES*TARGET_PAGE_SIZE;
    error = target_mmap(0,
                        size + qemu_host_page_size,
                        PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS,
                        -1, 0);
    if (error == -1) {
        perror("stk mmap");
        exit(-1);
    }
    /* we reserve one extra page at the top of the stack as guard */
    target_mprotect(error + size, qemu_host_page_size, PROT_NONE);

    stack_base = error + size - MAX_ARG_PAGES*TARGET_PAGE_SIZE;
    p += stack_base;

    for (i = 0; i < MAX_ARG_PAGES; i++) {
        if (bprm->page[i]) {
            info->rss++;
            memcpy_to_target(stack_base, bprm->page[i], TARGET_PAGE_SIZE);
            free(bprm->page[i]);
        }
        stack_base += TARGET_PAGE_SIZE;
    }
    return p;
}

static void set_brk(unsigned long start, unsigned long end)
{
    /* page-align the start and end addresses... */
    start = HOST_PAGE_ALIGN(start);
    end = HOST_PAGE_ALIGN(end);
    if (end <= start)
        return;
    if (target_mmap(start, end - start,
                    PROT_READ | PROT_WRITE | PROT_EXEC,
                    MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0) == -1) {
        perror("cannot mmap brk");
        exit(-1);
    }
}
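/*
 * Sketch of the stack area that setup_arg_pages() above lays out
 * (an explanatory note, not part of the original source; 'error'
 * is the base address returned by target_mmap):
 *
 *   error + size + qemu_host_page_size -> +--------------------+
 *                                         | guard (PROT_NONE)  |
 *   error + size                       -> +--------------------+
 *                                         | MAX_ARG_PAGES of   |
 *                                         | argv/envp pages    |
 *   stack_base                         -> +--------------------+
 *                                         | remaining stack    |
 *   error                              -> +--------------------+
 *
 * 'p' arrives as an offset inside the argument pages and is
 * returned as an absolute target address (p + stack_base).
 */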
/* We need to explicitly zero any fractional pages
   after the data section (i.e. bss).  This would
   contain the junk from the file that should not
   be in memory. */
static void padzero(unsigned long elf_bss, unsigned long last_bss)
{
    unsigned long nbyte;

    if (elf_bss >= last_bss)
        return;

    /* XXX: this is really a hack: if the real host page size is
       smaller than the target page size, some pages after the end
       of the file may not be mapped.  A better fix would be to
       patch target_mmap(), but it is more complicated as the file
       size must be known. */
    if (qemu_real_host_page_size < qemu_host_page_size) {
        unsigned long end_addr, end_addr1;
        end_addr1 = (elf_bss + qemu_real_host_page_size - 1) &
            ~(qemu_real_host_page_size - 1);
        end_addr = HOST_PAGE_ALIGN(elf_bss);
        if (end_addr1 < end_addr) {
            mmap((void *)end_addr1, end_addr - end_addr1,
                 PROT_READ|PROT_WRITE|PROT_EXEC,
                 MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
        }
    }

    nbyte = elf_bss & (qemu_host_page_size - 1);
    if (nbyte) {
        nbyte = qemu_host_page_size - nbyte;
        do {
            tput8(elf_bss, 0);
            elf_bss++;
        } while (--nbyte);
    }
}

static unsigned long create_elf_tables(target_ulong p, int argc, int envc,
                                       struct elfhdr *exec,
                                       unsigned long load_addr,
                                       unsigned long load_bias,
                                       unsigned long interp_load_addr,
                                       int ibcs, struct image_info *info)
{
    target_ulong sp;
    int size;
    target_ulong u_platform;
    const char *k_platform;
    const int n = sizeof(target_ulong);

    sp = p;
    u_platform = 0;
    k_platform = ELF_PLATFORM;
    if (k_platform) {
        size_t len = strlen(k_platform) + 1;
        sp -= (len + n - 1) & ~(n - 1);
        u_platform = sp;
        memcpy_to_target(sp, k_platform, len);
    }
    /*
     * Force 16 byte _final_ alignment here for generality.
     */
    sp = sp & ~(target_ulong)15;
    size = (DLINFO_ITEMS + 1) * 2;
    if (k_platform)
        size += 2;
#ifdef DLINFO_ARCH_ITEMS
    size += DLINFO_ARCH_ITEMS * 2;
#endif
    size += envc + argc + 2;
    size += (!ibcs ? 3 : 1);    /* argc itself */
    size *= n;
    if (size & 15)
        sp -= 16 - (size & 15);

#define NEW_AUX_ENT(id, val) do { \
        sp -= n; tputl(sp, val); \
        sp -= n; tputl(sp, id); \
    } while (0)

    NEW_AUX_ENT(AT_NULL, 0);

    /* There must be exactly DLINFO_ITEMS entries here.  */
    NEW_AUX_ENT(AT_PHDR, (target_ulong)(load_addr + exec->e_phoff));
    NEW_AUX_ENT(AT_PHENT, (target_ulong)(sizeof(struct elf_phdr)));
    NEW_AUX_ENT(AT_PHNUM, (target_ulong)(exec->e_phnum));
    NEW_AUX_ENT(AT_PAGESZ, (target_ulong)(TARGET_PAGE_SIZE));
    NEW_AUX_ENT(AT_BASE, (target_ulong)(interp_load_addr));
    NEW_AUX_ENT(AT_FLAGS, (target_ulong)0);
    NEW_AUX_ENT(AT_ENTRY, load_bias + exec->e_entry);
    NEW_AUX_ENT(AT_UID, (target_ulong)getuid());
    NEW_AUX_ENT(AT_EUID, (target_ulong)geteuid());
    NEW_AUX_ENT(AT_GID, (target_ulong)getgid());
    NEW_AUX_ENT(AT_EGID, (target_ulong)getegid());
    NEW_AUX_ENT(AT_HWCAP, (target_ulong)ELF_HWCAP);
    if (k_platform)
        NEW_AUX_ENT(AT_PLATFORM, u_platform);
#ifdef ARCH_DLINFO
    /*
     * ARCH_DLINFO must come last so platform specific code can enforce
     * special alignment requirements on the AUXV if necessary (eg. PPC).
     */
    ARCH_DLINFO;
#endif
#undef NEW_AUX_ENT

    sp = loader_build_argptr(envc, argc, sp, p, !ibcs);
    return sp;
}
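/*
 * Sketch of the resulting stack image (an explanatory note, not part
 * of the original source).  NEW_AUX_ENT pushes its (id, value) pairs
 * downward, so AT_NULL ends up at the highest auxv address and
 * terminates the vector; loader_build_argptr() then lays out the
 * standard SysV startup block below it:
 *
 *   sp -> argc
 *         argv[0] ... argv[argc-1], NULL
 *         envp[0] ... envp[envc-1], NULL
 *         ELF auxiliary vector (id, value pairs, ending with AT_NULL)
 *         strings (copied earlier by copy_elf_strings())
 */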
static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
                                     int interpreter_fd,
                                     unsigned long *interp_load_addr)
{
    struct elf_phdr *elf_phdata = NULL;
    struct elf_phdr *eppnt;
    unsigned long load_addr = 0;
    int load_addr_set = 0;
    int retval;
    unsigned long last_bss, elf_bss;
    unsigned long error;
    int i;

    elf_bss = 0;
    last_bss = 0;
    error = 0;

#ifdef BSWAP_NEEDED
    bswap_ehdr(interp_elf_ex);
#endif
    /* First of all, some simple consistency checks */
    if ((interp_elf_ex->e_type != ET_EXEC &&
         interp_elf_ex->e_type != ET_DYN) ||
        !elf_check_arch(interp_elf_ex->e_machine)) {
        return ~0UL;
    }

    /* Now read in all of the header information */
    if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > TARGET_PAGE_SIZE)
        return ~0UL;

    elf_phdata = (struct elf_phdr *)
        malloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
    if (!elf_phdata)
        return ~0UL;

    /*
     * If the size of this structure has changed, then punt, since
     * we will be doing the wrong thing.
     */
    if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr)) {
        free(elf_phdata);
        return ~0UL;
    }

    retval = lseek(interpreter_fd, interp_elf_ex->e_phoff, SEEK_SET);
    if (retval >= 0) {
        retval = read(interpreter_fd, (char *)elf_phdata,
                      sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
    }
    if (retval < 0) {
        perror("load_elf_interp");
        free(elf_phdata);
        exit(-1);
    }
#ifdef BSWAP_NEEDED
    eppnt = elf_phdata;
    for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
        bswap_phdr(eppnt);
    }
#endif

    if (interp_elf_ex->e_type == ET_DYN) {
        /* in order to avoid hardcoding the interpreter load address in
           qemu, we allocate a big enough memory zone */
        error = target_mmap(0, INTERP_MAP_SIZE,
                            PROT_NONE, MAP_PRIVATE | MAP_ANON,
                            -1, 0);
        if (error == -1) {
            perror("mmap");
            exit(-1);
        }
        load_addr = error;
        load_addr_set = 1;
    }

    eppnt = elf_phdata;
    for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++)
        if (eppnt->p_type == PT_LOAD) {
            int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
            int elf_prot = 0;
            unsigned long vaddr = 0;
            unsigned long k;

            if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
            if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
            if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
            if (interp_elf_ex->e_type == ET_EXEC || load_addr_set) {
                elf_type |= MAP_FIXED;
                vaddr = eppnt->p_vaddr;
            }
            error = target_mmap(load_addr + TARGET_ELF_PAGESTART(vaddr),
                                eppnt->p_filesz + TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr),
                                elf_prot,
                                elf_type,
                                interpreter_fd,
                                eppnt->p_offset - TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr));
            if (error == -1) {
                /* Real error */
                close(interpreter_fd);
                free(elf_phdata);
                return ~0UL;
            }

            if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
                load_addr = error;
                load_addr_set = 1;
            }

            /*
             * Find the end of the file mapping for this phdr, and keep
             * track of the largest address we see for this.
             */
            k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
            if (k > elf_bss)
                elf_bss = k;

            /*
             * Do the same thing for the memory mapping - between
             * elf_bss and last_bss is the bss section.
             */
            k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
            if (k > last_bss)
                last_bss = k;
        }

    /* All of the PT_LOAD segments are mapped; we are done with the
       interpreter's fd. */
    close(interpreter_fd);

    /*
     * Now fill out the bss section.  First pad the last page up
     * to the page boundary, and then perform a mmap to make sure
     * that there are zeromapped pages up to and including the last
     * bss page.
     */
    padzero(elf_bss, last_bss);
    elf_bss = TARGET_ELF_PAGESTART(elf_bss + qemu_host_page_size - 1); /* What we have mapped so far */

    /* Map the last of the bss segment */
    if (last_bss > elf_bss) {
        target_mmap(elf_bss, last_bss - elf_bss,
                    PROT_READ | PROT_WRITE | PROT_EXEC,
                    MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    }
    free(elf_phdata);

    *interp_load_addr = load_addr;
    return ((unsigned long)interp_elf_ex->e_entry) + load_addr;
}

/* Best attempt to load symbols from this ELF object. */
static void load_symbols(struct elfhdr *hdr, int fd)
{
    unsigned int i;
    struct elf_shdr sechdr, symtab, strtab;
    char *strings;
    struct syminfo *s;

    lseek(fd, hdr->e_shoff, SEEK_SET);
    for (i = 0; i < hdr->e_shnum; i++) {
        if (read(fd, &sechdr, sizeof(sechdr)) != sizeof(sechdr))
            return;
#ifdef BSWAP_NEEDED
        bswap_shdr(&sechdr);
#endif
        if (sechdr.sh_type == SHT_SYMTAB) {
            symtab = sechdr;
            lseek(fd, hdr->e_shoff + sizeof(sechdr) * sechdr.sh_link,
                  SEEK_SET);
            if (read(fd, &strtab, sizeof(strtab)) != sizeof(strtab))
                return;
#ifdef BSWAP_NEEDED
            bswap_shdr(&strtab);
#endif
            goto found;
        }
    }
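/*
 * For reference (an explanatory sketch, not part of this excerpt):
 * the page-rounding helpers used by load_elf_interp() are plain
 * power-of-two masks over the target ELF page size, along the
 * lines of:
 *
 *   #define TARGET_ELF_PAGESTART(_v)  ((_v) & ~(unsigned long)(TARGET_PAGE_SIZE - 1))
 *   #define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_PAGE_SIZE - 1))
 *
 * Mapping at PAGESTART(p_vaddr) while backing the file offset up by
 * PAGEOFFSET(p_vaddr) keeps the segment's file and memory page phase
 * identical, which is what mmap() requires.
 */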