elfload.c.svn-base
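/*
 * ELF loader for QEMU's Linux user-mode emulation: the routines below
 * copy argument/environment strings into target pages, build the initial
 * stack and ELF auxiliary vector, and map the executable's ELF
 * interpreter (dynamic linker) into the target address space.
 */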
#ifdef TARGET_ABI32
#undef ELF_CLASS
#define ELF_CLASS ELFCLASS32
#undef bswaptls
#define bswaptls(ptr) bswap32s(ptr)
#endif

#include "elf.h"

struct exec
{
    unsigned int a_info;   /* Use macros N_MAGIC, etc for access */
    unsigned int a_text;   /* length of text, in bytes */
    unsigned int a_data;   /* length of data, in bytes */
    unsigned int a_bss;    /* length of uninitialized data area, in bytes */
    unsigned int a_syms;   /* length of symbol table data in file, in bytes */
    unsigned int a_entry;  /* start address */
    unsigned int a_trsize; /* length of relocation info for text, in bytes */
    unsigned int a_drsize; /* length of relocation info for data, in bytes */
};

#define N_MAGIC(exec) ((exec).a_info & 0xffff)
#define OMAGIC 0407
#define NMAGIC 0410
#define ZMAGIC 0413
#define QMAGIC 0314

/* max code+data+bss space allocated to elf interpreter */
#define INTERP_MAP_SIZE (32 * 1024 * 1024)

/* max code+data+bss+brk space allocated to ET_DYN executables */
#define ET_DYN_MAP_SIZE (128 * 1024 * 1024)

/* Necessary parameters */
#define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
#define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE-1))
#define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))

#define INTERPRETER_NONE 0
#define INTERPRETER_AOUT 1
#define INTERPRETER_ELF  2

#define DLINFO_ITEMS 12

static inline void memcpy_fromfs(void *to, const void *from, unsigned long n)
{
    memcpy(to, from, n);
}

extern unsigned long x86_stack_size;

static int load_aout_interp(void *exptr, int interp_fd);

#ifdef BSWAP_NEEDED
static void bswap_ehdr(struct elfhdr *ehdr)
{
    bswap16s(&ehdr->e_type);      /* Object file type */
    bswap16s(&ehdr->e_machine);   /* Architecture */
    bswap32s(&ehdr->e_version);   /* Object file version */
    bswaptls(&ehdr->e_entry);     /* Entry point virtual address */
    bswaptls(&ehdr->e_phoff);     /* Program header table file offset */
    bswaptls(&ehdr->e_shoff);     /* Section header table file offset */
    bswap32s(&ehdr->e_flags);     /* Processor-specific flags */
    bswap16s(&ehdr->e_ehsize);    /* ELF header size in bytes */
    bswap16s(&ehdr->e_phentsize); /* Program header table entry size */
    bswap16s(&ehdr->e_phnum);     /* Program header table entry count */
    bswap16s(&ehdr->e_shentsize); /* Section header table entry size */
    bswap16s(&ehdr->e_shnum);     /* Section header table entry count */
    bswap16s(&ehdr->e_shstrndx);  /* Section header string table index */
}

static void bswap_phdr(struct elf_phdr *phdr)
{
    bswap32s(&phdr->p_type);   /* Segment type */
    bswaptls(&phdr->p_offset); /* Segment file offset */
    bswaptls(&phdr->p_vaddr);  /* Segment virtual address */
    bswaptls(&phdr->p_paddr);  /* Segment physical address */
    bswaptls(&phdr->p_filesz); /* Segment size in file */
    bswaptls(&phdr->p_memsz);  /* Segment size in memory */
    bswap32s(&phdr->p_flags);  /* Segment flags */
    bswaptls(&phdr->p_align);  /* Segment alignment */
}

static void bswap_shdr(struct elf_shdr *shdr)
{
    bswap32s(&shdr->sh_name);
    bswap32s(&shdr->sh_type);
    bswaptls(&shdr->sh_flags);
    bswaptls(&shdr->sh_addr);
    bswaptls(&shdr->sh_offset);
    bswaptls(&shdr->sh_size);
    bswap32s(&shdr->sh_link);
    bswap32s(&shdr->sh_info);
    bswaptls(&shdr->sh_addralign);
    bswaptls(&shdr->sh_entsize);
}

static void bswap_sym(struct elf_sym *sym)
{
    bswap32s(&sym->st_name);
    bswaptls(&sym->st_value);
    bswaptls(&sym->st_size);
    bswap16s(&sym->st_shndx);
}
#endif

/*
 * 'copy_elf_strings()' copies argument/envelope strings from user
 * memory to free pages in kernel mem.  These are in a format ready
 * to be put directly into the top of new user memory.
 *
 */
static abi_ulong copy_elf_strings(int argc, char **argv, void **page,
                                  abi_ulong p)
{
    char *tmp, *tmp1, *pag = NULL;
    int len, offset = 0;

    if (!p) {
        return 0;       /* bullet-proofing */
    }
    while (argc-- > 0) {
        tmp = argv[argc];
        if (!tmp) {
            fprintf(stderr, "VFS: argc is wrong");
            exit(-1);
        }
        tmp1 = tmp;
        while (*tmp++);
        len = tmp - tmp1;
        if (p < len) {  /* this shouldn't happen - 128kB */
            return 0;
        }
        while (len) {
            --p; --tmp; --len;
            if (--offset < 0) {
                offset = p % TARGET_PAGE_SIZE;
                pag = (char *)page[p/TARGET_PAGE_SIZE];
                if (!pag) {
                    pag = (char *)malloc(TARGET_PAGE_SIZE);
                    if (!pag)
                        return 0;
                    memset(pag, 0, TARGET_PAGE_SIZE);
                    page[p/TARGET_PAGE_SIZE] = pag;
                }
            }
            if (len == 0 || offset == 0) {
                *(pag + offset) = *tmp;
            } else {
                int bytes_to_copy = (len > offset) ? offset : len;
                tmp -= bytes_to_copy;
                p -= bytes_to_copy;
                offset -= bytes_to_copy;
                len -= bytes_to_copy;
                memcpy_fromfs(pag + offset, tmp, bytes_to_copy + 1);
            }
        }
    }
    return p;
}

static abi_ulong setup_arg_pages(abi_ulong p, struct linux_binprm *bprm,
                                 struct image_info *info)
{
    abi_ulong stack_base, size, error;
    int i;

    /* Create enough stack to hold everything.  If we don't use
     * it for args, we'll use it for something else...
     */
    size = x86_stack_size;
    if (size < MAX_ARG_PAGES*TARGET_PAGE_SIZE)
        size = MAX_ARG_PAGES*TARGET_PAGE_SIZE;
    error = target_mmap(0,
                        size + qemu_host_page_size,
                        PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS,
                        -1, 0);
    if (error == -1) {
        perror("stk mmap");
        exit(-1);
    }
    /* we reserve one extra page at the top of the stack as guard */
    target_mprotect(error + size, qemu_host_page_size, PROT_NONE);

    stack_base = error + size - MAX_ARG_PAGES*TARGET_PAGE_SIZE;
    p += stack_base;

    for (i = 0; i < MAX_ARG_PAGES; i++) {
        if (bprm->page[i]) {
            info->rss++;
            /* FIXME - check return value of memcpy_to_target() for failure */
            memcpy_to_target(stack_base, bprm->page[i], TARGET_PAGE_SIZE);
            free(bprm->page[i]);
        }
        stack_base += TARGET_PAGE_SIZE;
    }
    return p;
}

static void set_brk(abi_ulong start, abi_ulong end)
{
    /* page-align the start and end addresses... */
    start = HOST_PAGE_ALIGN(start);
    end = HOST_PAGE_ALIGN(end);
    if (end <= start)
        return;
    if (target_mmap(start, end - start,
                    PROT_READ | PROT_WRITE | PROT_EXEC,
                    MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0) == -1) {
        perror("cannot mmap brk");
        exit(-1);
    }
}

/* We need to explicitly zero any fractional pages after the data
   section (i.e. bss).  This would contain the junk from the file that
   should not be in memory. */
static void padzero(abi_ulong elf_bss, abi_ulong last_bss)
{
    abi_ulong nbyte;

    if (elf_bss >= last_bss)
        return;

    /* XXX: this is really a hack : if the real host page size is
       smaller than the target page size, some pages after the end
       of the file may not be mapped. A better fix would be to
       patch target_mmap(), but it is more complicated as the file
       size must be known */
    if (qemu_real_host_page_size < qemu_host_page_size) {
        abi_ulong end_addr, end_addr1;
        end_addr1 = (elf_bss + qemu_real_host_page_size - 1) &
            ~(qemu_real_host_page_size - 1);
        end_addr = HOST_PAGE_ALIGN(elf_bss);
        if (end_addr1 < end_addr) {
            mmap((void *)g2h(end_addr1), end_addr - end_addr1,
                 PROT_READ|PROT_WRITE|PROT_EXEC,
                 MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
        }
    }

    nbyte = elf_bss & (qemu_host_page_size-1);
    if (nbyte) {
        nbyte = qemu_host_page_size - nbyte;
        do {
            /* FIXME - what to do if put_user() fails? */
            put_user_u8(0, elf_bss);
            elf_bss++;
        } while (--nbyte);
    }
}

static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
                                   struct elfhdr *exec,
                                   abi_ulong load_addr,
                                   abi_ulong load_bias,
                                   abi_ulong interp_load_addr, int ibcs,
                                   struct image_info *info)
{
    abi_ulong sp;
    int size;
    abi_ulong u_platform;
    const char *k_platform;
    const int n = sizeof(elf_addr_t);

    sp = p;
    u_platform = 0;
    k_platform = ELF_PLATFORM;
    if (k_platform) {
        size_t len = strlen(k_platform) + 1;
        sp -= (len + n - 1) & ~(n - 1);
        u_platform = sp;
        /* FIXME - check return value of memcpy_to_target() for failure */
        memcpy_to_target(sp, k_platform, len);
    }
    /*
     * Force 16 byte _final_ alignment here for generality.
     */
    sp = sp &~ (abi_ulong)15;
    size = (DLINFO_ITEMS + 1) * 2;
    if (k_platform)
        size += 2;
#ifdef DLINFO_ARCH_ITEMS
    size += DLINFO_ARCH_ITEMS * 2;
#endif
    size += envc + argc + 2;
    size += (!ibcs ? 3 : 1);    /* argc itself */
    size *= n;
    if (size & 15)
        sp -= 16 - (size & 15);

    /* This is correct because Linux defines
     * elf_addr_t as Elf32_Off / Elf64_Off
     */
#define NEW_AUX_ENT(id, val) do {               \
        sp -= n; put_user_ual(val, sp);         \
        sp -= n; put_user_ual(id, sp);          \
    } while (0)

    NEW_AUX_ENT(AT_NULL, 0);

    /* There must be exactly DLINFO_ITEMS entries here.  */
    NEW_AUX_ENT(AT_PHDR, (abi_ulong)(load_addr + exec->e_phoff));
    NEW_AUX_ENT(AT_PHENT, (abi_ulong)(sizeof(struct elf_phdr)));
    NEW_AUX_ENT(AT_PHNUM, (abi_ulong)(exec->e_phnum));
    NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(TARGET_PAGE_SIZE));
    NEW_AUX_ENT(AT_BASE, (abi_ulong)(interp_load_addr));
    NEW_AUX_ENT(AT_FLAGS, (abi_ulong)0);
    NEW_AUX_ENT(AT_ENTRY, load_bias + exec->e_entry);
    NEW_AUX_ENT(AT_UID, (abi_ulong) getuid());
    NEW_AUX_ENT(AT_EUID, (abi_ulong) geteuid());
    NEW_AUX_ENT(AT_GID, (abi_ulong) getgid());
    NEW_AUX_ENT(AT_EGID, (abi_ulong) getegid());
    NEW_AUX_ENT(AT_HWCAP, (abi_ulong) ELF_HWCAP);
    if (k_platform)
        NEW_AUX_ENT(AT_PLATFORM, u_platform);
#ifdef ARCH_DLINFO
    /*
     * ARCH_DLINFO must come last so platform specific code can enforce
     * special alignment requirements on the AUXV if necessary (eg. PPC).
     */
    ARCH_DLINFO;
#endif
#undef NEW_AUX_ENT

    sp = loader_build_argptr(envc, argc, sp, p, !ibcs);
    return sp;
}

static abi_ulong load_elf_interp(struct elfhdr *interp_elf_ex,
                                 int interpreter_fd,
                                 abi_ulong *interp_load_addr)
{
    struct elf_phdr *elf_phdata = NULL;
    struct elf_phdr *eppnt;
    abi_ulong load_addr = 0;
    int load_addr_set = 0;
    int retval;
    abi_ulong last_bss, elf_bss;
    abi_ulong error;
    int i;

    elf_bss = 0;
    last_bss = 0;
    error = 0;

#ifdef BSWAP_NEEDED
    bswap_ehdr(interp_elf_ex);
#endif
    /* First of all, some simple consistency checks */
    if ((interp_elf_ex->e_type != ET_EXEC &&
         interp_elf_ex->e_type != ET_DYN) ||
        !elf_check_arch(interp_elf_ex->e_machine)) {
        return ~((abi_ulong)0UL);
    }

    /* Now read in all of the header information */
    if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > TARGET_PAGE_SIZE)
        return ~(abi_ulong)0UL;

    elf_phdata = (struct elf_phdr *)
        malloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);

    if (!elf_phdata)
        return ~((abi_ulong)0UL);

    /*
     * If the size of this structure has changed, then punt, since
     * we will be doing the wrong thing.
     */
    if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr)) {
        free(elf_phdata);
        return ~((abi_ulong)0UL);
    }

    retval = lseek(interpreter_fd, interp_elf_ex->e_phoff, SEEK_SET);
    if (retval >= 0) {
        retval = read(interpreter_fd,
                      (char *) elf_phdata,
                      sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
    }
    if (retval < 0) {
        perror("load_elf_interp");
        exit(-1);
        free(elf_phdata);
        return retval;
    }
#ifdef BSWAP_NEEDED
    eppnt = elf_phdata;
    for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
        bswap_phdr(eppnt);
    }
#endif

    if (interp_elf_ex->e_type == ET_DYN) {
        /* in order to avoid hardcoding the interpreter load
           address in qemu, we allocate a big enough memory zone */
        error = target_mmap(0, INTERP_MAP_SIZE,
                            PROT_NONE, MAP_PRIVATE | MAP_ANON,
                            -1, 0);
        if (error == -1) {
            perror("mmap");
            exit(-1);
        }
        load_addr = error;
        load_addr_set = 1;
    }

    eppnt = elf_phdata;
    for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++)
        if (eppnt->p_type == PT_LOAD) {
            int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
            int elf_prot = 0;
            abi_ulong vaddr = 0;
            abi_ulong k;

            if (eppnt->p_flags & PF_R) elf_prot  = PROT_READ;
            if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
            if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
            if (interp_elf_ex->e_type == ET_EXEC || load_addr_set) {
                elf_type |= MAP_FIXED;
                vaddr = eppnt->p_vaddr;
            }
            error = target_mmap(load_addr + TARGET_ELF_PAGESTART(vaddr),
                                eppnt->p_filesz + TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr),
                                elf_prot,
                                elf_type,
                                interpreter_fd,
                                eppnt->p_offset - TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr));

            if (error == -1) {
                /* Real error */
                close(interpreter_fd);
                free(elf_phdata);
                return ~((abi_ulong)0UL);
            }

            if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
                load_addr = error;
                load_addr_set = 1;
            }

            /*
             * Find the end of the file mapping for this phdr, and keep
             * track of the largest address we see for this.
             */
            k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
            if (k > elf_bss) elf_bss = k;

            /*
             * Do the same thing for the memory mapping - between
             * elf_bss and last_bss is the bss section.
             */
            k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
            if (k > last_bss) last_bss = k;
        }

    /* Now use mmap to map the library into memory. */

    close(interpreter_fd);

    /*
     * Now fill out the bss section.  First pad the last page up
     * to the page boundary, and then perform a mmap to make sure
     * that there are zeromapped pages up to and including the last
     * bss page.
     */
    padzero(elf_bss, last_bss);
    elf_bss = TARGET_ELF_PAGESTART(elf_bss + qemu_host_page_size - 1); /* What we have mapped so far */

    /* Map the last of the bss segment */
    if (last_bss > elf_bss) {
        target_mmap(elf_bss, last_bss - elf_bss,
                    PROT_READ|PROT_WRITE|PROT_EXEC,
                    MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    }
    free(elf_phdata);