📄 i386-linux.elf-main.c
// Try page fragmentation just beyond .text .
    if ( ( (hatch = (void *)(phdr->p_memsz + phdr->p_vaddr + reloc)),
            ( phdr->p_memsz==phdr->p_filesz  // don't pollute potential .bss
            &&  (3*4)<=(frag_mask & -(int)hatch) ) )  // space left on page
    ) {
        hatch[0]= 0x0000000c;  // syscall
        hatch[1]= 0x03200008;  // jr $25  # $25 === $t9 === jp
        hatch[2]= 0x00000000;  // nop
    }
    else {
        hatch = 0;
    }
    }
    return hatch;
}
#endif  /*}*/

static void
#if defined(__i386__)  /*{*/
__attribute__((regparm(2), stdcall))
#endif  /*}*/
upx_bzero(char *p, size_t len)
{
    if (len) do {
        *p++= 0;
    } while (--len);
}
#define bzero upx_bzero

static Elf32_auxv_t *
#if defined(__i386__)  /*{*/
__attribute__((regparm(2), stdcall))
#endif  /*}*/
auxv_find(Elf32_auxv_t *av, unsigned const type)
{
    if (av
#if defined(__i386__)  /*{*/
        && 0==(1&(int)av)  /* PT_INTERP usually inhibits, except for hatch */
#endif  /*}*/
    )
    for (;; ++av) {
        if (av->a_type==type || (av->a_type==AT_IGNORE && type!=AT_NULL)) {
            av->a_type = type;
            return av;
        }
    }
    return 0;
}

static void
#if defined(__i386__)  /*{*/
__attribute__((regparm(3), stdcall))
#endif  /*}*/
auxv_up(Elf32_auxv_t *av, unsigned const type, unsigned const value)
{
    DPRINTF((STR_auxv_up(),av,type,value));
    av = auxv_find(av, type);
    if (av) {
        av->a_un.a_val = value;
    }
}

// The PF_* and PROT_* bits are {1,2,4}; the conversion table fits in 32 bits.
#define REP8(x) \
    ((x)|((x)<<4)|((x)<<8)|((x)<<12)|((x)<<16)|((x)<<20)|((x)<<24)|((x)<<28))
#define EXP8(y) \
    ((1&(y)) ? 0xf0f0f0f0 : (2&(y)) ? 0xff00ff00 : (4&(y)) ? 0xffff0000 : 0)
#define PF_TO_PROT(pf) \
    ((PROT_READ|PROT_WRITE|PROT_EXEC) & ( \
        ( (REP8(PROT_EXEC ) & EXP8(PF_X)) \
         |(REP8(PROT_READ ) & EXP8(PF_R)) \
         |(REP8(PROT_WRITE) & EXP8(PF_W)) \
        ) >> ((pf & (PF_R|PF_W|PF_X))<<2) ))

// Find convex hull of PT_LOAD (the minimal interval which covers all PT_LOAD),
// and mmap that much, to be sure that a kernel using exec-shield-randomize
// won't place the first piece in a way that leaves no room for the rest.
static unsigned long  // returns relocation constant
#if defined(__i386__)  /*{*/
__attribute__((regparm(3), stdcall))
#endif  /*}*/
xfind_pages(unsigned mflags, Elf32_Phdr const *phdr, int phnum,
    char **const p_brk
#if defined(__mips__)  /*{ any machine with varying PAGE_SIZE */
    , unsigned const page_mask
#else  /*}{*/
#define page_mask PAGE_MASK
#endif  /*}*/
)
{
    size_t lo= ~0, hi= 0, szlo= 0;
    char *addr;
    DPRINTF((STR_xfind_pages(), mflags, phdr, phnum, p_brk));
    mflags += MAP_PRIVATE | MAP_ANONYMOUS;  // '+' can optimize better than '|'
    for (; --phnum>=0; ++phdr) if (PT_LOAD==phdr->p_type) {
        if (phdr->p_vaddr < lo) {
            lo = phdr->p_vaddr;
            szlo = phdr->p_filesz;
        }
        if (hi < (phdr->p_memsz + phdr->p_vaddr)) {
            hi = phdr->p_memsz + phdr->p_vaddr;
        }
    }
    szlo += ~page_mask & lo;  // page fragment on lo edge
    lo   -= ~page_mask & lo;  // round down to page boundary
    hi    = page_mask & (hi - lo - page_mask -1);  // page length
    szlo  = page_mask & (szlo    - page_mask -1);  // page length
    if (MAP_FIXED & mflags) {
        addr = (char *)lo;
    }
    else {
        addr = mmap((void *)lo, hi, PROT_NONE, mflags, -1, 0);
        //munmap(szlo + addr, hi - szlo);
    }
    *p_brk = hi + addr;  // the logical value of brk(0)
    return (unsigned long)addr - lo;
}

static Elf32_Addr  // entry address
do_xmap(int const fdi, Elf32_Ehdr const *const ehdr, Extent *const xi,
    Elf32_auxv_t *const av, unsigned *p_reloc, f_unfilter *const f_unf)
{
    Elf32_Phdr const *phdr = (Elf32_Phdr const *) (ehdr->e_phoff +
        (void const *)ehdr);
#if defined(__mips__)  /*{ any machine with varying PAGE_SIZE */
    unsigned frag_mask = ~PAGE_MASK;
    {
        Elf32_auxv_t const *const av_pgsz = auxv_find(av, AT_PAGESZ);
        if (av_pgsz) {
            frag_mask = av_pgsz->a_un.a_val -1;
        }
    }
#else  /*}{*/
    unsigned const frag_mask = ~PAGE_MASK;
#endif  /*}*/
    char *v_brk;
    unsigned const reloc = xfind_pages(
        ((ET_EXEC==ehdr->e_type) ? MAP_FIXED : 0), phdr, ehdr->e_phnum, &v_brk
#if defined(__mips__)  /*{ any machine with varying PAGE_SIZE */
        , ~frag_mask
#endif  /*}*/
        );
    int j;
    DPRINTF((STR_do_xmap(),
        fdi, ehdr, xi, (xi? xi->size: 0), (xi? xi->buf: 0), av, p_reloc, f_unf));
    for (j=0; j < ehdr->e_phnum; ++phdr, ++j)
    if (xi && PT_PHDR==phdr->p_type) {
        auxv_up(av, AT_PHDR, phdr->p_vaddr + reloc);
    } else
    if (PT_LOAD==phdr->p_type) {
        unsigned const prot = PF_TO_PROT(phdr->p_flags);
        Extent xo;
        size_t mlen = xo.size = phdr->p_filesz;
        char  *addr = xo.buf  = (char *)(phdr->p_vaddr + reloc);
        char *const haddr = phdr->p_memsz + addr;
        size_t frag = (int)addr & frag_mask;
        mlen += frag;
        addr -= frag;

        if (addr != mmap(addr, mlen
#if defined(__i386__)  /*{*/
            // Decompressor can overrun the destination by 3 bytes.
            + (xi ? 3 : 0)
#endif  /*}*/
            , prot | (xi ? PROT_WRITE : 0),
            MAP_FIXED | MAP_PRIVATE | (xi ? MAP_ANONYMOUS : 0),
            (xi ? -1 : fdi), phdr->p_offset - frag) ) {
            err_exit(8);
        }
        if (xi) {
            unpackExtent(xi, &xo, (f_expand *)fdi,
                ((PROT_EXEC & prot) ? f_unf : 0) );
        }
        // Linux does not fixup the low end, so neither do we.
        // Indeed, must leave it alone because some PT_GNU_RELRO
        // dangle below PT_LOAD (but still on the low page)!
        //if (PROT_WRITE & prot) {
        //    bzero(addr, frag);  // fragment at lo end
        //}
        frag = (-mlen) & frag_mask;  // distance to next page boundary
        if (PROT_WRITE & prot) {  // note: read-only .bss not supported here
            bzero(mlen+addr, frag);  // fragment at hi end
        }
        if (xi) {
#if defined(__i386__)  /*{*/
            void *const hatch = make_hatch_x86(phdr, reloc);
            if (0!=hatch) {
                /* always update AT_NULL, especially for compressed PT_INTERP */
                auxv_up((Elf32_auxv_t *)(~1 & (int)av), AT_NULL, (unsigned)hatch);
            }
#elif defined(__arm__)  /*}{*/
            void *const hatch = make_hatch_arm(phdr, reloc);
            if (0!=hatch) {
                auxv_up(av, AT_NULL, (unsigned)hatch);
            }
#elif defined(__mips__)  /*}{*/
            void *const hatch = make_hatch_mips(phdr, reloc, frag_mask);
            if (0!=hatch) {
                auxv_up(av, AT_NULL, (unsigned)hatch);
            }
#endif  /*}*/
            if (0!=mprotect(addr, mlen, prot)) {
                err_exit(10);
ERR_LAB
            }
        }
        addr += mlen + frag;  /* page boundary on hi end */
        if (addr < haddr) {  // need pages for .bss
            if (addr != mmap(addr, haddr - addr, prot,
                    MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0 ) ) {
                err_exit(9);
            }
        }
#if defined(__i386__)  /*{*/
        else if (xi) {  // cleanup if decompressor overrun crosses page boundary
            mlen = frag_mask & (3+ mlen);
            if (mlen<=3) {  // page fragment was overrun buffer only
                munmap(addr, mlen);
            }
        }
#endif  /*}*/
    }
    if (xi && ET_DYN!=ehdr->e_type) {
        // Needed only if compressed shell script invokes compressed shell.
        do_brk(v_brk);
    }
    if (0!=p_reloc) {
        *p_reloc = reloc;
    }
    return ehdr->e_entry + reloc;
}

/*************************************************************************
// upx_main - called by our entry code
//
// This function is optimized for size.
**************************************************************************/

#if defined(__mips__)  /*{*/
void *upx_main(  // returns entry address
    struct b_info const *const bi,  // 1st block header
    size_t const sz_compressed,  // total length
    Elf32_Ehdr *const ehdr,  // temp char[sz_ehdr] for decompressing
    Elf32_auxv_t *const av,
    f_expand *const f_decompress,
    f_unfilter *const f_unf
)
#else  /*}{ !__mips__ */
void *upx_main(
    Elf32_auxv_t *const av,
    unsigned const sz_compressed,
    f_expand *const f_decompress,
    f_unfilter */*const*/ f_unfilter,
    Extent xo,
    Extent xi,
    unsigned const volatile dynbase
) __asm__("upx_main");
void *upx_main(
    Elf32_auxv_t *const av,
    unsigned const sz_compressed,
    f_expand *const f_decompress,
    f_unfilter */*const*/ f_unf,
    Extent xo,  // {sz_unc, ehdr} for ELF headers
    Extent xi,  // {sz_cpr, &b_info} for ELF headers
    unsigned const volatile dynbase  // value+result: compiler must not change
)
#endif  /*}*/
{
#if !defined(__mips__)  /*{*/
    Elf32_Ehdr *const ehdr = (Elf32_Ehdr *)(void *)xo.buf;  // temp char[MAX_ELF_HDR+OVERHEAD]
#endif  /*}*/
    Elf32_Phdr const *phdr = (Elf32_Phdr const *)(1+ ehdr);
    Elf32_Addr reloc;
    Elf32_Addr entry;

#if defined(__mips__)  /*{*/
    unsigned dynbase = 0;
    Extent xo, xi, xj;
    xo.buf  = (char *)ehdr;
    xo.size = bi->sz_unc;
    xi.buf  = CONST_CAST(char *, bi);
    xi.size = sz_compressed;
    xj.buf  = CONST_CAST(char *, bi);
    xj.size = sz_compressed;

    // ehdr = Uncompress Ehdr and Phdrs
    unpackExtent(&xj, &xo, f_decompress, 0);  // never filtered?
#else  /*}{ !__mips__ */
    // sizeof(Ehdr+Phdrs), compressed; including b_info header
    size_t const sz_pckhdrs = xi.size;

    DPRINTF((STR_upx_main(),
        av, sz_compressed, f_decompress, f_unf, &xo, xo.size, xo.buf,
        &xi, xi.size, xi.buf, dynbase));
#if defined(__i386__)  /*{*/
    f_unf = (f_unfilter *)(2+ (long)f_decompress);
#endif  /*}*/
    // Uncompress Ehdr and Phdrs.
    unpackExtent(&xi, &xo, f_decompress, 0);

    // Prepare to decompress the Elf headers again, into the first PT_LOAD.
    xi.buf  -= sz_pckhdrs;
    xi.size  = sz_compressed;
#endif  /*}*/

    // Some kernels omit AT_PHNUM,AT_PHENT,AT_PHDR because this stub has no PT_INTERP.
    // That is "too much" optimization.  Linux 2.6.x seems to give all AT_*.
    //auxv_up(av, AT_PAGESZ, PAGE_SIZE);  /* ld-linux.so.2 does not need this */
    auxv_up(av, AT_PHNUM , ehdr->e_phnum);
    auxv_up(av, AT_PHENT , ehdr->e_phentsize);
    auxv_up(av, AT_PHDR  , dynbase + (unsigned)(1+(Elf32_Ehdr *)phdr->p_vaddr));
    // AT_PHDR.a_un.a_val is set again by do_xmap if PT_PHDR is present.
    // This is necessary for ET_DYN if|when we override a prelink address.

    entry = do_xmap((int)f_decompress, ehdr, &xi, av, &reloc, f_unf);
    auxv_up(av, AT_ENTRY , entry);  // might not be necessary?

  {  // Map PT_INTERP program interpreter
    int j;
    for (j=0; j < ehdr->e_phnum; ++phdr, ++j) if (PT_INTERP==phdr->p_type) {
        int const fdi = open(reloc + (char const *)phdr->p_vaddr, O_RDONLY, 0);
        if (0 > fdi) {
            err_exit(18);
        }
        if (MAX_ELF_HDR!=read(fdi, (void *)ehdr, MAX_ELF_HDR)) {
ERR_LAB
            err_exit(19);
        }
        entry = do_xmap(fdi, ehdr, 0, av, &reloc, 0);
        auxv_up(av, AT_BASE, reloc);  // uClibc only?
        close(fdi);
        break;
    }
  }

    return (void *)entry;
}

/* vi:ts=4:et:nowrap */
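The PF_TO_PROT() macro above avoids branching by packing all eight combinations of the ELF p_flags bits into a single 32-bit lookup table: REP8() replicates one PROT_* value into every 4-bit slot, EXP8() keeps only the slots whose index has the corresponding PF_* bit set, and the shift by 4*(p_flags & 7) selects the matching entry. The standalone sketch below is not part of the stub; it redefines the PF_* and PROT_* constants locally (with their standard ELF and Linux values, so neither <elf.h> nor <sys/mman.h> is needed) and checks the table lookup against a plain bit-by-bit conversion for all eight flag combinations.

/* Illustrative sanity check for the PF_TO_PROT() nibble-table trick.
 * Not part of the stub; constants are defined locally on the assumption
 * that they match the usual ELF and Linux mmap values.
 */
#include <stdio.h>

#define PF_X 1          /* ELF segment flag bits */
#define PF_W 2
#define PF_R 4
#define PROT_READ  1    /* mmap/mprotect protection bits */
#define PROT_WRITE 2
#define PROT_EXEC  4

/* Same macros as in the stub: a 32-bit table of eight 4-bit entries,
 * indexed by the low three p_flags bits. */
#define REP8(x) \
    ((x)|((x)<<4)|((x)<<8)|((x)<<12)|((x)<<16)|((x)<<20)|((x)<<24)|((x)<<28))
#define EXP8(y) \
    ((1&(y)) ? 0xf0f0f0f0 : (2&(y)) ? 0xff00ff00 : (4&(y)) ? 0xffff0000 : 0)
#define PF_TO_PROT(pf) \
    ((PROT_READ|PROT_WRITE|PROT_EXEC) & ( \
        ( (REP8(PROT_EXEC ) & EXP8(PF_X)) \
         |(REP8(PROT_READ ) & EXP8(PF_R)) \
         |(REP8(PROT_WRITE) & EXP8(PF_W)) \
        ) >> ((pf & (PF_R|PF_W|PF_X))<<2) ))

int main(void)
{
    unsigned pf;
    for (pf = 0; pf < 8; ++pf) {
        /* straightforward bit-by-bit conversion for comparison */
        unsigned naive = ((pf & PF_R) ? PROT_READ  : 0)
                       | ((pf & PF_W) ? PROT_WRITE : 0)
                       | ((pf & PF_X) ? PROT_EXEC  : 0);
        unsigned table = PF_TO_PROT(pf);
        printf("p_flags=%u  PF_TO_PROT=%u  naive=%u  %s\n",
               pf, table, naive, (table == naive) ? "ok" : "MISMATCH");
    }
    return 0;
}

Running it should print "ok" for every p_flags value; the table costs one shift and two ANDs at run time instead of three tests.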