📄 peal.c
字号:
ptrdiff_t rw_slide;
unsigned int i;
uint32_t max_align = 1;
// find extent of r/w memory
rw_sh_addr = 0xffffffff;
rw_sh_end = 0;
for (i = 0; (shdr = section_for_index(m->ehdr, i)); i++) {
uint32_t sh_flags = swap32(shdr->sh_flags);
uint32_t sh_addr = swap32(shdr->sh_addr);
uint32_t sh_size = swap32(shdr->sh_size);
uint32_t sh_type = swap32(shdr->sh_type);
uint32_t sh_addralign = swap32(shdr->sh_addralign);
if ((sh_flags & SHF_ALLOC) &&
(sh_type == SHT_PROGBITS || sh_type == SHT_NOBITS))
{
if ((sh_flags & SHF_WRITE) && sh_addr < rw_sh_addr) {
rw_sh_addr = sh_addr;
}
if ((sh_flags & SHF_WRITE) && sh_addr + sh_size > rw_sh_end) {
rw_sh_end = sh_addr + sh_size;
}
if (sh_addralign > max_align) {
max_align = sh_addralign;
}
}
}
// allocate r/w memory
if (rw_sh_addr == 0xffffffff || rw_sh_addr == rw_sh_end) {
m->rw_block = 0;
m->rw_start = 0;
rw_slide = 0;
} else {
// add leading pad to fix alignment in case first rw section
// is less aligned than other rw sections.
rw_sh_addr -= rw_sh_addr % max_align;
// add leading pad to heap block in case max_align is
// more aligned than MemGluePtrNew's result.
m->rw_block = (uintptr_t)MemGluePtrNew(rw_sh_end - rw_sh_addr + max_align);
if (!m->rw_block) return false;
m->rw_start = m->rw_block + (max_align - m->rw_block % max_align);
if (m->rw_start % max_align) return false;
rw_slide = m->rw_start - rw_sh_addr;
}
// populate r/w memory
for (i = 0; (shdr = section_for_index(m->ehdr, i)); i++) {
uint32_t sh_flags = swap32(shdr->sh_flags);
uint32_t sh_addr = swap32(shdr->sh_addr);
uint32_t sh_size = swap32(shdr->sh_size);
uint32_t sh_type = swap32(shdr->sh_type);
void *vm_addr = (void *)(sh_addr + rw_slide);
if ((sh_flags & SHF_ALLOC) && (sh_flags & SHF_WRITE) &&
(sh_type == SHT_NOBITS || sh_type == SHT_PROGBITS))
{
if (sh_type == SHT_NOBITS) {
MemSet(vm_addr, sh_size, 0); // .bss section
} else {
MemMove(vm_addr, (void *)m->sinfo[i].vm_addr, sh_size);
}
// use r/w location instead of r/o location from now on
// If this section was in a large resource, block is a
// temporary heap buffer that is now freed.
// fixme large temporary buffers suck
if (m->sinfo[i].resource) {
MemHandleUnlock(m->sinfo[i].resource);
DmReleaseResource(m->sinfo[i].resource);
} else if (m->sinfo[i].is_ftr) {
FtrPtrFree(m->prgId,m->ftrId+i);
}
m->sinfo[i].block = m->rw_block;
m->sinfo[i].vm_addr = (uintptr_t)vm_addr;
m->sinfo[i].resource = 0;
m->sinfo[i].is_ftr = false;
}
}
// find symtab and string sections (both unique)
m->syms = NULL;
m->symcount = 0;
m->strings = 0;
for (i = 0; (shdr = section_for_index(m->ehdr, i)); i++) {
if (swap32(shdr->sh_type) == SHT_SYMTAB) {
m->syms = (Elf32_Sym *)m->sinfo[i].vm_addr;
m->symcount = swap32(shdr->sh_size) / sizeof(Elf32_Sym);
}
if (swap32(shdr->sh_type) == SHT_STRTAB) {
m->strings = (char *)m->sinfo[i].vm_addr;
}
}
// find GOT using section named .got
// This must be done AFTER the symtab, strtab, and slides are available
// This must be done BEFORE relocations are performed
m->got = 0;
for (i = 0; (shdr = section_for_index(m->ehdr, i)); i++) {
const char *name = section_name(m, shdr);
if (0 == StrNCompareAscii(name, ".got", 5)) {
m->got = m->sinfo[i].vm_addr;
}
if (0 == StrNCompareAscii(name, ".lib", 4)) {
m->sinfo[i].is_lib = true;
}
}
if (m->is_memsemaphore)
MemSemaphoreReserve(1);
// perform relocations
// Don't use Thumb interworkable addresses for any relocation.
// All of these symbols should be section symbols, and any
// interwork mangling should have been done by peal-postlink.
for (i = 0; (shdr = section_for_index(m->ehdr, i)); i++) {
uint32_t sh_size = swap32(shdr->sh_size);
uint32_t sh_type = swap32(shdr->sh_type);
Elf32_Rela *rel, *relend;
uint32_t dst_base;
uintptr_t dst_block;
size_t dst_size;
uint32_t dst_index;
uintptr_t dst_thunks;
if (sh_type != SHT_RELA) continue;
free_thunk_descs();
rel = (Elf32_Rela *)m->sinfo[i].vm_addr;
relend = rel + sh_size / sizeof(Elf32_Rela);
dst_index = swap32(shdr->sh_info);
dst_base = m->sinfo[dst_index].vm_addr;
dst_block = m->sinfo[dst_index].block;
dst_size = swap32(section_for_index(m->ehdr, dst_index)->sh_size);
dst_thunks = dst_base + dst_size; // assume postlinker aligned this
for ( ; rel < relend; rel++) {
uint32_t dst_offset;
uint32_t sym_index;
uintptr_t dst;
uint32_t addend;
uintptr_t symbol;
dst_offset = swap32(rel->r_offset);
sym_index = ELF32_R_SYM(swap32(rel->r_info));
addend = swap32(rel->r_addend);
symbol = symbol_address(m, m->syms + sym_index, 0, common);
if (!symbol) goto fail;
// *dst is ARM-swapped, and may be in storage memory.
// Use poke32() to change it.
dst = dst_base + dst_offset;
switch (ELF32_R_TYPE(swap32(rel->r_info))) {
case R_ARM_PC24: {
// *dst[0-23] = ((symbol + addend - dst) - 8) / 4
// value must be SIGNED!
int32_t value = symbol + addend - (uintptr_t)dst;
if (value % 4) goto fail;
value = (value - 8) / 4;
if (value != ((value << 8) >> 8)) {
// Relocation no longer fits in 24 bits. Use a thunk.
uintptr_t thunk =
generate_thunk(m, &dst_thunks, dst_block, dst,
symbol + addend, 0);
if (thunk == 0) goto fail;
// Re-aim value at the thunk.
value = thunk - (uintptr_t)dst;
if (value % 4) goto fail;
value = (value - 8) / 4;
if (value != ((value << 8) >> 8)) goto fail;
}
poke32(m,dst_block, dst,
(value & 0x00ffffff) |
(peek32(dst_block, dst) & 0xff000000));
break;
}
case R_ARM_THM_PC22: {
// *(dst+0)[0-10] = (((symbol + addend - dst) - 4) / 2)[11-21]
// *(dst+2)[0-10] = (((symbol + addend - dst) - 4) / 2)[0-10]
// value must be SIGNED!
int32_t value = symbol + addend - (uintptr_t)dst;
if (value % 2) goto fail;
value = (value - 4) / 2;
if (value != ((value << 10) >> 10)) {
// Relocation no longer fits in 22 bits. Use a thunk.
uintptr_t thunk =
generate_thunk(m, &dst_thunks, dst_block, dst,
(symbol+addend) & 0xfffffffe, thumb);
if (thunk == 0) goto fail;
// Re-aim value at the thunk.
value = thunk - (uintptr_t)dst;
if (value % 2) goto fail;
value = (value - 4) / 2;
if (value != ((value << 10) >> 10)) goto fail;
}
poke16(m,dst_block, dst+0,
((value >> 11) & 0x07ff) |
(peek16(dst_block, dst+0) & 0xf800));
poke16(m,dst_block, dst+2,
((value >> 0) & 0x07ff) |
(peek16(dst_block, dst+2) & 0xf800));
break;
}
case R_ARM_ABS32:
// *dst = symbol + addend
poke32(m,dst_block, dst,
symbol + addend);
break;
case R_ARM_REL32:
// *dst = symbol + addend - dst
poke32(m,dst_block, dst,
symbol + addend - (uintptr_t)dst);
break;
case R_ARM_GOTOFF:
// *dst = symbol + addend - GOT
if (!m->got) goto fail;
poke32(m,dst_block, dst,
symbol + addend - m->got);
break;
default:
break;
}
}
}
if (m->is_memsemaphore)
MemSemaphoreRelease(1);
// find ARM-side stub function
stub_sym = symbol_lookup(m, "PealArmStub", 0);
if (stub_sym) {
// Don't use a Thumb interworkable address for the stub,
// because PceNativeCall can't handle it.
m->stub = symbol_address(m, stub_sym, 0, NULL);
} else {
m->stub = 0;
}
// fixme call initializers and C++ constructors here
free_thunk_descs();
return true;
fail:
if (m->is_memsemaphore)
MemSemaphoreRelease(1);
return false;
}
PealModule *PealLoad(void *mem)
{
    // Load a module from a single contiguous ELF image already in memory.
    // All section contents live inside `mem`, so each section info entry
    // simply points at the section's file offset within the image.
    const Elf32_Ehdr *ehdr = (const Elf32_Ehdr *)mem;
    const Elf32_Shdr *shdr;
    PealModule *m;
    int i;

    m = allocate(ehdr);
    if (m == NULL) return NULL;

    // find sections (contiguous version)
    for (i = 0; (shdr = section_for_index(ehdr, i)) != NULL; i++) {
        m->sinfo[i].block = (uintptr_t)mem;
        m->sinfo[i].vm_addr = (uintptr_t)mem + swap32(shdr->sh_offset);
    }

    if (!load(m, NULL)) {
        cleanup(m);
        return NULL;
    }
    return m;
}
Boolean isCodeSection(const Elf32_Shdr* shdr)
{
    // Classify a section: true for code / read-only material, false for
    // linker metadata and allocatable writable data.
    const uint32_t flags = swap32(shdr->sh_flags);
    const uint32_t type  = swap32(shdr->sh_type);

    switch (type) {
    case SHT_SYMTAB:
    case SHT_STRTAB:
    case SHT_RELA:
        // symbol/string/relocation tables are metadata, never code
        return false;
    case SHT_PROGBITS:
    case SHT_NOBITS:
        // writable allocated sections (.data / .bss) are not code
        if ((flags & SHF_ALLOC) && (flags & SHF_WRITE)) {
            return false;
        }
        break;
    default:
        break;
    }
    return true;
}
/*
 * Load a module whose ELF image is split across a run of Palm OS resources
 * of `type`: resource baseID holds the ELF header + section headers, and
 * each non-empty section follows in consecutive resource IDs. Sections
 * larger than one resource are split and concatenated here.
 *   common  - passed through to load(); presumably provides fallback
 *             symbols for lookup — TODO confirm against load()/symbol_address
 *   prgId   - creator ID used for Feature Manager allocations
 *   ftrId   - base feature ID; ftrId+1+i is used for section i's chunk
 *   memDup  - copy the header resource into feature memory and force all
 *             sections through the copy path (never lock resources in place)
 *   onlyFtr - on success, move the PealModule struct itself into feature memory
 *   memSema - stored on the module; load() uses it to wrap relocation in
 *             MemSemaphoreReserve/Release
 * Returns the loaded module, or NULL on any failure (cleanup() releases
 * everything acquired so far, including the header resource).
 */
PealModule *PealLoadFromResources(DmResType type, DmResID baseID, const PealModule *common, UInt32 prgId, UInt16 ftrId, Boolean memDup, Boolean onlyFtr, Boolean memSema)
{
    int i;
    int resID;
    PealModule *m;
    const Elf32_Shdr *shdr;
    Boolean rom;
    MemHandle rsrcH;
    const Elf32_Ehdr *ehdr;

    // Resource baseID is the ELF header plus section headers.
    rsrcH = DmGet1Resource(type, baseID);
    if (!rsrcH) return NULL;
    ehdr = (Elf32_Ehdr *)MemHandleLock(rsrcH);
    m = allocate(ehdr);
    if (!m) {
        MemHandleUnlock(rsrcH);
        DmReleaseResource(rsrcH);
        return NULL;
    }
    m->is_memsemaphore = memSema;
    // If the resource is not writable via DmWriteCheck, it lives in
    // write-protected (storage/ROM) memory; used below to decide whether
    // code sections must be copied out rather than locked in place.
    rom = DmWriteCheck((void*)ehdr,0,4) != errNone;
    // find sections (resource version)
    // resource baseID+0 is ehdr+shdrs
    // additional sections are in consecutive resources
    // sections bigger than 65400 bytes are split into multiple resources
    // section 0 (SHT_NULL) has no resource
    // Use section 0's sinfo to stash ehdr's resource
    resID = baseID+1;
    // Reserve ftrId for the duplicated header; per-section feature
    // allocations below use m->ftrId + i.
    m->ftrId = ++ftrId;
    m->prgId = prgId;
    m->sinfo[0].block = (uintptr_t)ehdr;
    m->sinfo[0].vm_addr = 0;
    m->sinfo[0].resource = rsrcH;
    if (memDup)
    {
        // duplicate first section (header)
        size_t resSize;
        resSize = MemHandleSize(rsrcH);
        if (FtrPtrNew(prgId,ftrId,resSize,(void**)&m->sinfo[0].block)!=errNone) {
            cleanup(m);
            return NULL;
        }
        // Module now owns a feature-memory copy of the header; the
        // original resource can be unlocked and released immediately.
        m->ehdr = (Elf32_Ehdr *)m->sinfo[0].block;
        m->sinfo[0].vm_addr = m->sinfo[0].block;
        m->sinfo[0].is_ftr = 1;
        m->sinfo[0].resource = 0;
        DmWrite((void*)m->ehdr,0,ehdr,resSize);
        MemHandleUnlock(rsrcH);
        DmReleaseResource(rsrcH);
    }
    // Walk sections 1..n, binding each to its resource(s).
    for (i = 1; (shdr = section_for_index(m->ehdr, i)); i++) {
        uint32_t sh_type = swap32(shdr->sh_type);
        uint32_t sh_size = swap32(shdr->sh_size);
        size_t offset = 0;        // write offset while concatenating
        size_t left = sh_size;    // bytes of this section still expected
        if (sh_size==0 || sh_type==SHT_NULL || sh_type==SHT_NOBITS) {
            // empty section or .bss section - no resource expected
            // m->sinfo[i] already zeroed
            continue;
        }
        // Consume consecutive resources until the section is complete.
        do {
            size_t resSize;
            MemHandle rsrcH = DmGet1Resource(type, resID++);
            if (!rsrcH) {
                // no resource - bail
                cleanup(m);
                return NULL;
            }
            resSize = MemHandleSize(rsrcH);
            if (resSize > left) {
                // resource too big - bail
                DmReleaseResource(rsrcH);
                cleanup(m);
                return NULL;
            } else if (resSize == sh_size && !memDup && (!rom || !isCodeSection(shdr))) {
                // resource just right - keep it
                // (whole section in one resource, and it is safe to use
                // in place: not forced to duplicate, and not a code
                // section sitting in write-protected memory)
                if (m->sinfo[i].block) {
                    // oops, already concatenating
                    DmReleaseResource(rsrcH);
                    cleanup(m);
                    return NULL;
                }
                m->sinfo[i].block = (uintptr_t)MemHandleLock(rsrcH);
                m->sinfo[i].vm_addr = m->sinfo[i].block;
                m->sinfo[i].resource = rsrcH;
            } else {
                // concatenate multiple resources
                if (!m->sinfo[i].block) {
                    // First chunk: allocate a feature-memory buffer for
                    // the whole section. Section index must fit in the
                    // per-module feature-ID range.
                    if (i>=FTRID_STEP-1 || FtrPtrNew(prgId,ftrId+i,sh_size,(void**)&m->sinfo[i].block)!=errNone) {
                        DmReleaseResource(rsrcH);
                        cleanup(m);
                        return NULL;
                    }
                    m->sinfo[i].vm_addr = m->sinfo[i].block;
                    m->sinfo[i].is_ftr = true;
                }
                // Copy this chunk into place, then let go of the resource.
                DmWrite((void*)m->sinfo[i].block,offset,MemHandleLock(rsrcH), resSize);
                MemHandleUnlock(rsrcH);
                DmReleaseResource(rsrcH);
                offset += resSize;
            }
            left -= resSize;
        } while (left > 0);
    }
    if (load(m,common)) {
        PealModule *m2;
        // Optionally move the module bookkeeping struct itself into
        // feature memory (ftrId-1 == the original caller-supplied ftrId).
        if (onlyFtr && FtrPtrNew(prgId,ftrId-1,m->size,(void**)&m2)==errNone) {
            m->is_ftr = 1;
            DmWrite(m2,0,m,m->size);
            MemPtrFree(m);
            m = m2;
        }
        return m;
    } else {
        cleanup(m); // this cleanup includes rsrcH
        return NULL;
    }
}
void *PealLookupSymbol(const PealModule *m, char *query)
{
    // Resolve a symbol by name; NULL if the module does not define it.
    // Do return Thumb interworkable addresses to client code.
    const Elf32_Sym *found = symbol_lookup(m, query, 0);
    if (found == NULL) {
        return NULL;
    }
    return (void *)symbol_address(m, found, thumb, NULL);
}
uint32_t PealCall(PealModule *m, void *addr, void *arg)
{
    // Call into ARM-side code at `addr` through the module's stub.
    // args does not have to be aligned; ARM side handles misalignment and swap
    PealArgs callArgs;
    callArgs.got = (void *)m->got;
    callArgs.arg = arg;
    callArgs.fn  = addr;
    return PceNativeCall((NativeFuncType *)m->stub, &callArgs);
}
void PealUnload(PealModule *m)
{
    // Release everything held by a loaded module. NULL is a no-op.
    if (m == NULL) {
        return;
    }
    // fixme call terminators and C++ destructors here
    cleanup(m);
}
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -