/* elf64-sparc.c — SPARC64 ELF back end for BFD (GNU binutils).
   Extracted from a code-viewer page ("NoC design based on four MIPS
   cores" · C source · 2,055 lines total · page 1 of 5); this chunk is
   therefore only a fragment of the full file.  */
PTR data; asection *input_section; bfd *output_bfd; char **error_message ATTRIBUTE_UNUSED;{ bfd_vma relocation; bfd_vma insn; bfd_reloc_status_type status; status = init_insn_reloc (abfd, reloc_entry, symbol, data, input_section, output_bfd, &relocation, &insn); if (status != bfd_reloc_other) return status; insn = (insn & ~0x1fff) | 0x1c00 | (relocation & 0x3ff); bfd_put_32 (abfd, insn, (bfd_byte *) data + reloc_entry->address); return bfd_reloc_ok;}/* PLT/GOT stuff *//* Both the headers and the entries are icache aligned. */#define PLT_ENTRY_SIZE 32#define PLT_HEADER_SIZE (4 * PLT_ENTRY_SIZE)#define LARGE_PLT_THRESHOLD 32768#define GOT_RESERVED_ENTRIES 1#define ELF_DYNAMIC_INTERPRETER "/usr/lib/sparcv9/ld.so.1"/* Fill in the .plt section. */static voidsparc64_elf_build_plt (output_bfd, contents, nentries) bfd *output_bfd; unsigned char *contents; int nentries;{ const unsigned int nop = 0x01000000; int i, j; /* The first four entries are reserved, and are initially undefined. We fill them with `illtrap 0' to force ld.so to do something. */ for (i = 0; i < PLT_HEADER_SIZE/4; ++i) bfd_put_32 (output_bfd, 0, contents+i*4); /* The first 32768 entries are close enough to plt1 to get there via a straight branch. */ for (i = 4; i < LARGE_PLT_THRESHOLD && i < nentries; ++i) { unsigned char *entry = contents + i * PLT_ENTRY_SIZE; unsigned int sethi, ba; /* sethi (. - plt0), %g1 */ sethi = 0x03000000 | (i * PLT_ENTRY_SIZE); /* ba,a,pt %xcc, plt1 */ ba = 0x30680000 | (((contents+PLT_ENTRY_SIZE) - (entry+4)) / 4 & 0x7ffff); bfd_put_32 (output_bfd, sethi, entry); bfd_put_32 (output_bfd, ba, entry+4); bfd_put_32 (output_bfd, nop, entry+8); bfd_put_32 (output_bfd, nop, entry+12); bfd_put_32 (output_bfd, nop, entry+16); bfd_put_32 (output_bfd, nop, entry+20); bfd_put_32 (output_bfd, nop, entry+24); bfd_put_32 (output_bfd, nop, entry+28); } /* Now the tricky bit. Entries 32768 and higher are grouped in blocks of 160: 160 entries and 160 pointers. 
This is to separate code from data, which is much friendlier on the cache. */ for (; i < nentries; i += 160) { int block = (i + 160 <= nentries ? 160 : nentries - i); for (j = 0; j < block; ++j) { unsigned char *entry, *ptr; unsigned int ldx; entry = contents + i*PLT_ENTRY_SIZE + j*4*6; ptr = contents + i*PLT_ENTRY_SIZE + block*4*6 + j*8; /* ldx [%o7 + ptr - entry+4], %g1 */ ldx = 0xc25be000 | ((ptr - entry+4) & 0x1fff); bfd_put_32 (output_bfd, 0x8a10000f, entry); /* mov %o7,%g5 */ bfd_put_32 (output_bfd, 0x40000002, entry+4); /* call .+8 */ bfd_put_32 (output_bfd, nop, entry+8); /* nop */ bfd_put_32 (output_bfd, ldx, entry+12); /* ldx [%o7+P],%g1 */ bfd_put_32 (output_bfd, 0x83c3c001, entry+16); /* jmpl %o7+%g1,%g1 */ bfd_put_32 (output_bfd, 0x9e100005, entry+20); /* mov %g5,%o7 */ bfd_put_64 (output_bfd, contents - (entry+4), ptr); } }}/* Return the offset of a particular plt entry within the .plt section. */static bfd_vmasparc64_elf_plt_entry_offset (index) int index;{ int block, ofs; if (index < LARGE_PLT_THRESHOLD) return index * PLT_ENTRY_SIZE; /* See above for details. */ block = (index - LARGE_PLT_THRESHOLD) / 160; ofs = (index - LARGE_PLT_THRESHOLD) % 160; return ((bfd_vma) (LARGE_PLT_THRESHOLD + block*160) * PLT_ENTRY_SIZE + ofs * 6*4);}static bfd_vmasparc64_elf_plt_ptr_offset (index, max) int index, max;{ int block, ofs, last; BFD_ASSERT(index >= LARGE_PLT_THRESHOLD); /* See above for details. */ block = (((index - LARGE_PLT_THRESHOLD) / 160) * 160) + LARGE_PLT_THRESHOLD; ofs = index - block; if (block + 160 > max) last = (max - LARGE_PLT_THRESHOLD) % 160; else last = 160; return (block * PLT_ENTRY_SIZE + last * 6*4 + ofs * 8);}/* Look through the relocs for a section during the first phase, and allocate space in the global offset table or procedure linkage table. 
   */

/* NOTE: this chunk is page 1 of 5 of the original file; this function
   is truncated mid-body at the end of the chunk.

   Scan the relocations of SEC in ABFD during the first link pass,
   reserving .got / .rela.got space and flagging symbols that need PLT
   entries.  Returns false (with bfd_error set where relevant) on
   failure.  */

static boolean
sparc64_elf_check_relocs (abfd, info, sec, relocs)
     bfd *abfd;
     struct bfd_link_info *info;
     asection *sec;
     const Elf_Internal_Rela *relocs;
{
  bfd *dynobj;
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  bfd_vma *local_got_offsets;
  const Elf_Internal_Rela *rel;
  const Elf_Internal_Rela *rel_end;
  asection *sgot;
  asection *srelgot;
  asection *sreloc;

  /* Nothing to allocate for a relocateable link or a non-loaded
     section.  */
  if (info->relocateable || !(sec->flags & SEC_ALLOC))
    return true;

  dynobj = elf_hash_table (info)->dynobj;
  symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
  sym_hashes = elf_sym_hashes (abfd);
  local_got_offsets = elf_local_got_offsets (abfd);

  /* Sections are looked up lazily, once per call, on first use.  */
  sgot = NULL;
  srelgot = NULL;
  sreloc = NULL;

  rel_end = relocs + NUM_SHDR_ENTRIES (& elf_section_data (sec)->rel_hdr);
  for (rel = relocs; rel < rel_end; rel++)
    {
      unsigned long r_symndx;
      struct elf_link_hash_entry *h;

      /* Indices below sh_info are local symbols; the rest map into the
	 global hash entries.  */
      r_symndx = ELF64_R_SYM (rel->r_info);
      if (r_symndx < symtab_hdr->sh_info)
	h = NULL;
      else
	h = sym_hashes[r_symndx - symtab_hdr->sh_info];

      switch (ELF64_R_TYPE_ID (rel->r_info))
	{
	case R_SPARC_GOT10:
	case R_SPARC_GOT13:
	case R_SPARC_GOT22:
	  /* This symbol requires a global offset table entry.  */

	  if (dynobj == NULL)
	    {
	      /* Create the .got section.  */
	      elf_hash_table (info)->dynobj = dynobj = abfd;
	      if (! _bfd_elf_create_got_section (dynobj, info))
		return false;
	    }

	  if (sgot == NULL)
	    {
	      sgot = bfd_get_section_by_name (dynobj, ".got");
	      BFD_ASSERT (sgot != NULL);
	    }

	  /* Dynamic relocs against the GOT are only needed for global
	     symbols, or for any symbol when building a shared object.  */
	  if (srelgot == NULL && (h != NULL || info->shared))
	    {
	      srelgot = bfd_get_section_by_name (dynobj, ".rela.got");
	      if (srelgot == NULL)
		{
		  srelgot = bfd_make_section (dynobj, ".rela.got");
		  if (srelgot == NULL
		      || ! bfd_set_section_flags (dynobj, srelgot,
						  (SEC_ALLOC
						   | SEC_LOAD
						   | SEC_HAS_CONTENTS
						   | SEC_IN_MEMORY
						   | SEC_LINKER_CREATED
						   | SEC_READONLY))
		      || ! bfd_set_section_alignment (dynobj, srelgot, 3))
		    return false;
		}
	    }

	  if (h != NULL)
	    {
	      if (h->got.offset != (bfd_vma) -1)
		{
		  /* We have already allocated space in the .got.  */
		  break;
		}
	      h->got.offset = sgot->_raw_size;

	      /* Make sure this symbol is output as a dynamic symbol.  */
	      if (h->dynindx == -1)
		{
		  if (! bfd_elf64_link_record_dynamic_symbol (info, h))
		    return false;
		}

	      srelgot->_raw_size += sizeof (Elf64_External_Rela);
	    }
	  else
	    {
	      /* This is a global offset table entry for a local symbol.
		 Track per-symbol GOT offsets in a lazily-created array,
		 initialized to -1 meaning "no entry yet".  */
	      if (local_got_offsets == NULL)
		{
		  size_t size;
		  register unsigned int i;

		  size = symtab_hdr->sh_info * sizeof (bfd_vma);
		  local_got_offsets = (bfd_vma *) bfd_alloc (abfd, size);
		  if (local_got_offsets == NULL)
		    return false;
		  elf_local_got_offsets (abfd) = local_got_offsets;
		  for (i = 0; i < symtab_hdr->sh_info; i++)
		    local_got_offsets[i] = (bfd_vma) -1;
		}
	      if (local_got_offsets[r_symndx] != (bfd_vma) -1)
		{
		  /* We have already allocated space in the .got.  */
		  break;
		}
	      local_got_offsets[r_symndx] = sgot->_raw_size;

	      if (info->shared)
		{
		  /* If we are generating a shared object, we need to
		     output a R_SPARC_RELATIVE reloc so that the dynamic
		     linker can adjust this GOT entry.  */
		  srelgot->_raw_size += sizeof (Elf64_External_Rela);
		}
	    }

	  /* Each GOT entry is one 64-bit word.  */
	  sgot->_raw_size += 8;

#if 0
	  /* Doesn't work for 64-bit -fPIC, since sethi/or builds
	     unsigned numbers.  If we permit ourselves to modify code
	     so we get sethi/xor, this could work.  Question: do we
	     consider conditionally re-enabling this for -fpic, once
	     we know about object code models?  */

	  /* If the .got section is more than 0x1000 bytes, we add
	     0x1000 to the value of _GLOBAL_OFFSET_TABLE_, so that 13
	     bit relocations have a greater chance of working.  */
	  if (sgot->_raw_size >= 0x1000
	      && elf_hash_table (info)->hgot->root.u.def.value == 0)
	    elf_hash_table (info)->hgot->root.u.def.value = 0x1000;
#endif

	  break;

	case R_SPARC_WPLT30:
	case R_SPARC_PLT32:
	case R_SPARC_HIPLT22:
	case R_SPARC_LOPLT10:
	case R_SPARC_PCPLT32:
	case R_SPARC_PCPLT22:
	case R_SPARC_PCPLT10:
	case R_SPARC_PLT64:
	  /* This symbol requires a procedure linkage table entry.  We
	     actually build the entry in adjust_dynamic_symbol,
	     because this might be a case of linking PIC code without
	     linking in any dynamic objects, in which case we don't
	     need to generate a procedure linkage table after all.  */

	  if (h == NULL)
	    {
	      /* It does not make sense to have a procedure linkage
		 table entry for a local symbol.  */
	      bfd_set_error (bfd_error_bad_value);
	      return false;
	    }

	  /* Make sure this symbol is output as a dynamic symbol.  */
	  if (h->dynindx == -1)
	    {
	      if (! bfd_elf64_link_record_dynamic_symbol (info, h))
		return false;
	    }

	  h->elf_link_hash_flags |= ELF_LINK_HASH_NEEDS_PLT;
	  break;

	case R_SPARC_PC10:
	case R_SPARC_PC22:
	case R_SPARC_PC_HH22:
	case R_SPARC_PC_HM10:
	case R_SPARC_PC_LM22:
	  /* PC-relative references to _GLOBAL_OFFSET_TABLE_ need no
	     dynamic reloc; anything else falls through.  */
	  if (h != NULL
	      && strcmp (h->root.root.string, "_GLOBAL_OFFSET_TABLE_") == 0)
	    break;
	  /* Fall through.  */
	case R_SPARC_DISP8:
	case R_SPARC_DISP16:
	case R_SPARC_DISP32:
	case R_SPARC_DISP64:
	case R_SPARC_WDISP30:
	case R_SPARC_WDISP22:
	case R_SPARC_WDISP19:
	case R_SPARC_WDISP16:
	  /* PC-relative relocs against local symbols resolve at link
	     time; only global ones fall through to the copy logic.  */
	  if (h == NULL)
	    break;
	  /* Fall through.  */
	case R_SPARC_8:
	case R_SPARC_16:
	case R_SPARC_32:
	case R_SPARC_HI22:
	case R_SPARC_22:
	case R_SPARC_13:
	case R_SPARC_LO10:
	case R_SPARC_UA32:
	case R_SPARC_10:
	case R_SPARC_11:
	case R_SPARC_64:
	case R_SPARC_OLO10:
	case R_SPARC_HH22:
	case R_SPARC_HM10:
	case R_SPARC_LM22:
	case R_SPARC_7:
	case R_SPARC_5:
	case R_SPARC_6:
	case R_SPARC_HIX22:
	case R_SPARC_LOX10:
	case R_SPARC_H44:
	case R_SPARC_M44:
	case R_SPARC_L44:
	case R_SPARC_UA64:
	case R_SPARC_UA16:
	  /* When creating a shared object, we must copy these relocs
	     into the output file.  We create a reloc section in
	     dynobj and make room for the reloc.

	     But don't do this for debugging sections -- this shows up
	     with DWARF2 -- first because they are not loaded, and
	     second because DWARF sez the debug info is not to be
	     biased by the load address.  */
	  if (info->shared && (sec->flags & SEC_ALLOC))
	    {
	      if (sreloc == NULL)
		{
		  const char *name;

		  /* Derive the dynamic reloc section name (".rela" +
		     the section name) from the input's reloc header.  */
		  name = (bfd_elf_string_from_elf_section
			  (abfd,
			   elf_elfheader (abfd)->e_shstrndx,
			   elf_section_data (sec)->rel_hdr.sh_name));
		  if (name == NULL)
		    return false;

		  BFD_ASSERT (strncmp (name, ".rela", 5) == 0
			      && strcmp (bfd_get_section_name (abfd, sec),
					 name + 5) == 0);

		  sreloc = bfd_get_section_by_name (dynobj, name);
		  if (sreloc == NULL)
		    {
		      flagword flags;

		      sreloc = bfd_make_section (dynobj, name);
		      flags = (SEC_HAS_CONTENTS | SEC_READONLY
			       | SEC_IN_MEMORY | SEC_LINKER_CREATED);
		      if ((sec->flags & SEC_ALLOC) != 0)
			flags |= SEC_ALLOC | SEC_LOAD;
		      /* [Truncated here: chunk ends mid-statement;
			 continues on the next page of the source.]  */
		      if (sreloc == NULL
/* [Extraction residue — code-viewer keyboard-shortcut help, not part of
   the source: copy Ctrl+C, search Ctrl+F, full screen F11, larger font
   Ctrl+=, smaller font Ctrl+-, show shortcuts '?'.]  */