elf64-alpha.c

来自 elf64-alpha.c（GNU binutils BFD 的 Alpha ELF 链接器后端；原页面标注的「基于4个mips核的noc设计」与文件内容不符）· C语言 代码 · 共 2,160 行 · 第 1/5 页

C
2,160
字号
/* A relocation function which doesn't do anything.  At final link time
   (output_bfd NULL) this is a no-op; in a relocatable link it only
   adjusts the reloc address by the section's output offset.  */

static bfd_reloc_status_type
elf64_alpha_reloc_nil (abfd, reloc, sym, data, sec, output_bfd, error_message)
     bfd *abfd ATTRIBUTE_UNUSED;
     arelent *reloc;
     asymbol *sym ATTRIBUTE_UNUSED;
     PTR data ATTRIBUTE_UNUSED;
     asection *sec;
     bfd *output_bfd;
     char **error_message ATTRIBUTE_UNUSED;
{
  if (output_bfd)
    reloc->address += sec->output_offset;
  return bfd_reloc_ok;
}

/* A relocation function used for an unsupported reloc.  Identical to
   elf64_alpha_reloc_nil except that it reports bfd_reloc_notsupported.  */

static bfd_reloc_status_type
elf64_alpha_reloc_bad (abfd, reloc, sym, data, sec, output_bfd, error_message)
     bfd *abfd ATTRIBUTE_UNUSED;
     arelent *reloc;
     asymbol *sym ATTRIBUTE_UNUSED;
     PTR data ATTRIBUTE_UNUSED;
     asection *sec;
     bfd *output_bfd;
     char **error_message ATTRIBUTE_UNUSED;
{
  if (output_bfd)
    reloc->address += sec->output_offset;
  return bfd_reloc_notsupported;
}

/* Do the work of the GPDISP relocation: rewrite the ldah/lda pair at
   P_LDAH/P_LDA so that together they materialize GPDISP plus whatever
   addend the pair already encoded.  Returns bfd_reloc_dangerous when
   the two insns are not ldah (opcode 0x09) and lda (opcode 0x08),
   bfd_reloc_overflow when the final displacement does not fit in the
   ldah/lda pair, and bfd_reloc_ok otherwise.  */

static bfd_reloc_status_type
elf64_alpha_do_reloc_gpdisp (abfd, gpdisp, p_ldah, p_lda)
     bfd *abfd;
     bfd_vma gpdisp;
     bfd_byte *p_ldah;
     bfd_byte *p_lda;
{
  bfd_reloc_status_type ret = bfd_reloc_ok;
  bfd_vma addend;
  unsigned long i_ldah, i_lda;

  i_ldah = bfd_get_32 (abfd, p_ldah);
  i_lda = bfd_get_32 (abfd, p_lda);

  /* Complain if the instructions are not correct.  */
  if (((i_ldah >> 26) & 0x3f) != 0x09
      || ((i_lda >> 26) & 0x3f) != 0x08)
    ret = bfd_reloc_dangerous;

  /* Extract the user-supplied offset, mirroring the sign extensions
     that the instructions perform.  */
  addend = ((i_ldah & 0xffff) << 16) | (i_lda & 0xffff);
  /* Sign-extend the combined 32-bit value xor/sub trick: each 16-bit
     half is sign-extended the way ldah/lda would.  */
  addend = (addend ^ 0x80008000) - 0x80008000;

  gpdisp += addend;

  if ((bfd_signed_vma) gpdisp < -(bfd_signed_vma) 0x80000000
      || (bfd_signed_vma) gpdisp >= (bfd_signed_vma) 0x7fff8000)
    ret = bfd_reloc_overflow;

  /* compensate for the sign extension again.  The (gpdisp >> 15) & 1
     term rounds the high half up when the low half will be
     sign-extended negative by lda.  */
  i_ldah = ((i_ldah & 0xffff0000)
	    | (((gpdisp >> 16) + ((gpdisp >> 15) & 1)) & 0xffff));
  i_lda = (i_lda & 0xffff0000) | (gpdisp & 0xffff);

  bfd_put_32 (abfd, i_ldah, p_ldah);
  bfd_put_32 (abfd, i_lda, p_lda);

  return ret;
}

/* The special function for the GPDISP reloc.  Computes gp minus the
   reloc's address and hands the insn pair to
   elf64_alpha_do_reloc_gpdisp.  The reloc's addend is the byte offset
   from the ldah insn to the lda insn.  */

static bfd_reloc_status_type
elf64_alpha_reloc_gpdisp (abfd, reloc_entry, sym, data, input_section,
			  output_bfd, err_msg)
     bfd *abfd;
     arelent *reloc_entry;
     asymbol *sym ATTRIBUTE_UNUSED;
     PTR data;
     asection *input_section;
     bfd *output_bfd;
     char **err_msg;
{
  bfd_reloc_status_type ret;
  bfd_vma gp, relocation;
  bfd_byte *p_ldah, *p_lda;

  /* Don't do anything if we're not doing a final link.  */
  if (output_bfd)
    {
      reloc_entry->address += input_section->output_offset;
      return bfd_reloc_ok;
    }

  /* Both insns of the pair must lie within the section.  */
  if (reloc_entry->address > input_section->_cooked_size ||
      reloc_entry->address + reloc_entry->addend > input_section->_cooked_size)
    return bfd_reloc_outofrange;

  /* The gp used in the portion of the output object to which this
     input object belongs is cached on the input bfd.  */
  gp = _bfd_get_gp_value (abfd);

  relocation = (input_section->output_section->vma
		+ input_section->output_offset
		+ reloc_entry->address);

  p_ldah = (bfd_byte *) data + reloc_entry->address;
  p_lda = p_ldah + reloc_entry->addend;

  ret = elf64_alpha_do_reloc_gpdisp (abfd, gp - relocation, p_ldah, p_lda);

  /* Complain if the instructions are not correct.  */
  if (ret == bfd_reloc_dangerous)
    *err_msg = _("GPDISP relocation did not find ldah and lda instructions");

  return ret;
}

/* A mapping from BFD reloc types to Alpha ELF reloc types.
*/struct elf_reloc_map{  bfd_reloc_code_real_type bfd_reloc_val;  int elf_reloc_val;};static const struct elf_reloc_map elf64_alpha_reloc_map[] ={  {BFD_RELOC_NONE,		R_ALPHA_NONE},  {BFD_RELOC_32,		R_ALPHA_REFLONG},  {BFD_RELOC_64,		R_ALPHA_REFQUAD},  {BFD_RELOC_CTOR,		R_ALPHA_REFQUAD},  {BFD_RELOC_GPREL32,		R_ALPHA_GPREL32},  {BFD_RELOC_ALPHA_ELF_LITERAL,	R_ALPHA_LITERAL},  {BFD_RELOC_ALPHA_LITUSE,	R_ALPHA_LITUSE},  {BFD_RELOC_ALPHA_GPDISP,	R_ALPHA_GPDISP},  {BFD_RELOC_23_PCREL_S2,	R_ALPHA_BRADDR},  {BFD_RELOC_ALPHA_HINT,	R_ALPHA_HINT},  {BFD_RELOC_16_PCREL,		R_ALPHA_SREL16},  {BFD_RELOC_32_PCREL,		R_ALPHA_SREL32},  {BFD_RELOC_64_PCREL,		R_ALPHA_SREL64},/* The BFD_RELOC_ALPHA_USER_* relocations are used by the assembler to process   the explicit !<reloc>!sequence relocations, and are mapped into the normal   relocations at the end of processing.  */  {BFD_RELOC_ALPHA_USER_LITERAL,	R_ALPHA_LITERAL},  {BFD_RELOC_ALPHA_USER_LITUSE_BASE,	R_ALPHA_LITUSE},  {BFD_RELOC_ALPHA_USER_LITUSE_BYTOFF,	R_ALPHA_LITUSE},  {BFD_RELOC_ALPHA_USER_LITUSE_JSR,	R_ALPHA_LITUSE},  {BFD_RELOC_ALPHA_USER_GPDISP,		R_ALPHA_GPDISP},  {BFD_RELOC_ALPHA_USER_GPRELHIGH,	R_ALPHA_GPRELHIGH},  {BFD_RELOC_ALPHA_USER_GPRELLOW,	R_ALPHA_GPRELLOW},};/* Given a BFD reloc type, return a HOWTO structure.  */static reloc_howto_type *elf64_alpha_bfd_reloc_type_lookup (abfd, code)     bfd *abfd ATTRIBUTE_UNUSED;     bfd_reloc_code_real_type code;{  const struct elf_reloc_map *i, *e;  i = e = elf64_alpha_reloc_map;  e += sizeof (elf64_alpha_reloc_map) / sizeof (struct elf_reloc_map);  for (; i != e; ++i)    {      if (i->bfd_reloc_val == code)	return &elf64_alpha_howto_table[i->elf_reloc_val];    }  return 0;}/* Given an Alpha ELF reloc type, fill in an arelent structure.  
*/

static void
elf64_alpha_info_to_howto (abfd, cache_ptr, dst)
     bfd *abfd ATTRIBUTE_UNUSED;
     arelent *cache_ptr;
     Elf64_Internal_Rela *dst;
{
  unsigned r_type;

  /* The howto table is indexed directly by the ELF reloc type.  */
  r_type = ELF64_R_TYPE(dst->r_info);
  BFD_ASSERT (r_type < (unsigned int) R_ALPHA_max);
  cache_ptr->howto = &elf64_alpha_howto_table[r_type];
}

/* These functions do relaxation for Alpha ELF.

   Currently I'm only handling what I can do with existing compiler
   and assembler support, which means no instructions are removed,
   though some may be nopped.  At this time GCC does not emit enough
   information to do all of the relaxing that is possible.  It will
   take some not small amount of work for that to happen.

   There are a couple of interesting papers that I once read on this
   subject, that I cannot find references to at the moment, that
   related to Alpha in particular.  They are by David Wall, then of
   DEC WRL.  */

/* Alpha opcodes and instruction encodings used by the relaxation code.  */
#define OP_LDA		0x08
#define OP_LDAH		0x09
#define INSN_JSR	0x68004000
#define INSN_JSR_MASK	0xfc00c000
#define OP_LDQ		0x29
#define OP_BR		0x30
#define OP_BSR		0x34
#define INSN_UNOP	0x2fe00000

/* State threaded through the relaxation routines while one section is
   being processed.  */
struct alpha_relax_info
{
  bfd *abfd;
  asection *sec;
  bfd_byte *contents;		/* Contents of SEC, patched in place.  */
  Elf_Internal_Rela *relocs, *relend;
  struct bfd_link_info *link_info;
  boolean changed_contents;	/* Set when CONTENTS are rewritten.  */
  boolean changed_relocs;	/* Set when reloc entries are rewritten.  */
  bfd_vma gp;			/* gp value used for displacement checks.  */
  bfd *gotobj;
  asection *tsec;		/* NOTE(review): presumably the target
				   symbol's section — confirm against the
				   relax_section caller (not in view).  */
  struct alpha_elf_link_hash_entry *h;
  struct alpha_elf_got_entry *gotent;
  unsigned char other;
};

/* Forward declarations for the relaxation routines below.  */
static Elf_Internal_Rela * elf64_alpha_relax_with_lituse
  PARAMS((struct alpha_relax_info *info, bfd_vma symval,
          Elf_Internal_Rela *irel, Elf_Internal_Rela *irelend));
static boolean elf64_alpha_relax_without_lituse
  PARAMS((struct alpha_relax_info *info, bfd_vma symval,
          Elf_Internal_Rela *irel));
static bfd_vma elf64_alpha_relax_opt_call
  PARAMS((struct alpha_relax_info *info, bfd_vma symval));
static boolean elf64_alpha_relax_section
  PARAMS((bfd *abfd, asection *sec, struct bfd_link_info *link_info,
	  boolean *again));

static Elf_Internal_Rela
/* Search [REL, RELEND) for a relocation of type TYPE applying at
   section offset OFFSET; return it, or NULL if there is none.  */
*elf64_alpha_find_reloc_at_ofs (rel, relend, offset, type)
     Elf_Internal_Rela *rel, *relend;
     bfd_vma offset;
     int type;
{
  while (rel < relend)
    {
      if (rel->r_offset == offset && ELF64_R_TYPE (rel->r_info) == type)
	return rel;
      ++rel;
    }
  return NULL;
}

/* Relax the LITERAL reloc at IREL, whose uses are described by the
   R_ALPHA_LITUSE relocs immediately following it in [IREL+1, IRELEND),
   given that the referenced symbol's value is SYMVAL.  Rewrites the
   use insns (and possibly the literal insn itself) in place and
   retargets their relocs.  Returns IREL unchanged when the literal
   insn is not the expected ldq.  */

static Elf_Internal_Rela *
elf64_alpha_relax_with_lituse (info, symval, irel, irelend)
     struct alpha_relax_info *info;
     bfd_vma symval;
     Elf_Internal_Rela *irel, *irelend;
{
  Elf_Internal_Rela *urel;
  int flags, count, i;
  bfd_signed_vma disp;
  boolean fits16;
  boolean fits32;
  boolean lit_reused = false;
  boolean all_optimized = true;
  unsigned int lit_insn;

  lit_insn = bfd_get_32 (info->abfd, info->contents + irel->r_offset);
  if (lit_insn >> 26 != OP_LDQ)
    {
      ((*_bfd_error_handler)
       ("%s: %s+0x%lx: warning: LITERAL relocation against unexpected insn",
	bfd_get_filename (info->abfd), info->sec->name,
	(unsigned long)irel->r_offset));
      return irel;
    }

  /* Summarize how this particular LITERAL is used.  FLAGS collects a
     bit per LITUSE kind (addend 0..3); COUNT is the number of LITUSE
     relocs that follow.  */
  for (urel = irel+1, flags = count = 0; urel < irelend; ++urel, ++count)
    {
      if (ELF64_R_TYPE (urel->r_info) != R_ALPHA_LITUSE)
	break;
      if (urel->r_addend >= 0 && urel->r_addend <= 3)
	flags |= 1 << urel->r_addend;
    }

  /* A little preparation for the loop...  */
  disp = symval - info->gp;

  for (urel = irel+1, i = 0; i < count; ++i, ++urel)
    {
      unsigned int insn;
      int insn_disp;
      bfd_signed_vma xdisp;

      insn = bfd_get_32 (info->abfd, info->contents + urel->r_offset);

      switch (urel->r_addend)
	{
	default: /* 0 = ADDRESS FORMAT */
	  /* This type is really just a placeholder to note that all
	     uses cannot be optimized, but to still allow some.  */
	  all_optimized = false;
	  break;

	case 1: /* MEM FORMAT */
	  /* We can always optimize 16-bit displacements.  */

	  /* Extract the displacement from the instruction, sign-extending
	     it if necessary, then test whether it is within 16 or 32 bits
	     displacement from GP.  */
	  insn_disp = insn & 0x0000ffff;
	  if (insn_disp & 0x00008000)
	    insn_disp |= 0xffff0000;  /* Negative: sign-extend.  */

	  xdisp = disp + insn_disp;
	  fits16 = (xdisp >= - (bfd_signed_vma) 0x00008000 && xdisp < 0x00008000);
	  fits32 = (xdisp >= - (bfd_signed_vma) 0x80000000 && xdisp < 0x7fff8000);

	  if (fits16)
	    {
	      /* Take the op code and dest from this insn, take the base
		 register from the literal insn.  Leave the offset alone.  */
	      insn = (insn & 0xffe0ffff) | (lit_insn & 0x001f0000);
	      urel->r_info = ELF64_R_INFO (ELF64_R_SYM (irel->r_info),
					   R_ALPHA_GPRELLOW);
	      urel->r_addend = irel->r_addend;
	      info->changed_relocs = true;

	      bfd_put_32 (info->abfd, insn, info->contents + urel->r_offset);
	      info->changed_contents = true;
	    }

	  /* If all mem+byte, we can optimize 32-bit mem displacements.  */
	  else if (fits32 && !(flags & ~6))
	    {
	      /* FIXME: sanity check that lit insn Ra is mem insn Rb.  */

	      /* Reuse the literal insn slot as the ldah of a
		 GPRELHIGH/GPRELLOW pair.  */
	      irel->r_info = ELF64_R_INFO (ELF64_R_SYM (irel->r_info),
					   R_ALPHA_GPRELHIGH);
	      lit_insn = (OP_LDAH << 26) | (lit_insn & 0x03ff0000);
	      bfd_put_32 (info->abfd, lit_insn,
			  info->contents + irel->r_offset);
	      lit_reused = true;
	      info->changed_contents = true;

	      urel->r_info = ELF64_R_INFO (ELF64_R_SYM (irel->r_info),
					   R_ALPHA_GPRELLOW);
	      urel->r_addend = irel->r_addend;
	      info->changed_relocs = true;
	    }
	  else
	    all_optimized = false;
	  break;

	case 2: /* BYTE OFFSET FORMAT */
	  /* We can always optimize byte instructions.  */

	  /* FIXME: sanity check the insn for byte op.  Check that the
	     literal dest reg is indeed Rb in the byte insn.  */
	  insn = (insn & ~0x001ff000) | ((symval & 7) << 13) | 0x1000;

	  /* The use insn no longer needs a reloc at all.  */
	  urel->r_info = ELF64_R_INFO (0, R_ALPHA_NONE);
	  urel->r_addend = 0;
	  info->changed_relocs = true;

	  bfd_put_32 (info->abfd, insn, info->contents + urel->r_offset);
	  info->changed_contents = true;
	  break;

	case 3: /* CALL FORMAT */
	  {
	    /* If not zero, place to jump without needing pv.  */
	    bfd_vma optdest = elf64_alpha_relax_opt_call (info, symval);
	    bfd_vma org = (info->sec->output_section->vma
			   + info->sec->output_offset
			   + urel->r_offset + 4);
	    bfd_signed_vma odisp;

	    /* Convert jsr to bsr/br only when the branch displacement
	       fits in the 21-bit branch field (+-4MB from PC+4).  */
	    odisp = (optdest ? optdest : symval) - org;
	    if (odisp >= -0x400000 && odisp < 0x400000)
	      {
		Elf_Internal_Rela *xrel;

		/* Preserve branch prediction call stack when possible.  */
		if ((insn & INSN_JSR_MASK) == INSN_JSR)
		  insn = (OP_BSR << 26) | (insn & 0x03e00000);
		else
		  insn = (OP_BR << 26) | (insn & 0x03e00000);

		urel->r_info = ELF64_R_INFO (ELF64_R_SYM (irel->r_info),
					     R_ALPHA_BRADDR);
		urel->r_addend = irel->r_addend;

		if (optdest)
		  urel->r_addend += optdest - symval;
		else
		  all_optimized = false;

		bfd_put_32 (info->abfd, insn, info->contents + urel->r_offset);

		/* Kill any HINT reloc that might exist for this insn.  */
		xrel = (elf64_alpha_find_reloc_at_ofs
			(info->relocs, info->relend, urel->r_offset,
			 R_ALPHA_HINT));
		if (xrel)
		  xrel->r_info = ELF64_R_INFO (0, R_ALPHA_NONE);

		info->changed_contents = true;
		info->changed_relocs = true;
	      }
	    else
	      all_optimized = false;

	    /* ??? If target gp == current gp we can eliminate the gp reload.
	       This does depend on every place a gp could be reloaded will
	       be, which currently happens for all code produced by gcc, but
	       not necessarily by hand-coded assembly, or if sibling calls
	       are enabled in gcc.

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?