
📄 dl-load.c

📁 Newlib embedded C library standard implementation code
💻 C
📖 Page 1 of 4
            struct loadcmd *c = &loadcmds[nloadcmds++];
            c->mapstart = ph->p_vaddr & ~(ph->p_align - 1);
            c->mapend = ((ph->p_vaddr + ph->p_filesz + _dl_pagesize - 1)
                         & ~(_dl_pagesize - 1));
            c->dataend = ph->p_vaddr + ph->p_filesz;
            c->allocend = ph->p_vaddr + ph->p_memsz;
            c->mapoff = ph->p_offset & ~(ph->p_align - 1);

            /* Optimize a common case.  */
#if (PF_R | PF_W | PF_X) == 7 && (PROT_READ | PROT_WRITE | PROT_EXEC) == 7
            c->prot = (PF_TO_PROT
                       >> ((ph->p_flags & (PF_R | PF_W | PF_X)) * 4)) & 0xf;
#else
            c->prot = 0;
            if (ph->p_flags & PF_R)
              c->prot |= PROT_READ;
            if (ph->p_flags & PF_W)
              c->prot |= PROT_WRITE;
            if (ph->p_flags & PF_X)
              c->prot |= PROT_EXEC;
#endif
          }
          break;
        }

    /* Now process the load commands and map segments into memory.  */
    c = loadcmds;

    /* Length of the sections to be loaded.  */
    maplength = loadcmds[nloadcmds - 1].allocend - c->mapstart;

    if (__builtin_expect (type, ET_DYN) == ET_DYN)
      {
        /* This is a position-independent shared object.  We can let the
           kernel map it anywhere it likes, but we must have space for all
           the segments in their specified positions relative to the first.
           So we map the first segment without MAP_FIXED, but with its
           extent increased to cover all the segments.  Then we remove
           access from excess portion, and there is known sufficient space
           there to remap from the later segments.

           As a refinement, sometimes we have an address that we would
           prefer to map such objects at; but this is only a preference,
           the OS can do whatever it likes. */
        ElfW(Addr) mappref;
        mappref = (ELF_PREFERRED_ADDRESS (loader, maplength, c->mapstart)
                   - MAP_BASE_ADDR (l));

        /* Remember which part of the address space this object uses.  */
        l->l_map_start = (ElfW(Addr)) mmap ((void *) mappref, maplength,
                                            c->prot, MAP_COPY | MAP_FILE,
                                            fd, c->mapoff);
        if ((void *) l->l_map_start == MAP_FAILED)
          {
          map_error:
            errstring = N_("failed to map segment from shared object");
            goto call_lose_errno;
          }

        l->l_map_end = l->l_map_start + maplength;
        l->l_addr = l->l_map_start - c->mapstart;

        /* Change protection on the excess portion to disallow all access;
           the portions we do not remap later will be inaccessible as if
           unallocated.  Then jump into the normal segment-mapping loop to
           handle the portion of the segment past the end of the file
           mapping.  */
        mprotect ((caddr_t) (l->l_addr + c->mapend),
                  loadcmds[nloadcmds - 1].allocend - c->mapend,
                  PROT_NONE);

        goto postmap;
      }
    else
      {
        /* This object is loaded at a fixed address.  This must never
           happen for objects loaded with dlopen().  */
        if (__builtin_expect (mode & __RTLD_DLOPEN, 0))
          {
            errstring = N_("cannot dynamically load executable");
            goto call_lose;
          }

        /* Notify ELF_PREFERRED_ADDRESS that we have to load this one
           fixed.  */
        ELF_FIXED_ADDRESS (loader, c->mapstart);
      }

    /* Remember which part of the address space this object uses.  */
    l->l_map_start = c->mapstart + l->l_addr;
    l->l_map_end = l->l_map_start + maplength;

    while (c < &loadcmds[nloadcmds])
      {
        if (c->mapend > c->mapstart
            /* Map the segment contents from the file.  */
            && (mmap ((void *) (l->l_addr + c->mapstart),
                      c->mapend - c->mapstart, c->prot,
                      MAP_FIXED | MAP_COPY | MAP_FILE, fd, c->mapoff)
                == MAP_FAILED))
          goto map_error;

      postmap:
        if (l->l_phdr == 0
            && c->mapoff <= header->e_phoff
            && (c->mapend - c->mapstart + c->mapoff
                >= header->e_phoff + header->e_phnum * sizeof (ElfW(Phdr))))
          /* Found the program header in this segment.  */
          l->l_phdr = (void *) (c->mapstart + header->e_phoff - c->mapoff);

        if (c->allocend > c->dataend)
          {
            /* Extra zero pages should appear at the end of this segment,
               after the data mapped from the file.  */
            ElfW(Addr) zero, zeroend, zeropage;

            zero = l->l_addr + c->dataend;
            zeroend = l->l_addr + c->allocend;
            zeropage = (zero + _dl_pagesize - 1) & ~(_dl_pagesize - 1);

            if (zeroend < zeropage)
              /* All the extra data is in the last page of the segment.
                 We can just zero it.  */
              zeropage = zeroend;

            if (zeropage > zero)
              {
                /* Zero the final part of the last page of the segment.  */
                if ((c->prot & PROT_WRITE) == 0)
                  {
                    /* Dag nab it.  */
                    if (mprotect ((caddr_t) (zero & ~(_dl_pagesize - 1)),
                                  _dl_pagesize, c->prot|PROT_WRITE) < 0)
                      {
                        errstring = N_("cannot change memory protections");
                        goto call_lose_errno;
                      }
                  }
                memset ((void *) zero, '\0', zeropage - zero);
                if ((c->prot & PROT_WRITE) == 0)
                  mprotect ((caddr_t) (zero & ~(_dl_pagesize - 1)),
                            _dl_pagesize, c->prot);
              }

            if (zeroend > zeropage)
              {
                /* Map the remaining zero pages in from the zero fill FD.  */
                caddr_t mapat;
                mapat = mmap ((caddr_t) zeropage, zeroend - zeropage,
                              c->prot, MAP_ANON|MAP_PRIVATE|MAP_FIXED,
                              ANONFD, 0);
                if (mapat == MAP_FAILED)
                  {
                    errstring = N_("cannot map zero-fill pages");
                    goto call_lose_errno;
                  }
              }
          }

        ++c;
      }

    if (l->l_phdr == NULL)
      {
        /* The program header is not contained in any of the segments.
           We have to allocate memory ourself and copy it over from
           out temporary place.  */
        ElfW(Phdr) *newp = (ElfW(Phdr) *) malloc (header->e_phnum
                                                  * sizeof (ElfW(Phdr)));
        if (newp == NULL)
          {
            errstring = N_("cannot allocate memory for program header");
            goto call_lose_errno;
          }

        l->l_phdr = memcpy (newp, phdr,
                            (header->e_phnum * sizeof (ElfW(Phdr))));
        l->l_phdr_allocated = 1;
      }
    else
      /* Adjust the PT_PHDR value by the runtime load address.  */
      l->l_phdr = (ElfW(Addr)) l->l_phdr + l->l_addr;
  }

  /* We are done mapping in the file.  We no longer need the descriptor.  */
  close (fd);
  /* Signal that we closed the file.  */
  fd = -1;

  if (l->l_type == lt_library && type == ET_EXEC)
    l->l_type = lt_executable;

  if (l->l_ld == 0)
    {
      if (type == ET_DYN)
        {
          errstring = N_("object file has no dynamic section");
          goto call_lose;
        }
    }
  else
    l->l_ld = (ElfW(Addr)) l->l_ld + l->l_addr;

  l->l_entry += l->l_addr;

  if (__builtin_expect (_dl_debug_mask & DL_DEBUG_FILES, 0))
    _dl_debug_printf ("  dynamic: 0x%0*lx  base: 0x%0*lx   size: 0x%0*Zx\n"
                      "    entry: 0x%0*lx  phdr: 0x%0*lx  phnum:   %*u\n\n",
                      (int) sizeof (void *) * 2, (unsigned long int) l->l_ld,
                      (int) sizeof (void *) * 2, (unsigned long int) l->l_addr,
                      (int) sizeof (void *) * 2, maplength,
                      (int) sizeof (void *) * 2, (unsigned long int) l->l_entry,
                      (int) sizeof (void *) * 2, (unsigned long int) l->l_phdr,
                      (int) sizeof (void *) * 2, l->l_phnum);

  elf_get_dynamic_info (l);

  /* Make sure we are dlopen()ing an object which has the DF_1_NOOPEN
     flag set.  */
  if (__builtin_expect (l->l_flags_1 & DF_1_NOOPEN, 0)
      && (mode & __RTLD_DLOPEN))
    {
      /* We are not supposed to load this object.  Free all resources.  */
      munmap ((void *) l->l_map_start, l->l_map_end - l->l_map_start);

      if (!l->l_libname->dont_free)
        free (l->l_libname);

      if (l->l_phdr_allocated)
        free ((void *) l->l_phdr);

      errstring = N_("shared object cannot be dlopen()ed");
      goto call_lose;
    }

  if (l->l_info[DT_HASH])
    _dl_setup_hash (l);

  /* If this object has DT_SYMBOLIC set modify now its scope.  We don't
     have to do this for the main map.  */
  if (__builtin_expect (l->l_info[DT_SYMBOLIC] != NULL, 0)
      && &l->l_searchlist != l->l_scope[0])
    {
      /* Create an appropriate searchlist.  It contains only this map.
         XXX This is the definition of DT_SYMBOLIC in SysVr4.  The old
         GNU ld.so implementation had a different interpretation which
         is more reasonable.  We are prepared to add this possibility
         back as part of a GNU extension of the ELF format.  */
      l->l_symbolic_searchlist.r_list =
        (struct link_map **) malloc (sizeof (struct link_map *));

      if (l->l_symbolic_searchlist.r_list == NULL)
        {
          errstring = N_("cannot create searchlist");
          goto call_lose_errno;
        }

      l->l_symbolic_searchlist.r_list[0] = l;
      l->l_symbolic_searchlist.r_nlist = 1;

      /* Now move the existing entries one back.  */
      memmove (&l->l_scope[1], &l->l_scope[0],
               (l->l_scope_max - 1) * sizeof (l->l_scope[0]));

      /* Now add the new entry.  */
      l->l_scope[0] = &l->l_symbolic_searchlist;
    }

  /* Remember whether this object must be initialized first.  */
  if (l->l_flags_1 & DF_1_INITFIRST)
    _dl_initfirst = l;

  /* Finally the file information.  */
  l->l_dev = st.st_dev;
  l->l_ino = st.st_ino;

  return l;
}


/* Print search path.  */
static void
print_search_path (struct r_search_path_elem **list,
                   const char *what, const char *name)
{
  char buf[max_dirnamelen + max_capstrlen];
  int first = 1;

  _dl_debug_printf (" search path=");

  while (*list != NULL && (*list)->what == what) /* Yes, ==.  */
    {
      char *endp = memcpy (buf, (*list)->dirname, (*list)->dirnamelen);
      size_t cnt;

      endp += (*list)->dirnamelen;

      for (cnt = 0; cnt < ncapstr; ++cnt)
        if ((*list)->status[cnt] != nonexisting)
          {
            char *cp = memcpy (endp, capstr[cnt].str, capstr[cnt].len);
            cp += capstr[cnt].len;
            if (cp == buf || (cp == buf + 1 && buf[0] == '/'))
              cp[0] = '\0';
            else
              cp[-1] = '\0';

            _dl_debug_printf_c (first ? "%s" : ":%s", buf);
            first = 0;
          }

      ++list;
    }

  if (name != NULL)
    _dl_debug_printf_c ("\t\t(%s from file %s)\n", what,
                        name[0] ? name : _dl_argv[0]);
  else
    _dl_debug_printf_c ("\t\t(%s)\n", what);
}


/* Open a file and verify it is an ELF file for this architecture.  We
   ignore only ELF files for other architectures.  Non-ELF files and
   ELF files with different header information cause fatal errors since
   this could mean there is something wrong in the installation and the
   user might want to know about this.  */
static int
open_verify (const char *name, struct filebuf *fbp)
{
  /* This is the expected ELF header.  */
#define ELF32_CLASS ELFCLASS32
#define ELF64_CLASS ELFCLASS64
#ifndef VALID_ELF_HEADER
# define VALID_ELF_HEADER(hdr,exp,size)  (memcmp (hdr, exp, size) == 0)
# define VALID_ELF_OSABI(osabi)          (osabi == ELFOSABI_SYSV)
# define VALID_ELF_ABIVERSION(ver)       (ver == 0)
#endif
  static const unsigned char expected[EI_PAD] =
  {
    [EI_MAG0] = ELFMAG0,
    [EI_MAG1] = ELFMAG1,
    [EI_MAG2] = ELFMAG2,
    [EI_MAG3] = ELFMAG3,
    [EI_CLASS] = ELFW(CLASS),
    [EI_DATA] = byteorder,
    [EI_VERSION] = EV_CURRENT,
    [EI_OSABI] = ELFOSABI_SYSV,
    [EI_ABIVERSION] = 0
  };
  static const struct
  {
    ElfW(Word) vendorlen;
    ElfW(Word) datalen;
    ElfW(Word) type;
    char vendor[4];
  } expected_note = { 4, 16, 1, "GNU" };
  int fd;
  /* Initialize it to make the compiler happy.  */
  const char *errstring = NULL;
  int errval = 0;

  /* Open the file.  We always open files read-only.  */
  fd = open (name, O_RDONLY);
  if (fd != -1)
    {
      ElfW(Ehdr) *ehdr;
      ElfW(Phdr) *phdr, *ph;
      ElfW(Word) *abi_note, abi_note_buf[8];
      unsigned int osversion;
      size_t maplength;

      /* We successfully openened the file.  Now verify it is a file
         we can use.  */
      __set_errno (0);
      fbp->len = __libc_read (fd, fbp->buf, sizeof (fbp->buf));

      /* This is where the ELF header is loaded.  */
      assert (sizeof (fbp->buf) > sizeof (ElfW(Ehdr)));
      ehdr = (ElfW(Ehdr) *) fbp->buf;

      /* Now run the tests.  */
      if (__builtin_expect (fbp->len < (ssize_t) sizeof (ElfW(Ehdr)), 0))
        {
          errval = errno;
          errstring = (errval == 0
                       ? N_("file too short") : N_("cannot read file data"));
        call_lose:
          lose (errval, fd, name, NULL, NULL, errstring);
        }

      /* See whether the ELF header is what we expect.  */
      if (__builtin_expect (! VALID_ELF_HEADER (ehdr->e_ident, expected,
                                                EI_PAD), 0))
        {
          /* Something is wrong.  */
          if (*(Elf32_Word *) &ehdr->e_ident !=
#if BYTE_ORDER == LITTLE_ENDIAN
              ((ELFMAG0 << (EI_MAG0 * 8)) |
               (ELFMAG1 << (EI_MAG1 * 8)) |
               (ELFMAG2 << (EI_MAG2 * 8)) |
               (ELFMAG3 << (EI_MAG3 * 8)))
#else
              ((ELFMAG0 << (EI_MAG3 * 8)) |
               (ELFMAG1 << (EI_MAG2 * 8)) |
               (ELFMAG2 << (EI_MAG1 * 8)) |
               (ELFMAG3 << (EI_MAG0 * 8)))
#endif
              )
            errstring = N_("invalid ELF header");
          else if (ehdr->e_ident[EI_CLASS] != ELFW(CLASS))
            /* This is not a fatal error.  On architectures where
               32-bit and 64-bit binaries can be run this might
               happen.  */
            goto close_and_out;
          else if (ehdr->e_ident[EI_DATA] != byteorder)
            {
              if (BYTE_ORDER == BIG_ENDIAN)
                errstring = N_("ELF file data encoding not big-endian");
              else
                errstring = N_("ELF file data encoding not little-endian");
            }
          else if (ehdr->e_ident[EI_VERSION] != EV_CURRENT)
            errstring
              = N_("ELF file version ident does not match current one");
          /* XXX We should be able so set system specific versions which are
             allowed here.  */
          else if (!VALID_ELF_OSABI (ehdr->e_ident[EI_OSABI]))
            errstring = N_("ELF file OS ABI invalid");
          else if (!VALID_ELF_ABIVERSION (ehdr->e_ident[EI_ABIVERSION]))
            errstring = N_("ELF file ABI version invalid");
          else
            /* Otherwise we don't know what went wrong.  */
            errstring = N_("internal error");

          goto call_lose;
        }

      if (__builtin_expect (ehdr->e_version, EV_CURRENT) != EV_CURRENT)
        {
          errstring = N_("ELF file version does not match current one");
          goto call_lose;
        }
      if (! __builtin_expect (elf_machine_matches_host (ehdr), 1))
        goto close_and_out;
      else if (__builtin_expect (ehdr->e_phentsize, sizeof (ElfW(Phdr)))
               != sizeof (ElfW(Phdr)))
        {
          errstring = N_("ELF file's phentsize not the expected size");
          goto call_lose;
        }
      else if (__builtin_expect (ehdr->e_type, ET_DYN) != ET_DYN
               && __builtin_expect (ehdr->e_type, ET_EXEC) != ET_EXEC)
        {
          errstring = N_("only ET_DYN and ET_EXEC can be loaded");
          goto call_lose;
        }
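
The PF_TO_PROT constant used in the optimized branch of the PT_LOAD handling above is defined earlier in dl-load.c and is not part of this excerpt. The idea is to pack the PROT_* value for every combination of PF_R/PF_W/PF_X into one integer, four bits per combination, so converting segment flags becomes a shift and a mask instead of three branches. The standalone sketch below rebuilds such a table under the same assumption the #if checks (PF_* and PROT_* are the usual 1/2/4 bit values); the SKETCH_PF_TO_PROT name and the comparison program are illustrative, not the library's own code.

/* Minimal sketch (not the original dl-load.c definition): build a nibble
   table indexed by (p_flags & (PF_R|PF_W|PF_X)) and compare it against the
   straightforward branchy conversion.  Assumes PF_* and PROT_* are the
   1/2/4 bit values, which is exactly what the #if in the listing checks.  */
#include <stdio.h>
#include <elf.h>        /* PF_R, PF_W, PF_X */
#include <sys/mman.h>   /* PROT_READ, PROT_WRITE, PROT_EXEC */

/* Hypothetical stand-in for PF_TO_PROT: nibble i holds the PROT_* bits
   for flag combination i.  */
#define SKETCH_PF_TO_PROT \
  ((PROT_READ << (PF_R * 4)) \
   | (PROT_WRITE << (PF_W * 4)) \
   | (PROT_EXEC << (PF_X * 4)) \
   | ((PROT_READ | PROT_WRITE) << ((PF_R | PF_W) * 4)) \
   | ((PROT_READ | PROT_EXEC) << ((PF_R | PF_X) * 4)) \
   | ((PROT_WRITE | PROT_EXEC) << ((PF_W | PF_X) * 4)) \
   | ((PROT_READ | PROT_WRITE | PROT_EXEC) << ((PF_R | PF_W | PF_X) * 4)))

static int
prot_branchy (unsigned int p_flags)
{
  /* The fallback path from the listing: test each flag separately.  */
  int prot = 0;
  if (p_flags & PF_R)
    prot |= PROT_READ;
  if (p_flags & PF_W)
    prot |= PROT_WRITE;
  if (p_flags & PF_X)
    prot |= PROT_EXEC;
  return prot;
}

int
main (void)
{
  /* Walk all eight flag combinations; the two conversions should agree.  */
  for (unsigned int flags = 0; flags <= (PF_R | PF_W | PF_X); ++flags)
    {
      int table = (SKETCH_PF_TO_PROT >> (flags * 4)) & 0xf;
      printf ("p_flags=%u  table=%d  branchy=%d\n",
              flags, table, prot_branchy (flags));
    }
  return 0;
}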
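
The ET_DYN path above reserves one contiguous region large enough for every segment and then fills it in piecewise with MAP_FIXED, relying on later fixed mappings landing inside address space the loader already owns. The sketch below shows only that reservation trick in isolation, using anonymous PROT_NONE memory instead of mapping the file's first segment, so it is an illustration of the idea rather than the loader's actual sequence:

/* Reserve-then-carve illustration (not the loader itself): grab one
   contiguous range up front, then place an accessible piece inside it at a
   fixed offset.  Sizes and offsets are made up for the example.  */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int
main (void)
{
  const size_t total = 4 * 4096;   /* room for all "segments" */

  /* Reserve the whole range first; nothing in it is accessible yet.  */
  char *base = mmap (NULL, total, PROT_NONE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (base == MAP_FAILED)
    {
      perror ("reserve");
      return 1;
    }

  /* Carve a writable piece at a fixed offset inside the reservation;
     MAP_FIXED guarantees the relative layout.  */
  char *seg = mmap (base + 2 * 4096, 4096, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
  if (seg == MAP_FAILED)
    {
      perror ("carve");
      return 1;
    }

  memset (seg, 0xab, 4096);
  printf ("reserved %zu bytes at %p, writable piece at %p\n",
          total, (void *) base, (void *) seg);

  munmap (base, total);
  return 0;
}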
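
The loadcmd fields (mapstart, mapend, dataend, allocend, mapoff) and the zero-fill bounds (zero, zeropage, zeroend) are plain alignment arithmetic on the program-header values. A small self-contained illustration follows, with made-up segment numbers, a fixed 4 KiB page size, and the simplifying assumption that the load bias l_addr is zero; only the arithmetic mirrors the listing above.

/* Standalone illustration of the rounding done for one PT_LOAD entry.
   The sample p_vaddr/p_offset/p_filesz/p_memsz values are invented.  */
#include <stdio.h>
#include <stdint.h>

int
main (void)
{
  const uint64_t pagesize = 4096;          /* stand-in for _dl_pagesize */
  const uint64_t p_vaddr  = 0x1840;        /* sample segment values */
  const uint64_t p_offset = 0x0840;
  const uint64_t p_filesz = 0x2100;        /* bytes present in the file */
  const uint64_t p_memsz  = 0x5000;        /* bytes needed in memory (BSS) */
  const uint64_t p_align  = 0x1000;

  /* Same formulas as the loadcmd setup in the listing.  */
  uint64_t mapstart = p_vaddr & ~(p_align - 1);
  uint64_t mapend   = (p_vaddr + p_filesz + pagesize - 1) & ~(pagesize - 1);
  uint64_t dataend  = p_vaddr + p_filesz;
  uint64_t allocend = p_vaddr + p_memsz;
  uint64_t mapoff   = p_offset & ~(p_align - 1);

  printf ("mapstart=%#llx mapend=%#llx dataend=%#llx allocend=%#llx mapoff=%#llx\n",
          (unsigned long long) mapstart, (unsigned long long) mapend,
          (unsigned long long) dataend, (unsigned long long) allocend,
          (unsigned long long) mapoff);

  if (allocend > dataend)
    {
      /* Same split as the zero-fill code: the tail of the last file-backed
         page is cleared with memset, anything beyond it comes from
         anonymous (zero-fill) pages.  l_addr is assumed to be 0 here.  */
      uint64_t zero     = dataend;
      uint64_t zeroend  = allocend;
      uint64_t zeropage = (zero + pagesize - 1) & ~(pagesize - 1);
      if (zeroend < zeropage)
        zeropage = zeroend;
      printf ("memset range: [%#llx, %#llx)  anon-map range: [%#llx, %#llx)\n",
              (unsigned long long) zero, (unsigned long long) zeropage,
              (unsigned long long) zeropage, (unsigned long long) zeroend);
    }
  return 0;
}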
