2 * Copyright (C) 2008, 2009 The Android Open Source Project
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
18 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
19 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
22 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
25 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #include <linux/auxvec.h>
44 /* special private C library header - see Android.mk */
45 //#include "bionic_tls.h"
48 #include "linker_debug.h"
49 #include "linker_environ.h"
50 #include "linker_format.h"
/* NOTE(review): this chunk was extracted with interior lines dropped; the
 * original file's line numbers are fused into the text below.  Fragments
 * are preserved byte-identical.  These macros size the LD_LIBRARY_PATH /
 * LD_PRELOAD parse buffers and enable resolving symbols from the main
 * executable during dlopen(). */
52 #define ALLOW_SYMBOLS_FROM_MAIN 1
55 /* Assume average path length of 64 and max 8 paths */
56 #define LDPATH_BUFSIZE 512
59 #define LDPRELOAD_BUFSIZE 512
60 #define LDPRELOAD_MAX 8
62 /* >>> IMPORTANT NOTE - READ ME BEFORE MODIFYING <<<
64 * Do NOT use malloc() and friends or pthread_*() code here.
65 * Don't use printf() either; it's caused mysterious memory
66 * corruption in the past.
67 * The linker runs before we bring up libc and it's easiest
68 * to make sure it does not depend on any complex libc features
72 * - are we doing everything we should for ARM_COPY relocations?
73 * - cleaner error reporting
74 * - after linking, set as much stuff as possible to READONLY
76 * - linker hardcodes PAGE_SIZE and PAGE_MASK because the kernel
77 * headers provide versions that are negative...
78 * - allocate space for soinfo structs dynamically instead of
79 * having a hard limit (64)
/* Forward declaration.  link_image() performs relocation processing for
 * 'si'; its definition lies beyond this chunk (called from init_library
 * below).  NOTE(review): the semantics of 'wr_offset' are presumably the
 * write-protect boundary from get_wr_offset() — confirm in the definition. */
83 static int link_image(soinfo
*si
, unsigned wr_offset
);
/* Static pool of soinfo descriptors (hard limit SO_MAX — see TODO above).
 * 'solist'/'sonext' form the singly linked list of loaded objects, headed
 * by the built-in libdl entry; 'freelist' holds slots returned by
 * free_info().  NOTE(review): the matching #endif for the #if below was
 * dropped by extraction. */
85 static int socount
= 0;
86 static soinfo sopool
[SO_MAX
];
87 static soinfo
*freelist
= NULL
;
88 static soinfo
*solist
= &libdl_info
;
89 static soinfo
*sonext
= &libdl_info
;
90 #if ALLOW_SYMBOLS_FROM_MAIN
91 static soinfo
*somain
; /* main process, always the one after libdl_info */
95 static inline int validate_soinfo(soinfo
*si
)
97 return (si
>= sopool
&& si
< sopool
+ SO_MAX
) ||
/* Storage for the parsed HYBRIS_LD_LIBRARY_PATH / LD_PRELOAD lists
 * (NULL-terminated pointer arrays backed by the *_buf byte buffers),
 * plus runtime debug switches and statistics.  NOTE(review): 'bitmask'
 * presumably tracks touched pages for stats — confirm where it is set. */
101 static char ldpaths_buf
[LDPATH_BUFSIZE
];
102 static const char *ldpaths
[LDPATH_MAX
+ 1];
104 static char ldpreloads_buf
[LDPRELOAD_BUFSIZE
];
105 static const char *ldpreload_names
[LDPRELOAD_MAX
+ 1];
107 static soinfo
*preloads
[LDPRELOAD_MAX
+ 1];
110 int debug_verbosity
= 0;
111 int debug_stdout
= 0;
116 /* This boolean is set if the program being loaded is setuid */
117 static int program_is_setuid
;
120 struct _link_stats linker_stats
;
124 unsigned bitmask
[4096];
/* HOODLUM(): defines a trap implementation of a libc allocation entry
 * point that writes a diagnostic to stderr if it is ever called from
 * inside the linker (see the "do NOT use malloc" note above).
 * NOTE(review): extraction dropped the macro's brace lines; the fragments
 * are kept byte-identical. */
128 #define PT_ARM_EXIDX 0x70000001 /* .ARM.exidx segment */
132 // disable abort() since this is not a linker anymore
133 #define HOODLUM(name, ret, ...) \
134 ret name __VA_ARGS__ \
136 char errstr[] = "ERROR: " #name " called from the dynamic linker!\n"; \
137 write(2, errstr, sizeof(errstr)); \
140 HOODLUM(malloc
, void *, (size_t size
));
141 HOODLUM(free
, void, (void *ptr
));
142 HOODLUM(realloc
, void *, (void *ptr
, size_t size
));
143 HOODLUM(calloc
, void *, (size_t cnt
, size_t size
));
/* DL_ERR(): formats "<function>[<line>]: <message>" into the static
 * __linker_dl_err_buf (later returned by linker_get_error()) and also
 * emits it through the ERROR() log macro.  Uses format_buffer() instead
 * of snprintf because libc may not be up yet. */
146 static char tmp_err_buf
[768];
147 static char __linker_dl_err_buf
[768];
148 #define DL_ERR(fmt, x...) \
150 format_buffer(__linker_dl_err_buf, sizeof(__linker_dl_err_buf), \
151 "%s[%d]: " fmt, __func__, __LINE__, ##x); \
152 ERROR(fmt "\n", ##x); \
155 const char *linker_get_error(void)
157 return (const char *)&__linker_dl_err_buf
[0];
/* GDB rendezvous state (SVR4 debugger interface): _r_debug is the
 * structure GDB reads, rtld_db_dlactivity() is the breakpoint stub, and
 * _r_debug_lock serializes updates.  NOTE(review): the initializer of
 * _r_debug was truncated by extraction (trailing fields missing). */
161 * This function is an empty stub where GDB locates a breakpoint to get notified
162 * about linker activity.
164 extern void __attribute__((noinline
)) rtld_db_dlactivity(void);
166 static struct r_debug _r_debug
= {1, NULL
, &rtld_db_dlactivity
,
168 static struct link_map
*r_debug_tail
= 0;
170 static pthread_mutex_t _r_debug_lock
= PTHREAD_MUTEX_INITIALIZER
;
172 static void insert_soinfo_into_debug_map(soinfo
* info
)
174 struct link_map
* map
;
176 /* Copy the necessary fields into the debug structure.
178 map
= &(info
->linkmap
);
179 map
->l_addr
= info
->base
;
180 map
->l_name
= (char*) info
->name
;
181 map
->l_ld
= (uintptr_t)info
->dynamic
;
183 /* Stick the new library at the end of the list.
184 * gdb tends to care more about libc than it does
185 * about leaf libraries, and ordering it this way
186 * reduces the back-and-forth over the wire.
189 r_debug_tail
->l_next
= map
;
190 map
->l_prev
= r_debug_tail
;
193 _r_debug
.r_map
= map
;
200 static void remove_soinfo_from_debug_map(soinfo
* info
)
202 struct link_map
* map
= &(info
->linkmap
);
204 if (r_debug_tail
== map
)
205 r_debug_tail
= map
->l_prev
;
207 if (map
->l_prev
) map
->l_prev
->l_next
= map
->l_next
;
208 if (map
->l_next
) map
->l_next
->l_prev
= map
->l_prev
;
211 void notify_gdb_of_load(soinfo
* info
)
213 if (info
->flags
& FLAG_EXE
) {
214 // GDB already knows about the main executable
218 pthread_mutex_lock(&_r_debug_lock
);
220 _r_debug
.r_state
= RT_ADD
;
221 rtld_db_dlactivity();
223 insert_soinfo_into_debug_map(info
);
225 _r_debug
.r_state
= RT_CONSISTENT
;
226 rtld_db_dlactivity();
228 pthread_mutex_unlock(&_r_debug_lock
);
231 void notify_gdb_of_unload(soinfo
* info
)
233 if (info
->flags
& FLAG_EXE
) {
234 // GDB already knows about the main executable
238 pthread_mutex_lock(&_r_debug_lock
);
240 _r_debug
.r_state
= RT_DELETE
;
241 rtld_db_dlactivity();
243 remove_soinfo_from_debug_map(info
);
245 _r_debug
.r_state
= RT_CONSISTENT
;
246 rtld_db_dlactivity();
248 pthread_mutex_unlock(&_r_debug_lock
);
251 void notify_gdb_of_libraries()
253 pthread_mutex_lock(&_r_debug_lock
);
254 _r_debug
.r_state
= RT_ADD
;
255 rtld_db_dlactivity();
256 _r_debug
.r_state
= RT_CONSISTENT
;
257 rtld_db_dlactivity();
258 pthread_mutex_unlock(&_r_debug_lock
);
261 static soinfo
*alloc_info(const char *name
)
265 if(strlen(name
) >= SOINFO_NAME_LEN
) {
266 DL_ERR("%5d library name %s too long", pid
, name
);
270 /* The freelist is populated when we call free_info(), which in turn is
271 done only by dlclose(), which is not likely to be used.
274 if(socount
== SO_MAX
) {
275 DL_ERR("%5d too many libraries when loading %s", pid
, name
);
278 freelist
= sopool
+ socount
++;
279 freelist
->next
= NULL
;
283 freelist
= freelist
->next
;
285 /* Make sure we get a clean block of soinfo */
286 memset(si
, 0, sizeof(soinfo
));
287 strlcpy((char*) si
->name
, name
, sizeof(si
->name
));
293 TRACE("%5d name %s: allocated soinfo @ %p\n", pid
, name
, si
);
297 static void free_info(soinfo
*si
)
299 soinfo
*prev
= NULL
, *trav
;
301 TRACE("%5d name %s: freeing soinfo @ %p\n", pid
, si
->name
, si
);
303 for(trav
= solist
; trav
!= NULL
; trav
= trav
->next
){
309 /* si was not ni solist */
310 DL_ERR("%5d name %s is not in solist!", pid
, si
->name
);
314 /* prev will never be NULL, because the first entry in solist is
315 always the static libdl_info.
317 prev
->next
= si
->next
;
318 if (si
== sonext
) sonext
= prev
;
323 const char *addr_to_name(unsigned addr
)
327 for(si
= solist
; si
!= 0; si
= si
->next
){
328 if((addr
>= si
->base
) && (addr
< (si
->base
+ si
->size
))) {
/* Arch-specific stack-unwind support, selected by preprocessor: the ARM
 * variant locates the .ARM.exidx table for a PC, the x86 variant iterates
 * program headers for gcc_eh.  NOTE(review): extraction dropped the
 * function bodies' brace/return lines and the closing #endif; fragments
 * below are kept byte-identical. */
336 /* For a given PC, find the .so that it belongs to.
337 * Returns the base address of the .ARM.exidx section
338 * for that .so, and the number of 8-byte entries
339 * in that section (via *pcount).
341 * Intended to be called by libc's __gnu_Unwind_Find_exidx().
343 * This function is exposed via dlfcn.c and libdl.so.
345 #ifdef ANDROID_ARM_LINKER
346 _Unwind_Ptr
android_dl_unwind_find_exidx(_Unwind_Ptr pc
, int *pcount
)
349 unsigned addr
= (unsigned)pc
;
351 for (si
= solist
; si
!= 0; si
= si
->next
){
352 if ((addr
>= si
->base
) && (addr
< (si
->base
+ si
->size
))) {
353 *pcount
= si
->ARM_exidx_count
;
354 return (_Unwind_Ptr
)(si
->base
+ (unsigned long)si
->ARM_exidx
);
360 #elif defined(ANDROID_X86_LINKER)
361 /* Here, we only have to provide a callback to iterate across all the
362 * loaded libraries. gcc_eh does the rest. */
364 android_dl_iterate_phdr(int (*cb
)(struct dl_phdr_info
*info
, size_t size
, void *data
),
368 struct dl_phdr_info dl_info
;
371 for (si
= solist
; si
!= NULL
; si
= si
->next
) {
372 dl_info
.dlpi_addr
= si
->linkmap
.l_addr
;
373 dl_info
.dlpi_name
= si
->linkmap
.l_name
;
374 dl_info
.dlpi_phdr
= si
->phdr
;
375 dl_info
.dlpi_phnum
= si
->phnum
;
376 rv
= cb(&dl_info
, sizeof (struct dl_phdr_info
), data
);
/* Hash-table symbol lookup inside a single soinfo: walks the bucket
 * chain for elfhash(name)%nbucket, skipping entries whose strtab name
 * differs, and accepts only global/weak defined symbols (st_shndx != 0).
 * NOTE(review): extraction dropped the declarations of 'n'/'s', the
 * switch cases and the return statements; fragments kept byte-identical. */
384 static Elf32_Sym
*_elf_lookup(soinfo
*si
, unsigned hash
, const char *name
)
387 Elf32_Sym
*symtab
= si
->symtab
;
388 const char *strtab
= si
->strtab
;
391 TRACE_TYPE(LOOKUP
, "%5d SEARCH %s in %s@0x%08x %08x %d\n", pid
,
392 name
, si
->name
, si
->base
, hash
, hash
% si
->nbucket
);
393 n
= hash
% si
->nbucket
;
395 for(n
= si
->bucket
[hash
% si
->nbucket
]; n
!= 0; n
= si
->chain
[n
]){
397 if(strcmp(strtab
+ s
->st_name
, name
)) continue;
399 /* only concern ourselves with global and weak symbol definitions */
400 switch(ELF32_ST_BIND(s
->st_info
)){
403 /* no section == undefined */
404 if(s
->st_shndx
== 0) continue;
406 TRACE_TYPE(LOOKUP
, "%5d FOUND %s in %s (%08x) %d\n", pid
,
407 name
, si
->name
, s
->st_value
, s
->st_size
);
/* Standard SysV-ABI ELF symbol hash used to index .hash buckets/chains.
 * (Extraction dropped the h/g declarations, the while loop shell and the
 * return; restored to the canonical algorithm — the surviving fragment
 * `h = (h << 4) + *name++;` matches.) */
static unsigned elfhash(const char *_name)
{
    const unsigned char *name = (const unsigned char *) _name;
    unsigned h = 0, g;

    while(*name) {
        h = (h << 4) + *name++;
        g = h & 0xf0000000;
        h ^= g;
        h ^= g >> 24;
    }
    return h;
}
/* Full symbol resolution order for relocation: (1) the requesting object
 * itself, (2) each LD_PRELOAD object, (3) each DT_NEEDED dependency
 * (validated via validate_soinfo), and (4) optionally the main
 * executable (ALLOW_SYMBOLS_FROM_MAIN).  On success *base is presumably
 * set to the defining object's load base — confirm in the dropped tail.
 * NOTE(review): extraction dropped declarations, early-exit tests and
 * returns; fragments kept byte-identical. */
430 _do_lookup(soinfo
*si
, const char *name
, unsigned *base
)
432 unsigned elf_hash
= elfhash(name
);
438 /* Look for symbols in the local scope (the object who is
439 * searching). This happens with C++ templates on i386 for some
442 * Notes on weak symbols:
443 * The ELF specs are ambigious about treatment of weak definitions in
444 * dynamic linking. Some systems return the first definition found
445 * and some the first non-weak definition. This is system dependent.
446 * Here we return the first definition found for simplicity. */
448 s
= _elf_lookup(si
, elf_hash
, name
);
452 /* Next, look for it in the preloads list */
453 for(i
= 0; preloads
[i
] != NULL
; i
++) {
455 s
= _elf_lookup(lsi
, elf_hash
, name
);
460 for(d
= si
->dynamic
; *d
; d
+= 2) {
461 if(d
[0] == DT_NEEDED
){
462 lsi
= (soinfo
*)d
[1];
463 if (!validate_soinfo(lsi
)) {
464 DL_ERR("%5d bad DT_NEEDED pointer in %s",
469 DEBUG("%5d %s: looking up %s in %s\n",
470 pid
, si
->name
, name
, lsi
->name
);
471 s
= _elf_lookup(lsi
, elf_hash
, name
);
472 if ((s
!= NULL
) && (s
->st_shndx
!= SHN_UNDEF
))
477 #if ALLOW_SYMBOLS_FROM_MAIN
478 /* If we are resolving relocations while dlopen()ing a library, it's OK for
479 * the library to resolve a symbol that's defined in the executable itself,
480 * although this is rare and is generally a bad idea.
484 DEBUG("%5d %s: looking up %s in executable %s\n",
485 pid
, si
->name
, name
, lsi
->name
);
486 s
= _elf_lookup(lsi
, elf_hash
, name
);
492 TRACE_TYPE(LOOKUP
, "%5d si %s sym %s s->st_value = 0x%08x, "
493 "found in %s, base = 0x%08x\n",
494 pid
, si
->name
, name
, s
->st_value
, lsi
->name
, lsi
->base
);
502 /* This is used by dl_sym(). It performs symbol lookup only within the
503 specified soinfo object and not in any of its dependencies.
505 Elf32_Sym
*lookup_in_library(soinfo
*si
, const char *name
)
507 return _elf_lookup(si
, elfhash(name
), name
);
/* Global symbol lookup for dl_sym(): scans the load list starting at
 * 'start', skipping objects flagged FLAG_ERROR, and stops at the first
 * object defining 'name'.  NOTE(review): *found is presumably set to the
 * defining soinfo and the symbol returned — the declarations and return
 * statements were dropped by extraction; fragments kept byte-identical. */
510 /* This is used by dl_sym(). It performs a global symbol lookup.
512 Elf32_Sym
*lookup(const char *name
, soinfo
**found
, soinfo
*start
)
514 unsigned elf_hash
= elfhash(name
);
522 for(si
= start
; (s
== NULL
) && (si
!= NULL
); si
= si
->next
)
524 if(si
->flags
& FLAG_ERROR
)
526 s
= _elf_lookup(si
, elf_hash
, name
);
534 TRACE_TYPE(LOOKUP
, "%5d %s s->st_value = 0x%08x, "
535 "si->base = 0x%08x\n", pid
, name
, s
->st_value
, si
->base
);
542 soinfo
*find_containing_library(const void *addr
)
546 for(si
= solist
; si
!= NULL
; si
= si
->next
)
548 if((unsigned)addr
>= si
->base
&& (unsigned)addr
- si
->base
< si
->size
) {
556 Elf32_Sym
*find_containing_symbol(const void *addr
, soinfo
*si
)
559 unsigned soaddr
= (unsigned)addr
- si
->base
;
561 /* Search the library's symbol table for any defined symbol which
562 * contains this address */
563 for(i
=0; i
<si
->nchain
; i
++) {
564 Elf32_Sym
*sym
= &si
->symtab
[i
];
566 if(sym
->st_shndx
!= SHN_UNDEF
&&
567 soaddr
>= sym
->st_value
&&
568 soaddr
< sym
->st_value
+ sym
->st_size
) {
577 static void dump(soinfo
*si
)
579 Elf32_Sym
*s
= si
->symtab
;
582 for(n
= 0; n
< si
->nchain
; n
++) {
583 TRACE("%5d %04d> %08x: %02x %04x %08x %08x %s\n", pid
, n
, s
,
584 s
->st_info
, s
->st_shndx
, s
->st_value
, s
->st_size
,
585 si
->strtab
+ s
->st_name
);
/* Built-in fallback search paths tried after ldpaths in open_library().
 * NOTE(review): the array's string entries and closing brace were
 * dropped by extraction. */
591 static const char *sopaths
[] = {
/* Open 'name' read-only if it exists and is a regular file.
 * Returns the open fd (>= 0) on success, -1 otherwise; the S_ISREG test
 * keeps the linker from "loading" directories or devices.
 * (Extraction dropped the fd declaration, returns and braces; restored
 * per the upstream bionic linker — all fragments match.) */
static int _open_lib(const char *name)
{
    int fd;
    struct stat filestat;

    if ((stat(name, &filestat) >= 0) && S_ISREG(filestat.st_mode)) {
        if ((fd = open(name, O_RDONLY)) >= 0)
            return fd;
    }

    return -1;
}
/* Resolve a library name to an open fd.  Order: absolute path as-is,
 * then each directory from HYBRIS_LD_LIBRARY_PATH (or the compiled-in
 * DEFAULT_HYBRIS_LD_LIBRARY_PATH, parsed lazily on first use), then the
 * built-in sopaths fallbacks.  Over-long candidate paths are skipped
 * with a WARN.  NOTE(review): extraction dropped the local declarations
 * (fd, n, buf, path), returns and braces; fragments kept byte-identical. */
610 static void parse_library_path(const char *path
, char *delim
);
612 static int open_library(const char *name
)
619 TRACE("[ %5d opening %s ]\n", pid
, name
);
621 if(name
== 0) return -1;
622 if(strlen(name
) > 256) return -1;
624 if ((name
[0] == '/') && ((fd
= _open_lib(name
)) >= 0))
627 #ifdef DEFAULT_HYBRIS_LD_LIBRARY_PATH
628 if (getenv("HYBRIS_LD_LIBRARY_PATH") == NULL
&& *ldpaths
== 0)
630 parse_library_path(DEFAULT_HYBRIS_LD_LIBRARY_PATH
, ":");
633 if (getenv("HYBRIS_LD_LIBRARY_PATH") != NULL
&& *ldpaths
== 0)
635 parse_library_path(getenv("HYBRIS_LD_LIBRARY_PATH"), ":");
638 for (path
= ldpaths
; *path
; path
++) {
639 n
= format_buffer(buf
, sizeof(buf
), "%s/%s", *path
, name
);
640 if (n
< 0 || n
>= (int)sizeof(buf
)) {
641 WARN("Ignoring very long library path: %s/%s\n", *path
, name
);
644 if ((fd
= _open_lib(buf
)) >= 0)
647 for (path
= sopaths
; *path
; path
++) {
648 n
= format_buffer(buf
, sizeof(buf
), "%s/%s", *path
, name
);
649 if (n
< 0 || n
>= (int)sizeof(buf
)) {
650 WARN("Ignoring very long library path: %s/%s\n", *path
, name
);
653 if ((fd
= _open_lib(buf
)) >= 0)
/* Scratch page for the ELF header + program header table of the library
 * currently being loaded (filled by load_library, consumed by
 * get_lib_extents/load_segments).  NOTE(review): the typedef wrapping
 * the prelink tag struct was dropped by extraction — 'tag' is the
 * trailing magic of the Android prelink_info_t footer. */
660 /* temporary space for holding the first page of the shared lib
661 * which contains the elf header (with the pht). */
662 static unsigned char __header
[PAGE_SIZE
];
666 char tag
[4]; /* 'P', 'R', 'E', ' ' */
669 /* Returns the requested base address if the library is prelinked,
670 * and 0 otherwise. */
672 is_prelinked(int fd
, const char *name
)
677 sz
= lseek(fd
, -sizeof(prelink_info_t
), SEEK_END
);
679 DL_ERR("lseek() failed!");
683 if (read(fd
, &info
, sizeof(info
)) != sizeof(info
)) {
684 INFO("Could not read prelink_info_t structure for `%s`\n", name
);
688 if (strncmp(info
.tag
, "PRE ", 4)) {
689 INFO("`%s` is not a prelinked library\n", name
);
693 return (unsigned long)info
.mmap_addr
;
/* Verifies if the object @ base is a valid ELF object.
 *
 * Returns:
 *   0 on success
 *  -1 if no valid ELF object is found @ base
 * Checks only the four magic bytes, plus e_machine when built as the
 * ARM or x86 Android linker.  'name' is currently unused here.
 * (Extraction dropped the final return; restored per the upstream
 * bionic linker — every check matches the fragments.) */
static int
verify_elf_object(void *base, const char *name)
{
    Elf32_Ehdr *hdr = (Elf32_Ehdr *) base;

    if (hdr->e_ident[EI_MAG0] != ELFMAG0) return -1;
    if (hdr->e_ident[EI_MAG1] != ELFMAG1) return -1;
    if (hdr->e_ident[EI_MAG2] != ELFMAG2) return -1;
    if (hdr->e_ident[EI_MAG3] != ELFMAG3) return -1;

    /* TODO: Should we verify anything else in the header? */
#ifdef ANDROID_ARM_LINKER
    if (hdr->e_machine != EM_ARM) return -1;
#elif defined(ANDROID_X86_LINKER)
    if (hdr->e_machine != EM_386) return -1;
#endif

    return 0;
}
/* Computes how much address space the object needs and where it wants to
 * live: validates the ELF header, probes the prelink footer, then scans
 * PT_LOAD entries for min/max p_vaddr, page-aligns both ends, and stores
 * the span in *total_sz.  Returns the prelink base request (0 for
 * non-prelinked, (unsigned)-1 on error).  NOTE(review): extraction
 * dropped the return type, local declarations (req_base, phdr, cnt) and
 * error returns; fragments kept byte-identical. */
726 * Retrieves the base (*base) address where the ELF object should be
727 * mapped and its overall memory size (*total_sz).
730 * fd: Opened file descriptor for the library
731 * name: The name of the library
732 * _hdr: Pointer to the header page of the library
733 * total_sz: Total size of the memory that should be allocated for
737 * -1 if there was an error while trying to get the lib extents.
738 * The possible reasons are:
739 * - Could not determine if the library was prelinked.
740 * - The library provided is not a valid ELF object
741 * 0 if the library did not request a specific base offset (normal
742 * for non-prelinked libs)
743 * > 0 if the library requests a specific address to be mapped to.
744 * This indicates a pre-linked library.
747 get_lib_extents(int fd
, const char *name
, void *__hdr
, unsigned *total_sz
)
750 unsigned min_vaddr
= 0xffffffff;
751 unsigned max_vaddr
= 0;
752 unsigned char *_hdr
= (unsigned char *)__hdr
;
753 Elf32_Ehdr
*ehdr
= (Elf32_Ehdr
*)_hdr
;
757 TRACE("[ %5d Computing extents for '%s'. ]\n", pid
, name
);
758 if (verify_elf_object(_hdr
, name
) < 0) {
759 DL_ERR("%5d - %s is not a valid ELF object", pid
, name
);
763 req_base
= (unsigned) is_prelinked(fd
, name
);
764 if (req_base
== (unsigned)-1)
766 else if (req_base
!= 0) {
767 TRACE("[ %5d - Prelinked library '%s' requesting base @ 0x%08x ]\n",
768 pid
, name
, req_base
);
770 TRACE("[ %5d - Non-prelinked library '%s' found. ]\n", pid
, name
);
773 phdr
= (Elf32_Phdr
*)(_hdr
+ ehdr
->e_phoff
);
775 /* find the min/max p_vaddrs from all the PT_LOAD segments so we can
777 for (cnt
= 0; cnt
< ehdr
->e_phnum
; ++cnt
, ++phdr
) {
778 if (phdr
->p_type
== PT_LOAD
) {
779 if ((phdr
->p_vaddr
+ phdr
->p_memsz
) > max_vaddr
)
780 max_vaddr
= phdr
->p_vaddr
+ phdr
->p_memsz
;
781 if (phdr
->p_vaddr
< min_vaddr
)
782 min_vaddr
= phdr
->p_vaddr
;
786 if ((min_vaddr
== 0xffffffff) && (max_vaddr
== 0)) {
787 DL_ERR("%5d - No loadable segments found in %s.", pid
, name
);
791 /* truncate min_vaddr down to page boundary */
792 min_vaddr
&= ~PAGE_MASK
;
794 /* round max_vaddr up to the next page */
795 max_vaddr
= (max_vaddr
+ PAGE_SIZE
- 1) & ~PAGE_MASK
;
797 *total_sz
= (max_vaddr
- min_vaddr
);
798 return (unsigned)req_base
;
801 /* reserve_mem_region
803 * This function reserves a chunk of memory to be used for mapping in
804 * a prelinked shared library. We reserve the entire memory region here, and
805 * then the rest of the linker will relocate the individual loadable
806 * segments into the correct locations within this memory range.
809 * si->base: The requested base of the allocation.
810 * si->size: The size of the allocation.
813 * -1 on failure, and 0 on success. On success, si->base will contain
814 * the virtual address at which the library will be mapped.
817 static int reserve_mem_region(soinfo
*si
)
819 void *base
= mmap((void *)si
->base
, si
->size
, PROT_NONE
,
820 MAP_FIXED
| MAP_PRIVATE
| MAP_ANONYMOUS
, -1, 0);
821 if (base
== MAP_FAILED
) {
822 DL_ERR("%5d can NOT map (%sprelinked) library '%s' at 0x%08x "
823 "as requested, will try general pool: %d (%s)",
824 pid
, (si
->base
? "" : "non-"), si
->name
, si
->base
,
825 errno
, strerror(errno
));
827 } else if (base
!= (void *)si
->base
) {
828 DL_ERR("OOPS: %5d %sprelinked library '%s' mapped at 0x%08x, "
829 "not at 0x%08x", pid
, (si
->base
? "" : "non-"),
830 si
->name
, (unsigned)base
, si
->base
);
831 munmap(base
, si
->size
);
837 static int alloc_mem_region(soinfo
*si
)
840 /* Attempt to mmap a prelinked library. */
841 return reserve_mem_region(si
);
844 /* This is not a prelinked library, so we use the kernel's default
848 void *base
= mmap(NULL
, si
->size
, PROT_NONE
,
849 MAP_PRIVATE
| MAP_ANONYMOUS
, -1, 0);
850 if (base
== MAP_FAILED
) {
851 DL_ERR("%5d mmap of library '%s' failed: %d (%s)\n",
853 errno
, strerror(errno
));
856 si
->base
= (unsigned) base
;
857 INFO("%5d mapped library '%s' to %08x via kernel allocator.\n",
858 pid
, si
->name
, si
->base
);
862 DL_ERR("OOPS: %5d cannot map library '%s'. no vspace available.",
/* Translate ELF segment p_flags (PF_X/PF_R/PF_W) into mmap PROT_* bits. */
867 #define MAYBE_MAP_FLAG(x,from,to) (((x) & (from)) ? (to) : 0)
868 #define PFLAGS_TO_PROT(x) (MAYBE_MAP_FLAG((x), PF_X, PROT_EXEC) | \
869 MAYBE_MAP_FLAG((x), PF_R, PROT_READ) | \
870 MAYBE_MAP_FLAG((x), PF_W, PROT_WRITE))
/* Core mapping loop: for each PT_LOAD, mmap the file pages at
 * base+p_vaddr (page-aligned, MAP_FIXED into the region reserved by
 * alloc_mem_region), zero the bss tail, extend with anonymous pages up
 * to p_memsz, and track the read-only range in wrprotect_start/end
 * (temporarily made writable for relocation).  PT_DYNAMIC, PT_GNU_RELRO
 * and PT_ARM_EXIDX are recorded as side metadata.  The trailing munmap/
 * FLAG_ERROR fragment is the shared failure path.  NOTE(review): this
 * function is heavily fragmented — declarations (cnt, tmp, len,
 * extra_len), many closing braces, the #endif lines and the success
 * return were dropped by extraction; fragments kept byte-identical. */
873 * This function loads all the loadable (PT_LOAD) segments into memory
874 * at their appropriate memory offsets off the base address.
877 * fd: Open file descriptor to the library to load.
878 * header: Pointer to a header page that contains the ELF header.
879 * This is needed since we haven't mapped in the real file yet.
880 * si: ptr to soinfo struct describing the shared object.
883 * 0 on success, -1 on failure.
886 load_segments(int fd
, void *header
, soinfo
*si
)
888 Elf32_Ehdr
*ehdr
= (Elf32_Ehdr
*)header
;
889 Elf32_Phdr
*phdr
= (Elf32_Phdr
*)((unsigned char *)header
+ ehdr
->e_phoff
);
890 Elf32_Addr base
= (Elf32_Addr
) si
->base
;
894 unsigned char *pbase
;
895 unsigned char *extra_base
;
897 unsigned total_sz
= 0;
899 si
->wrprotect_start
= 0xffffffff;
900 si
->wrprotect_end
= 0;
902 TRACE("[ %5d - Begin loading segments for '%s' @ 0x%08x ]\n",
903 pid
, si
->name
, (unsigned)si
->base
);
904 /* Now go through all the PT_LOAD segments and map them into memory
905 * at the appropriate locations. */
906 for (cnt
= 0; cnt
< ehdr
->e_phnum
; ++cnt
, ++phdr
) {
907 if (phdr
->p_type
== PT_LOAD
) {
908 DEBUG_DUMP_PHDR(phdr
, "PT_LOAD", pid
);
909 /* we want to map in the segment on a page boundary */
910 tmp
= base
+ (phdr
->p_vaddr
& (~PAGE_MASK
));
911 /* add the # of bytes we masked off above to the total length. */
912 len
= phdr
->p_filesz
+ (phdr
->p_vaddr
& PAGE_MASK
);
914 TRACE("[ %d - Trying to load segment from '%s' @ 0x%08x "
915 "(0x%08x). p_vaddr=0x%08x p_offset=0x%08x ]\n", pid
, si
->name
,
916 (unsigned)tmp
, len
, phdr
->p_vaddr
, phdr
->p_offset
);
917 pbase
= mmap((void *)tmp
, len
, PFLAGS_TO_PROT(phdr
->p_flags
),
918 MAP_PRIVATE
| MAP_FIXED
, fd
,
919 phdr
->p_offset
& (~PAGE_MASK
));
920 if (pbase
== MAP_FAILED
) {
921 DL_ERR("%d failed to map segment from '%s' @ 0x%08x (0x%08x). "
922 "p_vaddr=0x%08x p_offset=0x%08x", pid
, si
->name
,
923 (unsigned)tmp
, len
, phdr
->p_vaddr
, phdr
->p_offset
);
927 /* If 'len' didn't end on page boundary, and it's a writable
928 * segment, zero-fill the rest. */
929 if ((len
& PAGE_MASK
) && (phdr
->p_flags
& PF_W
))
930 memset((void *)(pbase
+ len
), 0, PAGE_SIZE
- (len
& PAGE_MASK
));
932 /* Check to see if we need to extend the map for this segment to
933 * cover the diff between filesz and memsz (i.e. for bss).
935 * base _+---------------------+ page boundary
939 * pbase _+---------------------+ page boundary
942 * base + p_vaddr _| |
945 * pbase + len _| / | |
947 * extra_base _+------------|--------+ page boundary
950 * | +------------|--------+ page boundary
951 * extra_len-> | | | |
957 * _+---------------------+ page boundary
959 tmp
= (Elf32_Addr
)(((unsigned)pbase
+ len
+ PAGE_SIZE
- 1) &
961 if (tmp
< (base
+ phdr
->p_vaddr
+ phdr
->p_memsz
)) {
962 extra_len
= base
+ phdr
->p_vaddr
+ phdr
->p_memsz
- tmp
;
963 TRACE("[ %5d - Need to extend segment from '%s' @ 0x%08x "
964 "(0x%08x) ]\n", pid
, si
->name
, (unsigned)tmp
, extra_len
);
965 /* map in the extra page(s) as anonymous into the range.
966 * This is probably not necessary as we already mapped in
967 * the entire region previously, but we just want to be
968 * sure. This will also set the right flags on the region
969 * (though we can probably accomplish the same thing with
972 extra_base
= mmap((void *)tmp
, extra_len
,
973 PFLAGS_TO_PROT(phdr
->p_flags
),
974 MAP_PRIVATE
| MAP_FIXED
| MAP_ANONYMOUS
,
976 if (extra_base
== MAP_FAILED
) {
977 DL_ERR("[ %5d - failed to extend segment from '%s' @ 0x%08x"
978 " (0x%08x) ]", pid
, si
->name
, (unsigned)tmp
,
982 /* TODO: Check if we need to memset-0 this region.
983 * Anonymous mappings are zero-filled copy-on-writes, so we
984 * shouldn't need to. */
985 TRACE("[ %5d - Segment from '%s' extended @ 0x%08x "
986 "(0x%08x)\n", pid
, si
->name
, (unsigned)extra_base
,
989 /* set the len here to show the full extent of the segment we
990 * just loaded, mostly for debugging */
991 len
= (((unsigned)base
+ phdr
->p_vaddr
+ phdr
->p_memsz
+
992 PAGE_SIZE
- 1) & (~PAGE_MASK
)) - (unsigned)pbase
;
993 TRACE("[ %5d - Successfully loaded segment from '%s' @ 0x%08x "
994 "(0x%08x). p_vaddr=0x%08x p_offset=0x%08x\n", pid
, si
->name
,
995 (unsigned)pbase
, len
, phdr
->p_vaddr
, phdr
->p_offset
);
997 /* Make the section writable just in case we'll have to write to
998 * it during relocation (i.e. text segment). However, we will
999 * remember what range of addresses should be write protected.
1002 if (!(phdr
->p_flags
& PF_W
)) {
1003 if ((unsigned)pbase
< si
->wrprotect_start
)
1004 si
->wrprotect_start
= (unsigned)pbase
;
1005 if (((unsigned)pbase
+ len
) > si
->wrprotect_end
)
1006 si
->wrprotect_end
= (unsigned)pbase
+ len
;
1007 mprotect(pbase
, len
,
1008 PFLAGS_TO_PROT(phdr
->p_flags
) | PROT_WRITE
);
1010 } else if (phdr
->p_type
== PT_DYNAMIC
) {
1011 DEBUG_DUMP_PHDR(phdr
, "PT_DYNAMIC", pid
);
1012 /* this segment contains the dynamic linking information */
1013 si
->dynamic
= (unsigned *)(base
+ phdr
->p_vaddr
);
1014 } else if (phdr
->p_type
== PT_GNU_RELRO
) {
1015 if ((phdr
->p_vaddr
>= si
->size
)
1016 || ((phdr
->p_vaddr
+ phdr
->p_memsz
) > si
->size
)
1017 || ((base
+ phdr
->p_vaddr
+ phdr
->p_memsz
) < base
)) {
1018 DL_ERR("%d invalid GNU_RELRO in '%s' "
1019 "p_vaddr=0x%08x p_memsz=0x%08x", pid
, si
->name
,
1020 phdr
->p_vaddr
, phdr
->p_memsz
);
1023 si
->gnu_relro_start
= (Elf32_Addr
) (base
+ phdr
->p_vaddr
);
1024 si
->gnu_relro_len
= (unsigned) phdr
->p_memsz
;
1026 #ifdef ANDROID_ARM_LINKER
1027 if (phdr
->p_type
== PT_ARM_EXIDX
) {
1028 DEBUG_DUMP_PHDR(phdr
, "PT_ARM_EXIDX", pid
);
1029 /* exidx entries (used for stack unwinding) are 8 bytes each.
1031 si
->ARM_exidx
= (unsigned *)phdr
->p_vaddr
;
1032 si
->ARM_exidx_count
= phdr
->p_memsz
/ 8;
1040 if (total_sz
> si
->size
) {
1041 DL_ERR("%5d - Total length (0x%08x) of mapped segments from '%s' is "
1042 "greater than what was allocated (0x%08x). THIS IS BAD!",
1043 pid
, total_sz
, si
->name
, si
->size
);
1047 TRACE("[ %5d - Finish loading segments for '%s' @ 0x%08x. "
1048 "Total memory footprint: 0x%08x bytes ]\n", pid
, si
->name
,
1049 (unsigned)si
->base
, si
->size
);
1053 /* We can just blindly unmap the entire region even though some things
1054 * were mapped in originally with anonymous and others could have been
1055 * been mapped in from the file before we failed. The kernel will unmap
1056 * all the pages in the range, irrespective of how they got there.
1058 munmap((void *)si
->base
, si
->size
);
1059 si
->flags
|= FLAG_ERROR
;
1063 /* TODO: Implement this to take care of the fact that Android ARM
1064 * ELF objects shove everything into a single loadable segment that has the
1065 * write bit set. wr_offset is then used to set non-(data|bss) pages to be
1070 get_wr_offset(int fd
, const char *name
, Elf32_Ehdr
*ehdr
)
1072 Elf32_Shdr
*shdr_start
;
1074 int shdr_sz
= ehdr
->e_shnum
* sizeof(Elf32_Shdr
);
1076 unsigned wr_offset
= 0xffffffff;
1078 shdr_start
= mmap(0, shdr_sz
, PROT_READ
, MAP_PRIVATE
, fd
,
1079 ehdr
->e_shoff
& (~PAGE_MASK
));
1080 if (shdr_start
== MAP_FAILED
) {
1081 WARN("%5d - Could not read section header info from '%s'. Will not "
1082 "not be able to determine write-protect offset.\n", pid
, name
);
1083 return (unsigned)-1;
1086 for(cnt
= 0, shdr
= shdr_start
; cnt
< ehdr
->e_shnum
; ++cnt
, ++shdr
) {
1087 if ((shdr
->sh_type
!= SHT_NULL
) && (shdr
->sh_flags
& SHF_WRITE
) &&
1088 (shdr
->sh_addr
< wr_offset
)) {
1089 wr_offset
= shdr
->sh_addr
;
1093 munmap(shdr_start
, shdr_sz
);
/* End-to-end load of one library: open the file, read the first page
 * into __header, compute extents, allocate an soinfo keyed by basename,
 * reserve address space, map the segments, and cache phdr/phnum from the
 * now-mapped header.  The trailing free_info fragment is the shared
 * failure path.  NOTE(review): extraction dropped the return type, local
 * declarations (cnt, ext_sz, req_base, bname, si, hdr), fd cleanup and
 * returns; fragments kept byte-identical. */
1099 load_library(const char *name
)
1101 int fd
= open_library(name
);
1110 DL_ERR("Library '%s' not found", name
);
1114 /* We have to read the ELF header to figure out what to do with this image
1116 if (lseek(fd
, 0, SEEK_SET
) < 0) {
1117 DL_ERR("lseek() failed!");
1121 if ((cnt
= read(fd
, &__header
[0], PAGE_SIZE
)) < 0) {
1122 DL_ERR("read() failed!");
1126 /* Parse the ELF header and get the size of the memory footprint for
1128 req_base
= get_lib_extents(fd
, name
, &__header
[0], &ext_sz
);
1129 if (req_base
== (unsigned)-1)
1131 TRACE("[ %5d - '%s' (%s) wants base=0x%08x sz=0x%08x ]\n", pid
, name
,
1132 (req_base
? "prelinked" : "not pre-linked"), req_base
, ext_sz
);
1134 /* Now configure the soinfo struct where we'll store all of our data
1135 * for the ELF object. If the loading fails, we waste the entry, but
1136 * same thing would happen if we failed during linking. Configuring the
1137 * soinfo struct here is a lot more convenient.
1139 bname
= strrchr(name
, '/');
1140 si
= alloc_info(bname
? bname
+ 1 : name
);
1144 /* Carve out a chunk of memory where we will map in the individual
1146 si
->base
= req_base
;
1150 si
->dynamic
= (unsigned *)-1;
1151 if (alloc_mem_region(si
) < 0)
1154 TRACE("[ %5d allocated memory for %s @ %p (0x%08x) ]\n",
1155 pid
, name
, (void *)si
->base
, (unsigned) ext_sz
);
1157 /* Now actually load the library's segments into right places in memory */
1158 if (load_segments(fd
, &__header
[0], si
) < 0) {
1162 /* this might not be right. Technically, we don't even need this info
1163 * once we go through 'load_segments'. */
1164 hdr
= (Elf32_Ehdr
*)si
->base
;
1165 si
->phdr
= (Elf32_Phdr
*)((unsigned char *)si
->base
+ hdr
->e_phoff
);
1166 si
->phnum
= hdr
->e_phnum
;
1173 if (si
) free_info(si
);
/* Hybris-specific post-load initialization: reads HYBRIS_LINKER_DEBUG /
 * HYBRIS_LINKER_STDOUT to configure the debug switches (this linker is a
 * library, so the usual Android init path is not taken), then runs
 * link_image(); on link failure the mapped region is unmapped.
 * NOTE(review): extraction dropped the return type, 'env' declaration,
 * braces and returns; fragments kept byte-identical. */
1179 init_library(soinfo
*si
)
1181 unsigned wr_offset
= 0xffffffff;
1184 /* Has to be set via init_library as we don't get called via the
1185 * traditional android init library path */
1187 env
= getenv("HYBRIS_LINKER_DEBUG");
1189 debug_verbosity
= atoi(env
);
1190 if (getenv("HYBRIS_LINKER_STDOUT"))
1193 INFO("[ HYBRIS: initializing library '%s']\n", si
->name
);
1196 /* At this point we know that whatever is loaded @ base is a valid ELF
1197 * shared library whose segments are properly mapped in. */
1198 TRACE("[ %5d init_library base=0x%08x sz=0x%08x name='%s') ]\n",
1199 pid
, si
->base
, si
->size
, si
->name
);
1201 if(link_image(si
, wr_offset
)) {
1202 /* We failed to link. However, we can only restore libbase
1203 ** if no additional libraries have moved it since we updated it.
1205 munmap((void *)si
->base
, si
->size
);
/* Public entry: return the soinfo for 'name', loading and initializing
 * it if necessary.  Matches by basename against the already-loaded list;
 * FLAG_ERROR entries fail fast, FLAG_LINKED entries are returned as-is,
 * and an entry that is neither is a recursive link error.  NOTE(review):
 * extraction dropped the declarations (si, bname), returns and braces;
 * fragments kept byte-identical. */
1212 soinfo
*find_library(const char *name
)
1217 #if ALLOW_SYMBOLS_FROM_MAIN
1225 bname
= strrchr(name
, '/');
1226 bname
= bname
? bname
+ 1 : name
;
1228 for(si
= solist
; si
!= 0; si
= si
->next
){
1229 if(!strcmp(bname
, si
->name
)) {
1230 if(si
->flags
& FLAG_ERROR
) {
1231 DL_ERR("%5d '%s' failed to load previously", pid
, bname
);
1234 if(si
->flags
& FLAG_LINKED
) return si
;
1235 DL_ERR("OOPS: %5d recursive link to '%s'", pid
, si
->name
);
1240 TRACE("[ %5d '%s' has not been loaded yet. Locating...]\n", pid
, name
);
1241 si
= load_library(name
);
1244 return init_library(si
);
/* Drop one reference to 'si'.  When the last reference goes away: run
 * destructors, undo the PT_GNU_RELRO protections (the DT_NEEDED entries
 * were overwritten in place with soinfo pointers during linking and live
 * in the RELRO region), recursively unload each validated DT_NEEDED
 * dependency, unmap the image and notify GDB.  Returns the remaining
 * refcount.  NOTE(review): extraction dropped the 'd' declaration,
 * refcount decrement, returns and braces; fragments kept byte-identical. */
1248 * notify gdb of unload
1249 * for non-prelinked libraries, find a way to decrement libbase
1251 static void call_destructors(soinfo
*si
);
1252 unsigned unload_library(soinfo
*si
)
1255 if (si
->refcount
== 1) {
1256 TRACE("%5d unloading '%s'\n", pid
, si
->name
);
1257 call_destructors(si
);
1260 * Make sure that we undo the PT_GNU_RELRO protections we added
1261 * in link_image. This is needed to undo the DT_NEEDED hack below.
1263 if ((si
->gnu_relro_start
!= 0) && (si
->gnu_relro_len
!= 0)) {
1264 Elf32_Addr start
= (si
->gnu_relro_start
& ~PAGE_MASK
);
1265 unsigned len
= (si
->gnu_relro_start
- start
) + si
->gnu_relro_len
;
1266 if (mprotect((void *) start
, len
, PROT_READ
| PROT_WRITE
) < 0)
1267 DL_ERR("%5d %s: could not undo GNU_RELRO protections. "
1268 "Expect a crash soon. errno=%d (%s)",
1269 pid
, si
->name
, errno
, strerror(errno
));
1273 for(d
= si
->dynamic
; *d
; d
+= 2) {
1274 if(d
[0] == DT_NEEDED
){
1275 soinfo
*lsi
= (soinfo
*)d
[1];
1277 // The next line will segfault if the we don't undo the
1278 // PT_GNU_RELRO protections (see comments above and in
1282 if (validate_soinfo(lsi
)) {
1283 TRACE("%5d %s needs to unload %s\n", pid
,
1284 si
->name
, lsi
->name
);
1285 unload_library(lsi
);
1288 DL_ERR("%5d %s: could not unload dependent library",
1293 munmap((char *)si
->base
, si
->size
);
1294 notify_gdb_of_unload(si
);
1300 INFO("%5d not unloading '%s', decrementing refcount to %d\n",
1301 pid
, si
->name
, si
->refcount
);
1303 return si
->refcount
;
1306 /* TODO: don't use unsigned for addrs below. It works, but is not
1307 * ideal. They should probably be either uint32_t, Elf32_Addr, or unsigned
/* reloc_library(si, rel, count): apply 'count' Elf32_Rel relocations to
 * the image 'si'.  For each entry: resolve the symbol (HYBRIS hook table
 * first via get_hooked_symbol(), then the normal _do_lookup() path),
 * then patch the word at rel->r_offset + si->base according to the
 * relocation type.  Returns 0 on success (visible only via elided
 * lines), nonzero on error.
 * NOTE(review): this extract elides many lines of the original (the
 * switch statements themselves, 'break's, some case labels such as the
 * ARM ABS32/REL32 labels, and the loop's rel++ advance); comments
 * describe only what is visible. */
1310 static int reloc_library(soinfo
*si
, Elf32_Rel
*rel
, unsigned count
)
1312 Elf32_Sym
*symtab
= si
->symtab
;
1313 const char *strtab
= si
->strtab
;
/* Remember the first entry so error messages can report the index. */
1316 Elf32_Rel
*start
= rel
;
1319 for (idx
= 0; idx
< count
; ++idx
) {
/* Decode the relocation: type, symbol index, and target address. */
1320 unsigned type
= ELF32_R_TYPE(rel
->r_info
)
1321 unsigned sym
= ELF32_R_SYM(rel
->r_info
);
1322 unsigned reloc
= (unsigned)(rel
->r_offset
+ si
->base
);
1323 unsigned sym_addr
= 0;
1324 char *sym_name
= NULL
;
1326 DEBUG("%5d Processing '%s' relocation at index %d\n", pid
,
1329 sym_name
= (char *)(strtab
+ symtab
[sym
].st_name
);
/* HYBRIS: let the host override this symbol before normal lookup.
 * NOTE(review): sym_addr is 'unsigned' but compared against NULL --
 * works on 32-bit, but sloppy; see the TODO above. */
1330 INFO("HYBRIS: '%s' checking hooks for sym '%s'\n", si
->name
, sym_name
);
1331 sym_addr
= get_hooked_symbol(sym_name
);
1332 if (sym_addr
!= NULL
) {
1333 INFO("HYBRIS: '%s' hooked symbol %s to %x\n", si
->name
,
1334 sym_name
, sym_addr
);
/* Not hooked: regular symbol lookup across the loaded libraries. */
1336 s
= _do_lookup(si
, sym_name
, &base
);
1338 if(sym_addr
== NULL
)
1340 /* We only allow an undefined symbol if this is a weak
1343 if (ELF32_ST_BIND(s
->st_info
) != STB_WEAK
) {
1344 DL_ERR("%5d cannot locate '%s'...\n", pid
, sym_name
);
1348 /* IHI0044C AAELF 4.5.1.1:
1350 Libraries are not searched to resolve weak references.
1351 It is not an error for a weak reference to remain
1354 During linking, the value of an undefined weak reference is:
1355 - Zero if the relocation type is absolute
1356 - The address of the place if the relocation is pc-relative
1357 - The address of nominal base address if the relocation
1358 type is base-relative.
/* Unresolved weak: these relocation types are satisfied by the
 * zero-initialized sym_addr (or ignore sym_addr entirely). */
1362 #if defined(ANDROID_ARM_LINKER)
1363 case R_ARM_JUMP_SLOT
:
1364 case R_ARM_GLOB_DAT
:
1366 case R_ARM_RELATIVE
: /* Don't care. */
1367 case R_ARM_NONE
: /* Don't care. */
1368 #elif defined(ANDROID_X86_LINKER)
1369 case R_386_JUMP_SLOT
:
1370 case R_386_GLOB_DAT
:
1372 case R_386_RELATIVE
: /* Don't care. */
1373 #endif /* ANDROID_*_LINKER */
1374 /* sym_addr was initialized to be zero above or relocation
1375 code below does not care about value of sym_addr.
1376 No need to do anything. */
1379 #if defined(ANDROID_X86_LINKER)
1383 #endif /* ANDROID_X86_LINKER */
1385 #if defined(ANDROID_ARM_LINKER)
1387 /* Fall through. Can't really copy if weak symbol is
1388 not found in run-time. */
1389 #endif /* ANDROID_ARM_LINKER */
1391 DL_ERR("%5d unknown weak reloc type %d @ %p (%d)\n",
1392 pid
, type
, rel
, (int) (rel
- start
));
1396 /* We got a definition. */
/* base == 0 means the symbol resolved to the main executable;
 * libraries must not bind to the main image here. */
1398 if((base
== 0) && (si
->base
!= 0)){
1399 /* linking from libraries to main image is bad */
1400 DL_ERR("%5d cannot locate '%s'...",
1401 pid
, strtab
+ symtab
[sym
].st_name
);
/* Final address = symbol value + load base of the defining object. */
1405 sym_addr
= (unsigned)(s
->st_value
+ base
);
1407 COUNT_RELOC(RELOC_SYMBOL
);
1412 /* TODO: This is ugly. Split up the relocations by arch into
/* Apply the relocation.  JUMP_SLOT/GLOB_DAT store the symbol address;
 * ABS adds it; REL32 adds (sym_addr - r_offset). */
1416 #if defined(ANDROID_ARM_LINKER)
1417 case R_ARM_JUMP_SLOT
:
1418 COUNT_RELOC(RELOC_ABSOLUTE
);
1419 MARK(rel
->r_offset
);
1420 TRACE_TYPE(RELO
, "%5d RELO JMP_SLOT %08x <- %08x %s\n", pid
,
1421 reloc
, sym_addr
, sym_name
);
1422 *((unsigned*)reloc
) = sym_addr
;
1424 case R_ARM_GLOB_DAT
:
1425 COUNT_RELOC(RELOC_ABSOLUTE
);
1426 MARK(rel
->r_offset
);
1427 TRACE_TYPE(RELO
, "%5d RELO GLOB_DAT %08x <- %08x %s\n", pid
,
1428 reloc
, sym_addr
, sym_name
);
1429 *((unsigned*)reloc
) = sym_addr
;
1432 COUNT_RELOC(RELOC_ABSOLUTE
);
1433 MARK(rel
->r_offset
);
1434 TRACE_TYPE(RELO
, "%5d RELO ABS %08x <- %08x %s\n", pid
,
1435 reloc
, sym_addr
, sym_name
);
1436 *((unsigned*)reloc
) += sym_addr
;
1439 COUNT_RELOC(RELOC_RELATIVE
);
1440 MARK(rel
->r_offset
);
1441 TRACE_TYPE(RELO
, "%5d RELO REL32 %08x <- %08x - %08x %s\n", pid
,
1442 reloc
, sym_addr
, rel
->r_offset
, sym_name
);
1443 *((unsigned*)reloc
) += sym_addr
- rel
->r_offset
;
1445 #elif defined(ANDROID_X86_LINKER)
1446 case R_386_JUMP_SLOT
:
1447 COUNT_RELOC(RELOC_ABSOLUTE
);
1448 MARK(rel
->r_offset
);
1449 TRACE_TYPE(RELO
, "%5d RELO JMP_SLOT %08x <- %08x %s\n", pid
,
1450 reloc
, sym_addr
, sym_name
);
1451 *((unsigned*)reloc
) = sym_addr
;
1453 case R_386_GLOB_DAT
:
1454 COUNT_RELOC(RELOC_ABSOLUTE
);
1455 MARK(rel
->r_offset
);
1456 TRACE_TYPE(RELO
, "%5d RELO GLOB_DAT %08x <- %08x %s\n", pid
,
1457 reloc
, sym_addr
, sym_name
);
1458 *((unsigned*)reloc
) = sym_addr
;
1460 #endif /* ANDROID_*_LINKER */
/* RELATIVE relocations rebase a pointer by the image's load base;
 * they must not name a symbol (checked via the elided condition). */
1462 #if defined(ANDROID_ARM_LINKER)
1463 case R_ARM_RELATIVE
:
1464 #elif defined(ANDROID_X86_LINKER)
1465 case R_386_RELATIVE
:
1466 #endif /* ANDROID_*_LINKER */
1467 COUNT_RELOC(RELOC_RELATIVE
);
1468 MARK(rel
->r_offset
);
1470 DL_ERR("%5d odd RELATIVE form...", pid
);
1473 TRACE_TYPE(RELO
, "%5d RELO RELATIVE %08x <- +%08x\n", pid
,
1475 *((unsigned*)reloc
) += si
->base
;
1478 #if defined(ANDROID_X86_LINKER)
1480 COUNT_RELOC(RELOC_RELATIVE
);
1481 MARK(rel
->r_offset
);
1483 TRACE_TYPE(RELO
, "%5d RELO R_386_32 %08x <- +%08x %s\n", pid
,
1484 reloc
, sym_addr
, sym_name
);
1485 *((unsigned *)reloc
) += (unsigned)sym_addr
;
1489 COUNT_RELOC(RELOC_RELATIVE
);
1490 MARK(rel
->r_offset
);
1491 TRACE_TYPE(RELO
, "%5d RELO R_386_PC32 %08x <- "
1492 "+%08x (%08x - %08x) %s\n", pid
, reloc
,
1493 (sym_addr
- reloc
), sym_addr
, reloc
, sym_name
);
1494 *((unsigned *)reloc
) += (unsigned)(sym_addr
- reloc
);
1496 #endif /* ANDROID_X86_LINKER */
/* COPY relocation (ARM): copy st_size bytes of the symbol's data
 * into the target -- used for variables copied into the executable. */
1498 #ifdef ANDROID_ARM_LINKER
1500 COUNT_RELOC(RELOC_COPY
);
1501 MARK(rel
->r_offset
);
1502 TRACE_TYPE(RELO
, "%5d RELO %08x <- %d @ %08x %s\n", pid
,
1503 reloc
, s
->st_size
, sym_addr
, sym_name
);
1504 memcpy((void*)reloc
, (void*)sym_addr
, s
->st_size
);
1508 #endif /* ANDROID_ARM_LINKER */
1511 DL_ERR("%5d unknown reloc type %d @ %p (%d)",
1512 pid
, type
, rel
, (int) (rel
- start
));
1520 /* Please read the "Initialization and Termination functions" functions.
1521 * of the linker design note in bionic/linker/README.TXT to understand
1522 * what the following code is doing.
1524 * The important things to remember are:
1526 * DT_PREINIT_ARRAY must be called first for executables, and should
1527 * not appear in shared libraries.
1529 * DT_INIT should be called before DT_INIT_ARRAY if both are present
1531 * DT_FINI should be called after DT_FINI_ARRAY if both are present
1533 * DT_FINI_ARRAY must be parsed in reverse order.
/* call_array(ctor, count, reverse): invoke 'count' function pointers
 * from an init/fini array, skipping 0 and -1 sentinel entries.
 * 'reverse' selects dtor ordering (walking the array backwards --
 * the setup lines for that are elided in this extract) and the
 * "dtor"/"ctor" trace label.
 * NOTE(review): the ctor-pointer advance and the actual func() call
 * are among the elided lines; only the skeleton is visible here. */
1536 static void call_array(unsigned *ctor
, int count
, int reverse
)
1545 for(n
= count
; n
> 0; n
--) {
1546 TRACE("[ %5d Looking at %s *0x%08x == 0x%08x ]\n", pid
,
1547 reverse
? "dtor" : "ctor",
1548 (unsigned)ctor
, (unsigned)*ctor
);
1549 void (*func
)() = (void (*)()) *ctor
;
/* 0 and -1 are sentinel/placeholder entries, not real functions. */
1551 if(((int) func
== 0) || ((int) func
== -1)) continue;
1552 TRACE("[ %5d Calling func @ 0x%08x ]\n", pid
, (unsigned)func
);
/* call_constructors_recursive(si): run DT_PREINIT_ARRAY (executables
 * only), then constructors of all DT_NEEDED dependencies (depth-first),
 * then this object's DT_INIT and DT_INIT_ARRAY.  Re-entry is prevented
 * by the constructors_called flag, which is set BEFORE running anything
 * (see the recursion example below).  HYBRIS special case: the bundled
 * libc.so's constructors are skipped entirely. */
1557 void call_constructors_recursive(soinfo
*si
)
/* Already done (or in progress): bail out. */
1559 if (si
->constructors_called
)
/* HYBRIS: never run the Android libc.so constructors on the host. */
1561 if (strcmp(si
->name
,"libc.so") == 0) {
1562 INFO("HYBRIS: =============> Skipping libc.so\n");
1566 // Set this before actually calling the constructors, otherwise it doesn't
1567 // protect against recursive constructor calls. One simple example of
1568 // constructor recursion is the libc debug malloc, which is implemented in
1569 // libc_malloc_debug_leak.so:
1570 // 1. The program depends on libc, so libc's constructor is called here.
1571 // 2. The libc constructor calls dlopen() to load libc_malloc_debug_leak.so.
1572 // 3. dlopen() calls call_constructors_recursive() with the newly created
1573 // soinfo for libc_malloc_debug_leak.so.
1574 // 4. The debug so depends on libc, so call_constructors_recursive() is
1575 // called again with the libc soinfo. If it doesn't trigger the early-
1576 // out above, the libc constructor will be called again (recursively!).
1577 si
->constructors_called
= 1;
/* preinit_array is legal only in the main executable. */
1579 if (si
->flags
& FLAG_EXE
) {
1580 TRACE("[ %5d Calling preinit_array @ 0x%08x [%d] for '%s' ]\n",
1581 pid
, (unsigned)si
->preinit_array
, si
->preinit_array_count
,
1583 call_array(si
->preinit_array
, si
->preinit_array_count
, 0);
1584 TRACE("[ %5d Done calling preinit_array for '%s' ]\n", pid
, si
->name
);
/* Shared library with a preinit_array: diagnose, it is invalid ELF. */
1586 if (si
->preinit_array
) {
1587 DL_ERR("%5d Shared library '%s' has a preinit_array table @ 0x%08x."
1588 " This is INVALID.", pid
, si
->name
,
1589 (unsigned)si
->preinit_array
);
/* Depth-first: run dependency constructors before our own.  d[1] of a
 * DT_NEEDED entry holds the dependent's soinfo* (stashed by link_image). */
1595 for(d
= si
->dynamic
; *d
; d
+= 2) {
1596 if(d
[0] == DT_NEEDED
){
1597 soinfo
* lsi
= (soinfo
*)d
[1];
1598 if (!validate_soinfo(lsi
)) {
1599 DL_ERR("%5d bad DT_NEEDED pointer in %s",
1602 call_constructors_recursive(lsi
);
/* DT_INIT runs before DT_INIT_ARRAY (per the README rules above).
 * NOTE(review): the actual si->init_func() invocation is elided here. */
1608 if (si
->init_func
) {
1609 TRACE("[ %5d Calling init_func @ 0x%08x for '%s' ]\n", pid
,
1610 (unsigned)si
->init_func
, si
->name
);
1612 TRACE("[ %5d Done calling init_func for '%s' ]\n", pid
, si
->name
);
1615 if (si
->init_array
) {
1616 TRACE("[ %5d Calling init_array @ 0x%08x [%d] for '%s' ]\n", pid
,
1617 (unsigned)si
->init_array
, si
->init_array_count
, si
->name
);
1618 call_array(si
->init_array
, si
->init_array_count
, 0);
1619 TRACE("[ %5d Done calling init_array for '%s' ]\n", pid
, si
->name
);
/* call_destructors(si): run DT_FINI_ARRAY (in reverse order -- the
 * third argument to call_array is 1), then DT_FINI, mirroring the
 * constructor ordering rules in reverse.
 * NOTE(review): the actual si->fini_func() invocation is elided in
 * this extract. */
1624 static void call_destructors(soinfo
*si
)
1626 if (si
->fini_array
) {
1627 TRACE("[ %5d Calling fini_array @ 0x%08x [%d] for '%s' ]\n", pid
,
1628 (unsigned)si
->fini_array
, si
->fini_array_count
, si
->name
);
1629 call_array(si
->fini_array
, si
->fini_array_count
, 1);
1630 TRACE("[ %5d Done calling fini_array for '%s' ]\n", pid
, si
->name
);
/* DT_FINI runs after DT_FINI_ARRAY. */
1633 if (si
->fini_func
) {
1634 TRACE("[ %5d Calling fini_func @ 0x%08x for '%s' ]\n", pid
,
1635 (unsigned)si
->fini_func
, si
->name
);
1637 TRACE("[ %5d Done calling fini_func for '%s' ]\n", pid
, si
->name
);
1641 /* Force any of the closed stdin, stdout and stderr to be associated with
/* Returns 0 on success, nonzero (via return_value, set in elided lines)
 * if any step failed.  Used for setuid programs so that libraries
 * writing to fds 0-2 cannot scribble on files later opened there. */
1643 static int nullify_closed_stdio (void)
1645 int dev_null
, i
, status
;
1646 int return_value
= 0;
1648 dev_null
= open("/dev/null", O_RDWR
);
1650 DL_ERR("Cannot open /dev/null.");
1653 TRACE("[ %5d Opened /dev/null file-descriptor=%d]\n", pid
, dev_null
);
1655 /* If any of the stdio file descriptors is valid and not associated
1656 with /dev/null, dup /dev/null to it. */
1657 for (i
= 0; i
< 3; i
++) {
1658 /* If it is /dev/null already, we are done. */
1662 TRACE("[ %5d Nullifying stdio file descriptor %d]\n", pid
, i
);
1663 /* The man page of fcntl does not say that fcntl(..,F_GETFL)
1664 can be interrupted but we do this just to be safe. */
/* Retry-on-EINTR loop: probe whether fd i is open at all. */
1666 status
= fcntl(i
, F_GETFL
);
1667 } while (status
< 0 && errno
== EINTR
);
1669 /* If file is opened, we are good. */
1673 /* The only error we allow is that the file descriptor does not
1674 exist, in which case we dup /dev/null to it. */
1675 if (errno
!= EBADF
) {
1676 DL_ERR("nullify_stdio: unhandled error %s", strerror(errno
));
1681 /* Try dupping /dev/null to this stdio file descriptor and
1682 repeat if there is a signal. Note that any errors in closing
1683 the stdio descriptor are lost. */
1685 status
= dup2(dev_null
, i
);
1686 } while (status
< 0 && errno
== EINTR
);
1689 DL_ERR("nullify_stdio: dup2 error %s", strerror(errno
));
1695 /* If /dev/null is not one of the stdio file descriptors, close it. */
1697 TRACE("[ %5d Closing /dev/null file-descriptor=%d]\n", pid
, dev_null
);
/* close() can also be interrupted; retry on EINTR. */
1699 status
= close(dev_null
);
1700 } while (status
< 0 && errno
== EINTR
);
1703 DL_ERR("nullify_stdio: close error %s", strerror(errno
));
1709 return return_value
;
/* link_image(si, wr_offset): the core linking pass for one loaded
 * object.  Steps (as visible below):
 *   1. For the executable/linker only: scan program headers for
 *      ARM_EXIDX, PT_LOAD (size/write-protect bookkeeping), PT_DYNAMIC
 *      and PT_GNU_RELRO.
 *   2. Parse the dynamic section into the soinfo fields (hash table,
 *      strtab/symtab, rel/plt_rel, init/fini entries, DT_DEBUG).
 *   3. Load LD_PRELOAD libraries (executable only) and all DT_NEEDED
 *      dependencies, stashing each dependent soinfo* into d[1].
 *   4. Apply PLT and ordinary relocations via reloc_library().
 *   5. Re-apply write protection and GNU_RELRO, handle setuid stdio
 *      hardening, and notify gdb.
 * Returns 0 on success; on failure sets FLAG_ERROR (the visible tail)
 * and returns -1.  NOTE(review): many lines (returns, switch headers,
 * case labels, 'break's) are elided in this extract. */
1712 static int link_image(soinfo
*si
, unsigned wr_offset
)
1715 Elf32_Phdr
*phdr
= si
->phdr
;
1716 int phnum
= si
->phnum
;
1718 INFO("[ %5d linking %s ]\n", pid
, si
->name
);
1719 DEBUG("%5d si->base = 0x%08x si->flags = 0x%08x\n", pid
,
1720 si
->base
, si
->flags
);
1722 if (si
->flags
& (FLAG_EXE
| FLAG_LINKER
)) {
1723 /* Locate the needed program segments (DYNAMIC/ARM_EXIDX) for
1724 * linkage info if this is the executable or the linker itself.
1725 * If this was a dynamic lib, that would have been done at load time.
1727 * TODO: It's unfortunate that small pieces of this are
1728 * repeated from the load_library routine. Refactor this just
1729 * slightly to reuse these bits.
1732 for(; phnum
> 0; --phnum
, ++phdr
) {
1733 #ifdef ANDROID_ARM_LINKER
1734 if(phdr
->p_type
== PT_ARM_EXIDX
) {
1735 /* exidx entries (used for stack unwinding) are 8 bytes each.
1737 si
->ARM_exidx
= (unsigned *)phdr
->p_vaddr
;
1738 si
->ARM_exidx_count
= phdr
->p_memsz
/ 8;
1741 if (phdr
->p_type
== PT_LOAD
) {
1742 /* For the executable, we use the si->size field only in
1743 dl_unwind_find_exidx(), so the meaning of si->size
1744 is not the size of the executable; it is the distance
1745 between the load location of the executable and the last
1746 address of the loadable part of the executable.
1747 We use the range [si->base, si->base + si->size) to
1748 determine whether a PC value falls within the executable
1749 section. Of course, if a value is between si->base and
1750 (si->base + phdr->p_vaddr), it's not in the executable
1751 section, but a) we shouldn't be asking for such a value
1752 anyway, and b) if we have to provide an EXIDX for such a
1753 value, then the executable's EXIDX is probably the better
1756 DEBUG_DUMP_PHDR(phdr
, "PT_LOAD", pid
);
/* Grow si->size to cover the highest loadable address seen. */
1757 if (phdr
->p_vaddr
+ phdr
->p_memsz
> si
->size
)
1758 si
->size
= phdr
->p_vaddr
+ phdr
->p_memsz
;
1759 /* try to remember what range of addresses should be write
/* Non-writable segment: widen [wrprotect_start, wrprotect_end)
 * (page-rounded) so we can re-protect it after relocation. */
1761 if (!(phdr
->p_flags
& PF_W
)) {
1764 if (si
->base
+ phdr
->p_vaddr
< si
->wrprotect_start
)
1765 si
->wrprotect_start
= si
->base
+ phdr
->p_vaddr
;
1766 _end
= (((si
->base
+ phdr
->p_vaddr
+ phdr
->p_memsz
+ PAGE_SIZE
- 1) &
1768 if (_end
> si
->wrprotect_end
)
1769 si
->wrprotect_end
= _end
;
1770 /* Make the section writable just in case we'll have to
1771 * write to it during relocation (i.e. text segment).
1772 * However, we will remember what range of addresses
1773 * should be write protected.
1775 mprotect((void *) (si
->base
+ phdr
->p_vaddr
),
1777 PFLAGS_TO_PROT(phdr
->p_flags
) | PROT_WRITE
);
1779 } else if (phdr
->p_type
== PT_DYNAMIC
) {
/* (unsigned *)-1 is the "not yet found" sentinel for si->dynamic. */
1780 if (si
->dynamic
!= (unsigned *)-1) {
1781 DL_ERR("%5d multiple PT_DYNAMIC segments found in '%s'. "
1782 "Segment at 0x%08x, previously one found at 0x%08x",
1783 pid
, si
->name
, si
->base
+ phdr
->p_vaddr
,
1784 (unsigned)si
->dynamic
);
1787 DEBUG_DUMP_PHDR(phdr
, "PT_DYNAMIC", pid
);
1788 si
->dynamic
= (unsigned *) (si
->base
+ phdr
->p_vaddr
);
1789 } else if (phdr
->p_type
== PT_GNU_RELRO
) {
/* Sanity-check the RELRO range against the image bounds and
 * against address-arithmetic wraparound. */
1790 if ((phdr
->p_vaddr
>= si
->size
)
1791 || ((phdr
->p_vaddr
+ phdr
->p_memsz
) > si
->size
)
1792 || ((si
->base
+ phdr
->p_vaddr
+ phdr
->p_memsz
) < si
->base
)) {
1793 DL_ERR("%d invalid GNU_RELRO in '%s' "
1794 "p_vaddr=0x%08x p_memsz=0x%08x", pid
, si
->name
,
1795 phdr
->p_vaddr
, phdr
->p_memsz
);
1798 si
->gnu_relro_start
= (Elf32_Addr
) (si
->base
+ phdr
->p_vaddr
);
1799 si
->gnu_relro_len
= (unsigned) phdr
->p_memsz
;
1804 if (si
->dynamic
== (unsigned *)-1) {
1805 DL_ERR("%5d missing PT_DYNAMIC?!", pid
);
1809 DEBUG("%5d dynamic = %p\n", pid
, si
->dynamic
);
1811 /* extract useful information from dynamic section */
/* NOTE(review): the switch on d[0] and its case labels (DT_HASH,
 * DT_STRTAB, DT_SYMTAB, ...) are largely elided; each visible
 * assignment corresponds to one DT_* tag. */
1812 for(d
= si
->dynamic
; *d
; d
++){
1813 DEBUG("%5d d = %p, d[0] = 0x%08x d[1] = 0x%08x\n", pid
, d
, d
[0], d
[1]);
/* DT_HASH: words are [nbucket, nchain, buckets..., chains...]. */
1816 si
->nbucket
= ((unsigned *) (si
->base
+ *d
))[0];
1817 si
->nchain
= ((unsigned *) (si
->base
+ *d
))[1];
1818 si
->bucket
= (unsigned *) (si
->base
+ *d
+ 8);
1819 si
->chain
= (unsigned *) (si
->base
+ *d
+ 8 + si
->nbucket
* 4);
1822 si
->strtab
= (const char *) (si
->base
+ *d
);
1825 si
->symtab
= (Elf32_Sym
*) (si
->base
+ *d
);
1829 DL_ERR("DT_RELA not supported");
1834 si
->plt_rel
= (Elf32_Rel
*) (si
->base
+ *d
);
/* sizeof(Elf32_Rel) == 8, hence the division. */
1837 si
->plt_rel_count
= *d
/ 8;
1840 si
->rel
= (Elf32_Rel
*) (si
->base
+ *d
);
1843 si
->rel_count
= *d
/ 8;
1846 /* Save this in case we decide to do lazy binding. We don't yet. */
1847 si
->plt_got
= (unsigned *)(si
->base
+ *d
);
1850 // Set the DT_DEBUG entry to the address of _r_debug for GDB
1851 *d
= (int) &_r_debug
;
1854 DL_ERR("%5d DT_RELA not supported", pid
);
1857 si
->init_func
= (void (*)(void))(si
->base
+ *d
);
1858 DEBUG("%5d %s constructors (init func) found at %p\n",
1859 pid
, si
->name
, si
->init_func
);
1862 si
->fini_func
= (void (*)(void))(si
->base
+ *d
);
1863 DEBUG("%5d %s destructors (fini func) found at %p\n",
1864 pid
, si
->name
, si
->fini_func
);
1867 si
->init_array
= (unsigned *)(si
->base
+ *d
);
1868 DEBUG("%5d %s constructors (init_array) found at %p\n",
1869 pid
, si
->name
, si
->init_array
);
1871 case DT_INIT_ARRAYSZ
:
1872 si
->init_array_count
= ((unsigned)*d
) / sizeof(Elf32_Addr
);
1875 si
->fini_array
= (unsigned *)(si
->base
+ *d
);
1876 DEBUG("%5d %s destructors (fini_array) found at %p\n",
1877 pid
, si
->name
, si
->fini_array
);
1879 case DT_FINI_ARRAYSZ
:
1880 si
->fini_array_count
= ((unsigned)*d
) / sizeof(Elf32_Addr
);
1882 case DT_PREINIT_ARRAY
:
1883 si
->preinit_array
= (unsigned *)(si
->base
+ *d
);
1884 DEBUG("%5d %s constructors (preinit_array) found at %p\n",
1885 pid
, si
->name
, si
->preinit_array
);
1887 case DT_PREINIT_ARRAYSZ
:
1888 si
->preinit_array_count
= ((unsigned)*d
) / sizeof(Elf32_Addr
);
1891 /* TODO: make use of this. */
1892 /* this means that we might have to write into where the text
1893 * segment was loaded during relocation... Do something with
1896 DEBUG("%5d Text segment should be writable during relocation.\n",
1902 DEBUG("%5d si->base = 0x%08x, si->strtab = %p, si->symtab = %p\n",
1903 pid
, si
->base
, si
->strtab
, si
->symtab
);
/* Cannot link anything without string and symbol tables. */
1905 if((si
->strtab
== 0) || (si
->symtab
== 0)) {
1906 DL_ERR("%5d missing essential tables", pid
);
1910 /* if this is the main executable, then load all of the preloads now */
1911 if(si
->flags
& FLAG_EXE
) {
1913 memset(preloads
, 0, sizeof(preloads
));
1914 for(i
= 0; ldpreload_names
[i
] != NULL
; i
++) {
1915 soinfo
*lsi
= find_library(ldpreload_names
[i
]);
1917 strlcpy(tmp_err_buf
, linker_get_error(), sizeof(tmp_err_buf
));
1918 DL_ERR("%5d could not load needed library '%s' for '%s' (%s)",
1919 pid
, ldpreload_names
[i
], si
->name
, tmp_err_buf
);
/* Load every DT_NEEDED dependency. */
1927 for(d
= si
->dynamic
; *d
; d
+= 2) {
1928 if(d
[0] == DT_NEEDED
){
1929 DEBUG("%5d %s needs %s\n", pid
, si
->name
, si
->strtab
+ d
[1]);
1930 soinfo
*lsi
= find_library(si
->strtab
+ d
[1]);
1932 strlcpy(tmp_err_buf
, linker_get_error(), sizeof(tmp_err_buf
));
1933 DL_ERR("%5d could not load needed library '%s' for '%s' (%s)",
1934 pid
, si
->strtab
+ d
[1], si
->name
, tmp_err_buf
);
1937 /* Save the soinfo of the loaded DT_NEEDED library in the payload
1938 of the DT_NEEDED entry itself, so that we can retrieve the
1939 soinfo directly later from the dynamic segment. This is a hack,
1940 but it allows us to map from DT_NEEDED to soinfo efficiently
1941 later on when we resolve relocations, trying to look up a symbol
1944 d
[1] = (unsigned)lsi
;
/* Apply relocations: PLT entries first, then the ordinary set. */
1950 DEBUG("[ %5d relocating %s plt ]\n", pid
, si
->name
);
1951 if(reloc_library(si
, si
->plt_rel
, si
->plt_rel_count
))
1955 DEBUG("[ %5d relocating %s ]\n", pid
, si
->name
);
1956 if(reloc_library(si
, si
->rel
, si
->rel_count
))
1960 si
->flags
|= FLAG_LINKED
;
1961 DEBUG("[ %5d finished linking %s ]\n", pid
, si
->name
);
1964 /* This is the way that the old dynamic linker did protection of
1965 * non-writable areas. It would scan section headers and find where
1966 * .text ended (rather where .data/.bss began) and assume that this is
1967 * the upper range of the non-writable area. This is too coarse,
1968 * and is kept here for reference until we fully move away from single
1969 * segment elf objects. See the code in get_wr_offset (also #if'd 0)
1970 * that made this possible.
1972 if(wr_offset
< 0xffffffff){
1973 mprotect((void*) si
->base
, wr_offset
, PROT_READ
| PROT_EXEC
);
1976 /* TODO: Verify that this does the right thing in all cases, as it
1977 * presently probably does not. It is possible that an ELF image will
1978 * come with multiple read-only segments. What we ought to do is scan
1979 * the program headers again and mprotect all the read-only segments.
1980 * To prevent re-scanning the program header, we would have to build a
1981 * list of loadable segments in si, and then scan that instead. */
1982 if (si
->wrprotect_start
!= 0xffffffff && si
->wrprotect_end
!= 0) {
1983 mprotect((void *)si
->wrprotect_start
,
1984 si
->wrprotect_end
- si
->wrprotect_start
,
1985 PROT_READ
| PROT_EXEC
);
/* Enforce GNU_RELRO: make the relro range read-only now that
 * relocations are done (mprotect needs a page-aligned start). */
1989 if (si
->gnu_relro_start
!= 0 && si
->gnu_relro_len
!= 0) {
1990 Elf32_Addr start
= (si
->gnu_relro_start
& ~PAGE_MASK
);
1991 unsigned len
= (si
->gnu_relro_start
- start
) + si
->gnu_relro_len
;
1992 if (mprotect((void *) start
, len
, PROT_READ
) < 0) {
1993 DL_ERR("%5d GNU_RELRO mprotect of library '%s' failed: %d (%s)\n",
1994 pid
, si
->name
, errno
, strerror(errno
));
1999 /* If this is a SET?ID program, dup /dev/null to opened stdin,
2000 stdout and stderr to close a security hole described in:
2002 ftp://ftp.freebsd.org/pub/FreeBSD/CERT/advisories/FreeBSD-SA-02:23.stdio.asc
2005 if (program_is_setuid
)
2006 nullify_closed_stdio ();
2007 notify_gdb_of_load(si
);
/* Error exit path: mark the soinfo so find_library won't retry it. */
2011 ERROR("failed to link %s\n", si
->name
);
2012 si
->flags
|= FLAG_ERROR
;
/* parse_library_path(path, delim): copy 'path' into the static
 * ldpaths_buf and split it in place on 'delim' with strsep(), filling
 * the ldpaths[] table (at most LDPATH_MAX entries, empty components
 * skipped).  If strlcpy() had to truncate, the final (possibly
 * partial) component is discarded. */
2016 static void parse_library_path(const char *path
, char *delim
)
2019 char *ldpaths_bufp
= ldpaths_buf
;
/* strlcpy returns the untruncated source length; >= sizeof buffer
 * means truncation occurred. */
2022 len
= strlcpy(ldpaths_buf
, path
, sizeof(ldpaths_buf
));
2024 while (i
< LDPATH_MAX
&& (ldpaths
[i
] = strsep(&ldpaths_bufp
, delim
))) {
2025 if (*ldpaths
[i
] != '\0')
2029 /* Forget the last path if we had to truncate; this occurs if the 2nd to
2030 * last char isn't '\0' (i.e. not originally a delim). */
2031 if (i
> 0 && len
>= sizeof(ldpaths_buf
) &&
2032 ldpaths_buf
[sizeof(ldpaths_buf
) - 2] != '\0') {
2033 ldpaths
[i
- 1] = NULL
;
/* parse_preloads(path, delim): same scheme as parse_library_path(),
 * but for LD_PRELOAD: split 'path' into ldpreload_names[] (at most
 * LDPRELOAD_MAX entries), dropping the last entry on truncation, and
 * NULL-terminate the table. */
2039 static void parse_preloads(const char *path
, char *delim
)
2042 char *ldpreloads_bufp
= ldpreloads_buf
;
/* strlcpy returns the untruncated source length. */
2045 len
= strlcpy(ldpreloads_buf
, path
, sizeof(ldpreloads_buf
));
2047 while (i
< LDPRELOAD_MAX
&& (ldpreload_names
[i
] = strsep(&ldpreloads_bufp
, delim
))) {
2048 if (*ldpreload_names
[i
] != '\0') {
2053 /* Forget the last path if we had to truncate; this occurs if the 2nd to
2054 * last char isn't '\0' (i.e. not originally a delim). */
2055 if (i
> 0 && len
>= sizeof(ldpreloads_buf
) &&
2056 ldpreloads_buf
[sizeof(ldpreloads_buf
) - 2] != '\0') {
2057 ldpreload_names
[i
- 1] = NULL
;
/* Terminator so iteration over ldpreload_names[] knows where to stop. */
2059 ldpreload_names
[i
] = NULL
;
2064 * This code is called after the linker has linked itself and
2065 * fixed its own GOT. It is safe to make references to externs
2066 * and other non-local data at this point.
/* Main-phase entry: parse the kernel-provided stack block (argc, argv,
 * envp, ELF auxv), set up the debug/link-map state for gdb, build the
 * soinfo for the executable, link it, run constructors, and (in elided
 * lines) return the program entry point to jump to. */
2068 static unsigned __linker_init_post_relocation(unsigned **elfdata
)
2070 static soinfo linker_soinfo
;
/* The initial stack layout: *elfdata = argc, then argv[argc+1]
 * (NULL-terminated), then envp, then the auxv entries. */
2072 int argc
= (int) *elfdata
;
2073 char **argv
= (char**) (elfdata
+ 1);
2074 unsigned *vecs
= (unsigned*) (argv
+ argc
+ 1);
2077 struct link_map
* map
;
2078 const char *ldpath_env
= NULL
;
2079 const char *ldpreload_env
= NULL
;
2081 /* NOTE: we store the elfdata pointer on a special location
2082 * of the temporary TLS area in order to pass it to
2083 * the C Library's runtime initializer.
2085 * The initializer must clear the slot and reset the TLS
2086 * to point to a different location to ensure that no other
2087 * shared library constructor can access it.
2089 //__libc_init_tls(elfdata);
/* TIMING instrumentation (guarded by elided #if TIMING). */
2094 struct timeval t0
, t1
;
2095 gettimeofday(&t0
, 0);
2098 /* Initialize environment functions, and get to the ELF aux vectors table */
2099 vecs
= linker_env_init(vecs
);
2101 /* Check auxv for AT_SECURE first to see if program is setuid, setgid,
2102 has file caps, or caused a SELinux/AppArmor domain transition. */
2103 for (v
= vecs
; v
[0]; v
+= 2) {
2104 if (v
[0] == AT_SECURE
) {
2105 /* kernel told us whether to enable secure mode */
2106 program_is_setuid
= v
[1];
2111 /* Kernel did not provide AT_SECURE - fall back on legacy test. */
2112 program_is_setuid
= (getuid() != geteuid()) || (getgid() != getegid());
2115 /* Sanitize environment if we're loading a setuid program */
2116 if (program_is_setuid
)
2117 linker_env_secure();
2121 /* Get a few environment variables */
2125 env
= linker_env_get("DEBUG"); /* XXX: TODO: Change to LD_DEBUG */
2127 debug_verbosity
= atoi(env
);
2130 /* Normally, these are cleaned by linker_env_secure, but the test
2131 * against program_is_setuid doesn't cost us anything */
2132 if (!program_is_setuid
) {
2133 ldpath_env
= linker_env_get("LD_LIBRARY_PATH");
2134 ldpreload_env
= linker_env_get("LD_PRELOAD");
2138 INFO("[ android linker & debugger ]\n");
2139 DEBUG("%5d elfdata @ 0x%08x\n", pid
, (unsigned)elfdata
);
/* Allocate the soinfo that represents the main executable. */
2141 si
= alloc_info(argv
[0]);
2146 /* bootstrap the link map, the main exe always needs to be first */
2147 si
->flags
|= FLAG_EXE
;
2148 map
= &(si
->linkmap
);
2151 map
->l_name
= argv
[0];
2155 _r_debug
.r_map
= map
;
2158 /* gdb expects the linker to be in the debug shared object list,
2159 * and we need to make sure that the reported load address is zero.
2160 * Without this, gdb gets the wrong idea of where rtld_db_dlactivity()
2161 * is. Don't use alloc_info(), because the linker shouldn't
2162 * be on the soinfo list.
2164 strlcpy((char*) linker_soinfo
.name
, "/system/bin/linker", sizeof linker_soinfo
.name
);
2165 linker_soinfo
.flags
= 0;
2166 linker_soinfo
.base
= 0; // This is the important part; must be zero.
2167 insert_soinfo_into_debug_map(&linker_soinfo
);
2169 /* extract information passed from the kernel */
/* Walk auxv (tag/value pairs); the visible assignments correspond to
 * AT_PHDR, AT_PHNUM and AT_ENTRY (the switch itself is elided). */
2170 while(vecs
[0] != 0){
2173 si
->phdr
= (Elf32_Phdr
*) vecs
[1];
2176 si
->phnum
= (int) vecs
[1];
2179 si
->entry
= vecs
[1];
2185 /* Compute the value of si->base. We can't rely on the fact that
2186 * the first entry is the PHDR because this will not be true
2187 * for certain executables (e.g. some in the NDK unit test suite)
2191 for ( nn
= 0; nn
< si
->phnum
; nn
++ ) {
2192 if (si
->phdr
[nn
].p_type
== PT_PHDR
) {
/* base = actual phdr address minus its link-time virtual address. */
2193 si
->base
= (Elf32_Addr
) si
->phdr
- si
->phdr
[nn
].p_vaddr
;
/* Initialize linking state to the same sentinels used elsewhere. */
2197 si
->dynamic
= (unsigned *)-1;
2198 si
->wrprotect_start
= 0xffffffff;
2199 si
->wrprotect_end
= 0;
2201 si
->gnu_relro_start
= 0;
2202 si
->gnu_relro_len
= 0;
2204 /* Use LD_LIBRARY_PATH if we aren't setuid/setgid */
2206 parse_library_path(ldpath_env
, ":");
2208 if (ldpreload_env
) {
2209 parse_preloads(ldpreload_env
, " :");
/* Link the executable; fatal on failure. */
2212 if(link_image(si
, 0)) {
2213 char errmsg
[] = "CANNOT LINK EXECUTABLE\n";
2214 write(2, __linker_dl_err_buf
, strlen(__linker_dl_err_buf
));
2215 write(2, errmsg
, sizeof(errmsg
));
2219 call_constructors_recursive(si
);
2221 #if ALLOW_SYMBOLS_FROM_MAIN
2222 /* Set somain after we've loaded all the libraries in order to prevent
2223 * linking of symbols back to the main image, which is not set up at that
/* Optional TIMING / STATS / COUNT_PAGES reports (guards elided). */
2230 gettimeofday(&t1
,NULL
);
2231 PRINT("LINKER TIME: %s: %d microseconds\n", argv
[0], (int) (
2232 (((long long)t1
.tv_sec
* 1000000LL) + (long long)t1
.tv_usec
) -
2233 (((long long)t0
.tv_sec
* 1000000LL) + (long long)t0
.tv_usec
)
2237 PRINT("RELO STATS: %s: %d abs, %d rel, %d copy, %d symbol\n", argv
[0],
2238 linker_stats
.reloc
[RELOC_ABSOLUTE
],
2239 linker_stats
.reloc
[RELOC_RELATIVE
],
2240 linker_stats
.reloc
[RELOC_COPY
],
2241 linker_stats
.reloc
[RELOC_SYMBOL
]);
2248 for(n
= 0; n
< 4096; n
++){
2250 unsigned x
= bitmask
[n
];
2251 for(i
= 0; i
< 8; i
++){
2257 PRINT("PAGES MODIFIED: %s: %d (%dKB)\n", argv
[0], count
, count
* 4);
2261 #if TIMING || STATS || COUNT_PAGES
2265 TRACE("[ %5d Ready to execute '%s' @ 0x%08x ]\n", pid
, si
->name
,
2271 * Find the value of AT_BASE passed to us by the kernel. This is the load
2272 * location of the linker.
/* Walks the initial stack block past argv and envp to the auxv table
 * and returns the AT_BASE value.  NOTE(review): the loop-advance and
 * the actual 'return vecs[1]' lines are elided in this extract. */
2274 static unsigned find_linker_base(unsigned **elfdata
) {
2275 int argc
= (int) *elfdata
;
2276 char **argv
= (char**) (elfdata
+ 1);
2277 unsigned *vecs
= (unsigned*) (argv
+ argc
+ 1);
/* Skip the environment strings (NULL-terminated pointer array). */
2278 while (vecs
[0] != 0) {
2282 /* The end of the environment block is marked by two NULL pointers */
2286 if (vecs
[0] == AT_BASE
) {
2292 return 0; // should never happen
2296 * This is the entry point for the linker, called from begin.S. This
2297 * method is responsible for fixing the linker's own relocations, and
2298 * then calling __linker_init_post_relocation().
2300 * Because this method is called before the linker has fixed it's own
2301 * relocations, any attempt to reference an extern variable, extern
2302 * function, or other GOT reference will generate a segfault.
2304 unsigned __linker_init(unsigned **elfdata
) {
2305 unsigned linker_addr
= find_linker_base(elfdata
);
2306 Elf32_Ehdr
*elf_hdr
= (Elf32_Ehdr
*) linker_addr
;
2308 (Elf32_Phdr
*)((unsigned char *) linker_addr
+ elf_hdr
->e_phoff
);
2311 memset(&linker_so
, 0, sizeof(soinfo
));
2313 linker_so
.base
= linker_addr
;
2314 linker_so
.dynamic
= (unsigned *) -1;
2315 linker_so
.phdr
= phdr
;
2316 linker_so
.phnum
= elf_hdr
->e_phnum
;
2317 linker_so
.flags
|= FLAG_LINKER
;
2318 linker_so
.wrprotect_start
= 0xffffffff;
2319 linker_so
.wrprotect_end
= 0;
2320 linker_so
.gnu_relro_start
= 0;
2321 linker_so
.gnu_relro_len
= 0;
2323 if (link_image(&linker_so
, 0)) {
2324 // It would be nice to print an error message, but if the linker
2325 // can't link itself, there's no guarantee that we'll be able to
2326 // call write() (because it involves a GOT reference).
2328 // This situation should never occur unless the linker itself
2333 // We have successfully fixed our own relocations. It's safe to run
2334 // the main part of the linker now.
2335 return __linker_init_post_relocation(elfdata
);