2 * Copyright (C) 2008, 2009 The Android Open Source Project
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
18 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
19 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
22 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
25 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #include <linux/auxvec.h>
44 /* special private C library header - see Android.mk */
45 #include "bionic_tls.h"
48 #include "linker_debug.h"
49 #include "linker_environ.h"
50 #include "linker_format.h"
52 #define ALLOW_SYMBOLS_FROM_MAIN 1
55 /* Assume average path length of 64 and max 8 paths */
56 #define LDPATH_BUFSIZE 512
59 #define LDPRELOAD_BUFSIZE 512
60 #define LDPRELOAD_MAX 8
62 /* >>> IMPORTANT NOTE - READ ME BEFORE MODIFYING <<<
64 * Do NOT use malloc() and friends or pthread_*() code here.
65 * Don't use printf() either; it's caused mysterious memory
66 * corruption in the past.
67 * The linker runs before we bring up libc and it's easiest
68 * to make sure it does not depend on any complex libc features
72 * - are we doing everything we should for ARM_COPY relocations?
73 * - cleaner error reporting
74 * - after linking, set as much stuff as possible to READONLY
76 * - linker hardcodes PAGE_SIZE and PAGE_MASK because the kernel
77 * headers provide versions that are negative...
78 * - allocate space for soinfo structs dynamically instead of
79 * having a hard limit (64)
83 static int link_image(soinfo
*si
, unsigned wr_offset
);
85 static int socount
= 0;
86 static soinfo sopool
[SO_MAX
];
87 static soinfo
*freelist
= NULL
;
88 static soinfo
*solist
= &libdl_info
;
89 static soinfo
*sonext
= &libdl_info
;
90 #if ALLOW_SYMBOLS_FROM_MAIN
91 static soinfo
*somain
; /* main process, always the one after libdl_info */
95 static inline int validate_soinfo(soinfo
*si
)
97 return (si
>= sopool
&& si
< sopool
+ SO_MAX
) ||
101 static char ldpaths_buf
[LDPATH_BUFSIZE
];
102 static const char *ldpaths
[LDPATH_MAX
+ 1];
104 static char ldpreloads_buf
[LDPRELOAD_BUFSIZE
];
105 static const char *ldpreload_names
[LDPRELOAD_MAX
+ 1];
107 static soinfo
*preloads
[LDPRELOAD_MAX
+ 1];
115 /* This boolean is set if the program being loaded is setuid */
116 static int program_is_setuid
;
119 struct _link_stats linker_stats
;
123 unsigned bitmask
[4096];
127 #define PT_ARM_EXIDX 0x70000001 /* .ARM.exidx segment */
130 static char tmp_err_buf
[768];
131 static char __linker_dl_err_buf
[768];
132 #define DL_ERR(fmt, x...) \
134 format_buffer(__linker_dl_err_buf, sizeof(__linker_dl_err_buf), \
135 "%s[%d]: " fmt, __func__, __LINE__, ##x); \
136 ERROR(fmt "\n", ##x); \
139 const char *linker_get_error(void)
141 return (const char *)&__linker_dl_err_buf
[0];
145 * This function is an empty stub where GDB locates a breakpoint to get notified
146 * about linker activity.
148 extern void __attribute__((noinline
)) rtld_db_dlactivity(void);
150 static struct r_debug _r_debug
= {1, NULL
, &rtld_db_dlactivity
,
152 static struct link_map
*r_debug_tail
= 0;
154 static pthread_mutex_t _r_debug_lock
= PTHREAD_MUTEX_INITIALIZER
;
156 static void insert_soinfo_into_debug_map(soinfo
* info
)
158 struct link_map
* map
;
160 /* Copy the necessary fields into the debug structure.
162 map
= &(info
->linkmap
);
163 map
->l_addr
= info
->base
;
164 map
->l_name
= (char*) info
->name
;
165 map
->l_ld
= (uintptr_t)info
->dynamic
;
167 /* Stick the new library at the end of the list.
168 * gdb tends to care more about libc than it does
169 * about leaf libraries, and ordering it this way
170 * reduces the back-and-forth over the wire.
173 r_debug_tail
->l_next
= map
;
174 map
->l_prev
= r_debug_tail
;
177 _r_debug
.r_map
= map
;
184 static void remove_soinfo_from_debug_map(soinfo
* info
)
186 struct link_map
* map
= &(info
->linkmap
);
188 if (r_debug_tail
== map
)
189 r_debug_tail
= map
->l_prev
;
191 if (map
->l_prev
) map
->l_prev
->l_next
= map
->l_next
;
192 if (map
->l_next
) map
->l_next
->l_prev
= map
->l_prev
;
195 void notify_gdb_of_load(soinfo
* info
)
197 if (info
->flags
& FLAG_EXE
) {
198 // GDB already knows about the main executable
202 pthread_mutex_lock(&_r_debug_lock
);
204 _r_debug
.r_state
= RT_ADD
;
205 rtld_db_dlactivity();
207 insert_soinfo_into_debug_map(info
);
209 _r_debug
.r_state
= RT_CONSISTENT
;
210 rtld_db_dlactivity();
212 pthread_mutex_unlock(&_r_debug_lock
);
215 void notify_gdb_of_unload(soinfo
* info
)
217 if (info
->flags
& FLAG_EXE
) {
218 // GDB already knows about the main executable
222 pthread_mutex_lock(&_r_debug_lock
);
224 _r_debug
.r_state
= RT_DELETE
;
225 rtld_db_dlactivity();
227 remove_soinfo_from_debug_map(info
);
229 _r_debug
.r_state
= RT_CONSISTENT
;
230 rtld_db_dlactivity();
232 pthread_mutex_unlock(&_r_debug_lock
);
235 void notify_gdb_of_libraries()
237 _r_debug
.r_state
= RT_ADD
;
238 rtld_db_dlactivity();
239 _r_debug
.r_state
= RT_CONSISTENT
;
240 rtld_db_dlactivity();
243 static soinfo
*alloc_info(const char *name
)
247 if(strlen(name
) >= SOINFO_NAME_LEN
) {
248 DL_ERR("%5d library name %s too long", pid
, name
);
252 /* The freelist is populated when we call free_info(), which in turn is
253 done only by dlclose(), which is not likely to be used.
256 if(socount
== SO_MAX
) {
257 DL_ERR("%5d too many libraries when loading %s", pid
, name
);
260 freelist
= sopool
+ socount
++;
261 freelist
->next
= NULL
;
265 freelist
= freelist
->next
;
267 /* Make sure we get a clean block of soinfo */
268 memset(si
, 0, sizeof(soinfo
));
269 strlcpy((char*) si
->name
, name
, sizeof(si
->name
));
275 TRACE("%5d name %s: allocated soinfo @ %p\n", pid
, name
, si
);
279 static void free_info(soinfo
*si
)
281 soinfo
*prev
= NULL
, *trav
;
283 TRACE("%5d name %s: freeing soinfo @ %p\n", pid
, si
->name
, si
);
285 for(trav
= solist
; trav
!= NULL
; trav
= trav
->next
){
291 /* si was not ni solist */
292 DL_ERR("%5d name %s is not in solist!", pid
, si
->name
);
296 /* prev will never be NULL, because the first entry in solist is
297 always the static libdl_info.
299 prev
->next
= si
->next
;
300 if (si
== sonext
) sonext
= prev
;
305 const char *addr_to_name(unsigned addr
)
309 for(si
= solist
; si
!= 0; si
= si
->next
){
310 if((addr
>= si
->base
) && (addr
< (si
->base
+ si
->size
))) {
318 /* For a given PC, find the .so that it belongs to.
319 * Returns the base address of the .ARM.exidx section
320 * for that .so, and the number of 8-byte entries
321 * in that section (via *pcount).
323 * Intended to be called by libc's __gnu_Unwind_Find_exidx().
325 * This function is exposed via dlfcn.c and libdl.so.
327 #ifdef ANDROID_ARM_LINKER
328 _Unwind_Ptr
android_dl_unwind_find_exidx(_Unwind_Ptr pc
, int *pcount
)
331 unsigned addr
= (unsigned)pc
;
333 for (si
= solist
; si
!= 0; si
= si
->next
){
334 if ((addr
>= si
->base
) && (addr
< (si
->base
+ si
->size
))) {
335 *pcount
= si
->ARM_exidx_count
;
336 return (_Unwind_Ptr
)(si
->base
+ (unsigned long)si
->ARM_exidx
);
342 #elif defined(ANDROID_X86_LINKER)
343 /* Here, we only have to provide a callback to iterate across all the
344 * loaded libraries. gcc_eh does the rest. */
346 android_dl_iterate_phdr(int (*cb
)(struct dl_phdr_info
*info
, size_t size
, void *data
),
350 struct dl_phdr_info dl_info
;
353 for (si
= solist
; si
!= NULL
; si
= si
->next
) {
354 dl_info
.dlpi_addr
= si
->linkmap
.l_addr
;
355 dl_info
.dlpi_name
= si
->linkmap
.l_name
;
356 dl_info
.dlpi_phdr
= si
->phdr
;
357 dl_info
.dlpi_phnum
= si
->phnum
;
358 rv
= cb(&dl_info
, sizeof (struct dl_phdr_info
), data
);
366 static Elf32_Sym
*_elf_lookup(soinfo
*si
, unsigned hash
, const char *name
)
369 Elf32_Sym
*symtab
= si
->symtab
;
370 const char *strtab
= si
->strtab
;
373 TRACE_TYPE(LOOKUP
, "%5d SEARCH %s in %s@0x%08x %08x %d\n", pid
,
374 name
, si
->name
, si
->base
, hash
, hash
% si
->nbucket
);
375 if (si
->nbucket
== 0) {
378 n
= hash
% si
->nbucket
;
380 for(n
= si
->bucket
[hash
% si
->nbucket
]; n
!= 0; n
= si
->chain
[n
]){
382 if(strcmp(strtab
+ s
->st_name
, name
)) continue;
384 /* only concern ourselves with global and weak symbol definitions */
385 switch(ELF32_ST_BIND(s
->st_info
)){
388 /* no section == undefined */
389 if(s
->st_shndx
== 0) continue;
391 TRACE_TYPE(LOOKUP
, "%5d FOUND %s in %s (%08x) %d\n", pid
,
392 name
, si
->name
, s
->st_value
, s
->st_size
);
/* Computes the System V ABI ELF hash of a symbol name, used to pick the
 * bucket in a library's .hash section during symbol lookup.
 *
 * NOTE(review): the extraction lost the loop scaffolding; the body below
 * is the standard published SysV elf_hash algorithm, whose core step
 * matches the visible fragment (`h = (h << 4) + *name++;`) — confirm
 * against the original file. */
static unsigned elfhash(const char *_name)
{
    /* Hash over unsigned bytes so chars > 127 behave consistently. */
    const unsigned char *name = (const unsigned char *) _name;
    unsigned h = 0, g;

    while(*name) {
        h = (h << 4) + *name++;
        g = h & 0xf0000000;
        h ^= g;
        h ^= g >> 24;
    }
    return h;
}
415 _do_lookup(soinfo
*si
, const char *name
, unsigned *base
)
417 unsigned elf_hash
= elfhash(name
);
423 /* Look for symbols in the local scope (the object who is
424 * searching). This happens with C++ templates on i386 for some
427 * Notes on weak symbols:
428 * The ELF specs are ambigious about treatment of weak definitions in
429 * dynamic linking. Some systems return the first definition found
430 * and some the first non-weak definition. This is system dependent.
431 * Here we return the first definition found for simplicity. */
433 s
= _elf_lookup(si
, elf_hash
, name
);
437 /* Next, look for it in the preloads list */
438 for(i
= 0; preloads
[i
] != NULL
; i
++) {
440 s
= _elf_lookup(lsi
, elf_hash
, name
);
445 for(d
= si
->dynamic
; *d
; d
+= 2) {
446 if(d
[0] == DT_NEEDED
){
447 lsi
= (soinfo
*)d
[1];
448 if (!validate_soinfo(lsi
)) {
449 DL_ERR("%5d bad DT_NEEDED pointer in %s",
454 DEBUG("%5d %s: looking up %s in %s\n",
455 pid
, si
->name
, name
, lsi
->name
);
456 s
= _elf_lookup(lsi
, elf_hash
, name
);
457 if ((s
!= NULL
) && (s
->st_shndx
!= SHN_UNDEF
))
462 #if ALLOW_SYMBOLS_FROM_MAIN
463 /* If we are resolving relocations while dlopen()ing a library, it's OK for
464 * the library to resolve a symbol that's defined in the executable itself,
465 * although this is rare and is generally a bad idea.
469 DEBUG("%5d %s: looking up %s in executable %s\n",
470 pid
, si
->name
, name
, lsi
->name
);
471 s
= _elf_lookup(lsi
, elf_hash
, name
);
477 TRACE_TYPE(LOOKUP
, "%5d si %s sym %s s->st_value = 0x%08x, "
478 "found in %s, base = 0x%08x\n",
479 pid
, si
->name
, name
, s
->st_value
, lsi
->name
, lsi
->base
);
487 /* This is used by dl_sym(). It performs symbol lookup only within the
488 specified soinfo object and not in any of its dependencies.
490 Elf32_Sym
*lookup_in_library(soinfo
*si
, const char *name
)
492 return _elf_lookup(si
, elfhash(name
), name
);
495 /* This is used by dl_sym(). It performs a global symbol lookup.
497 Elf32_Sym
*lookup(const char *name
, soinfo
**found
, soinfo
*start
)
499 unsigned elf_hash
= elfhash(name
);
507 for(si
= start
; (s
== NULL
) && (si
!= NULL
); si
= si
->next
)
509 if(si
->flags
& FLAG_ERROR
)
511 s
= _elf_lookup(si
, elf_hash
, name
);
519 TRACE_TYPE(LOOKUP
, "%5d %s s->st_value = 0x%08x, "
520 "si->base = 0x%08x\n", pid
, name
, s
->st_value
, si
->base
);
527 soinfo
*find_containing_library(const void *addr
)
531 for(si
= solist
; si
!= NULL
; si
= si
->next
)
533 if((unsigned)addr
>= si
->base
&& (unsigned)addr
- si
->base
< si
->size
) {
541 Elf32_Sym
*find_containing_symbol(const void *addr
, soinfo
*si
)
544 unsigned soaddr
= (unsigned)addr
- si
->base
;
546 /* Search the library's symbol table for any defined symbol which
547 * contains this address */
548 for(i
=0; i
<si
->nchain
; i
++) {
549 Elf32_Sym
*sym
= &si
->symtab
[i
];
551 if(sym
->st_shndx
!= SHN_UNDEF
&&
552 soaddr
>= sym
->st_value
&&
553 soaddr
< sym
->st_value
+ sym
->st_size
) {
562 static void dump(soinfo
*si
)
564 Elf32_Sym
*s
= si
->symtab
;
567 for(n
= 0; n
< si
->nchain
; n
++) {
568 TRACE("%5d %04d> %08x: %02x %04x %08x %08x %s\n", pid
, n
, s
,
569 s
->st_info
, s
->st_shndx
, s
->st_value
, s
->st_size
,
570 si
->strtab
+ s
->st_name
);
576 static const char *sopaths
[] = {
/* Opens `name` read-only if and only if it exists and is a regular file
 * (the S_ISREG check rejects directories, devices, etc.).
 *
 * Returns an open file descriptor on success, -1 otherwise.
 *
 * NOTE(review): reconstructed from a garbled extraction; the `int fd;`
 * declaration and the two return statements were restored, forced by the
 * visible logic — confirm against the original file. */
static int _open_lib(const char *name)
{
    int fd;
    struct stat filestat;

    if ((stat(name, &filestat) >= 0) && S_ISREG(filestat.st_mode)) {
        if ((fd = open(name, O_RDONLY)) >= 0)
            return fd;
    }

    return -1;
}
595 static int open_library(const char *name
)
602 TRACE("[ %5d opening %s ]\n", pid
, name
);
604 if(name
== 0) return -1;
605 if(strlen(name
) > 256) return -1;
607 if ((name
[0] == '/') && ((fd
= _open_lib(name
)) >= 0))
610 for (path
= ldpaths
; *path
; path
++) {
611 n
= format_buffer(buf
, sizeof(buf
), "%s/%s", *path
, name
);
612 if (n
< 0 || n
>= (int)sizeof(buf
)) {
613 WARN("Ignoring very long library path: %s/%s\n", *path
, name
);
616 if ((fd
= _open_lib(buf
)) >= 0)
619 for (path
= sopaths
; *path
; path
++) {
620 n
= format_buffer(buf
, sizeof(buf
), "%s/%s", *path
, name
);
621 if (n
< 0 || n
>= (int)sizeof(buf
)) {
622 WARN("Ignoring very long library path: %s/%s\n", *path
, name
);
625 if ((fd
= _open_lib(buf
)) >= 0)
632 /* temporary space for holding the first page of the shared lib
633 * which contains the elf header (with the pht). */
634 static unsigned char __header
[PAGE_SIZE
];
638 char tag
[4]; /* 'P', 'R', 'E', ' ' */
641 /* Returns the requested base address if the library is prelinked,
642 * and 0 otherwise. */
644 is_prelinked(int fd
, const char *name
)
649 sz
= lseek(fd
, -sizeof(prelink_info_t
), SEEK_END
);
651 DL_ERR("lseek() failed!");
655 if (read(fd
, &info
, sizeof(info
)) != sizeof(info
)) {
656 WARN("Could not read prelink_info_t structure for `%s`\n", name
);
660 if (strncmp(info
.tag
, "PRE ", 4)) {
661 WARN("`%s` is not a prelinked library\n", name
);
665 return (unsigned long)info
.mmap_addr
;
/* verify_elf_object
 *
 * Verifies that the object at `base` is a valid ELF object by checking
 * the four e_ident magic bytes, and (when built as the ARM or x86
 * linker) that e_machine matches the target architecture.
 *
 * Returns:
 *   0 if the header looks like a valid ELF object for this target.
 *  -1 if no valid ELF object is found @ base.
 *
 * NOTE(review): reconstructed from a garbled extraction; the `static
 * int` return type (implied by the `< 0` call site and the `return -1`
 * statements) and the final `return 0;` were restored — confirm against
 * the original file. `name` is currently unused here. */
static int
verify_elf_object(void *base, const char *name)
{
    Elf32_Ehdr *hdr = (Elf32_Ehdr *) base;

    if (hdr->e_ident[EI_MAG0] != ELFMAG0) return -1;
    if (hdr->e_ident[EI_MAG1] != ELFMAG1) return -1;
    if (hdr->e_ident[EI_MAG2] != ELFMAG2) return -1;
    if (hdr->e_ident[EI_MAG3] != ELFMAG3) return -1;

    /* TODO: Should we verify anything else in the header? */
#ifdef ANDROID_ARM_LINKER
    if (hdr->e_machine != EM_ARM) return -1;
#elif defined(ANDROID_X86_LINKER)
    if (hdr->e_machine != EM_386) return -1;
#endif
    return 0;
}
698 * Retrieves the base (*base) address where the ELF object should be
699 * mapped and its overall memory size (*total_sz).
702 * fd: Opened file descriptor for the library
703 * name: The name of the library
704 * _hdr: Pointer to the header page of the library
705 * total_sz: Total size of the memory that should be allocated for
709 * -1 if there was an error while trying to get the lib extents.
710 * The possible reasons are:
711 * - Could not determine if the library was prelinked.
712 * - The library provided is not a valid ELF object
713 * 0 if the library did not request a specific base offset (normal
714 * for non-prelinked libs)
715 * > 0 if the library requests a specific address to be mapped to.
716 * This indicates a pre-linked library.
719 get_lib_extents(int fd
, const char *name
, void *__hdr
, unsigned *total_sz
)
722 unsigned min_vaddr
= 0xffffffff;
723 unsigned max_vaddr
= 0;
724 unsigned char *_hdr
= (unsigned char *)__hdr
;
725 Elf32_Ehdr
*ehdr
= (Elf32_Ehdr
*)_hdr
;
729 TRACE("[ %5d Computing extents for '%s'. ]\n", pid
, name
);
730 if (verify_elf_object(_hdr
, name
) < 0) {
731 DL_ERR("%5d - %s is not a valid ELF object", pid
, name
);
735 req_base
= (unsigned) is_prelinked(fd
, name
);
736 if (req_base
== (unsigned)-1)
738 else if (req_base
!= 0) {
739 TRACE("[ %5d - Prelinked library '%s' requesting base @ 0x%08x ]\n",
740 pid
, name
, req_base
);
742 TRACE("[ %5d - Non-prelinked library '%s' found. ]\n", pid
, name
);
745 phdr
= (Elf32_Phdr
*)(_hdr
+ ehdr
->e_phoff
);
747 /* find the min/max p_vaddrs from all the PT_LOAD segments so we can
749 for (cnt
= 0; cnt
< ehdr
->e_phnum
; ++cnt
, ++phdr
) {
750 if (phdr
->p_type
== PT_LOAD
) {
751 if ((phdr
->p_vaddr
+ phdr
->p_memsz
) > max_vaddr
)
752 max_vaddr
= phdr
->p_vaddr
+ phdr
->p_memsz
;
753 if (phdr
->p_vaddr
< min_vaddr
)
754 min_vaddr
= phdr
->p_vaddr
;
758 if ((min_vaddr
== 0xffffffff) && (max_vaddr
== 0)) {
759 DL_ERR("%5d - No loadable segments found in %s.", pid
, name
);
763 /* truncate min_vaddr down to page boundary */
764 min_vaddr
&= ~PAGE_MASK
;
766 /* round max_vaddr up to the next page */
767 max_vaddr
= (max_vaddr
+ PAGE_SIZE
- 1) & ~PAGE_MASK
;
769 *total_sz
= (max_vaddr
- min_vaddr
);
770 return (unsigned)req_base
;
773 /* reserve_mem_region
775 * This function reserves a chunk of memory to be used for mapping in
776 * a prelinked shared library. We reserve the entire memory region here, and
777 * then the rest of the linker will relocate the individual loadable
778 * segments into the correct locations within this memory range.
781 * si->base: The requested base of the allocation.
782 * si->size: The size of the allocation.
785 * -1 on failure, and 0 on success. On success, si->base will contain
786 * the virtual address at which the library will be mapped.
789 static int reserve_mem_region(soinfo
*si
)
791 void *base
= mmap((void *)si
->base
, si
->size
, PROT_NONE
,
792 MAP_FIXED
| MAP_PRIVATE
| MAP_ANONYMOUS
, -1, 0);
793 if (base
== MAP_FAILED
) {
794 DL_ERR("%5d can NOT map (%sprelinked) library '%s' at 0x%08x "
795 "as requested, will try general pool: %d (%s)",
796 pid
, (si
->base
? "" : "non-"), si
->name
, si
->base
,
797 errno
, strerror(errno
));
799 } else if (base
!= (void *)si
->base
) {
800 DL_ERR("OOPS: %5d %sprelinked library '%s' mapped at 0x%08x, "
801 "not at 0x%08x", pid
, (si
->base
? "" : "non-"),
802 si
->name
, (unsigned)base
, si
->base
);
803 munmap(base
, si
->size
);
809 static int alloc_mem_region(soinfo
*si
)
812 /* Attempt to mmap a prelinked library. */
813 return reserve_mem_region(si
);
816 /* This is not a prelinked library, so we use the kernel's default
820 void *base
= mmap(NULL
, si
->size
, PROT_NONE
,
821 MAP_PRIVATE
| MAP_ANONYMOUS
, -1, 0);
822 if (base
== MAP_FAILED
) {
823 DL_ERR("%5d mmap of library '%s' failed: %d (%s)\n",
825 errno
, strerror(errno
));
828 si
->base
= (unsigned) base
;
829 PRINT("%5d mapped library '%s' to %08x via kernel allocator.\n",
830 pid
, si
->name
, si
->base
);
834 DL_ERR("OOPS: %5d cannot map library '%s'. no vspace available.",
839 #define MAYBE_MAP_FLAG(x,from,to) (((x) & (from)) ? (to) : 0)
840 #define PFLAGS_TO_PROT(x) (MAYBE_MAP_FLAG((x), PF_X, PROT_EXEC) | \
841 MAYBE_MAP_FLAG((x), PF_R, PROT_READ) | \
842 MAYBE_MAP_FLAG((x), PF_W, PROT_WRITE))
845 * This function loads all the loadable (PT_LOAD) segments into memory
846 * at their appropriate memory offsets off the base address.
849 * fd: Open file descriptor to the library to load.
850 * header: Pointer to a header page that contains the ELF header.
851 * This is needed since we haven't mapped in the real file yet.
852 * si: ptr to soinfo struct describing the shared object.
855 * 0 on success, -1 on failure.
858 load_segments(int fd
, void *header
, soinfo
*si
)
860 Elf32_Ehdr
*ehdr
= (Elf32_Ehdr
*)header
;
861 Elf32_Phdr
*phdr
= (Elf32_Phdr
*)((unsigned char *)header
+ ehdr
->e_phoff
);
862 Elf32_Addr base
= (Elf32_Addr
) si
->base
;
866 unsigned char *pbase
;
867 unsigned char *extra_base
;
869 unsigned total_sz
= 0;
871 si
->wrprotect_start
= 0xffffffff;
872 si
->wrprotect_end
= 0;
874 TRACE("[ %5d - Begin loading segments for '%s' @ 0x%08x ]\n",
875 pid
, si
->name
, (unsigned)si
->base
);
876 /* Now go through all the PT_LOAD segments and map them into memory
877 * at the appropriate locations. */
878 for (cnt
= 0; cnt
< ehdr
->e_phnum
; ++cnt
, ++phdr
) {
879 if (phdr
->p_type
== PT_LOAD
) {
880 DEBUG_DUMP_PHDR(phdr
, "PT_LOAD", pid
);
881 /* we want to map in the segment on a page boundary */
882 tmp
= base
+ (phdr
->p_vaddr
& (~PAGE_MASK
));
883 /* add the # of bytes we masked off above to the total length. */
884 len
= phdr
->p_filesz
+ (phdr
->p_vaddr
& PAGE_MASK
);
886 TRACE("[ %d - Trying to load segment from '%s' @ 0x%08x "
887 "(0x%08x). p_vaddr=0x%08x p_offset=0x%08x ]\n", pid
, si
->name
,
888 (unsigned)tmp
, len
, phdr
->p_vaddr
, phdr
->p_offset
);
889 pbase
= mmap((void *)tmp
, len
, PFLAGS_TO_PROT(phdr
->p_flags
),
890 MAP_PRIVATE
| MAP_FIXED
, fd
,
891 phdr
->p_offset
& (~PAGE_MASK
));
892 if (pbase
== MAP_FAILED
) {
893 DL_ERR("%d failed to map segment from '%s' @ 0x%08x (0x%08x). "
894 "p_vaddr=0x%08x p_offset=0x%08x", pid
, si
->name
,
895 (unsigned)tmp
, len
, phdr
->p_vaddr
, phdr
->p_offset
);
899 /* If 'len' didn't end on page boundary, and it's a writable
900 * segment, zero-fill the rest. */
901 if ((len
& PAGE_MASK
) && (phdr
->p_flags
& PF_W
))
902 memset((void *)(pbase
+ len
), 0, PAGE_SIZE
- (len
& PAGE_MASK
));
904 /* Check to see if we need to extend the map for this segment to
905 * cover the diff between filesz and memsz (i.e. for bss).
907 * base _+---------------------+ page boundary
911 * pbase _+---------------------+ page boundary
914 * base + p_vaddr _| |
917 * pbase + len _| / | |
919 * extra_base _+------------|--------+ page boundary
922 * | +------------|--------+ page boundary
923 * extra_len-> | | | |
929 * _+---------------------+ page boundary
931 tmp
= (Elf32_Addr
)(((unsigned)pbase
+ len
+ PAGE_SIZE
- 1) &
933 if (tmp
< (base
+ phdr
->p_vaddr
+ phdr
->p_memsz
)) {
934 extra_len
= base
+ phdr
->p_vaddr
+ phdr
->p_memsz
- tmp
;
935 TRACE("[ %5d - Need to extend segment from '%s' @ 0x%08x "
936 "(0x%08x) ]\n", pid
, si
->name
, (unsigned)tmp
, extra_len
);
937 /* map in the extra page(s) as anonymous into the range.
938 * This is probably not necessary as we already mapped in
939 * the entire region previously, but we just want to be
940 * sure. This will also set the right flags on the region
941 * (though we can probably accomplish the same thing with
944 extra_base
= mmap((void *)tmp
, extra_len
,
945 PFLAGS_TO_PROT(phdr
->p_flags
),
946 MAP_PRIVATE
| MAP_FIXED
| MAP_ANONYMOUS
,
948 if (extra_base
== MAP_FAILED
) {
949 DL_ERR("[ %5d - failed to extend segment from '%s' @ 0x%08x"
950 " (0x%08x) ]", pid
, si
->name
, (unsigned)tmp
,
954 /* TODO: Check if we need to memset-0 this region.
955 * Anonymous mappings are zero-filled copy-on-writes, so we
956 * shouldn't need to. */
957 TRACE("[ %5d - Segment from '%s' extended @ 0x%08x "
958 "(0x%08x)\n", pid
, si
->name
, (unsigned)extra_base
,
961 /* set the len here to show the full extent of the segment we
962 * just loaded, mostly for debugging */
963 len
= (((unsigned)base
+ phdr
->p_vaddr
+ phdr
->p_memsz
+
964 PAGE_SIZE
- 1) & (~PAGE_MASK
)) - (unsigned)pbase
;
965 TRACE("[ %5d - Successfully loaded segment from '%s' @ 0x%08x "
966 "(0x%08x). p_vaddr=0x%08x p_offset=0x%08x\n", pid
, si
->name
,
967 (unsigned)pbase
, len
, phdr
->p_vaddr
, phdr
->p_offset
);
969 /* Make the section writable just in case we'll have to write to
970 * it during relocation (i.e. text segment). However, we will
971 * remember what range of addresses should be write protected.
974 if (!(phdr
->p_flags
& PF_W
)) {
975 if ((unsigned)pbase
< si
->wrprotect_start
)
976 si
->wrprotect_start
= (unsigned)pbase
;
977 if (((unsigned)pbase
+ len
) > si
->wrprotect_end
)
978 si
->wrprotect_end
= (unsigned)pbase
+ len
;
980 PFLAGS_TO_PROT(phdr
->p_flags
) | PROT_WRITE
);
982 } else if (phdr
->p_type
== PT_DYNAMIC
) {
983 DEBUG_DUMP_PHDR(phdr
, "PT_DYNAMIC", pid
);
984 /* this segment contains the dynamic linking information */
985 si
->dynamic
= (unsigned *)(base
+ phdr
->p_vaddr
);
986 } else if (phdr
->p_type
== PT_GNU_RELRO
) {
987 if ((phdr
->p_vaddr
>= si
->size
)
988 || ((phdr
->p_vaddr
+ phdr
->p_memsz
) > si
->size
)
989 || ((base
+ phdr
->p_vaddr
+ phdr
->p_memsz
) < base
)) {
990 DL_ERR("%d invalid GNU_RELRO in '%s' "
991 "p_vaddr=0x%08x p_memsz=0x%08x", pid
, si
->name
,
992 phdr
->p_vaddr
, phdr
->p_memsz
);
995 si
->gnu_relro_start
= (Elf32_Addr
) (base
+ phdr
->p_vaddr
);
996 si
->gnu_relro_len
= (unsigned) phdr
->p_memsz
;
998 #ifdef ANDROID_ARM_LINKER
999 if (phdr
->p_type
== PT_ARM_EXIDX
) {
1000 DEBUG_DUMP_PHDR(phdr
, "PT_ARM_EXIDX", pid
);
1001 /* exidx entries (used for stack unwinding) are 8 bytes each.
1003 si
->ARM_exidx
= (unsigned *)phdr
->p_vaddr
;
1004 si
->ARM_exidx_count
= phdr
->p_memsz
/ 8;
1012 if (total_sz
> si
->size
) {
1013 DL_ERR("%5d - Total length (0x%08x) of mapped segments from '%s' is "
1014 "greater than what was allocated (0x%08x). THIS IS BAD!",
1015 pid
, total_sz
, si
->name
, si
->size
);
1019 TRACE("[ %5d - Finish loading segments for '%s' @ 0x%08x. "
1020 "Total memory footprint: 0x%08x bytes ]\n", pid
, si
->name
,
1021 (unsigned)si
->base
, si
->size
);
1025 /* We can just blindly unmap the entire region even though some things
1026 * were mapped in originally with anonymous and others could have been
1027 * been mapped in from the file before we failed. The kernel will unmap
1028 * all the pages in the range, irrespective of how they got there.
1030 munmap((void *)si
->base
, si
->size
);
1031 si
->flags
|= FLAG_ERROR
;
1035 /* TODO: Implement this to take care of the fact that Android ARM
1036 * ELF objects shove everything into a single loadable segment that has the
1037 * write bit set. wr_offset is then used to set non-(data|bss) pages to be
1042 get_wr_offset(int fd
, const char *name
, Elf32_Ehdr
*ehdr
)
1044 Elf32_Shdr
*shdr_start
;
1046 int shdr_sz
= ehdr
->e_shnum
* sizeof(Elf32_Shdr
);
1048 unsigned wr_offset
= 0xffffffff;
1050 shdr_start
= mmap(0, shdr_sz
, PROT_READ
, MAP_PRIVATE
, fd
,
1051 ehdr
->e_shoff
& (~PAGE_MASK
));
1052 if (shdr_start
== MAP_FAILED
) {
1053 WARN("%5d - Could not read section header info from '%s'. Will not "
1054 "not be able to determine write-protect offset.\n", pid
, name
);
1055 return (unsigned)-1;
1058 for(cnt
= 0, shdr
= shdr_start
; cnt
< ehdr
->e_shnum
; ++cnt
, ++shdr
) {
1059 if ((shdr
->sh_type
!= SHT_NULL
) && (shdr
->sh_flags
& SHF_WRITE
) &&
1060 (shdr
->sh_addr
< wr_offset
)) {
1061 wr_offset
= shdr
->sh_addr
;
1065 munmap(shdr_start
, shdr_sz
);
1071 load_library(const char *name
)
1073 int fd
= open_library(name
);
1082 DL_ERR("Library '%s' not found", name
);
1086 /* We have to read the ELF header to figure out what to do with this image
1088 if (lseek(fd
, 0, SEEK_SET
) < 0) {
1089 DL_ERR("lseek() failed!");
1093 if ((cnt
= read(fd
, &__header
[0], PAGE_SIZE
)) < 0) {
1094 DL_ERR("read() failed!");
1098 /* Parse the ELF header and get the size of the memory footprint for
1100 req_base
= get_lib_extents(fd
, name
, &__header
[0], &ext_sz
);
1101 if (req_base
== (unsigned)-1)
1103 TRACE("[ %5d - '%s' (%s) wants base=0x%08x sz=0x%08x ]\n", pid
, name
,
1104 (req_base
? "prelinked" : "not pre-linked"), req_base
, ext_sz
);
1106 /* Now configure the soinfo struct where we'll store all of our data
1107 * for the ELF object. If the loading fails, we waste the entry, but
1108 * same thing would happen if we failed during linking. Configuring the
1109 * soinfo struct here is a lot more convenient.
1111 bname
= strrchr(name
, '/');
1112 si
= alloc_info(bname
? bname
+ 1 : name
);
1116 /* Carve out a chunk of memory where we will map in the individual
1118 si
->base
= req_base
;
1122 si
->dynamic
= (unsigned *)-1;
1123 if (alloc_mem_region(si
) < 0)
1126 TRACE("[ %5d allocated memory for %s @ %p (0x%08x) ]\n",
1127 pid
, name
, (void *)si
->base
, (unsigned) ext_sz
);
1129 /* Now actually load the library's segments into right places in memory */
1130 if (load_segments(fd
, &__header
[0], si
) < 0) {
1134 /* this might not be right. Technically, we don't even need this info
1135 * once we go through 'load_segments'. */
1136 hdr
= (Elf32_Ehdr
*)si
->base
;
1137 si
->phdr
= (Elf32_Phdr
*)((unsigned char *)si
->base
+ hdr
->e_phoff
);
1138 si
->phnum
= hdr
->e_phnum
;
1145 if (si
) free_info(si
);
1151 init_library(soinfo
*si
)
1153 unsigned wr_offset
= 0xffffffff;
1155 /* At this point we know that whatever is loaded @ base is a valid ELF
1156 * shared library whose segments are properly mapped in. */
1157 TRACE("[ %5d init_library base=0x%08x sz=0x%08x name='%s') ]\n",
1158 pid
, si
->base
, si
->size
, si
->name
);
1160 if(link_image(si
, wr_offset
)) {
1161 /* We failed to link. However, we can only restore libbase
1162 ** if no additional libraries have moved it since we updated it.
1164 munmap((void *)si
->base
, si
->size
);
1171 soinfo
*find_library(const char *name
)
1176 #if ALLOW_SYMBOLS_FROM_MAIN
1184 bname
= strrchr(name
, '/');
1185 bname
= bname
? bname
+ 1 : name
;
1187 for(si
= solist
; si
!= 0; si
= si
->next
){
1188 if(!strcmp(bname
, si
->name
)) {
1189 if(si
->flags
& FLAG_ERROR
) {
1190 DL_ERR("%5d '%s' failed to load previously", pid
, bname
);
1193 if(si
->flags
& FLAG_LINKED
) return si
;
1194 DL_ERR("OOPS: %5d recursive link to '%s'", pid
, si
->name
);
1199 TRACE("[ %5d '%s' has not been loaded yet. Locating...]\n", pid
, name
);
1200 si
= load_library(name
);
1203 return init_library(si
);
1207 * notify gdb of unload
1208 * for non-prelinked libraries, find a way to decrement libbase
1210 static void call_destructors(soinfo
*si
);
1211 unsigned unload_library(soinfo
*si
)
1214 if (si
->refcount
== 1) {
1215 TRACE("%5d unloading '%s'\n", pid
, si
->name
);
1216 call_destructors(si
);
1219 * Make sure that we undo the PT_GNU_RELRO protections we added
1220 * in link_image. This is needed to undo the DT_NEEDED hack below.
1222 if ((si
->gnu_relro_start
!= 0) && (si
->gnu_relro_len
!= 0)) {
1223 Elf32_Addr start
= (si
->gnu_relro_start
& ~PAGE_MASK
);
1224 unsigned len
= (si
->gnu_relro_start
- start
) + si
->gnu_relro_len
;
1225 if (mprotect((void *) start
, len
, PROT_READ
| PROT_WRITE
) < 0)
1226 DL_ERR("%5d %s: could not undo GNU_RELRO protections. "
1227 "Expect a crash soon. errno=%d (%s)",
1228 pid
, si
->name
, errno
, strerror(errno
));
1232 for(d
= si
->dynamic
; *d
; d
+= 2) {
1233 if(d
[0] == DT_NEEDED
){
1234 soinfo
*lsi
= (soinfo
*)d
[1];
1236 // The next line will segfault if the we don't undo the
1237 // PT_GNU_RELRO protections (see comments above and in
1241 if (validate_soinfo(lsi
)) {
1242 TRACE("%5d %s needs to unload %s\n", pid
,
1243 si
->name
, lsi
->name
);
1244 unload_library(lsi
);
1247 DL_ERR("%5d %s: could not unload dependent library",
1252 munmap((char *)si
->base
, si
->size
);
1253 notify_gdb_of_unload(si
);
1259 PRINT("%5d not unloading '%s', decrementing refcount to %d\n",
1260 pid
, si
->name
, si
->refcount
);
1262 return si
->refcount
;
1265 /* TODO: don't use unsigned for addrs below. It works, but is not
1266 * ideal. They should probably be either uint32_t, Elf32_Addr, or unsigned
1269 static int reloc_library(soinfo
*si
, Elf32_Rel
*rel
, unsigned count
)
1271 Elf32_Sym
*symtab
= si
->symtab
;
1272 const char *strtab
= si
->strtab
;
1275 Elf32_Rel
*start
= rel
;
1278 for (idx
= 0; idx
< count
; ++idx
) {
1279 unsigned type
= ELF32_R_TYPE(rel
->r_info
);
1280 unsigned sym
= ELF32_R_SYM(rel
->r_info
);
1281 unsigned reloc
= (unsigned)(rel
->r_offset
+ si
->base
);
1282 unsigned sym_addr
= 0;
1283 char *sym_name
= NULL
;
1285 DEBUG("%5d Processing '%s' relocation at index %d\n", pid
,
1288 sym_name
= (char *)(strtab
+ symtab
[sym
].st_name
);
1290 if ((sym_addr
= get_hooked_symbol(sym_name
)) != NULL
) {
1291 DEBUG("hooked symbol %s to %x\n", sym_name
, sym_addr
);
1295 s
= _do_lookup(si
, sym_name
, &base
);
1297 if(sym_addr
!= NULL
)
1301 /* We only allow an undefined symbol if this is a weak
1304 if (ELF32_ST_BIND(s
->st_info
) != STB_WEAK
) {
1305 DL_ERR("%5d cannot locate '%s'...\n", pid
, sym_name
);
1309 /* IHI0044C AAELF 4.5.1.1:
1311 Libraries are not searched to resolve weak references.
1312 It is not an error for a weak reference to remain
1315 During linking, the value of an undefined weak reference is:
1316 - Zero if the relocation type is absolute
1317 - The address of the place if the relocation is pc-relative
1318 - The address of nominial base address if the relocation
1319 type is base-relative.
1323 #if defined(ANDROID_ARM_LINKER)
1324 case R_ARM_JUMP_SLOT
:
1325 case R_ARM_GLOB_DAT
:
1327 case R_ARM_RELATIVE
: /* Don't care. */
1328 case R_ARM_NONE
: /* Don't care. */
1329 #elif defined(ANDROID_X86_LINKER)
1330 case R_386_JUMP_SLOT
:
1331 case R_386_GLOB_DAT
:
1333 case R_386_RELATIVE
: /* Dont' care. */
1334 #endif /* ANDROID_*_LINKER */
1335 /* sym_addr was initialized to be zero above or relocation
1336 code below does not care about value of sym_addr.
1337 No need to do anything. */
1340 #if defined(ANDROID_X86_LINKER)
1344 #endif /* ANDROID_X86_LINKER */
1346 #if defined(ANDROID_ARM_LINKER)
1348 /* Fall through. Can't really copy if weak symbol is
1349 not found in run-time. */
1350 #endif /* ANDROID_ARM_LINKER */
1352 DL_ERR("%5d unknown weak reloc type %d @ %p (%d)\n",
1353 pid
, type
, rel
, (int) (rel
- start
));
1357 /* We got a definition. */
1359 if((base
== 0) && (si
->base
!= 0)){
1360 /* linking from libraries to main image is bad */
1361 DL_ERR("%5d cannot locate '%s'...",
1362 pid
, strtab
+ symtab
[sym
].st_name
);
1366 sym_addr
= (unsigned)(s
->st_value
+ base
);
1368 COUNT_RELOC(RELOC_SYMBOL
);
1373 /* TODO: This is ugly. Split up the relocations by arch into
1377 #if defined(ANDROID_ARM_LINKER)
1378 case R_ARM_JUMP_SLOT
:
1379 COUNT_RELOC(RELOC_ABSOLUTE
);
1380 MARK(rel
->r_offset
);
1381 TRACE_TYPE(RELO
, "%5d RELO JMP_SLOT %08x <- %08x %s\n", pid
,
1382 reloc
, sym_addr
, sym_name
);
1383 *((unsigned*)reloc
) = sym_addr
;
1385 case R_ARM_GLOB_DAT
:
1386 COUNT_RELOC(RELOC_ABSOLUTE
);
1387 MARK(rel
->r_offset
);
1388 TRACE_TYPE(RELO
, "%5d RELO GLOB_DAT %08x <- %08x %s\n", pid
,
1389 reloc
, sym_addr
, sym_name
);
1390 *((unsigned*)reloc
) = sym_addr
;
1393 COUNT_RELOC(RELOC_ABSOLUTE
);
1394 MARK(rel
->r_offset
);
1395 TRACE_TYPE(RELO
, "%5d RELO ABS %08x <- %08x %s\n", pid
,
1396 reloc
, sym_addr
, sym_name
);
1397 *((unsigned*)reloc
) += sym_addr
;
1400 COUNT_RELOC(RELOC_RELATIVE
);
1401 MARK(rel
->r_offset
);
1402 TRACE_TYPE(RELO
, "%5d RELO REL32 %08x <- %08x - %08x %s\n", pid
,
1403 reloc
, sym_addr
, rel
->r_offset
, sym_name
);
1404 *((unsigned*)reloc
) += sym_addr
- rel
->r_offset
;
1406 #elif defined(ANDROID_X86_LINKER)
1407 case R_386_JUMP_SLOT
:
1408 COUNT_RELOC(RELOC_ABSOLUTE
);
1409 MARK(rel
->r_offset
);
1410 TRACE_TYPE(RELO
, "%5d RELO JMP_SLOT %08x <- %08x %s\n", pid
,
1411 reloc
, sym_addr
, sym_name
);
1412 *((unsigned*)reloc
) = sym_addr
;
1414 case R_386_GLOB_DAT
:
1415 COUNT_RELOC(RELOC_ABSOLUTE
);
1416 MARK(rel
->r_offset
);
1417 TRACE_TYPE(RELO
, "%5d RELO GLOB_DAT %08x <- %08x %s\n", pid
,
1418 reloc
, sym_addr
, sym_name
);
1419 *((unsigned*)reloc
) = sym_addr
;
1421 #endif /* ANDROID_*_LINKER */
1423 #if defined(ANDROID_ARM_LINKER)
1424 case R_ARM_RELATIVE
:
1425 #elif defined(ANDROID_X86_LINKER)
1426 case R_386_RELATIVE
:
1427 #endif /* ANDROID_*_LINKER */
1428 COUNT_RELOC(RELOC_RELATIVE
);
1429 MARK(rel
->r_offset
);
1431 DL_ERR("%5d odd RELATIVE form...", pid
);
1434 TRACE_TYPE(RELO
, "%5d RELO RELATIVE %08x <- +%08x\n", pid
,
1436 *((unsigned*)reloc
) += si
->base
;
1439 #if defined(ANDROID_X86_LINKER)
1441 COUNT_RELOC(RELOC_RELATIVE
);
1442 MARK(rel
->r_offset
);
1444 TRACE_TYPE(RELO
, "%5d RELO R_386_32 %08x <- +%08x %s\n", pid
,
1445 reloc
, sym_addr
, sym_name
);
1446 *((unsigned *)reloc
) += (unsigned)sym_addr
;
1450 COUNT_RELOC(RELOC_RELATIVE
);
1451 MARK(rel
->r_offset
);
1452 TRACE_TYPE(RELO
, "%5d RELO R_386_PC32 %08x <- "
1453 "+%08x (%08x - %08x) %s\n", pid
, reloc
,
1454 (sym_addr
- reloc
), sym_addr
, reloc
, sym_name
);
1455 *((unsigned *)reloc
) += (unsigned)(sym_addr
- reloc
);
1457 #endif /* ANDROID_X86_LINKER */
1459 #ifdef ANDROID_ARM_LINKER
1461 COUNT_RELOC(RELOC_COPY
);
1462 MARK(rel
->r_offset
);
1463 TRACE_TYPE(RELO
, "%5d RELO %08x <- %d @ %08x %s\n", pid
,
1464 reloc
, s
->st_size
, sym_addr
, sym_name
);
1465 memcpy((void*)reloc
, (void*)sym_addr
, s
->st_size
);
1469 #endif /* ANDROID_ARM_LINKER */
1472 DL_ERR("%5d unknown reloc type %d @ %p (%d)",
1473 pid
, type
, rel
, (int) (rel
- start
));
/* Please read the "Initialization and Termination functions" section
 * of the linker design note in bionic/linker/README.TXT to understand
1483 * what the following code is doing.
1485 * The important things to remember are:
1487 * DT_PREINIT_ARRAY must be called first for executables, and should
1488 * not appear in shared libraries.
1490 * DT_INIT should be called before DT_INIT_ARRAY if both are present
1492 * DT_FINI should be called after DT_FINI_ARRAY if both are present
1494 * DT_FINI_ARRAY must be parsed in reverse order.
1497 static void call_array(unsigned *ctor
, int count
, int reverse
)
1506 for(n
= count
; n
> 0; n
--) {
1507 TRACE("[ %5d Looking at %s *0x%08x == 0x%08x ]\n", pid
,
1508 reverse
? "dtor" : "ctor",
1509 (unsigned)ctor
, (unsigned)*ctor
);
1510 void (*func
)() = (void (*)()) *ctor
;
1512 if(((int) func
== 0) || ((int) func
== -1)) continue;
1513 TRACE("[ %5d Calling func @ 0x%08x ]\n", pid
, (unsigned)func
);
1518 void call_constructors_recursive(soinfo
*si
)
1520 if (si
->constructors_called
)
1522 if (strcmp(si
->name
, "libc.so") == 0)
1524 // Set this before actually calling the constructors, otherwise it doesn't
1525 // protect against recursive constructor calls. One simple example of
1526 // constructor recursion is the libc debug malloc, which is implemented in
1527 // libc_malloc_debug_leak.so:
1528 // 1. The program depends on libc, so libc's constructor is called here.
1529 // 2. The libc constructor calls dlopen() to load libc_malloc_debug_leak.so.
1530 // 3. dlopen() calls call_constructors_recursive() with the newly created
1531 // soinfo for libc_malloc_debug_leak.so.
1532 // 4. The debug so depends on libc, so call_constructors_recursive() is
1533 // called again with the libc soinfo. If it doesn't trigger the early-
1534 // out above, the libc constructor will be called again (recursively!).
1535 si
->constructors_called
= 1;
1537 if (si
->flags
& FLAG_EXE
) {
1538 TRACE("[ %5d Calling preinit_array @ 0x%08x [%d] for '%s' ]\n",
1539 pid
, (unsigned)si
->preinit_array
, si
->preinit_array_count
,
1541 call_array(si
->preinit_array
, si
->preinit_array_count
, 0);
1542 TRACE("[ %5d Done calling preinit_array for '%s' ]\n", pid
, si
->name
);
1544 if (si
->preinit_array
) {
1545 DL_ERR("%5d Shared library '%s' has a preinit_array table @ 0x%08x."
1546 " This is INVALID.", pid
, si
->name
,
1547 (unsigned)si
->preinit_array
);
1553 for(d
= si
->dynamic
; *d
; d
+= 2) {
1554 if(d
[0] == DT_NEEDED
){
1555 soinfo
* lsi
= (soinfo
*)d
[1];
1556 if (!validate_soinfo(lsi
)) {
1557 DL_ERR("%5d bad DT_NEEDED pointer in %s",
1560 call_constructors_recursive(lsi
);
1566 if (si
->init_func
) {
1567 TRACE("[ %5d Calling init_func @ 0x%08x for '%s' ]\n", pid
,
1568 (unsigned)si
->init_func
, si
->name
);
1570 TRACE("[ %5d Done calling init_func for '%s' ]\n", pid
, si
->name
);
1573 if (si
->init_array
) {
1574 TRACE("[ %5d Calling init_array @ 0x%08x [%d] for '%s' ]\n", pid
,
1575 (unsigned)si
->init_array
, si
->init_array_count
, si
->name
);
1576 call_array(si
->init_array
, si
->init_array_count
, 0);
1577 TRACE("[ %5d Done calling init_array for '%s' ]\n", pid
, si
->name
);
1582 static void call_destructors(soinfo
*si
)
1584 if (si
->fini_array
) {
1585 TRACE("[ %5d Calling fini_array @ 0x%08x [%d] for '%s' ]\n", pid
,
1586 (unsigned)si
->fini_array
, si
->fini_array_count
, si
->name
);
1587 call_array(si
->fini_array
, si
->fini_array_count
, 1);
1588 TRACE("[ %5d Done calling fini_array for '%s' ]\n", pid
, si
->name
);
1591 if (si
->fini_func
) {
1592 TRACE("[ %5d Calling fini_func @ 0x%08x for '%s' ]\n", pid
,
1593 (unsigned)si
->fini_func
, si
->name
);
1595 TRACE("[ %5d Done calling fini_func for '%s' ]\n", pid
, si
->name
);
1599 /* Force any of the closed stdin, stdout and stderr to be associated with
1601 static int nullify_closed_stdio (void)
1603 int dev_null
, i
, status
;
1604 int return_value
= 0;
1606 dev_null
= open("/dev/null", O_RDWR
);
1608 DL_ERR("Cannot open /dev/null.");
1611 TRACE("[ %5d Opened /dev/null file-descriptor=%d]\n", pid
, dev_null
);
1613 /* If any of the stdio file descriptors is valid and not associated
1614 with /dev/null, dup /dev/null to it. */
1615 for (i
= 0; i
< 3; i
++) {
1616 /* If it is /dev/null already, we are done. */
1620 TRACE("[ %5d Nullifying stdio file descriptor %d]\n", pid
, i
);
1621 /* The man page of fcntl does not say that fcntl(..,F_GETFL)
1622 can be interrupted but we do this just to be safe. */
1624 status
= fcntl(i
, F_GETFL
);
1625 } while (status
< 0 && errno
== EINTR
);
1627 /* If file is openned, we are good. */
1631 /* The only error we allow is that the file descriptor does not
1632 exist, in which case we dup /dev/null to it. */
1633 if (errno
!= EBADF
) {
1634 DL_ERR("nullify_stdio: unhandled error %s", strerror(errno
));
1639 /* Try dupping /dev/null to this stdio file descriptor and
1640 repeat if there is a signal. Note that any errors in closing
1641 the stdio descriptor are lost. */
1643 status
= dup2(dev_null
, i
);
1644 } while (status
< 0 && errno
== EINTR
);
1647 DL_ERR("nullify_stdio: dup2 error %s", strerror(errno
));
1653 /* If /dev/null is not one of the stdio file descriptors, close it. */
1655 TRACE("[ %5d Closing /dev/null file-descriptor=%d]\n", pid
, dev_null
);
1657 status
= close(dev_null
);
1658 } while (status
< 0 && errno
== EINTR
);
1661 DL_ERR("nullify_stdio: close error %s", strerror(errno
));
1666 return return_value
;
1669 static int link_image(soinfo
*si
, unsigned wr_offset
)
1672 Elf32_Phdr
*phdr
= si
->phdr
;
1673 int phnum
= si
->phnum
;
1675 INFO("[ %5d linking %s ]\n", pid
, si
->name
);
1676 DEBUG("%5d si->base = 0x%08x si->flags = 0x%08x\n", pid
,
1677 si
->base
, si
->flags
);
1679 if (si
->flags
& (FLAG_EXE
| FLAG_LINKER
)) {
1680 /* Locate the needed program segments (DYNAMIC/ARM_EXIDX) for
1681 * linkage info if this is the executable or the linker itself.
1682 * If this was a dynamic lib, that would have been done at load time.
1684 * TODO: It's unfortunate that small pieces of this are
1685 * repeated from the load_library routine. Refactor this just
1686 * slightly to reuse these bits.
1689 for(; phnum
> 0; --phnum
, ++phdr
) {
1690 #ifdef ANDROID_ARM_LINKER
1691 if(phdr
->p_type
== PT_ARM_EXIDX
) {
1692 /* exidx entries (used for stack unwinding) are 8 bytes each.
1694 si
->ARM_exidx
= (unsigned *)phdr
->p_vaddr
;
1695 si
->ARM_exidx_count
= phdr
->p_memsz
/ 8;
1698 if (phdr
->p_type
== PT_LOAD
) {
1699 /* For the executable, we use the si->size field only in
1700 dl_unwind_find_exidx(), so the meaning of si->size
1701 is not the size of the executable; it is the distance
1702 between the load location of the executable and the last
1703 address of the loadable part of the executable.
1704 We use the range [si->base, si->base + si->size) to
1705 determine whether a PC value falls within the executable
1706 section. Of course, if a value is between si->base and
1707 (si->base + phdr->p_vaddr), it's not in the executable
1708 section, but a) we shouldn't be asking for such a value
1709 anyway, and b) if we have to provide an EXIDX for such a
1710 value, then the executable's EXIDX is probably the better
1713 DEBUG_DUMP_PHDR(phdr
, "PT_LOAD", pid
);
1714 if (phdr
->p_vaddr
+ phdr
->p_memsz
> si
->size
)
1715 si
->size
= phdr
->p_vaddr
+ phdr
->p_memsz
;
1716 /* try to remember what range of addresses should be write
1718 if (!(phdr
->p_flags
& PF_W
)) {
1721 if (si
->base
+ phdr
->p_vaddr
< si
->wrprotect_start
)
1722 si
->wrprotect_start
= si
->base
+ phdr
->p_vaddr
;
1723 _end
= (((si
->base
+ phdr
->p_vaddr
+ phdr
->p_memsz
+ PAGE_SIZE
- 1) &
1725 if (_end
> si
->wrprotect_end
)
1726 si
->wrprotect_end
= _end
;
1727 /* Make the section writable just in case we'll have to
1728 * write to it during relocation (i.e. text segment).
1729 * However, we will remember what range of addresses
1730 * should be write protected.
1732 mprotect((void *) (si
->base
+ phdr
->p_vaddr
),
1734 PFLAGS_TO_PROT(phdr
->p_flags
) | PROT_WRITE
);
1736 } else if (phdr
->p_type
== PT_DYNAMIC
) {
1737 if (si
->dynamic
!= (unsigned *)-1) {
1738 DL_ERR("%5d multiple PT_DYNAMIC segments found in '%s'. "
1739 "Segment at 0x%08x, previously one found at 0x%08x",
1740 pid
, si
->name
, si
->base
+ phdr
->p_vaddr
,
1741 (unsigned)si
->dynamic
);
1744 DEBUG_DUMP_PHDR(phdr
, "PT_DYNAMIC", pid
);
1745 si
->dynamic
= (unsigned *) (si
->base
+ phdr
->p_vaddr
);
1746 } else if (phdr
->p_type
== PT_GNU_RELRO
) {
1747 if ((phdr
->p_vaddr
>= si
->size
)
1748 || ((phdr
->p_vaddr
+ phdr
->p_memsz
) > si
->size
)
1749 || ((si
->base
+ phdr
->p_vaddr
+ phdr
->p_memsz
) < si
->base
)) {
1750 DL_ERR("%d invalid GNU_RELRO in '%s' "
1751 "p_vaddr=0x%08x p_memsz=0x%08x", pid
, si
->name
,
1752 phdr
->p_vaddr
, phdr
->p_memsz
);
1755 si
->gnu_relro_start
= (Elf32_Addr
) (si
->base
+ phdr
->p_vaddr
);
1756 si
->gnu_relro_len
= (unsigned) phdr
->p_memsz
;
1761 if (si
->dynamic
== (unsigned *)-1) {
1762 DL_ERR("%5d missing PT_DYNAMIC?!", pid
);
1766 DEBUG("%5d dynamic = %p\n", pid
, si
->dynamic
);
1768 /* extract useful information from dynamic section */
1769 for(d
= si
->dynamic
; *d
; d
++){
1770 DEBUG("%5d d = %p, d[0] = 0x%08x d[1] = 0x%08x\n", pid
, d
, d
[0], d
[1]);
1773 si
->nbucket
= ((unsigned *) (si
->base
+ *d
))[0];
1774 si
->nchain
= ((unsigned *) (si
->base
+ *d
))[1];
1775 si
->bucket
= (unsigned *) (si
->base
+ *d
+ 8);
1776 si
->chain
= (unsigned *) (si
->base
+ *d
+ 8 + si
->nbucket
* 4);
1779 si
->strtab
= (const char *) (si
->base
+ *d
);
1782 si
->symtab
= (Elf32_Sym
*) (si
->base
+ *d
);
1786 DL_ERR("DT_RELA not supported");
1791 si
->plt_rel
= (Elf32_Rel
*) (si
->base
+ *d
);
1794 si
->plt_rel_count
= *d
/ 8;
1797 si
->rel
= (Elf32_Rel
*) (si
->base
+ *d
);
1800 si
->rel_count
= *d
/ 8;
1803 /* Save this in case we decide to do lazy binding. We don't yet. */
1804 si
->plt_got
= (unsigned *)(si
->base
+ *d
);
1807 // Set the DT_DEBUG entry to the addres of _r_debug for GDB
1808 *d
= (int) &_r_debug
;
1811 DL_ERR("%5d DT_RELA not supported", pid
);
1814 si
->init_func
= (void (*)(void))(si
->base
+ *d
);
1815 DEBUG("%5d %s constructors (init func) found at %p\n",
1816 pid
, si
->name
, si
->init_func
);
1819 si
->fini_func
= (void (*)(void))(si
->base
+ *d
);
1820 DEBUG("%5d %s destructors (fini func) found at %p\n",
1821 pid
, si
->name
, si
->fini_func
);
1824 si
->init_array
= (unsigned *)(si
->base
+ *d
);
1825 DEBUG("%5d %s constructors (init_array) found at %p\n",
1826 pid
, si
->name
, si
->init_array
);
1828 case DT_INIT_ARRAYSZ
:
1829 si
->init_array_count
= ((unsigned)*d
) / sizeof(Elf32_Addr
);
1832 si
->fini_array
= (unsigned *)(si
->base
+ *d
);
1833 DEBUG("%5d %s destructors (fini_array) found at %p\n",
1834 pid
, si
->name
, si
->fini_array
);
1836 case DT_FINI_ARRAYSZ
:
1837 si
->fini_array_count
= ((unsigned)*d
) / sizeof(Elf32_Addr
);
1839 case DT_PREINIT_ARRAY
:
1840 si
->preinit_array
= (unsigned *)(si
->base
+ *d
);
1841 DEBUG("%5d %s constructors (preinit_array) found at %p\n",
1842 pid
, si
->name
, si
->preinit_array
);
1844 case DT_PREINIT_ARRAYSZ
:
1845 si
->preinit_array_count
= ((unsigned)*d
) / sizeof(Elf32_Addr
);
1848 /* TODO: make use of this. */
1849 /* this means that we might have to write into where the text
1850 * segment was loaded during relocation... Do something with
1853 DEBUG("%5d Text segment should be writable during relocation.\n",
1859 DEBUG("%5d si->base = 0x%08x, si->strtab = %p, si->symtab = %p\n",
1860 pid
, si
->base
, si
->strtab
, si
->symtab
);
1862 if((si
->strtab
== 0) || (si
->symtab
== 0)) {
1863 DL_ERR("%5d missing essential tables", pid
);
1867 /* if this is the main executable, then load all of the preloads now */
1868 if(si
->flags
& FLAG_EXE
) {
1870 memset(preloads
, 0, sizeof(preloads
));
1871 for(i
= 0; ldpreload_names
[i
] != NULL
; i
++) {
1872 soinfo
*lsi
= find_library(ldpreload_names
[i
]);
1874 strlcpy(tmp_err_buf
, linker_get_error(), sizeof(tmp_err_buf
));
1875 DL_ERR("%5d could not load needed library '%s' for '%s' (%s)",
1876 pid
, ldpreload_names
[i
], si
->name
, tmp_err_buf
);
1884 for(d
= si
->dynamic
; *d
; d
+= 2) {
1885 if(d
[0] == DT_NEEDED
){
1886 DEBUG("%5d %s needs %s\n", pid
, si
->name
, si
->strtab
+ d
[1]);
1887 soinfo
*lsi
= find_library(si
->strtab
+ d
[1]);
1889 strlcpy(tmp_err_buf
, linker_get_error(), sizeof(tmp_err_buf
));
1890 DL_ERR("%5d could not load needed library '%s' for '%s' (%s)",
1891 pid
, si
->strtab
+ d
[1], si
->name
, tmp_err_buf
);
1894 /* Save the soinfo of the loaded DT_NEEDED library in the payload
1895 of the DT_NEEDED entry itself, so that we can retrieve the
1896 soinfo directly later from the dynamic segment. This is a hack,
1897 but it allows us to map from DT_NEEDED to soinfo efficiently
1898 later on when we resolve relocations, trying to look up a symbol
1901 d
[1] = (unsigned)lsi
;
1907 DEBUG("[ %5d relocating %s plt ]\n", pid
, si
->name
);
1908 if(reloc_library(si
, si
->plt_rel
, si
->plt_rel_count
))
1912 DEBUG("[ %5d relocating %s ]\n", pid
, si
->name
);
1913 if(reloc_library(si
, si
->rel
, si
->rel_count
))
1917 si
->flags
|= FLAG_LINKED
;
1918 DEBUG("[ %5d finished linking %s ]\n", pid
, si
->name
);
1921 /* This is the way that the old dynamic linker did protection of
1922 * non-writable areas. It would scan section headers and find where
1923 * .text ended (rather where .data/.bss began) and assume that this is
1924 * the upper range of the non-writable area. This is too coarse,
1925 * and is kept here for reference until we fully move away from single
1926 * segment elf objects. See the code in get_wr_offset (also #if'd 0)
1927 * that made this possible.
1929 if(wr_offset
< 0xffffffff){
1930 mprotect((void*) si
->base
, wr_offset
, PROT_READ
| PROT_EXEC
);
1933 /* TODO: Verify that this does the right thing in all cases, as it
1934 * presently probably does not. It is possible that an ELF image will
1935 * come with multiple read-only segments. What we ought to do is scan
1936 * the program headers again and mprotect all the read-only segments.
1937 * To prevent re-scanning the program header, we would have to build a
1938 * list of loadable segments in si, and then scan that instead. */
1939 if (si
->wrprotect_start
!= 0xffffffff && si
->wrprotect_end
!= 0) {
1940 mprotect((void *)si
->wrprotect_start
,
1941 si
->wrprotect_end
- si
->wrprotect_start
,
1942 PROT_READ
| PROT_EXEC
);
1946 if (si
->gnu_relro_start
!= 0 && si
->gnu_relro_len
!= 0) {
1947 Elf32_Addr start
= (si
->gnu_relro_start
& ~PAGE_MASK
);
1948 unsigned len
= (si
->gnu_relro_start
- start
) + si
->gnu_relro_len
;
1949 if (mprotect((void *) start
, len
, PROT_READ
) < 0) {
1950 DL_ERR("%5d GNU_RELRO mprotect of library '%s' failed: %d (%s)\n",
1951 pid
, si
->name
, errno
, strerror(errno
));
1956 /* If this is a SET?ID program, dup /dev/null to opened stdin,
1957 stdout and stderr to close a security hole described in:
1959 ftp://ftp.freebsd.org/pub/FreeBSD/CERT/advisories/FreeBSD-SA-02:23.stdio.asc
1962 if (program_is_setuid
)
1963 nullify_closed_stdio ();
1964 notify_gdb_of_load(si
);
1968 ERROR("failed to link %s\n", si
->name
);
1969 si
->flags
|= FLAG_ERROR
;
1973 static void parse_library_path(const char *path
, char *delim
)
1976 char *ldpaths_bufp
= ldpaths_buf
;
1979 len
= strlcpy(ldpaths_buf
, path
, sizeof(ldpaths_buf
));
1981 while (i
< LDPATH_MAX
&& (ldpaths
[i
] = strsep(&ldpaths_bufp
, delim
))) {
1982 if (*ldpaths
[i
] != '\0')
1986 /* Forget the last path if we had to truncate; this occurs if the 2nd to
1987 * last char isn't '\0' (i.e. not originally a delim). */
1988 if (i
> 0 && len
>= sizeof(ldpaths_buf
) &&
1989 ldpaths_buf
[sizeof(ldpaths_buf
) - 2] != '\0') {
1990 ldpaths
[i
- 1] = NULL
;
1996 static void parse_preloads(const char *path
, char *delim
)
1999 char *ldpreloads_bufp
= ldpreloads_buf
;
2002 len
= strlcpy(ldpreloads_buf
, path
, sizeof(ldpreloads_buf
));
2004 while (i
< LDPRELOAD_MAX
&& (ldpreload_names
[i
] = strsep(&ldpreloads_bufp
, delim
))) {
2005 if (*ldpreload_names
[i
] != '\0') {
2010 /* Forget the last path if we had to truncate; this occurs if the 2nd to
2011 * last char isn't '\0' (i.e. not originally a delim). */
2012 if (i
> 0 && len
>= sizeof(ldpreloads_buf
) &&
2013 ldpreloads_buf
[sizeof(ldpreloads_buf
) - 2] != '\0') {
2014 ldpreload_names
[i
- 1] = NULL
;
2016 ldpreload_names
[i
] = NULL
;
2021 * This code is called after the linker has linked itself and
2022 * fixed it's own GOT. It is safe to make references to externs
2023 * and other non-local data at this point.
2025 static unsigned __linker_init_post_relocation(unsigned **elfdata
)
2027 static soinfo linker_soinfo
;
2029 int argc
= (int) *elfdata
;
2030 char **argv
= (char**) (elfdata
+ 1);
2031 unsigned *vecs
= (unsigned*) (argv
+ argc
+ 1);
2034 struct link_map
* map
;
2035 const char *ldpath_env
= NULL
;
2036 const char *ldpreload_env
= NULL
;
2038 /* NOTE: we store the elfdata pointer on a special location
2039 * of the temporary TLS area in order to pass it to
2040 * the C Library's runtime initializer.
2042 * The initializer must clear the slot and reset the TLS
2043 * to point to a different location to ensure that no other
2044 * shared library constructor can access it.
2047 __libc_init_tls(elfdata
);
2053 struct timeval t0
, t1
;
2054 gettimeofday(&t0
, 0);
2057 /* Initialize environment functions, and get to the ELF aux vectors table */
2058 vecs
= linker_env_init(vecs
);
2060 /* Check auxv for AT_SECURE first to see if program is setuid, setgid,
2061 has file caps, or caused a SELinux/AppArmor domain transition. */
2062 for (v
= vecs
; v
[0]; v
+= 2) {
2063 if (v
[0] == AT_SECURE
) {
2064 /* kernel told us whether to enable secure mode */
2065 program_is_setuid
= v
[1];
2070 /* Kernel did not provide AT_SECURE - fall back on legacy test. */
2071 program_is_setuid
= (getuid() != geteuid()) || (getgid() != getegid());
2074 /* Sanitize environment if we're loading a setuid program */
2075 if (program_is_setuid
)
2076 linker_env_secure();
2082 /* Get a few environment variables */
2086 env
= linker_env_get("DEBUG"); /* XXX: TODO: Change to LD_DEBUG */
2088 debug_verbosity
= atoi(env
);
2091 /* Normally, these are cleaned by linker_env_secure, but the test
2092 * against program_is_setuid doesn't cost us anything */
2093 if (!program_is_setuid
) {
2094 ldpath_env
= getenv("LD_LIBRARY_PATH");
2095 ldpreload_env
= getenv("LD_PRELOAD");
2099 INFO("[ android linker & debugger ]\n");
2100 DEBUG("%5d elfdata @ 0x%08x\n", pid
, (unsigned)elfdata
);
2102 si
= alloc_info(argv
[0]);
2107 /* bootstrap the link map, the main exe always needs to be first */
2108 si
->flags
|= FLAG_EXE
;
2109 map
= &(si
->linkmap
);
2112 map
->l_name
= argv
[0];
2116 _r_debug
.r_map
= map
;
2119 /* gdb expects the linker to be in the debug shared object list,
2120 * and we need to make sure that the reported load address is zero.
2121 * Without this, gdb gets the wrong idea of where rtld_db_dlactivity()
2122 * is. Don't use alloc_info(), because the linker shouldn't
2123 * be on the soinfo list.
2125 strlcpy((char*) linker_soinfo
.name
, "/system/bin/linker", sizeof linker_soinfo
.name
);
2126 linker_soinfo
.flags
= 0;
2127 linker_soinfo
.base
= 0; // This is the important part; must be zero.
2128 insert_soinfo_into_debug_map(&linker_soinfo
);
2130 /* extract information passed from the kernel */
2131 while(vecs
[0] != 0){
2134 si
->phdr
= (Elf32_Phdr
*) vecs
[1];
2137 si
->phnum
= (int) vecs
[1];
2140 si
->entry
= vecs
[1];
2146 /* Compute the value of si->base. We can't rely on the fact that
2147 * the first entry is the PHDR because this will not be true
2148 * for certain executables (e.g. some in the NDK unit test suite)
2152 for ( nn
= 0; nn
< si
->phnum
; nn
++ ) {
2153 if (si
->phdr
[nn
].p_type
== PT_PHDR
) {
2154 si
->base
= (Elf32_Addr
) si
->phdr
- si
->phdr
[nn
].p_vaddr
;
2158 si
->dynamic
= (unsigned *)-1;
2159 si
->wrprotect_start
= 0xffffffff;
2160 si
->wrprotect_end
= 0;
2162 si
->gnu_relro_start
= 0;
2163 si
->gnu_relro_len
= 0;
2165 /* Use LD_LIBRARY_PATH if we aren't setuid/setgid */
2167 parse_library_path(ldpath_env
, ":");
2169 if (ldpreload_env
) {
2170 parse_preloads(ldpreload_env
, " :");
2173 if(link_image(si
, 0)) {
2174 char errmsg
[] = "CANNOT LINK EXECUTABLE\n";
2175 write(2, __linker_dl_err_buf
, strlen(__linker_dl_err_buf
));
2176 write(2, errmsg
, sizeof(errmsg
));
2180 call_constructors_recursive(si
);
2182 #if ALLOW_SYMBOLS_FROM_MAIN
2183 /* Set somain after we've loaded all the libraries in order to prevent
2184 * linking of symbols back to the main image, which is not set up at that
2191 gettimeofday(&t1
,NULL
);
2192 PRINT("LINKER TIME: %s: %d microseconds\n", argv
[0], (int) (
2193 (((long long)t1
.tv_sec
* 1000000LL) + (long long)t1
.tv_usec
) -
2194 (((long long)t0
.tv_sec
* 1000000LL) + (long long)t0
.tv_usec
)
2198 PRINT("RELO STATS: %s: %d abs, %d rel, %d copy, %d symbol\n", argv
[0],
2199 linker_stats
.reloc
[RELOC_ABSOLUTE
],
2200 linker_stats
.reloc
[RELOC_RELATIVE
],
2201 linker_stats
.reloc
[RELOC_COPY
],
2202 linker_stats
.reloc
[RELOC_SYMBOL
]);
2209 for(n
= 0; n
< 4096; n
++){
2211 unsigned x
= bitmask
[n
];
2212 for(i
= 0; i
< 8; i
++){
2218 PRINT("PAGES MODIFIED: %s: %d (%dKB)\n", argv
[0], count
, count
* 4);
2222 #if TIMING || STATS || COUNT_PAGES
2226 TRACE("[ %5d Ready to execute '%s' @ 0x%08x ]\n", pid
, si
->name
,
2232 * Find the value of AT_BASE passed to us by the kernel. This is the load
2233 * location of the linker.
2235 static unsigned find_linker_base(unsigned **elfdata
) {
2236 int argc
= (int) *elfdata
;
2237 char **argv
= (char**) (elfdata
+ 1);
2238 unsigned *vecs
= (unsigned*) (argv
+ argc
+ 1);
2239 while (vecs
[0] != 0) {
2243 /* The end of the environment block is marked by two NULL pointers */
2247 if (vecs
[0] == AT_BASE
) {
2253 return 0; // should never happen
2257 * This is the entry point for the linker, called from begin.S. This
2258 * method is responsible for fixing the linker's own relocations, and
2259 * then calling __linker_init_post_relocation().
2261 * Because this method is called before the linker has fixed it's own
2262 * relocations, any attempt to reference an extern variable, extern
2263 * function, or other GOT reference will generate a segfault.
2265 unsigned __linker_init(unsigned **elfdata
) {
2266 unsigned linker_addr
= find_linker_base(elfdata
);
2267 Elf32_Ehdr
*elf_hdr
= (Elf32_Ehdr
*) linker_addr
;
2269 (Elf32_Phdr
*)((unsigned char *) linker_addr
+ elf_hdr
->e_phoff
);
2272 memset(&linker_so
, 0, sizeof(soinfo
));
2274 linker_so
.base
= linker_addr
;
2275 linker_so
.dynamic
= (unsigned *) -1;
2276 linker_so
.phdr
= phdr
;
2277 linker_so
.phnum
= elf_hdr
->e_phnum
;
2278 linker_so
.flags
|= FLAG_LINKER
;
2279 linker_so
.wrprotect_start
= 0xffffffff;
2280 linker_so
.wrprotect_end
= 0;
2281 linker_so
.gnu_relro_start
= 0;
2282 linker_so
.gnu_relro_len
= 0;
2284 if (link_image(&linker_so
, 0)) {
2285 // It would be nice to print an error message, but if the linker
2286 // can't link itself, there's no guarantee that we'll be able to
2287 // call write() (because it involves a GOT reference).
2289 // This situation should never occur unless the linker itself
2294 // We have successfully fixed our own relocations. It's safe to run
2295 // the main part of the linker now.
2296 return __linker_init_post_relocation(elfdata
);