Imported Upstream version 0.1.0+git20131207+e452e83
[deb_libhybris.git] / hybris / common / gingerbread / linker.c
1 /*
2 * Copyright (C) 2008, 2009 The Android Open Source Project
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the
13 * distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
18 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
19 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
22 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
25 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29 #include <linux/auxvec.h>
30
31 #include <stdio.h>
32 #include <stdlib.h>
33 #include <string.h>
34 #include <unistd.h>
35 #include <fcntl.h>
36 #include <errno.h>
37 #include <dlfcn.h>
38 #include <sys/stat.h>
39
40 #include <pthread.h>
41
42 #include <sys/mman.h>
43
44 /* special private C library header - see Android.mk */
45 #include "bionic_tls.h"
46
47 #include "linker.h"
48 #include "linker_debug.h"
49 #include "linker_environ.h"
50 #include "linker_format.h"
51
52 #define ALLOW_SYMBOLS_FROM_MAIN 1
53 #define SO_MAX 128
54
55 /* Assume average path length of 64 and max 8 paths */
56 #define LDPATH_BUFSIZE 512
57 #define LDPATH_MAX 8
58
59 #define LDPRELOAD_BUFSIZE 512
60 #define LDPRELOAD_MAX 8
61
62 /* >>> IMPORTANT NOTE - READ ME BEFORE MODIFYING <<<
63 *
64 * Do NOT use malloc() and friends or pthread_*() code here.
65 * Don't use printf() either; it's caused mysterious memory
66 * corruption in the past.
67 * The linker runs before we bring up libc and it's easiest
68 * to make sure it does not depend on any complex libc features
69 *
70 * open issues / todo:
71 *
72 * - are we doing everything we should for ARM_COPY relocations?
73 * - cleaner error reporting
74 * - after linking, set as much stuff as possible to READONLY
75 * and NOEXEC
76 * - linker hardcodes PAGE_SIZE and PAGE_MASK because the kernel
77 * headers provide versions that are negative...
78 * - allocate space for soinfo structs dynamically instead of
79 * having a hard limit (64)
80 */
81
82
83 static int link_image(soinfo *si, unsigned wr_offset);
84
85 static int socount = 0;
86 static soinfo sopool[SO_MAX];
87 static soinfo *freelist = NULL;
88 static soinfo *solist = &libdl_info;
89 static soinfo *sonext = &libdl_info;
90 #if ALLOW_SYMBOLS_FROM_MAIN
91 static soinfo *somain; /* main process, always the one after libdl_info */
92 #endif
93
94
95 static inline int validate_soinfo(soinfo *si)
96 {
97 return (si >= sopool && si < sopool + SO_MAX) ||
98 si == &libdl_info;
99 }
100
101 static char ldpaths_buf[LDPATH_BUFSIZE];
102 static const char *ldpaths[LDPATH_MAX + 1];
103
104 static char ldpreloads_buf[LDPRELOAD_BUFSIZE];
105 static const char *ldpreload_names[LDPRELOAD_MAX + 1];
106
107 static soinfo *preloads[LDPRELOAD_MAX + 1];
108
109 #if LINKER_DEBUG
110 int debug_verbosity;
111 #endif
112
113 static int pid;
114
115 /* This boolean is set if the program being loaded is setuid */
116 static int program_is_setuid;
117
118 #if STATS
119 struct _link_stats linker_stats;
120 #endif
121
122 #if COUNT_PAGES
123 unsigned bitmask[4096];
124 #endif
125
126 #ifndef PT_ARM_EXIDX
127 #define PT_ARM_EXIDX 0x70000001 /* .ARM.exidx segment */
128 #endif
129
130 static char tmp_err_buf[768];
131 static char __linker_dl_err_buf[768];
132 #define DL_ERR(fmt, x...) \
133 do { \
134 format_buffer(__linker_dl_err_buf, sizeof(__linker_dl_err_buf), \
135 "%s[%d]: " fmt, __func__, __LINE__, ##x); \
136 ERROR(fmt "\n", ##x); \
137 } while(0)
138
139 const char *linker_get_error(void)
140 {
141 return (const char *)&__linker_dl_err_buf[0];
142 }
143
144 /*
145 * This function is an empty stub where GDB locates a breakpoint to get notified
146 * about linker activity.
147 */
148 extern void __attribute__((noinline)) rtld_db_dlactivity(void);
149
150 static struct r_debug _r_debug = {1, NULL, &rtld_db_dlactivity,
151 RT_CONSISTENT, 0};
152 static struct link_map *r_debug_tail = 0;
153
154 static pthread_mutex_t _r_debug_lock = PTHREAD_MUTEX_INITIALIZER;
155
156 static void insert_soinfo_into_debug_map(soinfo * info)
157 {
158 struct link_map * map;
159
160 /* Copy the necessary fields into the debug structure.
161 */
162 map = &(info->linkmap);
163 map->l_addr = info->base;
164 map->l_name = (char*) info->name;
165 map->l_ld = (uintptr_t)info->dynamic;
166
167 /* Stick the new library at the end of the list.
168 * gdb tends to care more about libc than it does
169 * about leaf libraries, and ordering it this way
170 * reduces the back-and-forth over the wire.
171 */
172 if (r_debug_tail) {
173 r_debug_tail->l_next = map;
174 map->l_prev = r_debug_tail;
175 map->l_next = 0;
176 } else {
177 _r_debug.r_map = map;
178 map->l_prev = 0;
179 map->l_next = 0;
180 }
181 r_debug_tail = map;
182 }
183
184 static void remove_soinfo_from_debug_map(soinfo * info)
185 {
186 struct link_map * map = &(info->linkmap);
187
188 if (r_debug_tail == map)
189 r_debug_tail = map->l_prev;
190
191 if (map->l_prev) map->l_prev->l_next = map->l_next;
192 if (map->l_next) map->l_next->l_prev = map->l_prev;
193 }
194
195 void notify_gdb_of_load(soinfo * info)
196 {
197 if (info->flags & FLAG_EXE) {
198 // GDB already knows about the main executable
199 return;
200 }
201
202 pthread_mutex_lock(&_r_debug_lock);
203
204 _r_debug.r_state = RT_ADD;
205 rtld_db_dlactivity();
206
207 insert_soinfo_into_debug_map(info);
208
209 _r_debug.r_state = RT_CONSISTENT;
210 rtld_db_dlactivity();
211
212 pthread_mutex_unlock(&_r_debug_lock);
213 }
214
215 void notify_gdb_of_unload(soinfo * info)
216 {
217 if (info->flags & FLAG_EXE) {
218 // GDB already knows about the main executable
219 return;
220 }
221
222 pthread_mutex_lock(&_r_debug_lock);
223
224 _r_debug.r_state = RT_DELETE;
225 rtld_db_dlactivity();
226
227 remove_soinfo_from_debug_map(info);
228
229 _r_debug.r_state = RT_CONSISTENT;
230 rtld_db_dlactivity();
231
232 pthread_mutex_unlock(&_r_debug_lock);
233 }
234
235 void notify_gdb_of_libraries()
236 {
237 _r_debug.r_state = RT_ADD;
238 rtld_db_dlactivity();
239 _r_debug.r_state = RT_CONSISTENT;
240 rtld_db_dlactivity();
241 }
242
/* Allocate a soinfo slot for 'name' from the freelist (repopulated by
 * free_info()) or, when that is empty, from the static pool, then append
 * it to the global solist.  Returns NULL if the name is too long or the
 * pool is exhausted.
 */
static soinfo *alloc_info(const char *name)
{
    soinfo *si;

    if(strlen(name) >= SOINFO_NAME_LEN) {
        DL_ERR("%5d library name %s too long", pid, name);
        return NULL;
    }

    /* The freelist is populated when we call free_info(), which in turn is
       done only by dlclose(), which is not likely to be used.
    */
    if (!freelist) {
        if(socount == SO_MAX) {
            DL_ERR("%5d too many libraries when loading %s", pid, name);
            return NULL;
        }
        freelist = sopool + socount++;
        freelist->next = NULL;
    }

    si = freelist;
    freelist = freelist->next;

    /* Make sure we get a clean block of soinfo */
    memset(si, 0, sizeof(soinfo));
    strlcpy((char*) si->name, name, sizeof(si->name));
    /* Append to solist: sonext always points at the current tail. */
    sonext->next = si;
    si->next = NULL;
    si->refcount = 0;
    sonext = si;

    TRACE("%5d name %s: allocated soinfo @ %p\n", pid, name, si);
    return si;
}
278
/* Unlink 'si' from solist and push it onto the freelist so alloc_info()
 * can reuse the slot.  Reached only via dlclose().
 */
static void free_info(soinfo *si)
{
    soinfo *prev = NULL, *trav;

    TRACE("%5d name %s: freeing soinfo @ %p\n", pid, si->name, si);

    for(trav = solist; trav != NULL; trav = trav->next){
        if (trav == si)
            break;
        prev = trav;
    }
    if (trav == NULL) {
        /* si was not in solist */
        DL_ERR("%5d name %s is not in solist!", pid, si->name);
        return;
    }

    /* prev will never be NULL, because the first entry in solist is
       always the static libdl_info.
    */
    prev->next = si->next;
    if (si == sonext) sonext = prev;
    si->next = freelist;
    freelist = si;
}
304
305 const char *addr_to_name(unsigned addr)
306 {
307 soinfo *si;
308
309 for(si = solist; si != 0; si = si->next){
310 if((addr >= si->base) && (addr < (si->base + si->size))) {
311 return si->name;
312 }
313 }
314
315 return "";
316 }
317
318 /* For a given PC, find the .so that it belongs to.
319 * Returns the base address of the .ARM.exidx section
320 * for that .so, and the number of 8-byte entries
321 * in that section (via *pcount).
322 *
323 * Intended to be called by libc's __gnu_Unwind_Find_exidx().
324 *
325 * This function is exposed via dlfcn.c and libdl.so.
326 */
327 #ifdef ANDROID_ARM_LINKER
_Unwind_Ptr android_dl_unwind_find_exidx(_Unwind_Ptr pc, int *pcount)
{
    soinfo *si;
    unsigned addr = (unsigned)pc;

    /* Find the object whose mapping covers 'pc' and return its relocated
     * .ARM.exidx table; the entry count goes out through *pcount. */
    for (si = solist; si != 0; si = si->next){
        if ((addr >= si->base) && (addr < (si->base + si->size))) {
            *pcount = si->ARM_exidx_count;
            return (_Unwind_Ptr)(si->base + (unsigned long)si->ARM_exidx);
        }
    }
    /* No match: report an empty table.
     * NOTE(review): returning NULL for an integer-typed _Unwind_Ptr relies
     * on the implicit conversion -- confirm it's 0 on this toolchain. */
   *pcount = 0;
    return NULL;
}
342 #elif defined(ANDROID_X86_LINKER)
343 /* Here, we only have to provide a callback to iterate across all the
344 * loaded libraries. gcc_eh does the rest. */
345 int
346 android_dl_iterate_phdr(int (*cb)(struct dl_phdr_info *info, size_t size, void *data),
347 void *data)
348 {
349 soinfo *si;
350 struct dl_phdr_info dl_info;
351 int rv = 0;
352
353 for (si = solist; si != NULL; si = si->next) {
354 dl_info.dlpi_addr = si->linkmap.l_addr;
355 dl_info.dlpi_name = si->linkmap.l_name;
356 dl_info.dlpi_phdr = si->phdr;
357 dl_info.dlpi_phnum = si->phnum;
358 rv = cb(&dl_info, sizeof (struct dl_phdr_info), data);
359 if (rv != 0)
360 break;
361 }
362 return rv;
363 }
364 #endif
365
366 static Elf32_Sym *_elf_lookup(soinfo *si, unsigned hash, const char *name)
367 {
368 Elf32_Sym *s;
369 Elf32_Sym *symtab = si->symtab;
370 const char *strtab = si->strtab;
371 unsigned n;
372
373 TRACE_TYPE(LOOKUP, "%5d SEARCH %s in %s@0x%08x %08x %d\n", pid,
374 name, si->name, si->base, hash, hash % si->nbucket);
375 if (si->nbucket == 0) {
376 return NULL;
377 }
378 n = hash % si->nbucket;
379
380 for(n = si->bucket[hash % si->nbucket]; n != 0; n = si->chain[n]){
381 s = symtab + n;
382 if(strcmp(strtab + s->st_name, name)) continue;
383
384 /* only concern ourselves with global and weak symbol definitions */
385 switch(ELF32_ST_BIND(s->st_info)){
386 case STB_GLOBAL:
387 case STB_WEAK:
388 /* no section == undefined */
389 if(s->st_shndx == 0) continue;
390
391 TRACE_TYPE(LOOKUP, "%5d FOUND %s in %s (%08x) %d\n", pid,
392 name, si->name, s->st_value, s->st_size);
393 return s;
394 }
395 }
396
397 return NULL;
398 }
399
/* Classic SysV ELF hash of a NUL-terminated name.  This variant folds
 * the top nibble back in unconditionally, which produces the same values
 * as the textbook algorithm (when g is zero both xors are no-ops). */
static unsigned elfhash(const char *_name)
{
    const unsigned char *p = (const unsigned char *) _name;
    unsigned h = 0;

    for (; *p != '\0'; p++) {
        unsigned g;

        h = (h << 4) + *p;
        g = h & 0xf0000000;
        h ^= g;
        h ^= g >> 24;
    }
    return h;
}
413
/* Resolve 'name' on behalf of 'si' during relocation.
 * Search order: 'si' itself, then the LD_PRELOAD objects, then each
 * DT_NEEDED dependency, and finally (when enabled) the main executable.
 * On success, *base receives the base address of the defining object.
 */
static Elf32_Sym *
_do_lookup(soinfo *si, const char *name, unsigned *base)
{
    unsigned elf_hash = elfhash(name);
    Elf32_Sym *s;
    unsigned *d;
    soinfo *lsi = si;   /* tracks the object in which the symbol was found */
    int i;

    /* Look for symbols in the local scope (the object who is
     * searching). This happens with C++ templates on i386 for some
     * reason.
     *
     * Notes on weak symbols:
     * The ELF specs are ambiguous about treatment of weak definitions in
     * dynamic linking. Some systems return the first definition found
     * and some the first non-weak definition. This is system dependent.
     * Here we return the first definition found for simplicity. */

    s = _elf_lookup(si, elf_hash, name);
    if(s != NULL)
        goto done;

    /* Next, look for it in the preloads list */
    for(i = 0; preloads[i] != NULL; i++) {
        lsi = preloads[i];
        s = _elf_lookup(lsi, elf_hash, name);
        if(s != NULL)
            goto done;
    }

    for(d = si->dynamic; *d; d += 2) {
        if(d[0] == DT_NEEDED){
            /* d[1] appears to hold a soinfo pointer patched in during
             * linking rather than the original DT_NEEDED string offset;
             * validate it before dereferencing (TODO confirm against
             * link_image). */
            lsi = (soinfo *)d[1];
            if (!validate_soinfo(lsi)) {
                DL_ERR("%5d bad DT_NEEDED pointer in %s",
                       pid, si->name);
                return NULL;
            }

            DEBUG("%5d %s: looking up %s in %s\n",
                  pid, si->name, name, lsi->name);
            s = _elf_lookup(lsi, elf_hash, name);
            if ((s != NULL) && (s->st_shndx != SHN_UNDEF))
                goto done;
        }
    }

#if ALLOW_SYMBOLS_FROM_MAIN
    /* If we are resolving relocations while dlopen()ing a library, it's OK for
     * the library to resolve a symbol that's defined in the executable itself,
     * although this is rare and is generally a bad idea.
     */
    if (somain) {
        lsi = somain;
        DEBUG("%5d %s: looking up %s in executable %s\n",
              pid, si->name, name, lsi->name);
        s = _elf_lookup(lsi, elf_hash, name);
    }
#endif

done:
    if(s != NULL) {
        TRACE_TYPE(LOOKUP, "%5d si %s sym %s s->st_value = 0x%08x, "
                   "found in %s, base = 0x%08x\n",
                   pid, si->name, name, s->st_value, lsi->name, lsi->base);
        *base = lsi->base;
        return s;
    }

    return NULL;
}
486
487 /* This is used by dl_sym(). It performs symbol lookup only within the
488 specified soinfo object and not in any of its dependencies.
489 */
490 Elf32_Sym *lookup_in_library(soinfo *si, const char *name)
491 {
492 return _elf_lookup(si, elfhash(name), name);
493 }
494
495 /* This is used by dl_sym(). It performs a global symbol lookup.
496 */
497 Elf32_Sym *lookup(const char *name, soinfo **found, soinfo *start)
498 {
499 unsigned elf_hash = elfhash(name);
500 Elf32_Sym *s = NULL;
501 soinfo *si;
502
503 if(start == NULL) {
504 start = solist;
505 }
506
507 for(si = start; (s == NULL) && (si != NULL); si = si->next)
508 {
509 if(si->flags & FLAG_ERROR)
510 continue;
511 s = _elf_lookup(si, elf_hash, name);
512 if (s != NULL) {
513 *found = si;
514 break;
515 }
516 }
517
518 if(s != NULL) {
519 TRACE_TYPE(LOOKUP, "%5d %s s->st_value = 0x%08x, "
520 "si->base = 0x%08x\n", pid, name, s->st_value, si->base);
521 return s;
522 }
523
524 return NULL;
525 }
526
527 soinfo *find_containing_library(const void *addr)
528 {
529 soinfo *si;
530
531 for(si = solist; si != NULL; si = si->next)
532 {
533 if((unsigned)addr >= si->base && (unsigned)addr - si->base < si->size) {
534 return si;
535 }
536 }
537
538 return NULL;
539 }
540
541 Elf32_Sym *find_containing_symbol(const void *addr, soinfo *si)
542 {
543 unsigned int i;
544 unsigned soaddr = (unsigned)addr - si->base;
545
546 /* Search the library's symbol table for any defined symbol which
547 * contains this address */
548 for(i=0; i<si->nchain; i++) {
549 Elf32_Sym *sym = &si->symtab[i];
550
551 if(sym->st_shndx != SHN_UNDEF &&
552 soaddr >= sym->st_value &&
553 soaddr < sym->st_value + sym->st_size) {
554 return sym;
555 }
556 }
557
558 return NULL;
559 }
560
#if 0
/* Debug-only helper (currently compiled out): TRACE every entry of
 * 'si''s symbol table.
 * NOTE(review): the first %08x is given the Elf32_Sym pointer 's'
 * itself, not a member -- presumably intentional (prints the entry's
 * address), but confirm before re-enabling. */
static void dump(soinfo *si)
{
    Elf32_Sym *s = si->symtab;
    unsigned n;

    for(n = 0; n < si->nchain; n++) {
        TRACE("%5d %04d> %08x: %02x %04x %08x %08x %s\n", pid, n, s,
               s->st_info, s->st_shndx, s->st_value, s->st_size,
               si->strtab + s->st_name);
        s++;
    }
}
#endif
575
576 static const char *sopaths[] = {
577 "/vendor/lib",
578 "/system/lib",
579 0
580 };
581
/* Open 'name' read-only if it is a regular file.
 * Returns an open file descriptor, or -1 on failure.
 *
 * Fixed: the original stat()ed the path and then open()ed it -- a
 * TOCTOU race, since the file could be replaced between the two calls.
 * Open first and validate the already-open descriptor with fstat().
 */
static int _open_lib(const char *name)
{
    int fd;
    struct stat filestat;

    fd = open(name, O_RDONLY);
    if (fd < 0)
        return -1;

    /* Reject anything that is not a regular file (devices, dirs, ...). */
    if (fstat(fd, &filestat) < 0 || !S_ISREG(filestat.st_mode)) {
        close(fd);
        return -1;
    }

    return fd;
}
594
595 static int open_library(const char *name)
596 {
597 int fd;
598 char buf[512];
599 const char **path;
600 int n;
601
602 TRACE("[ %5d opening %s ]\n", pid, name);
603
604 if(name == 0) return -1;
605 if(strlen(name) > 256) return -1;
606
607 if ((name[0] == '/') && ((fd = _open_lib(name)) >= 0))
608 return fd;
609
610 for (path = ldpaths; *path; path++) {
611 n = format_buffer(buf, sizeof(buf), "%s/%s", *path, name);
612 if (n < 0 || n >= (int)sizeof(buf)) {
613 WARN("Ignoring very long library path: %s/%s\n", *path, name);
614 continue;
615 }
616 if ((fd = _open_lib(buf)) >= 0)
617 return fd;
618 }
619 for (path = sopaths; *path; path++) {
620 n = format_buffer(buf, sizeof(buf), "%s/%s", *path, name);
621 if (n < 0 || n >= (int)sizeof(buf)) {
622 WARN("Ignoring very long library path: %s/%s\n", *path, name);
623 continue;
624 }
625 if ((fd = _open_lib(buf)) >= 0)
626 return fd;
627 }
628
629 return -1;
630 }
631
/* temporary space for holding the first page of the shared lib
 * which contains the elf header (with the pht). */
static unsigned char __header[PAGE_SIZE];

/* Trailer located at the very end of a prelinked library; read back by
 * is_prelinked() below. */
typedef struct {
    long mmap_addr;     /* requested mapping address */
    char tag[4]; /* 'P', 'R', 'E', ' ' */
} prelink_info_t;
640
641 /* Returns the requested base address if the library is prelinked,
642 * and 0 otherwise. */
643 static unsigned long
644 is_prelinked(int fd, const char *name)
645 {
646 off_t sz;
647 prelink_info_t info;
648
649 sz = lseek(fd, -sizeof(prelink_info_t), SEEK_END);
650 if (sz < 0) {
651 DL_ERR("lseek() failed!");
652 return 0;
653 }
654
655 if (read(fd, &info, sizeof(info)) != sizeof(info)) {
656 WARN("Could not read prelink_info_t structure for `%s`\n", name);
657 return 0;
658 }
659
660 if (strncmp(info.tag, "PRE ", 4)) {
661 WARN("`%s` is not a prelinked library\n", name);
662 return 0;
663 }
664
665 return (unsigned long)info.mmap_addr;
666 }
667
/* verify_elf_object
 * Verifies if the object @ base is a valid ELF object
 *
 * Args:
 *
 * Returns:
 *       0 on success
 *      -1 if no valid ELF object is found @ base.
 */
static int
verify_elf_object(void *base, const char *name)
{
    Elf32_Ehdr *hdr = (Elf32_Ehdr *) base;
    static const unsigned char magic[4] = { ELFMAG0, ELFMAG1, ELFMAG2, ELFMAG3 };
    int i;

    /* All four ELF magic bytes must match (e_ident[0..3]). */
    for (i = 0; i < 4; i++) {
        if (hdr->e_ident[EI_MAG0 + i] != magic[i])
            return -1;
    }

    /* TODO: Should we verify anything else in the header? */
#ifdef ANDROID_ARM_LINKER
    if (hdr->e_machine != EM_ARM) return -1;
#elif defined(ANDROID_X86_LINKER)
    if (hdr->e_machine != EM_386) return -1;
#endif
    return 0;
}
695
696
697 /* get_lib_extents
698 * Retrieves the base (*base) address where the ELF object should be
699 * mapped and its overall memory size (*total_sz).
700 *
701 * Args:
702 * fd: Opened file descriptor for the library
703 * name: The name of the library
704 * _hdr: Pointer to the header page of the library
705 * total_sz: Total size of the memory that should be allocated for
706 * this library
707 *
708 * Returns:
709 * -1 if there was an error while trying to get the lib extents.
710 * The possible reasons are:
711 * - Could not determine if the library was prelinked.
712 * - The library provided is not a valid ELF object
713 * 0 if the library did not request a specific base offset (normal
714 * for non-prelinked libs)
715 * > 0 if the library requests a specific address to be mapped to.
716 * This indicates a pre-linked library.
717 */
static unsigned
get_lib_extents(int fd, const char *name, void *__hdr, unsigned *total_sz)
{
    unsigned req_base;
    unsigned min_vaddr = 0xffffffff;
    unsigned max_vaddr = 0;
    unsigned char *_hdr = (unsigned char *)__hdr;
    Elf32_Ehdr *ehdr = (Elf32_Ehdr *)_hdr;
    Elf32_Phdr *phdr;
    int cnt;

    TRACE("[ %5d Computing extents for '%s'. ]\n", pid, name);
    if (verify_elf_object(_hdr, name) < 0) {
        DL_ERR("%5d - %s is not a valid ELF object", pid, name);
        return (unsigned)-1;
    }

    req_base = (unsigned) is_prelinked(fd, name);
    /* NOTE(review): is_prelinked() as written returns 0 on failure, never
     * -1 (it would take a stored mmap_addr of -1 to hit this) -- confirm
     * whether this branch is reachable. */
    if (req_base == (unsigned)-1)
        return -1;
    else if (req_base != 0) {
        TRACE("[ %5d - Prelinked library '%s' requesting base @ 0x%08x ]\n",
              pid, name, req_base);
    } else {
        TRACE("[ %5d - Non-prelinked library '%s' found. ]\n", pid, name);
    }

    phdr = (Elf32_Phdr *)(_hdr + ehdr->e_phoff);

    /* find the min/max p_vaddrs from all the PT_LOAD segments so we can
     * get the range. */
    for (cnt = 0; cnt < ehdr->e_phnum; ++cnt, ++phdr) {
        if (phdr->p_type == PT_LOAD) {
            if ((phdr->p_vaddr + phdr->p_memsz) > max_vaddr)
                max_vaddr = phdr->p_vaddr + phdr->p_memsz;
            if (phdr->p_vaddr < min_vaddr)
                min_vaddr = phdr->p_vaddr;
        }
    }

    if ((min_vaddr == 0xffffffff) && (max_vaddr == 0)) {
        DL_ERR("%5d - No loadable segments found in %s.", pid, name);
        return (unsigned)-1;
    }

    /* truncate min_vaddr down to page boundary */
    min_vaddr &= ~PAGE_MASK;

    /* round max_vaddr up to the next page */
    max_vaddr = (max_vaddr + PAGE_SIZE - 1) & ~PAGE_MASK;

    *total_sz = (max_vaddr - min_vaddr);
    return (unsigned)req_base;
}
772
773 /* reserve_mem_region
774 *
775 * This function reserves a chunk of memory to be used for mapping in
776 * a prelinked shared library. We reserve the entire memory region here, and
777 * then the rest of the linker will relocate the individual loadable
778 * segments into the correct locations within this memory range.
779 *
780 * Args:
781 * si->base: The requested base of the allocation.
782 * si->size: The size of the allocation.
783 *
784 * Returns:
785 * -1 on failure, and 0 on success. On success, si->base will contain
786 * the virtual address at which the library will be mapped.
787 */
788
789 static int reserve_mem_region(soinfo *si)
790 {
791 void *base = mmap((void *)si->base, si->size, PROT_NONE,
792 MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
793 if (base == MAP_FAILED) {
794 DL_ERR("%5d can NOT map (%sprelinked) library '%s' at 0x%08x "
795 "as requested, will try general pool: %d (%s)",
796 pid, (si->base ? "" : "non-"), si->name, si->base,
797 errno, strerror(errno));
798 return -1;
799 } else if (base != (void *)si->base) {
800 DL_ERR("OOPS: %5d %sprelinked library '%s' mapped at 0x%08x, "
801 "not at 0x%08x", pid, (si->base ? "" : "non-"),
802 si->name, (unsigned)base, si->base);
803 munmap(base, si->size);
804 return -1;
805 }
806 return 0;
807 }
808
809 static int alloc_mem_region(soinfo *si)
810 {
811 if (si->base) {
812 /* Attempt to mmap a prelinked library. */
813 return reserve_mem_region(si);
814 }
815
816 /* This is not a prelinked library, so we use the kernel's default
817 allocator.
818 */
819
820 void *base = mmap(NULL, si->size, PROT_NONE,
821 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
822 if (base == MAP_FAILED) {
823 DL_ERR("%5d mmap of library '%s' failed: %d (%s)\n",
824 pid, si->name,
825 errno, strerror(errno));
826 goto err;
827 }
828 si->base = (unsigned) base;
829 PRINT("%5d mapped library '%s' to %08x via kernel allocator.\n",
830 pid, si->name, si->base);
831 return 0;
832
833 err:
834 DL_ERR("OOPS: %5d cannot map library '%s'. no vspace available.",
835 pid, si->name);
836 return -1;
837 }
838
839 #define MAYBE_MAP_FLAG(x,from,to) (((x) & (from)) ? (to) : 0)
840 #define PFLAGS_TO_PROT(x) (MAYBE_MAP_FLAG((x), PF_X, PROT_EXEC) | \
841 MAYBE_MAP_FLAG((x), PF_R, PROT_READ) | \
842 MAYBE_MAP_FLAG((x), PF_W, PROT_WRITE))
843 /* load_segments
844 *
845 * This function loads all the loadable (PT_LOAD) segments into memory
846 * at their appropriate memory offsets off the base address.
847 *
848 * Args:
849 * fd: Open file descriptor to the library to load.
850 * header: Pointer to a header page that contains the ELF header.
851 * This is needed since we haven't mapped in the real file yet.
852 * si: ptr to soinfo struct describing the shared object.
853 *
854 * Returns:
855 * 0 on success, -1 on failure.
856 */
/* Map every PT_LOAD segment of the object into the range reserved at
 * si->base, record PT_DYNAMIC / PT_GNU_RELRO / PT_ARM_EXIDX metadata,
 * and temporarily mark read-only segments writable for relocation
 * (recording the range to re-protect later in wrprotect_start/end).
 * Returns 0 on success; on failure unmaps the whole region, sets
 * FLAG_ERROR and returns -1.
 */
static int
load_segments(int fd, void *header, soinfo *si)
{
    Elf32_Ehdr *ehdr = (Elf32_Ehdr *)header;
    Elf32_Phdr *phdr = (Elf32_Phdr *)((unsigned char *)header + ehdr->e_phoff);
    Elf32_Addr base = (Elf32_Addr) si->base;
    int cnt;
    unsigned len;
    Elf32_Addr tmp;
    unsigned char *pbase;
    unsigned char *extra_base;
    unsigned extra_len;
    unsigned total_sz = 0;

    /* Start with an empty write-protect range; it grows below to cover
     * every segment mapped without PF_W. */
    si->wrprotect_start = 0xffffffff;
    si->wrprotect_end = 0;

    TRACE("[ %5d - Begin loading segments for '%s' @ 0x%08x ]\n",
          pid, si->name, (unsigned)si->base);
    /* Now go through all the PT_LOAD segments and map them into memory
     * at the appropriate locations. */
    for (cnt = 0; cnt < ehdr->e_phnum; ++cnt, ++phdr) {
        if (phdr->p_type == PT_LOAD) {
            DEBUG_DUMP_PHDR(phdr, "PT_LOAD", pid);
            /* we want to map in the segment on a page boundary */
            tmp = base + (phdr->p_vaddr & (~PAGE_MASK));
            /* add the # of bytes we masked off above to the total length. */
            len = phdr->p_filesz + (phdr->p_vaddr & PAGE_MASK);

            TRACE("[ %d - Trying to load segment from '%s' @ 0x%08x "
                  "(0x%08x). p_vaddr=0x%08x p_offset=0x%08x ]\n", pid, si->name,
                  (unsigned)tmp, len, phdr->p_vaddr, phdr->p_offset);
            pbase = mmap((void *)tmp, len, PFLAGS_TO_PROT(phdr->p_flags),
                         MAP_PRIVATE | MAP_FIXED, fd,
                         phdr->p_offset & (~PAGE_MASK));
            if (pbase == MAP_FAILED) {
                DL_ERR("%d failed to map segment from '%s' @ 0x%08x (0x%08x). "
                       "p_vaddr=0x%08x p_offset=0x%08x", pid, si->name,
                       (unsigned)tmp, len, phdr->p_vaddr, phdr->p_offset);
                goto fail;
            }

            /* If 'len' didn't end on page boundary, and it's a writable
             * segment, zero-fill the rest. */
            if ((len & PAGE_MASK) && (phdr->p_flags & PF_W))
                memset((void *)(pbase + len), 0, PAGE_SIZE - (len & PAGE_MASK));

            /* Check to see if we need to extend the map for this segment to
             * cover the diff between filesz and memsz (i.e. for bss).
             *
             *  base           _+---------------------+  page boundary
             *                  .                     .
             *                  |                     |
             *                  .                     .
             *  pbase          _+---------------------+  page boundary
             *                  |                     |
             *                  .                     .
             *  base + p_vaddr _|                     |
             *                  .    \          \     .
             *                  .    | filesz   |     .
             *  pbase + len    _| /  |          |     |
             *     <0 pad>      .    .          .     .
             *  extra_base    _+------------|--------+  page boundary
             *               /  .           .        .
             *               |  .           .        .
             *               |  +------------|--------+  page boundary
             *  extra_len->  |  |           |        |
             *               |  .           | memsz  .
             *               |  .           |        .
             *                \_|           /        |
             *                  .                    .
             *                  |                    |
             *                 _+---------------------+  page boundary
             */
            tmp = (Elf32_Addr)(((unsigned)pbase + len + PAGE_SIZE - 1) &
                               (~PAGE_MASK));
            if (tmp < (base + phdr->p_vaddr + phdr->p_memsz)) {
                extra_len = base + phdr->p_vaddr + phdr->p_memsz - tmp;
                TRACE("[ %5d - Need to extend segment from '%s' @ 0x%08x "
                      "(0x%08x) ]\n", pid, si->name, (unsigned)tmp, extra_len);
                /* map in the extra page(s) as anonymous into the range.
                 * This is probably not necessary as we already mapped in
                 * the entire region previously, but we just want to be
                 * sure. This will also set the right flags on the region
                 * (though we can probably accomplish the same thing with
                 * mprotect).
                 */
                extra_base = mmap((void *)tmp, extra_len,
                                  PFLAGS_TO_PROT(phdr->p_flags),
                                  MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS,
                                  -1, 0);
                if (extra_base == MAP_FAILED) {
                    DL_ERR("[ %5d - failed to extend segment from '%s' @ 0x%08x"
                           " (0x%08x) ]", pid, si->name, (unsigned)tmp,
                           extra_len);
                    goto fail;
                }
                /* TODO: Check if we need to memset-0 this region.
                 * Anonymous mappings are zero-filled copy-on-writes, so we
                 * shouldn't need to. */
                TRACE("[ %5d - Segment from '%s' extended @ 0x%08x "
                      "(0x%08x)\n", pid, si->name, (unsigned)extra_base,
                      extra_len);
            }
            /* set the len here to show the full extent of the segment we
             * just loaded, mostly for debugging */
            len = (((unsigned)base + phdr->p_vaddr + phdr->p_memsz +
                    PAGE_SIZE - 1) & (~PAGE_MASK)) - (unsigned)pbase;
            TRACE("[ %5d - Successfully loaded segment from '%s' @ 0x%08x "
                  "(0x%08x). p_vaddr=0x%08x p_offset=0x%08x\n", pid, si->name,
                  (unsigned)pbase, len, phdr->p_vaddr, phdr->p_offset);
            total_sz += len;
            /* Make the section writable just in case we'll have to write to
             * it during relocation (i.e. text segment). However, we will
             * remember what range of addresses should be write protected.
             *
             */
            if (!(phdr->p_flags & PF_W)) {
                if ((unsigned)pbase < si->wrprotect_start)
                    si->wrprotect_start = (unsigned)pbase;
                if (((unsigned)pbase + len) > si->wrprotect_end)
                    si->wrprotect_end = (unsigned)pbase + len;
                mprotect(pbase, len,
                         PFLAGS_TO_PROT(phdr->p_flags) | PROT_WRITE);
            }
        } else if (phdr->p_type == PT_DYNAMIC) {
            DEBUG_DUMP_PHDR(phdr, "PT_DYNAMIC", pid);
            /* this segment contains the dynamic linking information */
            si->dynamic = (unsigned *)(base + phdr->p_vaddr);
        } else if (phdr->p_type == PT_GNU_RELRO) {
            /* Reject RELRO ranges that fall outside (or overflow past)
             * the mapped region before trusting them. */
            if ((phdr->p_vaddr >= si->size)
                || ((phdr->p_vaddr + phdr->p_memsz) > si->size)
                || ((base + phdr->p_vaddr + phdr->p_memsz) < base)) {
                DL_ERR("%d invalid GNU_RELRO in '%s' "
                       "p_vaddr=0x%08x p_memsz=0x%08x", pid, si->name,
                       phdr->p_vaddr, phdr->p_memsz);
                goto fail;
            }
            si->gnu_relro_start = (Elf32_Addr) (base + phdr->p_vaddr);
            si->gnu_relro_len = (unsigned) phdr->p_memsz;
        } else {
#ifdef ANDROID_ARM_LINKER
            if (phdr->p_type == PT_ARM_EXIDX) {
                DEBUG_DUMP_PHDR(phdr, "PT_ARM_EXIDX", pid);
                /* exidx entries (used for stack unwinding) are 8 bytes each.
                 */
                si->ARM_exidx = (unsigned *)phdr->p_vaddr;
                si->ARM_exidx_count = phdr->p_memsz / 8;
            }
#endif
        }

    }

    /* Sanity check */
    if (total_sz > si->size) {
        DL_ERR("%5d - Total length (0x%08x) of mapped segments from '%s' is "
               "greater than what was allocated (0x%08x). THIS IS BAD!",
               pid, total_sz, si->name, si->size);
        goto fail;
    }

    TRACE("[ %5d - Finish loading segments for '%s' @ 0x%08x. "
          "Total memory footprint: 0x%08x bytes ]\n", pid, si->name,
          (unsigned)si->base, si->size);
    return 0;

fail:
    /* We can just blindly unmap the entire region even though some things
     * were mapped in originally with anonymous and others could have been
     * been mapped in from the file before we failed. The kernel will unmap
     * all the pages in the range, irrespective of how they got there.
     */
    munmap((void *)si->base, si->size);
    si->flags |= FLAG_ERROR;
    return -1;
}
1034
1035 /* TODO: Implement this to take care of the fact that Android ARM
1036 * ELF objects shove everything into a single loadable segment that has the
1037 * write bit set. wr_offset is then used to set non-(data|bss) pages to be
1038 * non-writable.
1039 */
#if 0
/* Currently compiled out.  Per the TODO above, intended to find the
 * lowest writable section address so non-(data|bss) pages could be made
 * non-writable after linking. */
static unsigned
get_wr_offset(int fd, const char *name, Elf32_Ehdr *ehdr)
{
    Elf32_Shdr *shdr_start;
    Elf32_Shdr *shdr;
    int shdr_sz = ehdr->e_shnum * sizeof(Elf32_Shdr);
    int cnt;
    unsigned wr_offset = 0xffffffff;

    shdr_start = mmap(0, shdr_sz, PROT_READ, MAP_PRIVATE, fd,
                      ehdr->e_shoff & (~PAGE_MASK));
    if (shdr_start == MAP_FAILED) {
        WARN("%5d - Could not read section header info from '%s'. Will not "
             "not be able to determine write-protect offset.\n", pid, name);
        return (unsigned)-1;
    }

    /* Track the smallest sh_addr among writable, non-null sections. */
    for(cnt = 0, shdr = shdr_start; cnt < ehdr->e_shnum; ++cnt, ++shdr) {
        if ((shdr->sh_type != SHT_NULL) && (shdr->sh_flags & SHF_WRITE) &&
            (shdr->sh_addr < wr_offset)) {
            wr_offset = shdr->sh_addr;
        }
    }

    munmap(shdr_start, shdr_sz);
    return wr_offset;
}
#endif
1069
1070 static soinfo *
1071 load_library(const char *name)
1072 {
1073 int fd = open_library(name);
1074 int cnt;
1075 unsigned ext_sz;
1076 unsigned req_base;
1077 const char *bname;
1078 soinfo *si = NULL;
1079 Elf32_Ehdr *hdr;
1080
1081 if(fd == -1) {
1082 DL_ERR("Library '%s' not found", name);
1083 return NULL;
1084 }
1085
1086 /* We have to read the ELF header to figure out what to do with this image
1087 */
1088 if (lseek(fd, 0, SEEK_SET) < 0) {
1089 DL_ERR("lseek() failed!");
1090 goto fail;
1091 }
1092
1093 if ((cnt = read(fd, &__header[0], PAGE_SIZE)) < 0) {
1094 DL_ERR("read() failed!");
1095 goto fail;
1096 }
1097
1098 /* Parse the ELF header and get the size of the memory footprint for
1099 * the library */
1100 req_base = get_lib_extents(fd, name, &__header[0], &ext_sz);
1101 if (req_base == (unsigned)-1)
1102 goto fail;
1103 TRACE("[ %5d - '%s' (%s) wants base=0x%08x sz=0x%08x ]\n", pid, name,
1104 (req_base ? "prelinked" : "not pre-linked"), req_base, ext_sz);
1105
1106 /* Now configure the soinfo struct where we'll store all of our data
1107 * for the ELF object. If the loading fails, we waste the entry, but
1108 * same thing would happen if we failed during linking. Configuring the
1109 * soinfo struct here is a lot more convenient.
1110 */
1111 bname = strrchr(name, '/');
1112 si = alloc_info(bname ? bname + 1 : name);
1113 if (si == NULL)
1114 goto fail;
1115
1116 /* Carve out a chunk of memory where we will map in the individual
1117 * segments */
1118 si->base = req_base;
1119 si->size = ext_sz;
1120 si->flags = 0;
1121 si->entry = 0;
1122 si->dynamic = (unsigned *)-1;
1123 if (alloc_mem_region(si) < 0)
1124 goto fail;
1125
1126 TRACE("[ %5d allocated memory for %s @ %p (0x%08x) ]\n",
1127 pid, name, (void *)si->base, (unsigned) ext_sz);
1128
1129 /* Now actually load the library's segments into right places in memory */
1130 if (load_segments(fd, &__header[0], si) < 0) {
1131 goto fail;
1132 }
1133
1134 /* this might not be right. Technically, we don't even need this info
1135 * once we go through 'load_segments'. */
1136 hdr = (Elf32_Ehdr *)si->base;
1137 si->phdr = (Elf32_Phdr *)((unsigned char *)si->base + hdr->e_phoff);
1138 si->phnum = hdr->e_phnum;
1139 /**/
1140
1141 close(fd);
1142 return si;
1143
1144 fail:
1145 if (si) free_info(si);
1146 close(fd);
1147 return NULL;
1148 }
1149
1150 static soinfo *
1151 init_library(soinfo *si)
1152 {
1153 unsigned wr_offset = 0xffffffff;
1154
1155 /* At this point we know that whatever is loaded @ base is a valid ELF
1156 * shared library whose segments are properly mapped in. */
1157 TRACE("[ %5d init_library base=0x%08x sz=0x%08x name='%s') ]\n",
1158 pid, si->base, si->size, si->name);
1159
1160 if(link_image(si, wr_offset)) {
1161 /* We failed to link. However, we can only restore libbase
1162 ** if no additional libraries have moved it since we updated it.
1163 */
1164 munmap((void *)si->base, si->size);
1165 return NULL;
1166 }
1167
1168 return si;
1169 }
1170
1171 soinfo *find_library(const char *name)
1172 {
1173 soinfo *si;
1174 const char *bname;
1175
1176 #if ALLOW_SYMBOLS_FROM_MAIN
1177 if (name == NULL)
1178 return somain;
1179 #else
1180 if (name == NULL)
1181 return NULL;
1182 #endif
1183
1184 bname = strrchr(name, '/');
1185 bname = bname ? bname + 1 : name;
1186
1187 for(si = solist; si != 0; si = si->next){
1188 if(!strcmp(bname, si->name)) {
1189 if(si->flags & FLAG_ERROR) {
1190 DL_ERR("%5d '%s' failed to load previously", pid, bname);
1191 return NULL;
1192 }
1193 if(si->flags & FLAG_LINKED) return si;
1194 DL_ERR("OOPS: %5d recursive link to '%s'", pid, si->name);
1195 return NULL;
1196 }
1197 }
1198
1199 TRACE("[ %5d '%s' has not been loaded yet. Locating...]\n", pid, name);
1200 si = load_library(name);
1201 if(si == NULL)
1202 return NULL;
1203 return init_library(si);
1204 }
1205
1206 /* TODO:
1207 * notify gdb of unload
1208 * for non-prelinked libraries, find a way to decrement libbase
1209 */
1210 static void call_destructors(soinfo *si);
1211 unsigned unload_library(soinfo *si)
1212 {
1213 unsigned *d;
1214 if (si->refcount == 1) {
1215 TRACE("%5d unloading '%s'\n", pid, si->name);
1216 call_destructors(si);
1217
1218 /*
1219 * Make sure that we undo the PT_GNU_RELRO protections we added
1220 * in link_image. This is needed to undo the DT_NEEDED hack below.
1221 */
1222 if ((si->gnu_relro_start != 0) && (si->gnu_relro_len != 0)) {
1223 Elf32_Addr start = (si->gnu_relro_start & ~PAGE_MASK);
1224 unsigned len = (si->gnu_relro_start - start) + si->gnu_relro_len;
1225 if (mprotect((void *) start, len, PROT_READ | PROT_WRITE) < 0)
1226 DL_ERR("%5d %s: could not undo GNU_RELRO protections. "
1227 "Expect a crash soon. errno=%d (%s)",
1228 pid, si->name, errno, strerror(errno));
1229
1230 }
1231
1232 for(d = si->dynamic; *d; d += 2) {
1233 if(d[0] == DT_NEEDED){
1234 soinfo *lsi = (soinfo *)d[1];
1235
1236 // The next line will segfault if the we don't undo the
1237 // PT_GNU_RELRO protections (see comments above and in
1238 // link_image().
1239 d[1] = 0;
1240
1241 if (validate_soinfo(lsi)) {
1242 TRACE("%5d %s needs to unload %s\n", pid,
1243 si->name, lsi->name);
1244 unload_library(lsi);
1245 }
1246 else
1247 DL_ERR("%5d %s: could not unload dependent library",
1248 pid, si->name);
1249 }
1250 }
1251
1252 munmap((char *)si->base, si->size);
1253 notify_gdb_of_unload(si);
1254 free_info(si);
1255 si->refcount = 0;
1256 }
1257 else {
1258 si->refcount--;
1259 PRINT("%5d not unloading '%s', decrementing refcount to %d\n",
1260 pid, si->name, si->refcount);
1261 }
1262 return si->refcount;
1263 }
1264
1265 /* TODO: don't use unsigned for addrs below. It works, but is not
1266 * ideal. They should probably be either uint32_t, Elf32_Addr, or unsigned
1267 * long.
1268 */
1269 static int reloc_library(soinfo *si, Elf32_Rel *rel, unsigned count)
1270 {
1271 Elf32_Sym *symtab = si->symtab;
1272 const char *strtab = si->strtab;
1273 Elf32_Sym *s;
1274 unsigned base;
1275 Elf32_Rel *start = rel;
1276 unsigned idx;
1277
1278 for (idx = 0; idx < count; ++idx) {
1279 unsigned type = ELF32_R_TYPE(rel->r_info);
1280 unsigned sym = ELF32_R_SYM(rel->r_info);
1281 unsigned reloc = (unsigned)(rel->r_offset + si->base);
1282 unsigned sym_addr = 0;
1283 char *sym_name = NULL;
1284
1285 DEBUG("%5d Processing '%s' relocation at index %d\n", pid,
1286 si->name, idx);
1287 if(sym != 0) {
1288 sym_name = (char *)(strtab + symtab[sym].st_name);
1289 sym_addr = NULL;
1290 if ((sym_addr = get_hooked_symbol(sym_name)) != NULL) {
1291 DEBUG("hooked symbol %s to %x\n", sym_name, sym_addr);
1292 }
1293 else
1294 {
1295 s = _do_lookup(si, sym_name, &base);
1296 }
1297 if(sym_addr != NULL)
1298 {
1299 } else
1300 if(s == NULL) {
1301 /* We only allow an undefined symbol if this is a weak
1302 reference.. */
1303 s = &symtab[sym];
1304 if (ELF32_ST_BIND(s->st_info) != STB_WEAK) {
1305 DL_ERR("%5d cannot locate '%s'...\n", pid, sym_name);
1306 return -1;
1307 }
1308
1309 /* IHI0044C AAELF 4.5.1.1:
1310
1311 Libraries are not searched to resolve weak references.
1312 It is not an error for a weak reference to remain
1313 unsatisfied.
1314
1315 During linking, the value of an undefined weak reference is:
1316 - Zero if the relocation type is absolute
1317 - The address of the place if the relocation is pc-relative
1318 - The address of nominial base address if the relocation
1319 type is base-relative.
1320 */
1321
1322 switch (type) {
1323 #if defined(ANDROID_ARM_LINKER)
1324 case R_ARM_JUMP_SLOT:
1325 case R_ARM_GLOB_DAT:
1326 case R_ARM_ABS32:
1327 case R_ARM_RELATIVE: /* Don't care. */
1328 case R_ARM_NONE: /* Don't care. */
1329 #elif defined(ANDROID_X86_LINKER)
1330 case R_386_JUMP_SLOT:
1331 case R_386_GLOB_DAT:
1332 case R_386_32:
1333 case R_386_RELATIVE: /* Dont' care. */
1334 #endif /* ANDROID_*_LINKER */
1335 /* sym_addr was initialized to be zero above or relocation
1336 code below does not care about value of sym_addr.
1337 No need to do anything. */
1338 break;
1339
1340 #if defined(ANDROID_X86_LINKER)
1341 case R_386_PC32:
1342 sym_addr = reloc;
1343 break;
1344 #endif /* ANDROID_X86_LINKER */
1345
1346 #if defined(ANDROID_ARM_LINKER)
1347 case R_ARM_COPY:
1348 /* Fall through. Can't really copy if weak symbol is
1349 not found in run-time. */
1350 #endif /* ANDROID_ARM_LINKER */
1351 default:
1352 DL_ERR("%5d unknown weak reloc type %d @ %p (%d)\n",
1353 pid, type, rel, (int) (rel - start));
1354 return -1;
1355 }
1356 } else {
1357 /* We got a definition. */
1358 #if 0
1359 if((base == 0) && (si->base != 0)){
1360 /* linking from libraries to main image is bad */
1361 DL_ERR("%5d cannot locate '%s'...",
1362 pid, strtab + symtab[sym].st_name);
1363 return -1;
1364 }
1365 #endif
1366 sym_addr = (unsigned)(s->st_value + base);
1367 }
1368 COUNT_RELOC(RELOC_SYMBOL);
1369 } else {
1370 s = NULL;
1371 }
1372
1373 /* TODO: This is ugly. Split up the relocations by arch into
1374 * different files.
1375 */
1376 switch(type){
1377 #if defined(ANDROID_ARM_LINKER)
1378 case R_ARM_JUMP_SLOT:
1379 COUNT_RELOC(RELOC_ABSOLUTE);
1380 MARK(rel->r_offset);
1381 TRACE_TYPE(RELO, "%5d RELO JMP_SLOT %08x <- %08x %s\n", pid,
1382 reloc, sym_addr, sym_name);
1383 *((unsigned*)reloc) = sym_addr;
1384 break;
1385 case R_ARM_GLOB_DAT:
1386 COUNT_RELOC(RELOC_ABSOLUTE);
1387 MARK(rel->r_offset);
1388 TRACE_TYPE(RELO, "%5d RELO GLOB_DAT %08x <- %08x %s\n", pid,
1389 reloc, sym_addr, sym_name);
1390 *((unsigned*)reloc) = sym_addr;
1391 break;
1392 case R_ARM_ABS32:
1393 COUNT_RELOC(RELOC_ABSOLUTE);
1394 MARK(rel->r_offset);
1395 TRACE_TYPE(RELO, "%5d RELO ABS %08x <- %08x %s\n", pid,
1396 reloc, sym_addr, sym_name);
1397 *((unsigned*)reloc) += sym_addr;
1398 break;
1399 case R_ARM_REL32:
1400 COUNT_RELOC(RELOC_RELATIVE);
1401 MARK(rel->r_offset);
1402 TRACE_TYPE(RELO, "%5d RELO REL32 %08x <- %08x - %08x %s\n", pid,
1403 reloc, sym_addr, rel->r_offset, sym_name);
1404 *((unsigned*)reloc) += sym_addr - rel->r_offset;
1405 break;
1406 #elif defined(ANDROID_X86_LINKER)
1407 case R_386_JUMP_SLOT:
1408 COUNT_RELOC(RELOC_ABSOLUTE);
1409 MARK(rel->r_offset);
1410 TRACE_TYPE(RELO, "%5d RELO JMP_SLOT %08x <- %08x %s\n", pid,
1411 reloc, sym_addr, sym_name);
1412 *((unsigned*)reloc) = sym_addr;
1413 break;
1414 case R_386_GLOB_DAT:
1415 COUNT_RELOC(RELOC_ABSOLUTE);
1416 MARK(rel->r_offset);
1417 TRACE_TYPE(RELO, "%5d RELO GLOB_DAT %08x <- %08x %s\n", pid,
1418 reloc, sym_addr, sym_name);
1419 *((unsigned*)reloc) = sym_addr;
1420 break;
1421 #endif /* ANDROID_*_LINKER */
1422
1423 #if defined(ANDROID_ARM_LINKER)
1424 case R_ARM_RELATIVE:
1425 #elif defined(ANDROID_X86_LINKER)
1426 case R_386_RELATIVE:
1427 #endif /* ANDROID_*_LINKER */
1428 COUNT_RELOC(RELOC_RELATIVE);
1429 MARK(rel->r_offset);
1430 if(sym){
1431 DL_ERR("%5d odd RELATIVE form...", pid);
1432 return -1;
1433 }
1434 TRACE_TYPE(RELO, "%5d RELO RELATIVE %08x <- +%08x\n", pid,
1435 reloc, si->base);
1436 *((unsigned*)reloc) += si->base;
1437 break;
1438
1439 #if defined(ANDROID_X86_LINKER)
1440 case R_386_32:
1441 COUNT_RELOC(RELOC_RELATIVE);
1442 MARK(rel->r_offset);
1443
1444 TRACE_TYPE(RELO, "%5d RELO R_386_32 %08x <- +%08x %s\n", pid,
1445 reloc, sym_addr, sym_name);
1446 *((unsigned *)reloc) += (unsigned)sym_addr;
1447 break;
1448
1449 case R_386_PC32:
1450 COUNT_RELOC(RELOC_RELATIVE);
1451 MARK(rel->r_offset);
1452 TRACE_TYPE(RELO, "%5d RELO R_386_PC32 %08x <- "
1453 "+%08x (%08x - %08x) %s\n", pid, reloc,
1454 (sym_addr - reloc), sym_addr, reloc, sym_name);
1455 *((unsigned *)reloc) += (unsigned)(sym_addr - reloc);
1456 break;
1457 #endif /* ANDROID_X86_LINKER */
1458
1459 #ifdef ANDROID_ARM_LINKER
1460 case R_ARM_COPY:
1461 COUNT_RELOC(RELOC_COPY);
1462 MARK(rel->r_offset);
1463 TRACE_TYPE(RELO, "%5d RELO %08x <- %d @ %08x %s\n", pid,
1464 reloc, s->st_size, sym_addr, sym_name);
1465 memcpy((void*)reloc, (void*)sym_addr, s->st_size);
1466 break;
1467 case R_ARM_NONE:
1468 break;
1469 #endif /* ANDROID_ARM_LINKER */
1470
1471 default:
1472 DL_ERR("%5d unknown reloc type %d @ %p (%d)",
1473 pid, type, rel, (int) (rel - start));
1474 return -1;
1475 }
1476 rel++;
1477 }
1478 return 0;
1479 }
1480
/* Please read the "Initialization and Termination functions" section
 * of the linker design note in bionic/linker/README.TXT to understand
1483 * what the following code is doing.
1484 *
1485 * The important things to remember are:
1486 *
1487 * DT_PREINIT_ARRAY must be called first for executables, and should
1488 * not appear in shared libraries.
1489 *
1490 * DT_INIT should be called before DT_INIT_ARRAY if both are present
1491 *
1492 * DT_FINI should be called after DT_FINI_ARRAY if both are present
1493 *
1494 * DT_FINI_ARRAY must be parsed in reverse order.
1495 */
1496
1497 static void call_array(unsigned *ctor, int count, int reverse)
1498 {
1499 int n, inc = 1;
1500
1501 if (reverse) {
1502 ctor += (count-1);
1503 inc = -1;
1504 }
1505
1506 for(n = count; n > 0; n--) {
1507 TRACE("[ %5d Looking at %s *0x%08x == 0x%08x ]\n", pid,
1508 reverse ? "dtor" : "ctor",
1509 (unsigned)ctor, (unsigned)*ctor);
1510 void (*func)() = (void (*)()) *ctor;
1511 ctor += inc;
1512 if(((int) func == 0) || ((int) func == -1)) continue;
1513 TRACE("[ %5d Calling func @ 0x%08x ]\n", pid, (unsigned)func);
1514 func();
1515 }
1516 }
1517
1518 void call_constructors_recursive(soinfo *si)
1519 {
1520 if (si->constructors_called)
1521 return;
1522 if (strcmp(si->name, "libc.so") == 0)
1523 return;
1524 // Set this before actually calling the constructors, otherwise it doesn't
1525 // protect against recursive constructor calls. One simple example of
1526 // constructor recursion is the libc debug malloc, which is implemented in
1527 // libc_malloc_debug_leak.so:
1528 // 1. The program depends on libc, so libc's constructor is called here.
1529 // 2. The libc constructor calls dlopen() to load libc_malloc_debug_leak.so.
1530 // 3. dlopen() calls call_constructors_recursive() with the newly created
1531 // soinfo for libc_malloc_debug_leak.so.
1532 // 4. The debug so depends on libc, so call_constructors_recursive() is
1533 // called again with the libc soinfo. If it doesn't trigger the early-
1534 // out above, the libc constructor will be called again (recursively!).
1535 si->constructors_called = 1;
1536
1537 if (si->flags & FLAG_EXE) {
1538 TRACE("[ %5d Calling preinit_array @ 0x%08x [%d] for '%s' ]\n",
1539 pid, (unsigned)si->preinit_array, si->preinit_array_count,
1540 si->name);
1541 call_array(si->preinit_array, si->preinit_array_count, 0);
1542 TRACE("[ %5d Done calling preinit_array for '%s' ]\n", pid, si->name);
1543 } else {
1544 if (si->preinit_array) {
1545 DL_ERR("%5d Shared library '%s' has a preinit_array table @ 0x%08x."
1546 " This is INVALID.", pid, si->name,
1547 (unsigned)si->preinit_array);
1548 }
1549 }
1550
1551 if (si->dynamic) {
1552 unsigned *d;
1553 for(d = si->dynamic; *d; d += 2) {
1554 if(d[0] == DT_NEEDED){
1555 soinfo* lsi = (soinfo *)d[1];
1556 if (!validate_soinfo(lsi)) {
1557 DL_ERR("%5d bad DT_NEEDED pointer in %s",
1558 pid, si->name);
1559 } else {
1560 call_constructors_recursive(lsi);
1561 }
1562 }
1563 }
1564 }
1565
1566 if (si->init_func) {
1567 TRACE("[ %5d Calling init_func @ 0x%08x for '%s' ]\n", pid,
1568 (unsigned)si->init_func, si->name);
1569 si->init_func();
1570 TRACE("[ %5d Done calling init_func for '%s' ]\n", pid, si->name);
1571 }
1572
1573 if (si->init_array) {
1574 TRACE("[ %5d Calling init_array @ 0x%08x [%d] for '%s' ]\n", pid,
1575 (unsigned)si->init_array, si->init_array_count, si->name);
1576 call_array(si->init_array, si->init_array_count, 0);
1577 TRACE("[ %5d Done calling init_array for '%s' ]\n", pid, si->name);
1578 }
1579
1580 }
1581
1582 static void call_destructors(soinfo *si)
1583 {
1584 if (si->fini_array) {
1585 TRACE("[ %5d Calling fini_array @ 0x%08x [%d] for '%s' ]\n", pid,
1586 (unsigned)si->fini_array, si->fini_array_count, si->name);
1587 call_array(si->fini_array, si->fini_array_count, 1);
1588 TRACE("[ %5d Done calling fini_array for '%s' ]\n", pid, si->name);
1589 }
1590
1591 if (si->fini_func) {
1592 TRACE("[ %5d Calling fini_func @ 0x%08x for '%s' ]\n", pid,
1593 (unsigned)si->fini_func, si->name);
1594 si->fini_func();
1595 TRACE("[ %5d Done calling fini_func for '%s' ]\n", pid, si->name);
1596 }
1597 }
1598
1599 /* Force any of the closed stdin, stdout and stderr to be associated with
1600 /dev/null. */
1601 static int nullify_closed_stdio (void)
1602 {
1603 int dev_null, i, status;
1604 int return_value = 0;
1605
1606 dev_null = open("/dev/null", O_RDWR);
1607 if (dev_null < 0) {
1608 DL_ERR("Cannot open /dev/null.");
1609 return -1;
1610 }
1611 TRACE("[ %5d Opened /dev/null file-descriptor=%d]\n", pid, dev_null);
1612
1613 /* If any of the stdio file descriptors is valid and not associated
1614 with /dev/null, dup /dev/null to it. */
1615 for (i = 0; i < 3; i++) {
1616 /* If it is /dev/null already, we are done. */
1617 if (i == dev_null)
1618 continue;
1619
1620 TRACE("[ %5d Nullifying stdio file descriptor %d]\n", pid, i);
1621 /* The man page of fcntl does not say that fcntl(..,F_GETFL)
1622 can be interrupted but we do this just to be safe. */
1623 do {
1624 status = fcntl(i, F_GETFL);
1625 } while (status < 0 && errno == EINTR);
1626
1627 /* If file is openned, we are good. */
1628 if (status >= 0)
1629 continue;
1630
1631 /* The only error we allow is that the file descriptor does not
1632 exist, in which case we dup /dev/null to it. */
1633 if (errno != EBADF) {
1634 DL_ERR("nullify_stdio: unhandled error %s", strerror(errno));
1635 return_value = -1;
1636 continue;
1637 }
1638
1639 /* Try dupping /dev/null to this stdio file descriptor and
1640 repeat if there is a signal. Note that any errors in closing
1641 the stdio descriptor are lost. */
1642 do {
1643 status = dup2(dev_null, i);
1644 } while (status < 0 && errno == EINTR);
1645
1646 if (status < 0) {
1647 DL_ERR("nullify_stdio: dup2 error %s", strerror(errno));
1648 return_value = -1;
1649 continue;
1650 }
1651 }
1652
1653 /* If /dev/null is not one of the stdio file descriptors, close it. */
1654 if (dev_null > 2) {
1655 TRACE("[ %5d Closing /dev/null file-descriptor=%d]\n", pid, dev_null);
1656 do {
1657 status = close(dev_null);
1658 } while (status < 0 && errno == EINTR);
1659
1660 if (status < 0) {
1661 DL_ERR("nullify_stdio: close error %s", strerror(errno));
1662 return_value = -1;
1663 }
1664 }
1665
1666 return return_value;
1667 }
1668
1669 static int link_image(soinfo *si, unsigned wr_offset)
1670 {
1671 unsigned *d;
1672 Elf32_Phdr *phdr = si->phdr;
1673 int phnum = si->phnum;
1674
1675 INFO("[ %5d linking %s ]\n", pid, si->name);
1676 DEBUG("%5d si->base = 0x%08x si->flags = 0x%08x\n", pid,
1677 si->base, si->flags);
1678
1679 if (si->flags & (FLAG_EXE | FLAG_LINKER)) {
1680 /* Locate the needed program segments (DYNAMIC/ARM_EXIDX) for
1681 * linkage info if this is the executable or the linker itself.
1682 * If this was a dynamic lib, that would have been done at load time.
1683 *
1684 * TODO: It's unfortunate that small pieces of this are
1685 * repeated from the load_library routine. Refactor this just
1686 * slightly to reuse these bits.
1687 */
1688 si->size = 0;
1689 for(; phnum > 0; --phnum, ++phdr) {
1690 #ifdef ANDROID_ARM_LINKER
1691 if(phdr->p_type == PT_ARM_EXIDX) {
1692 /* exidx entries (used for stack unwinding) are 8 bytes each.
1693 */
1694 si->ARM_exidx = (unsigned *)phdr->p_vaddr;
1695 si->ARM_exidx_count = phdr->p_memsz / 8;
1696 }
1697 #endif
1698 if (phdr->p_type == PT_LOAD) {
1699 /* For the executable, we use the si->size field only in
1700 dl_unwind_find_exidx(), so the meaning of si->size
1701 is not the size of the executable; it is the distance
1702 between the load location of the executable and the last
1703 address of the loadable part of the executable.
1704 We use the range [si->base, si->base + si->size) to
1705 determine whether a PC value falls within the executable
1706 section. Of course, if a value is between si->base and
1707 (si->base + phdr->p_vaddr), it's not in the executable
1708 section, but a) we shouldn't be asking for such a value
1709 anyway, and b) if we have to provide an EXIDX for such a
1710 value, then the executable's EXIDX is probably the better
1711 choice.
1712 */
1713 DEBUG_DUMP_PHDR(phdr, "PT_LOAD", pid);
1714 if (phdr->p_vaddr + phdr->p_memsz > si->size)
1715 si->size = phdr->p_vaddr + phdr->p_memsz;
1716 /* try to remember what range of addresses should be write
1717 * protected */
1718 if (!(phdr->p_flags & PF_W)) {
1719 unsigned _end;
1720
1721 if (si->base + phdr->p_vaddr < si->wrprotect_start)
1722 si->wrprotect_start = si->base + phdr->p_vaddr;
1723 _end = (((si->base + phdr->p_vaddr + phdr->p_memsz + PAGE_SIZE - 1) &
1724 (~PAGE_MASK)));
1725 if (_end > si->wrprotect_end)
1726 si->wrprotect_end = _end;
1727 /* Make the section writable just in case we'll have to
1728 * write to it during relocation (i.e. text segment).
1729 * However, we will remember what range of addresses
1730 * should be write protected.
1731 */
1732 mprotect((void *) (si->base + phdr->p_vaddr),
1733 phdr->p_memsz,
1734 PFLAGS_TO_PROT(phdr->p_flags) | PROT_WRITE);
1735 }
1736 } else if (phdr->p_type == PT_DYNAMIC) {
1737 if (si->dynamic != (unsigned *)-1) {
1738 DL_ERR("%5d multiple PT_DYNAMIC segments found in '%s'. "
1739 "Segment at 0x%08x, previously one found at 0x%08x",
1740 pid, si->name, si->base + phdr->p_vaddr,
1741 (unsigned)si->dynamic);
1742 goto fail;
1743 }
1744 DEBUG_DUMP_PHDR(phdr, "PT_DYNAMIC", pid);
1745 si->dynamic = (unsigned *) (si->base + phdr->p_vaddr);
1746 } else if (phdr->p_type == PT_GNU_RELRO) {
1747 if ((phdr->p_vaddr >= si->size)
1748 || ((phdr->p_vaddr + phdr->p_memsz) > si->size)
1749 || ((si->base + phdr->p_vaddr + phdr->p_memsz) < si->base)) {
1750 DL_ERR("%d invalid GNU_RELRO in '%s' "
1751 "p_vaddr=0x%08x p_memsz=0x%08x", pid, si->name,
1752 phdr->p_vaddr, phdr->p_memsz);
1753 goto fail;
1754 }
1755 si->gnu_relro_start = (Elf32_Addr) (si->base + phdr->p_vaddr);
1756 si->gnu_relro_len = (unsigned) phdr->p_memsz;
1757 }
1758 }
1759 }
1760
1761 if (si->dynamic == (unsigned *)-1) {
1762 DL_ERR("%5d missing PT_DYNAMIC?!", pid);
1763 goto fail;
1764 }
1765
1766 DEBUG("%5d dynamic = %p\n", pid, si->dynamic);
1767
1768 /* extract useful information from dynamic section */
1769 for(d = si->dynamic; *d; d++){
1770 DEBUG("%5d d = %p, d[0] = 0x%08x d[1] = 0x%08x\n", pid, d, d[0], d[1]);
1771 switch(*d++){
1772 case DT_HASH:
1773 si->nbucket = ((unsigned *) (si->base + *d))[0];
1774 si->nchain = ((unsigned *) (si->base + *d))[1];
1775 si->bucket = (unsigned *) (si->base + *d + 8);
1776 si->chain = (unsigned *) (si->base + *d + 8 + si->nbucket * 4);
1777 break;
1778 case DT_STRTAB:
1779 si->strtab = (const char *) (si->base + *d);
1780 break;
1781 case DT_SYMTAB:
1782 si->symtab = (Elf32_Sym *) (si->base + *d);
1783 break;
1784 case DT_PLTREL:
1785 if(*d != DT_REL) {
1786 DL_ERR("DT_RELA not supported");
1787 goto fail;
1788 }
1789 break;
1790 case DT_JMPREL:
1791 si->plt_rel = (Elf32_Rel*) (si->base + *d);
1792 break;
1793 case DT_PLTRELSZ:
1794 si->plt_rel_count = *d / 8;
1795 break;
1796 case DT_REL:
1797 si->rel = (Elf32_Rel*) (si->base + *d);
1798 break;
1799 case DT_RELSZ:
1800 si->rel_count = *d / 8;
1801 break;
1802 case DT_PLTGOT:
1803 /* Save this in case we decide to do lazy binding. We don't yet. */
1804 si->plt_got = (unsigned *)(si->base + *d);
1805 break;
1806 case DT_DEBUG:
1807 // Set the DT_DEBUG entry to the addres of _r_debug for GDB
1808 *d = (int) &_r_debug;
1809 break;
1810 case DT_RELA:
1811 DL_ERR("%5d DT_RELA not supported", pid);
1812 goto fail;
1813 case DT_INIT:
1814 si->init_func = (void (*)(void))(si->base + *d);
1815 DEBUG("%5d %s constructors (init func) found at %p\n",
1816 pid, si->name, si->init_func);
1817 break;
1818 case DT_FINI:
1819 si->fini_func = (void (*)(void))(si->base + *d);
1820 DEBUG("%5d %s destructors (fini func) found at %p\n",
1821 pid, si->name, si->fini_func);
1822 break;
1823 case DT_INIT_ARRAY:
1824 si->init_array = (unsigned *)(si->base + *d);
1825 DEBUG("%5d %s constructors (init_array) found at %p\n",
1826 pid, si->name, si->init_array);
1827 break;
1828 case DT_INIT_ARRAYSZ:
1829 si->init_array_count = ((unsigned)*d) / sizeof(Elf32_Addr);
1830 break;
1831 case DT_FINI_ARRAY:
1832 si->fini_array = (unsigned *)(si->base + *d);
1833 DEBUG("%5d %s destructors (fini_array) found at %p\n",
1834 pid, si->name, si->fini_array);
1835 break;
1836 case DT_FINI_ARRAYSZ:
1837 si->fini_array_count = ((unsigned)*d) / sizeof(Elf32_Addr);
1838 break;
1839 case DT_PREINIT_ARRAY:
1840 si->preinit_array = (unsigned *)(si->base + *d);
1841 DEBUG("%5d %s constructors (preinit_array) found at %p\n",
1842 pid, si->name, si->preinit_array);
1843 break;
1844 case DT_PREINIT_ARRAYSZ:
1845 si->preinit_array_count = ((unsigned)*d) / sizeof(Elf32_Addr);
1846 break;
1847 case DT_TEXTREL:
1848 /* TODO: make use of this. */
1849 /* this means that we might have to write into where the text
1850 * segment was loaded during relocation... Do something with
1851 * it.
1852 */
1853 DEBUG("%5d Text segment should be writable during relocation.\n",
1854 pid);
1855 break;
1856 }
1857 }
1858
1859 DEBUG("%5d si->base = 0x%08x, si->strtab = %p, si->symtab = %p\n",
1860 pid, si->base, si->strtab, si->symtab);
1861
1862 if((si->strtab == 0) || (si->symtab == 0)) {
1863 DL_ERR("%5d missing essential tables", pid);
1864 goto fail;
1865 }
1866
1867 /* if this is the main executable, then load all of the preloads now */
1868 if(si->flags & FLAG_EXE) {
1869 int i;
1870 memset(preloads, 0, sizeof(preloads));
1871 for(i = 0; ldpreload_names[i] != NULL; i++) {
1872 soinfo *lsi = find_library(ldpreload_names[i]);
1873 if(lsi == 0) {
1874 strlcpy(tmp_err_buf, linker_get_error(), sizeof(tmp_err_buf));
1875 DL_ERR("%5d could not load needed library '%s' for '%s' (%s)",
1876 pid, ldpreload_names[i], si->name, tmp_err_buf);
1877 goto fail;
1878 }
1879 lsi->refcount++;
1880 preloads[i] = lsi;
1881 }
1882 }
1883
1884 for(d = si->dynamic; *d; d += 2) {
1885 if(d[0] == DT_NEEDED){
1886 DEBUG("%5d %s needs %s\n", pid, si->name, si->strtab + d[1]);
1887 soinfo *lsi = find_library(si->strtab + d[1]);
1888 if(lsi == 0) {
1889 strlcpy(tmp_err_buf, linker_get_error(), sizeof(tmp_err_buf));
1890 DL_ERR("%5d could not load needed library '%s' for '%s' (%s)",
1891 pid, si->strtab + d[1], si->name, tmp_err_buf);
1892 goto fail;
1893 }
1894 /* Save the soinfo of the loaded DT_NEEDED library in the payload
1895 of the DT_NEEDED entry itself, so that we can retrieve the
1896 soinfo directly later from the dynamic segment. This is a hack,
1897 but it allows us to map from DT_NEEDED to soinfo efficiently
1898 later on when we resolve relocations, trying to look up a symbol
1899 with dlsym().
1900 */
1901 d[1] = (unsigned)lsi;
1902 lsi->refcount++;
1903 }
1904 }
1905
1906 if(si->plt_rel) {
1907 DEBUG("[ %5d relocating %s plt ]\n", pid, si->name );
1908 if(reloc_library(si, si->plt_rel, si->plt_rel_count))
1909 goto fail;
1910 }
1911 if(si->rel) {
1912 DEBUG("[ %5d relocating %s ]\n", pid, si->name );
1913 if(reloc_library(si, si->rel, si->rel_count))
1914 goto fail;
1915 }
1916
1917 si->flags |= FLAG_LINKED;
1918 DEBUG("[ %5d finished linking %s ]\n", pid, si->name);
1919
1920 #if 0
1921 /* This is the way that the old dynamic linker did protection of
1922 * non-writable areas. It would scan section headers and find where
1923 * .text ended (rather where .data/.bss began) and assume that this is
1924 * the upper range of the non-writable area. This is too coarse,
1925 * and is kept here for reference until we fully move away from single
1926 * segment elf objects. See the code in get_wr_offset (also #if'd 0)
1927 * that made this possible.
1928 */
1929 if(wr_offset < 0xffffffff){
1930 mprotect((void*) si->base, wr_offset, PROT_READ | PROT_EXEC);
1931 }
1932 #else
1933 /* TODO: Verify that this does the right thing in all cases, as it
1934 * presently probably does not. It is possible that an ELF image will
1935 * come with multiple read-only segments. What we ought to do is scan
1936 * the program headers again and mprotect all the read-only segments.
1937 * To prevent re-scanning the program header, we would have to build a
1938 * list of loadable segments in si, and then scan that instead. */
1939 if (si->wrprotect_start != 0xffffffff && si->wrprotect_end != 0) {
1940 mprotect((void *)si->wrprotect_start,
1941 si->wrprotect_end - si->wrprotect_start,
1942 PROT_READ | PROT_EXEC);
1943 }
1944 #endif
1945
1946 if (si->gnu_relro_start != 0 && si->gnu_relro_len != 0) {
1947 Elf32_Addr start = (si->gnu_relro_start & ~PAGE_MASK);
1948 unsigned len = (si->gnu_relro_start - start) + si->gnu_relro_len;
1949 if (mprotect((void *) start, len, PROT_READ) < 0) {
1950 DL_ERR("%5d GNU_RELRO mprotect of library '%s' failed: %d (%s)\n",
1951 pid, si->name, errno, strerror(errno));
1952 goto fail;
1953 }
1954 }
1955
1956 /* If this is a SET?ID program, dup /dev/null to opened stdin,
1957 stdout and stderr to close a security hole described in:
1958
1959 ftp://ftp.freebsd.org/pub/FreeBSD/CERT/advisories/FreeBSD-SA-02:23.stdio.asc
1960
1961 */
1962 if (program_is_setuid)
1963 nullify_closed_stdio ();
1964 notify_gdb_of_load(si);
1965 return 0;
1966
1967 fail:
1968 ERROR("failed to link %s\n", si->name);
1969 si->flags |= FLAG_ERROR;
1970 return -1;
1971 }
1972
1973 static void parse_library_path(const char *path, char *delim)
1974 {
1975 size_t len;
1976 char *ldpaths_bufp = ldpaths_buf;
1977 int i = 0;
1978
1979 len = strlcpy(ldpaths_buf, path, sizeof(ldpaths_buf));
1980
1981 while (i < LDPATH_MAX && (ldpaths[i] = strsep(&ldpaths_bufp, delim))) {
1982 if (*ldpaths[i] != '\0')
1983 ++i;
1984 }
1985
1986 /* Forget the last path if we had to truncate; this occurs if the 2nd to
1987 * last char isn't '\0' (i.e. not originally a delim). */
1988 if (i > 0 && len >= sizeof(ldpaths_buf) &&
1989 ldpaths_buf[sizeof(ldpaths_buf) - 2] != '\0') {
1990 ldpaths[i - 1] = NULL;
1991 } else {
1992 ldpaths[i] = NULL;
1993 }
1994 }
1995
1996 static void parse_preloads(const char *path, char *delim)
1997 {
1998 size_t len;
1999 char *ldpreloads_bufp = ldpreloads_buf;
2000 int i = 0;
2001
2002 len = strlcpy(ldpreloads_buf, path, sizeof(ldpreloads_buf));
2003
2004 while (i < LDPRELOAD_MAX && (ldpreload_names[i] = strsep(&ldpreloads_bufp, delim))) {
2005 if (*ldpreload_names[i] != '\0') {
2006 ++i;
2007 }
2008 }
2009
2010 /* Forget the last path if we had to truncate; this occurs if the 2nd to
2011 * last char isn't '\0' (i.e. not originally a delim). */
2012 if (i > 0 && len >= sizeof(ldpreloads_buf) &&
2013 ldpreloads_buf[sizeof(ldpreloads_buf) - 2] != '\0') {
2014 ldpreload_names[i - 1] = NULL;
2015 } else {
2016 ldpreload_names[i] = NULL;
2017 }
2018 }
2019
2020 /*
2021 * This code is called after the linker has linked itself and
2022 * fixed it's own GOT. It is safe to make references to externs
2023 * and other non-local data at this point.
2024 */
/*
 * Main body of the linker, run only after the linker has relocated itself
 * (see __linker_init below).  Walks the kernel-provided process data block
 * (argc/argv/envp/auxv), builds the soinfo for the main executable, links
 * it and all of its dependencies, runs their constructors, and returns the
 * executable's entry point for begin.S to jump to.
 *
 * elfdata: pointer to the start of the kernel process data block
 *          (argc, then argv[], then envp[], then the ELF aux vectors).
 * Returns: the virtual address of the main executable's entry point.
 * On any failure the process is terminated with exit(-1).
 */
static unsigned __linker_init_post_relocation(unsigned **elfdata)
{
    /* Static (not on the soinfo pool) on purpose: gdb needs the linker in
     * the debug map, but the linker must not appear on the soinfo list. */
    static soinfo linker_soinfo;

    /* Decode the kernel process data block: argc, then the argv array,
     * then (past argv's NULL terminator) the environment block. */
    int argc = (int) *elfdata;
    char **argv = (char**) (elfdata + 1);
    unsigned *vecs = (unsigned*) (argv + argc + 1);
    unsigned *v;
    soinfo *si;
    struct link_map * map;
    const char *ldpath_env = NULL;
    const char *ldpreload_env = NULL;

    /* NOTE: we store the elfdata pointer on a special location
     * of the temporary TLS area in order to pass it to
     * the C Library's runtime initializer.
     *
     * The initializer must clear the slot and reset the TLS
     * to point to a different location to ensure that no other
     * shared library constructor can access it.
     */
    /* Disabled in this (hybris) build; presumably the host C library
     * performs its own TLS bootstrap - TODO confirm against begin.S. */
#if 0
    __libc_init_tls(elfdata);
#endif

    /* Cache our pid for the debug/trace macros used below. */
    pid = getpid();

#if TIMING
    struct timeval t0, t1;
    gettimeofday(&t0, 0);
#endif

    /* Initialize environment functions, and get to the ELF aux vectors table */
    vecs = linker_env_init(vecs);

    /* Check auxv for AT_SECURE first to see if program is setuid, setgid,
       has file caps, or caused a SELinux/AppArmor domain transition. */
    for (v = vecs; v[0]; v += 2) {
        if (v[0] == AT_SECURE) {
            /* kernel told us whether to enable secure mode */
            program_is_setuid = v[1];
            goto sanitize;
        }
    }

    /* Kernel did not provide AT_SECURE - fall back on legacy test. */
    program_is_setuid = (getuid() != geteuid()) || (getgid() != getegid());

sanitize:
    /* Sanitize environment if we're loading a setuid program */
    if (program_is_setuid)
        linker_env_secure();

    /* Debugger signal handler registration - disabled in this build. */
#if 0
    debugger_init();
#endif

    /* Get a few environment variables */
    {
#if LINKER_DEBUG
        const char* env;
        env = linker_env_get("DEBUG"); /* XXX: TODO: Change to LD_DEBUG */
        if (env)
            debug_verbosity = atoi(env);
#endif

        /* Normally, these are cleaned by linker_env_secure, but the test
         * against program_is_setuid doesn't cost us anything */
        if (!program_is_setuid) {
            ldpath_env = getenv("LD_LIBRARY_PATH");
            ldpreload_env = getenv("LD_PRELOAD");
        }
    }

    INFO("[ android linker & debugger ]\n");
    DEBUG("%5d elfdata @ 0x%08x\n", pid, (unsigned)elfdata);

    /* Allocate the soinfo describing the main executable (argv[0]). */
    si = alloc_info(argv[0]);
    if(si == 0) {
        exit(-1);
    }

    /* bootstrap the link map, the main exe always needs to be first */
    si->flags |= FLAG_EXE;
    map = &(si->linkmap);

    map->l_addr = 0;
    map->l_name = argv[0];
    map->l_prev = NULL;
    map->l_next = NULL;

    /* Publish the link map head so gdb's rtld hooks can walk it. */
    _r_debug.r_map = map;
    r_debug_tail = map;

    /* gdb expects the linker to be in the debug shared object list,
     * and we need to make sure that the reported load address is zero.
     * Without this, gdb gets the wrong idea of where rtld_db_dlactivity()
     * is. Don't use alloc_info(), because the linker shouldn't
     * be on the soinfo list.
     */
    strlcpy((char*) linker_soinfo.name, "/system/bin/linker", sizeof linker_soinfo.name);
    linker_soinfo.flags = 0;
    linker_soinfo.base = 0; // This is the important part; must be zero.
    insert_soinfo_into_debug_map(&linker_soinfo);

    /* extract information passed from the kernel: the main executable's
     * program header table, its size, and its entry point. */
    while(vecs[0] != 0){
        switch(vecs[0]){
        case AT_PHDR:
            si->phdr = (Elf32_Phdr*) vecs[1];
            break;
        case AT_PHNUM:
            si->phnum = (int) vecs[1];
            break;
        case AT_ENTRY:
            si->entry = vecs[1];
            break;
        }
        vecs += 2;
    }

    /* Compute the value of si->base. We can't rely on the fact that
     * the first entry is the PHDR because this will not be true
     * for certain executables (e.g. some in the NDK unit test suite)
     */
    /* base = actual phdr address minus the p_vaddr the PHDR segment was
     * linked at, i.e. the executable's load bias (0 for non-PIE). */
    int nn;
    si->base = 0;
    for ( nn = 0; nn < si->phnum; nn++ ) {
        if (si->phdr[nn].p_type == PT_PHDR) {
            si->base = (Elf32_Addr) si->phdr - si->phdr[nn].p_vaddr;
            break;
        }
    }
    /* Sentinel values: link_image() fills these in from the PT_DYNAMIC
     * and PT_LOAD segments. */
    si->dynamic = (unsigned *)-1;
    si->wrprotect_start = 0xffffffff;
    si->wrprotect_end = 0;
    si->refcount = 1;
    si->gnu_relro_start = 0;
    si->gnu_relro_len = 0;

    /* Use LD_LIBRARY_PATH if we aren't setuid/setgid */
    if (ldpath_env)
        parse_library_path(ldpath_env, ":");

    if (ldpreload_env) {
        parse_preloads(ldpreload_env, " :");
    }

    /* Link the executable and everything it depends on; on failure dump
     * the detailed linker error buffer before the generic message. */
    if(link_image(si, 0)) {
        char errmsg[] = "CANNOT LINK EXECUTABLE\n";
        write(2, __linker_dl_err_buf, strlen(__linker_dl_err_buf));
        write(2, errmsg, sizeof(errmsg));
        exit(-1);
    }

    /* Run DT_INIT/DT_INIT_ARRAY constructors, dependencies first. */
    call_constructors_recursive(si);

#if ALLOW_SYMBOLS_FROM_MAIN
    /* Set somain after we've loaded all the libraries in order to prevent
     * linking of symbols back to the main image, which is not set up at that
     * point yet.
     */
    somain = si;
#endif

#if TIMING
    gettimeofday(&t1,NULL);
    PRINT("LINKER TIME: %s: %d microseconds\n", argv[0], (int) (
        (((long long)t1.tv_sec * 1000000LL) + (long long)t1.tv_usec) -
        (((long long)t0.tv_sec * 1000000LL) + (long long)t0.tv_usec)
        ));
#endif
#if STATS
    PRINT("RELO STATS: %s: %d abs, %d rel, %d copy, %d symbol\n", argv[0],
          linker_stats.reloc[RELOC_ABSOLUTE],
          linker_stats.reloc[RELOC_RELATIVE],
          linker_stats.reloc[RELOC_COPY],
          linker_stats.reloc[RELOC_SYMBOL]);
#endif
#if COUNT_PAGES
    {
        /* Count set bits in the page-dirty bitmask: one bit per page,
         * 8 pages per bitmask byte. */
        unsigned n;
        unsigned i;
        unsigned count = 0;
        for(n = 0; n < 4096; n++){
            if(bitmask[n]){
                unsigned x = bitmask[n];
                for(i = 0; i < 8; i++){
                    if(x & 1) count++;
                    x >>= 1;
                }
            }
        }
        PRINT("PAGES MODIFIED: %s: %d (%dKB)\n", argv[0], count, count * 4);
    }
#endif

#if TIMING || STATS || COUNT_PAGES
    fflush(stdout);
#endif

    TRACE("[ %5d Ready to execute '%s' @ 0x%08x ]\n", pid, si->name,
          si->entry);
    /* begin.S jumps to this address. */
    return si->entry;
}
2230
2231 /*
2232 * Find the value of AT_BASE passed to us by the kernel. This is the load
2233 * location of the linker.
2234 */
/*
 * Locate the AT_BASE entry in the ELF auxiliary vector passed by the
 * kernel, i.e. the address at which the kernel mapped the dynamic
 * linker itself.  The layout walked here is: argc, argv[] (NULL
 * terminated), envp[] (NULL terminated), then (type, value) auxv pairs
 * terminated by AT_NULL.  Returns 0 if AT_BASE is absent, which should
 * never happen for a kernel-loaded interpreter.
 */
static unsigned find_linker_base(unsigned **elfdata) {
    int nargs = (int) *elfdata;
    char **args = (char **) (elfdata + 1);

    /* Jump over argv[]: nargs entries plus its NULL terminator. */
    unsigned *p = (unsigned *) (args + nargs + 1);

    /* Skip the environment block, which ends at a NULL entry. */
    for (; p[0] != 0; p++)
        ;

    /* The end of the environment block is marked by two NULL pointers */
    p++;

    /* Scan the auxiliary vector for AT_BASE. */
    for (; p[0] != 0; p += 2) {
        if (p[0] == AT_BASE)
            return p[1];
    }

    return 0; // should never happen
}
2255
2256 /*
2257 * This is the entry point for the linker, called from begin.S. This
2258 * method is responsible for fixing the linker's own relocations, and
2259 * then calling __linker_init_post_relocation().
2260 *
 * Because this method is called before the linker has fixed its own
2262 * relocations, any attempt to reference an extern variable, extern
2263 * function, or other GOT reference will generate a segfault.
2264 */
2265 unsigned __linker_init(unsigned **elfdata) {
2266 unsigned linker_addr = find_linker_base(elfdata);
2267 Elf32_Ehdr *elf_hdr = (Elf32_Ehdr *) linker_addr;
2268 Elf32_Phdr *phdr =
2269 (Elf32_Phdr *)((unsigned char *) linker_addr + elf_hdr->e_phoff);
2270
2271 soinfo linker_so;
2272 memset(&linker_so, 0, sizeof(soinfo));
2273
2274 linker_so.base = linker_addr;
2275 linker_so.dynamic = (unsigned *) -1;
2276 linker_so.phdr = phdr;
2277 linker_so.phnum = elf_hdr->e_phnum;
2278 linker_so.flags |= FLAG_LINKER;
2279 linker_so.wrprotect_start = 0xffffffff;
2280 linker_so.wrprotect_end = 0;
2281 linker_so.gnu_relro_start = 0;
2282 linker_so.gnu_relro_len = 0;
2283
2284 if (link_image(&linker_so, 0)) {
2285 // It would be nice to print an error message, but if the linker
2286 // can't link itself, there's no guarantee that we'll be able to
2287 // call write() (because it involves a GOT reference).
2288 //
2289 // This situation should never occur unless the linker itself
2290 // is corrupt.
2291 exit(-1);
2292 }
2293
2294 // We have successfully fixed our own relocations. It's safe to run
2295 // the main part of the linker now.
2296 return __linker_init_post_relocation(elfdata);
2297 }