/* Imported Upstream version 0.1.0+git20131207+e452e83
 * (deb_libhybris.git: hybris/common/jb/linker.c) */
1 /*
2 * Copyright (C) 2008, 2009 The Android Open Source Project
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the
13 * distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
18 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
19 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
22 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
25 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29 #include <linux/auxvec.h>
30
31 #include <stdio.h>
32 #include <stdlib.h>
33 #include <string.h>
34 #include <unistd.h>
35 #include <fcntl.h>
36 #include <errno.h>
37 #include <dlfcn.h>
38 #include <sys/stat.h>
39
40 #include <pthread.h>
41
42 #include <sys/mman.h>
43
44 /* special private C library header - see Android.mk */
45 //#include "bionic_tls.h"
46
47 #include "linker.h"
48 #include "linker_debug.h"
49 #include "linker_environ.h"
50 #include "linker_format.h"
51
52 #define ALLOW_SYMBOLS_FROM_MAIN 1
53 #define SO_MAX 128
54
55 /* Assume average path length of 64 and max 8 paths */
56 #define LDPATH_BUFSIZE 512
57 #define LDPATH_MAX 8
58
59 #define LDPRELOAD_BUFSIZE 512
60 #define LDPRELOAD_MAX 8
61
62 /* >>> IMPORTANT NOTE - READ ME BEFORE MODIFYING <<<
63 *
64 * Do NOT use malloc() and friends or pthread_*() code here.
65 * Don't use printf() either; it's caused mysterious memory
66 * corruption in the past.
67 * The linker runs before we bring up libc and it's easiest
68 * to make sure it does not depend on any complex libc features
69 *
70 * open issues / todo:
71 *
72 * - are we doing everything we should for ARM_COPY relocations?
73 * - cleaner error reporting
74 * - after linking, set as much stuff as possible to READONLY
75 * and NOEXEC
76 * - linker hardcodes PAGE_SIZE and PAGE_MASK because the kernel
77 * headers provide versions that are negative...
78 * - allocate space for soinfo structs dynamically instead of
79 * having a hard limit (64)
80 */
81
82
static int link_image(soinfo *si, unsigned wr_offset);

/* Static pool of soinfo records; the linker never heap-allocates these. */
static int socount = 0;               /* slots ever carved out of sopool */
static soinfo sopool[SO_MAX];
static soinfo *freelist = NULL;       /* slots recycled by free_info() */
static soinfo *solist = &libdl_info;  /* head of the loaded-objects list */
static soinfo *sonext = &libdl_info;  /* tail of the loaded-objects list */
#if ALLOW_SYMBOLS_FROM_MAIN
static soinfo *somain; /* main process, always the one after libdl_info */
#endif
93
94
95 static inline int validate_soinfo(soinfo *si)
96 {
97 return (si >= sopool && si < sopool + SO_MAX) ||
98 si == &libdl_info;
99 }
100
/* HYBRIS_LD_LIBRARY_PATH directories, parsed lazily on first lookup. */
static char ldpaths_buf[LDPATH_BUFSIZE];
static const char *ldpaths[LDPATH_MAX + 1];

/* Preload library names and the soinfo entries loaded for them. */
static char ldpreloads_buf[LDPRELOAD_BUFSIZE];
static const char *ldpreload_names[LDPRELOAD_MAX + 1];

static soinfo *preloads[LDPRELOAD_MAX + 1];

#if LINKER_DEBUG
int debug_verbosity = 0;
int debug_stdout = 0;
#endif

static int pid;

/* This boolean is set if the program being loaded is setuid */
static int program_is_setuid;

#if STATS
struct _link_stats linker_stats;
#endif

#if COUNT_PAGES
unsigned bitmask[4096];
#endif
126
127 #ifndef PT_ARM_EXIDX
128 #define PT_ARM_EXIDX 0x70000001 /* .ARM.exidx segment */
129 #endif
130
131 #if 0
132 // disable abort() since this is not a linker anymore
133 #define HOODLUM(name, ret, ...) \
134 ret name __VA_ARGS__ \
135 { \
136 char errstr[] = "ERROR: " #name " called from the dynamic linker!\n"; \
137 write(2, errstr, sizeof(errstr)); \
138 abort(); \
139 }
140 HOODLUM(malloc, void *, (size_t size));
141 HOODLUM(free, void, (void *ptr));
142 HOODLUM(realloc, void *, (void *ptr, size_t size));
143 HOODLUM(calloc, void *, (size_t cnt, size_t size));
144 #endif
145
static char tmp_err_buf[768];
static char __linker_dl_err_buf[768];
/* Record a formatted error message (with function and line) in
 * __linker_dl_err_buf — retrievable via linker_get_error() — and also
 * emit it through the ERROR() log channel. */
#define DL_ERR(fmt, x...) \
    do { \
        format_buffer(__linker_dl_err_buf, sizeof(__linker_dl_err_buf), \
                "%s[%d]: " fmt, __func__, __LINE__, ##x); \
        ERROR(fmt "\n", ##x); \
    } while(0)
154
155 const char *linker_get_error(void)
156 {
157 return (const char *)&__linker_dl_err_buf[0];
158 }
159
/*
 * This function is an empty stub where GDB locates a breakpoint to get notified
 * about linker activity.
 */
extern void __attribute__((noinline)) rtld_db_dlactivity(void);

/* GDB rendezvous structure: version 1, empty map, the stub above as the
 * notification breakpoint, starting in the consistent state. */
static struct r_debug _r_debug = {1, NULL, &rtld_db_dlactivity,
                                  RT_CONSISTENT, 0};
/* Tail of the link_map chain published through _r_debug.r_map. */
static struct link_map *r_debug_tail = 0;

/* Serializes _r_debug updates so GDB never observes a torn list. */
static pthread_mutex_t _r_debug_lock = PTHREAD_MUTEX_INITIALIZER;
171
172 static void insert_soinfo_into_debug_map(soinfo * info)
173 {
174 struct link_map * map;
175
176 /* Copy the necessary fields into the debug structure.
177 */
178 map = &(info->linkmap);
179 map->l_addr = info->base;
180 map->l_name = (char*) info->name;
181 map->l_ld = (uintptr_t)info->dynamic;
182
183 /* Stick the new library at the end of the list.
184 * gdb tends to care more about libc than it does
185 * about leaf libraries, and ordering it this way
186 * reduces the back-and-forth over the wire.
187 */
188 if (r_debug_tail) {
189 r_debug_tail->l_next = map;
190 map->l_prev = r_debug_tail;
191 map->l_next = 0;
192 } else {
193 _r_debug.r_map = map;
194 map->l_prev = 0;
195 map->l_next = 0;
196 }
197 r_debug_tail = map;
198 }
199
200 static void remove_soinfo_from_debug_map(soinfo * info)
201 {
202 struct link_map * map = &(info->linkmap);
203
204 if (r_debug_tail == map)
205 r_debug_tail = map->l_prev;
206
207 if (map->l_prev) map->l_prev->l_next = map->l_next;
208 if (map->l_next) map->l_next->l_prev = map->l_prev;
209 }
210
211 void notify_gdb_of_load(soinfo * info)
212 {
213 if (info->flags & FLAG_EXE) {
214 // GDB already knows about the main executable
215 return;
216 }
217
218 pthread_mutex_lock(&_r_debug_lock);
219
220 _r_debug.r_state = RT_ADD;
221 rtld_db_dlactivity();
222
223 insert_soinfo_into_debug_map(info);
224
225 _r_debug.r_state = RT_CONSISTENT;
226 rtld_db_dlactivity();
227
228 pthread_mutex_unlock(&_r_debug_lock);
229 }
230
231 void notify_gdb_of_unload(soinfo * info)
232 {
233 if (info->flags & FLAG_EXE) {
234 // GDB already knows about the main executable
235 return;
236 }
237
238 pthread_mutex_lock(&_r_debug_lock);
239
240 _r_debug.r_state = RT_DELETE;
241 rtld_db_dlactivity();
242
243 remove_soinfo_from_debug_map(info);
244
245 _r_debug.r_state = RT_CONSISTENT;
246 rtld_db_dlactivity();
247
248 pthread_mutex_unlock(&_r_debug_lock);
249 }
250
251 void notify_gdb_of_libraries()
252 {
253 pthread_mutex_lock(&_r_debug_lock);
254 _r_debug.r_state = RT_ADD;
255 rtld_db_dlactivity();
256 _r_debug.r_state = RT_CONSISTENT;
257 rtld_db_dlactivity();
258 pthread_mutex_unlock(&_r_debug_lock);
259 }
260
/* Allocate a zeroed soinfo slot for `name` from the static pool and
 * append it to the solist/sonext chain.  Returns NULL when the name is
 * too long or all SO_MAX slots are in use. */
static soinfo *alloc_info(const char *name)
{
    soinfo *si;

    if(strlen(name) >= SOINFO_NAME_LEN) {
        DL_ERR("%5d library name %s too long", pid, name);
        return NULL;
    }

    /* The freelist is populated when we call free_info(), which in turn is
       done only by dlclose(), which is not likely to be used.
    */
    if (!freelist) {
        if(socount == SO_MAX) {
            DL_ERR("%5d too many libraries when loading %s", pid, name);
            return NULL;
        }
        /* Carve the next never-used slot out of the static pool. */
        freelist = sopool + socount++;
        freelist->next = NULL;
    }

    si = freelist;
    freelist = freelist->next;

    /* Make sure we get a clean block of soinfo */
    memset(si, 0, sizeof(soinfo));
    strlcpy((char*) si->name, name, sizeof(si->name));
    sonext->next = si;
    si->next = NULL;
    si->refcount = 0;
    sonext = si;

    TRACE("%5d name %s: allocated soinfo @ %p\n", pid, name, si);
    return si;
}
296
/* Unlink si from solist and push it onto the freelist for reuse by
 * alloc_info().  Logs an error and does nothing if si is not listed. */
static void free_info(soinfo *si)
{
    soinfo *prev = NULL, *trav;

    TRACE("%5d name %s: freeing soinfo @ %p\n", pid, si->name, si);

    for(trav = solist; trav != NULL; trav = trav->next){
        if (trav == si)
            break;
        prev = trav;
    }
    if (trav == NULL) {
        /* si was not in solist */
        DL_ERR("%5d name %s is not in solist!", pid, si->name);
        return;
    }

    /* prev will never be NULL, because the first entry in solist is
       always the static libdl_info.
    */
    prev->next = si->next;
    if (si == sonext) sonext = prev;
    si->next = freelist;
    freelist = si;
}
322
323 const char *addr_to_name(unsigned addr)
324 {
325 soinfo *si;
326
327 for(si = solist; si != 0; si = si->next){
328 if((addr >= si->base) && (addr < (si->base + si->size))) {
329 return si->name;
330 }
331 }
332
333 return "";
334 }
335
336 /* For a given PC, find the .so that it belongs to.
337 * Returns the base address of the .ARM.exidx section
338 * for that .so, and the number of 8-byte entries
339 * in that section (via *pcount).
340 *
341 * Intended to be called by libc's __gnu_Unwind_Find_exidx().
342 *
343 * This function is exposed via dlfcn.c and libdl.so.
344 */
345 #ifdef ANDROID_ARM_LINKER
346 _Unwind_Ptr android_dl_unwind_find_exidx(_Unwind_Ptr pc, int *pcount)
347 {
348 soinfo *si;
349 unsigned addr = (unsigned)pc;
350
351 for (si = solist; si != 0; si = si->next){
352 if ((addr >= si->base) && (addr < (si->base + si->size))) {
353 *pcount = si->ARM_exidx_count;
354 return (_Unwind_Ptr)(si->base + (unsigned long)si->ARM_exidx);
355 }
356 }
357 *pcount = 0;
358 return NULL;
359 }
360 #elif defined(ANDROID_X86_LINKER)
361 /* Here, we only have to provide a callback to iterate across all the
362 * loaded libraries. gcc_eh does the rest. */
/* Invoke cb once per loaded object with a dl_phdr_info describing its
 * load address, name and program headers.  Stops at the first nonzero
 * callback return value, which is propagated to the caller. */
int
android_dl_iterate_phdr(int (*cb)(struct dl_phdr_info *info, size_t size, void *data),
                        void *data)
{
    soinfo *si;
    struct dl_phdr_info dl_info;
    int rv = 0;

    for (si = solist; si != NULL; si = si->next) {
        dl_info.dlpi_addr = si->linkmap.l_addr;
        dl_info.dlpi_name = si->linkmap.l_name;
        dl_info.dlpi_phdr = si->phdr;
        dl_info.dlpi_phnum = si->phnum;
        rv = cb(&dl_info, sizeof (struct dl_phdr_info), data);
        if (rv != 0)
            break;
    }
    return rv;
}
382 #endif
383
384 static Elf32_Sym *_elf_lookup(soinfo *si, unsigned hash, const char *name)
385 {
386 Elf32_Sym *s;
387 Elf32_Sym *symtab = si->symtab;
388 const char *strtab = si->strtab;
389 unsigned n;
390
391 TRACE_TYPE(LOOKUP, "%5d SEARCH %s in %s@0x%08x %08x %d\n", pid,
392 name, si->name, si->base, hash, hash % si->nbucket);
393 n = hash % si->nbucket;
394
395 for(n = si->bucket[hash % si->nbucket]; n != 0; n = si->chain[n]){
396 s = symtab + n;
397 if(strcmp(strtab + s->st_name, name)) continue;
398
399 /* only concern ourselves with global and weak symbol definitions */
400 switch(ELF32_ST_BIND(s->st_info)){
401 case STB_GLOBAL:
402 case STB_WEAK:
403 /* no section == undefined */
404 if(s->st_shndx == 0) continue;
405
406 TRACE_TYPE(LOOKUP, "%5d FOUND %s in %s (%08x) %d\n", pid,
407 name, si->name, s->st_value, s->st_size);
408 return s;
409 }
410 }
411
412 return NULL;
413 }
414
/* Classic SysV ELF hash over a NUL-terminated symbol name. */
static unsigned elfhash(const char *_name)
{
    const unsigned char *p = (const unsigned char *) _name;
    unsigned hash = 0;

    for (; *p; p++) {
        unsigned top;

        hash = (hash << 4) + *p;
        top = hash & 0xf0000000;
        hash ^= top;
        hash ^= top >> 24;
    }
    return hash;
}
428
/* Resolve `name` for relocation processing of si.
 * Search order: si's own symbol table, the preload objects, si's
 * DT_NEEDED dependencies, and finally (when ALLOW_SYMBOLS_FROM_MAIN)
 * the main executable.  On success *base is set to the base address of
 * the object that supplied the definition. */
static Elf32_Sym *
_do_lookup(soinfo *si, const char *name, unsigned *base)
{
    unsigned elf_hash = elfhash(name);
    Elf32_Sym *s;
    unsigned *d;
    soinfo *lsi = si;   /* object in which the symbol was last searched */
    int i;

    /* Look for symbols in the local scope (the object who is
     * searching). This happens with C++ templates on i386 for some
     * reason.
     *
     * Notes on weak symbols:
     * The ELF specs are ambigious about treatment of weak definitions in
     * dynamic linking. Some systems return the first definition found
     * and some the first non-weak definition. This is system dependent.
     * Here we return the first definition found for simplicity. */

    s = _elf_lookup(si, elf_hash, name);
    if(s != NULL)
        goto done;

    /* Next, look for it in the preloads list */
    for(i = 0; preloads[i] != NULL; i++) {
        lsi = preloads[i];
        s = _elf_lookup(lsi, elf_hash, name);
        if(s != NULL)
            goto done;
    }

    /* Walk the DT_* table two words at a time (tag, value).  For
     * DT_NEEDED, d[1] is treated as a soinfo pointer here — presumably
     * patched in during linking elsewhere in this file — and is
     * validated before being dereferenced. */
    for(d = si->dynamic; *d; d += 2) {
        if(d[0] == DT_NEEDED){
            lsi = (soinfo *)d[1];
            if (!validate_soinfo(lsi)) {
                DL_ERR("%5d bad DT_NEEDED pointer in %s",
                       pid, si->name);
                return NULL;
            }

            DEBUG("%5d %s: looking up %s in %s\n",
                  pid, si->name, name, lsi->name);
            s = _elf_lookup(lsi, elf_hash, name);
            if ((s != NULL) && (s->st_shndx != SHN_UNDEF))
                goto done;
        }
    }

#if ALLOW_SYMBOLS_FROM_MAIN
    /* If we are resolving relocations while dlopen()ing a library, it's OK for
     * the library to resolve a symbol that's defined in the executable itself,
     * although this is rare and is generally a bad idea.
     */
    if (somain) {
        lsi = somain;
        DEBUG("%5d %s: looking up %s in executable %s\n",
              pid, si->name, name, lsi->name);
        s = _elf_lookup(lsi, elf_hash, name);
    }
#endif

done:
    if(s != NULL) {
        TRACE_TYPE(LOOKUP, "%5d si %s sym %s s->st_value = 0x%08x, "
                   "found in %s, base = 0x%08x\n",
                   pid, si->name, name, s->st_value, lsi->name, lsi->base);
        *base = lsi->base;
        return s;
    }

    return NULL;
}
501
502 /* This is used by dl_sym(). It performs symbol lookup only within the
503 specified soinfo object and not in any of its dependencies.
504 */
505 Elf32_Sym *lookup_in_library(soinfo *si, const char *name)
506 {
507 return _elf_lookup(si, elfhash(name), name);
508 }
509
510 /* This is used by dl_sym(). It performs a global symbol lookup.
511 */
512 Elf32_Sym *lookup(const char *name, soinfo **found, soinfo *start)
513 {
514 unsigned elf_hash = elfhash(name);
515 Elf32_Sym *s = NULL;
516 soinfo *si;
517
518 if(start == NULL) {
519 start = solist;
520 }
521
522 for(si = start; (s == NULL) && (si != NULL); si = si->next)
523 {
524 if(si->flags & FLAG_ERROR)
525 continue;
526 s = _elf_lookup(si, elf_hash, name);
527 if (s != NULL) {
528 *found = si;
529 break;
530 }
531 }
532
533 if(s != NULL) {
534 TRACE_TYPE(LOOKUP, "%5d %s s->st_value = 0x%08x, "
535 "si->base = 0x%08x\n", pid, name, s->st_value, si->base);
536 return s;
537 }
538
539 return NULL;
540 }
541
542 soinfo *find_containing_library(const void *addr)
543 {
544 soinfo *si;
545
546 for(si = solist; si != NULL; si = si->next)
547 {
548 if((unsigned)addr >= si->base && (unsigned)addr - si->base < si->size) {
549 return si;
550 }
551 }
552
553 return NULL;
554 }
555
556 Elf32_Sym *find_containing_symbol(const void *addr, soinfo *si)
557 {
558 unsigned int i;
559 unsigned soaddr = (unsigned)addr - si->base;
560
561 /* Search the library's symbol table for any defined symbol which
562 * contains this address */
563 for(i=0; i<si->nchain; i++) {
564 Elf32_Sym *sym = &si->symtab[i];
565
566 if(sym->st_shndx != SHN_UNDEF &&
567 soaddr >= sym->st_value &&
568 soaddr < sym->st_value + sym->st_size) {
569 return sym;
570 }
571 }
572
573 return NULL;
574 }
575
576 #if 0
577 static void dump(soinfo *si)
578 {
579 Elf32_Sym *s = si->symtab;
580 unsigned n;
581
582 for(n = 0; n < si->nchain; n++) {
583 TRACE("%5d %04d> %08x: %02x %04x %08x %08x %s\n", pid, n, s,
584 s->st_info, s->st_shndx, s->st_value, s->st_size,
585 si->strtab + s->st_name);
586 s++;
587 }
588 }
589 #endif
590
/* Built-in fallback search directories, consulted after the
 * HYBRIS_LD_LIBRARY_PATH directories. */
static const char *sopaths[] = {
    "/vendor/lib",
    "/system/lib",
    0
};
596
/* Open `name` read-only, but only if it exists and is a regular file.
 * Returns the open file descriptor, or -1 on any failure. */
static int _open_lib(const char *name)
{
    struct stat filestat;

    if (stat(name, &filestat) < 0)
        return -1;
    if (!S_ISREG(filestat.st_mode))
        return -1;

    return open(name, O_RDONLY);
}
609
610 static void parse_library_path(const char *path, char *delim);
611
612 static int open_library(const char *name)
613 {
614 int fd;
615 char buf[512];
616 const char **path;
617 int n;
618
619 TRACE("[ %5d opening %s ]\n", pid, name);
620
621 if(name == 0) return -1;
622 if(strlen(name) > 256) return -1;
623
624 if ((name[0] == '/') && ((fd = _open_lib(name)) >= 0))
625 return fd;
626
627 #ifdef DEFAULT_HYBRIS_LD_LIBRARY_PATH
628 if (getenv("HYBRIS_LD_LIBRARY_PATH") == NULL && *ldpaths == 0)
629 {
630 parse_library_path(DEFAULT_HYBRIS_LD_LIBRARY_PATH, ":");
631 }
632 #endif
633 if (getenv("HYBRIS_LD_LIBRARY_PATH") != NULL && *ldpaths == 0)
634 {
635 parse_library_path(getenv("HYBRIS_LD_LIBRARY_PATH"), ":");
636 }
637
638 for (path = ldpaths; *path; path++) {
639 n = format_buffer(buf, sizeof(buf), "%s/%s", *path, name);
640 if (n < 0 || n >= (int)sizeof(buf)) {
641 WARN("Ignoring very long library path: %s/%s\n", *path, name);
642 continue;
643 }
644 if ((fd = _open_lib(buf)) >= 0)
645 return fd;
646 }
647 for (path = sopaths; *path; path++) {
648 n = format_buffer(buf, sizeof(buf), "%s/%s", *path, name);
649 if (n < 0 || n >= (int)sizeof(buf)) {
650 WARN("Ignoring very long library path: %s/%s\n", *path, name);
651 continue;
652 }
653 if ((fd = _open_lib(buf)) >= 0)
654 return fd;
655 }
656
657 return -1;
658 }
659
/* temporary space for holding the first page of the shared lib
 * which contains the elf header (with the pht). */
static unsigned char __header[PAGE_SIZE];

/* Trailer appended to prelinked libraries: the requested mapping
 * address followed by the literal tag "PRE ". */
typedef struct {
    long mmap_addr;
    char tag[4]; /* 'P', 'R', 'E', ' ' */
} prelink_info_t;
668
669 /* Returns the requested base address if the library is prelinked,
670 * and 0 otherwise. */
671 static unsigned long
672 is_prelinked(int fd, const char *name)
673 {
674 off_t sz;
675 prelink_info_t info;
676
677 sz = lseek(fd, -sizeof(prelink_info_t), SEEK_END);
678 if (sz < 0) {
679 DL_ERR("lseek() failed!");
680 return 0;
681 }
682
683 if (read(fd, &info, sizeof(info)) != sizeof(info)) {
684 INFO("Could not read prelink_info_t structure for `%s`\n", name);
685 return 0;
686 }
687
688 if (strncmp(info.tag, "PRE ", 4)) {
689 INFO("`%s` is not a prelinked library\n", name);
690 return 0;
691 }
692
693 return (unsigned long)info.mmap_addr;
694 }
695
/* verify_elf_object
 *      Verifies if the object @ base is a valid ELF object
 *
 * Args:
 *      base: candidate ELF image (at least Elf32_Ehdr bytes readable)
 *      name: library name, currently unused (diagnostics only)
 *
 * Returns:
 *       0 on success
 *      -1 if no valid ELF object is found @ base.
 */
static int
verify_elf_object(void *base, const char *name)
{
    Elf32_Ehdr *hdr = (Elf32_Ehdr *) base;

    /* All four ELF magic bytes must match. */
    if (hdr->e_ident[EI_MAG0] != ELFMAG0 ||
        hdr->e_ident[EI_MAG1] != ELFMAG1 ||
        hdr->e_ident[EI_MAG2] != ELFMAG2 ||
        hdr->e_ident[EI_MAG3] != ELFMAG3)
        return -1;

    /* TODO: Should we verify anything else in the header? */
#ifdef ANDROID_ARM_LINKER
    if (hdr->e_machine != EM_ARM) return -1;
#elif defined(ANDROID_X86_LINKER)
    if (hdr->e_machine != EM_386) return -1;
#endif
    return 0;
}
723
724
725 /* get_lib_extents
726 * Retrieves the base (*base) address where the ELF object should be
727 * mapped and its overall memory size (*total_sz).
728 *
729 * Args:
730 * fd: Opened file descriptor for the library
731 * name: The name of the library
732 * _hdr: Pointer to the header page of the library
733 * total_sz: Total size of the memory that should be allocated for
734 * this library
735 *
736 * Returns:
737 * -1 if there was an error while trying to get the lib extents.
738 * The possible reasons are:
739 * - Could not determine if the library was prelinked.
740 * - The library provided is not a valid ELF object
741 * 0 if the library did not request a specific base offset (normal
742 * for non-prelinked libs)
743 * > 0 if the library requests a specific address to be mapped to.
744 * This indicates a pre-linked library.
745 */
/* See the contract in the comment block above: computes the requested
 * base (0 when not prelinked, (unsigned)-1 on error) and stores the
 * page-rounded total mapping size in *total_sz. */
static unsigned
get_lib_extents(int fd, const char *name, void *__hdr, unsigned *total_sz)
{
    unsigned req_base;
    unsigned min_vaddr = 0xffffffff;  /* sentinel: no PT_LOAD seen yet */
    unsigned max_vaddr = 0;
    unsigned char *_hdr = (unsigned char *)__hdr;
    Elf32_Ehdr *ehdr = (Elf32_Ehdr *)_hdr;
    Elf32_Phdr *phdr;
    int cnt;

    TRACE("[ %5d Computing extents for '%s'. ]\n", pid, name);
    if (verify_elf_object(_hdr, name) < 0) {
        DL_ERR("%5d - %s is not a valid ELF object", pid, name);
        return (unsigned)-1;
    }

    /* is_prelinked() yields the requested mapping address, or 0 for an
     * ordinary library; the -1 comparison below is defensive. */
    req_base = (unsigned) is_prelinked(fd, name);
    if (req_base == (unsigned)-1)
        return -1;
    else if (req_base != 0) {
        TRACE("[ %5d - Prelinked library '%s' requesting base @ 0x%08x ]\n",
              pid, name, req_base);
    } else {
        TRACE("[ %5d - Non-prelinked library '%s' found. ]\n", pid, name);
    }

    phdr = (Elf32_Phdr *)(_hdr + ehdr->e_phoff);

    /* find the min/max p_vaddrs from all the PT_LOAD segments so we can
     * get the range. */
    for (cnt = 0; cnt < ehdr->e_phnum; ++cnt, ++phdr) {
        if (phdr->p_type == PT_LOAD) {
            if ((phdr->p_vaddr + phdr->p_memsz) > max_vaddr)
                max_vaddr = phdr->p_vaddr + phdr->p_memsz;
            if (phdr->p_vaddr < min_vaddr)
                min_vaddr = phdr->p_vaddr;
        }
    }

    if ((min_vaddr == 0xffffffff) && (max_vaddr == 0)) {
        DL_ERR("%5d - No loadable segments found in %s.", pid, name);
        return (unsigned)-1;
    }

    /* truncate min_vaddr down to page boundary */
    min_vaddr &= ~PAGE_MASK;

    /* round max_vaddr up to the next page */
    max_vaddr = (max_vaddr + PAGE_SIZE - 1) & ~PAGE_MASK;

    *total_sz = (max_vaddr - min_vaddr);
    return (unsigned)req_base;
}
800
801 /* reserve_mem_region
802 *
803 * This function reserves a chunk of memory to be used for mapping in
804 * a prelinked shared library. We reserve the entire memory region here, and
805 * then the rest of the linker will relocate the individual loadable
806 * segments into the correct locations within this memory range.
807 *
808 * Args:
809 * si->base: The requested base of the allocation.
810 * si->size: The size of the allocation.
811 *
812 * Returns:
813 * -1 on failure, and 0 on success. On success, si->base will contain
814 * the virtual address at which the library will be mapped.
815 */
816
static int reserve_mem_region(soinfo *si)
{
    /* MAP_FIXED | PROT_NONE: claim exactly [si->base, si->base+si->size)
     * now; the individual segments are mapped over it later. */
    void *base = mmap((void *)si->base, si->size, PROT_NONE,
                      MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (base == MAP_FAILED) {
        DL_ERR("%5d can NOT map (%sprelinked) library '%s' at 0x%08x "
              "as requested, will try general pool: %d (%s)",
              pid, (si->base ? "" : "non-"), si->name, si->base,
              errno, strerror(errno));
        return -1;
    } else if (base != (void *)si->base) {
        /* Kernel placed the mapping elsewhere: treat as failure and undo. */
        DL_ERR("OOPS: %5d %sprelinked library '%s' mapped at 0x%08x, "
              "not at 0x%08x", pid, (si->base ? "" : "non-"),
              si->name, (unsigned)base, si->base);
        munmap(base, si->size);
        return -1;
    }
    return 0;
}
836
/* Obtain address space for si: honor a prelinked base when si->base is
 * nonzero, otherwise let the kernel choose the region.  On success
 * si->base holds the final address and 0 is returned; -1 on failure. */
static int alloc_mem_region(soinfo *si)
{
    if (si->base) {
        /* Attempt to mmap a prelinked library. */
        return reserve_mem_region(si);
    }

    /* This is not a prelinked library, so we use the kernel's default
       allocator.
    */

    void *base = mmap(NULL, si->size, PROT_NONE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (base == MAP_FAILED) {
        DL_ERR("%5d mmap of library '%s' failed: %d (%s)\n",
               pid, si->name,
               errno, strerror(errno));
        goto err;
    }
    si->base = (unsigned) base;
    INFO("%5d mapped library '%s' to %08x via kernel allocator.\n",
         pid, si->name, si->base);
    return 0;

err:
    DL_ERR("OOPS: %5d cannot map library '%s'. no vspace available.",
           pid, si->name);
    return -1;
}
866
867 #define MAYBE_MAP_FLAG(x,from,to) (((x) & (from)) ? (to) : 0)
868 #define PFLAGS_TO_PROT(x) (MAYBE_MAP_FLAG((x), PF_X, PROT_EXEC) | \
869 MAYBE_MAP_FLAG((x), PF_R, PROT_READ) | \
870 MAYBE_MAP_FLAG((x), PF_W, PROT_WRITE))
871 /* load_segments
872 *
873 * This function loads all the loadable (PT_LOAD) segments into memory
874 * at their appropriate memory offsets off the base address.
875 *
876 * Args:
877 * fd: Open file descriptor to the library to load.
878 * header: Pointer to a header page that contains the ELF header.
879 * This is needed since we haven't mapped in the real file yet.
880 * si: ptr to soinfo struct describing the shared object.
881 *
882 * Returns:
883 * 0 on success, -1 on failure.
884 */
/* Map every PT_LOAD segment of the object into the region reserved at
 * si->base, record PT_DYNAMIC / PT_GNU_RELRO / PT_ARM_EXIDX metadata,
 * and leave read-only segments temporarily writable for relocation
 * (remembering the range to re-protect in wrprotect_start/end).
 * Returns 0 on success; on failure unmaps the whole region, sets
 * FLAG_ERROR and returns -1. */
static int
load_segments(int fd, void *header, soinfo *si)
{
    Elf32_Ehdr *ehdr = (Elf32_Ehdr *)header;
    Elf32_Phdr *phdr = (Elf32_Phdr *)((unsigned char *)header + ehdr->e_phoff);
    Elf32_Addr base = (Elf32_Addr) si->base;
    int cnt;
    unsigned len;
    Elf32_Addr tmp;
    unsigned char *pbase;
    unsigned char *extra_base;
    unsigned extra_len;
    unsigned total_sz = 0;

    /* Start with an empty (inverted) write-protect range; it grows as
     * non-writable segments are encountered below. */
    si->wrprotect_start = 0xffffffff;
    si->wrprotect_end = 0;

    TRACE("[ %5d - Begin loading segments for '%s' @ 0x%08x ]\n",
          pid, si->name, (unsigned)si->base);
    /* Now go through all the PT_LOAD segments and map them into memory
     * at the appropriate locations. */
    for (cnt = 0; cnt < ehdr->e_phnum; ++cnt, ++phdr) {
        if (phdr->p_type == PT_LOAD) {
            DEBUG_DUMP_PHDR(phdr, "PT_LOAD", pid);
            /* we want to map in the segment on a page boundary */
            tmp = base + (phdr->p_vaddr & (~PAGE_MASK));
            /* add the # of bytes we masked off above to the total length. */
            len = phdr->p_filesz + (phdr->p_vaddr & PAGE_MASK);

            TRACE("[ %d - Trying to load segment from '%s' @ 0x%08x "
                  "(0x%08x). p_vaddr=0x%08x p_offset=0x%08x ]\n", pid, si->name,
                  (unsigned)tmp, len, phdr->p_vaddr, phdr->p_offset);
            pbase = mmap((void *)tmp, len, PFLAGS_TO_PROT(phdr->p_flags),
                         MAP_PRIVATE | MAP_FIXED, fd,
                         phdr->p_offset & (~PAGE_MASK));
            if (pbase == MAP_FAILED) {
                DL_ERR("%d failed to map segment from '%s' @ 0x%08x (0x%08x). "
                      "p_vaddr=0x%08x p_offset=0x%08x", pid, si->name,
                      (unsigned)tmp, len, phdr->p_vaddr, phdr->p_offset);
                goto fail;
            }

            /* If 'len' didn't end on page boundary, and it's a writable
             * segment, zero-fill the rest. */
            if ((len & PAGE_MASK) && (phdr->p_flags & PF_W))
                memset((void *)(pbase + len), 0, PAGE_SIZE - (len & PAGE_MASK));

            /* Check to see if we need to extend the map for this segment to
             * cover the diff between filesz and memsz (i.e. for bss).
             *
             *  base           _+---------------------+  page boundary
             *                  .                     .
             *                  |                     |
             *                  .                     .
             *  pbase          _+---------------------+  page boundary
             *                  |                     |
             *                  .                     .
             *  base + p_vaddr _|                     |
             *                  . \          \        .
             *                  . | filesz   |        .
             *  pbase + len    _| /          |        |
             *     <0 pad>      .            .        .
             *  extra_base     _+------------|--------+  page boundary
             *               /  .            .        .
             *               |  .            .        .
             *               |  +------------|--------+  page boundary
             *  extra_len->  |  |            |        |
             *               |  .            | memsz  .
             *               |  .            |        .
             *               \ _|            /        |
             *                  .                     .
             *                  |                     |
             *                 _+---------------------+  page boundary
             */
            tmp = (Elf32_Addr)(((unsigned)pbase + len + PAGE_SIZE - 1) &
                                    (~PAGE_MASK));
            if (tmp < (base + phdr->p_vaddr + phdr->p_memsz)) {
                extra_len = base + phdr->p_vaddr + phdr->p_memsz - tmp;
                TRACE("[ %5d - Need to extend segment from '%s' @ 0x%08x "
                      "(0x%08x) ]\n", pid, si->name, (unsigned)tmp, extra_len);
                /* map in the extra page(s) as anonymous into the range.
                 * This is probably not necessary as we already mapped in
                 * the entire region previously, but we just want to be
                 * sure. This will also set the right flags on the region
                 * (though we can probably accomplish the same thing with
                 * mprotect).
                 */
                extra_base = mmap((void *)tmp, extra_len,
                                  PFLAGS_TO_PROT(phdr->p_flags),
                                  MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS,
                                  -1, 0);
                if (extra_base == MAP_FAILED) {
                    DL_ERR("[ %5d - failed to extend segment from '%s' @ 0x%08x"
                           " (0x%08x) ]", pid, si->name, (unsigned)tmp,
                          extra_len);
                    goto fail;
                }
                /* TODO: Check if we need to memset-0 this region.
                 * Anonymous mappings are zero-filled copy-on-writes, so we
                 * shouldn't need to. */
                TRACE("[ %5d - Segment from '%s' extended @ 0x%08x "
                      "(0x%08x)\n", pid, si->name, (unsigned)extra_base,
                      extra_len);
            }
            /* set the len here to show the full extent of the segment we
             * just loaded, mostly for debugging */
            len = (((unsigned)base + phdr->p_vaddr + phdr->p_memsz +
                    PAGE_SIZE - 1) & (~PAGE_MASK)) - (unsigned)pbase;
            TRACE("[ %5d - Successfully loaded segment from '%s' @ 0x%08x "
                  "(0x%08x). p_vaddr=0x%08x p_offset=0x%08x\n", pid, si->name,
                  (unsigned)pbase, len, phdr->p_vaddr, phdr->p_offset);
            total_sz += len;
            /* Make the section writable just in case we'll have to write to
             * it during relocation (i.e. text segment). However, we will
             * remember what range of addresses should be write protected.
             *
             */
            if (!(phdr->p_flags & PF_W)) {
                if ((unsigned)pbase < si->wrprotect_start)
                    si->wrprotect_start = (unsigned)pbase;
                if (((unsigned)pbase + len) > si->wrprotect_end)
                    si->wrprotect_end = (unsigned)pbase + len;
                mprotect(pbase, len,
                         PFLAGS_TO_PROT(phdr->p_flags) | PROT_WRITE);
            }
        } else if (phdr->p_type == PT_DYNAMIC) {
            DEBUG_DUMP_PHDR(phdr, "PT_DYNAMIC", pid);
            /* this segment contains the dynamic linking information */
            si->dynamic = (unsigned *)(base + phdr->p_vaddr);
        } else if (phdr->p_type == PT_GNU_RELRO) {
            /* Validate the RELRO range lies inside the mapping and does
             * not wrap around before remembering it. */
            if ((phdr->p_vaddr >= si->size)
                    || ((phdr->p_vaddr + phdr->p_memsz) > si->size)
                    || ((base + phdr->p_vaddr + phdr->p_memsz) < base)) {
                DL_ERR("%d invalid GNU_RELRO in '%s' "
                       "p_vaddr=0x%08x p_memsz=0x%08x", pid, si->name,
                       phdr->p_vaddr, phdr->p_memsz);
                goto fail;
            }
            si->gnu_relro_start = (Elf32_Addr) (base + phdr->p_vaddr);
            si->gnu_relro_len = (unsigned) phdr->p_memsz;
        } else {
#ifdef ANDROID_ARM_LINKER
            if (phdr->p_type == PT_ARM_EXIDX) {
                DEBUG_DUMP_PHDR(phdr, "PT_ARM_EXIDX", pid);
                /* exidx entries (used for stack unwinding) are 8 bytes each.
                 */
                si->ARM_exidx = (unsigned *)phdr->p_vaddr;
                si->ARM_exidx_count = phdr->p_memsz / 8;
            }
#endif
        }

    }

    /* Sanity check */
    if (total_sz > si->size) {
        DL_ERR("%5d - Total length (0x%08x) of mapped segments from '%s' is "
              "greater than what was allocated (0x%08x). THIS IS BAD!",
              pid, total_sz, si->name, si->size);
        goto fail;
    }

    TRACE("[ %5d - Finish loading segments for '%s' @ 0x%08x. "
          "Total memory footprint: 0x%08x bytes ]\n", pid, si->name,
          (unsigned)si->base, si->size);
    return 0;

fail:
    /* We can just blindly unmap the entire region even though some things
     * were mapped in originally with anonymous and others could have been
     * been mapped in from the file before we failed. The kernel will unmap
     * all the pages in the range, irrespective of how they got there.
     */
    munmap((void *)si->base, si->size);
    si->flags |= FLAG_ERROR;
    return -1;
}
1062
1063 /* TODO: Implement this to take care of the fact that Android ARM
1064 * ELF objects shove everything into a single loadable segment that has the
1065 * write bit set. wr_offset is then used to set non-(data|bss) pages to be
1066 * non-writable.
1067 */
#if 0
/* Compiled out — kept for reference only (see the TODO above).
 * Scans the ELF section headers and returns the lowest virtual address of
 * any writable section, which older linkers used as the boundary below
 * which pages could be write-protected. Returns (unsigned)-1 on failure. */
static unsigned
get_wr_offset(int fd, const char *name, Elf32_Ehdr *ehdr)
{
    Elf32_Shdr *shdr_start;
    Elf32_Shdr *shdr;
    int shdr_sz = ehdr->e_shnum * sizeof(Elf32_Shdr);
    int cnt;
    unsigned wr_offset = 0xffffffff;

    /* Map the section header table; the file offset must be page-aligned
     * for mmap, hence the mask. */
    shdr_start = mmap(0, shdr_sz, PROT_READ, MAP_PRIVATE, fd,
                      ehdr->e_shoff & (~PAGE_MASK));
    if (shdr_start == MAP_FAILED) {
        WARN("%5d - Could not read section header info from '%s'. Will not "
             "not be able to determine write-protect offset.\n", pid, name);
        return (unsigned)-1;
    }

    /* Track the minimum sh_addr over all writable, non-NULL sections. */
    for(cnt = 0, shdr = shdr_start; cnt < ehdr->e_shnum; ++cnt, ++shdr) {
        if ((shdr->sh_type != SHT_NULL) && (shdr->sh_flags & SHF_WRITE) &&
            (shdr->sh_addr < wr_offset)) {
            wr_offset = shdr->sh_addr;
        }
    }

    munmap(shdr_start, shdr_sz);
    return wr_offset;
}
#endif
1097
1098 static soinfo *
1099 load_library(const char *name)
1100 {
1101 int fd = open_library(name);
1102 int cnt;
1103 unsigned ext_sz;
1104 unsigned req_base;
1105 const char *bname;
1106 soinfo *si = NULL;
1107 Elf32_Ehdr *hdr;
1108
1109 if(fd == -1) {
1110 DL_ERR("Library '%s' not found", name);
1111 return NULL;
1112 }
1113
1114 /* We have to read the ELF header to figure out what to do with this image
1115 */
1116 if (lseek(fd, 0, SEEK_SET) < 0) {
1117 DL_ERR("lseek() failed!");
1118 goto fail;
1119 }
1120
1121 if ((cnt = read(fd, &__header[0], PAGE_SIZE)) < 0) {
1122 DL_ERR("read() failed!");
1123 goto fail;
1124 }
1125
1126 /* Parse the ELF header and get the size of the memory footprint for
1127 * the library */
1128 req_base = get_lib_extents(fd, name, &__header[0], &ext_sz);
1129 if (req_base == (unsigned)-1)
1130 goto fail;
1131 TRACE("[ %5d - '%s' (%s) wants base=0x%08x sz=0x%08x ]\n", pid, name,
1132 (req_base ? "prelinked" : "not pre-linked"), req_base, ext_sz);
1133
1134 /* Now configure the soinfo struct where we'll store all of our data
1135 * for the ELF object. If the loading fails, we waste the entry, but
1136 * same thing would happen if we failed during linking. Configuring the
1137 * soinfo struct here is a lot more convenient.
1138 */
1139 bname = strrchr(name, '/');
1140 si = alloc_info(bname ? bname + 1 : name);
1141 if (si == NULL)
1142 goto fail;
1143
1144 /* Carve out a chunk of memory where we will map in the individual
1145 * segments */
1146 si->base = req_base;
1147 si->size = ext_sz;
1148 si->flags = 0;
1149 si->entry = 0;
1150 si->dynamic = (unsigned *)-1;
1151 if (alloc_mem_region(si) < 0)
1152 goto fail;
1153
1154 TRACE("[ %5d allocated memory for %s @ %p (0x%08x) ]\n",
1155 pid, name, (void *)si->base, (unsigned) ext_sz);
1156
1157 /* Now actually load the library's segments into right places in memory */
1158 if (load_segments(fd, &__header[0], si) < 0) {
1159 goto fail;
1160 }
1161
1162 /* this might not be right. Technically, we don't even need this info
1163 * once we go through 'load_segments'. */
1164 hdr = (Elf32_Ehdr *)si->base;
1165 si->phdr = (Elf32_Phdr *)((unsigned char *)si->base + hdr->e_phoff);
1166 si->phnum = hdr->e_phnum;
1167 /**/
1168
1169 close(fd);
1170 return si;
1171
1172 fail:
1173 if (si) free_info(si);
1174 close(fd);
1175 return NULL;
1176 }
1177
1178 static soinfo *
1179 init_library(soinfo *si)
1180 {
1181 unsigned wr_offset = 0xffffffff;
1182
1183 #if LINKER_DEBUG
1184 /* Has to be set via init_library as we don't get called via the
1185 * traditional android init library path */
1186 const char* env;
1187 env = getenv("HYBRIS_LINKER_DEBUG");
1188 if (env)
1189 debug_verbosity = atoi(env);
1190 if (getenv("HYBRIS_LINKER_STDOUT"))
1191 debug_stdout = 1;
1192
1193 INFO("[ HYBRIS: initializing library '%s']\n", si->name);
1194 #endif
1195
1196 /* At this point we know that whatever is loaded @ base is a valid ELF
1197 * shared library whose segments are properly mapped in. */
1198 TRACE("[ %5d init_library base=0x%08x sz=0x%08x name='%s') ]\n",
1199 pid, si->base, si->size, si->name);
1200
1201 if(link_image(si, wr_offset)) {
1202 /* We failed to link. However, we can only restore libbase
1203 ** if no additional libraries have moved it since we updated it.
1204 */
1205 munmap((void *)si->base, si->size);
1206 return NULL;
1207 }
1208
1209 return si;
1210 }
1211
1212 soinfo *find_library(const char *name)
1213 {
1214 soinfo *si;
1215 const char *bname;
1216
1217 #if ALLOW_SYMBOLS_FROM_MAIN
1218 if (name == NULL)
1219 return somain;
1220 #else
1221 if (name == NULL)
1222 return NULL;
1223 #endif
1224
1225 bname = strrchr(name, '/');
1226 bname = bname ? bname + 1 : name;
1227
1228 for(si = solist; si != 0; si = si->next){
1229 if(!strcmp(bname, si->name)) {
1230 if(si->flags & FLAG_ERROR) {
1231 DL_ERR("%5d '%s' failed to load previously", pid, bname);
1232 return NULL;
1233 }
1234 if(si->flags & FLAG_LINKED) return si;
1235 DL_ERR("OOPS: %5d recursive link to '%s'", pid, si->name);
1236 return NULL;
1237 }
1238 }
1239
1240 TRACE("[ %5d '%s' has not been loaded yet. Locating...]\n", pid, name);
1241 si = load_library(name);
1242 if(si == NULL)
1243 return NULL;
1244 return init_library(si);
1245 }
1246
1247 /* TODO:
1248 * notify gdb of unload
1249 * for non-prelinked libraries, find a way to decrement libbase
1250 */
1251 static void call_destructors(soinfo *si);
/* Drop one reference on 'si'. When the last reference is released, run its
 * destructors, recursively unload its DT_NEEDED dependencies, unmap it and
 * return its soinfo entry. Returns the remaining refcount (0 if unloaded).
 *
 * NOTE: the order below matters — the GNU_RELRO protections must be undone
 * BEFORE the DT_NEEDED entries are rewritten (the dynamic segment may live
 * inside the RELRO region). */
unsigned unload_library(soinfo *si)
{
    unsigned *d;
    if (si->refcount == 1) {
        TRACE("%5d unloading '%s'\n", pid, si->name);
        call_destructors(si);

        /*
         * Make sure that we undo the PT_GNU_RELRO protections we added
         * in link_image. This is needed to undo the DT_NEEDED hack below.
         */
        if ((si->gnu_relro_start != 0) && (si->gnu_relro_len != 0)) {
            /* Round down to a page boundary and widen the length to cover
             * the same end address. */
            Elf32_Addr start = (si->gnu_relro_start & ~PAGE_MASK);
            unsigned len = (si->gnu_relro_start - start) + si->gnu_relro_len;
            if (mprotect((void *) start, len, PROT_READ | PROT_WRITE) < 0)
                DL_ERR("%5d %s: could not undo GNU_RELRO protections. "
                       "Expect a crash soon. errno=%d (%s)",
                       pid, si->name, errno, strerror(errno));

        }

        /* link_image() stored each dependency's soinfo pointer in the
         * payload slot of its DT_NEEDED entry; retrieve and clear them. */
        for(d = si->dynamic; *d; d += 2) {
            if(d[0] == DT_NEEDED){
                soinfo *lsi = (soinfo *)d[1];

                // The next line will segfault if the we don't undo the
                // PT_GNU_RELRO protections (see comments above and in
                // link_image().
                d[1] = 0;

                if (validate_soinfo(lsi)) {
                    TRACE("%5d %s needs to unload %s\n", pid,
                          si->name, lsi->name);
                    unload_library(lsi);
                }
                else
                    DL_ERR("%5d %s: could not unload dependent library",
                           pid, si->name);
            }
        }

        /* NOTE(review): si is still written (refcount) and read (return)
         * after free_info(si); this assumes free_info() only returns the
         * entry to the linker's soinfo pool rather than deallocating the
         * storage — confirm against free_info()'s implementation. */
        munmap((char *)si->base, si->size);
        notify_gdb_of_unload(si);
        free_info(si);
        si->refcount = 0;
    }
    else {
        si->refcount--;
        INFO("%5d not unloading '%s', decrementing refcount to %d\n",
             pid, si->name, si->refcount);
    }
    return si->refcount;
}
1305
1306 /* TODO: don't use unsigned for addrs below. It works, but is not
1307 * ideal. They should probably be either uint32_t, Elf32_Addr, or unsigned
1308 * long.
1309 */
1310 static int reloc_library(soinfo *si, Elf32_Rel *rel, unsigned count)
1311 {
1312 Elf32_Sym *symtab = si->symtab;
1313 const char *strtab = si->strtab;
1314 Elf32_Sym *s;
1315 unsigned base;
1316 Elf32_Rel *start = rel;
1317 unsigned idx;
1318
1319 for (idx = 0; idx < count; ++idx) {
1320 unsigned type = ELF32_R_TYPE(rel->r_info);
1321 unsigned sym = ELF32_R_SYM(rel->r_info);
1322 unsigned reloc = (unsigned)(rel->r_offset + si->base);
1323 unsigned sym_addr = 0;
1324 char *sym_name = NULL;
1325
1326 DEBUG("%5d Processing '%s' relocation at index %d\n", pid,
1327 si->name, idx);
1328 if(sym != 0) {
1329 sym_name = (char *)(strtab + symtab[sym].st_name);
1330 INFO("HYBRIS: '%s' checking hooks for sym '%s'\n", si->name, sym_name);
1331 sym_addr = get_hooked_symbol(sym_name);
1332 if (sym_addr != NULL) {
1333 INFO("HYBRIS: '%s' hooked symbol %s to %x\n", si->name,
1334 sym_name, sym_addr);
1335 } else {
1336 s = _do_lookup(si, sym_name, &base);
1337 }
1338 if(sym_addr == NULL)
1339 if(s == NULL) {
1340 /* We only allow an undefined symbol if this is a weak
1341 reference.. */
1342 s = &symtab[sym];
1343 if (ELF32_ST_BIND(s->st_info) != STB_WEAK) {
1344 DL_ERR("%5d cannot locate '%s'...\n", pid, sym_name);
1345 return -1;
1346 }
1347
1348 /* IHI0044C AAELF 4.5.1.1:
1349
1350 Libraries are not searched to resolve weak references.
1351 It is not an error for a weak reference to remain
1352 unsatisfied.
1353
1354 During linking, the value of an undefined weak reference is:
1355 - Zero if the relocation type is absolute
1356 - The address of the place if the relocation is pc-relative
1357 - The address of nominial base address if the relocation
1358 type is base-relative.
1359 */
1360
1361 switch (type) {
1362 #if defined(ANDROID_ARM_LINKER)
1363 case R_ARM_JUMP_SLOT:
1364 case R_ARM_GLOB_DAT:
1365 case R_ARM_ABS32:
1366 case R_ARM_RELATIVE: /* Don't care. */
1367 case R_ARM_NONE: /* Don't care. */
1368 #elif defined(ANDROID_X86_LINKER)
1369 case R_386_JUMP_SLOT:
1370 case R_386_GLOB_DAT:
1371 case R_386_32:
1372 case R_386_RELATIVE: /* Dont' care. */
1373 #endif /* ANDROID_*_LINKER */
1374 /* sym_addr was initialized to be zero above or relocation
1375 code below does not care about value of sym_addr.
1376 No need to do anything. */
1377 break;
1378
1379 #if defined(ANDROID_X86_LINKER)
1380 case R_386_PC32:
1381 sym_addr = reloc;
1382 break;
1383 #endif /* ANDROID_X86_LINKER */
1384
1385 #if defined(ANDROID_ARM_LINKER)
1386 case R_ARM_COPY:
1387 /* Fall through. Can't really copy if weak symbol is
1388 not found in run-time. */
1389 #endif /* ANDROID_ARM_LINKER */
1390 default:
1391 DL_ERR("%5d unknown weak reloc type %d @ %p (%d)\n",
1392 pid, type, rel, (int) (rel - start));
1393 return -1;
1394 }
1395 } else {
1396 /* We got a definition. */
1397 #if 0
1398 if((base == 0) && (si->base != 0)){
1399 /* linking from libraries to main image is bad */
1400 DL_ERR("%5d cannot locate '%s'...",
1401 pid, strtab + symtab[sym].st_name);
1402 return -1;
1403 }
1404 #endif
1405 sym_addr = (unsigned)(s->st_value + base);
1406 }
1407 COUNT_RELOC(RELOC_SYMBOL);
1408 } else {
1409 s = NULL;
1410 }
1411
1412 /* TODO: This is ugly. Split up the relocations by arch into
1413 * different files.
1414 */
1415 switch(type){
1416 #if defined(ANDROID_ARM_LINKER)
1417 case R_ARM_JUMP_SLOT:
1418 COUNT_RELOC(RELOC_ABSOLUTE);
1419 MARK(rel->r_offset);
1420 TRACE_TYPE(RELO, "%5d RELO JMP_SLOT %08x <- %08x %s\n", pid,
1421 reloc, sym_addr, sym_name);
1422 *((unsigned*)reloc) = sym_addr;
1423 break;
1424 case R_ARM_GLOB_DAT:
1425 COUNT_RELOC(RELOC_ABSOLUTE);
1426 MARK(rel->r_offset);
1427 TRACE_TYPE(RELO, "%5d RELO GLOB_DAT %08x <- %08x %s\n", pid,
1428 reloc, sym_addr, sym_name);
1429 *((unsigned*)reloc) = sym_addr;
1430 break;
1431 case R_ARM_ABS32:
1432 COUNT_RELOC(RELOC_ABSOLUTE);
1433 MARK(rel->r_offset);
1434 TRACE_TYPE(RELO, "%5d RELO ABS %08x <- %08x %s\n", pid,
1435 reloc, sym_addr, sym_name);
1436 *((unsigned*)reloc) += sym_addr;
1437 break;
1438 case R_ARM_REL32:
1439 COUNT_RELOC(RELOC_RELATIVE);
1440 MARK(rel->r_offset);
1441 TRACE_TYPE(RELO, "%5d RELO REL32 %08x <- %08x - %08x %s\n", pid,
1442 reloc, sym_addr, rel->r_offset, sym_name);
1443 *((unsigned*)reloc) += sym_addr - rel->r_offset;
1444 break;
1445 #elif defined(ANDROID_X86_LINKER)
1446 case R_386_JUMP_SLOT:
1447 COUNT_RELOC(RELOC_ABSOLUTE);
1448 MARK(rel->r_offset);
1449 TRACE_TYPE(RELO, "%5d RELO JMP_SLOT %08x <- %08x %s\n", pid,
1450 reloc, sym_addr, sym_name);
1451 *((unsigned*)reloc) = sym_addr;
1452 break;
1453 case R_386_GLOB_DAT:
1454 COUNT_RELOC(RELOC_ABSOLUTE);
1455 MARK(rel->r_offset);
1456 TRACE_TYPE(RELO, "%5d RELO GLOB_DAT %08x <- %08x %s\n", pid,
1457 reloc, sym_addr, sym_name);
1458 *((unsigned*)reloc) = sym_addr;
1459 break;
1460 #endif /* ANDROID_*_LINKER */
1461
1462 #if defined(ANDROID_ARM_LINKER)
1463 case R_ARM_RELATIVE:
1464 #elif defined(ANDROID_X86_LINKER)
1465 case R_386_RELATIVE:
1466 #endif /* ANDROID_*_LINKER */
1467 COUNT_RELOC(RELOC_RELATIVE);
1468 MARK(rel->r_offset);
1469 if(sym){
1470 DL_ERR("%5d odd RELATIVE form...", pid);
1471 return -1;
1472 }
1473 TRACE_TYPE(RELO, "%5d RELO RELATIVE %08x <- +%08x\n", pid,
1474 reloc, si->base);
1475 *((unsigned*)reloc) += si->base;
1476 break;
1477
1478 #if defined(ANDROID_X86_LINKER)
1479 case R_386_32:
1480 COUNT_RELOC(RELOC_RELATIVE);
1481 MARK(rel->r_offset);
1482
1483 TRACE_TYPE(RELO, "%5d RELO R_386_32 %08x <- +%08x %s\n", pid,
1484 reloc, sym_addr, sym_name);
1485 *((unsigned *)reloc) += (unsigned)sym_addr;
1486 break;
1487
1488 case R_386_PC32:
1489 COUNT_RELOC(RELOC_RELATIVE);
1490 MARK(rel->r_offset);
1491 TRACE_TYPE(RELO, "%5d RELO R_386_PC32 %08x <- "
1492 "+%08x (%08x - %08x) %s\n", pid, reloc,
1493 (sym_addr - reloc), sym_addr, reloc, sym_name);
1494 *((unsigned *)reloc) += (unsigned)(sym_addr - reloc);
1495 break;
1496 #endif /* ANDROID_X86_LINKER */
1497
1498 #ifdef ANDROID_ARM_LINKER
1499 case R_ARM_COPY:
1500 COUNT_RELOC(RELOC_COPY);
1501 MARK(rel->r_offset);
1502 TRACE_TYPE(RELO, "%5d RELO %08x <- %d @ %08x %s\n", pid,
1503 reloc, s->st_size, sym_addr, sym_name);
1504 memcpy((void*)reloc, (void*)sym_addr, s->st_size);
1505 break;
1506 case R_ARM_NONE:
1507 break;
1508 #endif /* ANDROID_ARM_LINKER */
1509
1510 default:
1511 DL_ERR("%5d unknown reloc type %d @ %p (%d)",
1512 pid, type, rel, (int) (rel - start));
1513 return -1;
1514 }
1515 rel++;
1516 }
1517 return 0;
1518 }
1519
1520 /* Please read the "Initialization and Termination functions" functions.
1521 * of the linker design note in bionic/linker/README.TXT to understand
1522 * what the following code is doing.
1523 *
1524 * The important things to remember are:
1525 *
1526 * DT_PREINIT_ARRAY must be called first for executables, and should
1527 * not appear in shared libraries.
1528 *
1529 * DT_INIT should be called before DT_INIT_ARRAY if both are present
1530 *
1531 * DT_FINI should be called after DT_FINI_ARRAY if both are present
1532 *
1533 * DT_FINI_ARRAY must be parsed in reverse order.
1534 */
1535
1536 static void call_array(unsigned *ctor, int count, int reverse)
1537 {
1538 int n, inc = 1;
1539
1540 if (reverse) {
1541 ctor += (count-1);
1542 inc = -1;
1543 }
1544
1545 for(n = count; n > 0; n--) {
1546 TRACE("[ %5d Looking at %s *0x%08x == 0x%08x ]\n", pid,
1547 reverse ? "dtor" : "ctor",
1548 (unsigned)ctor, (unsigned)*ctor);
1549 void (*func)() = (void (*)()) *ctor;
1550 ctor += inc;
1551 if(((int) func == 0) || ((int) func == -1)) continue;
1552 TRACE("[ %5d Calling func @ 0x%08x ]\n", pid, (unsigned)func);
1553 func();
1554 }
1555 }
1556
/* Run the constructors of 'si' and (first) of everything it depends on,
 * in dependency order: DT_PREINIT_ARRAY (executable only), then each
 * DT_NEEDED library's constructors, then DT_INIT, then DT_INIT_ARRAY.
 * Re-entry is guarded by si->constructors_called; libc.so is skipped
 * entirely under hybris. */
void call_constructors_recursive(soinfo *si)
{
    if (si->constructors_called)
        return;
    if (strcmp(si->name,"libc.so") == 0) {
        INFO("HYBRIS: =============> Skipping libc.so\n");
        return;
    }

    // Set this before actually calling the constructors, otherwise it doesn't
    // protect against recursive constructor calls. One simple example of
    // constructor recursion is the libc debug malloc, which is implemented in
    // libc_malloc_debug_leak.so:
    // 1. The program depends on libc, so libc's constructor is called here.
    // 2. The libc constructor calls dlopen() to load libc_malloc_debug_leak.so.
    // 3. dlopen() calls call_constructors_recursive() with the newly created
    //    soinfo for libc_malloc_debug_leak.so.
    // 4. The debug so depends on libc, so call_constructors_recursive() is
    //    called again with the libc soinfo. If it doesn't trigger the early-
    //    out above, the libc constructor will be called again (recursively!).
    si->constructors_called = 1;

    if (si->flags & FLAG_EXE) {
        TRACE("[ %5d Calling preinit_array @ 0x%08x [%d] for '%s' ]\n",
              pid, (unsigned)si->preinit_array, si->preinit_array_count,
              si->name);
        call_array(si->preinit_array, si->preinit_array_count, 0);
        TRACE("[ %5d Done calling preinit_array for '%s' ]\n", pid, si->name);
    } else {
        /* Per the linker design notes, DT_PREINIT_ARRAY is only valid in
         * the main executable; complain if a shared library carries one. */
        if (si->preinit_array) {
            DL_ERR("%5d Shared library '%s' has a preinit_array table @ 0x%08x."
                   " This is INVALID.", pid, si->name,
                   (unsigned)si->preinit_array);
        }
    }

    /* Dependencies first: DT_NEEDED payloads hold soinfo pointers, patched
     * in by link_image(). */
    if (si->dynamic) {
        unsigned *d;
        for(d = si->dynamic; *d; d += 2) {
            if(d[0] == DT_NEEDED){
                soinfo* lsi = (soinfo *)d[1];
                if (!validate_soinfo(lsi)) {
                    DL_ERR("%5d bad DT_NEEDED pointer in %s",
                           pid, si->name);
                } else {
                    call_constructors_recursive(lsi);
                }
            }
        }
    }

    /* DT_INIT runs before DT_INIT_ARRAY (see design note above). */
    if (si->init_func) {
        TRACE("[ %5d Calling init_func @ 0x%08x for '%s' ]\n", pid,
              (unsigned)si->init_func, si->name);
        si->init_func();
        TRACE("[ %5d Done calling init_func for '%s' ]\n", pid, si->name);
    }

    if (si->init_array) {
        TRACE("[ %5d Calling init_array @ 0x%08x [%d] for '%s' ]\n", pid,
              (unsigned)si->init_array, si->init_array_count, si->name);
        call_array(si->init_array, si->init_array_count, 0);
        TRACE("[ %5d Done calling init_array for '%s' ]\n", pid, si->name);
    }

}
1623
1624 static void call_destructors(soinfo *si)
1625 {
1626 if (si->fini_array) {
1627 TRACE("[ %5d Calling fini_array @ 0x%08x [%d] for '%s' ]\n", pid,
1628 (unsigned)si->fini_array, si->fini_array_count, si->name);
1629 call_array(si->fini_array, si->fini_array_count, 1);
1630 TRACE("[ %5d Done calling fini_array for '%s' ]\n", pid, si->name);
1631 }
1632
1633 if (si->fini_func) {
1634 TRACE("[ %5d Calling fini_func @ 0x%08x for '%s' ]\n", pid,
1635 (unsigned)si->fini_func, si->name);
1636 si->fini_func();
1637 TRACE("[ %5d Done calling fini_func for '%s' ]\n", pid, si->name);
1638 }
1639 }
1640
1641 /* Force any of the closed stdin, stdout and stderr to be associated with
1642 /dev/null. */
1643 static int nullify_closed_stdio (void)
1644 {
1645 int dev_null, i, status;
1646 int return_value = 0;
1647
1648 dev_null = open("/dev/null", O_RDWR);
1649 if (dev_null < 0) {
1650 DL_ERR("Cannot open /dev/null.");
1651 return -1;
1652 }
1653 TRACE("[ %5d Opened /dev/null file-descriptor=%d]\n", pid, dev_null);
1654
1655 /* If any of the stdio file descriptors is valid and not associated
1656 with /dev/null, dup /dev/null to it. */
1657 for (i = 0; i < 3; i++) {
1658 /* If it is /dev/null already, we are done. */
1659 if (i == dev_null)
1660 continue;
1661
1662 TRACE("[ %5d Nullifying stdio file descriptor %d]\n", pid, i);
1663 /* The man page of fcntl does not say that fcntl(..,F_GETFL)
1664 can be interrupted but we do this just to be safe. */
1665 do {
1666 status = fcntl(i, F_GETFL);
1667 } while (status < 0 && errno == EINTR);
1668
1669 /* If file is openned, we are good. */
1670 if (status >= 0)
1671 continue;
1672
1673 /* The only error we allow is that the file descriptor does not
1674 exist, in which case we dup /dev/null to it. */
1675 if (errno != EBADF) {
1676 DL_ERR("nullify_stdio: unhandled error %s", strerror(errno));
1677 return_value = -1;
1678 continue;
1679 }
1680
1681 /* Try dupping /dev/null to this stdio file descriptor and
1682 repeat if there is a signal. Note that any errors in closing
1683 the stdio descriptor are lost. */
1684 do {
1685 status = dup2(dev_null, i);
1686 } while (status < 0 && errno == EINTR);
1687
1688 if (status < 0) {
1689 DL_ERR("nullify_stdio: dup2 error %s", strerror(errno));
1690 return_value = -1;
1691 continue;
1692 }
1693 }
1694
1695 /* If /dev/null is not one of the stdio file descriptors, close it. */
1696 if (dev_null > 2) {
1697 TRACE("[ %5d Closing /dev/null file-descriptor=%d]\n", pid, dev_null);
1698 do {
1699 status = close(dev_null);
1700 } while (status < 0 && errno == EINTR);
1701
1702 if (status < 0) {
1703 DL_ERR("nullify_stdio: close error %s", strerror(errno));
1704 return_value = -1;
1705 }
1706 }
1707
1708 close(dev_null);
1709 return return_value;
1710 }
1711
/* Link 'si' into the process:
 *  1. for the executable/linker only, scan program headers for DYNAMIC,
 *     GNU_RELRO and (ARM) EXIDX info, remembering write-protect ranges;
 *  2. digest the dynamic section into the soinfo fields;
 *  3. load LD_PRELOADs (executable only) and all DT_NEEDED dependencies,
 *     stashing each dependency's soinfo pointer in its DT_NEEDED payload;
 *  4. apply PLT and data relocations;
 *  5. restore read-only protections and apply GNU_RELRO;
 *  6. nullify stdio for set-id programs and notify gdb.
 * Returns 0 on success; -1 on failure with FLAG_ERROR set on 'si'. */
static int link_image(soinfo *si, unsigned wr_offset)
{
    unsigned *d;
    Elf32_Phdr *phdr = si->phdr;
    int phnum = si->phnum;

    INFO("[ %5d linking %s ]\n", pid, si->name);
    DEBUG("%5d si->base = 0x%08x si->flags = 0x%08x\n", pid,
          si->base, si->flags);

    if (si->flags & (FLAG_EXE | FLAG_LINKER)) {
        /* Locate the needed program segments (DYNAMIC/ARM_EXIDX) for
         * linkage info if this is the executable or the linker itself.
         * If this was a dynamic lib, that would have been done at load time.
         *
         * TODO: It's unfortunate that small pieces of this are
         * repeated from the load_library routine. Refactor this just
         * slightly to reuse these bits.
         */
        si->size = 0;
        for(; phnum > 0; --phnum, ++phdr) {
#ifdef ANDROID_ARM_LINKER
            if(phdr->p_type == PT_ARM_EXIDX) {
                /* exidx entries (used for stack unwinding) are 8 bytes each.
                 */
                si->ARM_exidx = (unsigned *)phdr->p_vaddr;
                si->ARM_exidx_count = phdr->p_memsz / 8;
            }
#endif
            if (phdr->p_type == PT_LOAD) {
                /* For the executable, we use the si->size field only in
                   dl_unwind_find_exidx(), so the meaning of si->size
                   is not the size of the executable; it is the distance
                   between the load location of the executable and the last
                   address of the loadable part of the executable.
                   We use the range [si->base, si->base + si->size) to
                   determine whether a PC value falls within the executable
                   section. Of course, if a value is between si->base and
                   (si->base + phdr->p_vaddr), it's not in the executable
                   section, but a) we shouldn't be asking for such a value
                   anyway, and b) if we have to provide an EXIDX for such a
                   value, then the executable's EXIDX is probably the better
                   choice.
                */
                DEBUG_DUMP_PHDR(phdr, "PT_LOAD", pid);
                if (phdr->p_vaddr + phdr->p_memsz > si->size)
                    si->size = phdr->p_vaddr + phdr->p_memsz;
                /* try to remember what range of addresses should be write
                 * protected */
                if (!(phdr->p_flags & PF_W)) {
                    unsigned _end;

                    if (si->base + phdr->p_vaddr < si->wrprotect_start)
                        si->wrprotect_start = si->base + phdr->p_vaddr;
                    /* round the segment end up to a page boundary */
                    _end = (((si->base + phdr->p_vaddr + phdr->p_memsz + PAGE_SIZE - 1) &
                             (~PAGE_MASK)));
                    if (_end > si->wrprotect_end)
                        si->wrprotect_end = _end;
                    /* Make the section writable just in case we'll have to
                     * write to it during relocation (i.e. text segment).
                     * However, we will remember what range of addresses
                     * should be write protected.
                     */
                    mprotect((void *) (si->base + phdr->p_vaddr),
                             phdr->p_memsz,
                             PFLAGS_TO_PROT(phdr->p_flags) | PROT_WRITE);
                }
            } else if (phdr->p_type == PT_DYNAMIC) {
                if (si->dynamic != (unsigned *)-1) {
                    DL_ERR("%5d multiple PT_DYNAMIC segments found in '%s'. "
                           "Segment at 0x%08x, previously one found at 0x%08x",
                           pid, si->name, si->base + phdr->p_vaddr,
                           (unsigned)si->dynamic);
                    goto fail;
                }
                DEBUG_DUMP_PHDR(phdr, "PT_DYNAMIC", pid);
                si->dynamic = (unsigned *) (si->base + phdr->p_vaddr);
            } else if (phdr->p_type == PT_GNU_RELRO) {
                /* Reject a RELRO range that lies outside the image or
                 * whose end address wraps around. */
                if ((phdr->p_vaddr >= si->size)
                        || ((phdr->p_vaddr + phdr->p_memsz) > si->size)
                        || ((si->base + phdr->p_vaddr + phdr->p_memsz) < si->base)) {
                    DL_ERR("%d invalid GNU_RELRO in '%s' "
                           "p_vaddr=0x%08x p_memsz=0x%08x", pid, si->name,
                           phdr->p_vaddr, phdr->p_memsz);
                    goto fail;
                }
                si->gnu_relro_start = (Elf32_Addr) (si->base + phdr->p_vaddr);
                si->gnu_relro_len = (unsigned) phdr->p_memsz;
            }
        }
    }

    if (si->dynamic == (unsigned *)-1) {
        DL_ERR("%5d missing PT_DYNAMIC?!", pid);
        goto fail;
    }

    DEBUG("%5d dynamic = %p\n", pid, si->dynamic);

    /* extract useful information from dynamic section */
    /* NOTE: entries are (tag, value) pairs; the inner *d++ steps from the
     * tag onto its value, the loop's d++ then steps to the next tag. */
    for(d = si->dynamic; *d; d++){
        DEBUG("%5d d = %p, d[0] = 0x%08x d[1] = 0x%08x\n", pid, d, d[0], d[1]);
        switch(*d++){
        case DT_HASH:
            si->nbucket = ((unsigned *) (si->base + *d))[0];
            si->nchain = ((unsigned *) (si->base + *d))[1];
            si->bucket = (unsigned *) (si->base + *d + 8);
            si->chain = (unsigned *) (si->base + *d + 8 + si->nbucket * 4);
            break;
        case DT_STRTAB:
            si->strtab = (const char *) (si->base + *d);
            break;
        case DT_SYMTAB:
            si->symtab = (Elf32_Sym *) (si->base + *d);
            break;
        case DT_PLTREL:
            if(*d != DT_REL) {
                DL_ERR("DT_RELA not supported");
                goto fail;
            }
            break;
        case DT_JMPREL:
            si->plt_rel = (Elf32_Rel*) (si->base + *d);
            break;
        case DT_PLTRELSZ:
            /* size in bytes / sizeof(Elf32_Rel) */
            si->plt_rel_count = *d / 8;
            break;
        case DT_REL:
            si->rel = (Elf32_Rel*) (si->base + *d);
            break;
        case DT_RELSZ:
            si->rel_count = *d / 8;
            break;
        case DT_PLTGOT:
            /* Save this in case we decide to do lazy binding. We don't yet. */
            si->plt_got = (unsigned *)(si->base + *d);
            break;
        case DT_DEBUG:
            // Set the DT_DEBUG entry to the address of _r_debug for GDB
            *d = (int) &_r_debug;
            break;
        case DT_RELA:
            DL_ERR("%5d DT_RELA not supported", pid);
            goto fail;
        case DT_INIT:
            si->init_func = (void (*)(void))(si->base + *d);
            DEBUG("%5d %s constructors (init func) found at %p\n",
                  pid, si->name, si->init_func);
            break;
        case DT_FINI:
            si->fini_func = (void (*)(void))(si->base + *d);
            DEBUG("%5d %s destructors (fini func) found at %p\n",
                  pid, si->name, si->fini_func);
            break;
        case DT_INIT_ARRAY:
            si->init_array = (unsigned *)(si->base + *d);
            DEBUG("%5d %s constructors (init_array) found at %p\n",
                  pid, si->name, si->init_array);
            break;
        case DT_INIT_ARRAYSZ:
            si->init_array_count = ((unsigned)*d) / sizeof(Elf32_Addr);
            break;
        case DT_FINI_ARRAY:
            si->fini_array = (unsigned *)(si->base + *d);
            DEBUG("%5d %s destructors (fini_array) found at %p\n",
                  pid, si->name, si->fini_array);
            break;
        case DT_FINI_ARRAYSZ:
            si->fini_array_count = ((unsigned)*d) / sizeof(Elf32_Addr);
            break;
        case DT_PREINIT_ARRAY:
            si->preinit_array = (unsigned *)(si->base + *d);
            DEBUG("%5d %s constructors (preinit_array) found at %p\n",
                  pid, si->name, si->preinit_array);
            break;
        case DT_PREINIT_ARRAYSZ:
            si->preinit_array_count = ((unsigned)*d) / sizeof(Elf32_Addr);
            break;
        case DT_TEXTREL:
            /* TODO: make use of this. */
            /* this means that we might have to write into where the text
             * segment was loaded during relocation... Do something with
             * it.
             */
            DEBUG("%5d Text segment should be writable during relocation.\n",
                  pid);
            break;
        }
    }

    DEBUG("%5d si->base = 0x%08x, si->strtab = %p, si->symtab = %p\n",
          pid, si->base, si->strtab, si->symtab);

    if((si->strtab == 0) || (si->symtab == 0)) {
        DL_ERR("%5d missing essential tables", pid);
        goto fail;
    }

    /* if this is the main executable, then load all of the preloads now */
    if(si->flags & FLAG_EXE) {
        int i;
        memset(preloads, 0, sizeof(preloads));
        for(i = 0; ldpreload_names[i] != NULL; i++) {
            soinfo *lsi = find_library(ldpreload_names[i]);
            if(lsi == 0) {
                strlcpy(tmp_err_buf, linker_get_error(), sizeof(tmp_err_buf));
                DL_ERR("%5d could not load needed library '%s' for '%s' (%s)",
                       pid, ldpreload_names[i], si->name, tmp_err_buf);
                goto fail;
            }
            lsi->refcount++;
            preloads[i] = lsi;
        }
    }

    for(d = si->dynamic; *d; d += 2) {
        if(d[0] == DT_NEEDED){
            DEBUG("%5d %s needs %s\n", pid, si->name, si->strtab + d[1]);
            soinfo *lsi = find_library(si->strtab + d[1]);
            if(lsi == 0) {
                strlcpy(tmp_err_buf, linker_get_error(), sizeof(tmp_err_buf));
                DL_ERR("%5d could not load needed library '%s' for '%s' (%s)",
                       pid, si->strtab + d[1], si->name, tmp_err_buf);
                goto fail;
            }
            /* Save the soinfo of the loaded DT_NEEDED library in the payload
               of the DT_NEEDED entry itself, so that we can retrieve the
               soinfo directly later from the dynamic segment. This is a hack,
               but it allows us to map from DT_NEEDED to soinfo efficiently
               later on when we resolve relocations, trying to look up a symbol
               with dlsym().
             */
            d[1] = (unsigned)lsi;
            lsi->refcount++;
        }
    }

    if(si->plt_rel) {
        DEBUG("[ %5d relocating %s plt ]\n", pid, si->name );
        if(reloc_library(si, si->plt_rel, si->plt_rel_count))
            goto fail;
    }
    if(si->rel) {
        DEBUG("[ %5d relocating %s ]\n", pid, si->name );
        if(reloc_library(si, si->rel, si->rel_count))
            goto fail;
    }

    si->flags |= FLAG_LINKED;
    DEBUG("[ %5d finished linking %s ]\n", pid, si->name);

#if 0
    /* This is the way that the old dynamic linker did protection of
     * non-writable areas. It would scan section headers and find where
     * .text ended (rather where .data/.bss began) and assume that this is
     * the upper range of the non-writable area. This is too coarse,
     * and is kept here for reference until we fully move away from single
     * segment elf objects. See the code in get_wr_offset (also #if'd 0)
     * that made this possible.
     */
    if(wr_offset < 0xffffffff){
        mprotect((void*) si->base, wr_offset, PROT_READ | PROT_EXEC);
    }
#else
    /* TODO: Verify that this does the right thing in all cases, as it
     * presently probably does not. It is possible that an ELF image will
     * come with multiple read-only segments. What we ought to do is scan
     * the program headers again and mprotect all the read-only segments.
     * To prevent re-scanning the program header, we would have to build a
     * list of loadable segments in si, and then scan that instead. */
    if (si->wrprotect_start != 0xffffffff && si->wrprotect_end != 0) {
        mprotect((void *)si->wrprotect_start,
                 si->wrprotect_end - si->wrprotect_start,
                 PROT_READ | PROT_EXEC);
    }
#endif

    /* Relocations are done: make the GNU_RELRO region read-only, as the
     * ABI requires (page-align the start; widen len to keep the end). */
    if (si->gnu_relro_start != 0 && si->gnu_relro_len != 0) {
        Elf32_Addr start = (si->gnu_relro_start & ~PAGE_MASK);
        unsigned len = (si->gnu_relro_start - start) + si->gnu_relro_len;
        if (mprotect((void *) start, len, PROT_READ) < 0) {
            DL_ERR("%5d GNU_RELRO mprotect of library '%s' failed: %d (%s)\n",
                   pid, si->name, errno, strerror(errno));
            goto fail;
        }
    }

    /* If this is a SET?ID program, dup /dev/null to opened stdin,
       stdout and stderr to close a security hole described in:

    ftp://ftp.freebsd.org/pub/FreeBSD/CERT/advisories/FreeBSD-SA-02:23.stdio.asc

     */
    if (program_is_setuid)
        nullify_closed_stdio ();
    notify_gdb_of_load(si);
    return 0;

fail:
    ERROR("failed to link %s\n", si->name);
    si->flags |= FLAG_ERROR;
    return -1;
}
2015
2016 static void parse_library_path(const char *path, char *delim)
2017 {
2018 size_t len;
2019 char *ldpaths_bufp = ldpaths_buf;
2020 int i = 0;
2021
2022 len = strlcpy(ldpaths_buf, path, sizeof(ldpaths_buf));
2023
2024 while (i < LDPATH_MAX && (ldpaths[i] = strsep(&ldpaths_bufp, delim))) {
2025 if (*ldpaths[i] != '\0')
2026 ++i;
2027 }
2028
2029 /* Forget the last path if we had to truncate; this occurs if the 2nd to
2030 * last char isn't '\0' (i.e. not originally a delim). */
2031 if (i > 0 && len >= sizeof(ldpaths_buf) &&
2032 ldpaths_buf[sizeof(ldpaths_buf) - 2] != '\0') {
2033 ldpaths[i - 1] = NULL;
2034 } else {
2035 ldpaths[i] = NULL;
2036 }
2037 }
2038
2039 static void parse_preloads(const char *path, char *delim)
2040 {
2041 size_t len;
2042 char *ldpreloads_bufp = ldpreloads_buf;
2043 int i = 0;
2044
2045 len = strlcpy(ldpreloads_buf, path, sizeof(ldpreloads_buf));
2046
2047 while (i < LDPRELOAD_MAX && (ldpreload_names[i] = strsep(&ldpreloads_bufp, delim))) {
2048 if (*ldpreload_names[i] != '\0') {
2049 ++i;
2050 }
2051 }
2052
2053 /* Forget the last path if we had to truncate; this occurs if the 2nd to
2054 * last char isn't '\0' (i.e. not originally a delim). */
2055 if (i > 0 && len >= sizeof(ldpreloads_buf) &&
2056 ldpreloads_buf[sizeof(ldpreloads_buf) - 2] != '\0') {
2057 ldpreload_names[i - 1] = NULL;
2058 } else {
2059 ldpreload_names[i] = NULL;
2060 }
2061 }
2062
2063 /*
2064 * This code is called after the linker has linked itself and
2065 * fixed it's own GOT. It is safe to make references to externs
2066 * and other non-local data at this point.
2067 */
2068 static unsigned __linker_init_post_relocation(unsigned **elfdata)
2069 {
2070 static soinfo linker_soinfo;
2071
2072 int argc = (int) *elfdata;
2073 char **argv = (char**) (elfdata + 1);
2074 unsigned *vecs = (unsigned*) (argv + argc + 1);
2075 unsigned *v;
2076 soinfo *si;
2077 struct link_map * map;
2078 const char *ldpath_env = NULL;
2079 const char *ldpreload_env = NULL;
2080
2081 /* NOTE: we store the elfdata pointer on a special location
2082 * of the temporary TLS area in order to pass it to
2083 * the C Library's runtime initializer.
2084 *
2085 * The initializer must clear the slot and reset the TLS
2086 * to point to a different location to ensure that no other
2087 * shared library constructor can access it.
2088 */
2089 //__libc_init_tls(elfdata);
2090
2091 pid = getpid();
2092
2093 #if TIMING
2094 struct timeval t0, t1;
2095 gettimeofday(&t0, 0);
2096 #endif
2097
2098 /* Initialize environment functions, and get to the ELF aux vectors table */
2099 vecs = linker_env_init(vecs);
2100
2101 /* Check auxv for AT_SECURE first to see if program is setuid, setgid,
2102 has file caps, or caused a SELinux/AppArmor domain transition. */
2103 for (v = vecs; v[0]; v += 2) {
2104 if (v[0] == AT_SECURE) {
2105 /* kernel told us whether to enable secure mode */
2106 program_is_setuid = v[1];
2107 goto sanitize;
2108 }
2109 }
2110
2111 /* Kernel did not provide AT_SECURE - fall back on legacy test. */
2112 program_is_setuid = (getuid() != geteuid()) || (getgid() != getegid());
2113
2114 sanitize:
2115 /* Sanitize environment if we're loading a setuid program */
2116 if (program_is_setuid)
2117 linker_env_secure();
2118
2119 //debugger_init();
2120
2121 /* Get a few environment variables */
2122 {
2123 #if LINKER_DEBUG
2124 const char* env;
2125 env = linker_env_get("DEBUG"); /* XXX: TODO: Change to LD_DEBUG */
2126 if (env)
2127 debug_verbosity = atoi(env);
2128 #endif
2129
2130 /* Normally, these are cleaned by linker_env_secure, but the test
2131 * against program_is_setuid doesn't cost us anything */
2132 if (!program_is_setuid) {
2133 ldpath_env = linker_env_get("LD_LIBRARY_PATH");
2134 ldpreload_env = linker_env_get("LD_PRELOAD");
2135 }
2136 }
2137
2138 INFO("[ android linker & debugger ]\n");
2139 DEBUG("%5d elfdata @ 0x%08x\n", pid, (unsigned)elfdata);
2140
2141 si = alloc_info(argv[0]);
2142 if(si == 0) {
2143 exit(-1);
2144 }
2145
2146 /* bootstrap the link map, the main exe always needs to be first */
2147 si->flags |= FLAG_EXE;
2148 map = &(si->linkmap);
2149
2150 map->l_addr = 0;
2151 map->l_name = argv[0];
2152 map->l_prev = NULL;
2153 map->l_next = NULL;
2154
2155 _r_debug.r_map = map;
2156 r_debug_tail = map;
2157
2158 /* gdb expects the linker to be in the debug shared object list,
2159 * and we need to make sure that the reported load address is zero.
2160 * Without this, gdb gets the wrong idea of where rtld_db_dlactivity()
2161 * is. Don't use alloc_info(), because the linker shouldn't
2162 * be on the soinfo list.
2163 */
2164 strlcpy((char*) linker_soinfo.name, "/system/bin/linker", sizeof linker_soinfo.name);
2165 linker_soinfo.flags = 0;
2166 linker_soinfo.base = 0; // This is the important part; must be zero.
2167 insert_soinfo_into_debug_map(&linker_soinfo);
2168
2169 /* extract information passed from the kernel */
2170 while(vecs[0] != 0){
2171 switch(vecs[0]){
2172 case AT_PHDR:
2173 si->phdr = (Elf32_Phdr*) vecs[1];
2174 break;
2175 case AT_PHNUM:
2176 si->phnum = (int) vecs[1];
2177 break;
2178 case AT_ENTRY:
2179 si->entry = vecs[1];
2180 break;
2181 }
2182 vecs += 2;
2183 }
2184
2185 /* Compute the value of si->base. We can't rely on the fact that
2186 * the first entry is the PHDR because this will not be true
2187 * for certain executables (e.g. some in the NDK unit test suite)
2188 */
2189 int nn;
2190 si->base = 0;
2191 for ( nn = 0; nn < si->phnum; nn++ ) {
2192 if (si->phdr[nn].p_type == PT_PHDR) {
2193 si->base = (Elf32_Addr) si->phdr - si->phdr[nn].p_vaddr;
2194 break;
2195 }
2196 }
2197 si->dynamic = (unsigned *)-1;
2198 si->wrprotect_start = 0xffffffff;
2199 si->wrprotect_end = 0;
2200 si->refcount = 1;
2201 si->gnu_relro_start = 0;
2202 si->gnu_relro_len = 0;
2203
2204 /* Use LD_LIBRARY_PATH if we aren't setuid/setgid */
2205 if (ldpath_env)
2206 parse_library_path(ldpath_env, ":");
2207
2208 if (ldpreload_env) {
2209 parse_preloads(ldpreload_env, " :");
2210 }
2211
2212 if(link_image(si, 0)) {
2213 char errmsg[] = "CANNOT LINK EXECUTABLE\n";
2214 write(2, __linker_dl_err_buf, strlen(__linker_dl_err_buf));
2215 write(2, errmsg, sizeof(errmsg));
2216 exit(-1);
2217 }
2218
2219 call_constructors_recursive(si);
2220
2221 #if ALLOW_SYMBOLS_FROM_MAIN
2222 /* Set somain after we've loaded all the libraries in order to prevent
2223 * linking of symbols back to the main image, which is not set up at that
2224 * point yet.
2225 */
2226 somain = si;
2227 #endif
2228
2229 #if TIMING
2230 gettimeofday(&t1,NULL);
2231 PRINT("LINKER TIME: %s: %d microseconds\n", argv[0], (int) (
2232 (((long long)t1.tv_sec * 1000000LL) + (long long)t1.tv_usec) -
2233 (((long long)t0.tv_sec * 1000000LL) + (long long)t0.tv_usec)
2234 ));
2235 #endif
2236 #if STATS
2237 PRINT("RELO STATS: %s: %d abs, %d rel, %d copy, %d symbol\n", argv[0],
2238 linker_stats.reloc[RELOC_ABSOLUTE],
2239 linker_stats.reloc[RELOC_RELATIVE],
2240 linker_stats.reloc[RELOC_COPY],
2241 linker_stats.reloc[RELOC_SYMBOL]);
2242 #endif
2243 #if COUNT_PAGES
2244 {
2245 unsigned n;
2246 unsigned i;
2247 unsigned count = 0;
2248 for(n = 0; n < 4096; n++){
2249 if(bitmask[n]){
2250 unsigned x = bitmask[n];
2251 for(i = 0; i < 8; i++){
2252 if(x & 1) count++;
2253 x >>= 1;
2254 }
2255 }
2256 }
2257 PRINT("PAGES MODIFIED: %s: %d (%dKB)\n", argv[0], count, count * 4);
2258 }
2259 #endif
2260
2261 #if TIMING || STATS || COUNT_PAGES
2262 fflush(stdout);
2263 #endif
2264
2265 TRACE("[ %5d Ready to execute '%s' @ 0x%08x ]\n", pid, si->name,
2266 si->entry);
2267 return si->entry;
2268 }
2269
2270 /*
2271 * Find the value of AT_BASE passed to us by the kernel. This is the load
2272 * location of the linker.
2273 */
2274 static unsigned find_linker_base(unsigned **elfdata) {
2275 int argc = (int) *elfdata;
2276 char **argv = (char**) (elfdata + 1);
2277 unsigned *vecs = (unsigned*) (argv + argc + 1);
2278 while (vecs[0] != 0) {
2279 vecs++;
2280 }
2281
2282 /* The end of the environment block is marked by two NULL pointers */
2283 vecs++;
2284
2285 while(vecs[0]) {
2286 if (vecs[0] == AT_BASE) {
2287 return vecs[1];
2288 }
2289 vecs += 2;
2290 }
2291
2292 return 0; // should never happen
2293 }
2294
2295 /*
2296 * This is the entry point for the linker, called from begin.S. This
2297 * method is responsible for fixing the linker's own relocations, and
2298 * then calling __linker_init_post_relocation().
2299 *
2300 * Because this method is called before the linker has fixed it's own
2301 * relocations, any attempt to reference an extern variable, extern
2302 * function, or other GOT reference will generate a segfault.
2303 */
2304 unsigned __linker_init(unsigned **elfdata) {
2305 unsigned linker_addr = find_linker_base(elfdata);
2306 Elf32_Ehdr *elf_hdr = (Elf32_Ehdr *) linker_addr;
2307 Elf32_Phdr *phdr =
2308 (Elf32_Phdr *)((unsigned char *) linker_addr + elf_hdr->e_phoff);
2309
2310 soinfo linker_so;
2311 memset(&linker_so, 0, sizeof(soinfo));
2312
2313 linker_so.base = linker_addr;
2314 linker_so.dynamic = (unsigned *) -1;
2315 linker_so.phdr = phdr;
2316 linker_so.phnum = elf_hdr->e_phnum;
2317 linker_so.flags |= FLAG_LINKER;
2318 linker_so.wrprotect_start = 0xffffffff;
2319 linker_so.wrprotect_end = 0;
2320 linker_so.gnu_relro_start = 0;
2321 linker_so.gnu_relro_len = 0;
2322
2323 if (link_image(&linker_so, 0)) {
2324 // It would be nice to print an error message, but if the linker
2325 // can't link itself, there's no guarantee that we'll be able to
2326 // call write() (because it involves a GOT reference).
2327 //
2328 // This situation should never occur unless the linker itself
2329 // is corrupt.
2330 exit(-1);
2331 }
2332
2333 // We have successfully fixed our own relocations. It's safe to run
2334 // the main part of the linker now.
2335 return __linker_init_post_relocation(elfdata);
2336 }