1 /* Handle SVR4 shared libraries for GDB, the GNU Debugger.
2
3 Copyright (C) 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000,
4 2001, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
5 Free Software Foundation, Inc.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23
24 #include "elf/external.h"
25 #include "elf/common.h"
26 #include "elf/mips.h"
27
28 #include "symtab.h"
29 #include "bfd.h"
30 #include "symfile.h"
31 #include "objfiles.h"
32 #include "gdbcore.h"
33 #include "target.h"
34 #include "inferior.h"
35 #include "regcache.h"
36 #include "gdbthread.h"
37 #include "observer.h"
38
39 #include "gdb_assert.h"
40
41 #include "solist.h"
42 #include "solib.h"
43 #include "solib-svr4.h"
44
45 #include "bfd-target.h"
46 #include "elf-bfd.h"
47 #include "exec.h"
48 #include "auxv.h"
49 #include "exceptions.h"
50
51 static struct link_map_offsets *svr4_fetch_link_map_offsets (void);
52 static int svr4_have_link_map_offsets (void);
53 static void svr4_relocate_main_executable (void);
54
55 /* Link map info to include in an allocated so_list entry. */
56
57 struct lm_info
58 {
59 /* Pointer to copy of link map from inferior. The type is gdb_byte *
60 rather than void *, so that we may use byte offsets to find the
61 various fields without the need for a cast. */
62 gdb_byte *lm;
63
64 /* Amount by which addresses in the binary should be relocated to
65 match the inferior. This could most often be taken directly
66 from lm, but when prelinking is involved and the prelink base
67 address changes, we may need a different offset; we want to
68 warn about the difference and compute it only once. */
69 CORE_ADDR l_addr;
70
71 /* The target location of lm. */
72 CORE_ADDR lm_addr;
73 };
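/* For reference, the LM buffer above caches the inferior's `struct
   link_map'.  A rough sketch of that structure as commonly declared in
   SVR4/glibc <link.h> follows; the exact layout and any extra private
   members vary per target and ABI, which is why the lm_* accessors
   below go through svr4_fetch_link_map_offsets instead of a host-side
   declaration like this one.  */
#if 0
struct link_map
  {
    ElfW(Addr) l_addr;		/* Difference between the addresses in the
				   ELF file and the addresses in memory.  */
    char *l_name;		/* Absolute file name of the object.  */
    ElfW(Dyn) *l_ld;		/* Address of the object's dynamic section.  */
    struct link_map *l_next, *l_prev;	/* Chain of loaded objects.  */
  };
#endif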
74
75 /* On SVR4 systems, a list of symbols in the dynamic linker where
76 GDB can try to place a breakpoint to monitor shared library
77 events.
78
79 If none of these symbols are found, or other errors occur, then
80 SVR4 systems will fall back to using a symbol as the "startup
81 mapping complete" breakpoint address. */
82
83 static const char * const solib_break_names[] =
84 {
85 "r_debug_state",
86 "_r_debug_state",
87 "_dl_debug_state",
88 "rtld_db_dlactivity",
89 "__dl_rtld_db_dlactivity",
90 "_rtld_debug_state",
91
92 NULL
93 };
94
95 static const char * const bkpt_names[] =
96 {
97 "_start",
98 "__start",
99 "main",
100 NULL
101 };
102
103 static const char * const main_name_list[] =
104 {
105 "main_$main",
106 NULL
107 };
108
109 /* Return non-zero if GDB_SO_NAME and INFERIOR_SO_NAME represent
110 the same shared library. */
111
112 static int
113 svr4_same_1 (const char *gdb_so_name, const char *inferior_so_name)
114 {
115 if (strcmp (gdb_so_name, inferior_so_name) == 0)
116 return 1;
117
118 /* On Solaris, when starting an inferior we think the dynamic linker is
119 /usr/lib/ld.so.1, but later on the table of loaded shared libraries
120 contains /lib/ld.so.1. Sometimes one file is a link to the other;
121 sometimes they have identical content but are not linked to each
122 other. We don't restrict this check to Solaris, but the chances
123 of running into this situation elsewhere are very low. */
124 if (strcmp (gdb_so_name, "/usr/lib/ld.so.1") == 0
125 && strcmp (inferior_so_name, "/lib/ld.so.1") == 0)
126 return 1;
127
128 /* Similarly, we observed the same issue with sparc64, but with
129 different locations. */
130 if (strcmp (gdb_so_name, "/usr/lib/sparcv9/ld.so.1") == 0
131 && strcmp (inferior_so_name, "/lib/sparcv9/ld.so.1") == 0)
132 return 1;
133
134 return 0;
135 }
136
137 static int
138 svr4_same (struct so_list *gdb, struct so_list *inferior)
139 {
140 return (svr4_same_1 (gdb->so_original_name, inferior->so_original_name));
141 }
142
143 /* Link map access functions. */
144
145 static CORE_ADDR
146 lm_addr_from_link_map (struct so_list *so)
147 {
148 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
149 struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
150
151 return extract_typed_address (so->lm_info->lm + lmo->l_addr_offset,
152 ptr_type);
153 }
154
155 static int
156 has_lm_dynamic_from_link_map (void)
157 {
158 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
159
160 return lmo->l_ld_offset >= 0;
161 }
162
163 static CORE_ADDR
164 lm_dynamic_from_link_map (struct so_list *so)
165 {
166 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
167 struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
168
169 return extract_typed_address (so->lm_info->lm + lmo->l_ld_offset,
170 ptr_type);
171 }
172
173 static CORE_ADDR
174 lm_addr_check (struct so_list *so, bfd *abfd)
175 {
176 if (so->lm_info->l_addr == (CORE_ADDR)-1)
177 {
178 struct bfd_section *dyninfo_sect;
179 CORE_ADDR l_addr, l_dynaddr, dynaddr;
180
181 l_addr = lm_addr_from_link_map (so);
182
183 if (! abfd || ! has_lm_dynamic_from_link_map ())
184 goto set_addr;
185
186 l_dynaddr = lm_dynamic_from_link_map (so);
187
188 dyninfo_sect = bfd_get_section_by_name (abfd, ".dynamic");
189 if (dyninfo_sect == NULL)
190 goto set_addr;
191
192 dynaddr = bfd_section_vma (abfd, dyninfo_sect);
193
194 if (dynaddr + l_addr != l_dynaddr)
195 {
196 CORE_ADDR align = 0x1000;
197 CORE_ADDR minpagesize = align;
198
199 if (bfd_get_flavour (abfd) == bfd_target_elf_flavour)
200 {
201 Elf_Internal_Ehdr *ehdr = elf_tdata (abfd)->elf_header;
202 Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
203 int i;
204
205 align = 1;
206
207 for (i = 0; i < ehdr->e_phnum; i++)
208 if (phdr[i].p_type == PT_LOAD && phdr[i].p_align > align)
209 align = phdr[i].p_align;
210
211 minpagesize = get_elf_backend_data (abfd)->minpagesize;
212 }
213
214 /* Turn it into a mask. */
215 align--;
216
217 /* If the changes match the alignment requirements, we
218 assume we're using a core file that was generated by the
219 same binary, just prelinked with a different base offset.
220 If it doesn't match, we may have a different binary, the
221 same binary with the dynamic table loaded at an unrelated
222 location, or anything, really. To avoid regressions,
223 don't adjust the base offset in the latter case, although
224 odds are that, if things really changed, debugging won't
225 quite work.
226
227 One might rather expect the condition
228 ((l_addr & align) == 0 && ((l_dynaddr - dynaddr) & align) == 0)
229 but the one below is relaxed for PPC. The PPC kernel supports
230 either 4k or 64k page sizes. To be prepared for 64k pages,
231 PPC ELF files are built using an alignment requirement of 64k.
232 However, when running on a kernel supporting 4k pages, the memory
233 mapping of the library may not actually happen on a 64k boundary!
234
235 (In the usual case where (l_addr & align) == 0, this check is
236 equivalent to the possibly expected check above.)
237
238 Even on PPC it must be zero-aligned at least for MINPAGESIZE. */
239
240 l_addr = l_dynaddr - dynaddr;
241
242 if ((l_addr & (minpagesize - 1)) == 0
243 && (l_addr & align) == ((l_dynaddr - dynaddr) & align))
244 {
245 if (info_verbose)
246 printf_unfiltered (_("Using PIC (Position Independent Code) "
247 "prelink displacement %s for \"%s\".\n"),
248 paddress (target_gdbarch, l_addr),
249 so->so_name);
250 }
251 else
252 {
253 /* There is no way to verify the library file matches. prelink
254 can, during prelinking of an unprelinked file (or unprelinking
255 of a prelinked file), shift the DYNAMIC segment by an arbitrary
256 offset without any page size alignment. There is also no way to
257 fetch the ELF header and/or Program Headers for a limited
258 verification that they match. One could verify the DYNAMIC
259 segment instead. Still, the found address is the best
260 one GDB could find. */
261
262 warning (_(".dynamic section for \"%s\" "
263 "is not at the expected address "
264 "(wrong library or version mismatch?)"), so->so_name);
265 }
266 }
267
268 set_addr:
269 so->lm_info->l_addr = l_addr;
270 }
271
272 return so->lm_info->l_addr;
273 }
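/* A worked example of the adjustment above, with made-up numbers: say
   the file's .dynamic section has VMA 0x600e28 (DYNADDR), while the
   inferior's link map reports l_addr == 0 and l_ld == 0x7f00600e28
   (L_DYNADDR).  Since DYNADDR + L_ADDR != L_DYNADDR, the displacement
   is recomputed as L_DYNADDR - DYNADDR == 0x7f00000000; its low bits
   are zero, so the verbose "prelink displacement" message is printed.
   Had l_ld read 0x7f00600f00 instead, the recomputed value
   0x7f000000d8 would fail the alignment test and the "wrong library or
   version mismatch?" warning would be issued.  Either way, with the
   code as written the recomputed value is what ends up cached in
   lm_info->l_addr.  */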
274
275 static CORE_ADDR
276 lm_next (struct so_list *so)
277 {
278 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
279 struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
280
281 return extract_typed_address (so->lm_info->lm + lmo->l_next_offset,
282 ptr_type);
283 }
284
285 static CORE_ADDR
286 lm_prev (struct so_list *so)
287 {
288 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
289 struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
290
291 return extract_typed_address (so->lm_info->lm + lmo->l_prev_offset,
292 ptr_type);
293 }
294
295 static CORE_ADDR
296 lm_name (struct so_list *so)
297 {
298 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
299 struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
300
301 return extract_typed_address (so->lm_info->lm + lmo->l_name_offset,
302 ptr_type);
303 }
304
305 static int
306 ignore_first_link_map_entry (struct so_list *so)
307 {
308 /* Assume that everything is a library if the dynamic loader was loaded
309 late by a static executable. */
310 if (exec_bfd && bfd_get_section_by_name (exec_bfd, ".dynamic") == NULL)
311 return 0;
312
313 return lm_prev (so) == 0;
314 }
315
316 /* Per pspace SVR4 specific data. */
317
318 struct svr4_info
319 {
320 CORE_ADDR debug_base; /* Base of dynamic linker structures. */
321
322 /* Validity flag for debug_loader_offset. */
323 int debug_loader_offset_p;
324
325 /* Load address for the dynamic linker, inferred. */
326 CORE_ADDR debug_loader_offset;
327
328 /* Name of the dynamic linker, valid if debug_loader_offset_p. */
329 char *debug_loader_name;
330
331 /* Load map address for the main executable. */
332 CORE_ADDR main_lm_addr;
333
334 CORE_ADDR interp_text_sect_low;
335 CORE_ADDR interp_text_sect_high;
336 CORE_ADDR interp_plt_sect_low;
337 CORE_ADDR interp_plt_sect_high;
338 };
339
340 /* Per-program-space data key. */
341 static const struct program_space_data *solib_svr4_pspace_data;
342
343 static void
344 svr4_pspace_data_cleanup (struct program_space *pspace, void *arg)
345 {
346 struct svr4_info *info;
347
348 info = program_space_data (pspace, solib_svr4_pspace_data);
349 xfree (info);
350 }
351
352 /* Get the current svr4 data. If none is found yet, add it now. This
353 function always returns a valid object. */
354
355 static struct svr4_info *
356 get_svr4_info (void)
357 {
358 struct svr4_info *info;
359
360 info = program_space_data (current_program_space, solib_svr4_pspace_data);
361 if (info != NULL)
362 return info;
363
364 info = XZALLOC (struct svr4_info);
365 set_program_space_data (current_program_space, solib_svr4_pspace_data, info);
366 return info;
367 }
368
369 /* Local function prototypes */
370
371 static int match_main (const char *);
372
373 /* Look up the value for a specific symbol.
374
375 An expensive way to look up the value of a single symbol for
376 bfd's that are only temporary anyway. This is used by the
377 shared library support to find the address of the debugger
378 notification routine in the shared library.
379
380 The returned symbol may be in a code or data section; functions
381 will normally be in a code section, but may be in a data section
382 if this architecture uses function descriptors.
383
384 Note that 0 is specifically allowed as an error return (no
385 such symbol). */
386
387 static CORE_ADDR
388 bfd_lookup_symbol (bfd *abfd, const char *symname)
389 {
390 long storage_needed;
391 asymbol *sym;
392 asymbol **symbol_table;
393 unsigned int number_of_symbols;
394 unsigned int i;
395 struct cleanup *back_to;
396 CORE_ADDR symaddr = 0;
397
398 storage_needed = bfd_get_symtab_upper_bound (abfd);
399
400 if (storage_needed > 0)
401 {
402 symbol_table = (asymbol **) xmalloc (storage_needed);
403 back_to = make_cleanup (xfree, symbol_table);
404 number_of_symbols = bfd_canonicalize_symtab (abfd, symbol_table);
405
406 for (i = 0; i < number_of_symbols; i++)
407 {
408 sym = *symbol_table++;
409 if (strcmp (sym->name, symname) == 0
410 && (sym->section->flags & (SEC_CODE | SEC_DATA)) != 0)
411 {
412 /* BFD symbols are section relative. */
413 symaddr = sym->value + sym->section->vma;
414 break;
415 }
416 }
417 do_cleanups (back_to);
418 }
419
420 if (symaddr)
421 return symaddr;
422
423 /* On FreeBSD, the dynamic linker is stripped by default. So we'll
424 have to check the dynamic symbol table too. */
425
426 storage_needed = bfd_get_dynamic_symtab_upper_bound (abfd);
427
428 if (storage_needed > 0)
429 {
430 symbol_table = (asymbol **) xmalloc (storage_needed);
431 back_to = make_cleanup (xfree, symbol_table);
432 number_of_symbols = bfd_canonicalize_dynamic_symtab (abfd, symbol_table);
433
434 for (i = 0; i < number_of_symbols; i++)
435 {
436 sym = *symbol_table++;
437
438 if (strcmp (sym->name, symname) == 0
439 && (sym->section->flags & (SEC_CODE | SEC_DATA)) != 0)
440 {
441 /* BFD symbols are section relative. */
442 symaddr = sym->value + sym->section->vma;
443 break;
444 }
445 }
446 do_cleanups (back_to);
447 }
448
449 return symaddr;
450 }
451
452
453 /* Read program header TYPE from inferior memory. The header is found
454 by scanning the OS auxiliary vector.
455
456 If TYPE == -1, return the program headers instead of the contents of
457 one program header.
458
459 Return a pointer to allocated memory holding the program header contents,
460 or NULL on failure. If successful, and unless P_SECT_SIZE is NULL, the
461 size of those contents is returned in P_SECT_SIZE. Likewise, the target
462 architecture size (32-bit or 64-bit) is returned in P_ARCH_SIZE. */
463
464 static gdb_byte *
465 read_program_header (int type, int *p_sect_size, int *p_arch_size)
466 {
467 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch);
468 CORE_ADDR at_phdr, at_phent, at_phnum;
469 int arch_size, sect_size;
470 CORE_ADDR sect_addr;
471 gdb_byte *buf;
472
473 /* Get required auxv elements from target. */
474 if (target_auxv_search (&current_target, AT_PHDR, &at_phdr) <= 0)
475 return 0;
476 if (target_auxv_search (&current_target, AT_PHENT, &at_phent) <= 0)
477 return 0;
478 if (target_auxv_search (&current_target, AT_PHNUM, &at_phnum) <= 0)
479 return 0;
480 if (!at_phdr || !at_phnum)
481 return 0;
482
483 /* Determine ELF architecture type. */
484 if (at_phent == sizeof (Elf32_External_Phdr))
485 arch_size = 32;
486 else if (at_phent == sizeof (Elf64_External_Phdr))
487 arch_size = 64;
488 else
489 return 0;
490
491 /* Find the requested segment. */
492 if (type == -1)
493 {
494 sect_addr = at_phdr;
495 sect_size = at_phent * at_phnum;
496 }
497 else if (arch_size == 32)
498 {
499 Elf32_External_Phdr phdr;
500 int i;
501
502 /* Search for requested PHDR. */
503 for (i = 0; i < at_phnum; i++)
504 {
505 if (target_read_memory (at_phdr + i * sizeof (phdr),
506 (gdb_byte *)&phdr, sizeof (phdr)))
507 return 0;
508
509 if (extract_unsigned_integer ((gdb_byte *)phdr.p_type,
510 4, byte_order) == type)
511 break;
512 }
513
514 if (i == at_phnum)
515 return 0;
516
517 /* Retrieve address and size. */
518 sect_addr = extract_unsigned_integer ((gdb_byte *)phdr.p_vaddr,
519 4, byte_order);
520 sect_size = extract_unsigned_integer ((gdb_byte *)phdr.p_memsz,
521 4, byte_order);
522 }
523 else
524 {
525 Elf64_External_Phdr phdr;
526 int i;
527
528 /* Search for requested PHDR. */
529 for (i = 0; i < at_phnum; i++)
530 {
531 if (target_read_memory (at_phdr + i * sizeof (phdr),
532 (gdb_byte *)&phdr, sizeof (phdr)))
533 return 0;
534
535 if (extract_unsigned_integer ((gdb_byte *)phdr.p_type,
536 4, byte_order) == type)
537 break;
538 }
539
540 if (i == at_phnum)
541 return 0;
542
543 /* Retrieve address and size. */
544 sect_addr = extract_unsigned_integer ((gdb_byte *)phdr.p_vaddr,
545 8, byte_order);
546 sect_size = extract_unsigned_integer ((gdb_byte *)phdr.p_memsz,
547 8, byte_order);
548 }
549
550 /* Read in requested program header. */
551 buf = xmalloc (sect_size);
552 if (target_read_memory (sect_addr, buf, sect_size))
553 {
554 xfree (buf);
555 return NULL;
556 }
557
558 if (p_arch_size)
559 *p_arch_size = arch_size;
560 if (p_sect_size)
561 *p_sect_size = sect_size;
562
563 return buf;
564 }
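/* For reference, the Elf32_External_Phdr parsed above stores every
   field as a raw byte array rather than a native integer, which is why
   each access goes through extract_unsigned_integer with an explicit
   byte order.  A rough sketch of its declaration (from BFD's
   include/elf/external.h):

       typedef struct {
	 unsigned char p_type[4];	-- segment type (PT_LOAD, PT_DYNAMIC, ...)
	 unsigned char p_offset[4];	-- segment file offset
	 unsigned char p_vaddr[4];	-- segment virtual address
	 unsigned char p_paddr[4];	-- segment physical address
	 unsigned char p_filesz[4];	-- segment size in file
	 unsigned char p_memsz[4];	-- segment size in memory
	 unsigned char p_flags[4];
	 unsigned char p_align[4];
       } Elf32_External_Phdr;

   The Elf64 variant widens the offset, address and size fields to 8
   bytes (and orders p_flags differently), hence the separate 32-bit
   and 64-bit branches above.  */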
565
566
567 /* Return program interpreter string. */
568 static gdb_byte *
569 find_program_interpreter (void)
570 {
571 gdb_byte *buf = NULL;
572
573 /* If we have an exec_bfd, use its section table. */
574 if (exec_bfd
575 && bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
576 {
577 struct bfd_section *interp_sect;
578
579 interp_sect = bfd_get_section_by_name (exec_bfd, ".interp");
580 if (interp_sect != NULL)
581 {
582 int sect_size = bfd_section_size (exec_bfd, interp_sect);
583
584 buf = xmalloc (sect_size);
585 bfd_get_section_contents (exec_bfd, interp_sect, buf, 0, sect_size);
586 }
587 }
588
589 /* If we didn't find it, use the target auxiliary vector. */
590 if (!buf)
591 buf = read_program_header (PT_INTERP, NULL, NULL);
592
593 return buf;
594 }
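/* The string returned above is the executable's PT_INTERP path, for
   example "/lib64/ld-linux-x86-64.so.2" on x86-64 GNU/Linux or
   "/usr/lib/ld.so.1" on Solaris (illustrative values only; the exact
   path depends on the target).  enable_break below opens that file to
   locate the dynamic linker's event-notification symbol.  */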
595
596
597 /* Scan for DYNTAG in .dynamic section of ABFD. If DYNTAG is found, 1 is
598 returned and the corresponding PTR is set. */
599
600 static int
601 scan_dyntag (int dyntag, bfd *abfd, CORE_ADDR *ptr)
602 {
603 int arch_size, step, sect_size;
604 long dyn_tag;
605 CORE_ADDR dyn_ptr, dyn_addr;
606 gdb_byte *bufend, *bufstart, *buf;
607 Elf32_External_Dyn *x_dynp_32;
608 Elf64_External_Dyn *x_dynp_64;
609 struct bfd_section *sect;
610 struct target_section *target_section;
611
612 if (abfd == NULL)
613 return 0;
614
615 if (bfd_get_flavour (abfd) != bfd_target_elf_flavour)
616 return 0;
617
618 arch_size = bfd_get_arch_size (abfd);
619 if (arch_size == -1)
620 return 0;
621
622 /* Find the start address of the .dynamic section. */
623 sect = bfd_get_section_by_name (abfd, ".dynamic");
624 if (sect == NULL)
625 return 0;
626
627 for (target_section = current_target_sections->sections;
628 target_section < current_target_sections->sections_end;
629 target_section++)
630 if (sect == target_section->the_bfd_section)
631 break;
632 if (target_section < current_target_sections->sections_end)
633 dyn_addr = target_section->addr;
634 else
635 {
636 /* ABFD may come from OBJFILE acting only as a symbol file without being
637 loaded into the target (see add_symbol_file_command). In this case
638 fall back to the file VMA address, without the possibility of
639 having the section relocated to its actual in-memory address. */
640
641 dyn_addr = bfd_section_vma (abfd, sect);
642 }
643
644 /* Read in .dynamic from the BFD. We will get the actual value
645 from memory later. */
646 sect_size = bfd_section_size (abfd, sect);
647 buf = bufstart = alloca (sect_size);
648 if (!bfd_get_section_contents (abfd, sect,
649 buf, 0, sect_size))
650 return 0;
651
652 /* Iterate over BUF and scan for DYNTAG. If found, set PTR and return. */
653 step = (arch_size == 32) ? sizeof (Elf32_External_Dyn)
654 : sizeof (Elf64_External_Dyn);
655 for (bufend = buf + sect_size;
656 buf < bufend;
657 buf += step)
658 {
659 if (arch_size == 32)
660 {
661 x_dynp_32 = (Elf32_External_Dyn *) buf;
662 dyn_tag = bfd_h_get_32 (abfd, (bfd_byte *) x_dynp_32->d_tag);
663 dyn_ptr = bfd_h_get_32 (abfd, (bfd_byte *) x_dynp_32->d_un.d_ptr);
664 }
665 else
666 {
667 x_dynp_64 = (Elf64_External_Dyn *) buf;
668 dyn_tag = bfd_h_get_64 (abfd, (bfd_byte *) x_dynp_64->d_tag);
669 dyn_ptr = bfd_h_get_64 (abfd, (bfd_byte *) x_dynp_64->d_un.d_ptr);
670 }
671 if (dyn_tag == DT_NULL)
672 return 0;
673 if (dyn_tag == dyntag)
674 {
675 /* If requested, try to read the runtime value of this .dynamic
676 entry. */
677 if (ptr)
678 {
679 struct type *ptr_type;
680 gdb_byte ptr_buf[8];
681 CORE_ADDR ptr_addr;
682
683 ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
684 ptr_addr = dyn_addr + (buf - bufstart) + arch_size / 8;
685 if (target_read_memory (ptr_addr, ptr_buf, arch_size / 8) == 0)
686 dyn_ptr = extract_typed_address (ptr_buf, ptr_type);
687 *ptr = dyn_ptr;
688 }
689 return 1;
690 }
691 }
692
693 return 0;
694 }
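/* Both scan_dyntag above and scan_dyntag_auxv below treat the .dynamic
   section as an array of tag/value pairs terminated by a DT_NULL entry.
   A rough sketch of one 32-bit entry (from BFD's include/elf/external.h,
   byte-array fields again):

       typedef struct {
	 unsigned char d_tag[4];	-- DT_DEBUG, DT_MIPS_RLD_MAP, DT_NULL, ...
	 union {
	   unsigned char d_val[4];	-- integer value
	   unsigned char d_ptr[4];	-- program virtual address
	 } d_un;
       } Elf32_External_Dyn;

   with 8-byte fields in the Elf64 variant.  Note that scan_dyntag
   additionally re-reads the value from target memory: for DT_DEBUG the
   on-disk value is zero, and the dynamic linker only fills in the real
   pointer at run time.  */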
695
696 /* Scan for DYNTAG in .dynamic section of the target's main executable,
697 found by consulting the OS auxiliary vector. If DYNTAG is found, 1 is
698 returned and the corresponding PTR is set. */
699
700 static int
701 scan_dyntag_auxv (int dyntag, CORE_ADDR *ptr)
702 {
703 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch);
704 int sect_size, arch_size, step;
705 long dyn_tag;
706 CORE_ADDR dyn_ptr;
707 gdb_byte *bufend, *bufstart, *buf;
708
709 /* Read in .dynamic section. */
710 buf = bufstart = read_program_header (PT_DYNAMIC, &sect_size, &arch_size);
711 if (!buf)
712 return 0;
713
714 /* Iterate over BUF and scan for DYNTAG. If found, set PTR and return. */
715 step = (arch_size == 32) ? sizeof (Elf32_External_Dyn)
716 : sizeof (Elf64_External_Dyn);
717 for (bufend = buf + sect_size;
718 buf < bufend;
719 buf += step)
720 {
721 if (arch_size == 32)
722 {
723 Elf32_External_Dyn *dynp = (Elf32_External_Dyn *) buf;
724
725 dyn_tag = extract_unsigned_integer ((gdb_byte *) dynp->d_tag,
726 4, byte_order);
727 dyn_ptr = extract_unsigned_integer ((gdb_byte *) dynp->d_un.d_ptr,
728 4, byte_order);
729 }
730 else
731 {
732 Elf64_External_Dyn *dynp = (Elf64_External_Dyn *) buf;
733
734 dyn_tag = extract_unsigned_integer ((gdb_byte *) dynp->d_tag,
735 8, byte_order);
736 dyn_ptr = extract_unsigned_integer ((gdb_byte *) dynp->d_un.d_ptr,
737 8, byte_order);
738 }
739 if (dyn_tag == DT_NULL)
740 break;
741
742 if (dyn_tag == dyntag)
743 {
744 if (ptr)
745 *ptr = dyn_ptr;
746
747 xfree (bufstart);
748 return 1;
749 }
750 }
751
752 xfree (bufstart);
753 return 0;
754 }
755
756 /* Locate the base address of dynamic linker structs for SVR4 ELF
757 targets.
758
759 For SVR4 ELF targets the address of the dynamic linker's runtime
760 structure is contained within the dynamic info section in the
761 executable file. The dynamic section is also mapped into the
762 inferior address space. Because the runtime loader fills in the
763 real address before starting the inferior, we have to read in the
764 dynamic info section from the inferior address space.
765 If there are any errors while trying to find the address, we
766 silently return 0, otherwise the found address is returned. */
767
768 static CORE_ADDR
769 elf_locate_base (void)
770 {
771 struct minimal_symbol *msymbol;
772 CORE_ADDR dyn_ptr;
773
774 /* Look for DT_MIPS_RLD_MAP first. MIPS executables use this
775 instead of DT_DEBUG, although they sometimes contain an unused
776 DT_DEBUG. */
777 if (scan_dyntag (DT_MIPS_RLD_MAP, exec_bfd, &dyn_ptr)
778 || scan_dyntag_auxv (DT_MIPS_RLD_MAP, &dyn_ptr))
779 {
780 struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
781 gdb_byte *pbuf;
782 int pbuf_size = TYPE_LENGTH (ptr_type);
783
784 pbuf = alloca (pbuf_size);
785 /* DT_MIPS_RLD_MAP contains a pointer to the address
786 of the dynamic link structure. */
787 if (target_read_memory (dyn_ptr, pbuf, pbuf_size))
788 return 0;
789 return extract_typed_address (pbuf, ptr_type);
790 }
791
792 /* Find DT_DEBUG. */
793 if (scan_dyntag (DT_DEBUG, exec_bfd, &dyn_ptr)
794 || scan_dyntag_auxv (DT_DEBUG, &dyn_ptr))
795 return dyn_ptr;
796
797 /* This may be a static executable. Look for the symbol
798 conventionally named _r_debug, as a last resort. */
799 msymbol = lookup_minimal_symbol ("_r_debug", NULL, symfile_objfile);
800 if (msymbol != NULL)
801 return SYMBOL_VALUE_ADDRESS (msymbol);
802
803 /* DT_DEBUG entry not found. */
804 return 0;
805 }
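/* The address computed above (whether via DT_MIPS_RLD_MAP, DT_DEBUG or
   the _r_debug symbol) is that of the dynamic linker's `struct r_debug'.
   A rough sketch of that structure as declared in glibc's <link.h>
   follows; GDB reads its fields through link_map_offsets, and some
   systems (Solaris, for instance) append further members such as
   r_ldsomap.  */
#if 0
struct r_debug
  {
    int r_version;		/* Version number of this protocol.  */
    struct link_map *r_map;	/* Head of the chain of loaded objects.  */
    ElfW(Addr) r_brk;		/* Address the debugger should breakpoint
				   at to be notified of mapping changes.  */
    enum
      {
	RT_CONSISTENT,		/* Mapping change is complete.  */
	RT_ADD,			/* Beginning to add a new object.  */
	RT_DELETE		/* Beginning to remove an object.  */
      } r_state;
    ElfW(Addr) r_ldbase;	/* Base address the linker is loaded at.  */
  };
#endif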
806
807 /* Locate the base address of dynamic linker structs.
808
809 For both the SunOS and SVR4 shared library implementations, if the
810 inferior executable has been linked dynamically, there is a single
811 address somewhere in the inferior's data space which is the key to
812 locating all of the dynamic linker's runtime structures. This
813 address is the value of the debug base symbol. The job of this
814 function is to find and return that address, or to return 0 if there
815 is no such address (the executable is statically linked for example).
816
817 For SunOS, the job is almost trivial, since the dynamic linker and
818 all of its structures are statically linked to the executable at
819 link time. Thus the symbol for the address we are looking for has
820 already been added to the minimal symbol table for the executable's
821 objfile at the time the symbol file's symbols were read, and all we
822 have to do is look it up there. Note that we explicitly do NOT want
823 to find the copies in the shared library.
824
825 The SVR4 version is a bit more complicated because the address
826 is contained somewhere in the dynamic info section. We have to go
827 to a lot more work to discover the address of the debug base symbol.
828 Because of this complexity, we cache the value we find and return that
829 value on subsequent invocations. Note there is no copy in the
830 executable symbol tables. */
831
832 static CORE_ADDR
833 locate_base (struct svr4_info *info)
834 {
835 /* Check to see if we have a currently valid address, and if so, avoid
836 doing all this work again and just return the cached address. If
837 we have no cached address, try to locate it in the dynamic info
838 section for ELF executables. There's no point in doing any of this
839 though if we don't have some link map offsets to work with. */
840
841 if (info->debug_base == 0 && svr4_have_link_map_offsets ())
842 info->debug_base = elf_locate_base ();
843 return info->debug_base;
844 }
845
846 /* Find the first element in the inferior's dynamic link map, and
847 return its address in the inferior. Return zero if the address
848 could not be determined.
849
850 FIXME: Perhaps we should validate the info somehow, perhaps by
851 checking r_version for a known version number, or r_state for
852 RT_CONSISTENT. */
853
854 static CORE_ADDR
855 solib_svr4_r_map (struct svr4_info *info)
856 {
857 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
858 struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
859 CORE_ADDR addr = 0;
860 volatile struct gdb_exception ex;
861
862 TRY_CATCH (ex, RETURN_MASK_ERROR)
863 {
864 addr = read_memory_typed_address (info->debug_base + lmo->r_map_offset,
865 ptr_type);
866 }
867 exception_print (gdb_stderr, ex);
868 return addr;
869 }
870
871 /* Find r_brk from the inferior's debug base. */
872
873 static CORE_ADDR
874 solib_svr4_r_brk (struct svr4_info *info)
875 {
876 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
877 struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
878
879 return read_memory_typed_address (info->debug_base + lmo->r_brk_offset,
880 ptr_type);
881 }
882
883 /* Find the link map for the dynamic linker (if it is not in the
884 normal list of loaded shared objects). */
885
886 static CORE_ADDR
887 solib_svr4_r_ldsomap (struct svr4_info *info)
888 {
889 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
890 struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
891 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch);
892 ULONGEST version;
893
894 /* Check version, and return zero if `struct r_debug' doesn't have
895 the r_ldsomap member. */
896 version
897 = read_memory_unsigned_integer (info->debug_base + lmo->r_version_offset,
898 lmo->r_version_size, byte_order);
899 if (version < 2 || lmo->r_ldsomap_offset == -1)
900 return 0;
901
902 return read_memory_typed_address (info->debug_base + lmo->r_ldsomap_offset,
903 ptr_type);
904 }
905
906 /* On Solaris systems with some versions of the dynamic linker,
907 ld.so's l_name pointer points to the SONAME in the string table
908 rather than into writable memory. So that GDB can find shared
909 libraries when loading a core file generated by gcore, ensure that
910 memory areas containing the l_name string are saved in the core
911 file. */
912
913 static int
914 svr4_keep_data_in_core (CORE_ADDR vaddr, unsigned long size)
915 {
916 struct svr4_info *info;
917 CORE_ADDR ldsomap;
918 struct so_list *new;
919 struct cleanup *old_chain;
920 struct link_map_offsets *lmo;
921 CORE_ADDR name_lm;
922
923 info = get_svr4_info ();
924
925 info->debug_base = 0;
926 locate_base (info);
927 if (!info->debug_base)
928 return 0;
929
930 ldsomap = solib_svr4_r_ldsomap (info);
931 if (!ldsomap)
932 return 0;
933
934 lmo = svr4_fetch_link_map_offsets ();
935 new = XZALLOC (struct so_list);
936 old_chain = make_cleanup (xfree, new);
937 new->lm_info = xmalloc (sizeof (struct lm_info));
938 make_cleanup (xfree, new->lm_info);
939 new->lm_info->l_addr = (CORE_ADDR)-1;
940 new->lm_info->lm_addr = ldsomap;
941 new->lm_info->lm = xzalloc (lmo->link_map_size);
942 make_cleanup (xfree, new->lm_info->lm);
943 read_memory (ldsomap, new->lm_info->lm, lmo->link_map_size);
944 name_lm = lm_name (new);
945 do_cleanups (old_chain);
946
947 return (name_lm >= vaddr && name_lm < vaddr + size);
948 }
949
950 /* Implement the "open_symbol_file_object" target_so_ops method.
951
952 If no open symbol file, attempt to locate and open the main symbol
953 file. On SVR4 systems, this is the first link map entry. If its
954 name is here, we can open it. Useful when attaching to a process
955 without first loading its symbol file. */
956
957 static int
958 open_symbol_file_object (void *from_ttyp)
959 {
960 CORE_ADDR lm, l_name;
961 char *filename;
962 int errcode;
963 int from_tty = *(int *)from_ttyp;
964 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
965 struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
966 int l_name_size = TYPE_LENGTH (ptr_type);
967 gdb_byte *l_name_buf = xmalloc (l_name_size);
968 struct cleanup *cleanups = make_cleanup (xfree, l_name_buf);
969 struct svr4_info *info = get_svr4_info ();
970
971 if (symfile_objfile)
972 if (!query (_("Attempt to reload symbols from process? ")))
973 {
974 do_cleanups (cleanups);
975 return 0;
976 }
977
978 /* Always locate the debug struct, in case it has moved. */
979 info->debug_base = 0;
980 if (locate_base (info) == 0)
981 {
982 do_cleanups (cleanups);
983 return 0; /* failed somehow... */
984 }
985
986 /* First link map member should be the executable. */
987 lm = solib_svr4_r_map (info);
988 if (lm == 0)
989 {
990 do_cleanups (cleanups);
991 return 0; /* failed somehow... */
992 }
993
994 /* Read address of name from target memory to GDB. */
995 read_memory (lm + lmo->l_name_offset, l_name_buf, l_name_size);
996
997 /* Convert the address to host format. */
998 l_name = extract_typed_address (l_name_buf, ptr_type);
999
1000 if (l_name == 0)
1001 {
1002 do_cleanups (cleanups);
1003 return 0; /* No filename. */
1004 }
1005
1006 /* Now fetch the filename from target memory. */
1007 target_read_string (l_name, &filename, SO_NAME_MAX_PATH_SIZE - 1, &errcode);
1008 make_cleanup (xfree, filename);
1009
1010 if (errcode)
1011 {
1012 warning (_("failed to read exec filename from attached file: %s"),
1013 safe_strerror (errcode));
1014 do_cleanups (cleanups);
1015 return 0;
1016 }
1017
1018 /* Have a pathname: read the symbol file. */
1019 symbol_file_add_main (filename, from_tty);
1020
1021 do_cleanups (cleanups);
1022 return 1;
1023 }
1024
1025 /* If no shared library information is available from the dynamic
1026 linker, build a fallback list from other sources. */
1027
1028 static struct so_list *
1029 svr4_default_sos (void)
1030 {
1031 struct svr4_info *info = get_svr4_info ();
1032
1033 struct so_list *head = NULL;
1034 struct so_list **link_ptr = &head;
1035
1036 if (info->debug_loader_offset_p)
1037 {
1038 struct so_list *new = XZALLOC (struct so_list);
1039
1040 new->lm_info = xmalloc (sizeof (struct lm_info));
1041
1042 /* Nothing will ever check the cached copy of the link
1043 map if we set l_addr. */
1044 new->lm_info->l_addr = info->debug_loader_offset;
1045 new->lm_info->lm_addr = 0;
1046 new->lm_info->lm = NULL;
1047
1048 strncpy (new->so_name, info->debug_loader_name,
1049 SO_NAME_MAX_PATH_SIZE - 1);
1050 new->so_name[SO_NAME_MAX_PATH_SIZE - 1] = '\0';
1051 strcpy (new->so_original_name, new->so_name);
1052
1053 *link_ptr = new;
1054 link_ptr = &new->next;
1055 }
1056
1057 return head;
1058 }
1059
1060 /* Implement the "current_sos" target_so_ops method. */
1061
1062 static struct so_list *
1063 svr4_current_sos (void)
1064 {
1065 CORE_ADDR lm, prev_lm;
1066 struct so_list *head = 0;
1067 struct so_list **link_ptr = &head;
1068 CORE_ADDR ldsomap = 0;
1069 struct svr4_info *info;
1070
1071 info = get_svr4_info ();
1072
1073 /* Always locate the debug struct, in case it has moved. */
1074 info->debug_base = 0;
1075 locate_base (info);
1076
1077 /* If we can't find the dynamic linker's base structure, this
1078 must not be a dynamically linked executable. Hmm. */
1079 if (! info->debug_base)
1080 return svr4_default_sos ();
1081
1082 /* Walk the inferior's link map list, and build our list of
1083 `struct so_list' nodes. */
1084 prev_lm = 0;
1085 lm = solib_svr4_r_map (info);
1086
1087 while (lm)
1088 {
1089 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
1090 struct so_list *new = XZALLOC (struct so_list);
1091 struct cleanup *old_chain = make_cleanup (xfree, new);
1092 CORE_ADDR next_lm;
1093
1094 new->lm_info = xmalloc (sizeof (struct lm_info));
1095 make_cleanup (xfree, new->lm_info);
1096
1097 new->lm_info->l_addr = (CORE_ADDR)-1;
1098 new->lm_info->lm_addr = lm;
1099 new->lm_info->lm = xzalloc (lmo->link_map_size);
1100 make_cleanup (xfree, new->lm_info->lm);
1101
1102 read_memory (lm, new->lm_info->lm, lmo->link_map_size);
1103
1104 next_lm = lm_next (new);
1105
1106 if (lm_prev (new) != prev_lm)
1107 {
1108 warning (_("Corrupted shared library list"));
1109 free_so (new);
1110 next_lm = 0;
1111 }
1112
1113 /* For SVR4 versions, the first entry in the link map is for the
1114 inferior executable, so we must ignore it. For some versions of
1115 SVR4, it has no name. For others (Solaris 2.3 for example), it
1116 does have a name, so we can no longer use a missing name to
1117 decide when to ignore it. */
1118 else if (ignore_first_link_map_entry (new) && ldsomap == 0)
1119 {
1120 info->main_lm_addr = new->lm_info->lm_addr;
1121 free_so (new);
1122 }
1123 else
1124 {
1125 int errcode;
1126 char *buffer;
1127
1128 /* Extract this shared object's name. */
1129 target_read_string (lm_name (new), &buffer,
1130 SO_NAME_MAX_PATH_SIZE - 1, &errcode);
1131 if (errcode != 0)
1132 warning (_("Can't read pathname for load map: %s."),
1133 safe_strerror (errcode));
1134 else
1135 {
1136 strncpy (new->so_name, buffer, SO_NAME_MAX_PATH_SIZE - 1);
1137 new->so_name[SO_NAME_MAX_PATH_SIZE - 1] = '\0';
1138 strcpy (new->so_original_name, new->so_name);
1139 }
1140 xfree (buffer);
1141
1142 /* If this entry has no name, or its name matches the name
1143 for the main executable, don't include it in the list. */
1144 if (! new->so_name[0]
1145 || match_main (new->so_name))
1146 free_so (new);
1147 else
1148 {
1149 new->next = 0;
1150 *link_ptr = new;
1151 link_ptr = &new->next;
1152 }
1153 }
1154
1155 prev_lm = lm;
1156 lm = next_lm;
1157
1158 /* On Solaris, the dynamic linker is not in the normal list of
1159 shared objects, so make sure we pick it up too. Having
1160 symbol information for the dynamic linker is quite crucial
1161 for skipping dynamic linker resolver code. */
1162 if (lm == 0 && ldsomap == 0)
1163 {
1164 lm = ldsomap = solib_svr4_r_ldsomap (info);
1165 prev_lm = 0;
1166 }
1167
1168 discard_cleanups (old_chain);
1169 }
1170
1171 if (head == NULL)
1172 return svr4_default_sos ();
1173
1174 return head;
1175 }
1176
1177 /* Get the address of the link_map for a given OBJFILE. */
1178
1179 CORE_ADDR
1180 svr4_fetch_objfile_link_map (struct objfile *objfile)
1181 {
1182 struct so_list *so;
1183 struct svr4_info *info = get_svr4_info ();
1184
1185 /* Cause svr4_current_sos() to be run if it hasn't been already. */
1186 if (info->main_lm_addr == 0)
1187 solib_add (NULL, 0, &current_target, auto_solib_add);
1188
1189 /* svr4_current_sos() will set main_lm_addr for the main executable. */
1190 if (objfile == symfile_objfile)
1191 return info->main_lm_addr;
1192
1193 /* The other link map addresses may be found by examining the list
1194 of shared libraries. */
1195 for (so = master_so_list (); so; so = so->next)
1196 if (so->objfile == objfile)
1197 return so->lm_info->lm_addr;
1198
1199 /* Not found! */
1200 return 0;
1201 }
1202
1203 /* On some systems, the only way to recognize the link map entry for
1204 the main executable file is by looking at its name. Return
1205 non-zero iff SONAME matches one of the known main executable names. */
1206
1207 static int
1208 match_main (const char *soname)
1209 {
1210 const char * const *mainp;
1211
1212 for (mainp = main_name_list; *mainp != NULL; mainp++)
1213 {
1214 if (strcmp (soname, *mainp) == 0)
1215 return (1);
1216 }
1217
1218 return (0);
1219 }
1220
1221 /* Return 1 if PC lies in the dynamic symbol resolution code of the
1222 SVR4 run time loader. */
1223
1224 int
1225 svr4_in_dynsym_resolve_code (CORE_ADDR pc)
1226 {
1227 struct svr4_info *info = get_svr4_info ();
1228
1229 return ((pc >= info->interp_text_sect_low
1230 && pc < info->interp_text_sect_high)
1231 || (pc >= info->interp_plt_sect_low
1232 && pc < info->interp_plt_sect_high)
1233 || in_plt_section (pc, NULL)
1234 || in_gnu_ifunc_stub (pc));
1235 }
1236
1237 /* Given an executable's ABFD and target, compute the entry-point
1238 address. */
1239
1240 static CORE_ADDR
1241 exec_entry_point (struct bfd *abfd, struct target_ops *targ)
1242 {
1243 /* KevinB wrote ... for most targets, the address returned by
1244 bfd_get_start_address() is the entry point for the start
1245 function. But, for some targets, bfd_get_start_address() returns
1246 the address of a function descriptor from which the entry point
1247 address may be extracted. This address is extracted by
1248 gdbarch_convert_from_func_ptr_addr(). The method
1249 gdbarch_convert_from_func_ptr_addr() is merely the identity
1250 function for targets which don't use function descriptors. */
1251 return gdbarch_convert_from_func_ptr_addr (target_gdbarch,
1252 bfd_get_start_address (abfd),
1253 targ);
1254 }
1255
1256 /* Arrange for dynamic linker to hit breakpoint.
1257
1258 Both the SunOS and the SVR4 dynamic linkers have, as part of their
1259 debugger interface, support for arranging for the inferior to hit
1260 a breakpoint after mapping in the shared libraries. This function
1261 enables that breakpoint.
1262
1263 For SunOS, there is a special flag location (in_debugger) which we
1264 set to 1. When the dynamic linker sees this flag set, it will set
1265 a breakpoint at a location known only to itself, after saving the
1266 original contents of that place and the breakpoint address itself,
1267 in its own internal structures. When we resume the inferior, it
1268 will eventually take a SIGTRAP when it runs into the breakpoint.
1269 We handle this (in a different place) by restoring the contents of
1270 the breakpointed location (which is only known after it stops),
1271 chasing around to locate the shared libraries that have been
1272 loaded, then resuming.
1273
1274 For SVR4, the debugger interface structure contains a member (r_brk)
1275 which is statically initialized at the time the shared library is
1276 built, to the offset of a function (_r_debug_state) which is
1277 guaranteed to be called once before mapping in a library, and again when
1278 the mapping is complete. At the time we are examining this member,
1279 it contains only the unrelocated offset of the function, so we have
1280 to do our own relocation. Later, when the dynamic linker actually
1281 runs, it relocates r_brk to be the actual address of _r_debug_state().
1282
1283 The debugger interface structure also contains an enumeration which
1284 is set to either RT_ADD or RT_DELETE prior to changing the mapping,
1285 depending upon whether or not the library is being mapped or unmapped,
1286 and then set to RT_CONSISTENT after the library is mapped/unmapped. */
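/* As a concrete illustration of the interpreter-based path below (with
   made-up numbers): if the dynamic linker was mapped at
   LOAD_ADDR == 0x7ffff7dd7000 and bfd_lookup_symbol resolves
   _dl_debug_state to the unrelocated address 0x10970 within the
   interpreter's image, the solib event breakpoint is planted at
   0x7ffff7dd7000 + 0x10970 == 0x7ffff7de7970.  */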
1287
1288 static int
1289 enable_break (struct svr4_info *info, int from_tty)
1290 {
1291 struct minimal_symbol *msymbol;
1292 const char * const *bkpt_namep;
1293 asection *interp_sect;
1294 gdb_byte *interp_name;
1295 CORE_ADDR sym_addr;
1296
1297 info->interp_text_sect_low = info->interp_text_sect_high = 0;
1298 info->interp_plt_sect_low = info->interp_plt_sect_high = 0;
1299
1300 /* If we already have a shared library list in the target, and
1301 r_debug contains r_brk, set the breakpoint there - this should
1302 mean r_brk has already been relocated. Assume the dynamic linker
1303 is the object containing r_brk. */
1304
1305 solib_add (NULL, from_tty, &current_target, auto_solib_add);
1306 sym_addr = 0;
1307 if (info->debug_base && solib_svr4_r_map (info) != 0)
1308 sym_addr = solib_svr4_r_brk (info);
1309
1310 if (sym_addr != 0)
1311 {
1312 struct obj_section *os;
1313
1314 sym_addr = gdbarch_addr_bits_remove
1315 (target_gdbarch, gdbarch_convert_from_func_ptr_addr (target_gdbarch,
1316 sym_addr,
1317 &current_target));
1318
1319 /* On at least some versions of Solaris there's a dynamic relocation
1320 on _r_debug.r_brk and SYM_ADDR may not be relocated yet, e.g., if
1321 we get control before the dynamic linker has self-relocated.
1322 Check if SYM_ADDR is in a known section; if it is, assume we can
1323 trust its value. This is just a heuristic though, it could go away
1324 or be replaced if it's getting in the way.
1325
1326 On ARM we need to know whether the ISA of rtld_db_dlactivity (or
1327 however it's spelled in your particular system) is ARM or Thumb.
1328 That knowledge is encoded in the address, if it's Thumb the low bit
1329 is 1. However, we've stripped that info above and it's not clear
1330 what all the consequences are of passing a non-addr_bits_remove'd
1331 address to create_solib_event_breakpoint. The call to
1332 find_pc_section verifies we know about the address and have some
1333 hope of computing the right kind of breakpoint to use (via
1334 symbol info). It does mean that GDB needs to be pointed at a
1335 non-stripped version of the dynamic linker in order to obtain
1336 information it already knows about. Sigh. */
1337
1338 os = find_pc_section (sym_addr);
1339 if (os != NULL)
1340 {
1341 /* Record the relocated start and end address of the dynamic linker
1342 text and plt section for svr4_in_dynsym_resolve_code. */
1343 bfd *tmp_bfd;
1344 CORE_ADDR load_addr;
1345
1346 tmp_bfd = os->objfile->obfd;
1347 load_addr = ANOFFSET (os->objfile->section_offsets,
1348 os->objfile->sect_index_text);
1349
1350 interp_sect = bfd_get_section_by_name (tmp_bfd, ".text");
1351 if (interp_sect)
1352 {
1353 info->interp_text_sect_low =
1354 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
1355 info->interp_text_sect_high =
1356 info->interp_text_sect_low
1357 + bfd_section_size (tmp_bfd, interp_sect);
1358 }
1359 interp_sect = bfd_get_section_by_name (tmp_bfd, ".plt");
1360 if (interp_sect)
1361 {
1362 info->interp_plt_sect_low =
1363 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
1364 info->interp_plt_sect_high =
1365 info->interp_plt_sect_low
1366 + bfd_section_size (tmp_bfd, interp_sect);
1367 }
1368
1369 create_solib_event_breakpoint (target_gdbarch, sym_addr);
1370 return 1;
1371 }
1372 }
1373
1374 /* Find the program interpreter; if not found, warn the user and drop
1375 into the old breakpoint at symbol code. */
1376 interp_name = find_program_interpreter ();
1377 if (interp_name)
1378 {
1379 CORE_ADDR load_addr = 0;
1380 int load_addr_found = 0;
1381 int loader_found_in_list = 0;
1382 struct so_list *so;
1383 bfd *tmp_bfd = NULL;
1384 struct target_ops *tmp_bfd_target;
1385 volatile struct gdb_exception ex;
1386
1387 sym_addr = 0;
1388
1389 /* Now we need to figure out where the dynamic linker was
1390 loaded so that we can load its symbols and place a breakpoint
1391 in the dynamic linker itself.
1392
1393 This address is stored on the stack. However, I've been unable
1394 to find any magic formula to find it for Solaris (appears to
1395 be trivial on GNU/Linux). Therefore, we have to try an alternate
1396 mechanism to find the dynamic linker's base address. */
1397
1398 TRY_CATCH (ex, RETURN_MASK_ALL)
1399 {
1400 tmp_bfd = solib_bfd_open (interp_name);
1401 }
1402 if (tmp_bfd == NULL)
1403 goto bkpt_at_symbol;
1404
1405 /* Now convert the TMP_BFD into a target. That way target, as
1406 well as BFD operations can be used. Note that closing the
1407 target will also close the underlying bfd. */
1408 tmp_bfd_target = target_bfd_reopen (tmp_bfd);
1409
1410 /* On a running target, we can get the dynamic linker's base
1411 address from the shared library table. */
1412 so = master_so_list ();
1413 while (so)
1414 {
1415 if (svr4_same_1 (interp_name, so->so_original_name))
1416 {
1417 load_addr_found = 1;
1418 loader_found_in_list = 1;
1419 load_addr = lm_addr_check (so, tmp_bfd);
1420 break;
1421 }
1422 so = so->next;
1423 }
1424
1425 /* If we were not able to find the base address of the loader
1426 from our so_list, then try using the AT_BASE auxiliary entry. */
1427 if (!load_addr_found)
1428 if (target_auxv_search (&current_target, AT_BASE, &load_addr) > 0)
1429 {
1430 int addr_bit = gdbarch_addr_bit (target_gdbarch);
1431
1432 /* Ensure LOAD_ADDR has proper sign in its possible upper bits so
1433 that `+ load_addr' will overflow CORE_ADDR width, not creating
1434 invalid addresses like 0x101234567 for 32bit inferiors on 64bit
1435 GDB. */
1436
1437 if (addr_bit < (sizeof (CORE_ADDR) * HOST_CHAR_BIT))
1438 {
1439 CORE_ADDR space_size = (CORE_ADDR) 1 << addr_bit;
1440 CORE_ADDR tmp_entry_point = exec_entry_point (tmp_bfd,
1441 tmp_bfd_target);
1442
1443 gdb_assert (load_addr < space_size);
1444
1445 /* TMP_ENTRY_POINT exceeding SPACE_SIZE would be for prelinked
1446 64bit ld.so with a 32bit executable; it should not happen. */
1447
1448 if (tmp_entry_point < space_size
1449 && tmp_entry_point + load_addr >= space_size)
1450 load_addr -= space_size;
1451 }
1452
1453 load_addr_found = 1;
1454 }
1455
1456 /* Otherwise we find the dynamic linker's base address by examining
1457 the current pc (which should point at the entry point for the
1458 dynamic linker) and subtracting the offset of the entry point.
1459
1460 This is more fragile than the previous approaches, but is a good
1461 fallback method because it has actually been working well in
1462 most cases. */
1463 if (!load_addr_found)
1464 {
1465 struct regcache *regcache
1466 = get_thread_arch_regcache (inferior_ptid, target_gdbarch);
1467
1468 load_addr = (regcache_read_pc (regcache)
1469 - exec_entry_point (tmp_bfd, tmp_bfd_target));
1470 }
1471
1472 if (!loader_found_in_list)
1473 {
1474 info->debug_loader_name = xstrdup (interp_name);
1475 info->debug_loader_offset_p = 1;
1476 info->debug_loader_offset = load_addr;
1477 solib_add (NULL, from_tty, &current_target, auto_solib_add);
1478 }
1479
1480 /* Record the relocated start and end address of the dynamic linker
1481 text and plt section for svr4_in_dynsym_resolve_code. */
1482 interp_sect = bfd_get_section_by_name (tmp_bfd, ".text");
1483 if (interp_sect)
1484 {
1485 info->interp_text_sect_low =
1486 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
1487 info->interp_text_sect_high =
1488 info->interp_text_sect_low
1489 + bfd_section_size (tmp_bfd, interp_sect);
1490 }
1491 interp_sect = bfd_get_section_by_name (tmp_bfd, ".plt");
1492 if (interp_sect)
1493 {
1494 info->interp_plt_sect_low =
1495 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
1496 info->interp_plt_sect_high =
1497 info->interp_plt_sect_low
1498 + bfd_section_size (tmp_bfd, interp_sect);
1499 }
1500
1501 /* Now try to set a breakpoint in the dynamic linker. */
1502 for (bkpt_namep = solib_break_names; *bkpt_namep != NULL; bkpt_namep++)
1503 {
1504 sym_addr = bfd_lookup_symbol (tmp_bfd, *bkpt_namep);
1505 if (sym_addr != 0)
1506 break;
1507 }
1508
1509 if (sym_addr != 0)
1510 /* Convert 'sym_addr' from a function pointer to an address.
1511 Because we pass tmp_bfd_target instead of the current
1512 target, this will always produce an unrelocated value. */
1513 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch,
1514 sym_addr,
1515 tmp_bfd_target);
1516
1517 /* We're done with both the temporary bfd and target. Remember,
1518 closing the target closes the underlying bfd. */
1519 target_close (tmp_bfd_target, 0);
1520
1521 if (sym_addr != 0)
1522 {
1523 create_solib_event_breakpoint (target_gdbarch, load_addr + sym_addr);
1524 xfree (interp_name);
1525 return 1;
1526 }
1527
1528 /* For whatever reason we couldn't set a breakpoint in the dynamic
1529 linker. Warn and drop into the old code. */
1530 bkpt_at_symbol:
1531 xfree (interp_name);
1532 warning (_("Unable to find dynamic linker breakpoint function.\n"
1533 "GDB will be unable to debug shared library initializers\n"
1534 "and track explicitly loaded dynamic code."));
1535 }
1536
1537 /* Scan through the lists of symbols, trying to look up the symbol and
1538 set a breakpoint there. Terminate the loop if/when we succeed. */
1539
1540 for (bkpt_namep = solib_break_names; *bkpt_namep != NULL; bkpt_namep++)
1541 {
1542 msymbol = lookup_minimal_symbol (*bkpt_namep, NULL, symfile_objfile);
1543 if ((msymbol != NULL) && (SYMBOL_VALUE_ADDRESS (msymbol) != 0))
1544 {
1545 sym_addr = SYMBOL_VALUE_ADDRESS (msymbol);
1546 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch,
1547 sym_addr,
1548 &current_target);
1549 create_solib_event_breakpoint (target_gdbarch, sym_addr);
1550 return 1;
1551 }
1552 }
1553
1554 if (!current_inferior ()->attach_flag)
1555 {
1556 for (bkpt_namep = bkpt_names; *bkpt_namep != NULL; bkpt_namep++)
1557 {
1558 msymbol = lookup_minimal_symbol (*bkpt_namep, NULL, symfile_objfile);
1559 if ((msymbol != NULL) && (SYMBOL_VALUE_ADDRESS (msymbol) != 0))
1560 {
1561 sym_addr = SYMBOL_VALUE_ADDRESS (msymbol);
1562 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch,
1563 sym_addr,
1564 &current_target);
1565 create_solib_event_breakpoint (target_gdbarch, sym_addr);
1566 return 1;
1567 }
1568 }
1569 }
1570 return 0;
1571 }
1572
1573 /* Implement the "special_symbol_handling" target_so_ops method. */
1574
1575 static void
1576 svr4_special_symbol_handling (void)
1577 {
1578 /* Nothing to do. */
1579 }
1580
1581 /* Read the ELF program headers from ABFD. Return the contents and
1582 set *PHDRS_SIZE to the size of the program headers. */
1583
1584 static gdb_byte *
1585 read_program_headers_from_bfd (bfd *abfd, int *phdrs_size)
1586 {
1587 Elf_Internal_Ehdr *ehdr;
1588 gdb_byte *buf;
1589
1590 ehdr = elf_elfheader (abfd);
1591
1592 *phdrs_size = ehdr->e_phnum * ehdr->e_phentsize;
1593 if (*phdrs_size == 0)
1594 return NULL;
1595
1596 buf = xmalloc (*phdrs_size);
1597 if (bfd_seek (abfd, ehdr->e_phoff, SEEK_SET) != 0
1598 || bfd_bread (buf, *phdrs_size, abfd) != *phdrs_size)
1599 {
1600 xfree (buf);
1601 return NULL;
1602 }
1603
1604 return buf;
1605 }
1606
1607 /* Return 1 and fill *DISPLACEMENTP with the detected PIE offset of the
1608 inferior's exec_bfd. Otherwise return 0.
1609
1610 We relocate all of the sections by the same amount. This
1611 behavior is mandated by recent editions of the System V ABI.
1612 According to the System V Application Binary Interface,
1613 Edition 4.1, page 5-5:
1614
1615 ... Though the system chooses virtual addresses for
1616 individual processes, it maintains the segments' relative
1617 positions. Because position-independent code uses relative
1618 addressing between segments, the difference between
1619 virtual addresses in memory must match the difference
1620 between virtual addresses in the file. The difference
1621 between the virtual address of any segment in memory and
1622 the corresponding virtual address in the file is thus a
1623 single constant value for any one executable or shared
1624 object in a given process. This difference is the base
1625 address. One use of the base address is to relocate the
1626 memory image of the program during dynamic linking.
1627
1628 The same language also appears in Edition 4.0 of the System V
1629 ABI and is left unspecified in some of the earlier editions.
1630
1631 Decide if the objfile needs to be relocated. As indicated above, we will
1632 only be here when execution is stopped. But during attachment the PC can be
1633 at an arbitrary address, therefore regcache_read_pc can be misleading
1634 (contrary to the auxv AT_ENTRY value). Moreover, for an executable with an
1635 interpreter section, regcache_read_pc would point to the interpreter and not the main executable.
1636
1637 So, to summarize, relocations are necessary when the start address obtained
1638 from the executable is different from the address in auxv AT_ENTRY entry.
1639
1640 [ The astute reader will note that we also test to make sure that
1641 the executable in question has the DYNAMIC flag set. It is my
1642 opinion that this test is unnecessary (undesirable even). It
1643 was added to avoid inadvertent relocation of an executable
1644 whose e_type member in the ELF header is not ET_DYN. There may
1645 be a time in the future when it is desirable to do relocations
1646 on other types of files as well in which case this condition
1647 should either be removed or modified to accommodate the new file
1648 type. - Kevin, Nov 2000. ] */
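/* A worked example of the displacement computed below, with made-up
   numbers: a PIE whose link-time start address is
   bfd_get_start_address (exec_bfd) == 0x530, mapped by the kernel so
   that the auxv reports AT_ENTRY == 0x555555554530, yields
   displacement == 0x555555554000.  That candidate is then checked for
   page alignment and against the program headers before it is applied
   to all sections by svr4_relocate_main_executable.  */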
1649
1650 static int
1651 svr4_exec_displacement (CORE_ADDR *displacementp)
1652 {
1653 /* ENTRY_POINT is a possible function descriptor - before
1654 a call to gdbarch_convert_from_func_ptr_addr. */
1655 CORE_ADDR entry_point, displacement;
1656
1657 if (exec_bfd == NULL)
1658 return 0;
1659
1660 /* Therefore for ELF it is ET_EXEC and not ET_DYN. Both shared libraries
1661 being executed themselves and PIE (Position Independent Executable)
1662 executables are ET_DYN. */
1663
1664 if ((bfd_get_file_flags (exec_bfd) & DYNAMIC) == 0)
1665 return 0;
1666
1667 if (target_auxv_search (&current_target, AT_ENTRY, &entry_point) <= 0)
1668 return 0;
1669
1670 displacement = entry_point - bfd_get_start_address (exec_bfd);
1671
1672 /* Verify the DISPLACEMENT candidate complies with the required page
1673 alignment. It is cheaper than the program headers comparison below. */
1674
1675 if (bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
1676 {
1677 const struct elf_backend_data *elf = get_elf_backend_data (exec_bfd);
1678
1679 /* p_align of PT_LOAD segments does not specify any alignment but
1680 only congruency of addresses:
1681 p_offset % p_align == p_vaddr % p_align
1682 The kernel is free to load the executable with a lower alignment. */
1683
1684 if ((displacement & (elf->minpagesize - 1)) != 0)
1685 return 0;
1686 }
1687
1688 /* Verify that the auxiliary vector describes the same file as exec_bfd, by
1689 comparing their program headers. If the program headers in the auxiliary
1690 vector do not match the program headers in the executable, then we are
1691 looking at a different file than the one used by the kernel - for
1692 instance, "gdb program" connected to "gdbserver :PORT ld.so program". */
1693
1694 if (bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
1695 {
1696 /* Be optimistic and clear OK only if GDB was able to verify the headers
1697 really do not match. */
1698 int phdrs_size, phdrs2_size, ok = 1;
1699 gdb_byte *buf, *buf2;
1700 int arch_size;
1701
1702 buf = read_program_header (-1, &phdrs_size, &arch_size);
1703 buf2 = read_program_headers_from_bfd (exec_bfd, &phdrs2_size);
1704 if (buf != NULL && buf2 != NULL)
1705 {
1706 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch);
1707
1708 /* We are dealing with three different addresses. EXEC_BFD
1709 represents the current addresses in the on-disk file. Target memory content
1710 may be different from EXEC_BFD as the file may have been prelinked
1711 to a different address after the executable has been loaded.
1712 Moreover the address of placement in target memory can be
1713 different from what the program headers in target memory say -
1714 this is the goal of PIE.
1715
1716 Detected DISPLACEMENT covers both the offsets of PIE placement and
1717 possible new prelink performed after start of the program. Here
1718 relocate BUF and BUF2 just by the EXEC_BFD vs. target memory
1719 content offset for the verification purpose. */
1720
1721 if (phdrs_size != phdrs2_size
1722 || bfd_get_arch_size (exec_bfd) != arch_size)
1723 ok = 0;
1724 else if (arch_size == 32
1725 && phdrs_size >= sizeof (Elf32_External_Phdr)
1726 && phdrs_size % sizeof (Elf32_External_Phdr) == 0)
1727 {
1728 Elf_Internal_Ehdr *ehdr2 = elf_tdata (exec_bfd)->elf_header;
1729 Elf_Internal_Phdr *phdr2 = elf_tdata (exec_bfd)->phdr;
1730 CORE_ADDR displacement = 0;
1731 int i;
1732
1733 /* DISPLACEMENT could be found more easily as the difference of the
1734 in-memory and on-disk ehdr2->e_entry values. But we have not read
1735 the in-memory ehdr yet, and what we have already read is enough to
1736 compute that displacement. */
1737
1738 for (i = 0; i < ehdr2->e_phnum; i++)
1739 if (phdr2[i].p_type == PT_LOAD)
1740 {
1741 Elf32_External_Phdr *phdrp;
1742 gdb_byte *buf_vaddr_p, *buf_paddr_p;
1743 CORE_ADDR vaddr, paddr;
1744 CORE_ADDR displacement_vaddr = 0;
1745 CORE_ADDR displacement_paddr = 0;
1746
1747 phdrp = &((Elf32_External_Phdr *) buf)[i];
1748 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
1749 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
1750
1751 vaddr = extract_unsigned_integer (buf_vaddr_p, 4,
1752 byte_order);
1753 displacement_vaddr = vaddr - phdr2[i].p_vaddr;
1754
1755 paddr = extract_unsigned_integer (buf_paddr_p, 4,
1756 byte_order);
1757 displacement_paddr = paddr - phdr2[i].p_paddr;
1758
1759 if (displacement_vaddr == displacement_paddr)
1760 displacement = displacement_vaddr;
1761
1762 break;
1763 }
1764
1765 /* Now compare BUF and BUF2 with optional DISPLACEMENT. */
1766
1767 for (i = 0; i < phdrs_size / sizeof (Elf32_External_Phdr); i++)
1768 {
1769 Elf32_External_Phdr *phdrp;
1770 Elf32_External_Phdr *phdr2p;
1771 gdb_byte *buf_vaddr_p, *buf_paddr_p;
1772 CORE_ADDR vaddr, paddr;
1773 asection *plt2_asect;
1774
1775 phdrp = &((Elf32_External_Phdr *) buf)[i];
1776 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
1777 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
1778 phdr2p = &((Elf32_External_Phdr *) buf2)[i];
1779
1780 /* PT_GNU_STACK is an exception: it is never relocated by prelink,
1781 as its addresses are always zero. */
1782
1783 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
1784 continue;
1785
1786 /* Check also other adjustment combinations - PR 11786. */
1787
1788 vaddr = extract_unsigned_integer (buf_vaddr_p, 4,
1789 byte_order);
1790 vaddr -= displacement;
1791 store_unsigned_integer (buf_vaddr_p, 4, byte_order, vaddr);
1792
1793 paddr = extract_unsigned_integer (buf_paddr_p, 4,
1794 byte_order);
1795 paddr -= displacement;
1796 store_unsigned_integer (buf_paddr_p, 4, byte_order, paddr);
1797
1798 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
1799 continue;
1800
1801 /* prelink can convert .plt SHT_NOBITS to SHT_PROGBITS. */
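                  /* If only that .plt conversion distinguishes the two
                     images, the PT_LOAD p_filesz values differ by exactly
                     the size of .plt, so retry the comparison with the
                     in-memory FILESZ adjusted by that amount, in the
                     direction the on-disk section suggests.  */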
1802 plt2_asect = bfd_get_section_by_name (exec_bfd, ".plt");
1803 if (plt2_asect)
1804 {
1805 int content2;
1806 gdb_byte *buf_filesz_p = (gdb_byte *) &phdrp->p_filesz;
1807 CORE_ADDR filesz;
1808
1809 content2 = (bfd_get_section_flags (exec_bfd, plt2_asect)
1810 & SEC_HAS_CONTENTS) != 0;
1811
1812 filesz = extract_unsigned_integer (buf_filesz_p, 4,
1813 byte_order);
1814
1815 /* PLT2_ASECT is from on-disk file (exec_bfd) while
1816 FILESZ is from the in-memory image. */
1817 if (content2)
1818 filesz += bfd_get_section_size (plt2_asect);
1819 else
1820 filesz -= bfd_get_section_size (plt2_asect);
1821
1822 store_unsigned_integer (buf_filesz_p, 4, byte_order,
1823 filesz);
1824
1825 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
1826 continue;
1827 }
1828
1829 ok = 0;
1830 break;
1831 }
1832 }
1833 else if (arch_size == 64
1834 && phdrs_size >= sizeof (Elf64_External_Phdr)
1835 && phdrs_size % sizeof (Elf64_External_Phdr) == 0)
1836 {
1837 Elf_Internal_Ehdr *ehdr2 = elf_tdata (exec_bfd)->elf_header;
1838 Elf_Internal_Phdr *phdr2 = elf_tdata (exec_bfd)->phdr;
1839 CORE_ADDR displacement = 0;
1840 int i;
1841
1842 /* DISPLACEMENT could be found more easily as the difference of the
1843 in-memory and on-disk ehdr2->e_entry values. But we have not read
1844 the in-memory ehdr yet, and what we have already read is enough to
1845 compute that displacement. */
1846
1847 for (i = 0; i < ehdr2->e_phnum; i++)
1848 if (phdr2[i].p_type == PT_LOAD)
1849 {
1850 Elf64_External_Phdr *phdrp;
1851 gdb_byte *buf_vaddr_p, *buf_paddr_p;
1852 CORE_ADDR vaddr, paddr;
1853 CORE_ADDR displacement_vaddr = 0;
1854 CORE_ADDR displacement_paddr = 0;
1855
1856 phdrp = &((Elf64_External_Phdr *) buf)[i];
1857 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
1858 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
1859
1860 vaddr = extract_unsigned_integer (buf_vaddr_p, 8,
1861 byte_order);
1862 displacement_vaddr = vaddr - phdr2[i].p_vaddr;
1863
1864 paddr = extract_unsigned_integer (buf_paddr_p, 8,
1865 byte_order);
1866 displacement_paddr = paddr - phdr2[i].p_paddr;
1867
1868 if (displacement_vaddr == displacement_paddr)
1869 displacement = displacement_vaddr;
1870
1871 break;
1872 }
1873
1874 /* Now compare BUF and BUF2 with optional DISPLACEMENT. */
1875
1876 for (i = 0; i < phdrs_size / sizeof (Elf64_External_Phdr); i++)
1877 {
1878 Elf64_External_Phdr *phdrp;
1879 Elf64_External_Phdr *phdr2p;
1880 gdb_byte *buf_vaddr_p, *buf_paddr_p;
1881 CORE_ADDR vaddr, paddr;
1882 asection *plt2_asect;
1883
1884 phdrp = &((Elf64_External_Phdr *) buf)[i];
1885 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
1886 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
1887 phdr2p = &((Elf64_External_Phdr *) buf2)[i];
1888
1889 /* PT_GNU_STACK is an exception: it is never relocated by prelink,
1890 as its addresses are always zero. */
1891
1892 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
1893 continue;
1894
1895 /* Check also other adjustment combinations - PR 11786. */
1896
1897 vaddr = extract_unsigned_integer (buf_vaddr_p, 8,
1898 byte_order);
1899 vaddr -= displacement;
1900 store_unsigned_integer (buf_vaddr_p, 8, byte_order, vaddr);
1901
1902 paddr = extract_unsigned_integer (buf_paddr_p, 8,
1903 byte_order);
1904 paddr -= displacement;
1905 store_unsigned_integer (buf_paddr_p, 8, byte_order, paddr);
1906
1907 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
1908 continue;
1909
1910 /* prelink can convert .plt SHT_NOBITS to SHT_PROGBITS. */
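                  /* If only that .plt conversion distinguishes the two
                     images, the PT_LOAD p_filesz values differ by exactly
                     the size of .plt, so retry the comparison with the
                     in-memory FILESZ adjusted by that amount, in the
                     direction the on-disk section suggests.  */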
1911 plt2_asect = bfd_get_section_by_name (exec_bfd, ".plt");
1912 if (plt2_asect)
1913 {
1914 int content2;
1915 gdb_byte *buf_filesz_p = (gdb_byte *) &phdrp->p_filesz;
1916 CORE_ADDR filesz;
1917
1918 content2 = (bfd_get_section_flags (exec_bfd, plt2_asect)
1919 & SEC_HAS_CONTENTS) != 0;
1920
1921 filesz = extract_unsigned_integer (buf_filesz_p, 8,
1922 byte_order);
1923
1924 /* PLT2_ASECT is from on-disk file (exec_bfd) while
1925 FILESZ is from the in-memory image. */
1926 if (content2)
1927 filesz += bfd_get_section_size (plt2_asect);
1928 else
1929 filesz -= bfd_get_section_size (plt2_asect);
1930
1931 store_unsigned_integer (buf_filesz_p, 8, byte_order,
1932 filesz);
1933
1934 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
1935 continue;
1936 }
1937
1938 ok = 0;
1939 break;
1940 }
1941 }
1942 else
1943 ok = 0;
1944 }
1945
1946 xfree (buf);
1947 xfree (buf2);
1948
1949 if (!ok)
1950 return 0;
1951 }
1952
1953 if (info_verbose)
1954 {
1955 /* This message may be printed repeatedly, as there is no easy way to
1956 check whether the executable symbols/file have already been relocated
1957 to DISPLACEMENT. */
1958
1959 printf_unfiltered (_("Using PIE (Position Independent Executable) "
1960 "displacement %s for \"%s\".\n"),
1961 paddress (target_gdbarch, displacement),
1962 bfd_get_filename (exec_bfd));
1963 }
1964
1965 *displacementp = displacement;
1966 return 1;
1967 }
1968
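/* The core of the computation above, seen from inside a process: compare
   the kernel-chosen AT_ENTRY with the link-time e_entry still recorded in
   the (unrelocated) ELF header.  A standalone sketch, illustrative only;
   it assumes a GNU/Linux host with glibc's getauxval and a linker that
   provides the __ehdr_start symbol, neither of which GDB relies on here.  */
#if 0
#include <link.h>
#include <stdio.h>
#include <sys/auxv.h>

extern const ElfW(Ehdr) __ehdr_start;	/* Our own ELF header, as mapped.  */

int
main (void)
{
  unsigned long at_entry = getauxval (AT_ENTRY);  /* Run-time entry point.  */
  unsigned long e_entry = __ehdr_start.e_entry;   /* Link-time entry point.  */

  /* Non-zero only for a PIE (ET_DYN) executable; an ET_EXEC binary runs
     exactly where it was linked.  */
  printf ("displacement = %#lx\n", at_entry - e_entry);
  return 0;
}
#endif
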
1969 /* Relocate the main executable. This function should be called upon
1970 stopping the inferior process at the entry point to the program.
1971 The BFD entry point is compared with the auxv AT_ENTRY value; if they
1972 differ, the main executable is relocated by the proper amount. */
1973
1974 static void
1975 svr4_relocate_main_executable (void)
1976 {
1977 CORE_ADDR displacement;
1978
1979 /* If we are re-running this executable, SYMFILE_OBJFILE->SECTION_OFFSETS
1980 probably contains the offsets computed using the PIE displacement
1981 from the previous run, which of course are irrelevant for this run.
1982 So we need to determine the new PIE displacement and recompute the
1983 section offsets accordingly, even if SYMFILE_OBJFILE->SECTION_OFFSETS
1984 already contains pre-computed offsets.
1985
1986 If we cannot compute the PIE displacement, either:
1987
1988 - The executable is not PIE.
1989
1990 - SYMFILE_OBJFILE does not match the executable started in the target.
1991 This can happen for main executable symbols loaded at the host while
1992 `ld.so --ld-args main-executable' is loaded in the target.
1993
1994 Then we leave the section offsets untouched and use them as is for
1995 this run. Either:
1996
1997 - These section offsets were properly reset earlier, and thus
1998 already contain the correct values. This can happen for instance
1999 when reconnecting via the remote protocol to a target that supports
2000 the `qOffsets' packet.
2001
2002 - The section offsets were not reset earlier, and the best we can
2003 hope is that the old offsets are still applicable to the new run. */
2004
2005 if (! svr4_exec_displacement (&displacement))
2006 return;
2007
2008 /* Even a DISPLACEMENT of 0 is a valid new difference between in-memory
2009 and in-file addresses. */
2010
2011 if (symfile_objfile)
2012 {
2013 struct section_offsets *new_offsets;
2014 int i;
2015
2016 new_offsets = alloca (symfile_objfile->num_sections
2017 * sizeof (*new_offsets));
2018
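      /* A PIE is loaded as one rigid unit, so every section is shifted by
         the same amount; fill each offset slot with the single DISPLACEMENT
         value.  */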
2019 for (i = 0; i < symfile_objfile->num_sections; i++)
2020 new_offsets->offsets[i] = displacement;
2021
2022 objfile_relocate (symfile_objfile, new_offsets);
2023 }
2024 else if (exec_bfd)
2025 {
2026 asection *asect;
2027
2028 for (asect = exec_bfd->sections; asect != NULL; asect = asect->next)
2029 exec_set_section_address (bfd_get_filename (exec_bfd), asect->index,
2030 (bfd_section_vma (exec_bfd, asect)
2031 + displacement));
2032 }
2033 }
2034
2035 /* Implement the "create_inferior_hook" target_solib_ops method.
2036
2037 For SVR4 executables, the first instruction the inferior executes is
2038 either the first instruction in the dynamic linker (for dynamically
2039 linked executables) or the instruction at "start" for statically
2040 linked executables. For dynamically linked executables, the system
2041 first exec's /lib/libc.so.N, which contains the dynamic linker,
2042 and starts it running. The dynamic linker maps in any needed
2043 shared libraries, maps in the actual user executable, and then
2044 jumps to "start" in the user executable.
2045
2046 We can arrange to cooperate with the dynamic linker to discover the
2047 names of shared libraries that are dynamically linked, and the base
2048 addresses to which they are linked.
2049
2050 This function is responsible for discovering those names and
2051 addresses, and saving sufficient information about them to allow
2052 their symbols to be read at a later time.
2053
2054 FIXME
2055
2056 Between enable_break() and disable_break(), this code does not
2057 properly handle hitting breakpoints which the user might have
2058 set in the startup code or in the dynamic linker itself. Proper
2059 handling will probably have to wait until the implementation is
2060 changed to use the "breakpoint handler function" method.
2061
2062 Also, what if child has exit()ed? Must exit loop somehow. */
2063
2064 static void
2065 svr4_solib_create_inferior_hook (int from_tty)
2066 {
2067 #if defined(_SCO_DS)
2068 struct inferior *inf;
2069 struct thread_info *tp;
2070 #endif /* defined(_SCO_DS) */
2071 struct svr4_info *info;
2072
2073 info = get_svr4_info ();
2074
2075 /* Relocate the main executable if necessary. */
2076 svr4_relocate_main_executable ();
2077
2078 /* No point setting a breakpoint in the dynamic linker if we can't
2079 hit it (e.g., a core file, or a trace file). */
2080 if (!target_has_execution)
2081 return;
2082
2083 if (!svr4_have_link_map_offsets ())
2084 return;
2085
2086 if (!enable_break (info, from_tty))
2087 return;
2088
2089 #if defined(_SCO_DS)
2090 /* SCO needs the loop below; other systems should be using the
2091 special shared library breakpoints and the shared library breakpoint
2092 service routine.
2093
2094 Now run the target. It will eventually hit the breakpoint, at
2095 which point all of the libraries will have been mapped in and we
2096 can go groveling around in the dynamic linker structures to find
2097 out what we need to know about them. */
2098
2099 inf = current_inferior ();
2100 tp = inferior_thread ();
2101
2102 clear_proceed_status ();
2103 inf->control.stop_soon = STOP_QUIETLY;
2104 tp->suspend.stop_signal = TARGET_SIGNAL_0;
2105 do
2106 {
2107 target_resume (pid_to_ptid (-1), 0, tp->suspend.stop_signal);
2108 wait_for_inferior ();
2109 }
2110 while (tp->suspend.stop_signal != TARGET_SIGNAL_TRAP);
2111 inf->control.stop_soon = NO_STOP_QUIETLY;
2112 #endif /* defined(_SCO_DS) */
2113 }
2114
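/* The rendezvous that svr4_solib_create_inferior_hook arranges can also be
   seen from the inferior's side.  A minimal glibc-style sketch (illustrative
   only, not used by GDB): the dynamic linker exports `_r_debug', whose
   `r_map' member heads the `struct link_map' list, and calls the hook at
   `r_brk' whenever that list changes - which is where the breakpoint set by
   enable_break lands.  */
#if 0
#include <link.h>
#include <stdio.h>

extern struct r_debug _r_debug;		/* Exported by the dynamic linker.  */

static void
print_loaded_objects (void)
{
  struct link_map *lm;

  for (lm = _r_debug.r_map; lm != NULL; lm = lm->l_next)
    printf ("%#lx %s\n", (unsigned long) lm->l_addr,
	    lm->l_name[0] != '\0' ? lm->l_name : "(main executable)");
}
#endif
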
2115 static void
2116 svr4_clear_solib (void)
2117 {
2118 struct svr4_info *info;
2119
2120 info = get_svr4_info ();
2121 info->debug_base = 0;
2122 info->debug_loader_offset_p = 0;
2123 info->debug_loader_offset = 0;
2124 xfree (info->debug_loader_name);
2125 info->debug_loader_name = NULL;
2126 }
2127
2128 static void
2129 svr4_free_so (struct so_list *so)
2130 {
2131 xfree (so->lm_info->lm);
2132 xfree (so->lm_info);
2133 }
2134
2135
2136 /* Clear any bits of ADDR that wouldn't fit in a target-format
2137 data pointer. "Data pointer" here refers to whatever sort of
2138 address the dynamic linker uses to manage its sections. At the
2139 moment, we don't support shared libraries on any processors where
2140 code and data pointers are different sizes.
2141
2142 This isn't really the right solution. What we really need here is
2143 a way to do arithmetic on CORE_ADDR values that respects the
2144 natural pointer/address correspondence. (For example, on the MIPS,
2145 converting a 32-bit pointer to a 64-bit CORE_ADDR requires you to
2146 sign-extend the value. There, simply truncating the bits above
2147 gdbarch_ptr_bit, as we do below, is no good.) This should probably
2148 be a new gdbarch method or something. */
2149 static CORE_ADDR
2150 svr4_truncate_ptr (CORE_ADDR addr)
2151 {
2152 if (gdbarch_ptr_bit (target_gdbarch) == sizeof (CORE_ADDR) * 8)
2153 /* We don't need to truncate anything, and the bit twiddling below
2154 will fail due to overflow problems. */
2155 return addr;
2156 else
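    /* For example, with gdbarch_ptr_bit == 32 this masks ADDR with
       0xffffffff.  */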
2157 return addr & (((CORE_ADDR) 1 << gdbarch_ptr_bit (target_gdbarch)) - 1);
2158 }
2159
2160
2161 static void
2162 svr4_relocate_section_addresses (struct so_list *so,
2163 struct target_section *sec)
2164 {
2165 sec->addr = svr4_truncate_ptr (sec->addr + lm_addr_check (so,
2166 sec->bfd));
2167 sec->endaddr = svr4_truncate_ptr (sec->endaddr + lm_addr_check (so,
2168 sec->bfd));
2169 }
2170 \f
2171
2172 /* Architecture-specific operations. */
2173
2174 /* Per-architecture data key. */
2175 static struct gdbarch_data *solib_svr4_data;
2176
2177 struct solib_svr4_ops
2178 {
2179 /* Return a description of the layout of `struct link_map'. */
2180 struct link_map_offsets *(*fetch_link_map_offsets)(void);
2181 };
2182
2183 /* Return a default for the architecture-specific operations. */
2184
2185 static void *
2186 solib_svr4_init (struct obstack *obstack)
2187 {
2188 struct solib_svr4_ops *ops;
2189
2190 ops = OBSTACK_ZALLOC (obstack, struct solib_svr4_ops);
2191 ops->fetch_link_map_offsets = NULL;
2192 return ops;
2193 }
2194
2195 /* Set the architecture-specific `struct link_map_offsets' fetcher for
2196 GDBARCH to FLMO. Also, install SVR4 solib_ops into GDBARCH. */
2197
2198 void
2199 set_solib_svr4_fetch_link_map_offsets (struct gdbarch *gdbarch,
2200 struct link_map_offsets *(*flmo) (void))
2201 {
2202 struct solib_svr4_ops *ops = gdbarch_data (gdbarch, solib_svr4_data);
2203
2204 ops->fetch_link_map_offsets = flmo;
2205
2206 set_solib_ops (gdbarch, &svr4_so_ops);
2207 }
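/* Typical usage, sketched from an architecture's gdbarch initialization
   (the function name below is purely illustrative): a 32-bit SVR4 port
   registers the ILP32 fetcher, which also installs svr4_so_ops.  */
#if 0
static void
example_svr4_ilp32_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
{
  set_solib_svr4_fetch_link_map_offsets (gdbarch,
					 svr4_ilp32_fetch_link_map_offsets);
}
#endif
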
2208
2209 /* Fetch a link_map_offsets structure using the architecture-specific
2210 `struct link_map_offsets' fetcher. */
2211
2212 static struct link_map_offsets *
2213 svr4_fetch_link_map_offsets (void)
2214 {
2215 struct solib_svr4_ops *ops = gdbarch_data (target_gdbarch, solib_svr4_data);
2216
2217 gdb_assert (ops->fetch_link_map_offsets);
2218 return ops->fetch_link_map_offsets ();
2219 }
2220
2221 /* Return 1 if a link map offset fetcher has been defined, 0 otherwise. */
2222
2223 static int
2224 svr4_have_link_map_offsets (void)
2225 {
2226 struct solib_svr4_ops *ops = gdbarch_data (target_gdbarch, solib_svr4_data);
2227
2228 return (ops->fetch_link_map_offsets != NULL);
2229 }
2230 \f
2231
2232 /* Most OSes that have SVR4-style ELF dynamic libraries define a
2233 `struct r_debug' and a `struct link_map' that are binary compatible
2234 with the original SVR4 implementation. */
2235
2236 /* Fetch (and possibly build) an appropriate `struct link_map_offsets'
2237 for an ILP32 SVR4 system. */
2238
2239 struct link_map_offsets *
2240 svr4_ilp32_fetch_link_map_offsets (void)
2241 {
2242 static struct link_map_offsets lmo;
2243 static struct link_map_offsets *lmp = NULL;
2244
2245 if (lmp == NULL)
2246 {
2247 lmp = &lmo;
2248
2249 lmo.r_version_offset = 0;
2250 lmo.r_version_size = 4;
2251 lmo.r_map_offset = 4;
2252 lmo.r_brk_offset = 8;
2253 lmo.r_ldsomap_offset = 20;
2254
2255 /* Everything we need is in the first 20 bytes. */
2256 lmo.link_map_size = 20;
2257 lmo.l_addr_offset = 0;
2258 lmo.l_name_offset = 4;
2259 lmo.l_ld_offset = 8;
2260 lmo.l_next_offset = 12;
2261 lmo.l_prev_offset = 16;
2262 }
2263
2264 return lmp;
2265 }
2266
2267 /* Fetch (and possibly build) an appropriate `struct link_map_offsets'
2268 for an LP64 SVR4 system. */
2269
2270 struct link_map_offsets *
2271 svr4_lp64_fetch_link_map_offsets (void)
2272 {
2273 static struct link_map_offsets lmo;
2274 static struct link_map_offsets *lmp = NULL;
2275
2276 if (lmp == NULL)
2277 {
2278 lmp = &lmo;
2279
2280 lmo.r_version_offset = 0;
2281 lmo.r_version_size = 4;
2282 lmo.r_map_offset = 8;
2283 lmo.r_brk_offset = 16;
2284 lmo.r_ldsomap_offset = 40;
2285
2286 /* Everything we need is in the first 40 bytes. */
2287 lmo.link_map_size = 40;
2288 lmo.l_addr_offset = 0;
2289 lmo.l_name_offset = 8;
2290 lmo.l_ld_offset = 16;
2291 lmo.l_next_offset = 24;
2292 lmo.l_prev_offset = 32;
2293 }
2294
2295 return lmp;
2296 }
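/* For reference, a sketch (illustrative only, not used by GDB) of LP64
   structures matching the offsets above, following the original
   SVR4/Solaris layout; `r_state' and `r_ldbase' account for the gap
   between r_brk at offset 16 and r_ldsomap at offset 40.  */
#if 0
struct example_link_map
{
  unsigned long l_addr;		      /* Offset  0: load displacement.  */
  char *l_name;			      /* Offset  8: absolute file name.  */
  void *l_ld;			      /* Offset 16: the object's dynamic section.  */
  struct example_link_map *l_next;    /* Offset 24.  */
  struct example_link_map *l_prev;    /* Offset 32.  */
};

struct example_r_debug
{
  int r_version;		      /* Offset  0 (4 bytes, then padding).  */
  struct example_link_map *r_map;     /* Offset  8: head of the link map list.  */
  unsigned long r_brk;		      /* Offset 16: shared library event hook.  */
  int r_state;			      /* Offset 24: RT_CONSISTENT/RT_ADD/RT_DELETE.  */
  unsigned long r_ldbase;	      /* Offset 32: base address of the linker.  */
  struct example_link_map *r_ldsomap; /* Offset 40 (Solaris extension).  */
};
#endif
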
2297 \f
2298
2299 struct target_so_ops svr4_so_ops;
2300
2301 /* Look up a global symbol for ELF DSOs linked with -Bsymbolic. Those
2302 DSOs have a different rule for symbol lookup: the lookup begins here
2303 in the DSO, not in the main executable. */
2304
2305 static struct symbol *
2306 elf_lookup_lib_symbol (const struct objfile *objfile,
2307 const char *name,
2308 const domain_enum domain)
2309 {
2310 bfd *abfd;
2311
2312 if (objfile == symfile_objfile)
2313 abfd = exec_bfd;
2314 else
2315 {
2316 /* OBJFILE should have been passed as the non-debug one. */
2317 gdb_assert (objfile->separate_debug_objfile_backlink == NULL);
2318
2319 abfd = objfile->obfd;
2320 }
2321
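  /* Only objects whose dynamic section carries the DT_SYMBOLIC tag (i.e.,
     objects linked with -Bsymbolic) get the DSO-first lookup; for anything
     else this hook finds nothing and the regular lookup order applies.  */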
2322 if (abfd == NULL || scan_dyntag (DT_SYMBOLIC, abfd, NULL) != 1)
2323 return NULL;
2324
2325 return lookup_global_symbol_from_objfile (objfile, name, domain);
2326 }
2327
2328 extern initialize_file_ftype _initialize_svr4_solib; /* -Wmissing-prototypes */
2329
2330 void
2331 _initialize_svr4_solib (void)
2332 {
2333 solib_svr4_data = gdbarch_data_register_pre_init (solib_svr4_init);
2334 solib_svr4_pspace_data
2335 = register_program_space_data_with_cleanup (svr4_pspace_data_cleanup);
2336
2337 svr4_so_ops.relocate_section_addresses = svr4_relocate_section_addresses;
2338 svr4_so_ops.free_so = svr4_free_so;
2339 svr4_so_ops.clear_solib = svr4_clear_solib;
2340 svr4_so_ops.solib_create_inferior_hook = svr4_solib_create_inferior_hook;
2341 svr4_so_ops.special_symbol_handling = svr4_special_symbol_handling;
2342 svr4_so_ops.current_sos = svr4_current_sos;
2343 svr4_so_ops.open_symbol_file_object = open_symbol_file_object;
2344 svr4_so_ops.in_dynsym_resolve_code = svr4_in_dynsym_resolve_code;
2345 svr4_so_ops.bfd_open = solib_bfd_open;
2346 svr4_so_ops.lookup_lib_global_symbol = elf_lookup_lib_symbol;
2347 svr4_so_ops.same = svr4_same;
2348 svr4_so_ops.keep_data_in_core = svr4_keep_data_in_core;
2349 }