Fix library-list.dtd -> library-list-svr4.dtd
[deliverable/binutils-gdb.git] / gdb / solib-svr4.c
1 /* Handle SVR4 shared libraries for GDB, the GNU Debugger.
2
3 Copyright (C) 1990-2014 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21
22 #include "elf/external.h"
23 #include "elf/common.h"
24 #include "elf/mips.h"
25
26 #include "symtab.h"
27 #include "bfd.h"
28 #include "symfile.h"
29 #include "objfiles.h"
30 #include "gdbcore.h"
31 #include "target.h"
32 #include "inferior.h"
33 #include "infrun.h"
34 #include "regcache.h"
35 #include "gdbthread.h"
36 #include "observer.h"
37
38 #include "solist.h"
39 #include "solib.h"
40 #include "solib-svr4.h"
41
42 #include "bfd-target.h"
43 #include "elf-bfd.h"
44 #include "exec.h"
45 #include "auxv.h"
46 #include "exceptions.h"
47 #include "gdb_bfd.h"
48 #include "probe.h"
49
50 static struct link_map_offsets *svr4_fetch_link_map_offsets (void);
51 static int svr4_have_link_map_offsets (void);
52 static void svr4_relocate_main_executable (void);
53 static void svr4_free_library_list (void *p_list);
54
55 /* Link map info to include in an allocated so_list entry. */
56
57 struct lm_info
58 {
59 /* Amount by which addresses in the binary should be relocated to
60 match the inferior. The direct inferior value is L_ADDR_INFERIOR.
61 When prelinking is involved and the prelink base address changes,
62 we may need a different offset - the recomputed offset is in L_ADDR.
63 It is commonly the same value. It is cached as we want to warn about
64 the difference and compute it only once. L_ADDR is valid
65 iff L_ADDR_P. */
66 CORE_ADDR l_addr, l_addr_inferior;
67 unsigned int l_addr_p : 1;
68
69 /* The target location of lm. */
70 CORE_ADDR lm_addr;
71
72 /* Values read in from inferior's fields of the same name. */
73 CORE_ADDR l_ld, l_next, l_prev, l_name;
74 };
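/* For reference only: the inferior-side fields mirrored above correspond,
   on typical SVR4/glibc systems, to the public part of `struct link_map'
   from <link.h> (a sketch; the actual offsets always come from
   svr4_fetch_link_map_offsets, not from this declaration):

   struct link_map
     {
       ElfW(Addr) l_addr;                difference between file and memory addresses
       char *l_name;                     absolute file name of the object
       ElfW(Dyn) *l_ld;                  dynamic section of the object
       struct link_map *l_next, *l_prev; chain of loaded objects
     };  */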
75
76 /* On SVR4 systems, a list of symbols in the dynamic linker where
77 GDB can try to place a breakpoint to monitor shared library
78 events.
79
80 If none of these symbols are found, or other errors occur, then
81 SVR4 systems will fall back to using a symbol as the "startup
82 mapping complete" breakpoint address. */
83
84 static const char * const solib_break_names[] =
85 {
86 "r_debug_state",
87 "_r_debug_state",
88 "_dl_debug_state",
89 "rtld_db_dlactivity",
90 "__dl_rtld_db_dlactivity",
91 "_rtld_debug_state",
92
93 NULL
94 };
95
96 static const char * const bkpt_names[] =
97 {
98 "_start",
99 "__start",
100 "main",
101 NULL
102 };
103
104 static const char * const main_name_list[] =
105 {
106 "main_$main",
107 NULL
108 };
109
110 /* What to do when a probe stop occurs. */
111
112 enum probe_action
113 {
114 /* Something went seriously wrong. Stop using probes and
115 revert to using the older interface. */
116 PROBES_INTERFACE_FAILED,
117
118 /* No action is required. The shared object list is still
119 valid. */
120 DO_NOTHING,
121
122 /* The shared object list should be reloaded entirely. */
123 FULL_RELOAD,
124
125 /* Attempt to incrementally update the shared object list. If
126 the update fails or is not possible, fall back to reloading
127 the list in full. */
128 UPDATE_OR_RELOAD,
129 };
130
131 /* A probe's name and its associated action. */
132
133 struct probe_info
134 {
135 /* The name of the probe. */
136 const char *name;
137
138 /* What to do when a probe stop occurs. */
139 enum probe_action action;
140 };
141
142 /* A list of named probes and their associated actions. If all
143 probes are present in the dynamic linker then the probes-based
144 interface will be used. */
145
146 static const struct probe_info probe_info[] =
147 {
148 { "init_start", DO_NOTHING },
149 { "init_complete", FULL_RELOAD },
150 { "map_start", DO_NOTHING },
151 { "map_failed", DO_NOTHING },
152 { "reloc_complete", UPDATE_OR_RELOAD },
153 { "unmap_start", DO_NOTHING },
154 { "unmap_complete", FULL_RELOAD },
155 };
156
157 #define NUM_PROBES ARRAY_SIZE (probe_info)
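/* On glibc-based systems these names correspond to the SystemTap (SDT)
   probes exported by the dynamic linker under the "rtld" provider; when
   debugging such an inferior, "info probes" in GDB lists them together
   with the addresses at which the event breakpoints will be placed.  */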
158
159 /* Return non-zero if GDB_SO_NAME and INFERIOR_SO_NAME represent
160 the same shared library. */
161
162 static int
163 svr4_same_1 (const char *gdb_so_name, const char *inferior_so_name)
164 {
165 if (strcmp (gdb_so_name, inferior_so_name) == 0)
166 return 1;
167
168 /* On Solaris, when starting an inferior we think that the dynamic linker
169 is /usr/lib/ld.so.1, but later on the table of loaded shared libraries
170 contains /lib/ld.so.1. Sometimes one file is a link to the other, but
171 sometimes they have identical content but are not linked to each
172 other. We don't restrict this check to Solaris, but the chances
173 of running into this situation elsewhere are very low. */
174 if (strcmp (gdb_so_name, "/usr/lib/ld.so.1") == 0
175 && strcmp (inferior_so_name, "/lib/ld.so.1") == 0)
176 return 1;
177
178 /* Similarly, we observed the same issue with sparc64, but with
179 different locations. */
180 if (strcmp (gdb_so_name, "/usr/lib/sparcv9/ld.so.1") == 0
181 && strcmp (inferior_so_name, "/lib/sparcv9/ld.so.1") == 0)
182 return 1;
183
184 return 0;
185 }
186
187 static int
188 svr4_same (struct so_list *gdb, struct so_list *inferior)
189 {
190 return (svr4_same_1 (gdb->so_original_name, inferior->so_original_name));
191 }
192
193 static struct lm_info *
194 lm_info_read (CORE_ADDR lm_addr)
195 {
196 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
197 gdb_byte *lm;
198 struct lm_info *lm_info;
199 struct cleanup *back_to;
200
201 lm = xmalloc (lmo->link_map_size);
202 back_to = make_cleanup (xfree, lm);
203
204 if (target_read_memory (lm_addr, lm, lmo->link_map_size) != 0)
205 {
206 warning (_("Error reading shared library list entry at %s"),
207 paddress (target_gdbarch (), lm_addr));
208 lm_info = NULL;
209 }
210 else
211 {
212 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
213
214 lm_info = xzalloc (sizeof (*lm_info));
215 lm_info->lm_addr = lm_addr;
216
217 lm_info->l_addr_inferior = extract_typed_address (&lm[lmo->l_addr_offset],
218 ptr_type);
219 lm_info->l_ld = extract_typed_address (&lm[lmo->l_ld_offset], ptr_type);
220 lm_info->l_next = extract_typed_address (&lm[lmo->l_next_offset],
221 ptr_type);
222 lm_info->l_prev = extract_typed_address (&lm[lmo->l_prev_offset],
223 ptr_type);
224 lm_info->l_name = extract_typed_address (&lm[lmo->l_name_offset],
225 ptr_type);
226 }
227
228 do_cleanups (back_to);
229
230 return lm_info;
231 }
232
233 static int
234 has_lm_dynamic_from_link_map (void)
235 {
236 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
237
238 return lmo->l_ld_offset >= 0;
239 }
240
241 static CORE_ADDR
242 lm_addr_check (const struct so_list *so, bfd *abfd)
243 {
244 if (!so->lm_info->l_addr_p)
245 {
246 struct bfd_section *dyninfo_sect;
247 CORE_ADDR l_addr, l_dynaddr, dynaddr;
248
249 l_addr = so->lm_info->l_addr_inferior;
250
251 if (! abfd || ! has_lm_dynamic_from_link_map ())
252 goto set_addr;
253
254 l_dynaddr = so->lm_info->l_ld;
255
256 dyninfo_sect = bfd_get_section_by_name (abfd, ".dynamic");
257 if (dyninfo_sect == NULL)
258 goto set_addr;
259
260 dynaddr = bfd_section_vma (abfd, dyninfo_sect);
261
262 if (dynaddr + l_addr != l_dynaddr)
263 {
264 CORE_ADDR align = 0x1000;
265 CORE_ADDR minpagesize = align;
266
267 if (bfd_get_flavour (abfd) == bfd_target_elf_flavour)
268 {
269 Elf_Internal_Ehdr *ehdr = elf_tdata (abfd)->elf_header;
270 Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
271 int i;
272
273 align = 1;
274
275 for (i = 0; i < ehdr->e_phnum; i++)
276 if (phdr[i].p_type == PT_LOAD && phdr[i].p_align > align)
277 align = phdr[i].p_align;
278
279 minpagesize = get_elf_backend_data (abfd)->minpagesize;
280 }
281
282 /* Turn it into a mask. */
283 align--;
284
285 /* If the changes match the alignment requirements, we
286 assume we're using a core file that was generated by the
287 same binary, just prelinked with a different base offset.
288 If it doesn't match, we may have a different binary, the
289 same binary with the dynamic table loaded at an unrelated
290 location, or anything, really. To avoid regressions,
291 don't adjust the base offset in the latter case, although
292 odds are that, if things really changed, debugging won't
293 quite work.
294
295 One might rather expect the condition
296 ((l_addr & align) == 0 && ((l_dynaddr - dynaddr) & align) == 0)
297 but the one below is relaxed for PPC. The PPC kernel supports
298 either 4k or 64k page sizes. To be prepared for 64k pages,
299 PPC ELF files are built using an alignment requirement of 64k.
300 However, when running on a kernel supporting 4k pages, the memory
301 mapping of the library may not actually happen on a 64k boundary!
302
303 (In the usual case where (l_addr & align) == 0, this check is
304 equivalent to the possibly expected check above.)
305
306 Even on PPC it must be zero-aligned at least for MINPAGESIZE. */
307
308 l_addr = l_dynaddr - dynaddr;
309
310 if ((l_addr & (minpagesize - 1)) == 0
311 && (l_addr & align) == ((l_dynaddr - dynaddr) & align))
312 {
313 if (info_verbose)
314 printf_unfiltered (_("Using PIC (Position Independent Code) "
315 "prelink displacement %s for \"%s\".\n"),
316 paddress (target_gdbarch (), l_addr),
317 so->so_name);
318 }
319 else
320 {
321 /* There is no way to verify that the library file matches. While
322 prelinking an unprelinked file (or unprelinking a prelinked
323 file), prelink can shift the DYNAMIC segment by an arbitrary
324 offset without any page size alignment. There is no way to
325 read the ELF header and/or Program Headers for even a limited
326 verification that they match. One could do a verification
327 of the DYNAMIC segment. Still, the address found is the best
328 one GDB could find. */
329
330 warning (_(".dynamic section for \"%s\" "
331 "is not at the expected address "
332 "(wrong library or version mismatch?)"), so->so_name);
333 }
334 }
335
336 set_addr:
337 so->lm_info->l_addr = l_addr;
338 so->lm_info->l_addr_p = 1;
339 }
340
341 return so->lm_info->l_addr;
342 }
343
344 /* Per pspace SVR4 specific data. */
345
346 struct svr4_info
347 {
348 CORE_ADDR debug_base; /* Base of dynamic linker structures. */
349
350 /* Validity flag for debug_loader_offset. */
351 int debug_loader_offset_p;
352
353 /* Load address for the dynamic linker, inferred. */
354 CORE_ADDR debug_loader_offset;
355
356 /* Name of the dynamic linker, valid if debug_loader_offset_p. */
357 char *debug_loader_name;
358
359 /* Load map address for the main executable. */
360 CORE_ADDR main_lm_addr;
361
362 CORE_ADDR interp_text_sect_low;
363 CORE_ADDR interp_text_sect_high;
364 CORE_ADDR interp_plt_sect_low;
365 CORE_ADDR interp_plt_sect_high;
366
367 /* Nonzero if the list of objects was last obtained from the target
368 via qXfer:libraries-svr4:read. */
369 int using_xfer;
370
371 /* Table of struct probe_and_action instances, used by the
372 probes-based interface to map breakpoint addresses to probes
373 and their associated actions. Lookup is performed using
374 probe_and_action->probe->address. */
375 htab_t probes_table;
376
377 /* List of objects loaded into the inferior, used by the probes-
378 based interface. */
379 struct so_list *solib_list;
380 };
381
382 /* Per-program-space data key. */
383 static const struct program_space_data *solib_svr4_pspace_data;
384
385 /* Free the probes table. */
386
387 static void
388 free_probes_table (struct svr4_info *info)
389 {
390 if (info->probes_table == NULL)
391 return;
392
393 htab_delete (info->probes_table);
394 info->probes_table = NULL;
395 }
396
397 /* Free the solib list. */
398
399 static void
400 free_solib_list (struct svr4_info *info)
401 {
402 svr4_free_library_list (&info->solib_list);
403 info->solib_list = NULL;
404 }
405
406 static void
407 svr4_pspace_data_cleanup (struct program_space *pspace, void *arg)
408 {
409 struct svr4_info *info = arg;
410
411 free_probes_table (info);
412 free_solib_list (info);
413
414 xfree (info);
415 }
416
417 /* Get the current svr4 data. If none is found yet, add it now. This
418 function always returns a valid object. */
419
420 static struct svr4_info *
421 get_svr4_info (void)
422 {
423 struct svr4_info *info;
424
425 info = program_space_data (current_program_space, solib_svr4_pspace_data);
426 if (info != NULL)
427 return info;
428
429 info = XCNEW (struct svr4_info);
430 set_program_space_data (current_program_space, solib_svr4_pspace_data, info);
431 return info;
432 }
433
434 /* Local function prototypes */
435
436 static int match_main (const char *);
437
438 /* Read program header TYPE from inferior memory. The header is found
439 by scanning the OS auxiliary vector.
440
441 If TYPE == -1, return the program headers instead of the contents of
442 one program header.
443
444 Return a pointer to allocated memory holding the program header contents,
445 or NULL on failure. If successful, and unless P_SECT_SIZE is NULL, the
446 size of those contents is returned to P_SECT_SIZE. Likewise, the target
447 architecture size (32-bit or 64-bit) is returned to P_ARCH_SIZE. */
448
449 static gdb_byte *
450 read_program_header (int type, int *p_sect_size, int *p_arch_size)
451 {
452 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
453 CORE_ADDR at_phdr, at_phent, at_phnum, pt_phdr = 0;
454 int arch_size, sect_size;
455 CORE_ADDR sect_addr;
456 gdb_byte *buf;
457 int pt_phdr_p = 0;
458
459 /* Get required auxv elements from target. */
460 if (target_auxv_search (&current_target, AT_PHDR, &at_phdr) <= 0)
461 return 0;
462 if (target_auxv_search (&current_target, AT_PHENT, &at_phent) <= 0)
463 return 0;
464 if (target_auxv_search (&current_target, AT_PHNUM, &at_phnum) <= 0)
465 return 0;
466 if (!at_phdr || !at_phnum)
467 return 0;
468
469 /* Determine ELF architecture type. */
470 if (at_phent == sizeof (Elf32_External_Phdr))
471 arch_size = 32;
472 else if (at_phent == sizeof (Elf64_External_Phdr))
473 arch_size = 64;
474 else
475 return 0;
476
477 /* Find the requested segment. */
478 if (type == -1)
479 {
480 sect_addr = at_phdr;
481 sect_size = at_phent * at_phnum;
482 }
483 else if (arch_size == 32)
484 {
485 Elf32_External_Phdr phdr;
486 int i;
487
488 /* Search for requested PHDR. */
489 for (i = 0; i < at_phnum; i++)
490 {
491 int p_type;
492
493 if (target_read_memory (at_phdr + i * sizeof (phdr),
494 (gdb_byte *)&phdr, sizeof (phdr)))
495 return 0;
496
497 p_type = extract_unsigned_integer ((gdb_byte *) phdr.p_type,
498 4, byte_order);
499
500 if (p_type == PT_PHDR)
501 {
502 pt_phdr_p = 1;
503 pt_phdr = extract_unsigned_integer ((gdb_byte *) phdr.p_vaddr,
504 4, byte_order);
505 }
506
507 if (p_type == type)
508 break;
509 }
510
511 if (i == at_phnum)
512 return 0;
513
514 /* Retrieve address and size. */
515 sect_addr = extract_unsigned_integer ((gdb_byte *)phdr.p_vaddr,
516 4, byte_order);
517 sect_size = extract_unsigned_integer ((gdb_byte *)phdr.p_memsz,
518 4, byte_order);
519 }
520 else
521 {
522 Elf64_External_Phdr phdr;
523 int i;
524
525 /* Search for requested PHDR. */
526 for (i = 0; i < at_phnum; i++)
527 {
528 int p_type;
529
530 if (target_read_memory (at_phdr + i * sizeof (phdr),
531 (gdb_byte *)&phdr, sizeof (phdr)))
532 return 0;
533
534 p_type = extract_unsigned_integer ((gdb_byte *) phdr.p_type,
535 4, byte_order);
536
537 if (p_type == PT_PHDR)
538 {
539 pt_phdr_p = 1;
540 pt_phdr = extract_unsigned_integer ((gdb_byte *) phdr.p_vaddr,
541 8, byte_order);
542 }
543
544 if (p_type == type)
545 break;
546 }
547
548 if (i == at_phnum)
549 return 0;
550
551 /* Retrieve address and size. */
552 sect_addr = extract_unsigned_integer ((gdb_byte *)phdr.p_vaddr,
553 8, byte_order);
554 sect_size = extract_unsigned_integer ((gdb_byte *)phdr.p_memsz,
555 8, byte_order);
556 }
557
558 /* PT_PHDR is optional, but we really need it
559 for PIE to make this work in general. */
560
561 if (pt_phdr_p)
562 {
563 /* AT_PHDR is the real address in memory. PT_PHDR is what the program
564 header says it is. The relocation offset is their difference. */
565 sect_addr = sect_addr + (at_phdr - pt_phdr);
566 }
567
568 /* Read in requested program header. */
569 buf = xmalloc (sect_size);
570 if (target_read_memory (sect_addr, buf, sect_size))
571 {
572 xfree (buf);
573 return NULL;
574 }
575
576 if (p_arch_size)
577 *p_arch_size = arch_size;
578 if (p_sect_size)
579 *p_sect_size = sect_size;
580
581 return buf;
582 }
583
584
585 /* Return program interpreter string. */
586 static char *
587 find_program_interpreter (void)
588 {
589 gdb_byte *buf = NULL;
590
591 /* If we have an exec_bfd, use its section table. */
592 if (exec_bfd
593 && bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
594 {
595 struct bfd_section *interp_sect;
596
597 interp_sect = bfd_get_section_by_name (exec_bfd, ".interp");
598 if (interp_sect != NULL)
599 {
600 int sect_size = bfd_section_size (exec_bfd, interp_sect);
601
602 buf = xmalloc (sect_size);
603 bfd_get_section_contents (exec_bfd, interp_sect, buf, 0, sect_size);
604 }
605 }
606
607 /* If we didn't find it, use the target auxiliary vector. */
608 if (!buf)
609 buf = read_program_header (PT_INTERP, NULL, NULL);
610
611 return (char *) buf;
612 }
613
614
615 /* Scan for DESIRED_DYNTAG in .dynamic section of ABFD. If DESIRED_DYNTAG is
616 found, 1 is returned and the corresponding PTR is set. */
617
618 static int
619 scan_dyntag (const int desired_dyntag, bfd *abfd, CORE_ADDR *ptr)
620 {
621 int arch_size, step, sect_size;
622 long current_dyntag;
623 CORE_ADDR dyn_ptr, dyn_addr;
624 gdb_byte *bufend, *bufstart, *buf;
625 Elf32_External_Dyn *x_dynp_32;
626 Elf64_External_Dyn *x_dynp_64;
627 struct bfd_section *sect;
628 struct target_section *target_section;
629
630 if (abfd == NULL)
631 return 0;
632
633 if (bfd_get_flavour (abfd) != bfd_target_elf_flavour)
634 return 0;
635
636 arch_size = bfd_get_arch_size (abfd);
637 if (arch_size == -1)
638 return 0;
639
640 /* Find the start address of the .dynamic section. */
641 sect = bfd_get_section_by_name (abfd, ".dynamic");
642 if (sect == NULL)
643 return 0;
644
645 for (target_section = current_target_sections->sections;
646 target_section < current_target_sections->sections_end;
647 target_section++)
648 if (sect == target_section->the_bfd_section)
649 break;
650 if (target_section < current_target_sections->sections_end)
651 dyn_addr = target_section->addr;
652 else
653 {
654 /* ABFD may come from OBJFILE acting only as a symbol file without being
655 loaded into the target (see add_symbol_file_command). In that case,
656 fall back to the file VMA address, without the possibility of
657 having the section relocated to its actual in-memory address. */
658
659 dyn_addr = bfd_section_vma (abfd, sect);
660 }
661
662 /* Read in .dynamic from the BFD. We will get the actual value
663 from memory later. */
664 sect_size = bfd_section_size (abfd, sect);
665 buf = bufstart = alloca (sect_size);
666 if (!bfd_get_section_contents (abfd, sect,
667 buf, 0, sect_size))
668 return 0;
669
670 /* Iterate over BUF and scan for DYNTAG. If found, set PTR and return. */
671 step = (arch_size == 32) ? sizeof (Elf32_External_Dyn)
672 : sizeof (Elf64_External_Dyn);
673 for (bufend = buf + sect_size;
674 buf < bufend;
675 buf += step)
676 {
677 if (arch_size == 32)
678 {
679 x_dynp_32 = (Elf32_External_Dyn *) buf;
680 current_dyntag = bfd_h_get_32 (abfd, (bfd_byte *) x_dynp_32->d_tag);
681 dyn_ptr = bfd_h_get_32 (abfd, (bfd_byte *) x_dynp_32->d_un.d_ptr);
682 }
683 else
684 {
685 x_dynp_64 = (Elf64_External_Dyn *) buf;
686 current_dyntag = bfd_h_get_64 (abfd, (bfd_byte *) x_dynp_64->d_tag);
687 dyn_ptr = bfd_h_get_64 (abfd, (bfd_byte *) x_dynp_64->d_un.d_ptr);
688 }
689 if (current_dyntag == DT_NULL)
690 return 0;
691 if (current_dyntag == desired_dyntag)
692 {
693 /* If requested, try to read the runtime value of this .dynamic
694 entry. */
695 if (ptr)
696 {
697 struct type *ptr_type;
698 gdb_byte ptr_buf[8];
699 CORE_ADDR ptr_addr;
700
701 ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
702 ptr_addr = dyn_addr + (buf - bufstart) + arch_size / 8;
703 if (target_read_memory (ptr_addr, ptr_buf, arch_size / 8) == 0)
704 dyn_ptr = extract_typed_address (ptr_buf, ptr_type);
705 *ptr = dyn_ptr;
706 }
707 return 1;
708 }
709 }
710
711 return 0;
712 }
713
714 /* Scan for DESIRED_DYNTAG in .dynamic section of the target's main executable,
715 found by consulting the OS auxiliary vector. If DESIRED_DYNTAG is found, 1
716 is returned and the corresponding PTR is set. */
717
718 static int
719 scan_dyntag_auxv (const int desired_dyntag, CORE_ADDR *ptr)
720 {
721 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
722 int sect_size, arch_size, step;
723 long current_dyntag;
724 CORE_ADDR dyn_ptr;
725 gdb_byte *bufend, *bufstart, *buf;
726
727 /* Read in .dynamic section. */
728 buf = bufstart = read_program_header (PT_DYNAMIC, &sect_size, &arch_size);
729 if (!buf)
730 return 0;
731
732 /* Iterate over BUF and scan for DYNTAG. If found, set PTR and return. */
733 step = (arch_size == 32) ? sizeof (Elf32_External_Dyn)
734 : sizeof (Elf64_External_Dyn);
735 for (bufend = buf + sect_size;
736 buf < bufend;
737 buf += step)
738 {
739 if (arch_size == 32)
740 {
741 Elf32_External_Dyn *dynp = (Elf32_External_Dyn *) buf;
742
743 current_dyntag = extract_unsigned_integer ((gdb_byte *) dynp->d_tag,
744 4, byte_order);
745 dyn_ptr = extract_unsigned_integer ((gdb_byte *) dynp->d_un.d_ptr,
746 4, byte_order);
747 }
748 else
749 {
750 Elf64_External_Dyn *dynp = (Elf64_External_Dyn *) buf;
751
752 current_dyntag = extract_unsigned_integer ((gdb_byte *) dynp->d_tag,
753 8, byte_order);
754 dyn_ptr = extract_unsigned_integer ((gdb_byte *) dynp->d_un.d_ptr,
755 8, byte_order);
756 }
757 if (current_dyntag == DT_NULL)
758 break;
759
760 if (current_dyntag == desired_dyntag)
761 {
762 if (ptr)
763 *ptr = dyn_ptr;
764
765 xfree (bufstart);
766 return 1;
767 }
768 }
769
770 xfree (bufstart);
771 return 0;
772 }
773
774 /* Locate the base address of dynamic linker structs for SVR4 elf
775 targets.
776
777 For SVR4 elf targets the address of the dynamic linker's runtime
778 structure is contained within the dynamic info section in the
779 executable file. The dynamic section is also mapped into the
780 inferior address space. Because the runtime loader fills in the
781 real address before starting the inferior, we have to read in the
782 dynamic info section from the inferior address space.
783 If there are any errors while trying to find the address, we
784 silently return 0, otherwise the found address is returned. */
785
786 static CORE_ADDR
787 elf_locate_base (void)
788 {
789 struct bound_minimal_symbol msymbol;
790 CORE_ADDR dyn_ptr;
791
792 /* Look for DT_MIPS_RLD_MAP first. MIPS executables use this
793 instead of DT_DEBUG, although they sometimes contain an unused
794 DT_DEBUG. */
795 if (scan_dyntag (DT_MIPS_RLD_MAP, exec_bfd, &dyn_ptr)
796 || scan_dyntag_auxv (DT_MIPS_RLD_MAP, &dyn_ptr))
797 {
798 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
799 gdb_byte *pbuf;
800 int pbuf_size = TYPE_LENGTH (ptr_type);
801
802 pbuf = alloca (pbuf_size);
803 /* DT_MIPS_RLD_MAP contains a pointer to the address
804 of the dynamic link structure. */
805 if (target_read_memory (dyn_ptr, pbuf, pbuf_size))
806 return 0;
807 return extract_typed_address (pbuf, ptr_type);
808 }
809
810 /* Find DT_DEBUG. */
811 if (scan_dyntag (DT_DEBUG, exec_bfd, &dyn_ptr)
812 || scan_dyntag_auxv (DT_DEBUG, &dyn_ptr))
813 return dyn_ptr;
814
815 /* This may be a static executable. Look for the symbol
816 conventionally named _r_debug, as a last resort. */
817 msymbol = lookup_minimal_symbol ("_r_debug", NULL, symfile_objfile);
818 if (msymbol.minsym != NULL)
819 return BMSYMBOL_VALUE_ADDRESS (msymbol);
820
821 /* DT_DEBUG entry not found. */
822 return 0;
823 }
824
825 /* Locate the base address of dynamic linker structs.
826
827 For both the SunOS and SVR4 shared library implementations, if the
828 inferior executable has been linked dynamically, there is a single
829 address somewhere in the inferior's data space which is the key to
830 locating all of the dynamic linker's runtime structures. This
831 address is the value of the debug base symbol. The job of this
832 function is to find and return that address, or to return 0 if there
833 is no such address (the executable is statically linked for example).
834
835 For SunOS, the job is almost trivial, since the dynamic linker and
836 all of its structures are statically linked to the executable at
837 link time. Thus the symbol for the address we are looking for has
838 already been added to the minimal symbol table for the executable's
839 objfile at the time the symbol file's symbols were read, and all we
840 have to do is look it up there. Note that we explicitly do NOT want
841 to find the copies in the shared library.
842
843 The SVR4 version is a bit more complicated because the address
844 is contained somewhere in the dynamic info section. We have to go
845 to a lot more work to discover the address of the debug base symbol.
846 Because of this complexity, we cache the value we find and return that
847 value on subsequent invocations. Note there is no copy in the
848 executable symbol tables. */
849
850 static CORE_ADDR
851 locate_base (struct svr4_info *info)
852 {
853 /* Check to see if we have a currently valid address, and if so, avoid
854 doing all this work again and just return the cached address. If
855 we have no cached address, try to locate it in the dynamic info
856 section for ELF executables. There's no point in doing any of this
857 though if we don't have some link map offsets to work with. */
858
859 if (info->debug_base == 0 && svr4_have_link_map_offsets ())
860 info->debug_base = elf_locate_base ();
861 return info->debug_base;
862 }
863
864 /* Find the first element in the inferior's dynamic link map, and
865 return its address in the inferior. Return zero if the address
866 could not be determined.
867
868 FIXME: Perhaps we should validate the info somehow, perhaps by
869 checking r_version for a known version number, or r_state for
870 RT_CONSISTENT. */
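/* For reference only, a sketch of the structure being read here: on
   typical SVR4/glibc systems the r_map, r_brk and r_version offsets
   used below index into something like `struct r_debug' from <link.h>
   (the real offsets come from svr4_fetch_link_map_offsets; r_ldsomap
   is a Solaris extension not present in this generic layout):

   struct r_debug
     {
       int r_version;              protocol version
       struct link_map *r_map;     head of the chain of loaded objects
       ElfW(Addr) r_brk;           address the linker calls on map changes
       enum { RT_CONSISTENT, RT_ADD, RT_DELETE } r_state;
       ElfW(Addr) r_ldbase;        base address of the dynamic linker
     };  */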
871
872 static CORE_ADDR
873 solib_svr4_r_map (struct svr4_info *info)
874 {
875 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
876 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
877 CORE_ADDR addr = 0;
878 volatile struct gdb_exception ex;
879
880 TRY_CATCH (ex, RETURN_MASK_ERROR)
881 {
882 addr = read_memory_typed_address (info->debug_base + lmo->r_map_offset,
883 ptr_type);
884 }
885 exception_print (gdb_stderr, ex);
886 return addr;
887 }
888
889 /* Find r_brk from the inferior's debug base. */
890
891 static CORE_ADDR
892 solib_svr4_r_brk (struct svr4_info *info)
893 {
894 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
895 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
896
897 return read_memory_typed_address (info->debug_base + lmo->r_brk_offset,
898 ptr_type);
899 }
900
901 /* Find the link map for the dynamic linker (if it is not in the
902 normal list of loaded shared objects). */
903
904 static CORE_ADDR
905 solib_svr4_r_ldsomap (struct svr4_info *info)
906 {
907 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
908 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
909 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
910 ULONGEST version;
911
912 /* Check version, and return zero if `struct r_debug' doesn't have
913 the r_ldsomap member. */
914 version
915 = read_memory_unsigned_integer (info->debug_base + lmo->r_version_offset,
916 lmo->r_version_size, byte_order);
917 if (version < 2 || lmo->r_ldsomap_offset == -1)
918 return 0;
919
920 return read_memory_typed_address (info->debug_base + lmo->r_ldsomap_offset,
921 ptr_type);
922 }
923
924 /* On Solaris systems with some versions of the dynamic linker,
925 ld.so's l_name pointer points to the SONAME in the string table
926 rather than into writable memory. So that GDB can find shared
927 libraries when loading a core file generated by gcore, ensure that
928 memory areas containing the l_name string are saved in the core
929 file. */
930
931 static int
932 svr4_keep_data_in_core (CORE_ADDR vaddr, unsigned long size)
933 {
934 struct svr4_info *info;
935 CORE_ADDR ldsomap;
936 struct so_list *new;
937 struct cleanup *old_chain;
938 CORE_ADDR name_lm;
939
940 info = get_svr4_info ();
941
942 info->debug_base = 0;
943 locate_base (info);
944 if (!info->debug_base)
945 return 0;
946
947 ldsomap = solib_svr4_r_ldsomap (info);
948 if (!ldsomap)
949 return 0;
950
951 new = XCNEW (struct so_list);
952 old_chain = make_cleanup (xfree, new);
953 new->lm_info = lm_info_read (ldsomap);
954 make_cleanup (xfree, new->lm_info);
955 name_lm = new->lm_info ? new->lm_info->l_name : 0;
956 do_cleanups (old_chain);
957
958 return (name_lm >= vaddr && name_lm < vaddr + size);
959 }
960
961 /* Implement the "open_symbol_file_object" target_so_ops method.
962
963 If no open symbol file, attempt to locate and open the main symbol
964 file. On SVR4 systems, this is the first link map entry. If its
965 name is here, we can open it. Useful when attaching to a process
966 without first loading its symbol file. */
967
968 static int
969 open_symbol_file_object (void *from_ttyp)
970 {
971 CORE_ADDR lm, l_name;
972 char *filename;
973 int errcode;
974 int from_tty = *(int *)from_ttyp;
975 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
976 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
977 int l_name_size = TYPE_LENGTH (ptr_type);
978 gdb_byte *l_name_buf = xmalloc (l_name_size);
979 struct cleanup *cleanups = make_cleanup (xfree, l_name_buf);
980 struct svr4_info *info = get_svr4_info ();
981
982 if (symfile_objfile)
983 if (!query (_("Attempt to reload symbols from process? ")))
984 {
985 do_cleanups (cleanups);
986 return 0;
987 }
988
989 /* Always locate the debug struct, in case it has moved. */
990 info->debug_base = 0;
991 if (locate_base (info) == 0)
992 {
993 do_cleanups (cleanups);
994 return 0; /* failed somehow... */
995 }
996
997 /* First link map member should be the executable. */
998 lm = solib_svr4_r_map (info);
999 if (lm == 0)
1000 {
1001 do_cleanups (cleanups);
1002 return 0; /* failed somehow... */
1003 }
1004
1005 /* Read address of name from target memory to GDB. */
1006 read_memory (lm + lmo->l_name_offset, l_name_buf, l_name_size);
1007
1008 /* Convert the address to host format. */
1009 l_name = extract_typed_address (l_name_buf, ptr_type);
1010
1011 if (l_name == 0)
1012 {
1013 do_cleanups (cleanups);
1014 return 0; /* No filename. */
1015 }
1016
1017 /* Now fetch the filename from target memory. */
1018 target_read_string (l_name, &filename, SO_NAME_MAX_PATH_SIZE - 1, &errcode);
1019 make_cleanup (xfree, filename);
1020
1021 if (errcode)
1022 {
1023 warning (_("failed to read exec filename from attached file: %s"),
1024 safe_strerror (errcode));
1025 do_cleanups (cleanups);
1026 return 0;
1027 }
1028
1029 /* Have a pathname: read the symbol file. */
1030 symbol_file_add_main (filename, from_tty);
1031
1032 do_cleanups (cleanups);
1033 return 1;
1034 }
1035
1036 /* Data exchange structure for the XML parser as returned by
1037 svr4_current_sos_via_xfer_libraries. */
1038
1039 struct svr4_library_list
1040 {
1041 struct so_list *head, **tailp;
1042
1043 /* Inferior address of struct link_map used for the main executable. It is
1044 NULL if not known. */
1045 CORE_ADDR main_lm;
1046 };
1047
1048 /* Implementation for target_so_ops.free_so. */
1049
1050 static void
1051 svr4_free_so (struct so_list *so)
1052 {
1053 xfree (so->lm_info);
1054 }
1055
1056 /* Implement target_so_ops.clear_so. */
1057
1058 static void
1059 svr4_clear_so (struct so_list *so)
1060 {
1061 if (so->lm_info != NULL)
1062 so->lm_info->l_addr_p = 0;
1063 }
1064
1065 /* Free so_list built so far (called via cleanup). */
1066
1067 static void
1068 svr4_free_library_list (void *p_list)
1069 {
1070 struct so_list *list = *(struct so_list **) p_list;
1071
1072 while (list != NULL)
1073 {
1074 struct so_list *next = list->next;
1075
1076 free_so (list);
1077 list = next;
1078 }
1079 }
1080
1081 /* Copy library list. */
1082
1083 static struct so_list *
1084 svr4_copy_library_list (struct so_list *src)
1085 {
1086 struct so_list *dst = NULL;
1087 struct so_list **link = &dst;
1088
1089 while (src != NULL)
1090 {
1091 struct so_list *new;
1092
1093 new = xmalloc (sizeof (struct so_list));
1094 memcpy (new, src, sizeof (struct so_list));
1095
1096 new->lm_info = xmalloc (sizeof (struct lm_info));
1097 memcpy (new->lm_info, src->lm_info, sizeof (struct lm_info));
1098
1099 new->next = NULL;
1100 *link = new;
1101 link = &new->next;
1102
1103 src = src->next;
1104 }
1105
1106 return dst;
1107 }
1108
1109 #ifdef HAVE_LIBEXPAT
1110
1111 #include "xml-support.h"
1112
1113 /* Handle the start of a <library> element. Note: new elements are added
1114 at the tail of the list, keeping the list in order. */
1115
1116 static void
1117 library_list_start_library (struct gdb_xml_parser *parser,
1118 const struct gdb_xml_element *element,
1119 void *user_data, VEC(gdb_xml_value_s) *attributes)
1120 {
1121 struct svr4_library_list *list = user_data;
1122 const char *name = xml_find_attribute (attributes, "name")->value;
1123 ULONGEST *lmp = xml_find_attribute (attributes, "lm")->value;
1124 ULONGEST *l_addrp = xml_find_attribute (attributes, "l_addr")->value;
1125 ULONGEST *l_ldp = xml_find_attribute (attributes, "l_ld")->value;
1126 struct so_list *new_elem;
1127
1128 new_elem = XCNEW (struct so_list);
1129 new_elem->lm_info = XCNEW (struct lm_info);
1130 new_elem->lm_info->lm_addr = *lmp;
1131 new_elem->lm_info->l_addr_inferior = *l_addrp;
1132 new_elem->lm_info->l_ld = *l_ldp;
1133
1134 strncpy (new_elem->so_name, name, sizeof (new_elem->so_name) - 1);
1135 new_elem->so_name[sizeof (new_elem->so_name) - 1] = 0;
1136 strcpy (new_elem->so_original_name, new_elem->so_name);
1137
1138 *list->tailp = new_elem;
1139 list->tailp = &new_elem->next;
1140 }
1141
1142 /* Handle the start of a <library-list-svr4> element. */
1143
1144 static void
1145 svr4_library_list_start_list (struct gdb_xml_parser *parser,
1146 const struct gdb_xml_element *element,
1147 void *user_data, VEC(gdb_xml_value_s) *attributes)
1148 {
1149 struct svr4_library_list *list = user_data;
1150 const char *version = xml_find_attribute (attributes, "version")->value;
1151 struct gdb_xml_value *main_lm = xml_find_attribute (attributes, "main-lm");
1152
1153 if (strcmp (version, "1.0") != 0)
1154 gdb_xml_error (parser,
1155 _("SVR4 Library list has unsupported version \"%s\""),
1156 version);
1157
1158 if (main_lm)
1159 list->main_lm = *(ULONGEST *) main_lm->value;
1160 }
1161
1162 /* The allowed elements and attributes for an XML library list.
1163 The root element is a <library-list-svr4>. */
1164
1165 static const struct gdb_xml_attribute svr4_library_attributes[] =
1166 {
1167 { "name", GDB_XML_AF_NONE, NULL, NULL },
1168 { "lm", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1169 { "l_addr", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1170 { "l_ld", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1171 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1172 };
1173
1174 static const struct gdb_xml_element svr4_library_list_children[] =
1175 {
1176 {
1177 "library", svr4_library_attributes, NULL,
1178 GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL,
1179 library_list_start_library, NULL
1180 },
1181 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1182 };
1183
1184 static const struct gdb_xml_attribute svr4_library_list_attributes[] =
1185 {
1186 { "version", GDB_XML_AF_NONE, NULL, NULL },
1187 { "main-lm", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
1188 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1189 };
1190
1191 static const struct gdb_xml_element svr4_library_list_elements[] =
1192 {
1193 { "library-list-svr4", svr4_library_list_attributes, svr4_library_list_children,
1194 GDB_XML_EF_NONE, svr4_library_list_start_list, NULL },
1195 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1196 };
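/* For illustration, a document matching the elements above, as returned
   for qXfer:libraries-svr4:read, might look like the following (the
   addresses shown are placeholders):

   <library-list-svr4 version="1.0" main-lm="0x00007ffff7ffe190">
     <library name="/lib64/libc.so.6" lm="0x00007ffff7bd2700"
              l_addr="0x00007ffff780a000" l_ld="0x00007ffff7bb6b40"/>
   </library-list-svr4>  */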
1197
1198 /* Parse a qXfer:libraries-svr4:read document into *LIST.
1199
1200 Return 0 if the packet is not supported; *LIST is not modified in that
1201 case. Return 1 if *LIST contains the library list; it may be empty,
1202 and the caller is responsible for freeing all its entries. */
1203
1204 static int
1205 svr4_parse_libraries (const char *document, struct svr4_library_list *list)
1206 {
1207 struct cleanup *back_to = make_cleanup (svr4_free_library_list,
1208 &list->head);
1209
1210 memset (list, 0, sizeof (*list));
1211 list->tailp = &list->head;
1212 if (gdb_xml_parse_quick (_("target library list"), "library-list-svr4.dtd",
1213 svr4_library_list_elements, document, list) == 0)
1214 {
1215 /* Parsed successfully, keep the result. */
1216 discard_cleanups (back_to);
1217 return 1;
1218 }
1219
1220 do_cleanups (back_to);
1221 return 0;
1222 }
1223
1224 /* Attempt to get so_list from target via qXfer:libraries-svr4:read packet.
1225
1226 Return 0 if the packet is not supported; *LIST is not modified in that
1227 case. Return 1 if *LIST contains the library list; it may be empty,
1228 and the caller is responsible for freeing all its entries.
1229
1230 Note that ANNEX must be NULL if the remote does not explicitly allow
1231 qXfer:libraries-svr4:read packets with non-empty annexes. Support for
1232 this can be checked using target_augmented_libraries_svr4_read (). */
1233
1234 static int
1235 svr4_current_sos_via_xfer_libraries (struct svr4_library_list *list,
1236 const char *annex)
1237 {
1238 char *svr4_library_document;
1239 int result;
1240 struct cleanup *back_to;
1241
1242 gdb_assert (annex == NULL || target_augmented_libraries_svr4_read ());
1243
1244 /* Fetch the list of shared libraries. */
1245 svr4_library_document = target_read_stralloc (&current_target,
1246 TARGET_OBJECT_LIBRARIES_SVR4,
1247 annex);
1248 if (svr4_library_document == NULL)
1249 return 0;
1250
1251 back_to = make_cleanup (xfree, svr4_library_document);
1252 result = svr4_parse_libraries (svr4_library_document, list);
1253 do_cleanups (back_to);
1254
1255 return result;
1256 }
1257
1258 #else
1259
1260 static int
1261 svr4_current_sos_via_xfer_libraries (struct svr4_library_list *list,
1262 const char *annex)
1263 {
1264 return 0;
1265 }
1266
1267 #endif
1268
1269 /* If no shared library information is available from the dynamic
1270 linker, build a fallback list from other sources. */
1271
1272 static struct so_list *
1273 svr4_default_sos (void)
1274 {
1275 struct svr4_info *info = get_svr4_info ();
1276 struct so_list *new;
1277
1278 if (!info->debug_loader_offset_p)
1279 return NULL;
1280
1281 new = XCNEW (struct so_list);
1282
1283 new->lm_info = xzalloc (sizeof (struct lm_info));
1284
1285 /* Nothing will ever check the other fields if we set l_addr_p. */
1286 new->lm_info->l_addr = info->debug_loader_offset;
1287 new->lm_info->l_addr_p = 1;
1288
1289 strncpy (new->so_name, info->debug_loader_name, SO_NAME_MAX_PATH_SIZE - 1);
1290 new->so_name[SO_NAME_MAX_PATH_SIZE - 1] = '\0';
1291 strcpy (new->so_original_name, new->so_name);
1292
1293 return new;
1294 }
1295
1296 /* Read the whole inferior libraries chain starting at address LM.
1297 Expect the l_prev field of the first entry in the chain to be PREV_LM.
1298 Add the entries to the tail referenced by LINK_PTR_PTR. Ignore the
1299 first entry if IGNORE_FIRST and set global MAIN_LM_ADDR according
1300 to it. Returns nonzero upon success. If zero is returned the
1301 entries stored to LINK_PTR_PTR are still valid although they may
1302 represent only part of the inferior library list. */
1303
1304 static int
1305 svr4_read_so_list (CORE_ADDR lm, CORE_ADDR prev_lm,
1306 struct so_list ***link_ptr_ptr, int ignore_first)
1307 {
1308 CORE_ADDR first_l_name = 0;
1309 CORE_ADDR next_lm;
1310
1311 for (; lm != 0; prev_lm = lm, lm = next_lm)
1312 {
1313 struct so_list *new;
1314 struct cleanup *old_chain;
1315 int errcode;
1316 char *buffer;
1317
1318 new = XCNEW (struct so_list);
1319 old_chain = make_cleanup_free_so (new);
1320
1321 new->lm_info = lm_info_read (lm);
1322 if (new->lm_info == NULL)
1323 {
1324 do_cleanups (old_chain);
1325 return 0;
1326 }
1327
1328 next_lm = new->lm_info->l_next;
1329
1330 if (new->lm_info->l_prev != prev_lm)
1331 {
1332 warning (_("Corrupted shared library list: %s != %s"),
1333 paddress (target_gdbarch (), prev_lm),
1334 paddress (target_gdbarch (), new->lm_info->l_prev));
1335 do_cleanups (old_chain);
1336 return 0;
1337 }
1338
1339 /* For SVR4 versions, the first entry in the link map is for the
1340 inferior executable, so we must ignore it. For some versions of
1341 SVR4, it has no name. For others (Solaris 2.3 for example), it
1342 does have a name, so we can no longer use a missing name to
1343 decide when to ignore it. */
1344 if (ignore_first && new->lm_info->l_prev == 0)
1345 {
1346 struct svr4_info *info = get_svr4_info ();
1347
1348 first_l_name = new->lm_info->l_name;
1349 info->main_lm_addr = new->lm_info->lm_addr;
1350 do_cleanups (old_chain);
1351 continue;
1352 }
1353
1354 /* Extract this shared object's name. */
1355 target_read_string (new->lm_info->l_name, &buffer,
1356 SO_NAME_MAX_PATH_SIZE - 1, &errcode);
1357 if (errcode != 0)
1358 {
1359 /* If this entry's l_name address matches that of the
1360 inferior executable, then this is not a normal shared
1361 object, but (most likely) a vDSO. In this case, silently
1362 skip it; otherwise emit a warning. */
1363 if (first_l_name == 0 || new->lm_info->l_name != first_l_name)
1364 warning (_("Can't read pathname for load map: %s."),
1365 safe_strerror (errcode));
1366 do_cleanups (old_chain);
1367 continue;
1368 }
1369
1370 strncpy (new->so_name, buffer, SO_NAME_MAX_PATH_SIZE - 1);
1371 new->so_name[SO_NAME_MAX_PATH_SIZE - 1] = '\0';
1372 strcpy (new->so_original_name, new->so_name);
1373 xfree (buffer);
1374
1375 /* If this entry has no name, or its name matches the name
1376 for the main executable, don't include it in the list. */
1377 if (! new->so_name[0] || match_main (new->so_name))
1378 {
1379 do_cleanups (old_chain);
1380 continue;
1381 }
1382
1383 discard_cleanups (old_chain);
1384 new->next = 0;
1385 **link_ptr_ptr = new;
1386 *link_ptr_ptr = &new->next;
1387 }
1388
1389 return 1;
1390 }
1391
1392 /* Read the full list of currently loaded shared objects directly
1393 from the inferior, without referring to any libraries read and
1394 stored by the probes interface. Handle special cases relating
1395 to the first elements of the list. */
1396
1397 static struct so_list *
1398 svr4_current_sos_direct (struct svr4_info *info)
1399 {
1400 CORE_ADDR lm;
1401 struct so_list *head = NULL;
1402 struct so_list **link_ptr = &head;
1403 struct cleanup *back_to;
1404 int ignore_first;
1405 struct svr4_library_list library_list;
1406
1407 /* Fall back to manual examination of the target if the packet is not
1408 supported or gdbserver failed to find DT_DEBUG. gdb.server/solib-list.exp
1409 tests a case where gdbserver cannot find the shared libraries list while
1410 GDB itself is able to find it via SYMFILE_OBJFILE.
1411
1412 Unfortunately statically linked inferiors will also fall back through this
1413 suboptimal code path. */
1414
1415 info->using_xfer = svr4_current_sos_via_xfer_libraries (&library_list,
1416 NULL);
1417 if (info->using_xfer)
1418 {
1419 if (library_list.main_lm)
1420 info->main_lm_addr = library_list.main_lm;
1421
1422 return library_list.head ? library_list.head : svr4_default_sos ();
1423 }
1424
1425 /* Always locate the debug struct, in case it has moved. */
1426 info->debug_base = 0;
1427 locate_base (info);
1428
1429 /* If we can't find the dynamic linker's base structure, this
1430 must not be a dynamically linked executable. Hmm. */
1431 if (! info->debug_base)
1432 return svr4_default_sos ();
1433
1434 /* Assume that everything is a library if the dynamic loader was loaded
1435 late by a static executable. */
1436 if (exec_bfd && bfd_get_section_by_name (exec_bfd, ".dynamic") == NULL)
1437 ignore_first = 0;
1438 else
1439 ignore_first = 1;
1440
1441 back_to = make_cleanup (svr4_free_library_list, &head);
1442
1443 /* Walk the inferior's link map list, and build our list of
1444 `struct so_list' nodes. */
1445 lm = solib_svr4_r_map (info);
1446 if (lm)
1447 svr4_read_so_list (lm, 0, &link_ptr, ignore_first);
1448
1449 /* On Solaris, the dynamic linker is not in the normal list of
1450 shared objects, so make sure we pick it up too. Having
1451 symbol information for the dynamic linker is quite crucial
1452 for skipping dynamic linker resolver code. */
1453 lm = solib_svr4_r_ldsomap (info);
1454 if (lm)
1455 svr4_read_so_list (lm, 0, &link_ptr, 0);
1456
1457 discard_cleanups (back_to);
1458
1459 if (head == NULL)
1460 return svr4_default_sos ();
1461
1462 return head;
1463 }
1464
1465 /* Implement the "current_sos" target_so_ops method. */
1466
1467 static struct so_list *
1468 svr4_current_sos (void)
1469 {
1470 struct svr4_info *info = get_svr4_info ();
1471
1472 /* If the solib list has been read and stored by the probes
1473 interface then we return a copy of the stored list. */
1474 if (info->solib_list != NULL)
1475 return svr4_copy_library_list (info->solib_list);
1476
1477 /* Otherwise obtain the solib list directly from the inferior. */
1478 return svr4_current_sos_direct (info);
1479 }
1480
1481 /* Get the address of the link_map for a given OBJFILE. */
1482
1483 CORE_ADDR
1484 svr4_fetch_objfile_link_map (struct objfile *objfile)
1485 {
1486 struct so_list *so;
1487 struct svr4_info *info = get_svr4_info ();
1488
1489 /* Cause svr4_current_sos() to be run if it hasn't been already. */
1490 if (info->main_lm_addr == 0)
1491 solib_add (NULL, 0, &current_target, auto_solib_add);
1492
1493 /* svr4_current_sos() will set main_lm_addr for the main executable. */
1494 if (objfile == symfile_objfile)
1495 return info->main_lm_addr;
1496
1497 /* The other link map addresses may be found by examining the list
1498 of shared libraries. */
1499 for (so = master_so_list (); so; so = so->next)
1500 if (so->objfile == objfile)
1501 return so->lm_info->lm_addr;
1502
1503 /* Not found! */
1504 return 0;
1505 }
1506
1507 /* On some systems, the only way to recognize the link map entry for
1508 the main executable file is by looking at its name. Return
1509 non-zero iff SONAME matches one of the known main executable names. */
1510
1511 static int
1512 match_main (const char *soname)
1513 {
1514 const char * const *mainp;
1515
1516 for (mainp = main_name_list; *mainp != NULL; mainp++)
1517 {
1518 if (strcmp (soname, *mainp) == 0)
1519 return (1);
1520 }
1521
1522 return (0);
1523 }
1524
1525 /* Return 1 if PC lies in the dynamic symbol resolution code of the
1526 SVR4 run time loader. */
1527
1528 int
1529 svr4_in_dynsym_resolve_code (CORE_ADDR pc)
1530 {
1531 struct svr4_info *info = get_svr4_info ();
1532
1533 return ((pc >= info->interp_text_sect_low
1534 && pc < info->interp_text_sect_high)
1535 || (pc >= info->interp_plt_sect_low
1536 && pc < info->interp_plt_sect_high)
1537 || in_plt_section (pc)
1538 || in_gnu_ifunc_stub (pc));
1539 }
1540
1541 /* Given an executable's ABFD and target, compute the entry-point
1542 address. */
1543
1544 static CORE_ADDR
1545 exec_entry_point (struct bfd *abfd, struct target_ops *targ)
1546 {
1547 CORE_ADDR addr;
1548
1549 /* KevinB wrote ... for most targets, the address returned by
1550 bfd_get_start_address() is the entry point for the start
1551 function. But, for some targets, bfd_get_start_address() returns
1552 the address of a function descriptor from which the entry point
1553 address may be extracted. This address is extracted by
1554 gdbarch_convert_from_func_ptr_addr(). The method
1555 gdbarch_convert_from_func_ptr_addr() is the merely the identify
1556 function for targets which don't use function descriptors. */
1557 addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
1558 bfd_get_start_address (abfd),
1559 targ);
1560 return gdbarch_addr_bits_remove (target_gdbarch (), addr);
1561 }
1562
1563 /* A probe and its associated action. */
1564
1565 struct probe_and_action
1566 {
1567 /* The probe. */
1568 struct probe *probe;
1569
1570 /* The relocated address of the probe. */
1571 CORE_ADDR address;
1572
1573 /* The action. */
1574 enum probe_action action;
1575 };
1576
1577 /* Returns a hash code for the probe_and_action referenced by p. */
1578
1579 static hashval_t
1580 hash_probe_and_action (const void *p)
1581 {
1582 const struct probe_and_action *pa = p;
1583
1584 return (hashval_t) pa->address;
1585 }
1586
1587 /* Returns non-zero if the probe_and_actions referenced by p1 and p2
1588 are equal. */
1589
1590 static int
1591 equal_probe_and_action (const void *p1, const void *p2)
1592 {
1593 const struct probe_and_action *pa1 = p1;
1594 const struct probe_and_action *pa2 = p2;
1595
1596 return pa1->address == pa2->address;
1597 }
1598
1599 /* Register a solib event probe and its associated action in the
1600 probes table. */
1601
1602 static void
1603 register_solib_event_probe (struct probe *probe, CORE_ADDR address,
1604 enum probe_action action)
1605 {
1606 struct svr4_info *info = get_svr4_info ();
1607 struct probe_and_action lookup, *pa;
1608 void **slot;
1609
1610 /* Create the probes table, if necessary. */
1611 if (info->probes_table == NULL)
1612 info->probes_table = htab_create_alloc (1, hash_probe_and_action,
1613 equal_probe_and_action,
1614 xfree, xcalloc, xfree);
1615
1616 lookup.probe = probe;
1617 lookup.address = address;
1618 slot = htab_find_slot (info->probes_table, &lookup, INSERT);
1619 gdb_assert (*slot == HTAB_EMPTY_ENTRY);
1620
1621 pa = XCNEW (struct probe_and_action);
1622 pa->probe = probe;
1623 pa->address = address;
1624 pa->action = action;
1625
1626 *slot = pa;
1627 }
1628
1629 /* Get the solib event probe at the specified location, and the
1630 action associated with it. Returns NULL if no solib event probe
1631 was found. */
1632
1633 static struct probe_and_action *
1634 solib_event_probe_at (struct svr4_info *info, CORE_ADDR address)
1635 {
1636 struct probe_and_action lookup;
1637 void **slot;
1638
1639 lookup.address = address;
1640 slot = htab_find_slot (info->probes_table, &lookup, NO_INSERT);
1641
1642 if (slot == NULL)
1643 return NULL;
1644
1645 return (struct probe_and_action *) *slot;
1646 }
1647
1648 /* Decide what action to take when the specified solib event probe is
1649 hit. */
1650
1651 static enum probe_action
1652 solib_event_probe_action (struct probe_and_action *pa)
1653 {
1654 enum probe_action action;
1655 unsigned probe_argc;
1656 struct frame_info *frame = get_current_frame ();
1657
1658 action = pa->action;
1659 if (action == DO_NOTHING || action == PROBES_INTERFACE_FAILED)
1660 return action;
1661
1662 gdb_assert (action == FULL_RELOAD || action == UPDATE_OR_RELOAD);
1663
1664 /* Check that an appropriate number of arguments has been supplied.
1665 We expect:
1666 arg0: Lmid_t lmid (mandatory)
1667 arg1: struct r_debug *debug_base (mandatory)
1668 arg2: struct link_map *new (optional, for incremental updates) */
1669 probe_argc = get_probe_argument_count (pa->probe, frame);
1670 if (probe_argc == 2)
1671 action = FULL_RELOAD;
1672 else if (probe_argc < 2)
1673 action = PROBES_INTERFACE_FAILED;
1674
1675 return action;
1676 }
1677
1678 /* Populate the shared object list by reading the entire list of
1679 shared objects from the inferior. Handle special cases relating
1680 to the first elements of the list. Returns nonzero on success. */
1681
1682 static int
1683 solist_update_full (struct svr4_info *info)
1684 {
1685 free_solib_list (info);
1686 info->solib_list = svr4_current_sos_direct (info);
1687
1688 return 1;
1689 }
1690
1691 /* Update the shared object list starting from the link-map entry
1692 passed by the linker in the probe's third argument. Returns
1693 nonzero if the list was successfully updated, or zero to indicate
1694 failure. */
1695
1696 static int
1697 solist_update_incremental (struct svr4_info *info, CORE_ADDR lm)
1698 {
1699 struct so_list *tail;
1700 CORE_ADDR prev_lm;
1701
1702 /* svr4_current_sos_direct contains logic to handle a number of
1703 special cases relating to the first elements of the list. To
1704 avoid duplicating this logic we defer to solist_update_full
1705 if the list is empty. */
1706 if (info->solib_list == NULL)
1707 return 0;
1708
1709 /* Fall back to a full update if we are using a remote target
1710 that does not support incremental transfers. */
1711 if (info->using_xfer && !target_augmented_libraries_svr4_read ())
1712 return 0;
1713
1714 /* Walk to the end of the list. */
1715 for (tail = info->solib_list; tail->next != NULL; tail = tail->next)
1716 /* Nothing. */;
1717 prev_lm = tail->lm_info->lm_addr;
1718
1719 /* Read the new objects. */
1720 if (info->using_xfer)
1721 {
1722 struct svr4_library_list library_list;
1723 char annex[64];
1724
1725 xsnprintf (annex, sizeof (annex), "start=%s;prev=%s",
1726 phex_nz (lm, sizeof (lm)),
1727 phex_nz (prev_lm, sizeof (prev_lm)));
1728 if (!svr4_current_sos_via_xfer_libraries (&library_list, annex))
1729 return 0;
1730
1731 tail->next = library_list.head;
1732 }
1733 else
1734 {
1735 struct so_list **link = &tail->next;
1736
1737 /* IGNORE_FIRST may safely be set to zero here because the
1738 above check and deferral to solist_update_full ensures
1739 that this call to svr4_read_so_list will never see the
1740 first element. */
1741 if (!svr4_read_so_list (lm, prev_lm, &link, 0))
1742 return 0;
1743 }
1744
1745 return 1;
1746 }
1747
1748 /* Disable the probes-based linker interface and revert to the
1749 original interface. We don't reset the breakpoints as the
1750 ones set up for the probes-based interface are adequate. */
1751
1752 static void
1753 disable_probes_interface_cleanup (void *arg)
1754 {
1755 struct svr4_info *info = get_svr4_info ();
1756
1757 warning (_("Probes-based dynamic linker interface failed.\n"
1758 "Reverting to original interface.\n"));
1759
1760 free_probes_table (info);
1761 free_solib_list (info);
1762 }
1763
1764 /* Update the solib list as appropriate when using the
1765 probes-based linker interface. Do nothing if using the
1766 standard interface. */
1767
1768 static void
1769 svr4_handle_solib_event (void)
1770 {
1771 struct svr4_info *info = get_svr4_info ();
1772 struct probe_and_action *pa;
1773 enum probe_action action;
1774 struct cleanup *old_chain, *usm_chain;
1775 struct value *val;
1776 CORE_ADDR pc, debug_base, lm = 0;
1777 int is_initial_ns;
1778 struct frame_info *frame = get_current_frame ();
1779
1780 /* Do nothing if not using the probes interface. */
1781 if (info->probes_table == NULL)
1782 return;
1783
1784 /* If anything goes wrong we revert to the original linker
1785 interface. */
1786 old_chain = make_cleanup (disable_probes_interface_cleanup, NULL);
1787
1788 pc = regcache_read_pc (get_current_regcache ());
1789 pa = solib_event_probe_at (info, pc);
1790 if (pa == NULL)
1791 {
1792 do_cleanups (old_chain);
1793 return;
1794 }
1795
1796 action = solib_event_probe_action (pa);
1797 if (action == PROBES_INTERFACE_FAILED)
1798 {
1799 do_cleanups (old_chain);
1800 return;
1801 }
1802
1803 if (action == DO_NOTHING)
1804 {
1805 discard_cleanups (old_chain);
1806 return;
1807 }
1808
1809 /* evaluate_probe_argument looks up symbols in the dynamic linker
1810 using find_pc_section. find_pc_section is accelerated by a cache
1811 called the section map. The section map is invalidated every
1812 time a shared library is loaded or unloaded, and if the inferior
1813 is generating a lot of shared library events then the section map
1814 will be updated every time svr4_handle_solib_event is called.
1815 We called find_pc_section in svr4_create_solib_event_breakpoints,
1816 so we can guarantee that the dynamic linker's sections are in the
1817 section map. We can therefore inhibit section map updates across
1818 these calls to evaluate_probe_argument and save a lot of time. */
1819 inhibit_section_map_updates (current_program_space);
1820 usm_chain = make_cleanup (resume_section_map_updates_cleanup,
1821 current_program_space);
1822
1823 val = evaluate_probe_argument (pa->probe, 1, frame);
1824 if (val == NULL)
1825 {
1826 do_cleanups (old_chain);
1827 return;
1828 }
1829
1830 debug_base = value_as_address (val);
1831 if (debug_base == 0)
1832 {
1833 do_cleanups (old_chain);
1834 return;
1835 }
1836
1837 /* Always locate the debug struct, in case it moved. */
1838 info->debug_base = 0;
1839 if (locate_base (info) == 0)
1840 {
1841 do_cleanups (old_chain);
1842 return;
1843 }
1844
1845 /* GDB does not currently support libraries loaded via dlmopen
1846 into namespaces other than the initial one. We must ignore
1847 any namespace other than the initial namespace here until
1848 support for this is added to GDB. */
1849 if (debug_base != info->debug_base)
1850 action = DO_NOTHING;
1851
1852 if (action == UPDATE_OR_RELOAD)
1853 {
1854 val = evaluate_probe_argument (pa->probe, 2, frame);
1855 if (val != NULL)
1856 lm = value_as_address (val);
1857
1858 if (lm == 0)
1859 action = FULL_RELOAD;
1860 }
1861
1862 /* Resume section map updates. */
1863 do_cleanups (usm_chain);
1864
1865 if (action == UPDATE_OR_RELOAD)
1866 {
1867 if (!solist_update_incremental (info, lm))
1868 action = FULL_RELOAD;
1869 }
1870
1871 if (action == FULL_RELOAD)
1872 {
1873 if (!solist_update_full (info))
1874 {
1875 do_cleanups (old_chain);
1876 return;
1877 }
1878 }
1879
1880 discard_cleanups (old_chain);
1881 }
1882
1883 /* Helper function for svr4_update_solib_event_breakpoints. */
1884
1885 static int
1886 svr4_update_solib_event_breakpoint (struct breakpoint *b, void *arg)
1887 {
1888 struct bp_location *loc;
1889
1890 if (b->type != bp_shlib_event)
1891 {
1892 /* Continue iterating. */
1893 return 0;
1894 }
1895
1896 for (loc = b->loc; loc != NULL; loc = loc->next)
1897 {
1898 struct svr4_info *info;
1899 struct probe_and_action *pa;
1900
1901 info = program_space_data (loc->pspace, solib_svr4_pspace_data);
1902 if (info == NULL || info->probes_table == NULL)
1903 continue;
1904
1905 pa = solib_event_probe_at (info, loc->address);
1906 if (pa == NULL)
1907 continue;
1908
1909 if (pa->action == DO_NOTHING)
1910 {
1911 if (b->enable_state == bp_disabled && stop_on_solib_events)
1912 enable_breakpoint (b);
1913 else if (b->enable_state == bp_enabled && !stop_on_solib_events)
1914 disable_breakpoint (b);
1915 }
1916
1917 break;
1918 }
1919
1920 /* Continue iterating. */
1921 return 0;
1922 }
1923
1924 /* Enable or disable optional solib event breakpoints as appropriate.
1925 Called whenever stop_on_solib_events is changed. */
1926
1927 static void
1928 svr4_update_solib_event_breakpoints (void)
1929 {
1930 iterate_over_breakpoints (svr4_update_solib_event_breakpoint, NULL);
1931 }
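
/* Illustrative note added to this listing (not original code):
   STOP_ON_SOLIB_EVENTS tracks the user-visible setting

     (gdb) set stop-on-solib-events 1

   With the probes interface active, breakpoints placed on probes whose
   action is DO_NOTHING are only useful when the user wants to stop on
   every shared library event; the function above keeps them enabled
   exactly while that setting is on, and disabled otherwise.  */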
1932
1933 /* Create and register solib event breakpoints. PROBES is an array
1934 of NUM_PROBES elements, each of which is vector of probes. A
1935 solib event breakpoint will be created and registered for each
1936 probe. */
1937
1938 static void
1939 svr4_create_probe_breakpoints (struct gdbarch *gdbarch,
1940 VEC (probe_p) **probes,
1941 struct objfile *objfile)
1942 {
1943 int i;
1944
1945 for (i = 0; i < NUM_PROBES; i++)
1946 {
1947 enum probe_action action = probe_info[i].action;
1948 struct probe *probe;
1949 int ix;
1950
1951 for (ix = 0;
1952 VEC_iterate (probe_p, probes[i], ix, probe);
1953 ++ix)
1954 {
1955 CORE_ADDR address = get_probe_address (probe, objfile);
1956
1957 create_solib_event_breakpoint (gdbarch, address);
1958 register_solib_event_probe (probe, address, action);
1959 }
1960 }
1961
1962 svr4_update_solib_event_breakpoints ();
1963 }
1964
1965 /* Both the SunOS and the SVR4 dynamic linkers call a marker function
1966 before and after mapping and unmapping shared libraries. The sole
1967 purpose of this method is to allow debuggers to set a breakpoint so
1968 they can track these changes.
1969
1970 Some versions of the glibc dynamic linker contain named probes
1971	 to allow more fine-grained stopping. Given the address of the
1972 original marker function, this function attempts to find these
1973 probes, and if found, sets breakpoints on those instead. If the
1974 probes aren't found, a single breakpoint is set on the original
1975 marker function. */
1976
1977 static void
1978 svr4_create_solib_event_breakpoints (struct gdbarch *gdbarch,
1979 CORE_ADDR address)
1980 {
1981 struct obj_section *os;
1982
1983 os = find_pc_section (address);
1984 if (os != NULL)
1985 {
1986 int with_prefix;
1987
1988 for (with_prefix = 0; with_prefix <= 1; with_prefix++)
1989 {
1990 VEC (probe_p) *probes[NUM_PROBES];
1991 int all_probes_found = 1;
1992 int checked_can_use_probe_arguments = 0;
1993 int i;
1994
1995 memset (probes, 0, sizeof (probes));
1996 for (i = 0; i < NUM_PROBES; i++)
1997 {
1998 const char *name = probe_info[i].name;
1999 struct probe *p;
2000 char buf[32];
2001
2002 /* Fedora 17 and Red Hat Enterprise Linux 6.2-6.4
2003 shipped with an early version of the probes code in
2004 which the probes' names were prefixed with "rtld_"
2005 and the "map_failed" probe did not exist. The
2006 locations of the probes are otherwise the same, so
2007 we check for probes with prefixed names if probes
2008 with unprefixed names are not present. */
2009 if (with_prefix)
2010 {
2011 xsnprintf (buf, sizeof (buf), "rtld_%s", name);
2012 name = buf;
2013 }
2014
2015 probes[i] = find_probes_in_objfile (os->objfile, "rtld", name);
2016
2017 /* The "map_failed" probe did not exist in early
2018 versions of the probes code in which the probes'
2019 names were prefixed with "rtld_". */
2020 if (strcmp (name, "rtld_map_failed") == 0)
2021 continue;
2022
2023 if (VEC_empty (probe_p, probes[i]))
2024 {
2025 all_probes_found = 0;
2026 break;
2027 }
2028
2029 /* Ensure probe arguments can be evaluated. */
2030 if (!checked_can_use_probe_arguments)
2031 {
2032 p = VEC_index (probe_p, probes[i], 0);
2033 if (!can_evaluate_probe_arguments (p))
2034 {
2035 all_probes_found = 0;
2036 break;
2037 }
2038 checked_can_use_probe_arguments = 1;
2039 }
2040 }
2041
2042 if (all_probes_found)
2043 svr4_create_probe_breakpoints (gdbarch, probes, os->objfile);
2044
2045 for (i = 0; i < NUM_PROBES; i++)
2046 VEC_free (probe_p, probes[i]);
2047
2048 if (all_probes_found)
2049 return;
2050 }
2051 }
2052
2053 create_solib_event_breakpoint (gdbarch, address);
2054 }
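
/* Illustrative note added to this listing (not original code): the probes
   looked up above are systemtap SDT probes in glibc ld.so's "rtld"
   provider.  On a typical GNU/Linux system they can be listed from within
   GDB with, e.g.:

     (gdb) info probes rtld

   Depending on the glibc version this shows names such as init_start,
   init_complete, map_start, map_failed, reloc_complete, unmap_start and
   unmap_complete, or the same names with an "rtld_" prefix on the older
   releases mentioned in the comment above.  */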
2055
2056 /* Helper function for gdb_bfd_lookup_symbol. */
2057
2058 static int
2059 cmp_name_and_sec_flags (asymbol *sym, void *data)
2060 {
2061 return (strcmp (sym->name, (const char *) data) == 0
2062 && (sym->section->flags & (SEC_CODE | SEC_DATA)) != 0);
2063 }
2064 /* Arrange for dynamic linker to hit breakpoint.
2065
2066 Both the SunOS and the SVR4 dynamic linkers have, as part of their
2067 debugger interface, support for arranging for the inferior to hit
2068 a breakpoint after mapping in the shared libraries. This function
2069 enables that breakpoint.
2070
2071 For SunOS, there is a special flag location (in_debugger) which we
2072 set to 1. When the dynamic linker sees this flag set, it will set
2073 a breakpoint at a location known only to itself, after saving the
2074 original contents of that place and the breakpoint address itself,
2075	 in its own internal structures. When we resume the inferior, it
2076 will eventually take a SIGTRAP when it runs into the breakpoint.
2077 We handle this (in a different place) by restoring the contents of
2078 the breakpointed location (which is only known after it stops),
2079 chasing around to locate the shared libraries that have been
2080 loaded, then resuming.
2081
2082 For SVR4, the debugger interface structure contains a member (r_brk)
2083 which is statically initialized at the time the shared library is
2084	 built, to the offset of a function (_r_debug_state) which is
2085	 guaranteed to be called once before mapping in a library, and again when
2086 the mapping is complete. At the time we are examining this member,
2087 it contains only the unrelocated offset of the function, so we have
2088 to do our own relocation. Later, when the dynamic linker actually
2089 runs, it relocates r_brk to be the actual address of _r_debug_state().
2090
2091 The debugger interface structure also contains an enumeration which
2092 is set to either RT_ADD or RT_DELETE prior to changing the mapping,
2093 depending upon whether or not the library is being mapped or unmapped,
2094 and then set to RT_CONSISTENT after the library is mapped/unmapped. */
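
/* For reference, a minimal sketch added for illustration, assuming the
   glibc/SVR4 <link.h> definitions, of the structure the comment above
   describes:

     struct r_debug
       {
         int r_version;           // Protocol version.
         struct link_map *r_map;  // Head of the loaded-object chain.
         ElfW(Addr) r_brk;        // Address of _r_debug_state ().
         enum { RT_CONSISTENT, RT_ADD, RT_DELETE } r_state;
         ElfW(Addr) r_ldbase;     // Base address of the dynamic linker.
       };

   This file does not use the target's <link.h> directly; it reads these
   fields through the link_map_offsets tables defined near the end of
   this file.  */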
2095
2096 static int
2097 enable_break (struct svr4_info *info, int from_tty)
2098 {
2099 struct bound_minimal_symbol msymbol;
2100 const char * const *bkpt_namep;
2101 asection *interp_sect;
2102 char *interp_name;
2103 CORE_ADDR sym_addr;
2104
2105 info->interp_text_sect_low = info->interp_text_sect_high = 0;
2106 info->interp_plt_sect_low = info->interp_plt_sect_high = 0;
2107
2108 /* If we already have a shared library list in the target, and
2109 r_debug contains r_brk, set the breakpoint there - this should
2110 mean r_brk has already been relocated. Assume the dynamic linker
2111 is the object containing r_brk. */
2112
2113 solib_add (NULL, from_tty, &current_target, auto_solib_add);
2114 sym_addr = 0;
2115 if (info->debug_base && solib_svr4_r_map (info) != 0)
2116 sym_addr = solib_svr4_r_brk (info);
2117
2118 if (sym_addr != 0)
2119 {
2120 struct obj_section *os;
2121
2122 sym_addr = gdbarch_addr_bits_remove
2123 (target_gdbarch (), gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
2124 sym_addr,
2125 &current_target));
2126
2127 /* On at least some versions of Solaris there's a dynamic relocation
2128 on _r_debug.r_brk and SYM_ADDR may not be relocated yet, e.g., if
2129 we get control before the dynamic linker has self-relocated.
2130 Check if SYM_ADDR is in a known section, if it is assume we can
2131 trust its value. This is just a heuristic though, it could go away
2132 or be replaced if it's getting in the way.
2133
2134 On ARM we need to know whether the ISA of rtld_db_dlactivity (or
2135 however it's spelled in your particular system) is ARM or Thumb.
2136 That knowledge is encoded in the address, if it's Thumb the low bit
2137 is 1. However, we've stripped that info above and it's not clear
2138 what all the consequences are of passing a non-addr_bits_remove'd
2139 address to svr4_create_solib_event_breakpoints. The call to
2140 find_pc_section verifies we know about the address and have some
2141 hope of computing the right kind of breakpoint to use (via
2142 symbol info). It does mean that GDB needs to be pointed at a
2143 non-stripped version of the dynamic linker in order to obtain
2144 information it already knows about. Sigh. */
2145
2146 os = find_pc_section (sym_addr);
2147 if (os != NULL)
2148 {
2149 /* Record the relocated start and end address of the dynamic linker
2150 text and plt section for svr4_in_dynsym_resolve_code. */
2151 bfd *tmp_bfd;
2152 CORE_ADDR load_addr;
2153
2154 tmp_bfd = os->objfile->obfd;
2155 load_addr = ANOFFSET (os->objfile->section_offsets,
2156 SECT_OFF_TEXT (os->objfile));
2157
2158 interp_sect = bfd_get_section_by_name (tmp_bfd, ".text");
2159 if (interp_sect)
2160 {
2161 info->interp_text_sect_low =
2162 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
2163 info->interp_text_sect_high =
2164 info->interp_text_sect_low
2165 + bfd_section_size (tmp_bfd, interp_sect);
2166 }
2167 interp_sect = bfd_get_section_by_name (tmp_bfd, ".plt");
2168 if (interp_sect)
2169 {
2170 info->interp_plt_sect_low =
2171 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
2172 info->interp_plt_sect_high =
2173 info->interp_plt_sect_low
2174 + bfd_section_size (tmp_bfd, interp_sect);
2175 }
2176
2177 svr4_create_solib_event_breakpoints (target_gdbarch (), sym_addr);
2178 return 1;
2179 }
2180 }
2181
2182 /* Find the program interpreter; if not found, warn the user and drop
2183	 into the old breakpoint-at-symbol code. */
2184 interp_name = find_program_interpreter ();
2185 if (interp_name)
2186 {
2187 CORE_ADDR load_addr = 0;
2188 int load_addr_found = 0;
2189 int loader_found_in_list = 0;
2190 struct so_list *so;
2191 bfd *tmp_bfd = NULL;
2192 struct target_ops *tmp_bfd_target;
2193 volatile struct gdb_exception ex;
2194
2195 sym_addr = 0;
2196
2197 /* Now we need to figure out where the dynamic linker was
2198 loaded so that we can load its symbols and place a breakpoint
2199 in the dynamic linker itself.
2200
2201 This address is stored on the stack. However, I've been unable
2202 to find any magic formula to find it for Solaris (appears to
2203 be trivial on GNU/Linux). Therefore, we have to try an alternate
2204 mechanism to find the dynamic linker's base address. */
2205
2206 TRY_CATCH (ex, RETURN_MASK_ALL)
2207 {
2208 tmp_bfd = solib_bfd_open (interp_name);
2209 }
2210 if (tmp_bfd == NULL)
2211 goto bkpt_at_symbol;
2212
2213	 /* Now convert the TMP_BFD into a target. That way both target
2214	 and BFD operations can be used. */
2215 tmp_bfd_target = target_bfd_reopen (tmp_bfd);
2216 /* target_bfd_reopen acquired its own reference, so we can
2217 release ours now. */
2218 gdb_bfd_unref (tmp_bfd);
2219
2220 /* On a running target, we can get the dynamic linker's base
2221 address from the shared library table. */
2222 so = master_so_list ();
2223 while (so)
2224 {
2225 if (svr4_same_1 (interp_name, so->so_original_name))
2226 {
2227 load_addr_found = 1;
2228 loader_found_in_list = 1;
2229 load_addr = lm_addr_check (so, tmp_bfd);
2230 break;
2231 }
2232 so = so->next;
2233 }
2234
2235 /* If we were not able to find the base address of the loader
2236	 from our so_list, then try using the AT_BASE auxiliary entry. */
2237 if (!load_addr_found)
2238 if (target_auxv_search (&current_target, AT_BASE, &load_addr) > 0)
2239 {
2240 int addr_bit = gdbarch_addr_bit (target_gdbarch ());
2241
2242 /* Ensure LOAD_ADDR has proper sign in its possible upper bits so
2243 that `+ load_addr' will overflow CORE_ADDR width not creating
2244 invalid addresses like 0x101234567 for 32bit inferiors on 64bit
2245 GDB. */
2246
2247 if (addr_bit < (sizeof (CORE_ADDR) * HOST_CHAR_BIT))
2248 {
2249 CORE_ADDR space_size = (CORE_ADDR) 1 << addr_bit;
2250 CORE_ADDR tmp_entry_point = exec_entry_point (tmp_bfd,
2251 tmp_bfd_target);
2252
2253 gdb_assert (load_addr < space_size);
2254
2255 /* TMP_ENTRY_POINT exceeding SPACE_SIZE would be for prelinked
2256	 64bit ld.so with 32bit executable; it should not happen. */
2257
2258 if (tmp_entry_point < space_size
2259 && tmp_entry_point + load_addr >= space_size)
2260 load_addr -= space_size;
2261 }
2262
2263 load_addr_found = 1;
2264 }
2265
2266 /* Otherwise we find the dynamic linker's base address by examining
2267 the current pc (which should point at the entry point for the
2268 dynamic linker) and subtracting the offset of the entry point.
2269
2270 This is more fragile than the previous approaches, but is a good
2271 fallback method because it has actually been working well in
2272 most cases. */
2273 if (!load_addr_found)
2274 {
2275 struct regcache *regcache
2276 = get_thread_arch_regcache (inferior_ptid, target_gdbarch ());
2277
2278 load_addr = (regcache_read_pc (regcache)
2279 - exec_entry_point (tmp_bfd, tmp_bfd_target));
2280 }
2281
2282 if (!loader_found_in_list)
2283 {
2284 info->debug_loader_name = xstrdup (interp_name);
2285 info->debug_loader_offset_p = 1;
2286 info->debug_loader_offset = load_addr;
2287 solib_add (NULL, from_tty, &current_target, auto_solib_add);
2288 }
2289
2290 /* Record the relocated start and end address of the dynamic linker
2291 text and plt section for svr4_in_dynsym_resolve_code. */
2292 interp_sect = bfd_get_section_by_name (tmp_bfd, ".text");
2293 if (interp_sect)
2294 {
2295 info->interp_text_sect_low =
2296 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
2297 info->interp_text_sect_high =
2298 info->interp_text_sect_low
2299 + bfd_section_size (tmp_bfd, interp_sect);
2300 }
2301 interp_sect = bfd_get_section_by_name (tmp_bfd, ".plt");
2302 if (interp_sect)
2303 {
2304 info->interp_plt_sect_low =
2305 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
2306 info->interp_plt_sect_high =
2307 info->interp_plt_sect_low
2308 + bfd_section_size (tmp_bfd, interp_sect);
2309 }
2310
2311 /* Now try to set a breakpoint in the dynamic linker. */
2312 for (bkpt_namep = solib_break_names; *bkpt_namep != NULL; bkpt_namep++)
2313 {
2314 sym_addr = gdb_bfd_lookup_symbol (tmp_bfd, cmp_name_and_sec_flags,
2315 (void *) *bkpt_namep);
2316 if (sym_addr != 0)
2317 break;
2318 }
2319
2320 if (sym_addr != 0)
2321 /* Convert 'sym_addr' from a function pointer to an address.
2322 Because we pass tmp_bfd_target instead of the current
2323 target, this will always produce an unrelocated value. */
2324 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
2325 sym_addr,
2326 tmp_bfd_target);
2327
2328 /* We're done with both the temporary bfd and target. Closing
2329 the target closes the underlying bfd, because it holds the
2330 only remaining reference. */
2331 target_close (tmp_bfd_target);
2332
2333 if (sym_addr != 0)
2334 {
2335 svr4_create_solib_event_breakpoints (target_gdbarch (),
2336 load_addr + sym_addr);
2337 xfree (interp_name);
2338 return 1;
2339 }
2340
2341 /* For whatever reason we couldn't set a breakpoint in the dynamic
2342 linker. Warn and drop into the old code. */
2343 bkpt_at_symbol:
2344 xfree (interp_name);
2345 warning (_("Unable to find dynamic linker breakpoint function.\n"
2346 "GDB will be unable to debug shared library initializers\n"
2347 "and track explicitly loaded dynamic code."));
2348 }
2349
2350 /* Scan through the lists of symbols, trying to look up the symbol and
2351	 set a breakpoint there. Terminate the loop when/if we succeed. */
2352
2353 for (bkpt_namep = solib_break_names; *bkpt_namep != NULL; bkpt_namep++)
2354 {
2355 msymbol = lookup_minimal_symbol (*bkpt_namep, NULL, symfile_objfile);
2356 if ((msymbol.minsym != NULL)
2357 && (BMSYMBOL_VALUE_ADDRESS (msymbol) != 0))
2358 {
2359 sym_addr = BMSYMBOL_VALUE_ADDRESS (msymbol);
2360 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
2361 sym_addr,
2362 &current_target);
2363 svr4_create_solib_event_breakpoints (target_gdbarch (), sym_addr);
2364 return 1;
2365 }
2366 }
2367
2368 if (interp_name != NULL && !current_inferior ()->attach_flag)
2369 {
2370 for (bkpt_namep = bkpt_names; *bkpt_namep != NULL; bkpt_namep++)
2371 {
2372 msymbol = lookup_minimal_symbol (*bkpt_namep, NULL, symfile_objfile);
2373 if ((msymbol.minsym != NULL)
2374 && (BMSYMBOL_VALUE_ADDRESS (msymbol) != 0))
2375 {
2376 sym_addr = BMSYMBOL_VALUE_ADDRESS (msymbol);
2377 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
2378 sym_addr,
2379 &current_target);
2380 svr4_create_solib_event_breakpoints (target_gdbarch (), sym_addr);
2381 return 1;
2382 }
2383 }
2384 }
2385 return 0;
2386 }
2387
2388 /* Implement the "special_symbol_handling" target_so_ops method. */
2389
2390 static void
2391 svr4_special_symbol_handling (void)
2392 {
2393 /* Nothing to do. */
2394 }
2395
2396 /* Read the ELF program headers from ABFD. Return the contents and
2397 set *PHDRS_SIZE to the size of the program headers. */
2398
2399 static gdb_byte *
2400 read_program_headers_from_bfd (bfd *abfd, int *phdrs_size)
2401 {
2402 Elf_Internal_Ehdr *ehdr;
2403 gdb_byte *buf;
2404
2405 ehdr = elf_elfheader (abfd);
2406
2407 *phdrs_size = ehdr->e_phnum * ehdr->e_phentsize;
2408 if (*phdrs_size == 0)
2409 return NULL;
2410
2411 buf = xmalloc (*phdrs_size);
2412 if (bfd_seek (abfd, ehdr->e_phoff, SEEK_SET) != 0
2413 || bfd_bread (buf, *phdrs_size, abfd) != *phdrs_size)
2414 {
2415 xfree (buf);
2416 return NULL;
2417 }
2418
2419 return buf;
2420 }
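
/* For illustration (added to this listing; the real declarations live in
   BFD's include/elf/external.h): the program header comparison in
   svr4_exec_displacement below works on the raw on-disk representation,
   in which every field is a byte array holding a target-byte-order value,
   roughly:

     typedef struct {
       unsigned char p_type[4];
       unsigned char p_offset[4];
       unsigned char p_vaddr[4];
       unsigned char p_paddr[4];
       unsigned char p_filesz[4];
       unsigned char p_memsz[4];
       unsigned char p_flags[4];
       unsigned char p_align[4];
     } Elf32_External_Phdr;

   This is why the code below memcmp's whole headers and memset's
   individual fields byte-wise instead of comparing integer members.  */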
2421
2422 /* Return 1 and fill *DISPLACEMENTP with detected PIE offset of inferior
2423 exec_bfd. Otherwise return 0.
2424
2425 We relocate all of the sections by the same amount. This
2426 behavior is mandated by recent editions of the System V ABI.
2427 According to the System V Application Binary Interface,
2428 Edition 4.1, page 5-5:
2429
2430 ... Though the system chooses virtual addresses for
2431 individual processes, it maintains the segments' relative
2432 positions. Because position-independent code uses relative
2433	 addressing between segments, the difference between
2434 virtual addresses in memory must match the difference
2435 between virtual addresses in the file. The difference
2436 between the virtual address of any segment in memory and
2437 the corresponding virtual address in the file is thus a
2438 single constant value for any one executable or shared
2439 object in a given process. This difference is the base
2440 address. One use of the base address is to relocate the
2441 memory image of the program during dynamic linking.
2442
2443 The same language also appears in Edition 4.0 of the System V
2444 ABI and is left unspecified in some of the earlier editions.
2445
2446 Decide if the objfile needs to be relocated. As indicated above, we will
2447	 only be here when execution is stopped. But during attachment the PC can be at
2448	 an arbitrary address, so regcache_read_pc can be misleading (contrary to
2449	 the auxv AT_ENTRY value). Moreover, for an executable with an interpreter
2450	 section, regcache_read_pc would point to the interpreter, not the main executable.
2451
2452 So, to summarize, relocations are necessary when the start address obtained
2453 from the executable is different from the address in auxv AT_ENTRY entry.
2454
2455 [ The astute reader will note that we also test to make sure that
2456 the executable in question has the DYNAMIC flag set. It is my
2457 opinion that this test is unnecessary (undesirable even). It
2458 was added to avoid inadvertent relocation of an executable
2459 whose e_type member in the ELF header is not ET_DYN. There may
2460 be a time in the future when it is desirable to do relocations
2461 on other types of files as well in which case this condition
2462	 should either be removed or modified to accommodate the new file
2463 type. - Kevin, Nov 2000. ] */
2464
2465 static int
2466 svr4_exec_displacement (CORE_ADDR *displacementp)
2467 {
2468 /* ENTRY_POINT is a possible function descriptor - before
2469 a call to gdbarch_convert_from_func_ptr_addr. */
2470 CORE_ADDR entry_point, displacement;
2471
2472 if (exec_bfd == NULL)
2473 return 0;
2474
2475 /* Therefore for ELF it is ET_EXEC and not ET_DYN. Both shared libraries
2476 being executed themselves and PIE (Position Independent Executable)
2477 executables are ET_DYN. */
2478
2479 if ((bfd_get_file_flags (exec_bfd) & DYNAMIC) == 0)
2480 return 0;
2481
2482 if (target_auxv_search (&current_target, AT_ENTRY, &entry_point) <= 0)
2483 return 0;
2484
2485 displacement = entry_point - bfd_get_start_address (exec_bfd);
2486
2487 /* Verify the DISPLACEMENT candidate complies with the required page
2488 alignment. It is cheaper than the program headers comparison below. */
2489
2490 if (bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
2491 {
2492 const struct elf_backend_data *elf = get_elf_backend_data (exec_bfd);
2493
2494 /* p_align of PT_LOAD segments does not specify any alignment but
2495 only congruency of addresses:
2496 p_offset % p_align == p_vaddr % p_align
2497	 The kernel is free to load the executable with lower alignment. */
2498
2499 if ((displacement & (elf->minpagesize - 1)) != 0)
2500 return 0;
2501 }
2502
2503	 /* Verify that the auxiliary vector describes the same file as exec_bfd, by
2504	 comparing their program headers. If the program headers in the auxiliary
2505 vector do not match the program headers in the executable, then we are
2506 looking at a different file than the one used by the kernel - for
2507 instance, "gdb program" connected to "gdbserver :PORT ld.so program". */
2508
2509 if (bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
2510 {
2511 /* Be optimistic and clear OK only if GDB was able to verify the headers
2512 really do not match. */
2513 int phdrs_size, phdrs2_size, ok = 1;
2514 gdb_byte *buf, *buf2;
2515 int arch_size;
2516
2517 buf = read_program_header (-1, &phdrs_size, &arch_size);
2518 buf2 = read_program_headers_from_bfd (exec_bfd, &phdrs2_size);
2519 if (buf != NULL && buf2 != NULL)
2520 {
2521 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
2522
2523 /* We are dealing with three different addresses. EXEC_BFD
2524	 represents the addresses in the on-disk file. The target memory content
2525	 may differ from EXEC_BFD, as the file may have been prelinked
2526	 to a different address after the executable was loaded.
2527 Moreover the address of placement in target memory can be
2528 different from what the program headers in target memory say -
2529 this is the goal of PIE.
2530
2531 Detected DISPLACEMENT covers both the offsets of PIE placement and
2532 possible new prelink performed after start of the program. Here
2533 relocate BUF and BUF2 just by the EXEC_BFD vs. target memory
2534 content offset for the verification purpose. */
2535
2536 if (phdrs_size != phdrs2_size
2537 || bfd_get_arch_size (exec_bfd) != arch_size)
2538 ok = 0;
2539 else if (arch_size == 32
2540 && phdrs_size >= sizeof (Elf32_External_Phdr)
2541 && phdrs_size % sizeof (Elf32_External_Phdr) == 0)
2542 {
2543 Elf_Internal_Ehdr *ehdr2 = elf_tdata (exec_bfd)->elf_header;
2544 Elf_Internal_Phdr *phdr2 = elf_tdata (exec_bfd)->phdr;
2545 CORE_ADDR displacement = 0;
2546 int i;
2547
2548 /* DISPLACEMENT could be found more easily by the difference of
2549 ehdr2->e_entry. But we haven't read the ehdr yet, and we
2550 already have enough information to compute that displacement
2551 with what we've read. */
2552
2553 for (i = 0; i < ehdr2->e_phnum; i++)
2554 if (phdr2[i].p_type == PT_LOAD)
2555 {
2556 Elf32_External_Phdr *phdrp;
2557 gdb_byte *buf_vaddr_p, *buf_paddr_p;
2558 CORE_ADDR vaddr, paddr;
2559 CORE_ADDR displacement_vaddr = 0;
2560 CORE_ADDR displacement_paddr = 0;
2561
2562 phdrp = &((Elf32_External_Phdr *) buf)[i];
2563 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
2564 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
2565
2566 vaddr = extract_unsigned_integer (buf_vaddr_p, 4,
2567 byte_order);
2568 displacement_vaddr = vaddr - phdr2[i].p_vaddr;
2569
2570 paddr = extract_unsigned_integer (buf_paddr_p, 4,
2571 byte_order);
2572 displacement_paddr = paddr - phdr2[i].p_paddr;
2573
2574 if (displacement_vaddr == displacement_paddr)
2575 displacement = displacement_vaddr;
2576
2577 break;
2578 }
2579
2580 /* Now compare BUF and BUF2 with optional DISPLACEMENT. */
2581
2582 for (i = 0; i < phdrs_size / sizeof (Elf32_External_Phdr); i++)
2583 {
2584 Elf32_External_Phdr *phdrp;
2585 Elf32_External_Phdr *phdr2p;
2586 gdb_byte *buf_vaddr_p, *buf_paddr_p;
2587 CORE_ADDR vaddr, paddr;
2588 asection *plt2_asect;
2589
2590 phdrp = &((Elf32_External_Phdr *) buf)[i];
2591 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
2592 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
2593 phdr2p = &((Elf32_External_Phdr *) buf2)[i];
2594
2595	 /* PT_GNU_STACK is an exception in that it is never relocated by
2596	 prelink, as its addresses are always zero. */
2597
2598 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2599 continue;
2600
2601 /* Check also other adjustment combinations - PR 11786. */
2602
2603 vaddr = extract_unsigned_integer (buf_vaddr_p, 4,
2604 byte_order);
2605 vaddr -= displacement;
2606 store_unsigned_integer (buf_vaddr_p, 4, byte_order, vaddr);
2607
2608 paddr = extract_unsigned_integer (buf_paddr_p, 4,
2609 byte_order);
2610 paddr -= displacement;
2611 store_unsigned_integer (buf_paddr_p, 4, byte_order, paddr);
2612
2613 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2614 continue;
2615
2616 /* Strip modifies the flags and alignment of PT_GNU_RELRO.
2617 CentOS-5 has problems with filesz, memsz as well.
2618 See PR 11786. */
2619 if (phdr2[i].p_type == PT_GNU_RELRO)
2620 {
2621 Elf32_External_Phdr tmp_phdr = *phdrp;
2622 Elf32_External_Phdr tmp_phdr2 = *phdr2p;
2623
2624 memset (tmp_phdr.p_filesz, 0, 4);
2625 memset (tmp_phdr.p_memsz, 0, 4);
2626 memset (tmp_phdr.p_flags, 0, 4);
2627 memset (tmp_phdr.p_align, 0, 4);
2628 memset (tmp_phdr2.p_filesz, 0, 4);
2629 memset (tmp_phdr2.p_memsz, 0, 4);
2630 memset (tmp_phdr2.p_flags, 0, 4);
2631 memset (tmp_phdr2.p_align, 0, 4);
2632
2633 if (memcmp (&tmp_phdr, &tmp_phdr2, sizeof (tmp_phdr))
2634 == 0)
2635 continue;
2636 }
2637
2638 /* prelink can convert .plt SHT_NOBITS to SHT_PROGBITS. */
2639 plt2_asect = bfd_get_section_by_name (exec_bfd, ".plt");
2640 if (plt2_asect)
2641 {
2642 int content2;
2643 gdb_byte *buf_filesz_p = (gdb_byte *) &phdrp->p_filesz;
2644 CORE_ADDR filesz;
2645
2646 content2 = (bfd_get_section_flags (exec_bfd, plt2_asect)
2647 & SEC_HAS_CONTENTS) != 0;
2648
2649 filesz = extract_unsigned_integer (buf_filesz_p, 4,
2650 byte_order);
2651
2652	 /* PLT2_ASECT is from the on-disk file (exec_bfd) while
2653 FILESZ is from the in-memory image. */
2654 if (content2)
2655 filesz += bfd_get_section_size (plt2_asect);
2656 else
2657 filesz -= bfd_get_section_size (plt2_asect);
2658
2659 store_unsigned_integer (buf_filesz_p, 4, byte_order,
2660 filesz);
2661
2662 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2663 continue;
2664 }
2665
2666 ok = 0;
2667 break;
2668 }
2669 }
2670 else if (arch_size == 64
2671 && phdrs_size >= sizeof (Elf64_External_Phdr)
2672 && phdrs_size % sizeof (Elf64_External_Phdr) == 0)
2673 {
2674 Elf_Internal_Ehdr *ehdr2 = elf_tdata (exec_bfd)->elf_header;
2675 Elf_Internal_Phdr *phdr2 = elf_tdata (exec_bfd)->phdr;
2676 CORE_ADDR displacement = 0;
2677 int i;
2678
2679 /* DISPLACEMENT could be found more easily by the difference of
2680 ehdr2->e_entry. But we haven't read the ehdr yet, and we
2681 already have enough information to compute that displacement
2682 with what we've read. */
2683
2684 for (i = 0; i < ehdr2->e_phnum; i++)
2685 if (phdr2[i].p_type == PT_LOAD)
2686 {
2687 Elf64_External_Phdr *phdrp;
2688 gdb_byte *buf_vaddr_p, *buf_paddr_p;
2689 CORE_ADDR vaddr, paddr;
2690 CORE_ADDR displacement_vaddr = 0;
2691 CORE_ADDR displacement_paddr = 0;
2692
2693 phdrp = &((Elf64_External_Phdr *) buf)[i];
2694 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
2695 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
2696
2697 vaddr = extract_unsigned_integer (buf_vaddr_p, 8,
2698 byte_order);
2699 displacement_vaddr = vaddr - phdr2[i].p_vaddr;
2700
2701 paddr = extract_unsigned_integer (buf_paddr_p, 8,
2702 byte_order);
2703 displacement_paddr = paddr - phdr2[i].p_paddr;
2704
2705 if (displacement_vaddr == displacement_paddr)
2706 displacement = displacement_vaddr;
2707
2708 break;
2709 }
2710
2711 /* Now compare BUF and BUF2 with optional DISPLACEMENT. */
2712
2713 for (i = 0; i < phdrs_size / sizeof (Elf64_External_Phdr); i++)
2714 {
2715 Elf64_External_Phdr *phdrp;
2716 Elf64_External_Phdr *phdr2p;
2717 gdb_byte *buf_vaddr_p, *buf_paddr_p;
2718 CORE_ADDR vaddr, paddr;
2719 asection *plt2_asect;
2720
2721 phdrp = &((Elf64_External_Phdr *) buf)[i];
2722 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
2723 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
2724 phdr2p = &((Elf64_External_Phdr *) buf2)[i];
2725
2726	 /* PT_GNU_STACK is an exception in that it is never relocated by
2727	 prelink, as its addresses are always zero. */
2728
2729 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2730 continue;
2731
2732 /* Check also other adjustment combinations - PR 11786. */
2733
2734 vaddr = extract_unsigned_integer (buf_vaddr_p, 8,
2735 byte_order);
2736 vaddr -= displacement;
2737 store_unsigned_integer (buf_vaddr_p, 8, byte_order, vaddr);
2738
2739 paddr = extract_unsigned_integer (buf_paddr_p, 8,
2740 byte_order);
2741 paddr -= displacement;
2742 store_unsigned_integer (buf_paddr_p, 8, byte_order, paddr);
2743
2744 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2745 continue;
2746
2747 /* Strip modifies the flags and alignment of PT_GNU_RELRO.
2748 CentOS-5 has problems with filesz, memsz as well.
2749 See PR 11786. */
2750 if (phdr2[i].p_type == PT_GNU_RELRO)
2751 {
2752 Elf64_External_Phdr tmp_phdr = *phdrp;
2753 Elf64_External_Phdr tmp_phdr2 = *phdr2p;
2754
2755 memset (tmp_phdr.p_filesz, 0, 8);
2756 memset (tmp_phdr.p_memsz, 0, 8);
2757 memset (tmp_phdr.p_flags, 0, 4);
2758 memset (tmp_phdr.p_align, 0, 8);
2759 memset (tmp_phdr2.p_filesz, 0, 8);
2760 memset (tmp_phdr2.p_memsz, 0, 8);
2761 memset (tmp_phdr2.p_flags, 0, 4);
2762 memset (tmp_phdr2.p_align, 0, 8);
2763
2764 if (memcmp (&tmp_phdr, &tmp_phdr2, sizeof (tmp_phdr))
2765 == 0)
2766 continue;
2767 }
2768
2769 /* prelink can convert .plt SHT_NOBITS to SHT_PROGBITS. */
2770 plt2_asect = bfd_get_section_by_name (exec_bfd, ".plt");
2771 if (plt2_asect)
2772 {
2773 int content2;
2774 gdb_byte *buf_filesz_p = (gdb_byte *) &phdrp->p_filesz;
2775 CORE_ADDR filesz;
2776
2777 content2 = (bfd_get_section_flags (exec_bfd, plt2_asect)
2778 & SEC_HAS_CONTENTS) != 0;
2779
2780 filesz = extract_unsigned_integer (buf_filesz_p, 8,
2781 byte_order);
2782
2783	 /* PLT2_ASECT is from the on-disk file (exec_bfd) while
2784 FILESZ is from the in-memory image. */
2785 if (content2)
2786 filesz += bfd_get_section_size (plt2_asect);
2787 else
2788 filesz -= bfd_get_section_size (plt2_asect);
2789
2790 store_unsigned_integer (buf_filesz_p, 8, byte_order,
2791 filesz);
2792
2793 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2794 continue;
2795 }
2796
2797 ok = 0;
2798 break;
2799 }
2800 }
2801 else
2802 ok = 0;
2803 }
2804
2805 xfree (buf);
2806 xfree (buf2);
2807
2808 if (!ok)
2809 return 0;
2810 }
2811
2812 if (info_verbose)
2813 {
2814 /* It can be printed repeatedly as there is no easy way to check
2815	 whether the executable symbols/file have already been relocated by the
2816	 displacement. */
2817
2818 printf_unfiltered (_("Using PIE (Position Independent Executable) "
2819 "displacement %s for \"%s\".\n"),
2820 paddress (target_gdbarch (), displacement),
2821 bfd_get_filename (exec_bfd));
2822 }
2823
2824 *displacementp = displacement;
2825 return 1;
2826 }
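
/* Worked example (illustrative only, with made-up addresses): for a PIE
   whose ELF header records a start address of 0x630, the kernel might
   report AT_ENTRY = 0x555555554630 in the auxiliary vector.  Then

     displacement = 0x555555554630 - 0x630 = 0x555555554000

   which is page aligned, so the check against elf->minpagesize above
   passes and every section of the main executable is relocated by
   0x555555554000 in svr4_relocate_main_executable below.  */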
2827
2828 /* Relocate the main executable. This function should be called upon
2829 stopping the inferior process at the entry point to the program.
2830 The entry point from BFD is compared to the AT_ENTRY of AUXV and if they are
2831 different, the main executable is relocated by the proper amount. */
2832
2833 static void
2834 svr4_relocate_main_executable (void)
2835 {
2836 CORE_ADDR displacement;
2837
2838 /* If we are re-running this executable, SYMFILE_OBJFILE->SECTION_OFFSETS
2839 probably contains the offsets computed using the PIE displacement
2840 from the previous run, which of course are irrelevant for this run.
2841 So we need to determine the new PIE displacement and recompute the
2842 section offsets accordingly, even if SYMFILE_OBJFILE->SECTION_OFFSETS
2843 already contains pre-computed offsets.
2844
2845 If we cannot compute the PIE displacement, either:
2846
2847 - The executable is not PIE.
2848
2849 - SYMFILE_OBJFILE does not match the executable started in the target.
2850 This can happen for main executable symbols loaded at the host while
2851 `ld.so --ld-args main-executable' is loaded in the target.
2852
2853 Then we leave the section offsets untouched and use them as is for
2854 this run. Either:
2855
2856 - These section offsets were properly reset earlier, and thus
2857 already contain the correct values. This can happen for instance
2858 when reconnecting via the remote protocol to a target that supports
2859 the `qOffsets' packet.
2860
2861 - The section offsets were not reset earlier, and the best we can
2862 hope is that the old offsets are still applicable to the new run. */
2863
2864 if (! svr4_exec_displacement (&displacement))
2865 return;
2866
2867 /* Even DISPLACEMENT 0 is a valid new difference of in-memory vs. in-file
2868 addresses. */
2869
2870 if (symfile_objfile)
2871 {
2872 struct section_offsets *new_offsets;
2873 int i;
2874
2875 new_offsets = alloca (symfile_objfile->num_sections
2876 * sizeof (*new_offsets));
2877
2878 for (i = 0; i < symfile_objfile->num_sections; i++)
2879 new_offsets->offsets[i] = displacement;
2880
2881 objfile_relocate (symfile_objfile, new_offsets);
2882 }
2883 else if (exec_bfd)
2884 {
2885 asection *asect;
2886
2887 for (asect = exec_bfd->sections; asect != NULL; asect = asect->next)
2888 exec_set_section_address (bfd_get_filename (exec_bfd), asect->index,
2889 (bfd_section_vma (exec_bfd, asect)
2890 + displacement));
2891 }
2892 }
2893
2894 /* Implement the "create_inferior_hook" target_solib_ops method.
2895
2896 For SVR4 executables, this first instruction is either the first
2897 instruction in the dynamic linker (for dynamically linked
2898 executables) or the instruction at "start" for statically linked
2899 executables. For dynamically linked executables, the system
2900 first exec's /lib/libc.so.N, which contains the dynamic linker,
2901 and starts it running. The dynamic linker maps in any needed
2902 shared libraries, maps in the actual user executable, and then
2903 jumps to "start" in the user executable.
2904
2905 We can arrange to cooperate with the dynamic linker to discover the
2906 names of shared libraries that are dynamically linked, and the base
2907 addresses to which they are linked.
2908
2909 This function is responsible for discovering those names and
2910 addresses, and saving sufficient information about them to allow
2911 their symbols to be read at a later time. */
2912
2913 static void
2914 svr4_solib_create_inferior_hook (int from_tty)
2915 {
2916 struct svr4_info *info;
2917
2918 info = get_svr4_info ();
2919
2920 /* Clear the probes-based interface's state. */
2921 free_probes_table (info);
2922 free_solib_list (info);
2923
2924 /* Relocate the main executable if necessary. */
2925 svr4_relocate_main_executable ();
2926
2927 /* No point setting a breakpoint in the dynamic linker if we can't
2928 hit it (e.g., a core file, or a trace file). */
2929 if (!target_has_execution)
2930 return;
2931
2932 if (!svr4_have_link_map_offsets ())
2933 return;
2934
2935 if (!enable_break (info, from_tty))
2936 return;
2937 }
2938
2939 static void
2940 svr4_clear_solib (void)
2941 {
2942 struct svr4_info *info;
2943
2944 info = get_svr4_info ();
2945 info->debug_base = 0;
2946 info->debug_loader_offset_p = 0;
2947 info->debug_loader_offset = 0;
2948 xfree (info->debug_loader_name);
2949 info->debug_loader_name = NULL;
2950 }
2951
2952 /* Clear any bits of ADDR that wouldn't fit in a target-format
2953 data pointer. "Data pointer" here refers to whatever sort of
2954 address the dynamic linker uses to manage its sections. At the
2955 moment, we don't support shared libraries on any processors where
2956 code and data pointers are different sizes.
2957
2958 This isn't really the right solution. What we really need here is
2959 a way to do arithmetic on CORE_ADDR values that respects the
2960 natural pointer/address correspondence. (For example, on the MIPS,
2961 converting a 32-bit pointer to a 64-bit CORE_ADDR requires you to
2962 sign-extend the value. There, simply truncating the bits above
2963 gdbarch_ptr_bit, as we do below, is no good.) This should probably
2964 be a new gdbarch method or something. */
2965 static CORE_ADDR
2966 svr4_truncate_ptr (CORE_ADDR addr)
2967 {
2968 if (gdbarch_ptr_bit (target_gdbarch ()) == sizeof (CORE_ADDR) * 8)
2969 /* We don't need to truncate anything, and the bit twiddling below
2970 will fail due to overflow problems. */
2971 return addr;
2972 else
2973 return addr & (((CORE_ADDR) 1 << gdbarch_ptr_bit (target_gdbarch ())) - 1);
2974 }
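
/* Worked example (illustrative only): with gdbarch_ptr_bit () == 32 on a
   64-bit host CORE_ADDR, an address that has picked up stray high bits,
   say 0x100001000, is masked with ((CORE_ADDR) 1 << 32) - 1 and becomes
   0x00001000.  As the comment above notes, this is wrong for targets such
   as MIPS, where a 32-bit pointer like 0x80001234 should really be the
   sign-extended CORE_ADDR 0xffffffff80001234.  */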
2975
2976
2977 static void
2978 svr4_relocate_section_addresses (struct so_list *so,
2979 struct target_section *sec)
2980 {
2981 bfd *abfd = sec->the_bfd_section->owner;
2982
2983 sec->addr = svr4_truncate_ptr (sec->addr + lm_addr_check (so, abfd));
2984 sec->endaddr = svr4_truncate_ptr (sec->endaddr + lm_addr_check (so, abfd));
2985 }
2986 \f
2987
2988 /* Architecture-specific operations. */
2989
2990 /* Per-architecture data key. */
2991 static struct gdbarch_data *solib_svr4_data;
2992
2993 struct solib_svr4_ops
2994 {
2995 /* Return a description of the layout of `struct link_map'. */
2996 struct link_map_offsets *(*fetch_link_map_offsets)(void);
2997 };
2998
2999 /* Return a default for the architecture-specific operations. */
3000
3001 static void *
3002 solib_svr4_init (struct obstack *obstack)
3003 {
3004 struct solib_svr4_ops *ops;
3005
3006 ops = OBSTACK_ZALLOC (obstack, struct solib_svr4_ops);
3007 ops->fetch_link_map_offsets = NULL;
3008 return ops;
3009 }
3010
3011 /* Set the architecture-specific `struct link_map_offsets' fetcher for
3012 GDBARCH to FLMO. Also, install SVR4 solib_ops into GDBARCH. */
3013
3014 void
3015 set_solib_svr4_fetch_link_map_offsets (struct gdbarch *gdbarch,
3016 struct link_map_offsets *(*flmo) (void))
3017 {
3018 struct solib_svr4_ops *ops = gdbarch_data (gdbarch, solib_svr4_data);
3019
3020 ops->fetch_link_map_offsets = flmo;
3021
3022 set_solib_ops (gdbarch, &svr4_so_ops);
3023 }
3024
3025 /* Fetch a link_map_offsets structure using the architecture-specific
3026 `struct link_map_offsets' fetcher. */
3027
3028 static struct link_map_offsets *
3029 svr4_fetch_link_map_offsets (void)
3030 {
3031 struct solib_svr4_ops *ops = gdbarch_data (target_gdbarch (), solib_svr4_data);
3032
3033 gdb_assert (ops->fetch_link_map_offsets);
3034 return ops->fetch_link_map_offsets ();
3035 }
3036
3037 /* Return 1 if a link map offset fetcher has been defined, 0 otherwise. */
3038
3039 static int
3040 svr4_have_link_map_offsets (void)
3041 {
3042 struct solib_svr4_ops *ops = gdbarch_data (target_gdbarch (), solib_svr4_data);
3043
3044 return (ops->fetch_link_map_offsets != NULL);
3045 }
3046 \f
3047
3048	 /* Most OSes that have SVR4-style ELF dynamic libraries define a
3049 `struct r_debug' and a `struct link_map' that are binary compatible
3050	 with the original SVR4 implementation. */
3051
3052 /* Fetch (and possibly build) an appropriate `struct link_map_offsets'
3053 for an ILP32 SVR4 system. */
3054
3055 struct link_map_offsets *
3056 svr4_ilp32_fetch_link_map_offsets (void)
3057 {
3058 static struct link_map_offsets lmo;
3059 static struct link_map_offsets *lmp = NULL;
3060
3061 if (lmp == NULL)
3062 {
3063 lmp = &lmo;
3064
3065 lmo.r_version_offset = 0;
3066 lmo.r_version_size = 4;
3067 lmo.r_map_offset = 4;
3068 lmo.r_brk_offset = 8;
3069 lmo.r_ldsomap_offset = 20;
3070
3071 /* Everything we need is in the first 20 bytes. */
3072 lmo.link_map_size = 20;
3073 lmo.l_addr_offset = 0;
3074 lmo.l_name_offset = 4;
3075 lmo.l_ld_offset = 8;
3076 lmo.l_next_offset = 12;
3077 lmo.l_prev_offset = 16;
3078 }
3079
3080 return lmp;
3081 }
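
/* For reference (added for illustration): the ILP32 offsets above match
   the classic SVR4/glibc layouts, roughly:

     struct link_map
       {
         Elf32_Addr l_addr;        // offset  0
         char *l_name;             // offset  4
         Elf32_Dyn *l_ld;          // offset  8
         struct link_map *l_next;  // offset 12
         struct link_map *l_prev;  // offset 16
       };                          // first 20 bytes used

   with r_version/r_map/r_brk at offsets 0/4/8 of `struct r_debug', and
   r_ldsomap at offset 20 on Solaris-style linkers.  The LP64 variant
   below simply doubles the pointer-sized members.  */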
3082
3083 /* Fetch (and possibly build) an appropriate `struct link_map_offsets'
3084 for an LP64 SVR4 system. */
3085
3086 struct link_map_offsets *
3087 svr4_lp64_fetch_link_map_offsets (void)
3088 {
3089 static struct link_map_offsets lmo;
3090 static struct link_map_offsets *lmp = NULL;
3091
3092 if (lmp == NULL)
3093 {
3094 lmp = &lmo;
3095
3096 lmo.r_version_offset = 0;
3097 lmo.r_version_size = 4;
3098 lmo.r_map_offset = 8;
3099 lmo.r_brk_offset = 16;
3100 lmo.r_ldsomap_offset = 40;
3101
3102 /* Everything we need is in the first 40 bytes. */
3103 lmo.link_map_size = 40;
3104 lmo.l_addr_offset = 0;
3105 lmo.l_name_offset = 8;
3106 lmo.l_ld_offset = 16;
3107 lmo.l_next_offset = 24;
3108 lmo.l_prev_offset = 32;
3109 }
3110
3111 return lmp;
3112 }
3113 \f
3114
3115 struct target_so_ops svr4_so_ops;
3116
3117 /* Lookup global symbol for ELF DSOs linked with -Bsymbolic. Those DSOs have a
3118 different rule for symbol lookup. The lookup begins here in the DSO, not in
3119 the main executable. */
3120
3121 static struct symbol *
3122 elf_lookup_lib_symbol (const struct objfile *objfile,
3123 const char *name,
3124 const domain_enum domain)
3125 {
3126 bfd *abfd;
3127
3128 if (objfile == symfile_objfile)
3129 abfd = exec_bfd;
3130 else
3131 {
3132 /* OBJFILE should have been passed as the non-debug one. */
3133 gdb_assert (objfile->separate_debug_objfile_backlink == NULL);
3134
3135 abfd = objfile->obfd;
3136 }
3137
3138 if (abfd == NULL || scan_dyntag (DT_SYMBOLIC, abfd, NULL) != 1)
3139 return NULL;
3140
3141 return lookup_global_symbol_from_objfile (objfile, name, domain);
3142 }
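
/* Illustrative note added to this listing (not original code): a DSO gets
   the DT_SYMBOLIC dynamic tag (or the equivalent DF_SYMBOLIC flag) when it
   is linked with something like

     gcc -shared -Wl,-Bsymbolic -o libfoo.so foo.c

   (libfoo.so/foo.c are hypothetical names), and `readelf -d libfoo.so'
   then shows a SYMBOLIC entry.  For such objects the function above lets
   GDB mimic the runtime rule that symbol lookup starts in the DSO itself
   rather than in the main executable.  */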
3143
3144 extern initialize_file_ftype _initialize_svr4_solib; /* -Wmissing-prototypes */
3145
3146 void
3147 _initialize_svr4_solib (void)
3148 {
3149 solib_svr4_data = gdbarch_data_register_pre_init (solib_svr4_init);
3150 solib_svr4_pspace_data
3151 = register_program_space_data_with_cleanup (NULL, svr4_pspace_data_cleanup);
3152
3153 svr4_so_ops.relocate_section_addresses = svr4_relocate_section_addresses;
3154 svr4_so_ops.free_so = svr4_free_so;
3155 svr4_so_ops.clear_so = svr4_clear_so;
3156 svr4_so_ops.clear_solib = svr4_clear_solib;
3157 svr4_so_ops.solib_create_inferior_hook = svr4_solib_create_inferior_hook;
3158 svr4_so_ops.special_symbol_handling = svr4_special_symbol_handling;
3159 svr4_so_ops.current_sos = svr4_current_sos;
3160 svr4_so_ops.open_symbol_file_object = open_symbol_file_object;
3161 svr4_so_ops.in_dynsym_resolve_code = svr4_in_dynsym_resolve_code;
3162 svr4_so_ops.bfd_open = solib_bfd_open;
3163 svr4_so_ops.lookup_lib_global_symbol = elf_lookup_lib_symbol;
3164 svr4_so_ops.same = svr4_same;
3165 svr4_so_ops.keep_data_in_core = svr4_keep_data_in_core;
3166 svr4_so_ops.update_breakpoints = svr4_update_solib_event_breakpoints;
3167 svr4_so_ops.handle_event = svr4_handle_solib_event;
3168 }