Normalize TRY_CATCH exception handling block
gdb/solib-svr4.c
1 /* Handle SVR4 shared libraries for GDB, the GNU Debugger.
2
3 Copyright (C) 1990-2015 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21
22 #include "elf/external.h"
23 #include "elf/common.h"
24 #include "elf/mips.h"
25
26 #include "symtab.h"
27 #include "bfd.h"
28 #include "symfile.h"
29 #include "objfiles.h"
30 #include "gdbcore.h"
31 #include "target.h"
32 #include "inferior.h"
33 #include "infrun.h"
34 #include "regcache.h"
35 #include "gdbthread.h"
36 #include "observer.h"
37
38 #include "solist.h"
39 #include "solib.h"
40 #include "solib-svr4.h"
41
42 #include "bfd-target.h"
43 #include "elf-bfd.h"
44 #include "exec.h"
45 #include "auxv.h"
46 #include "gdb_bfd.h"
47 #include "probe.h"
48
49 static struct link_map_offsets *svr4_fetch_link_map_offsets (void);
50 static int svr4_have_link_map_offsets (void);
51 static void svr4_relocate_main_executable (void);
52 static void svr4_free_library_list (void *p_list);
53
54 /* Link map info to include in an allocated so_list entry. */
55
56 struct lm_info
57 {
58 /* Amount by which addresses in the binary should be relocated to
59 match the inferior. The direct inferior value is L_ADDR_INFERIOR.
60 When prelinking is involved and the prelink base address changes,
61 we may need a different offset - the recomputed offset is in L_ADDR.
62 It is commonly the same value. It is cached as we want to warn about
63 the difference and compute it only once. L_ADDR is valid
64 iff L_ADDR_P. */
65 CORE_ADDR l_addr, l_addr_inferior;
66 unsigned int l_addr_p : 1;
67
68 /* The target location of lm. */
69 CORE_ADDR lm_addr;
70
71 /* Values read in from inferior's fields of the same name. */
72 CORE_ADDR l_ld, l_next, l_prev, l_name;
73 };
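/* Note: the l_* members above mirror the identically named members of the
   dynamic linker's link map entry in the inferior; the offsets used to
   read them are supplied by svr4_fetch_link_map_offsets below.  */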
74
75 /* On SVR4 systems, a list of symbols in the dynamic linker where
76 GDB can try to place a breakpoint to monitor shared library
77 events.
78
79 If none of these symbols are found, or other errors occur, then
80 SVR4 systems will fall back to using a symbol as the "startup
81 mapping complete" breakpoint address. */
82
83 static const char * const solib_break_names[] =
84 {
85 "r_debug_state",
86 "_r_debug_state",
87 "_dl_debug_state",
88 "rtld_db_dlactivity",
89 "__dl_rtld_db_dlactivity",
90 "_rtld_debug_state",
91
92 NULL
93 };
94
95 static const char * const bkpt_names[] =
96 {
97 "_start",
98 "__start",
99 "main",
100 NULL
101 };
102
103 static const char * const main_name_list[] =
104 {
105 "main_$main",
106 NULL
107 };
108
109 /* What to do when a probe stop occurs. */
110
111 enum probe_action
112 {
113 /* Something went seriously wrong. Stop using probes and
114 revert to using the older interface. */
115 PROBES_INTERFACE_FAILED,
116
117 /* No action is required. The shared object list is still
118 valid. */
119 DO_NOTHING,
120
121 /* The shared object list should be reloaded entirely. */
122 FULL_RELOAD,
123
124 /* Attempt to incrementally update the shared object list. If
125 the update fails or is not possible, fall back to reloading
126 the list in full. */
127 UPDATE_OR_RELOAD,
128 };
129
130 /* A probe's name and its associated action. */
131
132 struct probe_info
133 {
134 /* The name of the probe. */
135 const char *name;
136
137 /* What to do when a probe stop occurs. */
138 enum probe_action action;
139 };
140
141 /* A list of named probes and their associated actions. If all
142 probes are present in the dynamic linker then the probes-based
143 interface will be used. */
144
145 static const struct probe_info probe_info[] =
146 {
147 { "init_start", DO_NOTHING },
148 { "init_complete", FULL_RELOAD },
149 { "map_start", DO_NOTHING },
150 { "map_failed", DO_NOTHING },
151 { "reloc_complete", UPDATE_OR_RELOAD },
152 { "unmap_start", DO_NOTHING },
153 { "unmap_complete", FULL_RELOAD },
154 };
155
156 #define NUM_PROBES ARRAY_SIZE (probe_info)
157
158 /* Return non-zero if GDB_SO_NAME and INFERIOR_SO_NAME represent
159 the same shared library. */
160
161 static int
162 svr4_same_1 (const char *gdb_so_name, const char *inferior_so_name)
163 {
164 if (strcmp (gdb_so_name, inferior_so_name) == 0)
165 return 1;
166
167 /* On Solaris, when starting the inferior we think that the dynamic linker
168 is /usr/lib/ld.so.1, but later on, the table of loaded shared libraries
169 contains /lib/ld.so.1. Sometimes one file is a link to the other, but
170 sometimes they have identical content without being linked to each
171 other. We don't restrict this check to Solaris, but the chances
172 of running into this situation elsewhere are very low. */
173 if (strcmp (gdb_so_name, "/usr/lib/ld.so.1") == 0
174 && strcmp (inferior_so_name, "/lib/ld.so.1") == 0)
175 return 1;
176
177 /* Similarly, we observed the same issue with sparc64, but with
178 different locations. */
179 if (strcmp (gdb_so_name, "/usr/lib/sparcv9/ld.so.1") == 0
180 && strcmp (inferior_so_name, "/lib/sparcv9/ld.so.1") == 0)
181 return 1;
182
183 return 0;
184 }
185
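/* Return non-zero if the so_list entries GDB and INFERIOR refer to the
   same shared library, comparing their original (unresolved) names via
   svr4_same_1 above.  */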
186 static int
187 svr4_same (struct so_list *gdb, struct so_list *inferior)
188 {
189 return (svr4_same_1 (gdb->so_original_name, inferior->so_original_name));
190 }
191
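/* Read the inferior's link map entry at LM_ADDR and return a freshly
   allocated lm_info describing it, or NULL (after printing a warning)
   if the entry could not be read.  The caller owns the result.  */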
192 static struct lm_info *
193 lm_info_read (CORE_ADDR lm_addr)
194 {
195 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
196 gdb_byte *lm;
197 struct lm_info *lm_info;
198 struct cleanup *back_to;
199
200 lm = xmalloc (lmo->link_map_size);
201 back_to = make_cleanup (xfree, lm);
202
203 if (target_read_memory (lm_addr, lm, lmo->link_map_size) != 0)
204 {
205 warning (_("Error reading shared library list entry at %s"),
206 paddress (target_gdbarch (), lm_addr)),
207 lm_info = NULL;
208 }
209 else
210 {
211 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
212
213 lm_info = xzalloc (sizeof (*lm_info));
214 lm_info->lm_addr = lm_addr;
215
216 lm_info->l_addr_inferior = extract_typed_address (&lm[lmo->l_addr_offset],
217 ptr_type);
218 lm_info->l_ld = extract_typed_address (&lm[lmo->l_ld_offset], ptr_type);
219 lm_info->l_next = extract_typed_address (&lm[lmo->l_next_offset],
220 ptr_type);
221 lm_info->l_prev = extract_typed_address (&lm[lmo->l_prev_offset],
222 ptr_type);
223 lm_info->l_name = extract_typed_address (&lm[lmo->l_name_offset],
224 ptr_type);
225 }
226
227 do_cleanups (back_to);
228
229 return lm_info;
230 }
231
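/* Return non-zero if the link map offsets for this target describe an
   l_ld member, i.e. the address of a shared object's .dynamic section
   can be read directly from its link map entry.  */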
232 static int
233 has_lm_dynamic_from_link_map (void)
234 {
235 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
236
237 return lmo->l_ld_offset >= 0;
238 }
239
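/* Return the relocation offset (l_addr) to use for SO, computing and
   caching it on first use.  If ABFD is available, cross-check the value
   from the inferior's link map against the file's .dynamic section so
   that a displacement introduced by prelink can be detected and, when
   it looks consistent, compensated for.  */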
240 static CORE_ADDR
241 lm_addr_check (const struct so_list *so, bfd *abfd)
242 {
243 if (!so->lm_info->l_addr_p)
244 {
245 struct bfd_section *dyninfo_sect;
246 CORE_ADDR l_addr, l_dynaddr, dynaddr;
247
248 l_addr = so->lm_info->l_addr_inferior;
249
250 if (! abfd || ! has_lm_dynamic_from_link_map ())
251 goto set_addr;
252
253 l_dynaddr = so->lm_info->l_ld;
254
255 dyninfo_sect = bfd_get_section_by_name (abfd, ".dynamic");
256 if (dyninfo_sect == NULL)
257 goto set_addr;
258
259 dynaddr = bfd_section_vma (abfd, dyninfo_sect);
260
261 if (dynaddr + l_addr != l_dynaddr)
262 {
263 CORE_ADDR align = 0x1000;
264 CORE_ADDR minpagesize = align;
265
266 if (bfd_get_flavour (abfd) == bfd_target_elf_flavour)
267 {
268 Elf_Internal_Ehdr *ehdr = elf_tdata (abfd)->elf_header;
269 Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
270 int i;
271
272 align = 1;
273
274 for (i = 0; i < ehdr->e_phnum; i++)
275 if (phdr[i].p_type == PT_LOAD && phdr[i].p_align > align)
276 align = phdr[i].p_align;
277
278 minpagesize = get_elf_backend_data (abfd)->minpagesize;
279 }
280
281 /* Turn it into a mask. */
282 align--;
283
284 /* If the changes match the alignment requirements, we
285 assume we're using a core file that was generated by the
286 same binary, just prelinked with a different base offset.
287 If it doesn't match, we may have a different binary, the
288 same binary with the dynamic table loaded at an unrelated
289 location, or anything, really. To avoid regressions,
290 don't adjust the base offset in the latter case, although
291 odds are that, if things really changed, debugging won't
292 quite work.
293
294 One might rather expect the condition
295 ((l_addr & align) == 0 && ((l_dynaddr - dynaddr) & align) == 0)
296 but the check below is relaxed for PPC. The PPC kernel supports
297 either 4k or 64k page sizes. To be prepared for 64k pages,
298 PPC ELF files are built using an alignment requirement of 64k.
299 However, when running on a kernel supporting 4k pages, the memory
300 mapping of the library may not actually happen on a 64k boundary!
301
302 (In the usual case where (l_addr & align) == 0, this check is
303 equivalent to the possibly expected check above.)
304
305 Even on PPC it must be zero-aligned at least for MINPAGESIZE. */
306
307 l_addr = l_dynaddr - dynaddr;
308
309 if ((l_addr & (minpagesize - 1)) == 0
310 && (l_addr & align) == ((l_dynaddr - dynaddr) & align))
311 {
312 if (info_verbose)
313 printf_unfiltered (_("Using PIC (Position Independent Code) "
314 "prelink displacement %s for \"%s\".\n"),
315 paddress (target_gdbarch (), l_addr),
316 so->so_name);
317 }
318 else
319 {
320 /* There is no way to verify that the library file matches. prelink
321 can, during prelinking of an unprelinked file (or unprelinking
322 of a prelinked file), shift the DYNAMIC segment by an arbitrary
323 offset without any page size alignment. There is no way to
324 read the ELF header and/or Program Headers for even a limited
325 verification of whether they match. One could do a verification
326 of the DYNAMIC segment. Still, the found address is the best
327 one GDB could find. */
328
329 warning (_(".dynamic section for \"%s\" "
330 "is not at the expected address "
331 "(wrong library or version mismatch?)"), so->so_name);
332 }
333 }
334
335 set_addr:
336 so->lm_info->l_addr = l_addr;
337 so->lm_info->l_addr_p = 1;
338 }
339
340 return so->lm_info->l_addr;
341 }
342
343 /* Per pspace SVR4 specific data. */
344
345 struct svr4_info
346 {
347 CORE_ADDR debug_base; /* Base of dynamic linker structures. */
348
349 /* Validity flag for debug_loader_offset. */
350 int debug_loader_offset_p;
351
352 /* Load address for the dynamic linker, inferred. */
353 CORE_ADDR debug_loader_offset;
354
355 /* Name of the dynamic linker, valid if debug_loader_offset_p. */
356 char *debug_loader_name;
357
358 /* Load map address for the main executable. */
359 CORE_ADDR main_lm_addr;
360
361 CORE_ADDR interp_text_sect_low;
362 CORE_ADDR interp_text_sect_high;
363 CORE_ADDR interp_plt_sect_low;
364 CORE_ADDR interp_plt_sect_high;
365
366 /* Nonzero if the list of objects was last obtained from the target
367 via qXfer:libraries-svr4:read. */
368 int using_xfer;
369
370 /* Table of struct probe_and_action instances, used by the
371 probes-based interface to map breakpoint addresses to probes
372 and their associated actions. Lookup is performed using
373 probe_and_action->probe->address. */
374 htab_t probes_table;
375
376 /* List of objects loaded into the inferior, used by the probes-
377 based interface. */
378 struct so_list *solib_list;
379 };
380
381 /* Per-program-space data key. */
382 static const struct program_space_data *solib_svr4_pspace_data;
383
384 /* Free the probes table. */
385
386 static void
387 free_probes_table (struct svr4_info *info)
388 {
389 if (info->probes_table == NULL)
390 return;
391
392 htab_delete (info->probes_table);
393 info->probes_table = NULL;
394 }
395
396 /* Free the solib list. */
397
398 static void
399 free_solib_list (struct svr4_info *info)
400 {
401 svr4_free_library_list (&info->solib_list);
402 info->solib_list = NULL;
403 }
404
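/* Per-program-space data cleanup: release the probes table, the cached
   solib list, and the svr4_info structure itself.  */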
405 static void
406 svr4_pspace_data_cleanup (struct program_space *pspace, void *arg)
407 {
408 struct svr4_info *info = arg;
409
410 free_probes_table (info);
411 free_solib_list (info);
412
413 xfree (info);
414 }
415
416 /* Get the current svr4 data. If none is found yet, add it now. This
417 function always returns a valid object. */
418
419 static struct svr4_info *
420 get_svr4_info (void)
421 {
422 struct svr4_info *info;
423
424 info = program_space_data (current_program_space, solib_svr4_pspace_data);
425 if (info != NULL)
426 return info;
427
428 info = XCNEW (struct svr4_info);
429 set_program_space_data (current_program_space, solib_svr4_pspace_data, info);
430 return info;
431 }
432
433 /* Local function prototypes */
434
435 static int match_main (const char *);
436
437 /* Read program header TYPE from inferior memory. The header is found
438 by scanning the OS auxiliary vector.
439
440 If TYPE == -1, return the program headers instead of the contents of
441 one program header.
442
443 Return a pointer to allocated memory holding the program header contents,
444 or NULL on failure. If successful, and unless P_SECT_SIZE is NULL, the
445 size of those contents is returned in P_SECT_SIZE. Likewise, the target
446 architecture size (32-bit or 64-bit) is returned in P_ARCH_SIZE. */
447
448 static gdb_byte *
449 read_program_header (int type, int *p_sect_size, int *p_arch_size)
450 {
451 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
452 CORE_ADDR at_phdr, at_phent, at_phnum, pt_phdr = 0;
453 int arch_size, sect_size;
454 CORE_ADDR sect_addr;
455 gdb_byte *buf;
456 int pt_phdr_p = 0;
457
458 /* Get required auxv elements from target. */
459 if (target_auxv_search (&current_target, AT_PHDR, &at_phdr) <= 0)
460 return 0;
461 if (target_auxv_search (&current_target, AT_PHENT, &at_phent) <= 0)
462 return 0;
463 if (target_auxv_search (&current_target, AT_PHNUM, &at_phnum) <= 0)
464 return 0;
465 if (!at_phdr || !at_phnum)
466 return 0;
467
468 /* Determine ELF architecture type. */
469 if (at_phent == sizeof (Elf32_External_Phdr))
470 arch_size = 32;
471 else if (at_phent == sizeof (Elf64_External_Phdr))
472 arch_size = 64;
473 else
474 return 0;
475
476 /* Find the requested segment. */
477 if (type == -1)
478 {
479 sect_addr = at_phdr;
480 sect_size = at_phent * at_phnum;
481 }
482 else if (arch_size == 32)
483 {
484 Elf32_External_Phdr phdr;
485 int i;
486
487 /* Search for requested PHDR. */
488 for (i = 0; i < at_phnum; i++)
489 {
490 int p_type;
491
492 if (target_read_memory (at_phdr + i * sizeof (phdr),
493 (gdb_byte *)&phdr, sizeof (phdr)))
494 return 0;
495
496 p_type = extract_unsigned_integer ((gdb_byte *) phdr.p_type,
497 4, byte_order);
498
499 if (p_type == PT_PHDR)
500 {
501 pt_phdr_p = 1;
502 pt_phdr = extract_unsigned_integer ((gdb_byte *) phdr.p_vaddr,
503 4, byte_order);
504 }
505
506 if (p_type == type)
507 break;
508 }
509
510 if (i == at_phnum)
511 return 0;
512
513 /* Retrieve address and size. */
514 sect_addr = extract_unsigned_integer ((gdb_byte *)phdr.p_vaddr,
515 4, byte_order);
516 sect_size = extract_unsigned_integer ((gdb_byte *)phdr.p_memsz,
517 4, byte_order);
518 }
519 else
520 {
521 Elf64_External_Phdr phdr;
522 int i;
523
524 /* Search for requested PHDR. */
525 for (i = 0; i < at_phnum; i++)
526 {
527 int p_type;
528
529 if (target_read_memory (at_phdr + i * sizeof (phdr),
530 (gdb_byte *)&phdr, sizeof (phdr)))
531 return 0;
532
533 p_type = extract_unsigned_integer ((gdb_byte *) phdr.p_type,
534 4, byte_order);
535
536 if (p_type == PT_PHDR)
537 {
538 pt_phdr_p = 1;
539 pt_phdr = extract_unsigned_integer ((gdb_byte *) phdr.p_vaddr,
540 8, byte_order);
541 }
542
543 if (p_type == type)
544 break;
545 }
546
547 if (i == at_phnum)
548 return 0;
549
550 /* Retrieve address and size. */
551 sect_addr = extract_unsigned_integer ((gdb_byte *)phdr.p_vaddr,
552 8, byte_order);
553 sect_size = extract_unsigned_integer ((gdb_byte *)phdr.p_memsz,
554 8, byte_order);
555 }
556
557 /* PT_PHDR is optional, but we really need it
558 for PIE to make this work in general. */
559
560 if (pt_phdr_p)
561 {
562 /* AT_PHDR is the real address in memory. PT_PHDR is what the program
563 header claims it is. The relocation offset is the difference between the two. */
564 sect_addr = sect_addr + (at_phdr - pt_phdr);
565 }
566
567 /* Read in requested program header. */
568 buf = xmalloc (sect_size);
569 if (target_read_memory (sect_addr, buf, sect_size))
570 {
571 xfree (buf);
572 return NULL;
573 }
574
575 if (p_arch_size)
576 *p_arch_size = arch_size;
577 if (p_sect_size)
578 *p_sect_size = sect_size;
579
580 return buf;
581 }
582
583
584 /* Return program interpreter string. */
585 static char *
586 find_program_interpreter (void)
587 {
588 gdb_byte *buf = NULL;
589
590 /* If we have an exec_bfd, use its section table. */
591 if (exec_bfd
592 && bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
593 {
594 struct bfd_section *interp_sect;
595
596 interp_sect = bfd_get_section_by_name (exec_bfd, ".interp");
597 if (interp_sect != NULL)
598 {
599 int sect_size = bfd_section_size (exec_bfd, interp_sect);
600
601 buf = xmalloc (sect_size);
602 bfd_get_section_contents (exec_bfd, interp_sect, buf, 0, sect_size);
603 }
604 }
605
606 /* If we didn't find it, use the target auxiliary vector. */
607 if (!buf)
608 buf = read_program_header (PT_INTERP, NULL, NULL);
609
610 return (char *) buf;
611 }
612
613
614 /* Scan for DESIRED_DYNTAG in .dynamic section of ABFD. If DESIRED_DYNTAG is
615 found, 1 is returned and the corresponding PTR is set. */
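/* For example, elf_locate_base below calls
   scan_dyntag (DT_DEBUG, exec_bfd, &dyn_ptr) to obtain the runtime
   value stored in the executable's DT_DEBUG entry.  */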
616
617 static int
618 scan_dyntag (const int desired_dyntag, bfd *abfd, CORE_ADDR *ptr)
619 {
620 int arch_size, step, sect_size;
621 long current_dyntag;
622 CORE_ADDR dyn_ptr, dyn_addr;
623 gdb_byte *bufend, *bufstart, *buf;
624 Elf32_External_Dyn *x_dynp_32;
625 Elf64_External_Dyn *x_dynp_64;
626 struct bfd_section *sect;
627 struct target_section *target_section;
628
629 if (abfd == NULL)
630 return 0;
631
632 if (bfd_get_flavour (abfd) != bfd_target_elf_flavour)
633 return 0;
634
635 arch_size = bfd_get_arch_size (abfd);
636 if (arch_size == -1)
637 return 0;
638
639 /* Find the start address of the .dynamic section. */
640 sect = bfd_get_section_by_name (abfd, ".dynamic");
641 if (sect == NULL)
642 return 0;
643
644 for (target_section = current_target_sections->sections;
645 target_section < current_target_sections->sections_end;
646 target_section++)
647 if (sect == target_section->the_bfd_section)
648 break;
649 if (target_section < current_target_sections->sections_end)
650 dyn_addr = target_section->addr;
651 else
652 {
653 /* ABFD may come from OBJFILE acting only as a symbol file without being
654 loaded into the target (see add_symbol_file_command). In such a case
655 fall back to the file VMA address, without the possibility of
656 having the section relocated to its actual in-memory address. */
657
658 dyn_addr = bfd_section_vma (abfd, sect);
659 }
660
661 /* Read in .dynamic from the BFD. We will get the actual value
662 from memory later. */
663 sect_size = bfd_section_size (abfd, sect);
664 buf = bufstart = alloca (sect_size);
665 if (!bfd_get_section_contents (abfd, sect,
666 buf, 0, sect_size))
667 return 0;
668
669 /* Iterate over BUF and scan for DYNTAG. If found, set PTR and return. */
670 step = (arch_size == 32) ? sizeof (Elf32_External_Dyn)
671 : sizeof (Elf64_External_Dyn);
672 for (bufend = buf + sect_size;
673 buf < bufend;
674 buf += step)
675 {
676 if (arch_size == 32)
677 {
678 x_dynp_32 = (Elf32_External_Dyn *) buf;
679 current_dyntag = bfd_h_get_32 (abfd, (bfd_byte *) x_dynp_32->d_tag);
680 dyn_ptr = bfd_h_get_32 (abfd, (bfd_byte *) x_dynp_32->d_un.d_ptr);
681 }
682 else
683 {
684 x_dynp_64 = (Elf64_External_Dyn *) buf;
685 current_dyntag = bfd_h_get_64 (abfd, (bfd_byte *) x_dynp_64->d_tag);
686 dyn_ptr = bfd_h_get_64 (abfd, (bfd_byte *) x_dynp_64->d_un.d_ptr);
687 }
688 if (current_dyntag == DT_NULL)
689 return 0;
690 if (current_dyntag == desired_dyntag)
691 {
692 /* If requested, try to read the runtime value of this .dynamic
693 entry. */
694 if (ptr)
695 {
696 struct type *ptr_type;
697 gdb_byte ptr_buf[8];
698 CORE_ADDR ptr_addr;
699
700 ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
701 ptr_addr = dyn_addr + (buf - bufstart) + arch_size / 8;
702 if (target_read_memory (ptr_addr, ptr_buf, arch_size / 8) == 0)
703 dyn_ptr = extract_typed_address (ptr_buf, ptr_type);
704 *ptr = dyn_ptr;
705 }
706 return 1;
707 }
708 }
709
710 return 0;
711 }
712
713 /* Scan for DESIRED_DYNTAG in .dynamic section of the target's main executable,
714 found by consulting the OS auxiliary vector. If DESIRED_DYNTAG is found, 1
715 is returned and the corresponding PTR is set. */
716
717 static int
718 scan_dyntag_auxv (const int desired_dyntag, CORE_ADDR *ptr)
719 {
720 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
721 int sect_size, arch_size, step;
722 long current_dyntag;
723 CORE_ADDR dyn_ptr;
724 gdb_byte *bufend, *bufstart, *buf;
725
726 /* Read in .dynamic section. */
727 buf = bufstart = read_program_header (PT_DYNAMIC, &sect_size, &arch_size);
728 if (!buf)
729 return 0;
730
731 /* Iterate over BUF and scan for DYNTAG. If found, set PTR and return. */
732 step = (arch_size == 32) ? sizeof (Elf32_External_Dyn)
733 : sizeof (Elf64_External_Dyn);
734 for (bufend = buf + sect_size;
735 buf < bufend;
736 buf += step)
737 {
738 if (arch_size == 32)
739 {
740 Elf32_External_Dyn *dynp = (Elf32_External_Dyn *) buf;
741
742 current_dyntag = extract_unsigned_integer ((gdb_byte *) dynp->d_tag,
743 4, byte_order);
744 dyn_ptr = extract_unsigned_integer ((gdb_byte *) dynp->d_un.d_ptr,
745 4, byte_order);
746 }
747 else
748 {
749 Elf64_External_Dyn *dynp = (Elf64_External_Dyn *) buf;
750
751 current_dyntag = extract_unsigned_integer ((gdb_byte *) dynp->d_tag,
752 8, byte_order);
753 dyn_ptr = extract_unsigned_integer ((gdb_byte *) dynp->d_un.d_ptr,
754 8, byte_order);
755 }
756 if (current_dyntag == DT_NULL)
757 break;
758
759 if (current_dyntag == desired_dyntag)
760 {
761 if (ptr)
762 *ptr = dyn_ptr;
763
764 xfree (bufstart);
765 return 1;
766 }
767 }
768
769 xfree (bufstart);
770 return 0;
771 }
772
773 /* Locate the base address of dynamic linker structs for SVR4 elf
774 targets.
775
776 For SVR4 elf targets the address of the dynamic linker's runtime
777 structure is contained within the dynamic info section in the
778 executable file. The dynamic section is also mapped into the
779 inferior address space. Because the runtime loader fills in the
780 real address before starting the inferior, we have to read in the
781 dynamic info section from the inferior address space.
782 If there are any errors while trying to find the address, we
783 silently return 0, otherwise the found address is returned. */
784
785 static CORE_ADDR
786 elf_locate_base (void)
787 {
788 struct bound_minimal_symbol msymbol;
789 CORE_ADDR dyn_ptr;
790
791 /* Look for DT_MIPS_RLD_MAP first. MIPS executables use this
792 instead of DT_DEBUG, although they sometimes contain an unused
793 DT_DEBUG. */
794 if (scan_dyntag (DT_MIPS_RLD_MAP, exec_bfd, &dyn_ptr)
795 || scan_dyntag_auxv (DT_MIPS_RLD_MAP, &dyn_ptr))
796 {
797 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
798 gdb_byte *pbuf;
799 int pbuf_size = TYPE_LENGTH (ptr_type);
800
801 pbuf = alloca (pbuf_size);
802 /* DT_MIPS_RLD_MAP contains a pointer to the address
803 of the dynamic link structure. */
804 if (target_read_memory (dyn_ptr, pbuf, pbuf_size))
805 return 0;
806 return extract_typed_address (pbuf, ptr_type);
807 }
808
809 /* Find DT_DEBUG. */
810 if (scan_dyntag (DT_DEBUG, exec_bfd, &dyn_ptr)
811 || scan_dyntag_auxv (DT_DEBUG, &dyn_ptr))
812 return dyn_ptr;
813
814 /* This may be a static executable. Look for the symbol
815 conventionally named _r_debug, as a last resort. */
816 msymbol = lookup_minimal_symbol ("_r_debug", NULL, symfile_objfile);
817 if (msymbol.minsym != NULL)
818 return BMSYMBOL_VALUE_ADDRESS (msymbol);
819
820 /* DT_DEBUG entry not found. */
821 return 0;
822 }
823
824 /* Locate the base address of dynamic linker structs.
825
826 For both the SunOS and SVR4 shared library implementations, if the
827 inferior executable has been linked dynamically, there is a single
828 address somewhere in the inferior's data space which is the key to
829 locating all of the dynamic linker's runtime structures. This
830 address is the value of the debug base symbol. The job of this
831 function is to find and return that address, or to return 0 if there
832 is no such address (the executable is statically linked for example).
833
834 For SunOS, the job is almost trivial, since the dynamic linker and
835 all of its structures are statically linked to the executable at
836 link time. Thus the symbol for the address we are looking for has
837 already been added to the minimal symbol table for the executable's
838 objfile at the time the symbol file's symbols were read, and all we
839 have to do is look it up there. Note that we explicitly do NOT want
840 to find the copies in the shared library.
841
842 The SVR4 version is a bit more complicated because the address
843 is contained somewhere in the dynamic info section. We have to go
844 to a lot more work to discover the address of the debug base symbol.
845 Because of this complexity, we cache the value we find and return that
846 value on subsequent invocations. Note there is no copy in the
847 executable symbol tables. */
848
849 static CORE_ADDR
850 locate_base (struct svr4_info *info)
851 {
852 /* Check to see if we have a currently valid address, and if so, avoid
853 doing all this work again and just return the cached address. If
854 we have no cached address, try to locate it in the dynamic info
855 section for ELF executables. There's no point in doing any of this
856 though if we don't have some link map offsets to work with. */
857
858 if (info->debug_base == 0 && svr4_have_link_map_offsets ())
859 info->debug_base = elf_locate_base ();
860 return info->debug_base;
861 }
862
863 /* Find the first element in the inferior's dynamic link map, and
864 return its address in the inferior. Return zero if the address
865 could not be determined.
866
867 FIXME: Perhaps we should validate the info somehow, perhaps by
868 checking r_version for a known version number, or r_state for
869 RT_CONSISTENT. */
870
871 static CORE_ADDR
872 solib_svr4_r_map (struct svr4_info *info)
873 {
874 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
875 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
876 CORE_ADDR addr = 0;
877 volatile struct gdb_exception ex;
878
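/* Reading the first link map pointer may fail, e.g. if the debug base is
   stale or otherwise unreadable; catch such errors here so the caller
   simply sees a zero address instead of an aborted command.  */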
879 TRY_CATCH (ex, RETURN_MASK_ERROR)
880 {
881 addr = read_memory_typed_address (info->debug_base + lmo->r_map_offset,
882 ptr_type);
883 }
884 if (ex.reason < 0)
885 exception_print (gdb_stderr, ex);
886 return addr;
887 }
888
889 /* Find r_brk from the inferior's debug base. */
890
891 static CORE_ADDR
892 solib_svr4_r_brk (struct svr4_info *info)
893 {
894 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
895 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
896
897 return read_memory_typed_address (info->debug_base + lmo->r_brk_offset,
898 ptr_type);
899 }
900
901 /* Find the link map for the dynamic linker (if it is not in the
902 normal list of loaded shared objects). */
903
904 static CORE_ADDR
905 solib_svr4_r_ldsomap (struct svr4_info *info)
906 {
907 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
908 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
909 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
910 ULONGEST version;
911
912 /* Check version, and return zero if `struct r_debug' doesn't have
913 the r_ldsomap member. */
914 version
915 = read_memory_unsigned_integer (info->debug_base + lmo->r_version_offset,
916 lmo->r_version_size, byte_order);
917 if (version < 2 || lmo->r_ldsomap_offset == -1)
918 return 0;
919
920 return read_memory_typed_address (info->debug_base + lmo->r_ldsomap_offset,
921 ptr_type);
922 }
923
924 /* On Solaris systems with some versions of the dynamic linker,
925 ld.so's l_name pointer points to the SONAME in the string table
926 rather than into writable memory. So that GDB can find shared
927 libraries when loading a core file generated by gcore, ensure that
928 memory areas containing the l_name string are saved in the core
929 file. */
930
931 static int
932 svr4_keep_data_in_core (CORE_ADDR vaddr, unsigned long size)
933 {
934 struct svr4_info *info;
935 CORE_ADDR ldsomap;
936 struct so_list *newobj;
937 struct cleanup *old_chain;
938 CORE_ADDR name_lm;
939
940 info = get_svr4_info ();
941
942 info->debug_base = 0;
943 locate_base (info);
944 if (!info->debug_base)
945 return 0;
946
947 ldsomap = solib_svr4_r_ldsomap (info);
948 if (!ldsomap)
949 return 0;
950
951 newobj = XCNEW (struct so_list);
952 old_chain = make_cleanup (xfree, newobj);
953 newobj->lm_info = lm_info_read (ldsomap);
954 make_cleanup (xfree, newobj->lm_info);
955 name_lm = newobj->lm_info ? newobj->lm_info->l_name : 0;
956 do_cleanups (old_chain);
957
958 return (name_lm >= vaddr && name_lm < vaddr + size);
959 }
960
961 /* Implement the "open_symbol_file_object" target_so_ops method.
962
963 If no open symbol file, attempt to locate and open the main symbol
964 file. On SVR4 systems, this is the first link map entry. If its
965 name is here, we can open it. Useful when attaching to a process
966 without first loading its symbol file. */
967
968 static int
969 open_symbol_file_object (void *from_ttyp)
970 {
971 CORE_ADDR lm, l_name;
972 char *filename;
973 int errcode;
974 int from_tty = *(int *)from_ttyp;
975 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
976 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
977 int l_name_size = TYPE_LENGTH (ptr_type);
978 gdb_byte *l_name_buf = xmalloc (l_name_size);
979 struct cleanup *cleanups = make_cleanup (xfree, l_name_buf);
980 struct svr4_info *info = get_svr4_info ();
981
982 if (symfile_objfile)
983 if (!query (_("Attempt to reload symbols from process? ")))
984 {
985 do_cleanups (cleanups);
986 return 0;
987 }
988
989 /* Always locate the debug struct, in case it has moved. */
990 info->debug_base = 0;
991 if (locate_base (info) == 0)
992 {
993 do_cleanups (cleanups);
994 return 0; /* failed somehow... */
995 }
996
997 /* First link map member should be the executable. */
998 lm = solib_svr4_r_map (info);
999 if (lm == 0)
1000 {
1001 do_cleanups (cleanups);
1002 return 0; /* failed somehow... */
1003 }
1004
1005 /* Read address of name from target memory to GDB. */
1006 read_memory (lm + lmo->l_name_offset, l_name_buf, l_name_size);
1007
1008 /* Convert the address to host format. */
1009 l_name = extract_typed_address (l_name_buf, ptr_type);
1010
1011 if (l_name == 0)
1012 {
1013 do_cleanups (cleanups);
1014 return 0; /* No filename. */
1015 }
1016
1017 /* Now fetch the filename from target memory. */
1018 target_read_string (l_name, &filename, SO_NAME_MAX_PATH_SIZE - 1, &errcode);
1019 make_cleanup (xfree, filename);
1020
1021 if (errcode)
1022 {
1023 warning (_("failed to read exec filename from attached file: %s"),
1024 safe_strerror (errcode));
1025 do_cleanups (cleanups);
1026 return 0;
1027 }
1028
1029 /* Have a pathname: read the symbol file. */
1030 symbol_file_add_main (filename, from_tty);
1031
1032 do_cleanups (cleanups);
1033 return 1;
1034 }
1035
1036 /* Data exchange structure for the XML parser as returned by
1037 svr4_current_sos_via_xfer_libraries. */
1038
1039 struct svr4_library_list
1040 {
1041 struct so_list *head, **tailp;
1042
1043 /* Inferior address of struct link_map used for the main executable. It is
1044 NULL if not known. */
1045 CORE_ADDR main_lm;
1046 };
1047
1048 /* Implementation for target_so_ops.free_so. */
1049
1050 static void
1051 svr4_free_so (struct so_list *so)
1052 {
1053 xfree (so->lm_info);
1054 }
1055
1056 /* Implement target_so_ops.clear_so. */
1057
1058 static void
1059 svr4_clear_so (struct so_list *so)
1060 {
1061 if (so->lm_info != NULL)
1062 so->lm_info->l_addr_p = 0;
1063 }
1064
1065 /* Free so_list built so far (called via cleanup). */
1066
1067 static void
1068 svr4_free_library_list (void *p_list)
1069 {
1070 struct so_list *list = *(struct so_list **) p_list;
1071
1072 while (list != NULL)
1073 {
1074 struct so_list *next = list->next;
1075
1076 free_so (list);
1077 list = next;
1078 }
1079 }
1080
1081 /* Copy library list. */
1082
1083 static struct so_list *
1084 svr4_copy_library_list (struct so_list *src)
1085 {
1086 struct so_list *dst = NULL;
1087 struct so_list **link = &dst;
1088
1089 while (src != NULL)
1090 {
1091 struct so_list *newobj;
1092
1093 newobj = xmalloc (sizeof (struct so_list));
1094 memcpy (newobj, src, sizeof (struct so_list));
1095
1096 newobj->lm_info = xmalloc (sizeof (struct lm_info));
1097 memcpy (newobj->lm_info, src->lm_info, sizeof (struct lm_info));
1098
1099 newobj->next = NULL;
1100 *link = newobj;
1101 link = &newobj->next;
1102
1103 src = src->next;
1104 }
1105
1106 return dst;
1107 }
1108
1109 #ifdef HAVE_LIBEXPAT
1110
1111 #include "xml-support.h"
1112
1113 /* Handle the start of a <library> element. Note: new elements are added
1114 at the tail of the list, keeping the list in order. */
1115
1116 static void
1117 library_list_start_library (struct gdb_xml_parser *parser,
1118 const struct gdb_xml_element *element,
1119 void *user_data, VEC(gdb_xml_value_s) *attributes)
1120 {
1121 struct svr4_library_list *list = user_data;
1122 const char *name = xml_find_attribute (attributes, "name")->value;
1123 ULONGEST *lmp = xml_find_attribute (attributes, "lm")->value;
1124 ULONGEST *l_addrp = xml_find_attribute (attributes, "l_addr")->value;
1125 ULONGEST *l_ldp = xml_find_attribute (attributes, "l_ld")->value;
1126 struct so_list *new_elem;
1127
1128 new_elem = XCNEW (struct so_list);
1129 new_elem->lm_info = XCNEW (struct lm_info);
1130 new_elem->lm_info->lm_addr = *lmp;
1131 new_elem->lm_info->l_addr_inferior = *l_addrp;
1132 new_elem->lm_info->l_ld = *l_ldp;
1133
1134 strncpy (new_elem->so_name, name, sizeof (new_elem->so_name) - 1);
1135 new_elem->so_name[sizeof (new_elem->so_name) - 1] = 0;
1136 strcpy (new_elem->so_original_name, new_elem->so_name);
1137
1138 *list->tailp = new_elem;
1139 list->tailp = &new_elem->next;
1140 }
1141
1142 /* Handle the start of a <library-list-svr4> element. */
1143
1144 static void
1145 svr4_library_list_start_list (struct gdb_xml_parser *parser,
1146 const struct gdb_xml_element *element,
1147 void *user_data, VEC(gdb_xml_value_s) *attributes)
1148 {
1149 struct svr4_library_list *list = user_data;
1150 const char *version = xml_find_attribute (attributes, "version")->value;
1151 struct gdb_xml_value *main_lm = xml_find_attribute (attributes, "main-lm");
1152
1153 if (strcmp (version, "1.0") != 0)
1154 gdb_xml_error (parser,
1155 _("SVR4 Library list has unsupported version \"%s\""),
1156 version);
1157
1158 if (main_lm)
1159 list->main_lm = *(ULONGEST *) main_lm->value;
1160 }
1161
1162 /* The allowed elements and attributes for an XML library list.
1163 The root element is a <library-list>. */
1164
1165 static const struct gdb_xml_attribute svr4_library_attributes[] =
1166 {
1167 { "name", GDB_XML_AF_NONE, NULL, NULL },
1168 { "lm", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1169 { "l_addr", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1170 { "l_ld", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1171 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1172 };
1173
1174 static const struct gdb_xml_element svr4_library_list_children[] =
1175 {
1176 {
1177 "library", svr4_library_attributes, NULL,
1178 GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL,
1179 library_list_start_library, NULL
1180 },
1181 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1182 };
1183
1184 static const struct gdb_xml_attribute svr4_library_list_attributes[] =
1185 {
1186 { "version", GDB_XML_AF_NONE, NULL, NULL },
1187 { "main-lm", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
1188 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1189 };
1190
1191 static const struct gdb_xml_element svr4_library_list_elements[] =
1192 {
1193 { "library-list-svr4", svr4_library_list_attributes, svr4_library_list_children,
1194 GDB_XML_EF_NONE, svr4_library_list_start_list, NULL },
1195 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1196 };
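/* Purely for illustration (the addresses and library name below are made
   up), a document accepted by the tables above looks like:

     <library-list-svr4 version="1.0" main-lm="0x...">
       <library name="/lib64/libc.so.6" lm="0x..." l_addr="0x..." l_ld="0x..."/>
     </library-list-svr4>

   Each attribute other than "name" and "version" is parsed as an
   unsigned target address.  */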
1197
1198 /* Parse the qXfer:libraries-svr4:read response DOCUMENT into *LIST.
1199
1200 Return 0 if the document could not be parsed; *LIST is not modified in
1201 that case. Return 1 if *LIST contains the library list; it may be
1202 empty, and the caller is responsible for freeing all its entries. */
1203
1204 static int
1205 svr4_parse_libraries (const char *document, struct svr4_library_list *list)
1206 {
1207 struct cleanup *back_to = make_cleanup (svr4_free_library_list,
1208 &list->head);
1209
1210 memset (list, 0, sizeof (*list));
1211 list->tailp = &list->head;
1212 if (gdb_xml_parse_quick (_("target library list"), "library-list-svr4.dtd",
1213 svr4_library_list_elements, document, list) == 0)
1214 {
1215 /* Parsed successfully, keep the result. */
1216 discard_cleanups (back_to);
1217 return 1;
1218 }
1219
1220 do_cleanups (back_to);
1221 return 0;
1222 }
1223
1224 /* Attempt to get so_list from target via qXfer:libraries-svr4:read packet.
1225
1226 Return 0 if packet not supported, *SO_LIST_RETURN is not modified in such
1227 case. Return 1 if *SO_LIST_RETURN contains the library list, it may be
1228 empty, caller is responsible for freeing all its entries.
1229
1230 Note that ANNEX must be NULL if the remote does not explicitly allow
1231 qXfer:libraries-svr4:read packets with non-empty annexes. Support for
1232 this can be checked using target_augmented_libraries_svr4_read (). */
1233
1234 static int
1235 svr4_current_sos_via_xfer_libraries (struct svr4_library_list *list,
1236 const char *annex)
1237 {
1238 char *svr4_library_document;
1239 int result;
1240 struct cleanup *back_to;
1241
1242 gdb_assert (annex == NULL || target_augmented_libraries_svr4_read ());
1243
1244 /* Fetch the list of shared libraries. */
1245 svr4_library_document = target_read_stralloc (&current_target,
1246 TARGET_OBJECT_LIBRARIES_SVR4,
1247 annex);
1248 if (svr4_library_document == NULL)
1249 return 0;
1250
1251 back_to = make_cleanup (xfree, svr4_library_document);
1252 result = svr4_parse_libraries (svr4_library_document, list);
1253 do_cleanups (back_to);
1254
1255 return result;
1256 }
1257
1258 #else
1259
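/* Stub used when GDB is built without expat: report that the XML-based
   qXfer:libraries-svr4:read mechanism is unavailable so that callers
   fall back to reading the link map from inferior memory.  */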
1260 static int
1261 svr4_current_sos_via_xfer_libraries (struct svr4_library_list *list,
1262 const char *annex)
1263 {
1264 return 0;
1265 }
1266
1267 #endif
1268
1269 /* If no shared library information is available from the dynamic
1270 linker, build a fallback list from other sources. */
1271
1272 static struct so_list *
1273 svr4_default_sos (void)
1274 {
1275 struct svr4_info *info = get_svr4_info ();
1276 struct so_list *newobj;
1277
1278 if (!info->debug_loader_offset_p)
1279 return NULL;
1280
1281 newobj = XCNEW (struct so_list);
1282
1283 newobj->lm_info = xzalloc (sizeof (struct lm_info));
1284
1285 /* Nothing will ever check the other fields if we set l_addr_p. */
1286 newobj->lm_info->l_addr = info->debug_loader_offset;
1287 newobj->lm_info->l_addr_p = 1;
1288
1289 strncpy (newobj->so_name, info->debug_loader_name, SO_NAME_MAX_PATH_SIZE - 1);
1290 newobj->so_name[SO_NAME_MAX_PATH_SIZE - 1] = '\0';
1291 strcpy (newobj->so_original_name, newobj->so_name);
1292
1293 return newobj;
1294 }
1295
1296 /* Read the whole inferior libraries chain starting at address LM.
1297 Expect the first entry in the chain's previous entry to be PREV_LM.
1298 Add the entries to the tail referenced by LINK_PTR_PTR. Ignore the
1299 first entry if IGNORE_FIRST and set global MAIN_LM_ADDR according
1300 to it. Returns nonzero upon success. If zero is returned the
1301 entries stored to LINK_PTR_PTR are still valid although they may
1302 represent only part of the inferior library list. */
1303
1304 static int
1305 svr4_read_so_list (CORE_ADDR lm, CORE_ADDR prev_lm,
1306 struct so_list ***link_ptr_ptr, int ignore_first)
1307 {
1308 CORE_ADDR first_l_name = 0;
1309 CORE_ADDR next_lm;
1310
1311 for (; lm != 0; prev_lm = lm, lm = next_lm)
1312 {
1313 struct so_list *newobj;
1314 struct cleanup *old_chain;
1315 int errcode;
1316 char *buffer;
1317
1318 newobj = XCNEW (struct so_list);
1319 old_chain = make_cleanup_free_so (newobj);
1320
1321 newobj->lm_info = lm_info_read (lm);
1322 if (newobj->lm_info == NULL)
1323 {
1324 do_cleanups (old_chain);
1325 return 0;
1326 }
1327
1328 next_lm = newobj->lm_info->l_next;
1329
1330 if (newobj->lm_info->l_prev != prev_lm)
1331 {
1332 warning (_("Corrupted shared library list: %s != %s"),
1333 paddress (target_gdbarch (), prev_lm),
1334 paddress (target_gdbarch (), newobj->lm_info->l_prev));
1335 do_cleanups (old_chain);
1336 return 0;
1337 }
1338
1339 /* For SVR4 versions, the first entry in the link map is for the
1340 inferior executable, so we must ignore it. For some versions of
1341 SVR4, it has no name. For others (Solaris 2.3 for example), it
1342 does have a name, so we can no longer use a missing name to
1343 decide when to ignore it. */
1344 if (ignore_first && newobj->lm_info->l_prev == 0)
1345 {
1346 struct svr4_info *info = get_svr4_info ();
1347
1348 first_l_name = newobj->lm_info->l_name;
1349 info->main_lm_addr = newobj->lm_info->lm_addr;
1350 do_cleanups (old_chain);
1351 continue;
1352 }
1353
1354 /* Extract this shared object's name. */
1355 target_read_string (newobj->lm_info->l_name, &buffer,
1356 SO_NAME_MAX_PATH_SIZE - 1, &errcode);
1357 if (errcode != 0)
1358 {
1359 /* If this entry's l_name address matches that of the
1360 inferior executable, then this is not a normal shared
1361 object, but (most likely) a vDSO. In this case, silently
1362 skip it; otherwise emit a warning. */
1363 if (first_l_name == 0 || newobj->lm_info->l_name != first_l_name)
1364 warning (_("Can't read pathname for load map: %s."),
1365 safe_strerror (errcode));
1366 do_cleanups (old_chain);
1367 continue;
1368 }
1369
1370 strncpy (newobj->so_name, buffer, SO_NAME_MAX_PATH_SIZE - 1);
1371 newobj->so_name[SO_NAME_MAX_PATH_SIZE - 1] = '\0';
1372 strcpy (newobj->so_original_name, newobj->so_name);
1373 xfree (buffer);
1374
1375 /* If this entry has no name, or its name matches the name
1376 for the main executable, don't include it in the list. */
1377 if (! newobj->so_name[0] || match_main (newobj->so_name))
1378 {
1379 do_cleanups (old_chain);
1380 continue;
1381 }
1382
1383 discard_cleanups (old_chain);
1384 newobj->next = 0;
1385 **link_ptr_ptr = newobj;
1386 *link_ptr_ptr = &newobj->next;
1387 }
1388
1389 return 1;
1390 }
1391
1392 /* Read the full list of currently loaded shared objects directly
1393 from the inferior, without referring to any libraries read and
1394 stored by the probes interface. Handle special cases relating
1395 to the first elements of the list. */
1396
1397 static struct so_list *
1398 svr4_current_sos_direct (struct svr4_info *info)
1399 {
1400 CORE_ADDR lm;
1401 struct so_list *head = NULL;
1402 struct so_list **link_ptr = &head;
1403 struct cleanup *back_to;
1404 int ignore_first;
1405 struct svr4_library_list library_list;
1406
1407 /* Fall back to manual examination of the target if the packet is not
1408 supported or gdbserver failed to find DT_DEBUG. gdb.server/solib-list.exp
1409 tests a case where gdbserver cannot find the shared libraries list while
1410 GDB itself is able to find it via SYMFILE_OBJFILE.
1411
1412 Unfortunately statically linked inferiors will also fall back through this
1413 suboptimal code path. */
1414
1415 info->using_xfer = svr4_current_sos_via_xfer_libraries (&library_list,
1416 NULL);
1417 if (info->using_xfer)
1418 {
1419 if (library_list.main_lm)
1420 info->main_lm_addr = library_list.main_lm;
1421
1422 return library_list.head ? library_list.head : svr4_default_sos ();
1423 }
1424
1425 /* Always locate the debug struct, in case it has moved. */
1426 info->debug_base = 0;
1427 locate_base (info);
1428
1429 /* If we can't find the dynamic linker's base structure, this
1430 must not be a dynamically linked executable. Hmm. */
1431 if (! info->debug_base)
1432 return svr4_default_sos ();
1433
1434 /* Assume that everything is a library if the dynamic loader was loaded
1435 late by a static executable. */
1436 if (exec_bfd && bfd_get_section_by_name (exec_bfd, ".dynamic") == NULL)
1437 ignore_first = 0;
1438 else
1439 ignore_first = 1;
1440
1441 back_to = make_cleanup (svr4_free_library_list, &head);
1442
1443 /* Walk the inferior's link map list, and build our list of
1444 `struct so_list' nodes. */
1445 lm = solib_svr4_r_map (info);
1446 if (lm)
1447 svr4_read_so_list (lm, 0, &link_ptr, ignore_first);
1448
1449 /* On Solaris, the dynamic linker is not in the normal list of
1450 shared objects, so make sure we pick it up too. Having
1451 symbol information for the dynamic linker is quite crucial
1452 for skipping dynamic linker resolver code. */
1453 lm = solib_svr4_r_ldsomap (info);
1454 if (lm)
1455 svr4_read_so_list (lm, 0, &link_ptr, 0);
1456
1457 discard_cleanups (back_to);
1458
1459 if (head == NULL)
1460 return svr4_default_sos ();
1461
1462 return head;
1463 }
1464
1465 /* Implement the main part of the "current_sos" target_so_ops
1466 method. */
1467
1468 static struct so_list *
1469 svr4_current_sos_1 (void)
1470 {
1471 struct svr4_info *info = get_svr4_info ();
1472
1473 /* If the solib list has been read and stored by the probes
1474 interface then we return a copy of the stored list. */
1475 if (info->solib_list != NULL)
1476 return svr4_copy_library_list (info->solib_list);
1477
1478 /* Otherwise obtain the solib list directly from the inferior. */
1479 return svr4_current_sos_direct (info);
1480 }
1481
1482 /* Implement the "current_sos" target_so_ops method. */
1483
1484 static struct so_list *
1485 svr4_current_sos (void)
1486 {
1487 struct so_list *so_head = svr4_current_sos_1 ();
1488 struct mem_range vsyscall_range;
1489
1490 /* Filter out the vDSO module, if present. Its symbol file would
1491 not be found on disk. The vDSO/vsyscall's OBJFILE is instead
1492 managed by symfile-mem.c:add_vsyscall_page. */
1493 if (gdbarch_vsyscall_range (target_gdbarch (), &vsyscall_range)
1494 && vsyscall_range.length != 0)
1495 {
1496 struct so_list **sop;
1497
1498 sop = &so_head;
1499 while (*sop != NULL)
1500 {
1501 struct so_list *so = *sop;
1502
1503 /* We can't simply match the vDSO by starting address alone,
1504 because lm_info->l_addr_inferior (and also l_addr) do not
1505 necessarily represent the real starting address of the
1506 ELF if the vDSO's ELF itself is "prelinked". The l_ld
1507 field (the ".dynamic" section of the shared object)
1508 always points at the absolute/resolved address though.
1509 So check whether that address is inside the vDSO's
1510 mapping instead.
1511
1512 E.g., on Linux 3.16 (x86_64) the vDSO is a regular
1513 0-based ELF, and we see:
1514
1515 (gdb) info auxv
1516 33 AT_SYSINFO_EHDR System-supplied DSO's ELF header 0x7ffff7ffb000
1517 (gdb) p/x *_r_debug.r_map.l_next
1518 $1 = {l_addr = 0x7ffff7ffb000, ..., l_ld = 0x7ffff7ffb318, ...}
1519
1520 And on Linux 2.6.32 (x86_64) we see:
1521
1522 (gdb) info auxv
1523 33 AT_SYSINFO_EHDR System-supplied DSO's ELF header 0x7ffff7ffe000
1524 (gdb) p/x *_r_debug.r_map.l_next
1525 $5 = {l_addr = 0x7ffff88fe000, ..., l_ld = 0x7ffff7ffe580, ... }
1526
1527 Dumping that vDSO shows:
1528
1529 (gdb) info proc mappings
1530 0x7ffff7ffe000 0x7ffff7fff000 0x1000 0 [vdso]
1531 (gdb) dump memory vdso.bin 0x7ffff7ffe000 0x7ffff7fff000
1532 # readelf -Wa vdso.bin
1533 [...]
1534 Entry point address: 0xffffffffff700700
1535 [...]
1536 Section Headers:
1537 [Nr] Name Type Address Off Size
1538 [ 0] NULL 0000000000000000 000000 000000
1539 [ 1] .hash HASH ffffffffff700120 000120 000038
1540 [ 2] .dynsym DYNSYM ffffffffff700158 000158 0000d8
1541 [...]
1542 [ 9] .dynamic DYNAMIC ffffffffff700580 000580 0000f0
1543 */
1544 if (address_in_mem_range (so->lm_info->l_ld, &vsyscall_range))
1545 {
1546 *sop = so->next;
1547 free_so (so);
1548 break;
1549 }
1550
1551 sop = &so->next;
1552 }
1553 }
1554
1555 return so_head;
1556 }
1557
1558 /* Get the address of the link_map for a given OBJFILE. */
1559
1560 CORE_ADDR
1561 svr4_fetch_objfile_link_map (struct objfile *objfile)
1562 {
1563 struct so_list *so;
1564 struct svr4_info *info = get_svr4_info ();
1565
1566 /* Cause svr4_current_sos() to be run if it hasn't been already. */
1567 if (info->main_lm_addr == 0)
1568 solib_add (NULL, 0, &current_target, auto_solib_add);
1569
1570 /* svr4_current_sos() will set main_lm_addr for the main executable. */
1571 if (objfile == symfile_objfile)
1572 return info->main_lm_addr;
1573
1574 /* The other link map addresses may be found by examining the list
1575 of shared libraries. */
1576 for (so = master_so_list (); so; so = so->next)
1577 if (so->objfile == objfile)
1578 return so->lm_info->lm_addr;
1579
1580 /* Not found! */
1581 return 0;
1582 }
1583
1584 /* On some systems, the only way to recognize the link map entry for
1585 the main executable file is by looking at its name. Return
1586 non-zero iff SONAME matches one of the known main executable names. */
1587
1588 static int
1589 match_main (const char *soname)
1590 {
1591 const char * const *mainp;
1592
1593 for (mainp = main_name_list; *mainp != NULL; mainp++)
1594 {
1595 if (strcmp (soname, *mainp) == 0)
1596 return (1);
1597 }
1598
1599 return (0);
1600 }
1601
1602 /* Return 1 if PC lies in the dynamic symbol resolution code of the
1603 SVR4 run time loader. */
1604
1605 int
1606 svr4_in_dynsym_resolve_code (CORE_ADDR pc)
1607 {
1608 struct svr4_info *info = get_svr4_info ();
1609
1610 return ((pc >= info->interp_text_sect_low
1611 && pc < info->interp_text_sect_high)
1612 || (pc >= info->interp_plt_sect_low
1613 && pc < info->interp_plt_sect_high)
1614 || in_plt_section (pc)
1615 || in_gnu_ifunc_stub (pc));
1616 }
1617
1618 /* Given an executable's ABFD and target, compute the entry-point
1619 address. */
1620
1621 static CORE_ADDR
1622 exec_entry_point (struct bfd *abfd, struct target_ops *targ)
1623 {
1624 CORE_ADDR addr;
1625
1626 /* KevinB wrote ... for most targets, the address returned by
1627 bfd_get_start_address() is the entry point for the start
1628 function. But, for some targets, bfd_get_start_address() returns
1629 the address of a function descriptor from which the entry point
1630 address may be extracted. This address is extracted by
1631 gdbarch_convert_from_func_ptr_addr(). The method
1632 gdbarch_convert_from_func_ptr_addr() is merely the identity
1633 function for targets which don't use function descriptors. */
1634 addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
1635 bfd_get_start_address (abfd),
1636 targ);
1637 return gdbarch_addr_bits_remove (target_gdbarch (), addr);
1638 }
1639
1640 /* A probe and its associated action. */
1641
1642 struct probe_and_action
1643 {
1644 /* The probe. */
1645 struct probe *probe;
1646
1647 /* The relocated address of the probe. */
1648 CORE_ADDR address;
1649
1650 /* The action. */
1651 enum probe_action action;
1652 };
1653
1654 /* Returns a hash code for the probe_and_action referenced by p. */
1655
1656 static hashval_t
1657 hash_probe_and_action (const void *p)
1658 {
1659 const struct probe_and_action *pa = p;
1660
1661 return (hashval_t) pa->address;
1662 }
1663
1664 /* Returns non-zero if the probe_and_actions referenced by p1 and p2
1665 are equal. */
1666
1667 static int
1668 equal_probe_and_action (const void *p1, const void *p2)
1669 {
1670 const struct probe_and_action *pa1 = p1;
1671 const struct probe_and_action *pa2 = p2;
1672
1673 return pa1->address == pa2->address;
1674 }
1675
1676 /* Register a solib event probe and its associated action in the
1677 probes table. */
1678
1679 static void
1680 register_solib_event_probe (struct probe *probe, CORE_ADDR address,
1681 enum probe_action action)
1682 {
1683 struct svr4_info *info = get_svr4_info ();
1684 struct probe_and_action lookup, *pa;
1685 void **slot;
1686
1687 /* Create the probes table, if necessary. */
1688 if (info->probes_table == NULL)
1689 info->probes_table = htab_create_alloc (1, hash_probe_and_action,
1690 equal_probe_and_action,
1691 xfree, xcalloc, xfree);
1692
1693 lookup.probe = probe;
1694 lookup.address = address;
1695 slot = htab_find_slot (info->probes_table, &lookup, INSERT);
1696 gdb_assert (*slot == HTAB_EMPTY_ENTRY);
1697
1698 pa = XCNEW (struct probe_and_action);
1699 pa->probe = probe;
1700 pa->address = address;
1701 pa->action = action;
1702
1703 *slot = pa;
1704 }
1705
1706 /* Get the solib event probe at the specified location, and the
1707 action associated with it. Returns NULL if no solib event probe
1708 was found. */
1709
1710 static struct probe_and_action *
1711 solib_event_probe_at (struct svr4_info *info, CORE_ADDR address)
1712 {
1713 struct probe_and_action lookup;
1714 void **slot;
1715
1716 lookup.address = address;
1717 slot = htab_find_slot (info->probes_table, &lookup, NO_INSERT);
1718
1719 if (slot == NULL)
1720 return NULL;
1721
1722 return (struct probe_and_action *) *slot;
1723 }
1724
1725 /* Decide what action to take when the specified solib event probe is
1726 hit. */
1727
1728 static enum probe_action
1729 solib_event_probe_action (struct probe_and_action *pa)
1730 {
1731 enum probe_action action;
1732 unsigned probe_argc;
1733 struct frame_info *frame = get_current_frame ();
1734
1735 action = pa->action;
1736 if (action == DO_NOTHING || action == PROBES_INTERFACE_FAILED)
1737 return action;
1738
1739 gdb_assert (action == FULL_RELOAD || action == UPDATE_OR_RELOAD);
1740
1741 /* Check that an appropriate number of arguments has been supplied.
1742 We expect:
1743 arg0: Lmid_t lmid (mandatory)
1744 arg1: struct r_debug *debug_base (mandatory)
1745 arg2: struct link_map *new (optional, for incremental updates) */
1746 probe_argc = get_probe_argument_count (pa->probe, frame);
1747 if (probe_argc == 2)
1748 action = FULL_RELOAD;
1749 else if (probe_argc < 2)
1750 action = PROBES_INTERFACE_FAILED;
1751
1752 return action;
1753 }
1754
1755 /* Populate the shared object list by reading the entire list of
1756 shared objects from the inferior. Handle special cases relating
1757 to the first elements of the list. Returns nonzero on success. */
1758
1759 static int
1760 solist_update_full (struct svr4_info *info)
1761 {
1762 free_solib_list (info);
1763 info->solib_list = svr4_current_sos_direct (info);
1764
1765 return 1;
1766 }
1767
1768 /* Update the shared object list starting from the link-map entry
1769 passed by the linker in the probe's third argument. Returns
1770 nonzero if the list was successfully updated, or zero to indicate
1771 failure. */
1772
1773 static int
1774 solist_update_incremental (struct svr4_info *info, CORE_ADDR lm)
1775 {
1776 struct so_list *tail;
1777 CORE_ADDR prev_lm;
1778
1779 /* svr4_current_sos_direct contains logic to handle a number of
1780 special cases relating to the first elements of the list. To
1781 avoid duplicating this logic we defer to solist_update_full
1782 if the list is empty. */
1783 if (info->solib_list == NULL)
1784 return 0;
1785
1786 /* Fall back to a full update if we are using a remote target
1787 that does not support incremental transfers. */
1788 if (info->using_xfer && !target_augmented_libraries_svr4_read ())
1789 return 0;
1790
1791 /* Walk to the end of the list. */
1792 for (tail = info->solib_list; tail->next != NULL; tail = tail->next)
1793 /* Nothing. */;
1794 prev_lm = tail->lm_info->lm_addr;
1795
1796 /* Read the new objects. */
1797 if (info->using_xfer)
1798 {
1799 struct svr4_library_list library_list;
1800 char annex[64];
1801
1802 xsnprintf (annex, sizeof (annex), "start=%s;prev=%s",
1803 phex_nz (lm, sizeof (lm)),
1804 phex_nz (prev_lm, sizeof (prev_lm)));
1805 if (!svr4_current_sos_via_xfer_libraries (&library_list, annex))
1806 return 0;
1807
1808 tail->next = library_list.head;
1809 }
1810 else
1811 {
1812 struct so_list **link = &tail->next;
1813
1814 /* IGNORE_FIRST may safely be set to zero here because the
1815 above check and deferral to solist_update_full ensures
1816 that this call to svr4_read_so_list will never see the
1817 first element. */
1818 if (!svr4_read_so_list (lm, prev_lm, &link, 0))
1819 return 0;
1820 }
1821
1822 return 1;
1823 }
1824
1825 /* Disable the probes-based linker interface and revert to the
1826 original interface. We don't reset the breakpoints as the
1827 ones set up for the probes-based interface are adequate. */
1828
1829 static void
1830 disable_probes_interface_cleanup (void *arg)
1831 {
1832 struct svr4_info *info = get_svr4_info ();
1833
1834 warning (_("Probes-based dynamic linker interface failed.\n"
1835 "Reverting to original interface.\n"));
1836
1837 free_probes_table (info);
1838 free_solib_list (info);
1839 }
1840
1841 /* Update the solib list as appropriate when using the
1842 probes-based linker interface. Do nothing if using the
1843 standard interface. */
1844
1845 static void
1846 svr4_handle_solib_event (void)
1847 {
1848 struct svr4_info *info = get_svr4_info ();
1849 struct probe_and_action *pa;
1850 enum probe_action action;
1851 struct cleanup *old_chain, *usm_chain;
1852 struct value *val;
1853 CORE_ADDR pc, debug_base, lm = 0;
1854 int is_initial_ns;
1855 struct frame_info *frame = get_current_frame ();
1856
1857 /* Do nothing if not using the probes interface. */
1858 if (info->probes_table == NULL)
1859 return;
1860
1861 /* If anything goes wrong we revert to the original linker
1862 interface. */
1863 old_chain = make_cleanup (disable_probes_interface_cleanup, NULL);
1864
1865 pc = regcache_read_pc (get_current_regcache ());
1866 pa = solib_event_probe_at (info, pc);
1867 if (pa == NULL)
1868 {
1869 do_cleanups (old_chain);
1870 return;
1871 }
1872
1873 action = solib_event_probe_action (pa);
1874 if (action == PROBES_INTERFACE_FAILED)
1875 {
1876 do_cleanups (old_chain);
1877 return;
1878 }
1879
1880 if (action == DO_NOTHING)
1881 {
1882 discard_cleanups (old_chain);
1883 return;
1884 }
1885
1886 /* evaluate_probe_argument looks up symbols in the dynamic linker
1887 using find_pc_section. find_pc_section is accelerated by a cache
1888 called the section map. The section map is invalidated every
1889 time a shared library is loaded or unloaded, and if the inferior
1890 is generating a lot of shared library events then the section map
1891 will be updated every time svr4_handle_solib_event is called.
1892 We called find_pc_section in svr4_create_solib_event_breakpoints,
1893 so we can guarantee that the dynamic linker's sections are in the
1894 section map. We can therefore inhibit section map updates across
1895 these calls to evaluate_probe_argument and save a lot of time. */
1896 inhibit_section_map_updates (current_program_space);
1897 usm_chain = make_cleanup (resume_section_map_updates_cleanup,
1898 current_program_space);
1899
1900 val = evaluate_probe_argument (pa->probe, 1, frame);
1901 if (val == NULL)
1902 {
1903 do_cleanups (old_chain);
1904 return;
1905 }
1906
1907 debug_base = value_as_address (val);
1908 if (debug_base == 0)
1909 {
1910 do_cleanups (old_chain);
1911 return;
1912 }
1913
1914 /* Always locate the debug struct, in case it moved. */
1915 info->debug_base = 0;
1916 if (locate_base (info) == 0)
1917 {
1918 do_cleanups (old_chain);
1919 return;
1920 }
1921
1922 /* GDB does not currently support libraries loaded via dlmopen
1923 into namespaces other than the initial one. We must ignore
1924 any namespace other than the initial namespace here until
1925 support for this is added to GDB. */
1926 if (debug_base != info->debug_base)
1927 action = DO_NOTHING;
1928
1929 if (action == UPDATE_OR_RELOAD)
1930 {
1931 val = evaluate_probe_argument (pa->probe, 2, frame);
1932 if (val != NULL)
1933 lm = value_as_address (val);
1934
1935 if (lm == 0)
1936 action = FULL_RELOAD;
1937 }
1938
1939 /* Resume section map updates. */
1940 do_cleanups (usm_chain);
1941
1942 if (action == UPDATE_OR_RELOAD)
1943 {
1944 if (!solist_update_incremental (info, lm))
1945 action = FULL_RELOAD;
1946 }
1947
1948 if (action == FULL_RELOAD)
1949 {
1950 if (!solist_update_full (info))
1951 {
1952 do_cleanups (old_chain);
1953 return;
1954 }
1955 }
1956
1957 discard_cleanups (old_chain);
1958 }
1959
1960 /* Helper function for svr4_update_solib_event_breakpoints. */
1961
1962 static int
1963 svr4_update_solib_event_breakpoint (struct breakpoint *b, void *arg)
1964 {
1965 struct bp_location *loc;
1966
1967 if (b->type != bp_shlib_event)
1968 {
1969 /* Continue iterating. */
1970 return 0;
1971 }
1972
1973 for (loc = b->loc; loc != NULL; loc = loc->next)
1974 {
1975 struct svr4_info *info;
1976 struct probe_and_action *pa;
1977
1978 info = program_space_data (loc->pspace, solib_svr4_pspace_data);
1979 if (info == NULL || info->probes_table == NULL)
1980 continue;
1981
1982 pa = solib_event_probe_at (info, loc->address);
1983 if (pa == NULL)
1984 continue;
1985
1986 if (pa->action == DO_NOTHING)
1987 {
1988 if (b->enable_state == bp_disabled && stop_on_solib_events)
1989 enable_breakpoint (b);
1990 else if (b->enable_state == bp_enabled && !stop_on_solib_events)
1991 disable_breakpoint (b);
1992 }
1993
1994 break;
1995 }
1996
1997 /* Continue iterating. */
1998 return 0;
1999 }
2000
2001 /* Enable or disable optional solib event breakpoints as appropriate.
2002 Called whenever stop_on_solib_events is changed. */
2003
2004 static void
2005 svr4_update_solib_event_breakpoints (void)
2006 {
2007 iterate_over_breakpoints (svr4_update_solib_event_breakpoint, NULL);
2008 }
2009
2010 /* Create and register solib event breakpoints. PROBES is an array
2011 of NUM_PROBES elements, each of which is vector of probes. A
2012 solib event breakpoint will be created and registered for each
2013 probe. */
2014
2015 static void
2016 svr4_create_probe_breakpoints (struct gdbarch *gdbarch,
2017 VEC (probe_p) **probes,
2018 struct objfile *objfile)
2019 {
2020 int i;
2021
2022 for (i = 0; i < NUM_PROBES; i++)
2023 {
2024 enum probe_action action = probe_info[i].action;
2025 struct probe *probe;
2026 int ix;
2027
2028 for (ix = 0;
2029 VEC_iterate (probe_p, probes[i], ix, probe);
2030 ++ix)
2031 {
2032 CORE_ADDR address = get_probe_address (probe, objfile);
2033
2034 create_solib_event_breakpoint (gdbarch, address);
2035 register_solib_event_probe (probe, address, action);
2036 }
2037 }
2038
2039 svr4_update_solib_event_breakpoints ();
2040 }
2041
2042 /* Both the SunOS and the SVR4 dynamic linkers call a marker function
2043 before and after mapping and unmapping shared libraries. The sole
2044 purpose of this method is to allow debuggers to set a breakpoint so
2045 they can track these changes.
2046
2047 Some versions of the glibc dynamic linker contain named probes
2048 to allow more fine-grained stopping. Given the address of the
2049 original marker function, this function attempts to find these
2050 probes, and if found, sets breakpoints on those instead. If the
2051 probes aren't found, a single breakpoint is set on the original
2052 marker function. */
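/* For reference, the probe names looked up below come from the
   probe_info[] table defined earlier in this file; in glibc they are
   along the lines of "init_start", "init_complete", "map_start",
   "map_failed", "map_complete", "reloc_complete", "unmap_start" and
   "unmap_complete", all under the "rtld" provider.  The exact set may
   differ between glibc releases.  */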
2053
2054 static void
2055 svr4_create_solib_event_breakpoints (struct gdbarch *gdbarch,
2056 CORE_ADDR address)
2057 {
2058 struct obj_section *os;
2059
2060 os = find_pc_section (address);
2061 if (os != NULL)
2062 {
2063 int with_prefix;
2064
2065 for (with_prefix = 0; with_prefix <= 1; with_prefix++)
2066 {
2067 VEC (probe_p) *probes[NUM_PROBES];
2068 int all_probes_found = 1;
2069 int checked_can_use_probe_arguments = 0;
2070 int i;
2071
2072 memset (probes, 0, sizeof (probes));
2073 for (i = 0; i < NUM_PROBES; i++)
2074 {
2075 const char *name = probe_info[i].name;
2076 struct probe *p;
2077 char buf[32];
2078
2079 /* Fedora 17 and Red Hat Enterprise Linux 6.2-6.4
2080 shipped with an early version of the probes code in
2081 which the probes' names were prefixed with "rtld_"
2082 and the "map_failed" probe did not exist. The
2083 locations of the probes are otherwise the same, so
2084 we check for probes with prefixed names if probes
2085 with unprefixed names are not present. */
2086 if (with_prefix)
2087 {
2088 xsnprintf (buf, sizeof (buf), "rtld_%s", name);
2089 name = buf;
2090 }
2091
2092 probes[i] = find_probes_in_objfile (os->objfile, "rtld", name);
2093
2094 /* The "map_failed" probe did not exist in early
2095 versions of the probes code in which the probes'
2096 names were prefixed with "rtld_". */
2097 if (strcmp (name, "rtld_map_failed") == 0)
2098 continue;
2099
2100 if (VEC_empty (probe_p, probes[i]))
2101 {
2102 all_probes_found = 0;
2103 break;
2104 }
2105
2106 /* Ensure probe arguments can be evaluated. */
2107 if (!checked_can_use_probe_arguments)
2108 {
2109 p = VEC_index (probe_p, probes[i], 0);
2110 if (!can_evaluate_probe_arguments (p))
2111 {
2112 all_probes_found = 0;
2113 break;
2114 }
2115 checked_can_use_probe_arguments = 1;
2116 }
2117 }
2118
2119 if (all_probes_found)
2120 svr4_create_probe_breakpoints (gdbarch, probes, os->objfile);
2121
2122 for (i = 0; i < NUM_PROBES; i++)
2123 VEC_free (probe_p, probes[i]);
2124
2125 if (all_probes_found)
2126 return;
2127 }
2128 }
2129
2130 create_solib_event_breakpoint (gdbarch, address);
2131 }
2132
2133 /* Helper function for gdb_bfd_lookup_symbol. */
2134
2135 static int
2136 cmp_name_and_sec_flags (asymbol *sym, void *data)
2137 {
2138 return (strcmp (sym->name, (const char *) data) == 0
2139 && (sym->section->flags & (SEC_CODE | SEC_DATA)) != 0);
2140 }
2141 /* Arrange for dynamic linker to hit breakpoint.
2142
2143 Both the SunOS and the SVR4 dynamic linkers have, as part of their
2144 debugger interface, support for arranging for the inferior to hit
2145 a breakpoint after mapping in the shared libraries. This function
2146 enables that breakpoint.
2147
2148 For SunOS, there is a special flag location (in_debugger) which we
2149 set to 1. When the dynamic linker sees this flag set, it will set
2150 a breakpoint at a location known only to itself, after saving the
2151 original contents of that place and the breakpoint address itself,
2152 in its own internal structures. When we resume the inferior, it
2153 will eventually take a SIGTRAP when it runs into the breakpoint.
2154 We handle this (in a different place) by restoring the contents of
2155 the breakpointed location (which is only known after it stops),
2156 chasing around to locate the shared libraries that have been
2157 loaded, then resuming.
2158
2159 For SVR4, the debugger interface structure contains a member (r_brk)
2160 which is statically initialized at the time the shared library is
2161 built, to the offset of a function (_r_debug_state) which is
2162 guaranteed to be called once before mapping in a library, and again when
2163 the mapping is complete. At the time we are examining this member,
2164 it contains only the unrelocated offset of the function, so we have
2165 to do our own relocation. Later, when the dynamic linker actually
2166 runs, it relocates r_brk to be the actual address of _r_debug_state().
2167
2168 The debugger interface structure also contains an enumeration which
2169 is set to either RT_ADD or RT_DELETE prior to changing the mapping,
2170 depending upon whether or not the library is being mapped or unmapped,
2171 and then set to RT_CONSISTENT after the library is mapped/unmapped. */
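/* For reference, the debugger interface structure described above
   corresponds roughly to the glibc <link.h> declaration below (shown
   for illustration only; the authoritative layout is whatever the
   inferior's runtime linker provides):

     struct r_debug
     {
       int r_version;
       struct link_map *r_map;
       ElfW(Addr) r_brk;
       enum { RT_CONSISTENT, RT_ADD, RT_DELETE } r_state;
       ElfW(Addr) r_ldbase;
     };  */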
2172
2173 static int
2174 enable_break (struct svr4_info *info, int from_tty)
2175 {
2176 struct bound_minimal_symbol msymbol;
2177 const char * const *bkpt_namep;
2178 asection *interp_sect;
2179 char *interp_name;
2180 CORE_ADDR sym_addr;
2181
2182 info->interp_text_sect_low = info->interp_text_sect_high = 0;
2183 info->interp_plt_sect_low = info->interp_plt_sect_high = 0;
2184
2185 /* If we already have a shared library list in the target, and
2186 r_debug contains r_brk, set the breakpoint there - this should
2187 mean r_brk has already been relocated. Assume the dynamic linker
2188 is the object containing r_brk. */
2189
2190 solib_add (NULL, from_tty, &current_target, auto_solib_add);
2191 sym_addr = 0;
2192 if (info->debug_base && solib_svr4_r_map (info) != 0)
2193 sym_addr = solib_svr4_r_brk (info);
2194
2195 if (sym_addr != 0)
2196 {
2197 struct obj_section *os;
2198
2199 sym_addr = gdbarch_addr_bits_remove
2200 (target_gdbarch (), gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
2201 sym_addr,
2202 &current_target));
2203
2204 /* On at least some versions of Solaris there's a dynamic relocation
2205 on _r_debug.r_brk and SYM_ADDR may not be relocated yet, e.g., if
2206 we get control before the dynamic linker has self-relocated.
2207 Check if SYM_ADDR is in a known section; if it is, assume we can
2208 trust its value. This is just a heuristic though, it could go away
2209 or be replaced if it's getting in the way.
2210
2211 On ARM we need to know whether the ISA of rtld_db_dlactivity (or
2212 however it's spelled in your particular system) is ARM or Thumb.
2213 That knowledge is encoded in the address, if it's Thumb the low bit
2214 is 1. However, we've stripped that info above and it's not clear
2215 what all the consequences are of passing a non-addr_bits_remove'd
2216 address to svr4_create_solib_event_breakpoints. The call to
2217 find_pc_section verifies we know about the address and have some
2218 hope of computing the right kind of breakpoint to use (via
2219 symbol info). It does mean that GDB needs to be pointed at a
2220 non-stripped version of the dynamic linker in order to obtain
2221 information it already knows about. Sigh. */
2222
2223 os = find_pc_section (sym_addr);
2224 if (os != NULL)
2225 {
2226 /* Record the relocated start and end address of the dynamic linker
2227 text and plt section for svr4_in_dynsym_resolve_code. */
2228 bfd *tmp_bfd;
2229 CORE_ADDR load_addr;
2230
2231 tmp_bfd = os->objfile->obfd;
2232 load_addr = ANOFFSET (os->objfile->section_offsets,
2233 SECT_OFF_TEXT (os->objfile));
2234
2235 interp_sect = bfd_get_section_by_name (tmp_bfd, ".text");
2236 if (interp_sect)
2237 {
2238 info->interp_text_sect_low =
2239 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
2240 info->interp_text_sect_high =
2241 info->interp_text_sect_low
2242 + bfd_section_size (tmp_bfd, interp_sect);
2243 }
2244 interp_sect = bfd_get_section_by_name (tmp_bfd, ".plt");
2245 if (interp_sect)
2246 {
2247 info->interp_plt_sect_low =
2248 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
2249 info->interp_plt_sect_high =
2250 info->interp_plt_sect_low
2251 + bfd_section_size (tmp_bfd, interp_sect);
2252 }
2253
2254 svr4_create_solib_event_breakpoints (target_gdbarch (), sym_addr);
2255 return 1;
2256 }
2257 }
2258
2259 /* Find the program interpreter; if not found, warn the user and drop
2260 into the old breakpoint-at-symbol code. */
2261 interp_name = find_program_interpreter ();
2262 if (interp_name)
2263 {
2264 CORE_ADDR load_addr = 0;
2265 int load_addr_found = 0;
2266 int loader_found_in_list = 0;
2267 struct so_list *so;
2268 bfd *tmp_bfd = NULL;
2269 struct target_ops *tmp_bfd_target;
2270 volatile struct gdb_exception ex;
2271
2272 sym_addr = 0;
2273
2274 /* Now we need to figure out where the dynamic linker was
2275 loaded so that we can load its symbols and place a breakpoint
2276 in the dynamic linker itself.
2277
2278 This address is stored on the stack. However, I've been unable
2279 to find any magic formula to find it for Solaris (appears to
2280 be trivial on GNU/Linux). Therefore, we have to try an alternate
2281 mechanism to find the dynamic linker's base address. */
2282
2283 TRY_CATCH (ex, RETURN_MASK_ALL)
2284 {
2285 tmp_bfd = solib_bfd_open (interp_name);
2286 }
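/* If solib_bfd_open threw, TMP_BFD is still NULL (it was initialized
   that way above), so the check below falls back to the
   breakpoint-at-symbol path.  */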
2287 if (tmp_bfd == NULL)
2288 goto bkpt_at_symbol;
2289
2290 /* Now convert the TMP_BFD into a target. That way both target and
2291 BFD operations can be used. */
2292 tmp_bfd_target = target_bfd_reopen (tmp_bfd);
2293 /* target_bfd_reopen acquired its own reference, so we can
2294 release ours now. */
2295 gdb_bfd_unref (tmp_bfd);
2296
2297 /* On a running target, we can get the dynamic linker's base
2298 address from the shared library table. */
2299 so = master_so_list ();
2300 while (so)
2301 {
2302 if (svr4_same_1 (interp_name, so->so_original_name))
2303 {
2304 load_addr_found = 1;
2305 loader_found_in_list = 1;
2306 load_addr = lm_addr_check (so, tmp_bfd);
2307 break;
2308 }
2309 so = so->next;
2310 }
2311
2312 /* If we were not able to find the base address of the loader
2313 from our so_list, then try using the AT_BASE auxiliary entry. */
2314 if (!load_addr_found)
2315 if (target_auxv_search (&current_target, AT_BASE, &load_addr) > 0)
2316 {
2317 int addr_bit = gdbarch_addr_bit (target_gdbarch ());
2318
2319 /* Ensure LOAD_ADDR has proper sign in its possible upper bits so
2320 that `+ load_addr' will overflow CORE_ADDR width not creating
2321 invalid addresses like 0x101234567 for 32bit inferiors on 64bit
2322 GDB. */
2323
2324 if (addr_bit < (sizeof (CORE_ADDR) * HOST_CHAR_BIT))
2325 {
2326 CORE_ADDR space_size = (CORE_ADDR) 1 << addr_bit;
2327 CORE_ADDR tmp_entry_point = exec_entry_point (tmp_bfd,
2328 tmp_bfd_target);
2329
2330 gdb_assert (load_addr < space_size);
2331
2332 /* TMP_ENTRY_POINT exceeding SPACE_SIZE would mean a prelinked
2333 64-bit ld.so with a 32-bit executable; it should not happen. */
2334
2335 if (tmp_entry_point < space_size
2336 && tmp_entry_point + load_addr >= space_size)
2337 load_addr -= space_size;
2338 }
2339
2340 load_addr_found = 1;
2341 }
2342
2343 /* Otherwise we find the dynamic linker's base address by examining
2344 the current pc (which should point at the entry point for the
2345 dynamic linker) and subtracting the offset of the entry point.
2346
2347 This is more fragile than the previous approaches, but is a good
2348 fallback method because it has actually been working well in
2349 most cases. */
2350 if (!load_addr_found)
2351 {
2352 struct regcache *regcache
2353 = get_thread_arch_regcache (inferior_ptid, target_gdbarch ());
2354
2355 load_addr = (regcache_read_pc (regcache)
2356 - exec_entry_point (tmp_bfd, tmp_bfd_target));
2357 }
2358
2359 if (!loader_found_in_list)
2360 {
2361 info->debug_loader_name = xstrdup (interp_name);
2362 info->debug_loader_offset_p = 1;
2363 info->debug_loader_offset = load_addr;
2364 solib_add (NULL, from_tty, &current_target, auto_solib_add);
2365 }
2366
2367 /* Record the relocated start and end address of the dynamic linker
2368 text and plt section for svr4_in_dynsym_resolve_code. */
2369 interp_sect = bfd_get_section_by_name (tmp_bfd, ".text");
2370 if (interp_sect)
2371 {
2372 info->interp_text_sect_low =
2373 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
2374 info->interp_text_sect_high =
2375 info->interp_text_sect_low
2376 + bfd_section_size (tmp_bfd, interp_sect);
2377 }
2378 interp_sect = bfd_get_section_by_name (tmp_bfd, ".plt");
2379 if (interp_sect)
2380 {
2381 info->interp_plt_sect_low =
2382 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
2383 info->interp_plt_sect_high =
2384 info->interp_plt_sect_low
2385 + bfd_section_size (tmp_bfd, interp_sect);
2386 }
2387
2388 /* Now try to set a breakpoint in the dynamic linker. */
2389 for (bkpt_namep = solib_break_names; *bkpt_namep != NULL; bkpt_namep++)
2390 {
2391 sym_addr = gdb_bfd_lookup_symbol (tmp_bfd, cmp_name_and_sec_flags,
2392 (void *) *bkpt_namep);
2393 if (sym_addr != 0)
2394 break;
2395 }
2396
2397 if (sym_addr != 0)
2398 /* Convert 'sym_addr' from a function pointer to an address.
2399 Because we pass tmp_bfd_target instead of the current
2400 target, this will always produce an unrelocated value. */
2401 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
2402 sym_addr,
2403 tmp_bfd_target);
2404
2405 /* We're done with both the temporary bfd and target. Closing
2406 the target closes the underlying bfd, because it holds the
2407 only remaining reference. */
2408 target_close (tmp_bfd_target);
2409
2410 if (sym_addr != 0)
2411 {
2412 svr4_create_solib_event_breakpoints (target_gdbarch (),
2413 load_addr + sym_addr);
2414 xfree (interp_name);
2415 return 1;
2416 }
2417
2418 /* For whatever reason we couldn't set a breakpoint in the dynamic
2419 linker. Warn and drop into the old code. */
2420 bkpt_at_symbol:
2421 xfree (interp_name);
2422 warning (_("Unable to find dynamic linker breakpoint function.\n"
2423 "GDB will be unable to debug shared library initializers\n"
2424 "and track explicitly loaded dynamic code."));
2425 }
2426
2427 /* Scan through the lists of symbols, trying to look up the symbol and
2428 set a breakpoint there. Terminate the loop if and when we succeed. */
2429
2430 for (bkpt_namep = solib_break_names; *bkpt_namep != NULL; bkpt_namep++)
2431 {
2432 msymbol = lookup_minimal_symbol (*bkpt_namep, NULL, symfile_objfile);
2433 if ((msymbol.minsym != NULL)
2434 && (BMSYMBOL_VALUE_ADDRESS (msymbol) != 0))
2435 {
2436 sym_addr = BMSYMBOL_VALUE_ADDRESS (msymbol);
2437 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
2438 sym_addr,
2439 &current_target);
2440 svr4_create_solib_event_breakpoints (target_gdbarch (), sym_addr);
2441 return 1;
2442 }
2443 }
2444
2445 if (interp_name != NULL && !current_inferior ()->attach_flag)
2446 {
2447 for (bkpt_namep = bkpt_names; *bkpt_namep != NULL; bkpt_namep++)
2448 {
2449 msymbol = lookup_minimal_symbol (*bkpt_namep, NULL, symfile_objfile);
2450 if ((msymbol.minsym != NULL)
2451 && (BMSYMBOL_VALUE_ADDRESS (msymbol) != 0))
2452 {
2453 sym_addr = BMSYMBOL_VALUE_ADDRESS (msymbol);
2454 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
2455 sym_addr,
2456 &current_target);
2457 svr4_create_solib_event_breakpoints (target_gdbarch (), sym_addr);
2458 return 1;
2459 }
2460 }
2461 }
2462 return 0;
2463 }
2464
2465 /* Implement the "special_symbol_handling" target_so_ops method. */
2466
2467 static void
2468 svr4_special_symbol_handling (void)
2469 {
2470 /* Nothing to do. */
2471 }
2472
2473 /* Read the ELF program headers from ABFD. Return the contents and
2474 set *PHDRS_SIZE to the size of the program headers. */
2475
2476 static gdb_byte *
2477 read_program_headers_from_bfd (bfd *abfd, int *phdrs_size)
2478 {
2479 Elf_Internal_Ehdr *ehdr;
2480 gdb_byte *buf;
2481
2482 ehdr = elf_elfheader (abfd);
2483
2484 *phdrs_size = ehdr->e_phnum * ehdr->e_phentsize;
2485 if (*phdrs_size == 0)
2486 return NULL;
2487
2488 buf = xmalloc (*phdrs_size);
2489 if (bfd_seek (abfd, ehdr->e_phoff, SEEK_SET) != 0
2490 || bfd_bread (buf, *phdrs_size, abfd) != *phdrs_size)
2491 {
2492 xfree (buf);
2493 return NULL;
2494 }
2495
2496 return buf;
2497 }
2498
2499 /* Return 1 and fill *DISPLACEMENTP with detected PIE offset of inferior
2500 exec_bfd. Otherwise return 0.
2501
2502 We relocate all of the sections by the same amount. This
2503 behavior is mandated by recent editions of the System V ABI.
2504 According to the System V Application Binary Interface,
2505 Edition 4.1, page 5-5:
2506
2507 ... Though the system chooses virtual addresses for
2508 individual processes, it maintains the segments' relative
2509 positions. Because position-independent code uses relative
2510 addressing between segments, the difference between
2511 virtual addresses in memory must match the difference
2512 between virtual addresses in the file. The difference
2513 between the virtual address of any segment in memory and
2514 the corresponding virtual address in the file is thus a
2515 single constant value for any one executable or shared
2516 object in a given process. This difference is the base
2517 address. One use of the base address is to relocate the
2518 memory image of the program during dynamic linking.
2519
2520 The same language also appears in Edition 4.0 of the System V
2521 ABI and is left unspecified in some of the earlier editions.
2522
2523 Decide if the objfile needs to be relocated. As indicated above, we will
2524 only be here when execution is stopped. But during attachment the PC can be
2525 at an arbitrary address, so regcache_read_pc can be misleading (contrary to
2526 the auxv AT_ENTRY value). Moreover, for an executable with an interpreter
2527 section, regcache_read_pc would point to the interpreter, not the main executable.
2528
2529 So, to summarize, relocations are necessary when the start address obtained
2530 from the executable is different from the address in auxv AT_ENTRY entry.
2531
2532 [ The astute reader will note that we also test to make sure that
2533 the executable in question has the DYNAMIC flag set. It is my
2534 opinion that this test is unnecessary (undesirable even). It
2535 was added to avoid inadvertent relocation of an executable
2536 whose e_type member in the ELF header is not ET_DYN. There may
2537 be a time in the future when it is desirable to do relocations
2538 on other types of files as well in which case this condition
2539 should either be removed or modified to accommodate the new file
2540 type. - Kevin, Nov 2000. ] */
2541
2542 static int
2543 svr4_exec_displacement (CORE_ADDR *displacementp)
2544 {
2545 /* ENTRY_POINT is a possible function descriptor - before
2546 a call to gdbarch_convert_from_func_ptr_addr. */
2547 CORE_ADDR entry_point, displacement;
2548
2549 if (exec_bfd == NULL)
2550 return 0;
2551
2552 /* If the BFD DYNAMIC flag is clear then for ELF the file is ET_EXEC, not
2553 ET_DYN, and no displacement applies. Both shared libraries being executed
2554 themselves and PIE (Position Independent Executable) executables are ET_DYN. */
2555
2556 if ((bfd_get_file_flags (exec_bfd) & DYNAMIC) == 0)
2557 return 0;
2558
2559 if (target_auxv_search (&current_target, AT_ENTRY, &entry_point) <= 0)
2560 return 0;
2561
2562 displacement = entry_point - bfd_get_start_address (exec_bfd);
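/* Worked example (illustrative numbers only): a PIE whose BFD start
   address is 0x1080 and whose auxv AT_ENTRY is 0x555555555080 yields
   a displacement of 0x555555554000.  */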
2563
2564 /* Verify the DISPLACEMENT candidate complies with the required page
2565 alignment. It is cheaper than the program headers comparison below. */
2566
2567 if (bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
2568 {
2569 const struct elf_backend_data *elf = get_elf_backend_data (exec_bfd);
2570
2571 /* p_align of PT_LOAD segments does not specify any alignment but
2572 only congruency of addresses:
2573 p_offset % p_align == p_vaddr % p_align
2574 The kernel is free to load the executable with lower alignment. */
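/* Illustrative check: with a 4096-byte minimum page size, a candidate
   displacement of 0x555555554000 passes (its low 12 bits are zero)
   while 0x555555554123 would be rejected here.  */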
2575
2576 if ((displacement & (elf->minpagesize - 1)) != 0)
2577 return 0;
2578 }
2579
2580 /* Verify that the auxiliary vector describes the same file as exec_bfd, by
2581 comparing their program headers. If the program headers in the auxiliary
2582 vector do not match the program headers in the executable, then we are
2583 looking at a different file than the one used by the kernel - for
2584 instance, "gdb program" connected to "gdbserver :PORT ld.so program". */
2585
2586 if (bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
2587 {
2588 /* Be optimistic and clear OK only if GDB was able to verify the headers
2589 really do not match. */
2590 int phdrs_size, phdrs2_size, ok = 1;
2591 gdb_byte *buf, *buf2;
2592 int arch_size;
2593
2594 buf = read_program_header (-1, &phdrs_size, &arch_size);
2595 buf2 = read_program_headers_from_bfd (exec_bfd, &phdrs2_size);
2596 if (buf != NULL && buf2 != NULL)
2597 {
2598 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
2599
2600 /* We are dealing with three different addresses. EXEC_BFD
2601 represents the current address in the on-disk file. The target memory
2602 content may differ from EXEC_BFD as the file may have been prelinked
2603 to a different address after the executable has been loaded.
2604 Moreover the address of placement in target memory can be
2605 different from what the program headers in target memory say -
2606 this is the goal of PIE.
2607
2608 Detected DISPLACEMENT covers both the offsets of PIE placement and
2609 possible new prelink performed after start of the program. Here
2610 relocate BUF and BUF2 just by the EXEC_BFD vs. target memory
2611 content offset for the verification purpose. */
2612
2613 if (phdrs_size != phdrs2_size
2614 || bfd_get_arch_size (exec_bfd) != arch_size)
2615 ok = 0;
2616 else if (arch_size == 32
2617 && phdrs_size >= sizeof (Elf32_External_Phdr)
2618 && phdrs_size % sizeof (Elf32_External_Phdr) == 0)
2619 {
2620 Elf_Internal_Ehdr *ehdr2 = elf_tdata (exec_bfd)->elf_header;
2621 Elf_Internal_Phdr *phdr2 = elf_tdata (exec_bfd)->phdr;
2622 CORE_ADDR displacement = 0;
2623 int i;
2624
2625 /* DISPLACEMENT could be found more easily by the difference of
2626 ehdr2->e_entry. But we haven't read the ehdr yet, and we
2627 already have enough information to compute that displacement
2628 with what we've read. */
2629
2630 for (i = 0; i < ehdr2->e_phnum; i++)
2631 if (phdr2[i].p_type == PT_LOAD)
2632 {
2633 Elf32_External_Phdr *phdrp;
2634 gdb_byte *buf_vaddr_p, *buf_paddr_p;
2635 CORE_ADDR vaddr, paddr;
2636 CORE_ADDR displacement_vaddr = 0;
2637 CORE_ADDR displacement_paddr = 0;
2638
2639 phdrp = &((Elf32_External_Phdr *) buf)[i];
2640 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
2641 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
2642
2643 vaddr = extract_unsigned_integer (buf_vaddr_p, 4,
2644 byte_order);
2645 displacement_vaddr = vaddr - phdr2[i].p_vaddr;
2646
2647 paddr = extract_unsigned_integer (buf_paddr_p, 4,
2648 byte_order);
2649 displacement_paddr = paddr - phdr2[i].p_paddr;
2650
2651 if (displacement_vaddr == displacement_paddr)
2652 displacement = displacement_vaddr;
2653
2654 break;
2655 }
2656
2657 /* Now compare BUF and BUF2 with optional DISPLACEMENT. */
2658
2659 for (i = 0; i < phdrs_size / sizeof (Elf32_External_Phdr); i++)
2660 {
2661 Elf32_External_Phdr *phdrp;
2662 Elf32_External_Phdr *phdr2p;
2663 gdb_byte *buf_vaddr_p, *buf_paddr_p;
2664 CORE_ADDR vaddr, paddr;
2665 asection *plt2_asect;
2666
2667 phdrp = &((Elf32_External_Phdr *) buf)[i];
2668 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
2669 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
2670 phdr2p = &((Elf32_External_Phdr *) buf2)[i];
2671
2672 /* PT_GNU_STACK is an exception: it is never relocated by
2673 prelink as its addresses are always zero. */
2674
2675 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2676 continue;
2677
2678 /* Check also other adjustment combinations - PR 11786. */
2679
2680 vaddr = extract_unsigned_integer (buf_vaddr_p, 4,
2681 byte_order);
2682 vaddr -= displacement;
2683 store_unsigned_integer (buf_vaddr_p, 4, byte_order, vaddr);
2684
2685 paddr = extract_unsigned_integer (buf_paddr_p, 4,
2686 byte_order);
2687 paddr -= displacement;
2688 store_unsigned_integer (buf_paddr_p, 4, byte_order, paddr);
2689
2690 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2691 continue;
2692
2693 /* Strip modifies the flags and alignment of PT_GNU_RELRO.
2694 CentOS-5 has problems with filesz, memsz as well.
2695 See PR 11786. */
2696 if (phdr2[i].p_type == PT_GNU_RELRO)
2697 {
2698 Elf32_External_Phdr tmp_phdr = *phdrp;
2699 Elf32_External_Phdr tmp_phdr2 = *phdr2p;
2700
2701 memset (tmp_phdr.p_filesz, 0, 4);
2702 memset (tmp_phdr.p_memsz, 0, 4);
2703 memset (tmp_phdr.p_flags, 0, 4);
2704 memset (tmp_phdr.p_align, 0, 4);
2705 memset (tmp_phdr2.p_filesz, 0, 4);
2706 memset (tmp_phdr2.p_memsz, 0, 4);
2707 memset (tmp_phdr2.p_flags, 0, 4);
2708 memset (tmp_phdr2.p_align, 0, 4);
2709
2710 if (memcmp (&tmp_phdr, &tmp_phdr2, sizeof (tmp_phdr))
2711 == 0)
2712 continue;
2713 }
2714
2715 /* prelink can convert .plt SHT_NOBITS to SHT_PROGBITS. */
2716 plt2_asect = bfd_get_section_by_name (exec_bfd, ".plt");
2717 if (plt2_asect)
2718 {
2719 int content2;
2720 gdb_byte *buf_filesz_p = (gdb_byte *) &phdrp->p_filesz;
2721 CORE_ADDR filesz;
2722
2723 content2 = (bfd_get_section_flags (exec_bfd, plt2_asect)
2724 & SEC_HAS_CONTENTS) != 0;
2725
2726 filesz = extract_unsigned_integer (buf_filesz_p, 4,
2727 byte_order);
2728
2729 /* PLT2_ASECT is from on-disk file (exec_bfd) while
2730 FILESZ is from the in-memory image. */
2731 if (content2)
2732 filesz += bfd_get_section_size (plt2_asect);
2733 else
2734 filesz -= bfd_get_section_size (plt2_asect);
2735
2736 store_unsigned_integer (buf_filesz_p, 4, byte_order,
2737 filesz);
2738
2739 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2740 continue;
2741 }
2742
2743 ok = 0;
2744 break;
2745 }
2746 }
2747 else if (arch_size == 64
2748 && phdrs_size >= sizeof (Elf64_External_Phdr)
2749 && phdrs_size % sizeof (Elf64_External_Phdr) == 0)
2750 {
2751 Elf_Internal_Ehdr *ehdr2 = elf_tdata (exec_bfd)->elf_header;
2752 Elf_Internal_Phdr *phdr2 = elf_tdata (exec_bfd)->phdr;
2753 CORE_ADDR displacement = 0;
2754 int i;
2755
2756 /* DISPLACEMENT could be found more easily by the difference of
2757 ehdr2->e_entry. But we haven't read the ehdr yet, and we
2758 already have enough information to compute that displacement
2759 with what we've read. */
2760
2761 for (i = 0; i < ehdr2->e_phnum; i++)
2762 if (phdr2[i].p_type == PT_LOAD)
2763 {
2764 Elf64_External_Phdr *phdrp;
2765 gdb_byte *buf_vaddr_p, *buf_paddr_p;
2766 CORE_ADDR vaddr, paddr;
2767 CORE_ADDR displacement_vaddr = 0;
2768 CORE_ADDR displacement_paddr = 0;
2769
2770 phdrp = &((Elf64_External_Phdr *) buf)[i];
2771 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
2772 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
2773
2774 vaddr = extract_unsigned_integer (buf_vaddr_p, 8,
2775 byte_order);
2776 displacement_vaddr = vaddr - phdr2[i].p_vaddr;
2777
2778 paddr = extract_unsigned_integer (buf_paddr_p, 8,
2779 byte_order);
2780 displacement_paddr = paddr - phdr2[i].p_paddr;
2781
2782 if (displacement_vaddr == displacement_paddr)
2783 displacement = displacement_vaddr;
2784
2785 break;
2786 }
2787
2788 /* Now compare BUF and BUF2 with optional DISPLACEMENT. */
2789
2790 for (i = 0; i < phdrs_size / sizeof (Elf64_External_Phdr); i++)
2791 {
2792 Elf64_External_Phdr *phdrp;
2793 Elf64_External_Phdr *phdr2p;
2794 gdb_byte *buf_vaddr_p, *buf_paddr_p;
2795 CORE_ADDR vaddr, paddr;
2796 asection *plt2_asect;
2797
2798 phdrp = &((Elf64_External_Phdr *) buf)[i];
2799 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
2800 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
2801 phdr2p = &((Elf64_External_Phdr *) buf2)[i];
2802
2803 /* PT_GNU_STACK is an exception: it is never relocated by
2804 prelink as its addresses are always zero. */
2805
2806 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2807 continue;
2808
2809 /* Check also other adjustment combinations - PR 11786. */
2810
2811 vaddr = extract_unsigned_integer (buf_vaddr_p, 8,
2812 byte_order);
2813 vaddr -= displacement;
2814 store_unsigned_integer (buf_vaddr_p, 8, byte_order, vaddr);
2815
2816 paddr = extract_unsigned_integer (buf_paddr_p, 8,
2817 byte_order);
2818 paddr -= displacement;
2819 store_unsigned_integer (buf_paddr_p, 8, byte_order, paddr);
2820
2821 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2822 continue;
2823
2824 /* Strip modifies the flags and alignment of PT_GNU_RELRO.
2825 CentOS-5 has problems with filesz, memsz as well.
2826 See PR 11786. */
2827 if (phdr2[i].p_type == PT_GNU_RELRO)
2828 {
2829 Elf64_External_Phdr tmp_phdr = *phdrp;
2830 Elf64_External_Phdr tmp_phdr2 = *phdr2p;
2831
2832 memset (tmp_phdr.p_filesz, 0, 8);
2833 memset (tmp_phdr.p_memsz, 0, 8);
2834 memset (tmp_phdr.p_flags, 0, 4);
2835 memset (tmp_phdr.p_align, 0, 8);
2836 memset (tmp_phdr2.p_filesz, 0, 8);
2837 memset (tmp_phdr2.p_memsz, 0, 8);
2838 memset (tmp_phdr2.p_flags, 0, 4);
2839 memset (tmp_phdr2.p_align, 0, 8);
2840
2841 if (memcmp (&tmp_phdr, &tmp_phdr2, sizeof (tmp_phdr))
2842 == 0)
2843 continue;
2844 }
2845
2846 /* prelink can convert .plt SHT_NOBITS to SHT_PROGBITS. */
2847 plt2_asect = bfd_get_section_by_name (exec_bfd, ".plt");
2848 if (plt2_asect)
2849 {
2850 int content2;
2851 gdb_byte *buf_filesz_p = (gdb_byte *) &phdrp->p_filesz;
2852 CORE_ADDR filesz;
2853
2854 content2 = (bfd_get_section_flags (exec_bfd, plt2_asect)
2855 & SEC_HAS_CONTENTS) != 0;
2856
2857 filesz = extract_unsigned_integer (buf_filesz_p, 8,
2858 byte_order);
2859
2860 /* PLT2_ASECT is from on-disk file (exec_bfd) while
2861 FILESZ is from the in-memory image. */
2862 if (content2)
2863 filesz += bfd_get_section_size (plt2_asect);
2864 else
2865 filesz -= bfd_get_section_size (plt2_asect);
2866
2867 store_unsigned_integer (buf_filesz_p, 8, byte_order,
2868 filesz);
2869
2870 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2871 continue;
2872 }
2873
2874 ok = 0;
2875 break;
2876 }
2877 }
2878 else
2879 ok = 0;
2880 }
2881
2882 xfree (buf);
2883 xfree (buf2);
2884
2885 if (!ok)
2886 return 0;
2887 }
2888
2889 if (info_verbose)
2890 {
2891 /* This may be printed repeatedly, as there is no easy way to check
2892 whether the executable's symbols/file have already been relocated by
2893 the displacement. */
2894
2895 printf_unfiltered (_("Using PIE (Position Independent Executable) "
2896 "displacement %s for \"%s\".\n"),
2897 paddress (target_gdbarch (), displacement),
2898 bfd_get_filename (exec_bfd));
2899 }
2900
2901 *displacementp = displacement;
2902 return 1;
2903 }
2904
2905 /* Relocate the main executable. This function should be called upon
2906 stopping the inferior process at the entry point to the program.
2907 The entry point from BFD is compared to the AT_ENTRY of AUXV and if they are
2908 different, the main executable is relocated by the proper amount. */
2909
2910 static void
2911 svr4_relocate_main_executable (void)
2912 {
2913 CORE_ADDR displacement;
2914
2915 /* If we are re-running this executable, SYMFILE_OBJFILE->SECTION_OFFSETS
2916 probably contains the offsets computed using the PIE displacement
2917 from the previous run, which of course are irrelevant for this run.
2918 So we need to determine the new PIE displacement and recompute the
2919 section offsets accordingly, even if SYMFILE_OBJFILE->SECTION_OFFSETS
2920 already contains pre-computed offsets.
2921
2922 If we cannot compute the PIE displacement, either:
2923
2924 - The executable is not PIE.
2925
2926 - SYMFILE_OBJFILE does not match the executable started in the target.
2927 This can happen for main executable symbols loaded at the host while
2928 `ld.so --ld-args main-executable' is loaded in the target.
2929
2930 Then we leave the section offsets untouched and use them as is for
2931 this run. Either:
2932
2933 - These section offsets were properly reset earlier, and thus
2934 already contain the correct values. This can happen for instance
2935 when reconnecting via the remote protocol to a target that supports
2936 the `qOffsets' packet.
2937
2938 - The section offsets were not reset earlier, and the best we can
2939 hope is that the old offsets are still applicable to the new run. */
2940
2941 if (! svr4_exec_displacement (&displacement))
2942 return;
2943
2944 /* Even DISPLACEMENT 0 is a valid new difference of in-memory vs. in-file
2945 addresses. */
2946
2947 if (symfile_objfile)
2948 {
2949 struct section_offsets *new_offsets;
2950 int i;
2951
2952 new_offsets = alloca (symfile_objfile->num_sections
2953 * sizeof (*new_offsets));
2954
2955 for (i = 0; i < symfile_objfile->num_sections; i++)
2956 new_offsets->offsets[i] = displacement;
2957
2958 objfile_relocate (symfile_objfile, new_offsets);
2959 }
2960 else if (exec_bfd)
2961 {
2962 asection *asect;
2963
2964 for (asect = exec_bfd->sections; asect != NULL; asect = asect->next)
2965 exec_set_section_address (bfd_get_filename (exec_bfd), asect->index,
2966 (bfd_section_vma (exec_bfd, asect)
2967 + displacement));
2968 }
2969 }
2970
2971 /* Implement the "solib_create_inferior_hook" target_so_ops method.
2972
2973 For SVR4 executables, the inferior's first instruction is either the first
2974 instruction in the dynamic linker (for dynamically linked
2975 executables) or the instruction at "start" for statically linked
2976 executables. For dynamically linked executables, the system
2977 first exec's /lib/libc.so.N, which contains the dynamic linker,
2978 and starts it running. The dynamic linker maps in any needed
2979 shared libraries, maps in the actual user executable, and then
2980 jumps to "start" in the user executable.
2981
2982 We can arrange to cooperate with the dynamic linker to discover the
2983 names of shared libraries that are dynamically linked, and the base
2984 addresses to which they are linked.
2985
2986 This function is responsible for discovering those names and
2987 addresses, and saving sufficient information about them to allow
2988 their symbols to be read at a later time. */
2989
2990 static void
2991 svr4_solib_create_inferior_hook (int from_tty)
2992 {
2993 struct svr4_info *info;
2994
2995 info = get_svr4_info ();
2996
2997 /* Clear the probes-based interface's state. */
2998 free_probes_table (info);
2999 free_solib_list (info);
3000
3001 /* Relocate the main executable if necessary. */
3002 svr4_relocate_main_executable ();
3003
3004 /* No point setting a breakpoint in the dynamic linker if we can't
3005 hit it (e.g., a core file, or a trace file). */
3006 if (!target_has_execution)
3007 return;
3008
3009 if (!svr4_have_link_map_offsets ())
3010 return;
3011
3012 if (!enable_break (info, from_tty))
3013 return;
3014 }
3015
3016 static void
3017 svr4_clear_solib (void)
3018 {
3019 struct svr4_info *info;
3020
3021 info = get_svr4_info ();
3022 info->debug_base = 0;
3023 info->debug_loader_offset_p = 0;
3024 info->debug_loader_offset = 0;
3025 xfree (info->debug_loader_name);
3026 info->debug_loader_name = NULL;
3027 }
3028
3029 /* Clear any bits of ADDR that wouldn't fit in a target-format
3030 data pointer. "Data pointer" here refers to whatever sort of
3031 address the dynamic linker uses to manage its sections. At the
3032 moment, we don't support shared libraries on any processors where
3033 code and data pointers are different sizes.
3034
3035 This isn't really the right solution. What we really need here is
3036 a way to do arithmetic on CORE_ADDR values that respects the
3037 natural pointer/address correspondence. (For example, on the MIPS,
3038 converting a 32-bit pointer to a 64-bit CORE_ADDR requires you to
3039 sign-extend the value. There, simply truncating the bits above
3040 gdbarch_ptr_bit, as we do below, is no good.) This should probably
3041 be a new gdbarch method or something. */
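/* Illustrative example: with a gdbarch_ptr_bit of 32 on a 64-bit host,
   an ADDR of 0x00000001f7fd0000 is truncated to 0xf7fd0000.  */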
3042 static CORE_ADDR
3043 svr4_truncate_ptr (CORE_ADDR addr)
3044 {
3045 if (gdbarch_ptr_bit (target_gdbarch ()) == sizeof (CORE_ADDR) * 8)
3046 /* We don't need to truncate anything, and the bit twiddling below
3047 will fail due to overflow problems. */
3048 return addr;
3049 else
3050 return addr & (((CORE_ADDR) 1 << gdbarch_ptr_bit (target_gdbarch ())) - 1);
3051 }
3052
3053
3054 static void
3055 svr4_relocate_section_addresses (struct so_list *so,
3056 struct target_section *sec)
3057 {
3058 bfd *abfd = sec->the_bfd_section->owner;
3059
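/* For example (illustrative numbers): a section at file address 0x2000
   in a library whose load bias lm_addr_check reports as 0x7ffff7bd3000
   ends up with SEC->addr 0x7ffff7bd5000.  */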
3060 sec->addr = svr4_truncate_ptr (sec->addr + lm_addr_check (so, abfd));
3061 sec->endaddr = svr4_truncate_ptr (sec->endaddr + lm_addr_check (so, abfd));
3062 }
3063 \f
3064
3065 /* Architecture-specific operations. */
3066
3067 /* Per-architecture data key. */
3068 static struct gdbarch_data *solib_svr4_data;
3069
3070 struct solib_svr4_ops
3071 {
3072 /* Return a description of the layout of `struct link_map'. */
3073 struct link_map_offsets *(*fetch_link_map_offsets)(void);
3074 };
3075
3076 /* Return a default for the architecture-specific operations. */
3077
3078 static void *
3079 solib_svr4_init (struct obstack *obstack)
3080 {
3081 struct solib_svr4_ops *ops;
3082
3083 ops = OBSTACK_ZALLOC (obstack, struct solib_svr4_ops);
3084 ops->fetch_link_map_offsets = NULL;
3085 return ops;
3086 }
3087
3088 /* Set the architecture-specific `struct link_map_offsets' fetcher for
3089 GDBARCH to FLMO. Also, install SVR4 solib_ops into GDBARCH. */
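/* A typical caller is an architecture's tdep initialization, along the
   lines of (illustrative):

     set_solib_svr4_fetch_link_map_offsets
       (gdbarch, svr4_ilp32_fetch_link_map_offsets);  */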
3090
3091 void
3092 set_solib_svr4_fetch_link_map_offsets (struct gdbarch *gdbarch,
3093 struct link_map_offsets *(*flmo) (void))
3094 {
3095 struct solib_svr4_ops *ops = gdbarch_data (gdbarch, solib_svr4_data);
3096
3097 ops->fetch_link_map_offsets = flmo;
3098
3099 set_solib_ops (gdbarch, &svr4_so_ops);
3100 }
3101
3102 /* Fetch a link_map_offsets structure using the architecture-specific
3103 `struct link_map_offsets' fetcher. */
3104
3105 static struct link_map_offsets *
3106 svr4_fetch_link_map_offsets (void)
3107 {
3108 struct solib_svr4_ops *ops = gdbarch_data (target_gdbarch (), solib_svr4_data);
3109
3110 gdb_assert (ops->fetch_link_map_offsets);
3111 return ops->fetch_link_map_offsets ();
3112 }
3113
3114 /* Return 1 if a link map offset fetcher has been defined, 0 otherwise. */
3115
3116 static int
3117 svr4_have_link_map_offsets (void)
3118 {
3119 struct solib_svr4_ops *ops = gdbarch_data (target_gdbarch (), solib_svr4_data);
3120
3121 return (ops->fetch_link_map_offsets != NULL);
3122 }
3123 \f
3124
3125 /* Most OSes that have SVR4-style ELF dynamic libraries define a
3126 `struct r_debug' and a `struct link_map' that are binary compatible
3127 with the original SVR4 implementation. */
3128
3129 /* Fetch (and possibly build) an appropriate `struct link_map_offsets'
3130 for an ILP32 SVR4 system. */
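/* The offsets below mirror an ILP32 `struct link_map' laid out roughly
   as follows (an illustrative sketch, not a verbatim system header):

     struct link_map
     {
       Elf32_Addr l_addr;          offset  0
       char *l_name;               offset  4
       Elf32_Dyn *l_ld;            offset  8
       struct link_map *l_next;    offset 12
       struct link_map *l_prev;    offset 16
     };

   The LP64 variant further below simply doubles the pointer-sized
   fields, giving the 8/16/24/32 offsets and the 40-byte size used
   there.  */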
3131
3132 struct link_map_offsets *
3133 svr4_ilp32_fetch_link_map_offsets (void)
3134 {
3135 static struct link_map_offsets lmo;
3136 static struct link_map_offsets *lmp = NULL;
3137
3138 if (lmp == NULL)
3139 {
3140 lmp = &lmo;
3141
3142 lmo.r_version_offset = 0;
3143 lmo.r_version_size = 4;
3144 lmo.r_map_offset = 4;
3145 lmo.r_brk_offset = 8;
3146 lmo.r_ldsomap_offset = 20;
3147
3148 /* Everything we need is in the first 20 bytes. */
3149 lmo.link_map_size = 20;
3150 lmo.l_addr_offset = 0;
3151 lmo.l_name_offset = 4;
3152 lmo.l_ld_offset = 8;
3153 lmo.l_next_offset = 12;
3154 lmo.l_prev_offset = 16;
3155 }
3156
3157 return lmp;
3158 }
3159
3160 /* Fetch (and possibly build) an appropriate `struct link_map_offsets'
3161 for an LP64 SVR4 system. */
3162
3163 struct link_map_offsets *
3164 svr4_lp64_fetch_link_map_offsets (void)
3165 {
3166 static struct link_map_offsets lmo;
3167 static struct link_map_offsets *lmp = NULL;
3168
3169 if (lmp == NULL)
3170 {
3171 lmp = &lmo;
3172
3173 lmo.r_version_offset = 0;
3174 lmo.r_version_size = 4;
3175 lmo.r_map_offset = 8;
3176 lmo.r_brk_offset = 16;
3177 lmo.r_ldsomap_offset = 40;
3178
3179 /* Everything we need is in the first 40 bytes. */
3180 lmo.link_map_size = 40;
3181 lmo.l_addr_offset = 0;
3182 lmo.l_name_offset = 8;
3183 lmo.l_ld_offset = 16;
3184 lmo.l_next_offset = 24;
3185 lmo.l_prev_offset = 32;
3186 }
3187
3188 return lmp;
3189 }
3190 \f
3191
3192 struct target_so_ops svr4_so_ops;
3193
3194 /* Look up a global symbol for ELF DSOs linked with -Bsymbolic. Those DSOs have a
3195 different rule for symbol lookup. The lookup begins here in the DSO, not in
3196 the main executable. */
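/* (-Bsymbolic is normally reflected as a DT_SYMBOLIC entry in the DSO's
   dynamic section, which is what the scan_dyntag call below tests for;
   newer linkers may express it via the DF_SYMBOLIC flag instead, which
   is not checked here.)  */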
3197
3198 static struct symbol *
3199 elf_lookup_lib_symbol (struct objfile *objfile,
3200 const char *name,
3201 const domain_enum domain)
3202 {
3203 bfd *abfd;
3204
3205 if (objfile == symfile_objfile)
3206 abfd = exec_bfd;
3207 else
3208 {
3209 /* OBJFILE should have been passed as the non-debug one. */
3210 gdb_assert (objfile->separate_debug_objfile_backlink == NULL);
3211
3212 abfd = objfile->obfd;
3213 }
3214
3215 if (abfd == NULL || scan_dyntag (DT_SYMBOLIC, abfd, NULL) != 1)
3216 return NULL;
3217
3218 return lookup_global_symbol_from_objfile (objfile, name, domain);
3219 }
3220
3221 extern initialize_file_ftype _initialize_svr4_solib; /* -Wmissing-prototypes */
3222
3223 void
3224 _initialize_svr4_solib (void)
3225 {
3226 solib_svr4_data = gdbarch_data_register_pre_init (solib_svr4_init);
3227 solib_svr4_pspace_data
3228 = register_program_space_data_with_cleanup (NULL, svr4_pspace_data_cleanup);
3229
3230 svr4_so_ops.relocate_section_addresses = svr4_relocate_section_addresses;
3231 svr4_so_ops.free_so = svr4_free_so;
3232 svr4_so_ops.clear_so = svr4_clear_so;
3233 svr4_so_ops.clear_solib = svr4_clear_solib;
3234 svr4_so_ops.solib_create_inferior_hook = svr4_solib_create_inferior_hook;
3235 svr4_so_ops.special_symbol_handling = svr4_special_symbol_handling;
3236 svr4_so_ops.current_sos = svr4_current_sos;
3237 svr4_so_ops.open_symbol_file_object = open_symbol_file_object;
3238 svr4_so_ops.in_dynsym_resolve_code = svr4_in_dynsym_resolve_code;
3239 svr4_so_ops.bfd_open = solib_bfd_open;
3240 svr4_so_ops.lookup_lib_global_symbol = elf_lookup_lib_symbol;
3241 svr4_so_ops.same = svr4_same;
3242 svr4_so_ops.keep_data_in_core = svr4_keep_data_in_core;
3243 svr4_so_ops.update_breakpoints = svr4_update_solib_event_breakpoints;
3244 svr4_so_ops.handle_event = svr4_handle_solib_event;
3245 }