gdb/solib-svr4.c
1 /* Handle SVR4 shared libraries for GDB, the GNU Debugger.
2
3 Copyright (C) 1990-2018 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21
22 #include "elf/external.h"
23 #include "elf/common.h"
24 #include "elf/mips.h"
25
26 #include "symtab.h"
27 #include "bfd.h"
28 #include "symfile.h"
29 #include "objfiles.h"
30 #include "gdbcore.h"
31 #include "target.h"
32 #include "inferior.h"
33 #include "infrun.h"
34 #include "regcache.h"
35 #include "gdbthread.h"
36 #include "observable.h"
37
38 #include "solist.h"
39 #include "solib.h"
40 #include "solib-svr4.h"
41
42 #include "bfd-target.h"
43 #include "elf-bfd.h"
44 #include "exec.h"
45 #include "auxv.h"
46 #include "gdb_bfd.h"
47 #include "probe.h"
48
49 static struct link_map_offsets *svr4_fetch_link_map_offsets (void);
50 static int svr4_have_link_map_offsets (void);
51 static void svr4_relocate_main_executable (void);
52 static void svr4_free_library_list (void *p_list);
53
54 /* On SVR4 systems, a list of symbols in the dynamic linker where
55 GDB can try to place a breakpoint to monitor shared library
56 events.
57
58 If none of these symbols are found, or other errors occur, then
59 SVR4 systems will fall back to using a symbol as the "startup
60 mapping complete" breakpoint address. */
61
62 static const char * const solib_break_names[] =
63 {
64 "r_debug_state",
65 "_r_debug_state",
66 "_dl_debug_state",
67 "rtld_db_dlactivity",
68 "__dl_rtld_db_dlactivity",
69 "_rtld_debug_state",
70
71 NULL
72 };
73
74 static const char * const bkpt_names[] =
75 {
76 "_start",
77 "__start",
78 "main",
79 NULL
80 };
81
82 static const char * const main_name_list[] =
83 {
84 "main_$main",
85 NULL
86 };
87
88 /* What to do when a probe stop occurs. */
89
90 enum probe_action
91 {
92 /* Something went seriously wrong. Stop using probes and
93 revert to using the older interface. */
94 PROBES_INTERFACE_FAILED,
95
96 /* No action is required. The shared object list is still
97 valid. */
98 DO_NOTHING,
99
100 /* The shared object list should be reloaded entirely. */
101 FULL_RELOAD,
102
103 /* Attempt to incrementally update the shared object list. If
104 the update fails or is not possible, fall back to reloading
105 the list in full. */
106 UPDATE_OR_RELOAD,
107 };
108
109 /* A probe's name and its associated action. */
110
111 struct probe_info
112 {
113 /* The name of the probe. */
114 const char *name;
115
116 /* What to do when a probe stop occurs. */
117 enum probe_action action;
118 };
119
120 /* A list of named probes and their associated actions. If all
121 probes are present in the dynamic linker then the probes-based
122 interface will be used. */
123
124 static const struct probe_info probe_info[] =
125 {
126 { "init_start", DO_NOTHING },
127 { "init_complete", FULL_RELOAD },
128 { "map_start", DO_NOTHING },
129 { "map_failed", DO_NOTHING },
130 { "reloc_complete", UPDATE_OR_RELOAD },
131 { "unmap_start", DO_NOTHING },
132 { "unmap_complete", FULL_RELOAD },
133 };
134
135 #define NUM_PROBES ARRAY_SIZE (probe_info)
136
137 /* Return non-zero if GDB_SO_NAME and INFERIOR_SO_NAME represent
138 the same shared library. */
139
140 static int
141 svr4_same_1 (const char *gdb_so_name, const char *inferior_so_name)
142 {
143 if (strcmp (gdb_so_name, inferior_so_name) == 0)
144 return 1;
145
146 /* On Solaris, when starting an inferior we think that the dynamic linker
147 is /usr/lib/ld.so.1, but later on, the table of loaded shared libraries
148 contains /lib/ld.so.1. Sometimes one file is a link to the other, but
149 sometimes they have identical content without being linked to each
150 other. We don't restrict this check to Solaris, but the chances
151 of running into this situation elsewhere are very low. */
152 if (strcmp (gdb_so_name, "/usr/lib/ld.so.1") == 0
153 && strcmp (inferior_so_name, "/lib/ld.so.1") == 0)
154 return 1;
155
156 /* Similarly, we observed the same issue with sparc64, but with
157 different locations. */
158 if (strcmp (gdb_so_name, "/usr/lib/sparcv9/ld.so.1") == 0
159 && strcmp (inferior_so_name, "/lib/sparcv9/ld.so.1") == 0)
160 return 1;
161
162 return 0;
163 }
164
165 static int
166 svr4_same (struct so_list *gdb, struct so_list *inferior)
167 {
168 return (svr4_same_1 (gdb->so_original_name, inferior->so_original_name));
169 }
170
171 static lm_info_svr4 *
172 lm_info_read (CORE_ADDR lm_addr)
173 {
174 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
175 gdb_byte *lm;
176 lm_info_svr4 *lm_info;
177 struct cleanup *back_to;
178
179 lm = (gdb_byte *) xmalloc (lmo->link_map_size);
180 back_to = make_cleanup (xfree, lm);
181
182 if (target_read_memory (lm_addr, lm, lmo->link_map_size) != 0)
183 {
184 warning (_("Error reading shared library list entry at %s"),
185 paddress (target_gdbarch (), lm_addr)),
186 lm_info = NULL;
187 }
188 else
189 {
190 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
191
192 lm_info = new lm_info_svr4;
193 lm_info->lm_addr = lm_addr;
194
195 lm_info->l_addr_inferior = extract_typed_address (&lm[lmo->l_addr_offset],
196 ptr_type);
197 lm_info->l_ld = extract_typed_address (&lm[lmo->l_ld_offset], ptr_type);
198 lm_info->l_next = extract_typed_address (&lm[lmo->l_next_offset],
199 ptr_type);
200 lm_info->l_prev = extract_typed_address (&lm[lmo->l_prev_offset],
201 ptr_type);
202 lm_info->l_name = extract_typed_address (&lm[lmo->l_name_offset],
203 ptr_type);
204 }
205
206 do_cleanups (back_to);
207
208 return lm_info;
209 }
210
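/* For reference, the fields that lm_info_read extracts by offset above
   correspond to the classic SVR4 `struct link_map'.  The sketch below is
   for illustration only (the exact layout is an assumption and varies by
   target); the real offsets and sizes come from
   svr4_fetch_link_map_offsets:

       struct link_map
         {
           ElfW(Addr) l_addr;     difference between the addresses in the
                                  ELF file and the addresses in memory
           char *l_name;          absolute pathname of the object
           ElfW(Dyn) *l_ld;       runtime address of the .dynamic section
           struct link_map *l_next, *l_prev;   doubly-linked chain of
                                               loaded objects
         };  */
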
211 static int
212 has_lm_dynamic_from_link_map (void)
213 {
214 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
215
216 return lmo->l_ld_offset >= 0;
217 }
218
219 static CORE_ADDR
220 lm_addr_check (const struct so_list *so, bfd *abfd)
221 {
222 lm_info_svr4 *li = (lm_info_svr4 *) so->lm_info;
223
224 if (!li->l_addr_p)
225 {
226 struct bfd_section *dyninfo_sect;
227 CORE_ADDR l_addr, l_dynaddr, dynaddr;
228
229 l_addr = li->l_addr_inferior;
230
231 if (! abfd || ! has_lm_dynamic_from_link_map ())
232 goto set_addr;
233
234 l_dynaddr = li->l_ld;
235
236 dyninfo_sect = bfd_get_section_by_name (abfd, ".dynamic");
237 if (dyninfo_sect == NULL)
238 goto set_addr;
239
240 dynaddr = bfd_section_vma (abfd, dyninfo_sect);
241
242 if (dynaddr + l_addr != l_dynaddr)
243 {
244 CORE_ADDR align = 0x1000;
245 CORE_ADDR minpagesize = align;
246
247 if (bfd_get_flavour (abfd) == bfd_target_elf_flavour)
248 {
249 Elf_Internal_Ehdr *ehdr = elf_tdata (abfd)->elf_header;
250 Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
251 int i;
252
253 align = 1;
254
255 for (i = 0; i < ehdr->e_phnum; i++)
256 if (phdr[i].p_type == PT_LOAD && phdr[i].p_align > align)
257 align = phdr[i].p_align;
258
259 minpagesize = get_elf_backend_data (abfd)->minpagesize;
260 }
261
262 /* Turn it into a mask. */
263 align--;
264
265 /* If the changes match the alignment requirements, we
266 assume we're using a core file that was generated by the
267 same binary, just prelinked with a different base offset.
268 If it doesn't match, we may have a different binary, the
269 same binary with the dynamic table loaded at an unrelated
270 location, or anything, really. To avoid regressions,
271 don't adjust the base offset in the latter case, although
272 odds are that, if things really changed, debugging won't
273 quite work.
274
275 One might rather expect the condition
276 ((l_addr & align) == 0 && ((l_dynaddr - dynaddr) & align) == 0)
277 but the one below is relaxed for PPC. The PPC kernel supports
278 either 4k or 64k page sizes. To be prepared for 64k pages,
279 PPC ELF files are built using an alignment requirement of 64k.
280 However, when running on a kernel supporting 4k pages, the memory
281 mapping of the library may not actually happen on a 64k boundary!
282
283 (In the usual case where (l_addr & align) == 0, this check is
284 equivalent to the possibly expected check above.)
285
286 Even on PPC it must be zero-aligned at least for MINPAGESIZE. */
287
288 l_addr = l_dynaddr - dynaddr;
289
290 if ((l_addr & (minpagesize - 1)) == 0
291 && (l_addr & align) == ((l_dynaddr - dynaddr) & align))
292 {
293 if (info_verbose)
294 printf_unfiltered (_("Using PIC (Position Independent Code) "
295 "prelink displacement %s for \"%s\".\n"),
296 paddress (target_gdbarch (), l_addr),
297 so->so_name);
298 }
299 else
300 {
301 /* There is no way to verify that the library file matches. During
302 prelinking of an unprelinked file (or unprelinking of a prelinked
303 file), prelink can shift the DYNAMIC segment by an arbitrary
304 offset without any page size alignment. There is no way to
305 read the ELF header and/or Program Headers for even a limited
306 verification that they match. One could verify the DYNAMIC
307 segment itself. Still, the address found here is the best
308 one GDB could find. */
309
310 warning (_(".dynamic section for \"%s\" "
311 "is not at the expected address "
312 "(wrong library or version mismatch?)"), so->so_name);
313 }
314 }
315
316 set_addr:
317 li->l_addr = l_addr;
318 li->l_addr_p = 1;
319 }
320
321 return li->l_addr;
322 }
323
324 /* Per pspace SVR4 specific data. */
325
326 struct svr4_info
327 {
328 CORE_ADDR debug_base; /* Base of dynamic linker structures. */
329
330 /* Validity flag for debug_loader_offset. */
331 int debug_loader_offset_p;
332
333 /* Load address for the dynamic linker, inferred. */
334 CORE_ADDR debug_loader_offset;
335
336 /* Name of the dynamic linker, valid if debug_loader_offset_p. */
337 char *debug_loader_name;
338
339 /* Load map address for the main executable. */
340 CORE_ADDR main_lm_addr;
341
342 CORE_ADDR interp_text_sect_low;
343 CORE_ADDR interp_text_sect_high;
344 CORE_ADDR interp_plt_sect_low;
345 CORE_ADDR interp_plt_sect_high;
346
347 /* Nonzero if the list of objects was last obtained from the target
348 via qXfer:libraries-svr4:read. */
349 int using_xfer;
350
351 /* Table of struct probe_and_action instances, used by the
352 probes-based interface to map breakpoint addresses to probes
353 and their associated actions. Lookup is performed using
354 probe_and_action->prob->address. */
355 htab_t probes_table;
356
357 /* List of objects loaded into the inferior, used by the probes-
358 based interface. */
359 struct so_list *solib_list;
360 };
361
362 /* Per-program-space data key. */
363 static const struct program_space_data *solib_svr4_pspace_data;
364
365 /* Free the probes table. */
366
367 static void
368 free_probes_table (struct svr4_info *info)
369 {
370 if (info->probes_table == NULL)
371 return;
372
373 htab_delete (info->probes_table);
374 info->probes_table = NULL;
375 }
376
377 /* Free the solib list. */
378
379 static void
380 free_solib_list (struct svr4_info *info)
381 {
382 svr4_free_library_list (&info->solib_list);
383 info->solib_list = NULL;
384 }
385
386 static void
387 svr4_pspace_data_cleanup (struct program_space *pspace, void *arg)
388 {
389 struct svr4_info *info = (struct svr4_info *) arg;
390
391 free_probes_table (info);
392 free_solib_list (info);
393
394 xfree (info);
395 }
396
397 /* Get the current svr4 data. If none is found yet, add it now. This
398 function always returns a valid object. */
399
400 static struct svr4_info *
401 get_svr4_info (void)
402 {
403 struct svr4_info *info;
404
405 info = (struct svr4_info *) program_space_data (current_program_space,
406 solib_svr4_pspace_data);
407 if (info != NULL)
408 return info;
409
410 info = XCNEW (struct svr4_info);
411 set_program_space_data (current_program_space, solib_svr4_pspace_data, info);
412 return info;
413 }
414
415 /* Local function prototypes */
416
417 static int match_main (const char *);
418
419 /* Read program header TYPE from inferior memory. The header is found
420 by scanning the OS auxiliary vector.
421
422 If TYPE == -1, return the program headers instead of the contents of
423 one program header.
424
425 Return a pointer to allocated memory holding the program header contents,
426 or NULL on failure. If successful, and unless P_SECT_SIZE is NULL, the
427 size of those contents is returned in P_SECT_SIZE. Likewise, the target
428 architecture size (32-bit or 64-bit) is returned in P_ARCH_SIZE and
429 the base address of the section is returned in BASE_ADDR. */
430
431 static gdb_byte *
432 read_program_header (int type, int *p_sect_size, int *p_arch_size,
433 CORE_ADDR *base_addr)
434 {
435 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
436 CORE_ADDR at_phdr, at_phent, at_phnum, pt_phdr = 0;
437 int arch_size, sect_size;
438 CORE_ADDR sect_addr;
439 gdb_byte *buf;
440 int pt_phdr_p = 0;
441
442 /* Get required auxv elements from target. */
443 if (target_auxv_search (&current_target, AT_PHDR, &at_phdr) <= 0)
444 return 0;
445 if (target_auxv_search (&current_target, AT_PHENT, &at_phent) <= 0)
446 return 0;
447 if (target_auxv_search (&current_target, AT_PHNUM, &at_phnum) <= 0)
448 return 0;
449 if (!at_phdr || !at_phnum)
450 return 0;
451
452 /* Determine ELF architecture type. */
453 if (at_phent == sizeof (Elf32_External_Phdr))
454 arch_size = 32;
455 else if (at_phent == sizeof (Elf64_External_Phdr))
456 arch_size = 64;
457 else
458 return 0;
459
460 /* Find the requested segment. */
461 if (type == -1)
462 {
463 sect_addr = at_phdr;
464 sect_size = at_phent * at_phnum;
465 }
466 else if (arch_size == 32)
467 {
468 Elf32_External_Phdr phdr;
469 int i;
470
471 /* Search for requested PHDR. */
472 for (i = 0; i < at_phnum; i++)
473 {
474 int p_type;
475
476 if (target_read_memory (at_phdr + i * sizeof (phdr),
477 (gdb_byte *)&phdr, sizeof (phdr)))
478 return 0;
479
480 p_type = extract_unsigned_integer ((gdb_byte *) phdr.p_type,
481 4, byte_order);
482
483 if (p_type == PT_PHDR)
484 {
485 pt_phdr_p = 1;
486 pt_phdr = extract_unsigned_integer ((gdb_byte *) phdr.p_vaddr,
487 4, byte_order);
488 }
489
490 if (p_type == type)
491 break;
492 }
493
494 if (i == at_phnum)
495 return 0;
496
497 /* Retrieve address and size. */
498 sect_addr = extract_unsigned_integer ((gdb_byte *)phdr.p_vaddr,
499 4, byte_order);
500 sect_size = extract_unsigned_integer ((gdb_byte *)phdr.p_memsz,
501 4, byte_order);
502 }
503 else
504 {
505 Elf64_External_Phdr phdr;
506 int i;
507
508 /* Search for requested PHDR. */
509 for (i = 0; i < at_phnum; i++)
510 {
511 int p_type;
512
513 if (target_read_memory (at_phdr + i * sizeof (phdr),
514 (gdb_byte *)&phdr, sizeof (phdr)))
515 return 0;
516
517 p_type = extract_unsigned_integer ((gdb_byte *) phdr.p_type,
518 4, byte_order);
519
520 if (p_type == PT_PHDR)
521 {
522 pt_phdr_p = 1;
523 pt_phdr = extract_unsigned_integer ((gdb_byte *) phdr.p_vaddr,
524 8, byte_order);
525 }
526
527 if (p_type == type)
528 break;
529 }
530
531 if (i == at_phnum)
532 return 0;
533
534 /* Retrieve address and size. */
535 sect_addr = extract_unsigned_integer ((gdb_byte *)phdr.p_vaddr,
536 8, byte_order);
537 sect_size = extract_unsigned_integer ((gdb_byte *)phdr.p_memsz,
538 8, byte_order);
539 }
540
541 /* PT_PHDR is optional, but we really need it
542 for PIE to make this work in general. */
543
544 if (pt_phdr_p)
545 {
546 /* at_phdr is the real address in memory; pt_phdr is what the program header
547 says it should be. The relocation offset is the difference between the two. */
548 sect_addr = sect_addr + (at_phdr - pt_phdr);
549 }
550
551 /* Read in requested program header. */
552 buf = (gdb_byte *) xmalloc (sect_size);
553 if (target_read_memory (sect_addr, buf, sect_size))
554 {
555 xfree (buf);
556 return NULL;
557 }
558
559 if (p_arch_size)
560 *p_arch_size = arch_size;
561 if (p_sect_size)
562 *p_sect_size = sect_size;
563 if (base_addr)
564 *base_addr = sect_addr;
565
566 return buf;
567 }
568
569
570 /* Return program interpreter string. */
571 static char *
572 find_program_interpreter (void)
573 {
574 gdb_byte *buf = NULL;
575
576 /* If we have an exec_bfd, use its section table. */
577 if (exec_bfd
578 && bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
579 {
580 struct bfd_section *interp_sect;
581
582 interp_sect = bfd_get_section_by_name (exec_bfd, ".interp");
583 if (interp_sect != NULL)
584 {
585 int sect_size = bfd_section_size (exec_bfd, interp_sect);
586
587 buf = (gdb_byte *) xmalloc (sect_size);
588 bfd_get_section_contents (exec_bfd, interp_sect, buf, 0, sect_size);
589 }
590 }
591
592 /* If we didn't find it, use the target auxiliary vector. */
593 if (!buf)
594 buf = read_program_header (PT_INTERP, NULL, NULL, NULL);
595
596 return (char *) buf;
597 }
598
599
600 /* Scan for DESIRED_DYNTAG in .dynamic section of ABFD. If DESIRED_DYNTAG is
601 found, 1 is returned and the corresponding PTR is set. */
602
603 static int
604 scan_dyntag (const int desired_dyntag, bfd *abfd, CORE_ADDR *ptr,
605 CORE_ADDR *ptr_addr)
606 {
607 int arch_size, step, sect_size;
608 long current_dyntag;
609 CORE_ADDR dyn_ptr, dyn_addr;
610 gdb_byte *bufend, *bufstart, *buf;
611 Elf32_External_Dyn *x_dynp_32;
612 Elf64_External_Dyn *x_dynp_64;
613 struct bfd_section *sect;
614 struct target_section *target_section;
615
616 if (abfd == NULL)
617 return 0;
618
619 if (bfd_get_flavour (abfd) != bfd_target_elf_flavour)
620 return 0;
621
622 arch_size = bfd_get_arch_size (abfd);
623 if (arch_size == -1)
624 return 0;
625
626 /* Find the start address of the .dynamic section. */
627 sect = bfd_get_section_by_name (abfd, ".dynamic");
628 if (sect == NULL)
629 return 0;
630
631 for (target_section = current_target_sections->sections;
632 target_section < current_target_sections->sections_end;
633 target_section++)
634 if (sect == target_section->the_bfd_section)
635 break;
636 if (target_section < current_target_sections->sections_end)
637 dyn_addr = target_section->addr;
638 else
639 {
640 /* ABFD may come from OBJFILE acting only as a symbol file without being
641 loaded into the target (see add_symbol_file_command). In that case,
642 fall back to the file VMA address, without the possibility of
643 having the section relocated to its actual in-memory address. */
644
645 dyn_addr = bfd_section_vma (abfd, sect);
646 }
647
648 /* Read in .dynamic from the BFD. We will get the actual value
649 from memory later. */
650 sect_size = bfd_section_size (abfd, sect);
651 buf = bufstart = (gdb_byte *) alloca (sect_size);
652 if (!bfd_get_section_contents (abfd, sect,
653 buf, 0, sect_size))
654 return 0;
655
656 /* Iterate over BUF and scan for DYNTAG. If found, set PTR and return. */
657 step = (arch_size == 32) ? sizeof (Elf32_External_Dyn)
658 : sizeof (Elf64_External_Dyn);
659 for (bufend = buf + sect_size;
660 buf < bufend;
661 buf += step)
662 {
663 if (arch_size == 32)
664 {
665 x_dynp_32 = (Elf32_External_Dyn *) buf;
666 current_dyntag = bfd_h_get_32 (abfd, (bfd_byte *) x_dynp_32->d_tag);
667 dyn_ptr = bfd_h_get_32 (abfd, (bfd_byte *) x_dynp_32->d_un.d_ptr);
668 }
669 else
670 {
671 x_dynp_64 = (Elf64_External_Dyn *) buf;
672 current_dyntag = bfd_h_get_64 (abfd, (bfd_byte *) x_dynp_64->d_tag);
673 dyn_ptr = bfd_h_get_64 (abfd, (bfd_byte *) x_dynp_64->d_un.d_ptr);
674 }
675 if (current_dyntag == DT_NULL)
676 return 0;
677 if (current_dyntag == desired_dyntag)
678 {
679 /* If requested, try to read the runtime value of this .dynamic
680 entry. */
681 if (ptr)
682 {
683 struct type *ptr_type;
684 gdb_byte ptr_buf[8];
685 CORE_ADDR ptr_addr_1;
686
687 ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
688 ptr_addr_1 = dyn_addr + (buf - bufstart) + arch_size / 8;
689 if (target_read_memory (ptr_addr_1, ptr_buf, arch_size / 8) == 0)
690 dyn_ptr = extract_typed_address (ptr_buf, ptr_type);
691 *ptr = dyn_ptr;
692 if (ptr_addr)
693 *ptr_addr = dyn_addr + (buf - bufstart);
694 }
695 return 1;
696 }
697 }
698
699 return 0;
700 }
701
702 /* Scan for DESIRED_DYNTAG in .dynamic section of the target's main executable,
703 found by consulting the OS auxiliary vector. If DESIRED_DYNTAG is found, 1
704 is returned and the corresponding PTR is set. */
705
706 static int
707 scan_dyntag_auxv (const int desired_dyntag, CORE_ADDR *ptr,
708 CORE_ADDR *ptr_addr)
709 {
710 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
711 int sect_size, arch_size, step;
712 long current_dyntag;
713 CORE_ADDR dyn_ptr;
714 CORE_ADDR base_addr;
715 gdb_byte *bufend, *bufstart, *buf;
716
717 /* Read in .dynamic section. */
718 buf = bufstart = read_program_header (PT_DYNAMIC, &sect_size, &arch_size,
719 &base_addr);
720 if (!buf)
721 return 0;
722
723 /* Iterate over BUF and scan for DYNTAG. If found, set PTR and return. */
724 step = (arch_size == 32) ? sizeof (Elf32_External_Dyn)
725 : sizeof (Elf64_External_Dyn);
726 for (bufend = buf + sect_size;
727 buf < bufend;
728 buf += step)
729 {
730 if (arch_size == 32)
731 {
732 Elf32_External_Dyn *dynp = (Elf32_External_Dyn *) buf;
733
734 current_dyntag = extract_unsigned_integer ((gdb_byte *) dynp->d_tag,
735 4, byte_order);
736 dyn_ptr = extract_unsigned_integer ((gdb_byte *) dynp->d_un.d_ptr,
737 4, byte_order);
738 }
739 else
740 {
741 Elf64_External_Dyn *dynp = (Elf64_External_Dyn *) buf;
742
743 current_dyntag = extract_unsigned_integer ((gdb_byte *) dynp->d_tag,
744 8, byte_order);
745 dyn_ptr = extract_unsigned_integer ((gdb_byte *) dynp->d_un.d_ptr,
746 8, byte_order);
747 }
748 if (current_dyntag == DT_NULL)
749 break;
750
751 if (current_dyntag == desired_dyntag)
752 {
753 if (ptr)
754 *ptr = dyn_ptr;
755
756 if (ptr_addr)
757 *ptr_addr = base_addr + buf - bufstart;
758
759 xfree (bufstart);
760 return 1;
761 }
762 }
763
764 xfree (bufstart);
765 return 0;
766 }
767
768 /* Locate the base address of dynamic linker structs for SVR4 elf
769 targets.
770
771 For SVR4 elf targets the address of the dynamic linker's runtime
772 structure is contained within the dynamic info section in the
773 executable file. The dynamic section is also mapped into the
774 inferior address space. Because the runtime loader fills in the
775 real address before starting the inferior, we have to read in the
776 dynamic info section from the inferior address space.
777 If there are any errors while trying to find the address, we
778 silently return 0, otherwise the found address is returned. */
779
780 static CORE_ADDR
781 elf_locate_base (void)
782 {
783 struct bound_minimal_symbol msymbol;
784 CORE_ADDR dyn_ptr, dyn_ptr_addr;
785
786 /* Look for DT_MIPS_RLD_MAP first. MIPS executables use this
787 instead of DT_DEBUG, although they sometimes contain an unused
788 DT_DEBUG. */
789 if (scan_dyntag (DT_MIPS_RLD_MAP, exec_bfd, &dyn_ptr, NULL)
790 || scan_dyntag_auxv (DT_MIPS_RLD_MAP, &dyn_ptr, NULL))
791 {
792 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
793 gdb_byte *pbuf;
794 int pbuf_size = TYPE_LENGTH (ptr_type);
795
796 pbuf = (gdb_byte *) alloca (pbuf_size);
797 /* DT_MIPS_RLD_MAP contains a pointer to the address
798 of the dynamic link structure. */
799 if (target_read_memory (dyn_ptr, pbuf, pbuf_size))
800 return 0;
801 return extract_typed_address (pbuf, ptr_type);
802 }
803
804 /* Then check DT_MIPS_RLD_MAP_REL. MIPS executables now use this form
805 because it supports PIE. DT_MIPS_RLD_MAP will also exist
806 in non-PIE executables. */
807 if (scan_dyntag (DT_MIPS_RLD_MAP_REL, exec_bfd, &dyn_ptr, &dyn_ptr_addr)
808 || scan_dyntag_auxv (DT_MIPS_RLD_MAP_REL, &dyn_ptr, &dyn_ptr_addr))
809 {
810 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
811 gdb_byte *pbuf;
812 int pbuf_size = TYPE_LENGTH (ptr_type);
813
814 pbuf = (gdb_byte *) alloca (pbuf_size);
815 /* DT_MIPS_RLD_MAP_REL contains an offset from the address of the
816 DT slot to the address of the dynamic link structure. */
817 if (target_read_memory (dyn_ptr + dyn_ptr_addr, pbuf, pbuf_size))
818 return 0;
819 return extract_typed_address (pbuf, ptr_type);
820 }
821
822 /* Find DT_DEBUG. */
823 if (scan_dyntag (DT_DEBUG, exec_bfd, &dyn_ptr, NULL)
824 || scan_dyntag_auxv (DT_DEBUG, &dyn_ptr, NULL))
825 return dyn_ptr;
826
827 /* This may be a static executable. Look for the symbol
828 conventionally named _r_debug, as a last resort. */
829 msymbol = lookup_minimal_symbol ("_r_debug", NULL, symfile_objfile);
830 if (msymbol.minsym != NULL)
831 return BMSYMBOL_VALUE_ADDRESS (msymbol);
832
833 /* DT_DEBUG entry not found. */
834 return 0;
835 }
836
837 /* Locate the base address of dynamic linker structs.
838
839 For both the SunOS and SVR4 shared library implementations, if the
840 inferior executable has been linked dynamically, there is a single
841 address somewhere in the inferior's data space which is the key to
842 locating all of the dynamic linker's runtime structures. This
843 address is the value of the debug base symbol. The job of this
844 function is to find and return that address, or to return 0 if there
845 is no such address (the executable is statically linked for example).
846
847 For SunOS, the job is almost trivial, since the dynamic linker and
848 all of its structures are statically linked to the executable at
849 link time. Thus the symbol for the address we are looking for has
850 already been added to the minimal symbol table for the executable's
851 objfile at the time the symbol file's symbols were read, and all we
852 have to do is look it up there. Note that we explicitly do NOT want
853 to find the copies in the shared library.
854
855 The SVR4 version is a bit more complicated because the address
856 is contained somewhere in the dynamic info section. We have to go
857 to a lot more work to discover the address of the debug base symbol.
858 Because of this complexity, we cache the value we find and return that
859 value on subsequent invocations. Note there is no copy in the
860 executable symbol tables. */
861
862 static CORE_ADDR
863 locate_base (struct svr4_info *info)
864 {
865 /* Check to see if we have a currently valid address, and if so, avoid
866 doing all this work again and just return the cached address. If
867 we have no cached address, try to locate it in the dynamic info
868 section for ELF executables. There's no point in doing any of this
869 though if we don't have some link map offsets to work with. */
870
871 if (info->debug_base == 0 && svr4_have_link_map_offsets ())
872 info->debug_base = elf_locate_base ();
873 return info->debug_base;
874 }
875
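/* The debug base found above is the address of the dynamic linker's
   `struct r_debug'; the helpers below read its members at the offsets
   supplied by svr4_fetch_link_map_offsets.  The layout sketched here is
   for illustration only (an assumption; layouts differ across systems,
   and some, such as Solaris, append extra members like r_ldsomap):

       struct r_debug
         {
           int r_version;            interface version
           struct link_map *r_map;   head of the chain of loaded objects
           ElfW(Addr) r_brk;         address where GDB can place a
                                     breakpoint to observe mapping changes
           enum { RT_CONSISTENT, RT_ADD, RT_DELETE } r_state;
           ElfW(Addr) r_ldbase;      base address of the dynamic linker
         };  */
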
876 /* Find the first element in the inferior's dynamic link map, and
877 return its address in the inferior. Return zero if the address
878 could not be determined.
879
880 FIXME: Perhaps we should validate the info somehow, perhaps by
881 checking r_version for a known version number, or r_state for
882 RT_CONSISTENT. */
883
884 static CORE_ADDR
885 solib_svr4_r_map (struct svr4_info *info)
886 {
887 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
888 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
889 CORE_ADDR addr = 0;
890
891 TRY
892 {
893 addr = read_memory_typed_address (info->debug_base + lmo->r_map_offset,
894 ptr_type);
895 }
896 CATCH (ex, RETURN_MASK_ERROR)
897 {
898 exception_print (gdb_stderr, ex);
899 }
900 END_CATCH
901
902 return addr;
903 }
904
905 /* Find r_brk from the inferior's debug base. */
906
907 static CORE_ADDR
908 solib_svr4_r_brk (struct svr4_info *info)
909 {
910 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
911 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
912
913 return read_memory_typed_address (info->debug_base + lmo->r_brk_offset,
914 ptr_type);
915 }
916
917 /* Find the link map for the dynamic linker (if it is not in the
918 normal list of loaded shared objects). */
919
920 static CORE_ADDR
921 solib_svr4_r_ldsomap (struct svr4_info *info)
922 {
923 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
924 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
925 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
926 ULONGEST version = 0;
927
928 TRY
929 {
930 /* Check version, and return zero if `struct r_debug' doesn't have
931 the r_ldsomap member. */
932 version
933 = read_memory_unsigned_integer (info->debug_base + lmo->r_version_offset,
934 lmo->r_version_size, byte_order);
935 }
936 CATCH (ex, RETURN_MASK_ERROR)
937 {
938 exception_print (gdb_stderr, ex);
939 }
940 END_CATCH
941
942 if (version < 2 || lmo->r_ldsomap_offset == -1)
943 return 0;
944
945 return read_memory_typed_address (info->debug_base + lmo->r_ldsomap_offset,
946 ptr_type);
947 }
948
949 /* On Solaris systems with some versions of the dynamic linker,
950 ld.so's l_name pointer points to the SONAME in the string table
951 rather than into writable memory. So that GDB can find shared
952 libraries when loading a core file generated by gcore, ensure that
953 memory areas containing the l_name string are saved in the core
954 file. */
955
956 static int
957 svr4_keep_data_in_core (CORE_ADDR vaddr, unsigned long size)
958 {
959 struct svr4_info *info;
960 CORE_ADDR ldsomap;
961 struct so_list *newobj;
962 struct cleanup *old_chain;
963 CORE_ADDR name_lm;
964
965 info = get_svr4_info ();
966
967 info->debug_base = 0;
968 locate_base (info);
969 if (!info->debug_base)
970 return 0;
971
972 ldsomap = solib_svr4_r_ldsomap (info);
973 if (!ldsomap)
974 return 0;
975
976 newobj = XCNEW (struct so_list);
977 old_chain = make_cleanup (xfree, newobj);
978 lm_info_svr4 *li = lm_info_read (ldsomap);
979 newobj->lm_info = li;
980 make_cleanup (xfree, newobj->lm_info);
981 name_lm = li != NULL ? li->l_name : 0;
982 do_cleanups (old_chain);
983
984 return (name_lm >= vaddr && name_lm < vaddr + size);
985 }
986
987 /* See solist.h. */
988
989 static int
990 open_symbol_file_object (int from_tty)
991 {
992 CORE_ADDR lm, l_name;
993 char *filename;
994 int errcode;
995 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
996 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
997 int l_name_size = TYPE_LENGTH (ptr_type);
998 gdb_byte *l_name_buf = (gdb_byte *) xmalloc (l_name_size);
999 struct cleanup *cleanups = make_cleanup (xfree, l_name_buf);
1000 struct svr4_info *info = get_svr4_info ();
1001 symfile_add_flags add_flags = 0;
1002
1003 if (from_tty)
1004 add_flags |= SYMFILE_VERBOSE;
1005
1006 if (symfile_objfile)
1007 if (!query (_("Attempt to reload symbols from process? ")))
1008 {
1009 do_cleanups (cleanups);
1010 return 0;
1011 }
1012
1013 /* Always locate the debug struct, in case it has moved. */
1014 info->debug_base = 0;
1015 if (locate_base (info) == 0)
1016 {
1017 do_cleanups (cleanups);
1018 return 0; /* failed somehow... */
1019 }
1020
1021 /* First link map member should be the executable. */
1022 lm = solib_svr4_r_map (info);
1023 if (lm == 0)
1024 {
1025 do_cleanups (cleanups);
1026 return 0; /* failed somehow... */
1027 }
1028
1029 /* Read address of name from target memory to GDB. */
1030 read_memory (lm + lmo->l_name_offset, l_name_buf, l_name_size);
1031
1032 /* Convert the address to host format. */
1033 l_name = extract_typed_address (l_name_buf, ptr_type);
1034
1035 if (l_name == 0)
1036 {
1037 do_cleanups (cleanups);
1038 return 0; /* No filename. */
1039 }
1040
1041 /* Now fetch the filename from target memory. */
1042 target_read_string (l_name, &filename, SO_NAME_MAX_PATH_SIZE - 1, &errcode);
1043 make_cleanup (xfree, filename);
1044
1045 if (errcode)
1046 {
1047 warning (_("failed to read exec filename from attached file: %s"),
1048 safe_strerror (errcode));
1049 do_cleanups (cleanups);
1050 return 0;
1051 }
1052
1053 /* Have a pathname: read the symbol file. */
1054 symbol_file_add_main (filename, add_flags);
1055
1056 do_cleanups (cleanups);
1057 return 1;
1058 }
1059
1060 /* Data exchange structure for the XML parser as returned by
1061 svr4_current_sos_via_xfer_libraries. */
1062
1063 struct svr4_library_list
1064 {
1065 struct so_list *head, **tailp;
1066
1067 /* Inferior address of struct link_map used for the main executable. It is
1068 NULL if not known. */
1069 CORE_ADDR main_lm;
1070 };
1071
1072 /* Implementation for target_so_ops.free_so. */
1073
1074 static void
1075 svr4_free_so (struct so_list *so)
1076 {
1077 lm_info_svr4 *li = (lm_info_svr4 *) so->lm_info;
1078
1079 delete li;
1080 }
1081
1082 /* Implement target_so_ops.clear_so. */
1083
1084 static void
1085 svr4_clear_so (struct so_list *so)
1086 {
1087 lm_info_svr4 *li = (lm_info_svr4 *) so->lm_info;
1088
1089 if (li != NULL)
1090 li->l_addr_p = 0;
1091 }
1092
1093 /* Free so_list built so far (called via cleanup). */
1094
1095 static void
1096 svr4_free_library_list (void *p_list)
1097 {
1098 struct so_list *list = *(struct so_list **) p_list;
1099
1100 while (list != NULL)
1101 {
1102 struct so_list *next = list->next;
1103
1104 free_so (list);
1105 list = next;
1106 }
1107 }
1108
1109 /* Copy library list. */
1110
1111 static struct so_list *
1112 svr4_copy_library_list (struct so_list *src)
1113 {
1114 struct so_list *dst = NULL;
1115 struct so_list **link = &dst;
1116
1117 while (src != NULL)
1118 {
1119 struct so_list *newobj;
1120
1121 newobj = XNEW (struct so_list);
1122 memcpy (newobj, src, sizeof (struct so_list));
1123
1124 lm_info_svr4 *src_li = (lm_info_svr4 *) src->lm_info;
1125 newobj->lm_info = new lm_info_svr4 (*src_li);
1126
1127 newobj->next = NULL;
1128 *link = newobj;
1129 link = &newobj->next;
1130
1131 src = src->next;
1132 }
1133
1134 return dst;
1135 }
1136
1137 #ifdef HAVE_LIBEXPAT
1138
1139 #include "xml-support.h"
1140
1141 /* Handle the start of a <library> element. Note: new elements are added
1142 at the tail of the list, keeping the list in order. */
1143
1144 static void
1145 library_list_start_library (struct gdb_xml_parser *parser,
1146 const struct gdb_xml_element *element,
1147 void *user_data,
1148 std::vector<gdb_xml_value> &attributes)
1149 {
1150 struct svr4_library_list *list = (struct svr4_library_list *) user_data;
1151 const char *name
1152 = (const char *) xml_find_attribute (attributes, "name")->value.get ();
1153 ULONGEST *lmp
1154 = (ULONGEST *) xml_find_attribute (attributes, "lm")->value.get ();
1155 ULONGEST *l_addrp
1156 = (ULONGEST *) xml_find_attribute (attributes, "l_addr")->value.get ();
1157 ULONGEST *l_ldp
1158 = (ULONGEST *) xml_find_attribute (attributes, "l_ld")->value.get ();
1159 struct so_list *new_elem;
1160
1161 new_elem = XCNEW (struct so_list);
1162 lm_info_svr4 *li = new lm_info_svr4;
1163 new_elem->lm_info = li;
1164 li->lm_addr = *lmp;
1165 li->l_addr_inferior = *l_addrp;
1166 li->l_ld = *l_ldp;
1167
1168 strncpy (new_elem->so_name, name, sizeof (new_elem->so_name) - 1);
1169 new_elem->so_name[sizeof (new_elem->so_name) - 1] = 0;
1170 strcpy (new_elem->so_original_name, new_elem->so_name);
1171
1172 *list->tailp = new_elem;
1173 list->tailp = &new_elem->next;
1174 }
1175
1176 /* Handle the start of a <library-list-svr4> element. */
1177
1178 static void
1179 svr4_library_list_start_list (struct gdb_xml_parser *parser,
1180 const struct gdb_xml_element *element,
1181 void *user_data,
1182 std::vector<gdb_xml_value> &attributes)
1183 {
1184 struct svr4_library_list *list = (struct svr4_library_list *) user_data;
1185 const char *version
1186 = (const char *) xml_find_attribute (attributes, "version")->value.get ();
1187 struct gdb_xml_value *main_lm = xml_find_attribute (attributes, "main-lm");
1188
1189 if (strcmp (version, "1.0") != 0)
1190 gdb_xml_error (parser,
1191 _("SVR4 Library list has unsupported version \"%s\""),
1192 version);
1193
1194 if (main_lm)
1195 list->main_lm = *(ULONGEST *) main_lm->value.get ();
1196 }
1197
1198 /* The allowed elements and attributes for an XML library list.
1199 The root element is a <library-list>. */
1200
1201 static const struct gdb_xml_attribute svr4_library_attributes[] =
1202 {
1203 { "name", GDB_XML_AF_NONE, NULL, NULL },
1204 { "lm", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1205 { "l_addr", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1206 { "l_ld", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1207 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1208 };
1209
1210 static const struct gdb_xml_element svr4_library_list_children[] =
1211 {
1212 {
1213 "library", svr4_library_attributes, NULL,
1214 GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL,
1215 library_list_start_library, NULL
1216 },
1217 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1218 };
1219
1220 static const struct gdb_xml_attribute svr4_library_list_attributes[] =
1221 {
1222 { "version", GDB_XML_AF_NONE, NULL, NULL },
1223 { "main-lm", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
1224 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1225 };
1226
1227 static const struct gdb_xml_element svr4_library_list_elements[] =
1228 {
1229 { "library-list-svr4", svr4_library_list_attributes, svr4_library_list_children,
1230 GDB_XML_EF_NONE, svr4_library_list_start_list, NULL },
1231 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1232 };
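
/* For illustration, a document matching the tables above could look like
   this (the addresses are made-up placeholders):

       <library-list-svr4 version="1.0" main-lm="0x55555575a160">
         <library name="/lib64/libc.so.6" lm="0x7ffff7fc6000"
                  l_addr="0x7ffff7a0d000" l_ld="0x7ffff7dcdba0"/>
       </library-list-svr4>

   Each <library> element is turned into one `struct so_list' by
   library_list_start_library.  */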
1233
1234 /* Parse the qXfer:libraries-svr4:read packet DOCUMENT into *LIST.
1235
1236 Return 0 if the packet is not supported; *LIST is not modified in that
1237 case. Return 1 if *LIST contains the library list; it may be empty, and
1238 the caller is responsible for freeing all its entries. */
1239
1240 static int
1241 svr4_parse_libraries (const char *document, struct svr4_library_list *list)
1242 {
1243 struct cleanup *back_to = make_cleanup (svr4_free_library_list,
1244 &list->head);
1245
1246 memset (list, 0, sizeof (*list));
1247 list->tailp = &list->head;
1248 if (gdb_xml_parse_quick (_("target library list"), "library-list-svr4.dtd",
1249 svr4_library_list_elements, document, list) == 0)
1250 {
1251 /* Parsed successfully, keep the result. */
1252 discard_cleanups (back_to);
1253 return 1;
1254 }
1255
1256 do_cleanups (back_to);
1257 return 0;
1258 }
1259
1260 /* Attempt to get so_list from target via qXfer:libraries-svr4:read packet.
1261
1262 Return 0 if the packet is not supported; *LIST is not modified in that
1263 case. Return 1 if *LIST contains the library list; it may be empty, and
1264 the caller is responsible for freeing all its entries.
1265
1266 Note that ANNEX must be NULL if the remote does not explicitly allow
1267 qXfer:libraries-svr4:read packets with non-empty annexes. Support for
1268 this can be checked using target_augmented_libraries_svr4_read (). */
1269
1270 static int
1271 svr4_current_sos_via_xfer_libraries (struct svr4_library_list *list,
1272 const char *annex)
1273 {
1274 gdb_assert (annex == NULL || target_augmented_libraries_svr4_read ());
1275
1276 /* Fetch the list of shared libraries. */
1277 gdb::unique_xmalloc_ptr<char> svr4_library_document
1278 = target_read_stralloc (&current_target, TARGET_OBJECT_LIBRARIES_SVR4,
1279 annex);
1280 if (svr4_library_document == NULL)
1281 return 0;
1282
1283 return svr4_parse_libraries (svr4_library_document.get (), list);
1284 }
1285
1286 #else
1287
1288 static int
1289 svr4_current_sos_via_xfer_libraries (struct svr4_library_list *list,
1290 const char *annex)
1291 {
1292 return 0;
1293 }
1294
1295 #endif
1296
1297 /* If no shared library information is available from the dynamic
1298 linker, build a fallback list from other sources. */
1299
1300 static struct so_list *
1301 svr4_default_sos (void)
1302 {
1303 struct svr4_info *info = get_svr4_info ();
1304 struct so_list *newobj;
1305
1306 if (!info->debug_loader_offset_p)
1307 return NULL;
1308
1309 newobj = XCNEW (struct so_list);
1310 lm_info_svr4 *li = new lm_info_svr4;
1311 newobj->lm_info = li;
1312
1313 /* Nothing will ever check the other fields if we set l_addr_p. */
1314 li->l_addr = info->debug_loader_offset;
1315 li->l_addr_p = 1;
1316
1317 strncpy (newobj->so_name, info->debug_loader_name, SO_NAME_MAX_PATH_SIZE - 1);
1318 newobj->so_name[SO_NAME_MAX_PATH_SIZE - 1] = '\0';
1319 strcpy (newobj->so_original_name, newobj->so_name);
1320
1321 return newobj;
1322 }
1323
1324 /* Read the whole inferior libraries chain starting at address LM.
1325 Expect the first entry in the chain to have PREV_LM as its previous entry.
1326 Add the entries to the tail referenced by LINK_PTR_PTR. Ignore the
1327 first entry if IGNORE_FIRST and set global MAIN_LM_ADDR according
1328 to it. Returns nonzero upon success. If zero is returned the
1329 entries stored to LINK_PTR_PTR are still valid although they may
1330 represent only part of the inferior library list. */
1331
1332 static int
1333 svr4_read_so_list (CORE_ADDR lm, CORE_ADDR prev_lm,
1334 struct so_list ***link_ptr_ptr, int ignore_first)
1335 {
1336 CORE_ADDR first_l_name = 0;
1337 CORE_ADDR next_lm;
1338
1339 for (; lm != 0; prev_lm = lm, lm = next_lm)
1340 {
1341 int errcode;
1342 char *buffer;
1343
1344 so_list_up newobj (XCNEW (struct so_list));
1345
1346 lm_info_svr4 *li = lm_info_read (lm);
1347 newobj->lm_info = li;
1348 if (li == NULL)
1349 return 0;
1350
1351 next_lm = li->l_next;
1352
1353 if (li->l_prev != prev_lm)
1354 {
1355 warning (_("Corrupted shared library list: %s != %s"),
1356 paddress (target_gdbarch (), prev_lm),
1357 paddress (target_gdbarch (), li->l_prev));
1358 return 0;
1359 }
1360
1361 /* For SVR4 versions, the first entry in the link map is for the
1362 inferior executable, so we must ignore it. For some versions of
1363 SVR4, it has no name. For others (Solaris 2.3 for example), it
1364 does have a name, so we can no longer use a missing name to
1365 decide when to ignore it. */
1366 if (ignore_first && li->l_prev == 0)
1367 {
1368 struct svr4_info *info = get_svr4_info ();
1369
1370 first_l_name = li->l_name;
1371 info->main_lm_addr = li->lm_addr;
1372 continue;
1373 }
1374
1375 /* Extract this shared object's name. */
1376 target_read_string (li->l_name, &buffer, SO_NAME_MAX_PATH_SIZE - 1,
1377 &errcode);
1378 if (errcode != 0)
1379 {
1380 /* If this entry's l_name address matches that of the
1381 inferior executable, then this is not a normal shared
1382 object, but (most likely) a vDSO. In this case, silently
1383 skip it; otherwise emit a warning. */
1384 if (first_l_name == 0 || li->l_name != first_l_name)
1385 warning (_("Can't read pathname for load map: %s."),
1386 safe_strerror (errcode));
1387 continue;
1388 }
1389
1390 strncpy (newobj->so_name, buffer, SO_NAME_MAX_PATH_SIZE - 1);
1391 newobj->so_name[SO_NAME_MAX_PATH_SIZE - 1] = '\0';
1392 strcpy (newobj->so_original_name, newobj->so_name);
1393 xfree (buffer);
1394
1395 /* If this entry has no name, or its name matches the name
1396 for the main executable, don't include it in the list. */
1397 if (! newobj->so_name[0] || match_main (newobj->so_name))
1398 continue;
1399
1400 newobj->next = 0;
1401 /* Don't free it now. */
1402 **link_ptr_ptr = newobj.release ();
1403 *link_ptr_ptr = &(**link_ptr_ptr)->next;
1404 }
1405
1406 return 1;
1407 }
1408
1409 /* Read the full list of currently loaded shared objects directly
1410 from the inferior, without referring to any libraries read and
1411 stored by the probes interface. Handle special cases relating
1412 to the first elements of the list. */
1413
1414 static struct so_list *
1415 svr4_current_sos_direct (struct svr4_info *info)
1416 {
1417 CORE_ADDR lm;
1418 struct so_list *head = NULL;
1419 struct so_list **link_ptr = &head;
1420 struct cleanup *back_to;
1421 int ignore_first;
1422 struct svr4_library_list library_list;
1423
1424 /* Fall back to manual examination of the target if the packet is not
1425 supported or gdbserver failed to find DT_DEBUG. gdb.server/solib-list.exp
1426 tests a case where gdbserver cannot find the shared libraries list while
1427 GDB itself is able to find it via SYMFILE_OBJFILE.
1428
1429 Unfortunately statically linked inferiors will also fall back through this
1430 suboptimal code path. */
1431
1432 info->using_xfer = svr4_current_sos_via_xfer_libraries (&library_list,
1433 NULL);
1434 if (info->using_xfer)
1435 {
1436 if (library_list.main_lm)
1437 info->main_lm_addr = library_list.main_lm;
1438
1439 return library_list.head ? library_list.head : svr4_default_sos ();
1440 }
1441
1442 /* Always locate the debug struct, in case it has moved. */
1443 info->debug_base = 0;
1444 locate_base (info);
1445
1446 /* If we can't find the dynamic linker's base structure, this
1447 must not be a dynamically linked executable. Hmm. */
1448 if (! info->debug_base)
1449 return svr4_default_sos ();
1450
1451 /* Assume that everything is a library if the dynamic loader was loaded
1452 late by a static executable. */
1453 if (exec_bfd && bfd_get_section_by_name (exec_bfd, ".dynamic") == NULL)
1454 ignore_first = 0;
1455 else
1456 ignore_first = 1;
1457
1458 back_to = make_cleanup (svr4_free_library_list, &head);
1459
1460 /* Walk the inferior's link map list, and build our list of
1461 `struct so_list' nodes. */
1462 lm = solib_svr4_r_map (info);
1463 if (lm)
1464 svr4_read_so_list (lm, 0, &link_ptr, ignore_first);
1465
1466 /* On Solaris, the dynamic linker is not in the normal list of
1467 shared objects, so make sure we pick it up too. Having
1468 symbol information for the dynamic linker is quite crucial
1469 for skipping dynamic linker resolver code. */
1470 lm = solib_svr4_r_ldsomap (info);
1471 if (lm)
1472 svr4_read_so_list (lm, 0, &link_ptr, 0);
1473
1474 discard_cleanups (back_to);
1475
1476 if (head == NULL)
1477 return svr4_default_sos ();
1478
1479 return head;
1480 }
1481
1482 /* Implement the main part of the "current_sos" target_so_ops
1483 method. */
1484
1485 static struct so_list *
1486 svr4_current_sos_1 (void)
1487 {
1488 struct svr4_info *info = get_svr4_info ();
1489
1490 /* If the solib list has been read and stored by the probes
1491 interface then we return a copy of the stored list. */
1492 if (info->solib_list != NULL)
1493 return svr4_copy_library_list (info->solib_list);
1494
1495 /* Otherwise obtain the solib list directly from the inferior. */
1496 return svr4_current_sos_direct (info);
1497 }
1498
1499 /* Implement the "current_sos" target_so_ops method. */
1500
1501 static struct so_list *
1502 svr4_current_sos (void)
1503 {
1504 struct so_list *so_head = svr4_current_sos_1 ();
1505 struct mem_range vsyscall_range;
1506
1507 /* Filter out the vDSO module, if present. Its symbol file would
1508 not be found on disk. The vDSO/vsyscall's OBJFILE is instead
1509 managed by symfile-mem.c:add_vsyscall_page. */
1510 if (gdbarch_vsyscall_range (target_gdbarch (), &vsyscall_range)
1511 && vsyscall_range.length != 0)
1512 {
1513 struct so_list **sop;
1514
1515 sop = &so_head;
1516 while (*sop != NULL)
1517 {
1518 struct so_list *so = *sop;
1519
1520 /* We can't simply match the vDSO by starting address alone,
1521 because lm_info->l_addr_inferior (and also l_addr) do not
1522 necessarily represent the real starting address of the
1523 ELF if the vDSO's ELF itself is "prelinked". The l_ld
1524 field (the ".dynamic" section of the shared object)
1525 always points at the absolute/resolved address though.
1526 So check whether that address is inside the vDSO's
1527 mapping instead.
1528
1529 E.g., on Linux 3.16 (x86_64) the vDSO is a regular
1530 0-based ELF, and we see:
1531
1532 (gdb) info auxv
1533 33 AT_SYSINFO_EHDR System-supplied DSO's ELF header 0x7ffff7ffb000
1534 (gdb) p/x *_r_debug.r_map.l_next
1535 $1 = {l_addr = 0x7ffff7ffb000, ..., l_ld = 0x7ffff7ffb318, ...}
1536
1537 And on Linux 2.6.32 (x86_64) we see:
1538
1539 (gdb) info auxv
1540 33 AT_SYSINFO_EHDR System-supplied DSO's ELF header 0x7ffff7ffe000
1541 (gdb) p/x *_r_debug.r_map.l_next
1542 $5 = {l_addr = 0x7ffff88fe000, ..., l_ld = 0x7ffff7ffe580, ... }
1543
1544 Dumping that vDSO shows:
1545
1546 (gdb) info proc mappings
1547 0x7ffff7ffe000 0x7ffff7fff000 0x1000 0 [vdso]
1548 (gdb) dump memory vdso.bin 0x7ffff7ffe000 0x7ffff7fff000
1549 # readelf -Wa vdso.bin
1550 [...]
1551 Entry point address: 0xffffffffff700700
1552 [...]
1553 Section Headers:
1554 [Nr] Name Type Address Off Size
1555 [ 0] NULL 0000000000000000 000000 000000
1556 [ 1] .hash HASH ffffffffff700120 000120 000038
1557 [ 2] .dynsym DYNSYM ffffffffff700158 000158 0000d8
1558 [...]
1559 [ 9] .dynamic DYNAMIC ffffffffff700580 000580 0000f0
1560 */
1561
1562 lm_info_svr4 *li = (lm_info_svr4 *) so->lm_info;
1563
1564 if (address_in_mem_range (li->l_ld, &vsyscall_range))
1565 {
1566 *sop = so->next;
1567 free_so (so);
1568 break;
1569 }
1570
1571 sop = &so->next;
1572 }
1573 }
1574
1575 return so_head;
1576 }
1577
1578 /* Get the address of the link_map for a given OBJFILE. */
1579
1580 CORE_ADDR
1581 svr4_fetch_objfile_link_map (struct objfile *objfile)
1582 {
1583 struct so_list *so;
1584 struct svr4_info *info = get_svr4_info ();
1585
1586 /* Cause svr4_current_sos() to be run if it hasn't been already. */
1587 if (info->main_lm_addr == 0)
1588 solib_add (NULL, 0, auto_solib_add);
1589
1590 /* svr4_current_sos() will set main_lm_addr for the main executable. */
1591 if (objfile == symfile_objfile)
1592 return info->main_lm_addr;
1593
1594 /* The other link map addresses may be found by examining the list
1595 of shared libraries. */
1596 for (so = master_so_list (); so; so = so->next)
1597 if (so->objfile == objfile)
1598 {
1599 lm_info_svr4 *li = (lm_info_svr4 *) so->lm_info;
1600
1601 return li->lm_addr;
1602 }
1603
1604 /* Not found! */
1605 return 0;
1606 }
1607
1608 /* On some systems, the only way to recognize the link map entry for
1609 the main executable file is by looking at its name. Return
1610 non-zero iff SONAME matches one of the known main executable names. */
1611
1612 static int
1613 match_main (const char *soname)
1614 {
1615 const char * const *mainp;
1616
1617 for (mainp = main_name_list; *mainp != NULL; mainp++)
1618 {
1619 if (strcmp (soname, *mainp) == 0)
1620 return (1);
1621 }
1622
1623 return (0);
1624 }
1625
1626 /* Return 1 if PC lies in the dynamic symbol resolution code of the
1627 SVR4 run time loader. */
1628
1629 int
1630 svr4_in_dynsym_resolve_code (CORE_ADDR pc)
1631 {
1632 struct svr4_info *info = get_svr4_info ();
1633
1634 return ((pc >= info->interp_text_sect_low
1635 && pc < info->interp_text_sect_high)
1636 || (pc >= info->interp_plt_sect_low
1637 && pc < info->interp_plt_sect_high)
1638 || in_plt_section (pc)
1639 || in_gnu_ifunc_stub (pc));
1640 }
1641
1642 /* Given an executable's ABFD and target, compute the entry-point
1643 address. */
1644
1645 static CORE_ADDR
1646 exec_entry_point (struct bfd *abfd, struct target_ops *targ)
1647 {
1648 CORE_ADDR addr;
1649
1650 /* KevinB wrote ... for most targets, the address returned by
1651 bfd_get_start_address() is the entry point for the start
1652 function. But, for some targets, bfd_get_start_address() returns
1653 the address of a function descriptor from which the entry point
1654 address may be extracted. This address is extracted by
1655 gdbarch_convert_from_func_ptr_addr(). The method
1656 gdbarch_convert_from_func_ptr_addr() is merely the identity
1657 function for targets which don't use function descriptors. */
1658 addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
1659 bfd_get_start_address (abfd),
1660 targ);
1661 return gdbarch_addr_bits_remove (target_gdbarch (), addr);
1662 }
1663
1664 /* A probe and its associated action. */
1665
1666 struct probe_and_action
1667 {
1668 /* The probe. */
1669 probe *prob;
1670
1671 /* The relocated address of the probe. */
1672 CORE_ADDR address;
1673
1674 /* The action. */
1675 enum probe_action action;
1676 };
1677
1678 /* Returns a hash code for the probe_and_action referenced by p. */
1679
1680 static hashval_t
1681 hash_probe_and_action (const void *p)
1682 {
1683 const struct probe_and_action *pa = (const struct probe_and_action *) p;
1684
1685 return (hashval_t) pa->address;
1686 }
1687
1688 /* Returns non-zero if the probe_and_actions referenced by p1 and p2
1689 are equal. */
1690
1691 static int
1692 equal_probe_and_action (const void *p1, const void *p2)
1693 {
1694 const struct probe_and_action *pa1 = (const struct probe_and_action *) p1;
1695 const struct probe_and_action *pa2 = (const struct probe_and_action *) p2;
1696
1697 return pa1->address == pa2->address;
1698 }
1699
1700 /* Register a solib event probe and its associated action in the
1701 probes table. */
1702
1703 static void
1704 register_solib_event_probe (probe *prob, CORE_ADDR address,
1705 enum probe_action action)
1706 {
1707 struct svr4_info *info = get_svr4_info ();
1708 struct probe_and_action lookup, *pa;
1709 void **slot;
1710
1711 /* Create the probes table, if necessary. */
1712 if (info->probes_table == NULL)
1713 info->probes_table = htab_create_alloc (1, hash_probe_and_action,
1714 equal_probe_and_action,
1715 xfree, xcalloc, xfree);
1716
1717 lookup.prob = prob;
1718 lookup.address = address;
1719 slot = htab_find_slot (info->probes_table, &lookup, INSERT);
1720 gdb_assert (*slot == HTAB_EMPTY_ENTRY);
1721
1722 pa = XCNEW (struct probe_and_action);
1723 pa->prob = prob;
1724 pa->address = address;
1725 pa->action = action;
1726
1727 *slot = pa;
1728 }
1729
1730 /* Get the solib event probe at the specified location, and the
1731 action associated with it. Returns NULL if no solib event probe
1732 was found. */
1733
1734 static struct probe_and_action *
1735 solib_event_probe_at (struct svr4_info *info, CORE_ADDR address)
1736 {
1737 struct probe_and_action lookup;
1738 void **slot;
1739
1740 lookup.address = address;
1741 slot = htab_find_slot (info->probes_table, &lookup, NO_INSERT);
1742
1743 if (slot == NULL)
1744 return NULL;
1745
1746 return (struct probe_and_action *) *slot;
1747 }
1748
1749 /* Decide what action to take when the specified solib event probe is
1750 hit. */
1751
1752 static enum probe_action
1753 solib_event_probe_action (struct probe_and_action *pa)
1754 {
1755 enum probe_action action;
1756 unsigned probe_argc = 0;
1757 struct frame_info *frame = get_current_frame ();
1758
1759 action = pa->action;
1760 if (action == DO_NOTHING || action == PROBES_INTERFACE_FAILED)
1761 return action;
1762
1763 gdb_assert (action == FULL_RELOAD || action == UPDATE_OR_RELOAD);
1764
1765 /* Check that an appropriate number of arguments has been supplied.
1766 We expect:
1767 arg0: Lmid_t lmid (mandatory)
1768 arg1: struct r_debug *debug_base (mandatory)
1769 arg2: struct link_map *new (optional, for incremental updates) */
1770 TRY
1771 {
1772 probe_argc = pa->prob->get_argument_count (frame);
1773 }
1774 CATCH (ex, RETURN_MASK_ERROR)
1775 {
1776 exception_print (gdb_stderr, ex);
1777 probe_argc = 0;
1778 }
1779 END_CATCH
1780
1781 /* If get_argument_count throws an exception, probe_argc will be set
1782 to zero. However, if pa->prob does not have arguments, then
1783 get_argument_count will succeed but probe_argc will also be zero.
1784 The two cases have different causes, but they are
1785 treated the same here: action will be set to
1786 PROBES_INTERFACE_FAILED. */
1787 if (probe_argc == 2)
1788 action = FULL_RELOAD;
1789 else if (probe_argc < 2)
1790 action = PROBES_INTERFACE_FAILED;
1791
1792 return action;
1793 }
1794
1795 /* Populate the shared object list by reading the entire list of
1796 shared objects from the inferior. Handle special cases relating
1797 to the first elements of the list. Returns nonzero on success. */
1798
1799 static int
1800 solist_update_full (struct svr4_info *info)
1801 {
1802 free_solib_list (info);
1803 info->solib_list = svr4_current_sos_direct (info);
1804
1805 return 1;
1806 }
1807
1808 /* Update the shared object list starting from the link-map entry
1809 passed by the linker in the probe's third argument. Returns
1810 nonzero if the list was successfully updated, or zero to indicate
1811 failure. */
1812
1813 static int
1814 solist_update_incremental (struct svr4_info *info, CORE_ADDR lm)
1815 {
1816 struct so_list *tail;
1817 CORE_ADDR prev_lm;
1818
1819 /* svr4_current_sos_direct contains logic to handle a number of
1820 special cases relating to the first elements of the list. To
1821 avoid duplicating this logic we defer to solist_update_full
1822 if the list is empty. */
1823 if (info->solib_list == NULL)
1824 return 0;
1825
1826 /* Fall back to a full update if we are using a remote target
1827 that does not support incremental transfers. */
1828 if (info->using_xfer && !target_augmented_libraries_svr4_read ())
1829 return 0;
1830
1831 /* Walk to the end of the list. */
1832 for (tail = info->solib_list; tail->next != NULL; tail = tail->next)
1833 /* Nothing. */;
1834
1835 lm_info_svr4 *li = (lm_info_svr4 *) tail->lm_info;
1836 prev_lm = li->lm_addr;
1837
1838 /* Read the new objects. */
1839 if (info->using_xfer)
1840 {
1841 struct svr4_library_list library_list;
1842 char annex[64];
1843
1844 xsnprintf (annex, sizeof (annex), "start=%s;prev=%s",
1845 phex_nz (lm, sizeof (lm)),
1846 phex_nz (prev_lm, sizeof (prev_lm)));
1847 if (!svr4_current_sos_via_xfer_libraries (&library_list, annex))
1848 return 0;
1849
1850 tail->next = library_list.head;
1851 }
1852 else
1853 {
1854 struct so_list **link = &tail->next;
1855
1856 /* IGNORE_FIRST may safely be set to zero here because the
1857 above check and deferral to solist_update_full ensures
1858 that this call to svr4_read_so_list will never see the
1859 first element. */
1860 if (!svr4_read_so_list (lm, prev_lm, &link, 0))
1861 return 0;
1862 }
1863
1864 return 1;
1865 }
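
/* Illustrative sketch (not part of GDB; the addresses are made up):
   with LM == 0x7ffff7fd9000 and PREV_LM == 0x7ffff7fca000, the annex
   built above would read

	 start=7ffff7fd9000;prev=7ffff7fca000

   since phex_nz emits bare hex digits without a "0x" prefix.  The
   incremental read itself travels in the usual qXfer form,
   qXfer:libraries-svr4:read:ANNEX:OFFSET,LENGTH.  */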
1866
1867 /* Disable the probes-based linker interface and revert to the
1868 original interface. We don't reset the breakpoints as the
1869 ones set up for the probes-based interface are adequate. */
1870
1871 static void
1872 disable_probes_interface_cleanup (void *arg)
1873 {
1874 struct svr4_info *info = get_svr4_info ();
1875
1876 warning (_("Probes-based dynamic linker interface failed.\n"
1877 "Reverting to original interface.\n"));
1878
1879 free_probes_table (info);
1880 free_solib_list (info);
1881 }
1882
1883 /* Update the solib list as appropriate when using the
1884 probes-based linker interface. Do nothing if using the
1885 standard interface. */
1886
1887 static void
1888 svr4_handle_solib_event (void)
1889 {
1890 struct svr4_info *info = get_svr4_info ();
1891 struct probe_and_action *pa;
1892 enum probe_action action;
1893 struct cleanup *old_chain, *usm_chain;
1894 struct value *val = NULL;
1895 CORE_ADDR pc, debug_base, lm = 0;
1896 struct frame_info *frame = get_current_frame ();
1897
1898 /* Do nothing if not using the probes interface. */
1899 if (info->probes_table == NULL)
1900 return;
1901
1902 /* If anything goes wrong we revert to the original linker
1903 interface. */
1904 old_chain = make_cleanup (disable_probes_interface_cleanup, NULL);
1905
1906 pc = regcache_read_pc (get_current_regcache ());
1907 pa = solib_event_probe_at (info, pc);
1908 if (pa == NULL)
1909 {
1910 do_cleanups (old_chain);
1911 return;
1912 }
1913
1914 action = solib_event_probe_action (pa);
1915 if (action == PROBES_INTERFACE_FAILED)
1916 {
1917 do_cleanups (old_chain);
1918 return;
1919 }
1920
1921 if (action == DO_NOTHING)
1922 {
1923 discard_cleanups (old_chain);
1924 return;
1925 }
1926
1927 /* evaluate_argument looks up symbols in the dynamic linker
1928 using find_pc_section. find_pc_section is accelerated by a cache
1929 called the section map. The section map is invalidated every
1930 time a shared library is loaded or unloaded, and if the inferior
1931 is generating a lot of shared library events then the section map
1932 will be updated every time svr4_handle_solib_event is called.
1933 We called find_pc_section in svr4_create_solib_event_breakpoints,
1934 so we can guarantee that the dynamic linker's sections are in the
1935 section map. We can therefore inhibit section map updates across
1936 these calls to evaluate_argument and save a lot of time. */
1937 inhibit_section_map_updates (current_program_space);
1938 usm_chain = make_cleanup (resume_section_map_updates_cleanup,
1939 current_program_space);
1940
1941 TRY
1942 {
1943 val = pa->prob->evaluate_argument (1, frame);
1944 }
1945 CATCH (ex, RETURN_MASK_ERROR)
1946 {
1947 exception_print (gdb_stderr, ex);
1948 val = NULL;
1949 }
1950 END_CATCH
1951
1952 if (val == NULL)
1953 {
1954 do_cleanups (old_chain);
1955 return;
1956 }
1957
1958 debug_base = value_as_address (val);
1959 if (debug_base == 0)
1960 {
1961 do_cleanups (old_chain);
1962 return;
1963 }
1964
1965 /* Always locate the debug struct, in case it moved. */
1966 info->debug_base = 0;
1967 if (locate_base (info) == 0)
1968 {
1969 do_cleanups (old_chain);
1970 return;
1971 }
1972
1973 /* GDB does not currently support libraries loaded via dlmopen
1974 into namespaces other than the initial one. We must ignore
1975 any namespace other than the initial namespace here until
1976 support for this is added to GDB. */
1977 if (debug_base != info->debug_base)
1978 action = DO_NOTHING;
1979
1980 if (action == UPDATE_OR_RELOAD)
1981 {
1982 TRY
1983 {
1984 val = pa->prob->evaluate_argument (2, frame);
1985 }
1986 CATCH (ex, RETURN_MASK_ERROR)
1987 {
1988 exception_print (gdb_stderr, ex);
1989 do_cleanups (old_chain);
1990 return;
1991 }
1992 END_CATCH
1993
1994 if (val != NULL)
1995 lm = value_as_address (val);
1996
1997 if (lm == 0)
1998 action = FULL_RELOAD;
1999 }
2000
2001 /* Resume section map updates. */
2002 do_cleanups (usm_chain);
2003
2004 if (action == UPDATE_OR_RELOAD)
2005 {
2006 if (!solist_update_incremental (info, lm))
2007 action = FULL_RELOAD;
2008 }
2009
2010 if (action == FULL_RELOAD)
2011 {
2012 if (!solist_update_full (info))
2013 {
2014 do_cleanups (old_chain);
2015 return;
2016 }
2017 }
2018
2019 discard_cleanups (old_chain);
2020 }
2021
2022 /* Helper function for svr4_update_solib_event_breakpoints. */
2023
2024 static int
2025 svr4_update_solib_event_breakpoint (struct breakpoint *b, void *arg)
2026 {
2027 struct bp_location *loc;
2028
2029 if (b->type != bp_shlib_event)
2030 {
2031 /* Continue iterating. */
2032 return 0;
2033 }
2034
2035 for (loc = b->loc; loc != NULL; loc = loc->next)
2036 {
2037 struct svr4_info *info;
2038 struct probe_and_action *pa;
2039
2040 info = ((struct svr4_info *)
2041 program_space_data (loc->pspace, solib_svr4_pspace_data));
2042 if (info == NULL || info->probes_table == NULL)
2043 continue;
2044
2045 pa = solib_event_probe_at (info, loc->address);
2046 if (pa == NULL)
2047 continue;
2048
2049 if (pa->action == DO_NOTHING)
2050 {
2051 if (b->enable_state == bp_disabled && stop_on_solib_events)
2052 enable_breakpoint (b);
2053 else if (b->enable_state == bp_enabled && !stop_on_solib_events)
2054 disable_breakpoint (b);
2055 }
2056
2057 break;
2058 }
2059
2060 /* Continue iterating. */
2061 return 0;
2062 }
2063
2064 /* Enable or disable optional solib event breakpoints as appropriate.
2065 Called whenever stop_on_solib_events is changed. */
2066
2067 static void
2068 svr4_update_solib_event_breakpoints (void)
2069 {
2070 iterate_over_breakpoints (svr4_update_solib_event_breakpoint, NULL);
2071 }
2072
2073 /* Create and register solib event breakpoints. PROBES is an array
2074 of NUM_PROBES elements, each of which is vector of probes. A
2075 solib event breakpoint will be created and registered for each
2076 probe. */
2077
2078 static void
2079 svr4_create_probe_breakpoints (struct gdbarch *gdbarch,
2080 const std::vector<probe *> *probes,
2081 struct objfile *objfile)
2082 {
2083 for (int i = 0; i < NUM_PROBES; i++)
2084 {
2085 enum probe_action action = probe_info[i].action;
2086
2087 for (probe *p : probes[i])
2088 {
2089 CORE_ADDR address = p->get_relocated_address (objfile);
2090
2091 create_solib_event_breakpoint (gdbarch, address);
2092 register_solib_event_probe (p, address, action);
2093 }
2094 }
2095
2096 svr4_update_solib_event_breakpoints ();
2097 }
2098
2099 /* Both the SunOS and the SVR4 dynamic linkers call a marker function
2100 before and after mapping and unmapping shared libraries. The sole
2101 purpose of this method is to allow debuggers to set a breakpoint so
2102 they can track these changes.
2103
2104 Some versions of the glibc dynamic linker contain named probes
2105    to allow more fine-grained stopping.  Given the address of the
2106 original marker function, this function attempts to find these
2107 probes, and if found, sets breakpoints on those instead. If the
2108 probes aren't found, a single breakpoint is set on the original
2109 marker function. */
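
/* Illustrative note (a sketch, not something this file relies on):
   whether a given ld.so carries these probes can be checked from a
   shell by dumping its SystemTap SDT notes, for example

	 readelf -n /lib64/ld-linux-x86-64.so.2

   which, on a glibc built with the probes, lists NT_STAPSDT notes
   whose provider is "rtld".  Those are the probes looked up by
   find_probes_in_objfile below; the path shown is only an example
   and varies between distributions.  */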
2110
2111 static void
2112 svr4_create_solib_event_breakpoints (struct gdbarch *gdbarch,
2113 CORE_ADDR address)
2114 {
2115 struct obj_section *os;
2116
2117 os = find_pc_section (address);
2118 if (os != NULL)
2119 {
2120 int with_prefix;
2121
2122 for (with_prefix = 0; with_prefix <= 1; with_prefix++)
2123 {
2124 std::vector<probe *> probes[NUM_PROBES];
2125 int all_probes_found = 1;
2126 int checked_can_use_probe_arguments = 0;
2127
2128 for (int i = 0; i < NUM_PROBES; i++)
2129 {
2130 const char *name = probe_info[i].name;
2131 probe *p;
2132 char buf[32];
2133
2134 /* Fedora 17 and Red Hat Enterprise Linux 6.2-6.4
2135 shipped with an early version of the probes code in
2136 which the probes' names were prefixed with "rtld_"
2137 and the "map_failed" probe did not exist. The
2138 locations of the probes are otherwise the same, so
2139 we check for probes with prefixed names if probes
2140 with unprefixed names are not present. */
2141 if (with_prefix)
2142 {
2143 xsnprintf (buf, sizeof (buf), "rtld_%s", name);
2144 name = buf;
2145 }
2146
2147 probes[i] = find_probes_in_objfile (os->objfile, "rtld", name);
2148
2149 /* The "map_failed" probe did not exist in early
2150 versions of the probes code in which the probes'
2151 names were prefixed with "rtld_". */
2152 if (strcmp (name, "rtld_map_failed") == 0)
2153 continue;
2154
2155 if (probes[i].empty ())
2156 {
2157 all_probes_found = 0;
2158 break;
2159 }
2160
2161 /* Ensure probe arguments can be evaluated. */
2162 if (!checked_can_use_probe_arguments)
2163 {
2164 p = probes[i][0];
2165 if (!p->can_evaluate_arguments ())
2166 {
2167 all_probes_found = 0;
2168 break;
2169 }
2170 checked_can_use_probe_arguments = 1;
2171 }
2172 }
2173
2174 if (all_probes_found)
2175 svr4_create_probe_breakpoints (gdbarch, probes, os->objfile);
2176
2177 if (all_probes_found)
2178 return;
2179 }
2180 }
2181
2182 create_solib_event_breakpoint (gdbarch, address);
2183 }
2184
2185 /* Helper function for gdb_bfd_lookup_symbol. */
2186
2187 static int
2188 cmp_name_and_sec_flags (const asymbol *sym, const void *data)
2189 {
2190 return (strcmp (sym->name, (const char *) data) == 0
2191 && (sym->section->flags & (SEC_CODE | SEC_DATA)) != 0);
2192 }
2193 /* Arrange for dynamic linker to hit breakpoint.
2194
2195 Both the SunOS and the SVR4 dynamic linkers have, as part of their
2196 debugger interface, support for arranging for the inferior to hit
2197 a breakpoint after mapping in the shared libraries. This function
2198 enables that breakpoint.
2199
2200 For SunOS, there is a special flag location (in_debugger) which we
2201 set to 1. When the dynamic linker sees this flag set, it will set
2202 a breakpoint at a location known only to itself, after saving the
2203 original contents of that place and the breakpoint address itself,
2204    in its own internal structures.  When we resume the inferior, it
2205 will eventually take a SIGTRAP when it runs into the breakpoint.
2206 We handle this (in a different place) by restoring the contents of
2207 the breakpointed location (which is only known after it stops),
2208 chasing around to locate the shared libraries that have been
2209 loaded, then resuming.
2210
2211 For SVR4, the debugger interface structure contains a member (r_brk)
2212 which is statically initialized at the time the shared library is
2213 built, to the offset of a function (_r_debug_state) which is guaran-
2214 teed to be called once before mapping in a library, and again when
2215 the mapping is complete. At the time we are examining this member,
2216 it contains only the unrelocated offset of the function, so we have
2217 to do our own relocation. Later, when the dynamic linker actually
2218 runs, it relocates r_brk to be the actual address of _r_debug_state().
2219
2220 The debugger interface structure also contains an enumeration which
2221 is set to either RT_ADD or RT_DELETE prior to changing the mapping,
2222 depending upon whether or not the library is being mapped or unmapped,
2223 and then set to RT_CONSISTENT after the library is mapped/unmapped. */
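
/* For reference, a minimal sketch (member names as in glibc's
   <link.h>; this declaration is not used by GDB itself) of the
   structure described above:

	 struct r_debug
	   {
	     int r_version;		     -- protocol version
	     struct link_map *r_map;	     -- head of the loaded-object list
	     ElfW(Addr) r_brk;		     -- address of the marker function
	     enum { RT_CONSISTENT, RT_ADD, RT_DELETE } r_state;
	     ElfW(Addr) r_ldbase;	     -- base address of the dynamic linker
	   };

   The link_map_offsets tables at the end of this file encode where
   the corresponding members live for ILP32 and LP64 targets.  */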
2224
2225 static int
2226 enable_break (struct svr4_info *info, int from_tty)
2227 {
2228 struct bound_minimal_symbol msymbol;
2229 const char * const *bkpt_namep;
2230 asection *interp_sect;
2231 char *interp_name;
2232 CORE_ADDR sym_addr;
2233
2234 info->interp_text_sect_low = info->interp_text_sect_high = 0;
2235 info->interp_plt_sect_low = info->interp_plt_sect_high = 0;
2236
2237 /* If we already have a shared library list in the target, and
2238 r_debug contains r_brk, set the breakpoint there - this should
2239 mean r_brk has already been relocated. Assume the dynamic linker
2240 is the object containing r_brk. */
2241
2242 solib_add (NULL, from_tty, auto_solib_add);
2243 sym_addr = 0;
2244 if (info->debug_base && solib_svr4_r_map (info) != 0)
2245 sym_addr = solib_svr4_r_brk (info);
2246
2247 if (sym_addr != 0)
2248 {
2249 struct obj_section *os;
2250
2251 sym_addr = gdbarch_addr_bits_remove
2252 (target_gdbarch (), gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
2253 sym_addr,
2254 &current_target));
2255
2256 /* On at least some versions of Solaris there's a dynamic relocation
2257 on _r_debug.r_brk and SYM_ADDR may not be relocated yet, e.g., if
2258 we get control before the dynamic linker has self-relocated.
2259 Check if SYM_ADDR is in a known section, if it is assume we can
2260 trust its value. This is just a heuristic though, it could go away
2261 or be replaced if it's getting in the way.
2262
2263 On ARM we need to know whether the ISA of rtld_db_dlactivity (or
2264 however it's spelled in your particular system) is ARM or Thumb.
2265 That knowledge is encoded in the address, if it's Thumb the low bit
2266 is 1. However, we've stripped that info above and it's not clear
2267 what all the consequences are of passing a non-addr_bits_remove'd
2268 address to svr4_create_solib_event_breakpoints. The call to
2269 find_pc_section verifies we know about the address and have some
2270 hope of computing the right kind of breakpoint to use (via
2271 symbol info). It does mean that GDB needs to be pointed at a
2272 non-stripped version of the dynamic linker in order to obtain
2273 information it already knows about. Sigh. */
2274
2275 os = find_pc_section (sym_addr);
2276 if (os != NULL)
2277 {
2278 /* Record the relocated start and end address of the dynamic linker
2279 text and plt section for svr4_in_dynsym_resolve_code. */
2280 bfd *tmp_bfd;
2281 CORE_ADDR load_addr;
2282
2283 tmp_bfd = os->objfile->obfd;
2284 load_addr = ANOFFSET (os->objfile->section_offsets,
2285 SECT_OFF_TEXT (os->objfile));
2286
2287 interp_sect = bfd_get_section_by_name (tmp_bfd, ".text");
2288 if (interp_sect)
2289 {
2290 info->interp_text_sect_low =
2291 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
2292 info->interp_text_sect_high =
2293 info->interp_text_sect_low
2294 + bfd_section_size (tmp_bfd, interp_sect);
2295 }
2296 interp_sect = bfd_get_section_by_name (tmp_bfd, ".plt");
2297 if (interp_sect)
2298 {
2299 info->interp_plt_sect_low =
2300 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
2301 info->interp_plt_sect_high =
2302 info->interp_plt_sect_low
2303 + bfd_section_size (tmp_bfd, interp_sect);
2304 }
2305
2306 svr4_create_solib_event_breakpoints (target_gdbarch (), sym_addr);
2307 return 1;
2308 }
2309 }
2310
2311 /* Find the program interpreter; if not found, warn the user and drop
2312      into the old breakpoint-at-symbol code.  */
2313 interp_name = find_program_interpreter ();
2314 if (interp_name)
2315 {
2316 CORE_ADDR load_addr = 0;
2317 int load_addr_found = 0;
2318 int loader_found_in_list = 0;
2319 struct so_list *so;
2320 struct target_ops *tmp_bfd_target;
2321
2322 sym_addr = 0;
2323
2324 /* Now we need to figure out where the dynamic linker was
2325 loaded so that we can load its symbols and place a breakpoint
2326 in the dynamic linker itself.
2327
2328 This address is stored on the stack. However, I've been unable
2329 to find any magic formula to find it for Solaris (appears to
2330 be trivial on GNU/Linux). Therefore, we have to try an alternate
2331 mechanism to find the dynamic linker's base address. */
2332
2333 gdb_bfd_ref_ptr tmp_bfd;
2334 TRY
2335 {
2336 tmp_bfd = solib_bfd_open (interp_name);
2337 }
2338 CATCH (ex, RETURN_MASK_ALL)
2339 {
2340 }
2341 END_CATCH
2342
2343 if (tmp_bfd == NULL)
2344 goto bkpt_at_symbol;
2345
2346 /* Now convert the TMP_BFD into a target. That way target, as
2347 well as BFD operations can be used. target_bfd_reopen
2348 acquires its own reference. */
2349 tmp_bfd_target = target_bfd_reopen (tmp_bfd.get ());
2350
2351 /* On a running target, we can get the dynamic linker's base
2352 address from the shared library table. */
2353 so = master_so_list ();
2354 while (so)
2355 {
2356 if (svr4_same_1 (interp_name, so->so_original_name))
2357 {
2358 load_addr_found = 1;
2359 loader_found_in_list = 1;
2360 load_addr = lm_addr_check (so, tmp_bfd.get ());
2361 break;
2362 }
2363 so = so->next;
2364 }
2365
2366 /* If we were not able to find the base address of the loader
2367 	 from our so_list, then try using the AT_BASE auxiliary entry.  */
2368 if (!load_addr_found)
2369 if (target_auxv_search (&current_target, AT_BASE, &load_addr) > 0)
2370 {
2371 int addr_bit = gdbarch_addr_bit (target_gdbarch ());
2372
2373 /* Ensure LOAD_ADDR has proper sign in its possible upper bits so
2374 	    that `+ load_addr' will overflow CORE_ADDR width rather than
2375 	    creating invalid addresses like 0x101234567 for 32-bit inferiors
2376 	    on 64-bit GDB.  */
2377
2378 if (addr_bit < (sizeof (CORE_ADDR) * HOST_CHAR_BIT))
2379 {
2380 CORE_ADDR space_size = (CORE_ADDR) 1 << addr_bit;
2381 CORE_ADDR tmp_entry_point = exec_entry_point (tmp_bfd.get (),
2382 tmp_bfd_target);
2383
2384 gdb_assert (load_addr < space_size);
2385
2386 /* TMP_ENTRY_POINT exceeding SPACE_SIZE would be for prelinked
2387 		 64-bit ld.so with a 32-bit executable; it should not happen.  */
2388
2389 if (tmp_entry_point < space_size
2390 && tmp_entry_point + load_addr >= space_size)
2391 load_addr -= space_size;
2392 }
2393
2394 load_addr_found = 1;
2395 }
2396
2397 /* Otherwise we find the dynamic linker's base address by examining
2398 the current pc (which should point at the entry point for the
2399 dynamic linker) and subtracting the offset of the entry point.
2400
2401 This is more fragile than the previous approaches, but is a good
2402 fallback method because it has actually been working well in
2403 most cases. */
2404 if (!load_addr_found)
2405 {
2406 struct regcache *regcache
2407 = get_thread_arch_regcache (inferior_ptid, target_gdbarch ());
2408
2409 load_addr = (regcache_read_pc (regcache)
2410 - exec_entry_point (tmp_bfd.get (), tmp_bfd_target));
2411 }
2412
2413 if (!loader_found_in_list)
2414 {
2415 info->debug_loader_name = xstrdup (interp_name);
2416 info->debug_loader_offset_p = 1;
2417 info->debug_loader_offset = load_addr;
2418 solib_add (NULL, from_tty, auto_solib_add);
2419 }
2420
2421 /* Record the relocated start and end address of the dynamic linker
2422 text and plt section for svr4_in_dynsym_resolve_code. */
2423 interp_sect = bfd_get_section_by_name (tmp_bfd.get (), ".text");
2424 if (interp_sect)
2425 {
2426 info->interp_text_sect_low =
2427 bfd_section_vma (tmp_bfd.get (), interp_sect) + load_addr;
2428 info->interp_text_sect_high =
2429 info->interp_text_sect_low
2430 + bfd_section_size (tmp_bfd.get (), interp_sect);
2431 }
2432 interp_sect = bfd_get_section_by_name (tmp_bfd.get (), ".plt");
2433 if (interp_sect)
2434 {
2435 info->interp_plt_sect_low =
2436 bfd_section_vma (tmp_bfd.get (), interp_sect) + load_addr;
2437 info->interp_plt_sect_high =
2438 info->interp_plt_sect_low
2439 + bfd_section_size (tmp_bfd.get (), interp_sect);
2440 }
2441
2442 /* Now try to set a breakpoint in the dynamic linker. */
2443 for (bkpt_namep = solib_break_names; *bkpt_namep != NULL; bkpt_namep++)
2444 {
2445 sym_addr = gdb_bfd_lookup_symbol (tmp_bfd.get (),
2446 cmp_name_and_sec_flags,
2447 *bkpt_namep);
2448 if (sym_addr != 0)
2449 break;
2450 }
2451
2452 if (sym_addr != 0)
2453 /* Convert 'sym_addr' from a function pointer to an address.
2454 Because we pass tmp_bfd_target instead of the current
2455 target, this will always produce an unrelocated value. */
2456 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
2457 sym_addr,
2458 tmp_bfd_target);
2459
2460 /* We're done with both the temporary bfd and target. Closing
2461 the target closes the underlying bfd, because it holds the
2462 only remaining reference. */
2463 target_close (tmp_bfd_target);
2464
2465 if (sym_addr != 0)
2466 {
2467 svr4_create_solib_event_breakpoints (target_gdbarch (),
2468 load_addr + sym_addr);
2469 xfree (interp_name);
2470 return 1;
2471 }
2472
2473 /* For whatever reason we couldn't set a breakpoint in the dynamic
2474 linker. Warn and drop into the old code. */
2475 bkpt_at_symbol:
2476 xfree (interp_name);
2477 warning (_("Unable to find dynamic linker breakpoint function.\n"
2478 "GDB will be unable to debug shared library initializers\n"
2479 "and track explicitly loaded dynamic code."));
2480 }
2481
2482 /* Scan through the lists of symbols, trying to look up the symbol and
2483      set a breakpoint there.  Terminate the loop if/when we succeed.  */
2484
2485 for (bkpt_namep = solib_break_names; *bkpt_namep != NULL; bkpt_namep++)
2486 {
2487 msymbol = lookup_minimal_symbol (*bkpt_namep, NULL, symfile_objfile);
2488 if ((msymbol.minsym != NULL)
2489 && (BMSYMBOL_VALUE_ADDRESS (msymbol) != 0))
2490 {
2491 sym_addr = BMSYMBOL_VALUE_ADDRESS (msymbol);
2492 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
2493 sym_addr,
2494 &current_target);
2495 svr4_create_solib_event_breakpoints (target_gdbarch (), sym_addr);
2496 return 1;
2497 }
2498 }
2499
2500 if (interp_name != NULL && !current_inferior ()->attach_flag)
2501 {
2502 for (bkpt_namep = bkpt_names; *bkpt_namep != NULL; bkpt_namep++)
2503 {
2504 msymbol = lookup_minimal_symbol (*bkpt_namep, NULL, symfile_objfile);
2505 if ((msymbol.minsym != NULL)
2506 && (BMSYMBOL_VALUE_ADDRESS (msymbol) != 0))
2507 {
2508 sym_addr = BMSYMBOL_VALUE_ADDRESS (msymbol);
2509 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
2510 sym_addr,
2511 &current_target);
2512 svr4_create_solib_event_breakpoints (target_gdbarch (), sym_addr);
2513 return 1;
2514 }
2515 }
2516 }
2517 return 0;
2518 }
2519
2520 /* Read the ELF program headers from ABFD. Return the contents and
2521 set *PHDRS_SIZE to the size of the program headers. */
2522
2523 static gdb_byte *
2524 read_program_headers_from_bfd (bfd *abfd, int *phdrs_size)
2525 {
2526 Elf_Internal_Ehdr *ehdr;
2527 gdb_byte *buf;
2528
2529 ehdr = elf_elfheader (abfd);
2530
2531 *phdrs_size = ehdr->e_phnum * ehdr->e_phentsize;
2532 if (*phdrs_size == 0)
2533 return NULL;
2534
2535 buf = (gdb_byte *) xmalloc (*phdrs_size);
2536 if (bfd_seek (abfd, ehdr->e_phoff, SEEK_SET) != 0
2537 || bfd_bread (buf, *phdrs_size, abfd) != *phdrs_size)
2538 {
2539 xfree (buf);
2540 return NULL;
2541 }
2542
2543 return buf;
2544 }
2545
2546 /* Return 1 and fill *DISPLACEMENTP with detected PIE offset of inferior
2547 exec_bfd. Otherwise return 0.
2548
2549 We relocate all of the sections by the same amount. This
2550 behavior is mandated by recent editions of the System V ABI.
2551 According to the System V Application Binary Interface,
2552 Edition 4.1, page 5-5:
2553
2554 ... Though the system chooses virtual addresses for
2555 individual processes, it maintains the segments' relative
2556 positions. Because position-independent code uses relative
2557      addressing between segments, the difference between
2558 virtual addresses in memory must match the difference
2559 between virtual addresses in the file. The difference
2560 between the virtual address of any segment in memory and
2561 the corresponding virtual address in the file is thus a
2562 single constant value for any one executable or shared
2563 object in a given process. This difference is the base
2564 address. One use of the base address is to relocate the
2565 memory image of the program during dynamic linking.
2566
2567 The same language also appears in Edition 4.0 of the System V
2568 ABI and is left unspecified in some of the earlier editions.
2569
2570 Decide if the objfile needs to be relocated. As indicated above, we will
2571 only be here when execution is stopped. But during attachment PC can be at
2572 arbitrary address therefore regcache_read_pc can be misleading (contrary to
2573 the auxv AT_ENTRY value). Moreover for executable with interpreter section
2574 regcache_read_pc would point to the interpreter and not the main executable.
2575
2576 So, to summarize, relocations are necessary when the start address obtained
2577 from the executable is different from the address in auxv AT_ENTRY entry.
2578
2579 [ The astute reader will note that we also test to make sure that
2580 the executable in question has the DYNAMIC flag set. It is my
2581 opinion that this test is unnecessary (undesirable even). It
2582 was added to avoid inadvertent relocation of an executable
2583 whose e_type member in the ELF header is not ET_DYN. There may
2584 be a time in the future when it is desirable to do relocations
2585 on other types of files as well in which case this condition
2586      should either be removed or modified to accommodate the new file
2587 type. - Kevin, Nov 2000. ] */
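
/* A worked example (the numbers are illustrative, not taken from any
   particular system): if the PIE's ELF header records
   e_entry == 0x1040 while the auxv AT_ENTRY entry reports
   0x555555555040, then

	 exec_displacement = 0x555555555040 - 0x1040 = 0x555555554000

   and, subject to the sanity checks below, every section of the main
   executable is relocated by that single constant.  */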
2588
2589 static int
2590 svr4_exec_displacement (CORE_ADDR *displacementp)
2591 {
2592 /* ENTRY_POINT is a possible function descriptor - before
2593 a call to gdbarch_convert_from_func_ptr_addr. */
2594 CORE_ADDR entry_point, exec_displacement;
2595
2596 if (exec_bfd == NULL)
2597 return 0;
2598
2599 /* Therefore for ELF it is ET_EXEC and not ET_DYN. Both shared libraries
2600 being executed themselves and PIE (Position Independent Executable)
2601 executables are ET_DYN. */
2602
2603 if ((bfd_get_file_flags (exec_bfd) & DYNAMIC) == 0)
2604 return 0;
2605
2606 if (target_auxv_search (&current_target, AT_ENTRY, &entry_point) <= 0)
2607 return 0;
2608
2609 exec_displacement = entry_point - bfd_get_start_address (exec_bfd);
2610
2611 /* Verify the EXEC_DISPLACEMENT candidate complies with the required page
2612 alignment. It is cheaper than the program headers comparison below. */
2613
2614 if (bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
2615 {
2616 const struct elf_backend_data *elf = get_elf_backend_data (exec_bfd);
2617
2618 /* p_align of PT_LOAD segments does not specify any alignment but
2619 only congruency of addresses:
2620 p_offset % p_align == p_vaddr % p_align
2621 	 The kernel is free to load the executable with lower alignment.  */
2622
2623 if ((exec_displacement & (elf->minpagesize - 1)) != 0)
2624 return 0;
2625 }
2626
2627   /* Verify that the auxiliary vector describes the same file as exec_bfd, by
2628      comparing their program headers.  If the program headers in the auxiliary
2629 vector do not match the program headers in the executable, then we are
2630 looking at a different file than the one used by the kernel - for
2631 instance, "gdb program" connected to "gdbserver :PORT ld.so program". */
2632
2633 if (bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
2634 {
2635 /* Be optimistic and clear OK only if GDB was able to verify the headers
2636 really do not match. */
2637 int phdrs_size, phdrs2_size, ok = 1;
2638 gdb_byte *buf, *buf2;
2639 int arch_size;
2640
2641 buf = read_program_header (-1, &phdrs_size, &arch_size, NULL);
2642 buf2 = read_program_headers_from_bfd (exec_bfd, &phdrs2_size);
2643 if (buf != NULL && buf2 != NULL)
2644 {
2645 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
2646
2647 	  /* We are dealing with three different addresses.  EXEC_BFD
2648 	     represents the current address in the on-disk file.  The target
2649 	     memory content may differ from EXEC_BFD, as the file may have been
2650 	     prelinked to a different address after the executable was loaded.
2651 	     Moreover, the address of placement in target memory can differ
2652 	     from what the program headers in target memory say - this is the
2653 	     goal of PIE.
2654
2655 	     The detected DISPLACEMENT covers both the offset of PIE placement
2656 	     and any new prelink performed after the program started.  Here we
2657 	     relocate BUF and BUF2 just by the EXEC_BFD vs. target memory
2658 	     content offset, for verification purposes.  */
2659
2660 if (phdrs_size != phdrs2_size
2661 || bfd_get_arch_size (exec_bfd) != arch_size)
2662 ok = 0;
2663 else if (arch_size == 32
2664 && phdrs_size >= sizeof (Elf32_External_Phdr)
2665 && phdrs_size % sizeof (Elf32_External_Phdr) == 0)
2666 {
2667 Elf_Internal_Ehdr *ehdr2 = elf_tdata (exec_bfd)->elf_header;
2668 Elf_Internal_Phdr *phdr2 = elf_tdata (exec_bfd)->phdr;
2669 CORE_ADDR displacement = 0;
2670 int i;
2671
2672 /* DISPLACEMENT could be found more easily by the difference of
2673 ehdr2->e_entry. But we haven't read the ehdr yet, and we
2674 already have enough information to compute that displacement
2675 with what we've read. */
2676
2677 for (i = 0; i < ehdr2->e_phnum; i++)
2678 if (phdr2[i].p_type == PT_LOAD)
2679 {
2680 Elf32_External_Phdr *phdrp;
2681 gdb_byte *buf_vaddr_p, *buf_paddr_p;
2682 CORE_ADDR vaddr, paddr;
2683 CORE_ADDR displacement_vaddr = 0;
2684 CORE_ADDR displacement_paddr = 0;
2685
2686 phdrp = &((Elf32_External_Phdr *) buf)[i];
2687 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
2688 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
2689
2690 vaddr = extract_unsigned_integer (buf_vaddr_p, 4,
2691 byte_order);
2692 displacement_vaddr = vaddr - phdr2[i].p_vaddr;
2693
2694 paddr = extract_unsigned_integer (buf_paddr_p, 4,
2695 byte_order);
2696 displacement_paddr = paddr - phdr2[i].p_paddr;
2697
2698 if (displacement_vaddr == displacement_paddr)
2699 displacement = displacement_vaddr;
2700
2701 break;
2702 }
2703
2704 /* Now compare BUF and BUF2 with optional DISPLACEMENT. */
2705
2706 for (i = 0; i < phdrs_size / sizeof (Elf32_External_Phdr); i++)
2707 {
2708 Elf32_External_Phdr *phdrp;
2709 Elf32_External_Phdr *phdr2p;
2710 gdb_byte *buf_vaddr_p, *buf_paddr_p;
2711 CORE_ADDR vaddr, paddr;
2712 asection *plt2_asect;
2713
2714 phdrp = &((Elf32_External_Phdr *) buf)[i];
2715 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
2716 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
2717 phdr2p = &((Elf32_External_Phdr *) buf2)[i];
2718
2719 	      /* PT_GNU_STACK is an exception in that it is never relocated by
2720 		 prelink, as its addresses are always zero.  */
2721
2722 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2723 continue;
2724
2725 /* Check also other adjustment combinations - PR 11786. */
2726
2727 vaddr = extract_unsigned_integer (buf_vaddr_p, 4,
2728 byte_order);
2729 vaddr -= displacement;
2730 store_unsigned_integer (buf_vaddr_p, 4, byte_order, vaddr);
2731
2732 paddr = extract_unsigned_integer (buf_paddr_p, 4,
2733 byte_order);
2734 paddr -= displacement;
2735 store_unsigned_integer (buf_paddr_p, 4, byte_order, paddr);
2736
2737 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2738 continue;
2739
2740 /* Strip modifies the flags and alignment of PT_GNU_RELRO.
2741 CentOS-5 has problems with filesz, memsz as well.
2742 See PR 11786. */
2743 if (phdr2[i].p_type == PT_GNU_RELRO)
2744 {
2745 Elf32_External_Phdr tmp_phdr = *phdrp;
2746 Elf32_External_Phdr tmp_phdr2 = *phdr2p;
2747
2748 memset (tmp_phdr.p_filesz, 0, 4);
2749 memset (tmp_phdr.p_memsz, 0, 4);
2750 memset (tmp_phdr.p_flags, 0, 4);
2751 memset (tmp_phdr.p_align, 0, 4);
2752 memset (tmp_phdr2.p_filesz, 0, 4);
2753 memset (tmp_phdr2.p_memsz, 0, 4);
2754 memset (tmp_phdr2.p_flags, 0, 4);
2755 memset (tmp_phdr2.p_align, 0, 4);
2756
2757 if (memcmp (&tmp_phdr, &tmp_phdr2, sizeof (tmp_phdr))
2758 == 0)
2759 continue;
2760 }
2761
2762 /* prelink can convert .plt SHT_NOBITS to SHT_PROGBITS. */
2763 plt2_asect = bfd_get_section_by_name (exec_bfd, ".plt");
2764 if (plt2_asect)
2765 {
2766 int content2;
2767 gdb_byte *buf_filesz_p = (gdb_byte *) &phdrp->p_filesz;
2768 CORE_ADDR filesz;
2769
2770 content2 = (bfd_get_section_flags (exec_bfd, plt2_asect)
2771 & SEC_HAS_CONTENTS) != 0;
2772
2773 filesz = extract_unsigned_integer (buf_filesz_p, 4,
2774 byte_order);
2775
2776 /* PLT2_ASECT is from on-disk file (exec_bfd) while
2777 FILESZ is from the in-memory image. */
2778 if (content2)
2779 filesz += bfd_get_section_size (plt2_asect);
2780 else
2781 filesz -= bfd_get_section_size (plt2_asect);
2782
2783 store_unsigned_integer (buf_filesz_p, 4, byte_order,
2784 filesz);
2785
2786 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2787 continue;
2788 }
2789
2790 ok = 0;
2791 break;
2792 }
2793 }
2794 else if (arch_size == 64
2795 && phdrs_size >= sizeof (Elf64_External_Phdr)
2796 && phdrs_size % sizeof (Elf64_External_Phdr) == 0)
2797 {
2798 Elf_Internal_Ehdr *ehdr2 = elf_tdata (exec_bfd)->elf_header;
2799 Elf_Internal_Phdr *phdr2 = elf_tdata (exec_bfd)->phdr;
2800 CORE_ADDR displacement = 0;
2801 int i;
2802
2803 /* DISPLACEMENT could be found more easily by the difference of
2804 ehdr2->e_entry. But we haven't read the ehdr yet, and we
2805 already have enough information to compute that displacement
2806 with what we've read. */
2807
2808 for (i = 0; i < ehdr2->e_phnum; i++)
2809 if (phdr2[i].p_type == PT_LOAD)
2810 {
2811 Elf64_External_Phdr *phdrp;
2812 gdb_byte *buf_vaddr_p, *buf_paddr_p;
2813 CORE_ADDR vaddr, paddr;
2814 CORE_ADDR displacement_vaddr = 0;
2815 CORE_ADDR displacement_paddr = 0;
2816
2817 phdrp = &((Elf64_External_Phdr *) buf)[i];
2818 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
2819 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
2820
2821 vaddr = extract_unsigned_integer (buf_vaddr_p, 8,
2822 byte_order);
2823 displacement_vaddr = vaddr - phdr2[i].p_vaddr;
2824
2825 paddr = extract_unsigned_integer (buf_paddr_p, 8,
2826 byte_order);
2827 displacement_paddr = paddr - phdr2[i].p_paddr;
2828
2829 if (displacement_vaddr == displacement_paddr)
2830 displacement = displacement_vaddr;
2831
2832 break;
2833 }
2834
2835 /* Now compare BUF and BUF2 with optional DISPLACEMENT. */
2836
2837 for (i = 0; i < phdrs_size / sizeof (Elf64_External_Phdr); i++)
2838 {
2839 Elf64_External_Phdr *phdrp;
2840 Elf64_External_Phdr *phdr2p;
2841 gdb_byte *buf_vaddr_p, *buf_paddr_p;
2842 CORE_ADDR vaddr, paddr;
2843 asection *plt2_asect;
2844
2845 phdrp = &((Elf64_External_Phdr *) buf)[i];
2846 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
2847 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
2848 phdr2p = &((Elf64_External_Phdr *) buf2)[i];
2849
2850 	      /* PT_GNU_STACK is an exception in that it is never relocated by
2851 		 prelink, as its addresses are always zero.  */
2852
2853 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2854 continue;
2855
2856 /* Check also other adjustment combinations - PR 11786. */
2857
2858 vaddr = extract_unsigned_integer (buf_vaddr_p, 8,
2859 byte_order);
2860 vaddr -= displacement;
2861 store_unsigned_integer (buf_vaddr_p, 8, byte_order, vaddr);
2862
2863 paddr = extract_unsigned_integer (buf_paddr_p, 8,
2864 byte_order);
2865 paddr -= displacement;
2866 store_unsigned_integer (buf_paddr_p, 8, byte_order, paddr);
2867
2868 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2869 continue;
2870
2871 /* Strip modifies the flags and alignment of PT_GNU_RELRO.
2872 CentOS-5 has problems with filesz, memsz as well.
2873 See PR 11786. */
2874 if (phdr2[i].p_type == PT_GNU_RELRO)
2875 {
2876 Elf64_External_Phdr tmp_phdr = *phdrp;
2877 Elf64_External_Phdr tmp_phdr2 = *phdr2p;
2878
2879 memset (tmp_phdr.p_filesz, 0, 8);
2880 memset (tmp_phdr.p_memsz, 0, 8);
2881 memset (tmp_phdr.p_flags, 0, 4);
2882 memset (tmp_phdr.p_align, 0, 8);
2883 memset (tmp_phdr2.p_filesz, 0, 8);
2884 memset (tmp_phdr2.p_memsz, 0, 8);
2885 memset (tmp_phdr2.p_flags, 0, 4);
2886 memset (tmp_phdr2.p_align, 0, 8);
2887
2888 if (memcmp (&tmp_phdr, &tmp_phdr2, sizeof (tmp_phdr))
2889 == 0)
2890 continue;
2891 }
2892
2893 /* prelink can convert .plt SHT_NOBITS to SHT_PROGBITS. */
2894 plt2_asect = bfd_get_section_by_name (exec_bfd, ".plt");
2895 if (plt2_asect)
2896 {
2897 int content2;
2898 gdb_byte *buf_filesz_p = (gdb_byte *) &phdrp->p_filesz;
2899 CORE_ADDR filesz;
2900
2901 content2 = (bfd_get_section_flags (exec_bfd, plt2_asect)
2902 & SEC_HAS_CONTENTS) != 0;
2903
2904 filesz = extract_unsigned_integer (buf_filesz_p, 8,
2905 byte_order);
2906
2907 /* PLT2_ASECT is from on-disk file (exec_bfd) while
2908 FILESZ is from the in-memory image. */
2909 if (content2)
2910 filesz += bfd_get_section_size (plt2_asect);
2911 else
2912 filesz -= bfd_get_section_size (plt2_asect);
2913
2914 store_unsigned_integer (buf_filesz_p, 8, byte_order,
2915 filesz);
2916
2917 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2918 continue;
2919 }
2920
2921 ok = 0;
2922 break;
2923 }
2924 }
2925 else
2926 ok = 0;
2927 }
2928
2929 xfree (buf);
2930 xfree (buf2);
2931
2932 if (!ok)
2933 return 0;
2934 }
2935
2936 if (info_verbose)
2937 {
2938       /* This can be printed repeatedly, as there is no easy way to check
2939 	 whether the executable symbols/file have already been relocated by
2940 	 the displacement.  */
2941
2942 printf_unfiltered (_("Using PIE (Position Independent Executable) "
2943 "displacement %s for \"%s\".\n"),
2944 paddress (target_gdbarch (), exec_displacement),
2945 bfd_get_filename (exec_bfd));
2946 }
2947
2948 *displacementp = exec_displacement;
2949 return 1;
2950 }
2951
2952 /* Relocate the main executable. This function should be called upon
2953 stopping the inferior process at the entry point to the program.
2954    The entry point from BFD is compared to the AT_ENTRY of AUXV, and if they
2955    are different, the main executable is relocated by the proper amount.  */
2956
2957 static void
2958 svr4_relocate_main_executable (void)
2959 {
2960 CORE_ADDR displacement;
2961
2962 /* If we are re-running this executable, SYMFILE_OBJFILE->SECTION_OFFSETS
2963 probably contains the offsets computed using the PIE displacement
2964 from the previous run, which of course are irrelevant for this run.
2965 So we need to determine the new PIE displacement and recompute the
2966 section offsets accordingly, even if SYMFILE_OBJFILE->SECTION_OFFSETS
2967 already contains pre-computed offsets.
2968
2969 If we cannot compute the PIE displacement, either:
2970
2971 - The executable is not PIE.
2972
2973 - SYMFILE_OBJFILE does not match the executable started in the target.
2974 This can happen for main executable symbols loaded at the host while
2975 `ld.so --ld-args main-executable' is loaded in the target.
2976
2977 Then we leave the section offsets untouched and use them as is for
2978 this run. Either:
2979
2980 - These section offsets were properly reset earlier, and thus
2981 already contain the correct values. This can happen for instance
2982 when reconnecting via the remote protocol to a target that supports
2983 the `qOffsets' packet.
2984
2985 - The section offsets were not reset earlier, and the best we can
2986 hope is that the old offsets are still applicable to the new run. */
2987
2988 if (! svr4_exec_displacement (&displacement))
2989 return;
2990
2991 /* Even DISPLACEMENT 0 is a valid new difference of in-memory vs. in-file
2992 addresses. */
2993
2994 if (symfile_objfile)
2995 {
2996 struct section_offsets *new_offsets;
2997 int i;
2998
2999 new_offsets = XALLOCAVEC (struct section_offsets,
3000 symfile_objfile->num_sections);
3001
3002 for (i = 0; i < symfile_objfile->num_sections; i++)
3003 new_offsets->offsets[i] = displacement;
3004
3005 objfile_relocate (symfile_objfile, new_offsets);
3006 }
3007 else if (exec_bfd)
3008 {
3009 asection *asect;
3010
3011 for (asect = exec_bfd->sections; asect != NULL; asect = asect->next)
3012 exec_set_section_address (bfd_get_filename (exec_bfd), asect->index,
3013 (bfd_section_vma (exec_bfd, asect)
3014 + displacement));
3015 }
3016 }
3017
3018 /* Implement the "create_inferior_hook" target_solib_ops method.
3019
3020    For SVR4 executables, the first instruction the inferior executes is
3021    either the first instruction in the dynamic linker (for dynamically
3022    linked executables) or the instruction at "start" for statically
3023    linked executables.  For dynamically linked executables, the system
3024 first exec's /lib/libc.so.N, which contains the dynamic linker,
3025 and starts it running. The dynamic linker maps in any needed
3026 shared libraries, maps in the actual user executable, and then
3027 jumps to "start" in the user executable.
3028
3029 We can arrange to cooperate with the dynamic linker to discover the
3030 names of shared libraries that are dynamically linked, and the base
3031 addresses to which they are linked.
3032
3033 This function is responsible for discovering those names and
3034 addresses, and saving sufficient information about them to allow
3035 their symbols to be read at a later time. */
3036
3037 static void
3038 svr4_solib_create_inferior_hook (int from_tty)
3039 {
3040 struct svr4_info *info;
3041
3042 info = get_svr4_info ();
3043
3044 /* Clear the probes-based interface's state. */
3045 free_probes_table (info);
3046 free_solib_list (info);
3047
3048 /* Relocate the main executable if necessary. */
3049 svr4_relocate_main_executable ();
3050
3051 /* No point setting a breakpoint in the dynamic linker if we can't
3052 hit it (e.g., a core file, or a trace file). */
3053 if (!target_has_execution)
3054 return;
3055
3056 if (!svr4_have_link_map_offsets ())
3057 return;
3058
3059 if (!enable_break (info, from_tty))
3060 return;
3061 }
3062
3063 static void
3064 svr4_clear_solib (void)
3065 {
3066 struct svr4_info *info;
3067
3068 info = get_svr4_info ();
3069 info->debug_base = 0;
3070 info->debug_loader_offset_p = 0;
3071 info->debug_loader_offset = 0;
3072 xfree (info->debug_loader_name);
3073 info->debug_loader_name = NULL;
3074 }
3075
3076 /* Clear any bits of ADDR that wouldn't fit in a target-format
3077 data pointer. "Data pointer" here refers to whatever sort of
3078 address the dynamic linker uses to manage its sections. At the
3079 moment, we don't support shared libraries on any processors where
3080 code and data pointers are different sizes.
3081
3082 This isn't really the right solution. What we really need here is
3083 a way to do arithmetic on CORE_ADDR values that respects the
3084 natural pointer/address correspondence. (For example, on the MIPS,
3085 converting a 32-bit pointer to a 64-bit CORE_ADDR requires you to
3086 sign-extend the value. There, simply truncating the bits above
3087 gdbarch_ptr_bit, as we do below, is no good.) This should probably
3088 be a new gdbarch method or something. */
3089 static CORE_ADDR
3090 svr4_truncate_ptr (CORE_ADDR addr)
3091 {
3092 if (gdbarch_ptr_bit (target_gdbarch ()) == sizeof (CORE_ADDR) * 8)
3093 /* We don't need to truncate anything, and the bit twiddling below
3094 will fail due to overflow problems. */
3095 return addr;
3096 else
3097 return addr & (((CORE_ADDR) 1 << gdbarch_ptr_bit (target_gdbarch ())) - 1);
3098 }
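
/* A worked example of the truncation above (values are illustrative):
   for a target with gdbarch_ptr_bit == 32 debugged by a GDB whose
   CORE_ADDR is 64 bits wide,

	 svr4_truncate_ptr (0x100001000) == 0x00001000

   i.e. only the low 32 bits survive.  When gdbarch_ptr_bit matches
   the CORE_ADDR width, the address is returned unchanged.  */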
3099
3100
3101 static void
3102 svr4_relocate_section_addresses (struct so_list *so,
3103 struct target_section *sec)
3104 {
3105 bfd *abfd = sec->the_bfd_section->owner;
3106
3107 sec->addr = svr4_truncate_ptr (sec->addr + lm_addr_check (so, abfd));
3108 sec->endaddr = svr4_truncate_ptr (sec->endaddr + lm_addr_check (so, abfd));
3109 }
3110 \f
3111
3112 /* Architecture-specific operations. */
3113
3114 /* Per-architecture data key. */
3115 static struct gdbarch_data *solib_svr4_data;
3116
3117 struct solib_svr4_ops
3118 {
3119 /* Return a description of the layout of `struct link_map'. */
3120 struct link_map_offsets *(*fetch_link_map_offsets)(void);
3121 };
3122
3123 /* Return a default for the architecture-specific operations. */
3124
3125 static void *
3126 solib_svr4_init (struct obstack *obstack)
3127 {
3128 struct solib_svr4_ops *ops;
3129
3130 ops = OBSTACK_ZALLOC (obstack, struct solib_svr4_ops);
3131 ops->fetch_link_map_offsets = NULL;
3132 return ops;
3133 }
3134
3135 /* Set the architecture-specific `struct link_map_offsets' fetcher for
3136 GDBARCH to FLMO. Also, install SVR4 solib_ops into GDBARCH. */
3137
3138 void
3139 set_solib_svr4_fetch_link_map_offsets (struct gdbarch *gdbarch,
3140 struct link_map_offsets *(*flmo) (void))
3141 {
3142 struct solib_svr4_ops *ops
3143 = (struct solib_svr4_ops *) gdbarch_data (gdbarch, solib_svr4_data);
3144
3145 ops->fetch_link_map_offsets = flmo;
3146
3147 set_solib_ops (gdbarch, &svr4_so_ops);
3148 }
3149
3150 /* Fetch a link_map_offsets structure using the architecture-specific
3151 `struct link_map_offsets' fetcher. */
3152
3153 static struct link_map_offsets *
3154 svr4_fetch_link_map_offsets (void)
3155 {
3156 struct solib_svr4_ops *ops
3157 = (struct solib_svr4_ops *) gdbarch_data (target_gdbarch (),
3158 solib_svr4_data);
3159
3160 gdb_assert (ops->fetch_link_map_offsets);
3161 return ops->fetch_link_map_offsets ();
3162 }
3163
3164 /* Return 1 if a link map offset fetcher has been defined, 0 otherwise. */
3165
3166 static int
3167 svr4_have_link_map_offsets (void)
3168 {
3169 struct solib_svr4_ops *ops
3170 = (struct solib_svr4_ops *) gdbarch_data (target_gdbarch (),
3171 solib_svr4_data);
3172
3173 return (ops->fetch_link_map_offsets != NULL);
3174 }
3175 \f
3176
3177 /* Most OSes that have SVR4-style ELF dynamic libraries define a
3178    `struct r_debug' and a `struct link_map' that are binary compatible
3179    with the original SVR4 implementation.  */
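
/* For reference, a minimal sketch (not code used by GDB) of the
   public part of that `struct link_map', as declared in glibc's
   <link.h>:

	 struct link_map
	   {
	     ElfW(Addr) l_addr;		     -- load bias of the object
	     char *l_name;		     -- absolute file name
	     ElfW(Dyn) *l_ld;		     -- the object's dynamic section
	     struct link_map *l_next, *l_prev;
	   };

   With 4-byte pointers these members fall at offsets 0, 4, 8, 12 and
   16 (20 bytes in all); with 8-byte pointers at 0, 8, 16, 24 and 32
   (40 bytes), which is what the two fetchers below encode.  */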
3180
3181 /* Fetch (and possibly build) an appropriate `struct link_map_offsets'
3182 for an ILP32 SVR4 system. */
3183
3184 struct link_map_offsets *
3185 svr4_ilp32_fetch_link_map_offsets (void)
3186 {
3187 static struct link_map_offsets lmo;
3188 static struct link_map_offsets *lmp = NULL;
3189
3190 if (lmp == NULL)
3191 {
3192 lmp = &lmo;
3193
3194 lmo.r_version_offset = 0;
3195 lmo.r_version_size = 4;
3196 lmo.r_map_offset = 4;
3197 lmo.r_brk_offset = 8;
3198 lmo.r_ldsomap_offset = 20;
3199
3200 /* Everything we need is in the first 20 bytes. */
3201 lmo.link_map_size = 20;
3202 lmo.l_addr_offset = 0;
3203 lmo.l_name_offset = 4;
3204 lmo.l_ld_offset = 8;
3205 lmo.l_next_offset = 12;
3206 lmo.l_prev_offset = 16;
3207 }
3208
3209 return lmp;
3210 }
3211
3212 /* Fetch (and possibly build) an appropriate `struct link_map_offsets'
3213 for an LP64 SVR4 system. */
3214
3215 struct link_map_offsets *
3216 svr4_lp64_fetch_link_map_offsets (void)
3217 {
3218 static struct link_map_offsets lmo;
3219 static struct link_map_offsets *lmp = NULL;
3220
3221 if (lmp == NULL)
3222 {
3223 lmp = &lmo;
3224
3225 lmo.r_version_offset = 0;
3226 lmo.r_version_size = 4;
3227 lmo.r_map_offset = 8;
3228 lmo.r_brk_offset = 16;
3229 lmo.r_ldsomap_offset = 40;
3230
3231 /* Everything we need is in the first 40 bytes. */
3232 lmo.link_map_size = 40;
3233 lmo.l_addr_offset = 0;
3234 lmo.l_name_offset = 8;
3235 lmo.l_ld_offset = 16;
3236 lmo.l_next_offset = 24;
3237 lmo.l_prev_offset = 32;
3238 }
3239
3240 return lmp;
3241 }
3242 \f
3243
3244 struct target_so_ops svr4_so_ops;
3245
3246 /* Lookup global symbol for ELF DSOs linked with -Bsymbolic. Those DSOs have a
3247 different rule for symbol lookup. The lookup begins here in the DSO, not in
3248 the main executable. */
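
/* Illustrative note (a sketch; the library name is made up): such a
   DSO is typically produced by linking with -Bsymbolic, e.g.

	 gcc -shared -Wl,-Bsymbolic -o libfoo.so foo.c

   which, with GNU ld, records a DT_SYMBOLIC entry in the DSO's
   dynamic section; the scan_dyntag check below keys off that tag.  */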
3249
3250 static struct block_symbol
3251 elf_lookup_lib_symbol (struct objfile *objfile,
3252 const char *name,
3253 const domain_enum domain)
3254 {
3255 bfd *abfd;
3256
3257 if (objfile == symfile_objfile)
3258 abfd = exec_bfd;
3259 else
3260 {
3261 /* OBJFILE should have been passed as the non-debug one. */
3262 gdb_assert (objfile->separate_debug_objfile_backlink == NULL);
3263
3264 abfd = objfile->obfd;
3265 }
3266
3267 if (abfd == NULL || scan_dyntag (DT_SYMBOLIC, abfd, NULL, NULL) != 1)
3268 return (struct block_symbol) {NULL, NULL};
3269
3270 return lookup_global_symbol_from_objfile (objfile, name, domain);
3271 }
3272
3273 void
3274 _initialize_svr4_solib (void)
3275 {
3276 solib_svr4_data = gdbarch_data_register_pre_init (solib_svr4_init);
3277 solib_svr4_pspace_data
3278 = register_program_space_data_with_cleanup (NULL, svr4_pspace_data_cleanup);
3279
3280 svr4_so_ops.relocate_section_addresses = svr4_relocate_section_addresses;
3281 svr4_so_ops.free_so = svr4_free_so;
3282 svr4_so_ops.clear_so = svr4_clear_so;
3283 svr4_so_ops.clear_solib = svr4_clear_solib;
3284 svr4_so_ops.solib_create_inferior_hook = svr4_solib_create_inferior_hook;
3285 svr4_so_ops.current_sos = svr4_current_sos;
3286 svr4_so_ops.open_symbol_file_object = open_symbol_file_object;
3287 svr4_so_ops.in_dynsym_resolve_code = svr4_in_dynsym_resolve_code;
3288 svr4_so_ops.bfd_open = solib_bfd_open;
3289 svr4_so_ops.lookup_lib_global_symbol = elf_lookup_lib_symbol;
3290 svr4_so_ops.same = svr4_same;
3291 svr4_so_ops.keep_data_in_core = svr4_keep_data_in_core;
3292 svr4_so_ops.update_breakpoints = svr4_update_solib_event_breakpoints;
3293 svr4_so_ops.handle_event = svr4_handle_solib_event;
3294 }
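
/* Usage sketch (illustrative only): an architecture opts in to this
   SVR4 support from its gdbarch initialization code by installing a
   link_map_offsets fetcher, e.g. for an ILP32 target

	 set_solib_svr4_fetch_link_map_offsets
	   (gdbarch, svr4_ilp32_fetch_link_map_offsets);

   which, as set_solib_svr4_fetch_link_map_offsets above shows, also
   installs svr4_so_ops as that architecture's target_so_ops.  */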