1 /* Handle SVR4 shared libraries for GDB, the GNU Debugger.
2
3 Copyright (C) 1990-2017 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21
22 #include "elf/external.h"
23 #include "elf/common.h"
24 #include "elf/mips.h"
25
26 #include "symtab.h"
27 #include "bfd.h"
28 #include "symfile.h"
29 #include "objfiles.h"
30 #include "gdbcore.h"
31 #include "target.h"
32 #include "inferior.h"
33 #include "infrun.h"
34 #include "regcache.h"
35 #include "gdbthread.h"
36 #include "observer.h"
37
38 #include "solist.h"
39 #include "solib.h"
40 #include "solib-svr4.h"
41
42 #include "bfd-target.h"
43 #include "elf-bfd.h"
44 #include "exec.h"
45 #include "auxv.h"
46 #include "gdb_bfd.h"
47 #include "probe.h"
48
49 static struct link_map_offsets *svr4_fetch_link_map_offsets (void);
50 static int svr4_have_link_map_offsets (void);
51 static void svr4_relocate_main_executable (void);
52 static void svr4_free_library_list (void *p_list);
53
54 /* On SVR4 systems, a list of symbols in the dynamic linker where
55 GDB can try to place a breakpoint to monitor shared library
56 events.
57
58 If none of these symbols are found, or other errors occur, then
59 SVR4 systems will fall back to using a symbol as the "startup
60 mapping complete" breakpoint address. */
61
62 static const char * const solib_break_names[] =
63 {
64 "r_debug_state",
65 "_r_debug_state",
66 "_dl_debug_state",
67 "rtld_db_dlactivity",
68 "__dl_rtld_db_dlactivity",
69 "_rtld_debug_state",
70
71 NULL
72 };
73
74 static const char * const bkpt_names[] =
75 {
76 "_start",
77 "__start",
78 "main",
79 NULL
80 };
81
82 static const char * const main_name_list[] =
83 {
84 "main_$main",
85 NULL
86 };
87
88 /* What to do when a probe stop occurs. */
89
90 enum probe_action
91 {
92 /* Something went seriously wrong. Stop using probes and
93 revert to using the older interface. */
94 PROBES_INTERFACE_FAILED,
95
96 /* No action is required. The shared object list is still
97 valid. */
98 DO_NOTHING,
99
100 /* The shared object list should be reloaded entirely. */
101 FULL_RELOAD,
102
103 /* Attempt to incrementally update the shared object list. If
104 the update fails or is not possible, fall back to reloading
105 the list in full. */
106 UPDATE_OR_RELOAD,
107 };
108
109 /* A probe's name and its associated action. */
110
111 struct probe_info
112 {
113 /* The name of the probe. */
114 const char *name;
115
116 /* What to do when a probe stop occurs. */
117 enum probe_action action;
118 };
119
120 /* A list of named probes and their associated actions. If all
121 probes are present in the dynamic linker then the probes-based
122 interface will be used. */
123
124 static const struct probe_info probe_info[] =
125 {
126 { "init_start", DO_NOTHING },
127 { "init_complete", FULL_RELOAD },
128 { "map_start", DO_NOTHING },
129 { "map_failed", DO_NOTHING },
130 { "reloc_complete", UPDATE_OR_RELOAD },
131 { "unmap_start", DO_NOTHING },
132 { "unmap_complete", FULL_RELOAD },
133 };
134
135 #define NUM_PROBES ARRAY_SIZE (probe_info)
136
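/* Editor's note: these probe names match the SystemTap static probe
   points that GNU/Linux dynamic linkers (glibc's ld.so) are expected to
   export for debuggers.  On such systems they can usually be listed from
   the linker's .note.stapsdt section, e.g. (illustrative command; the
   path is an assumption):

       readelf -n /lib64/ld-linux-x86-64.so.2 | grep -A1 stapsdt

   If any of the probes is missing, GDB falls back to breakpoints on the
   solib_break_names symbols above.  */
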
137 /* Return non-zero if GDB_SO_NAME and INFERIOR_SO_NAME represent
138 the same shared library. */
139
140 static int
141 svr4_same_1 (const char *gdb_so_name, const char *inferior_so_name)
142 {
143 if (strcmp (gdb_so_name, inferior_so_name) == 0)
144 return 1;
145
146 /* On Solaris, when starting the inferior we think that the dynamic linker
147 is /usr/lib/ld.so.1, but later on the table of loaded shared libraries
148 contains /lib/ld.so.1. Sometimes one file is a link to the other, and
149 sometimes the two have identical content but are not linked to each
150 other. We don't restrict this check to Solaris, but the chances
151 of running into this situation elsewhere are very low. */
152 if (strcmp (gdb_so_name, "/usr/lib/ld.so.1") == 0
153 && strcmp (inferior_so_name, "/lib/ld.so.1") == 0)
154 return 1;
155
156 /* Similarly, we observed the same issue with sparc64, but with
157 different locations. */
158 if (strcmp (gdb_so_name, "/usr/lib/sparcv9/ld.so.1") == 0
159 && strcmp (inferior_so_name, "/lib/sparcv9/ld.so.1") == 0)
160 return 1;
161
162 return 0;
163 }
164
165 static int
166 svr4_same (struct so_list *gdb, struct so_list *inferior)
167 {
168 return (svr4_same_1 (gdb->so_original_name, inferior->so_original_name));
169 }
170
171 static lm_info_svr4 *
172 lm_info_read (CORE_ADDR lm_addr)
173 {
174 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
175 gdb_byte *lm;
176 lm_info_svr4 *lm_info;
177 struct cleanup *back_to;
178
179 lm = (gdb_byte *) xmalloc (lmo->link_map_size);
180 back_to = make_cleanup (xfree, lm);
181
182 if (target_read_memory (lm_addr, lm, lmo->link_map_size) != 0)
183 {
184 warning (_("Error reading shared library list entry at %s"),
185 paddress (target_gdbarch (), lm_addr));
186 lm_info = NULL;
187 }
188 else
189 {
190 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
191
192 lm_info = new lm_info_svr4;
193 lm_info->lm_addr = lm_addr;
194
195 lm_info->l_addr_inferior = extract_typed_address (&lm[lmo->l_addr_offset],
196 ptr_type);
197 lm_info->l_ld = extract_typed_address (&lm[lmo->l_ld_offset], ptr_type);
198 lm_info->l_next = extract_typed_address (&lm[lmo->l_next_offset],
199 ptr_type);
200 lm_info->l_prev = extract_typed_address (&lm[lmo->l_prev_offset],
201 ptr_type);
202 lm_info->l_name = extract_typed_address (&lm[lmo->l_name_offset],
203 ptr_type);
204 }
205
206 do_cleanups (back_to);
207
208 return lm_info;
209 }
210
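/* Editor's sketch: the link_map_offsets used above index into the
   inferior's own link map entries.  On glibc-based systems that
   structure begins roughly as follows (an assumption based on glibc's
   <link.h>; other SVR4 systems differ only in layout details):

       struct link_map
       {
         ElfW(Addr) l_addr;        // difference between link-time and load address
         char *l_name;             // absolute pathname of the object
         ElfW(Dyn) *l_ld;          // runtime address of the PT_DYNAMIC segment
         struct link_map *l_next;  // doubly-linked chain of loaded objects
         struct link_map *l_prev;
       };

   lm_info_read above extracts exactly these five fields using the
   architecture-specific offsets from svr4_fetch_link_map_offsets.  */
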
211 static int
212 has_lm_dynamic_from_link_map (void)
213 {
214 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
215
216 return lmo->l_ld_offset >= 0;
217 }
218
219 static CORE_ADDR
220 lm_addr_check (const struct so_list *so, bfd *abfd)
221 {
222 lm_info_svr4 *li = (lm_info_svr4 *) so->lm_info;
223
224 if (!li->l_addr_p)
225 {
226 struct bfd_section *dyninfo_sect;
227 CORE_ADDR l_addr, l_dynaddr, dynaddr;
228
229 l_addr = li->l_addr_inferior;
230
231 if (! abfd || ! has_lm_dynamic_from_link_map ())
232 goto set_addr;
233
234 l_dynaddr = li->l_ld;
235
236 dyninfo_sect = bfd_get_section_by_name (abfd, ".dynamic");
237 if (dyninfo_sect == NULL)
238 goto set_addr;
239
240 dynaddr = bfd_section_vma (abfd, dyninfo_sect);
241
242 if (dynaddr + l_addr != l_dynaddr)
243 {
244 CORE_ADDR align = 0x1000;
245 CORE_ADDR minpagesize = align;
246
247 if (bfd_get_flavour (abfd) == bfd_target_elf_flavour)
248 {
249 Elf_Internal_Ehdr *ehdr = elf_tdata (abfd)->elf_header;
250 Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
251 int i;
252
253 align = 1;
254
255 for (i = 0; i < ehdr->e_phnum; i++)
256 if (phdr[i].p_type == PT_LOAD && phdr[i].p_align > align)
257 align = phdr[i].p_align;
258
259 minpagesize = get_elf_backend_data (abfd)->minpagesize;
260 }
261
262 /* Turn it into a mask. */
263 align--;
264
265 /* If the changes match the alignment requirements, we
266 assume we're using a core file that was generated by the
267 same binary, just prelinked with a different base offset.
268 If it doesn't match, we may have a different binary, the
269 same binary with the dynamic table loaded at an unrelated
270 location, or anything, really. To avoid regressions,
271 don't adjust the base offset in the latter case, although
272 odds are that, if things really changed, debugging won't
273 quite work.
274
275 One might rather expect the condition
276 ((l_addr & align) == 0 && ((l_dynaddr - dynaddr) & align) == 0)
277 but the one below is relaxed for PPC. The PPC kernel supports
278 either 4k or 64k page sizes. To be prepared for 64k pages,
279 PPC ELF files are built using an alignment requirement of 64k.
280 However, when running on a kernel supporting 4k pages, the memory
281 mapping of the library may not actually happen on a 64k boundary!
282
283 (In the usual case where (l_addr & align) == 0, this check is
284 equivalent to the possibly expected check above.)
285
286 Even on PPC it must be zero-aligned at least for MINPAGESIZE. */
287
288 l_addr = l_dynaddr - dynaddr;
289
290 if ((l_addr & (minpagesize - 1)) == 0
291 && (l_addr & align) == ((l_dynaddr - dynaddr) & align))
292 {
293 if (info_verbose)
294 printf_unfiltered (_("Using PIC (Position Independent Code) "
295 "prelink displacement %s for \"%s\".\n"),
296 paddress (target_gdbarch (), l_addr),
297 so->so_name);
298 }
299 else
300 {
301 /* There is no way to verify that the library file matches. During
302 prelinking of an unprelinked file (or unprelinking of a prelinked
303 file), prelink can shift the DYNAMIC segment by an arbitrary
304 offset without any page-size alignment. There is no way to
305 retrieve the ELF header and/or Program Headers for even a limited
306 verification that they match. One could verify the DYNAMIC
307 segment instead. Still, the address found here is the best
308 one GDB could find. */
309
310 warning (_(".dynamic section for \"%s\" "
311 "is not at the expected address "
312 "(wrong library or version mismatch?)"), so->so_name);
313 }
314 }
315
316 set_addr:
317 li->l_addr = l_addr;
318 li->l_addr_p = 1;
319 }
320
321 return li->l_addr;
322 }
323
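/* Editor's note: a worked example of the displacement check above, with
   illustrative numbers.  If the file's .dynamic section has VMA 0x200000
   (DYNADDR) while the inferior's link map reports l_ld == 0x7ffff7c00000
   (L_DYNADDR), then l_addr = l_dynaddr - dynaddr = 0x7ffff7a00000; that
   value is page-aligned, so it is accepted and reported as the prelink
   displacement.  Had l_dynaddr been 0x7ffff7c00123, the resulting
   displacement would not be aligned and the "not at the expected
   address" warning would be emitted instead.  */
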
324 /* Per pspace SVR4 specific data. */
325
326 struct svr4_info
327 {
328 CORE_ADDR debug_base; /* Base of dynamic linker structures. */
329
330 /* Validity flag for debug_loader_offset. */
331 int debug_loader_offset_p;
332
333 /* Load address for the dynamic linker, inferred. */
334 CORE_ADDR debug_loader_offset;
335
336 /* Name of the dynamic linker, valid if debug_loader_offset_p. */
337 char *debug_loader_name;
338
339 /* Load map address for the main executable. */
340 CORE_ADDR main_lm_addr;
341
342 CORE_ADDR interp_text_sect_low;
343 CORE_ADDR interp_text_sect_high;
344 CORE_ADDR interp_plt_sect_low;
345 CORE_ADDR interp_plt_sect_high;
346
347 /* Nonzero if the list of objects was last obtained from the target
348 via qXfer:libraries-svr4:read. */
349 int using_xfer;
350
351 /* Table of struct probe_and_action instances, used by the
352 probes-based interface to map breakpoint addresses to probes
353 and their associated actions. Lookup is performed using
354 probe_and_action->probe->address. */
355 htab_t probes_table;
356
357 /* List of objects loaded into the inferior, used by the probes-
358 based interface. */
359 struct so_list *solib_list;
360 };
361
362 /* Per-program-space data key. */
363 static const struct program_space_data *solib_svr4_pspace_data;
364
365 /* Free the probes table. */
366
367 static void
368 free_probes_table (struct svr4_info *info)
369 {
370 if (info->probes_table == NULL)
371 return;
372
373 htab_delete (info->probes_table);
374 info->probes_table = NULL;
375 }
376
377 /* Free the solib list. */
378
379 static void
380 free_solib_list (struct svr4_info *info)
381 {
382 svr4_free_library_list (&info->solib_list);
383 info->solib_list = NULL;
384 }
385
386 static void
387 svr4_pspace_data_cleanup (struct program_space *pspace, void *arg)
388 {
389 struct svr4_info *info = (struct svr4_info *) arg;
390
391 free_probes_table (info);
392 free_solib_list (info);
393
394 xfree (info);
395 }
396
397 /* Get the current svr4 data. If none is found yet, add it now. This
398 function always returns a valid object. */
399
400 static struct svr4_info *
401 get_svr4_info (void)
402 {
403 struct svr4_info *info;
404
405 info = (struct svr4_info *) program_space_data (current_program_space,
406 solib_svr4_pspace_data);
407 if (info != NULL)
408 return info;
409
410 info = XCNEW (struct svr4_info);
411 set_program_space_data (current_program_space, solib_svr4_pspace_data, info);
412 return info;
413 }
414
415 /* Local function prototypes */
416
417 static int match_main (const char *);
418
419 /* Read program header TYPE from inferior memory. The header is found
420 by scanning the OS auxiliary vector.
421
422 If TYPE == -1, return the program headers instead of the contents of
423 one program header.
424
425 Return a pointer to allocated memory holding the program header contents,
426 or NULL on failure. If successful, and unless P_SECT_SIZE is NULL, the
427 size of those contents is returned in P_SECT_SIZE. Likewise, the target
428 architecture size (32-bit or 64-bit) is returned in P_ARCH_SIZE and
429 the base address of the section is returned in BASE_ADDR. */
430
431 static gdb_byte *
432 read_program_header (int type, int *p_sect_size, int *p_arch_size,
433 CORE_ADDR *base_addr)
434 {
435 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
436 CORE_ADDR at_phdr, at_phent, at_phnum, pt_phdr = 0;
437 int arch_size, sect_size;
438 CORE_ADDR sect_addr;
439 gdb_byte *buf;
440 int pt_phdr_p = 0;
441
442 /* Get required auxv elements from target. */
443 if (target_auxv_search (&current_target, AT_PHDR, &at_phdr) <= 0)
444 return 0;
445 if (target_auxv_search (&current_target, AT_PHENT, &at_phent) <= 0)
446 return 0;
447 if (target_auxv_search (&current_target, AT_PHNUM, &at_phnum) <= 0)
448 return 0;
449 if (!at_phdr || !at_phnum)
450 return 0;
451
452 /* Determine ELF architecture type. */
453 if (at_phent == sizeof (Elf32_External_Phdr))
454 arch_size = 32;
455 else if (at_phent == sizeof (Elf64_External_Phdr))
456 arch_size = 64;
457 else
458 return 0;
459
460 /* Find the requested segment. */
461 if (type == -1)
462 {
463 sect_addr = at_phdr;
464 sect_size = at_phent * at_phnum;
465 }
466 else if (arch_size == 32)
467 {
468 Elf32_External_Phdr phdr;
469 int i;
470
471 /* Search for requested PHDR. */
472 for (i = 0; i < at_phnum; i++)
473 {
474 int p_type;
475
476 if (target_read_memory (at_phdr + i * sizeof (phdr),
477 (gdb_byte *)&phdr, sizeof (phdr)))
478 return 0;
479
480 p_type = extract_unsigned_integer ((gdb_byte *) phdr.p_type,
481 4, byte_order);
482
483 if (p_type == PT_PHDR)
484 {
485 pt_phdr_p = 1;
486 pt_phdr = extract_unsigned_integer ((gdb_byte *) phdr.p_vaddr,
487 4, byte_order);
488 }
489
490 if (p_type == type)
491 break;
492 }
493
494 if (i == at_phnum)
495 return 0;
496
497 /* Retrieve address and size. */
498 sect_addr = extract_unsigned_integer ((gdb_byte *)phdr.p_vaddr,
499 4, byte_order);
500 sect_size = extract_unsigned_integer ((gdb_byte *)phdr.p_memsz,
501 4, byte_order);
502 }
503 else
504 {
505 Elf64_External_Phdr phdr;
506 int i;
507
508 /* Search for requested PHDR. */
509 for (i = 0; i < at_phnum; i++)
510 {
511 int p_type;
512
513 if (target_read_memory (at_phdr + i * sizeof (phdr),
514 (gdb_byte *)&phdr, sizeof (phdr)))
515 return 0;
516
517 p_type = extract_unsigned_integer ((gdb_byte *) phdr.p_type,
518 4, byte_order);
519
520 if (p_type == PT_PHDR)
521 {
522 pt_phdr_p = 1;
523 pt_phdr = extract_unsigned_integer ((gdb_byte *) phdr.p_vaddr,
524 8, byte_order);
525 }
526
527 if (p_type == type)
528 break;
529 }
530
531 if (i == at_phnum)
532 return 0;
533
534 /* Retrieve address and size. */
535 sect_addr = extract_unsigned_integer ((gdb_byte *)phdr.p_vaddr,
536 8, byte_order);
537 sect_size = extract_unsigned_integer ((gdb_byte *)phdr.p_memsz,
538 8, byte_order);
539 }
540
541 /* PT_PHDR is optional, but we really need it
542 for PIE to make this work in general. */
543
544 if (pt_phdr_p)
545 {
546 /* at_phdr is the real address in memory; pt_phdr is what the program
547 header says it should be. The relocation offset is the difference. */
548 sect_addr = sect_addr + (at_phdr - pt_phdr);
549 }
550
551 /* Read in requested program header. */
552 buf = (gdb_byte *) xmalloc (sect_size);
553 if (target_read_memory (sect_addr, buf, sect_size))
554 {
555 xfree (buf);
556 return NULL;
557 }
558
559 if (p_arch_size)
560 *p_arch_size = arch_size;
561 if (p_sect_size)
562 *p_sect_size = sect_size;
563 if (base_addr)
564 *base_addr = sect_addr;
565
566 return buf;
567 }
568
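/* Editor's note: an illustrative computation for the PIE adjustment
   above.  If the auxiliary vector reports AT_PHDR == 0x555555554040
   while the PT_PHDR entry claims p_vaddr == 0x40, the load bias is
   0x555555554040 - 0x40 = 0x555555554000, and that bias is added to the
   requested segment's p_vaddr to obtain its runtime address.  */
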
569
570 /* Return program interpreter string. */
571 static char *
572 find_program_interpreter (void)
573 {
574 gdb_byte *buf = NULL;
575
576 /* If we have an exec_bfd, use its section table. */
577 if (exec_bfd
578 && bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
579 {
580 struct bfd_section *interp_sect;
581
582 interp_sect = bfd_get_section_by_name (exec_bfd, ".interp");
583 if (interp_sect != NULL)
584 {
585 int sect_size = bfd_section_size (exec_bfd, interp_sect);
586
587 buf = (gdb_byte *) xmalloc (sect_size);
588 bfd_get_section_contents (exec_bfd, interp_sect, buf, 0, sect_size);
589 }
590 }
591
592 /* If we didn't find it, use the target auxiliary vector. */
593 if (!buf)
594 buf = read_program_header (PT_INTERP, NULL, NULL, NULL);
595
596 return (char *) buf;
597 }
598
599
600 /* Scan for DESIRED_DYNTAG in .dynamic section of ABFD. If DESIRED_DYNTAG is
601 found, 1 is returned and the corresponding PTR is set. */
602
603 static int
604 scan_dyntag (const int desired_dyntag, bfd *abfd, CORE_ADDR *ptr,
605 CORE_ADDR *ptr_addr)
606 {
607 int arch_size, step, sect_size;
608 long current_dyntag;
609 CORE_ADDR dyn_ptr, dyn_addr;
610 gdb_byte *bufend, *bufstart, *buf;
611 Elf32_External_Dyn *x_dynp_32;
612 Elf64_External_Dyn *x_dynp_64;
613 struct bfd_section *sect;
614 struct target_section *target_section;
615
616 if (abfd == NULL)
617 return 0;
618
619 if (bfd_get_flavour (abfd) != bfd_target_elf_flavour)
620 return 0;
621
622 arch_size = bfd_get_arch_size (abfd);
623 if (arch_size == -1)
624 return 0;
625
626 /* Find the start address of the .dynamic section. */
627 sect = bfd_get_section_by_name (abfd, ".dynamic");
628 if (sect == NULL)
629 return 0;
630
631 for (target_section = current_target_sections->sections;
632 target_section < current_target_sections->sections_end;
633 target_section++)
634 if (sect == target_section->the_bfd_section)
635 break;
636 if (target_section < current_target_sections->sections_end)
637 dyn_addr = target_section->addr;
638 else
639 {
640 /* ABFD may come from OBJFILE acting only as a symbol file without being
641 loaded into the target (see add_symbol_file_command). In that case,
642 fall back to the file VMA address, without the possibility of
643 having the section relocated to its actual in-memory address. */
644
645 dyn_addr = bfd_section_vma (abfd, sect);
646 }
647
648 /* Read in .dynamic from the BFD. We will get the actual value
649 from memory later. */
650 sect_size = bfd_section_size (abfd, sect);
651 buf = bufstart = (gdb_byte *) alloca (sect_size);
652 if (!bfd_get_section_contents (abfd, sect,
653 buf, 0, sect_size))
654 return 0;
655
656 /* Iterate over BUF and scan for DYNTAG. If found, set PTR and return. */
657 step = (arch_size == 32) ? sizeof (Elf32_External_Dyn)
658 : sizeof (Elf64_External_Dyn);
659 for (bufend = buf + sect_size;
660 buf < bufend;
661 buf += step)
662 {
663 if (arch_size == 32)
664 {
665 x_dynp_32 = (Elf32_External_Dyn *) buf;
666 current_dyntag = bfd_h_get_32 (abfd, (bfd_byte *) x_dynp_32->d_tag);
667 dyn_ptr = bfd_h_get_32 (abfd, (bfd_byte *) x_dynp_32->d_un.d_ptr);
668 }
669 else
670 {
671 x_dynp_64 = (Elf64_External_Dyn *) buf;
672 current_dyntag = bfd_h_get_64 (abfd, (bfd_byte *) x_dynp_64->d_tag);
673 dyn_ptr = bfd_h_get_64 (abfd, (bfd_byte *) x_dynp_64->d_un.d_ptr);
674 }
675 if (current_dyntag == DT_NULL)
676 return 0;
677 if (current_dyntag == desired_dyntag)
678 {
679 /* If requested, try to read the runtime value of this .dynamic
680 entry. */
681 if (ptr)
682 {
683 struct type *ptr_type;
684 gdb_byte ptr_buf[8];
685 CORE_ADDR ptr_addr_1;
686
687 ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
688 ptr_addr_1 = dyn_addr + (buf - bufstart) + arch_size / 8;
689 if (target_read_memory (ptr_addr_1, ptr_buf, arch_size / 8) == 0)
690 dyn_ptr = extract_typed_address (ptr_buf, ptr_type);
691 *ptr = dyn_ptr;
692 if (ptr_addr)
693 *ptr_addr = dyn_addr + (buf - bufstart);
694 }
695 return 1;
696 }
697 }
698
699 return 0;
700 }
701
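/* Editor's sketch of the entries scan_dyntag walks over (assumption:
   64-bit layout shown, per <elf.h>):

       typedef struct
       {
         Elf64_Sxword d_tag;       // e.g. DT_DEBUG, DT_MIPS_RLD_MAP, DT_NULL
         union
         {
           Elf64_Xword d_val;
           Elf64_Addr d_ptr;       // for DT_DEBUG the run-time linker stores
                                   // the address of its r_debug struct here
         } d_un;
       } Elf64_Dyn;

   The file copy of a DT_DEBUG entry normally holds zero; the value that
   matters is what the dynamic linker writes into the in-memory copy,
   which is why the runtime value is re-read with target_read_memory
   above.  */
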
702 /* Scan for DESIRED_DYNTAG in .dynamic section of the target's main executable,
703 found by consulting the OS auxiliary vector. If DESIRED_DYNTAG is found, 1
704 is returned and the corresponding PTR is set. */
705
706 static int
707 scan_dyntag_auxv (const int desired_dyntag, CORE_ADDR *ptr,
708 CORE_ADDR *ptr_addr)
709 {
710 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
711 int sect_size, arch_size, step;
712 long current_dyntag;
713 CORE_ADDR dyn_ptr;
714 CORE_ADDR base_addr;
715 gdb_byte *bufend, *bufstart, *buf;
716
717 /* Read in .dynamic section. */
718 buf = bufstart = read_program_header (PT_DYNAMIC, &sect_size, &arch_size,
719 &base_addr);
720 if (!buf)
721 return 0;
722
723 /* Iterate over BUF and scan for DYNTAG. If found, set PTR and return. */
724 step = (arch_size == 32) ? sizeof (Elf32_External_Dyn)
725 : sizeof (Elf64_External_Dyn);
726 for (bufend = buf + sect_size;
727 buf < bufend;
728 buf += step)
729 {
730 if (arch_size == 32)
731 {
732 Elf32_External_Dyn *dynp = (Elf32_External_Dyn *) buf;
733
734 current_dyntag = extract_unsigned_integer ((gdb_byte *) dynp->d_tag,
735 4, byte_order);
736 dyn_ptr = extract_unsigned_integer ((gdb_byte *) dynp->d_un.d_ptr,
737 4, byte_order);
738 }
739 else
740 {
741 Elf64_External_Dyn *dynp = (Elf64_External_Dyn *) buf;
742
743 current_dyntag = extract_unsigned_integer ((gdb_byte *) dynp->d_tag,
744 8, byte_order);
745 dyn_ptr = extract_unsigned_integer ((gdb_byte *) dynp->d_un.d_ptr,
746 8, byte_order);
747 }
748 if (current_dyntag == DT_NULL)
749 break;
750
751 if (current_dyntag == desired_dyntag)
752 {
753 if (ptr)
754 *ptr = dyn_ptr;
755
756 if (ptr_addr)
757 *ptr_addr = base_addr + buf - bufstart;
758
759 xfree (bufstart);
760 return 1;
761 }
762 }
763
764 xfree (bufstart);
765 return 0;
766 }
767
768 /* Locate the base address of dynamic linker structs for SVR4 elf
769 targets.
770
771 For SVR4 elf targets the address of the dynamic linker's runtime
772 structure is contained within the dynamic info section in the
773 executable file. The dynamic section is also mapped into the
774 inferior address space. Because the runtime loader fills in the
775 real address before starting the inferior, we have to read in the
776 dynamic info section from the inferior address space.
777 If there are any errors while trying to find the address, we
778 silently return 0, otherwise the found address is returned. */
779
780 static CORE_ADDR
781 elf_locate_base (void)
782 {
783 struct bound_minimal_symbol msymbol;
784 CORE_ADDR dyn_ptr, dyn_ptr_addr;
785
786 /* Look for DT_MIPS_RLD_MAP first. MIPS executables use this
787 instead of DT_DEBUG, although they sometimes contain an unused
788 DT_DEBUG. */
789 if (scan_dyntag (DT_MIPS_RLD_MAP, exec_bfd, &dyn_ptr, NULL)
790 || scan_dyntag_auxv (DT_MIPS_RLD_MAP, &dyn_ptr, NULL))
791 {
792 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
793 gdb_byte *pbuf;
794 int pbuf_size = TYPE_LENGTH (ptr_type);
795
796 pbuf = (gdb_byte *) alloca (pbuf_size);
797 /* DT_MIPS_RLD_MAP contains a pointer to the address
798 of the dynamic link structure. */
799 if (target_read_memory (dyn_ptr, pbuf, pbuf_size))
800 return 0;
801 return extract_typed_address (pbuf, ptr_type);
802 }
803
804 /* Then check DT_MIPS_RLD_MAP_REL. MIPS executables now use this form
805 in order to support PIE. DT_MIPS_RLD_MAP will also exist
806 in non-PIE executables. */
807 if (scan_dyntag (DT_MIPS_RLD_MAP_REL, exec_bfd, &dyn_ptr, &dyn_ptr_addr)
808 || scan_dyntag_auxv (DT_MIPS_RLD_MAP_REL, &dyn_ptr, &dyn_ptr_addr))
809 {
810 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
811 gdb_byte *pbuf;
812 int pbuf_size = TYPE_LENGTH (ptr_type);
813
814 pbuf = (gdb_byte *) alloca (pbuf_size);
815 /* DT_MIPS_RLD_MAP_REL contains an offset from the address of the
816 DT slot to the address of the dynamic link structure. */
817 if (target_read_memory (dyn_ptr + dyn_ptr_addr, pbuf, pbuf_size))
818 return 0;
819 return extract_typed_address (pbuf, ptr_type);
820 }
821
822 /* Find DT_DEBUG. */
823 if (scan_dyntag (DT_DEBUG, exec_bfd, &dyn_ptr, NULL)
824 || scan_dyntag_auxv (DT_DEBUG, &dyn_ptr, NULL))
825 return dyn_ptr;
826
827 /* This may be a static executable. Look for the symbol
828 conventionally named _r_debug, as a last resort. */
829 msymbol = lookup_minimal_symbol ("_r_debug", NULL, symfile_objfile);
830 if (msymbol.minsym != NULL)
831 return BMSYMBOL_VALUE_ADDRESS (msymbol);
832
833 /* DT_DEBUG entry not found. */
834 return 0;
835 }
836
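/* Editor's sketch: the "debug base" returned by elf_locate_base is the
   inferior address of the dynamic linker's r_debug structure, roughly
   (an assumption following glibc's <link.h>):

       struct r_debug
       {
         int r_version;            // protocol version, checked below
         struct link_map *r_map;   // head of the link map chain (r_map_offset)
         ElfW(Addr) r_brk;         // debug-state function (r_brk_offset)
         enum { RT_CONSISTENT, RT_ADD, RT_DELETE } r_state;
         ElfW(Addr) r_ldbase;      // base address of the dynamic linker
       };

   The r_ldsomap member read by solib_svr4_r_ldsomap below is a Solaris
   extension not present in this glibc layout; the helpers therefore go
   through the link_map_offsets table rather than assuming exact
   offsets.  */
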
837 /* Locate the base address of dynamic linker structs.
838
839 For both the SunOS and SVR4 shared library implementations, if the
840 inferior executable has been linked dynamically, there is a single
841 address somewhere in the inferior's data space which is the key to
842 locating all of the dynamic linker's runtime structures. This
843 address is the value of the debug base symbol. The job of this
844 function is to find and return that address, or to return 0 if there
845 is no such address (the executable is statically linked for example).
846
847 For SunOS, the job is almost trivial, since the dynamic linker and
848 all of its structures are statically linked to the executable at
849 link time. Thus the symbol for the address we are looking for has
850 already been added to the minimal symbol table for the executable's
851 objfile at the time the symbol file's symbols were read, and all we
852 have to do is look it up there. Note that we explicitly do NOT want
853 to find the copies in the shared library.
854
855 The SVR4 version is a bit more complicated because the address
856 is contained somewhere in the dynamic info section. We have to go
857 to a lot more work to discover the address of the debug base symbol.
858 Because of this complexity, we cache the value we find and return that
859 value on subsequent invocations. Note there is no copy in the
860 executable symbol tables. */
861
862 static CORE_ADDR
863 locate_base (struct svr4_info *info)
864 {
865 /* Check to see if we have a currently valid address, and if so, avoid
866 doing all this work again and just return the cached address. If
867 we have no cached address, try to locate it in the dynamic info
868 section for ELF executables. There's no point in doing any of this
869 though if we don't have some link map offsets to work with. */
870
871 if (info->debug_base == 0 && svr4_have_link_map_offsets ())
872 info->debug_base = elf_locate_base ();
873 return info->debug_base;
874 }
875
876 /* Find the first element in the inferior's dynamic link map, and
877 return its address in the inferior. Return zero if the address
878 could not be determined.
879
880 FIXME: Perhaps we should validate the info somehow, perhaps by
881 checking r_version for a known version number, or r_state for
882 RT_CONSISTENT. */
883
884 static CORE_ADDR
885 solib_svr4_r_map (struct svr4_info *info)
886 {
887 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
888 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
889 CORE_ADDR addr = 0;
890
891 TRY
892 {
893 addr = read_memory_typed_address (info->debug_base + lmo->r_map_offset,
894 ptr_type);
895 }
896 CATCH (ex, RETURN_MASK_ERROR)
897 {
898 exception_print (gdb_stderr, ex);
899 }
900 END_CATCH
901
902 return addr;
903 }
904
905 /* Find r_brk from the inferior's debug base. */
906
907 static CORE_ADDR
908 solib_svr4_r_brk (struct svr4_info *info)
909 {
910 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
911 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
912
913 return read_memory_typed_address (info->debug_base + lmo->r_brk_offset,
914 ptr_type);
915 }
916
917 /* Find the link map for the dynamic linker (if it is not in the
918 normal list of loaded shared objects). */
919
920 static CORE_ADDR
921 solib_svr4_r_ldsomap (struct svr4_info *info)
922 {
923 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
924 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
925 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
926 ULONGEST version = 0;
927
928 TRY
929 {
930 /* Check version, and return zero if `struct r_debug' doesn't have
931 the r_ldsomap member. */
932 version
933 = read_memory_unsigned_integer (info->debug_base + lmo->r_version_offset,
934 lmo->r_version_size, byte_order);
935 }
936 CATCH (ex, RETURN_MASK_ERROR)
937 {
938 exception_print (gdb_stderr, ex);
939 }
940 END_CATCH
941
942 if (version < 2 || lmo->r_ldsomap_offset == -1)
943 return 0;
944
945 return read_memory_typed_address (info->debug_base + lmo->r_ldsomap_offset,
946 ptr_type);
947 }
948
949 /* On Solaris systems with some versions of the dynamic linker,
950 ld.so's l_name pointer points to the SONAME in the string table
951 rather than into writable memory. So that GDB can find shared
952 libraries when loading a core file generated by gcore, ensure that
953 memory areas containing the l_name string are saved in the core
954 file. */
955
956 static int
957 svr4_keep_data_in_core (CORE_ADDR vaddr, unsigned long size)
958 {
959 struct svr4_info *info;
960 CORE_ADDR ldsomap;
961 struct so_list *newobj;
962 struct cleanup *old_chain;
963 CORE_ADDR name_lm;
964
965 info = get_svr4_info ();
966
967 info->debug_base = 0;
968 locate_base (info);
969 if (!info->debug_base)
970 return 0;
971
972 ldsomap = solib_svr4_r_ldsomap (info);
973 if (!ldsomap)
974 return 0;
975
976 newobj = XCNEW (struct so_list);
977 old_chain = make_cleanup (xfree, newobj);
978 lm_info_svr4 *li = lm_info_read (ldsomap);
979 newobj->lm_info = li;
980 make_cleanup (xfree, newobj->lm_info);
981 name_lm = li != NULL ? li->l_name : 0;
982 do_cleanups (old_chain);
983
984 return (name_lm >= vaddr && name_lm < vaddr + size);
985 }
986
987 /* See solist.h. */
988
989 static int
990 open_symbol_file_object (int from_tty)
991 {
992 CORE_ADDR lm, l_name;
993 char *filename;
994 int errcode;
995 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
996 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
997 int l_name_size = TYPE_LENGTH (ptr_type);
998 gdb_byte *l_name_buf = (gdb_byte *) xmalloc (l_name_size);
999 struct cleanup *cleanups = make_cleanup (xfree, l_name_buf);
1000 struct svr4_info *info = get_svr4_info ();
1001 symfile_add_flags add_flags = 0;
1002
1003 if (from_tty)
1004 add_flags |= SYMFILE_VERBOSE;
1005
1006 if (symfile_objfile)
1007 if (!query (_("Attempt to reload symbols from process? ")))
1008 {
1009 do_cleanups (cleanups);
1010 return 0;
1011 }
1012
1013 /* Always locate the debug struct, in case it has moved. */
1014 info->debug_base = 0;
1015 if (locate_base (info) == 0)
1016 {
1017 do_cleanups (cleanups);
1018 return 0; /* failed somehow... */
1019 }
1020
1021 /* First link map member should be the executable. */
1022 lm = solib_svr4_r_map (info);
1023 if (lm == 0)
1024 {
1025 do_cleanups (cleanups);
1026 return 0; /* failed somehow... */
1027 }
1028
1029 /* Read address of name from target memory to GDB. */
1030 read_memory (lm + lmo->l_name_offset, l_name_buf, l_name_size);
1031
1032 /* Convert the address to host format. */
1033 l_name = extract_typed_address (l_name_buf, ptr_type);
1034
1035 if (l_name == 0)
1036 {
1037 do_cleanups (cleanups);
1038 return 0; /* No filename. */
1039 }
1040
1041 /* Now fetch the filename from target memory. */
1042 target_read_string (l_name, &filename, SO_NAME_MAX_PATH_SIZE - 1, &errcode);
1043 make_cleanup (xfree, filename);
1044
1045 if (errcode)
1046 {
1047 warning (_("failed to read exec filename from attached file: %s"),
1048 safe_strerror (errcode));
1049 do_cleanups (cleanups);
1050 return 0;
1051 }
1052
1053 /* Have a pathname: read the symbol file. */
1054 symbol_file_add_main (filename, add_flags);
1055
1056 do_cleanups (cleanups);
1057 return 1;
1058 }
1059
1060 /* Data exchange structure for the XML parser as returned by
1061 svr4_current_sos_via_xfer_libraries. */
1062
1063 struct svr4_library_list
1064 {
1065 struct so_list *head, **tailp;
1066
1067 /* Inferior address of struct link_map used for the main executable. It is
1068 NULL if not known. */
1069 CORE_ADDR main_lm;
1070 };
1071
1072 /* Implementation for target_so_ops.free_so. */
1073
1074 static void
1075 svr4_free_so (struct so_list *so)
1076 {
1077 lm_info_svr4 *li = (lm_info_svr4 *) so->lm_info;
1078
1079 delete li;
1080 }
1081
1082 /* Implement target_so_ops.clear_so. */
1083
1084 static void
1085 svr4_clear_so (struct so_list *so)
1086 {
1087 lm_info_svr4 *li = (lm_info_svr4 *) so->lm_info;
1088
1089 if (li != NULL)
1090 li->l_addr_p = 0;
1091 }
1092
1093 /* Free so_list built so far (called via cleanup). */
1094
1095 static void
1096 svr4_free_library_list (void *p_list)
1097 {
1098 struct so_list *list = *(struct so_list **) p_list;
1099
1100 while (list != NULL)
1101 {
1102 struct so_list *next = list->next;
1103
1104 free_so (list);
1105 list = next;
1106 }
1107 }
1108
1109 /* Copy library list. */
1110
1111 static struct so_list *
1112 svr4_copy_library_list (struct so_list *src)
1113 {
1114 struct so_list *dst = NULL;
1115 struct so_list **link = &dst;
1116
1117 while (src != NULL)
1118 {
1119 struct so_list *newobj;
1120
1121 newobj = XNEW (struct so_list);
1122 memcpy (newobj, src, sizeof (struct so_list));
1123
1124 lm_info_svr4 *src_li = (lm_info_svr4 *) src->lm_info;
1125 newobj->lm_info = new lm_info_svr4 (*src_li);
1126
1127 newobj->next = NULL;
1128 *link = newobj;
1129 link = &newobj->next;
1130
1131 src = src->next;
1132 }
1133
1134 return dst;
1135 }
1136
1137 #ifdef HAVE_LIBEXPAT
1138
1139 #include "xml-support.h"
1140
1141 /* Handle the start of a <library> element. Note: new elements are added
1142 at the tail of the list, keeping the list in order. */
1143
1144 static void
1145 library_list_start_library (struct gdb_xml_parser *parser,
1146 const struct gdb_xml_element *element,
1147 void *user_data, VEC(gdb_xml_value_s) *attributes)
1148 {
1149 struct svr4_library_list *list = (struct svr4_library_list *) user_data;
1150 const char *name
1151 = (const char *) xml_find_attribute (attributes, "name")->value;
1152 ULONGEST *lmp
1153 = (ULONGEST *) xml_find_attribute (attributes, "lm")->value;
1154 ULONGEST *l_addrp
1155 = (ULONGEST *) xml_find_attribute (attributes, "l_addr")->value;
1156 ULONGEST *l_ldp
1157 = (ULONGEST *) xml_find_attribute (attributes, "l_ld")->value;
1158 struct so_list *new_elem;
1159
1160 new_elem = XCNEW (struct so_list);
1161 lm_info_svr4 *li = new lm_info_svr4;
1162 new_elem->lm_info = li;
1163 li->lm_addr = *lmp;
1164 li->l_addr_inferior = *l_addrp;
1165 li->l_ld = *l_ldp;
1166
1167 strncpy (new_elem->so_name, name, sizeof (new_elem->so_name) - 1);
1168 new_elem->so_name[sizeof (new_elem->so_name) - 1] = 0;
1169 strcpy (new_elem->so_original_name, new_elem->so_name);
1170
1171 *list->tailp = new_elem;
1172 list->tailp = &new_elem->next;
1173 }
1174
1175 /* Handle the start of a <library-list-svr4> element. */
1176
1177 static void
1178 svr4_library_list_start_list (struct gdb_xml_parser *parser,
1179 const struct gdb_xml_element *element,
1180 void *user_data, VEC(gdb_xml_value_s) *attributes)
1181 {
1182 struct svr4_library_list *list = (struct svr4_library_list *) user_data;
1183 const char *version
1184 = (const char *) xml_find_attribute (attributes, "version")->value;
1185 struct gdb_xml_value *main_lm = xml_find_attribute (attributes, "main-lm");
1186
1187 if (strcmp (version, "1.0") != 0)
1188 gdb_xml_error (parser,
1189 _("SVR4 Library list has unsupported version \"%s\""),
1190 version);
1191
1192 if (main_lm)
1193 list->main_lm = *(ULONGEST *) main_lm->value;
1194 }
1195
1196 /* The allowed elements and attributes for an XML library list.
1197 The root element is <library-list-svr4>. */
1198
1199 static const struct gdb_xml_attribute svr4_library_attributes[] =
1200 {
1201 { "name", GDB_XML_AF_NONE, NULL, NULL },
1202 { "lm", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1203 { "l_addr", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1204 { "l_ld", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1205 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1206 };
1207
1208 static const struct gdb_xml_element svr4_library_list_children[] =
1209 {
1210 {
1211 "library", svr4_library_attributes, NULL,
1212 GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL,
1213 library_list_start_library, NULL
1214 },
1215 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1216 };
1217
1218 static const struct gdb_xml_attribute svr4_library_list_attributes[] =
1219 {
1220 { "version", GDB_XML_AF_NONE, NULL, NULL },
1221 { "main-lm", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
1222 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1223 };
1224
1225 static const struct gdb_xml_element svr4_library_list_elements[] =
1226 {
1227 { "library-list-svr4", svr4_library_list_attributes, svr4_library_list_children,
1228 GDB_XML_EF_NONE, svr4_library_list_start_list, NULL },
1229 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1230 };
1231
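/* Editor's note: an illustrative qXfer:libraries-svr4:read document
   matching the elements and attributes declared above (addresses are
   made up):

       <library-list-svr4 version="1.0" main-lm="0x7ffff7ffe190">
         <library name="/lib64/libc.so.6" lm="0x7ffff7fbd2e0"
                  l_addr="0x7ffff7a0e000" l_ld="0x7ffff7db4ba0"/>
       </library-list-svr4>
   */
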
1232 /* Parse a qXfer:libraries-svr4:read document into *LIST.
1233
1234 Return 0 if the packet is not supported; *LIST is not modified in that
1235 case. Return 1 if *LIST contains the library list; it may be
1236 empty, and the caller is responsible for freeing all its entries. */
1237
1238 static int
1239 svr4_parse_libraries (const char *document, struct svr4_library_list *list)
1240 {
1241 struct cleanup *back_to = make_cleanup (svr4_free_library_list,
1242 &list->head);
1243
1244 memset (list, 0, sizeof (*list));
1245 list->tailp = &list->head;
1246 if (gdb_xml_parse_quick (_("target library list"), "library-list-svr4.dtd",
1247 svr4_library_list_elements, document, list) == 0)
1248 {
1249 /* Parsed successfully, keep the result. */
1250 discard_cleanups (back_to);
1251 return 1;
1252 }
1253
1254 do_cleanups (back_to);
1255 return 0;
1256 }
1257
1258 /* Attempt to get so_list from target via qXfer:libraries-svr4:read packet.
1259
1260 Return 0 if the packet is not supported; *LIST is not modified in that
1261 case. Return 1 if *LIST contains the library list; it may be
1262 empty, and the caller is responsible for freeing all its entries.
1263
1264 Note that ANNEX must be NULL if the remote does not explicitly allow
1265 qXfer:libraries-svr4:read packets with non-empty annexes. Support for
1266 this can be checked using target_augmented_libraries_svr4_read (). */
1267
1268 static int
1269 svr4_current_sos_via_xfer_libraries (struct svr4_library_list *list,
1270 const char *annex)
1271 {
1272 gdb_assert (annex == NULL || target_augmented_libraries_svr4_read ());
1273
1274 /* Fetch the list of shared libraries. */
1275 gdb::unique_xmalloc_ptr<char> svr4_library_document
1276 = target_read_stralloc (&current_target, TARGET_OBJECT_LIBRARIES_SVR4,
1277 annex);
1278 if (svr4_library_document == NULL)
1279 return 0;
1280
1281 return svr4_parse_libraries (svr4_library_document.get (), list);
1282 }
1283
1284 #else
1285
1286 static int
1287 svr4_current_sos_via_xfer_libraries (struct svr4_library_list *list,
1288 const char *annex)
1289 {
1290 return 0;
1291 }
1292
1293 #endif
1294
1295 /* If no shared library information is available from the dynamic
1296 linker, build a fallback list from other sources. */
1297
1298 static struct so_list *
1299 svr4_default_sos (void)
1300 {
1301 struct svr4_info *info = get_svr4_info ();
1302 struct so_list *newobj;
1303
1304 if (!info->debug_loader_offset_p)
1305 return NULL;
1306
1307 newobj = XCNEW (struct so_list);
1308 lm_info_svr4 *li = new lm_info_svr4;
1309 newobj->lm_info = li;
1310
1311 /* Nothing will ever check the other fields if we set l_addr_p. */
1312 li->l_addr = info->debug_loader_offset;
1313 li->l_addr_p = 1;
1314
1315 strncpy (newobj->so_name, info->debug_loader_name, SO_NAME_MAX_PATH_SIZE - 1);
1316 newobj->so_name[SO_NAME_MAX_PATH_SIZE - 1] = '\0';
1317 strcpy (newobj->so_original_name, newobj->so_name);
1318
1319 return newobj;
1320 }
1321
1322 /* Read the whole inferior libraries chain starting at address LM.
1323 Expect the first entry in the chain's previous entry to be PREV_LM.
1324 Add the entries to the tail referenced by LINK_PTR_PTR. Ignore the
1325 first entry if IGNORE_FIRST and set global MAIN_LM_ADDR according
1326 to it. Returns nonzero upon success. If zero is returned the
1327 entries stored to LINK_PTR_PTR are still valid although they may
1328 represent only part of the inferior library list. */
1329
1330 static int
1331 svr4_read_so_list (CORE_ADDR lm, CORE_ADDR prev_lm,
1332 struct so_list ***link_ptr_ptr, int ignore_first)
1333 {
1334 CORE_ADDR first_l_name = 0;
1335 CORE_ADDR next_lm;
1336
1337 for (; lm != 0; prev_lm = lm, lm = next_lm)
1338 {
1339 int errcode;
1340 char *buffer;
1341
1342 so_list_up newobj (XCNEW (struct so_list));
1343
1344 lm_info_svr4 *li = lm_info_read (lm);
1345 newobj->lm_info = li;
1346 if (li == NULL)
1347 return 0;
1348
1349 next_lm = li->l_next;
1350
1351 if (li->l_prev != prev_lm)
1352 {
1353 warning (_("Corrupted shared library list: %s != %s"),
1354 paddress (target_gdbarch (), prev_lm),
1355 paddress (target_gdbarch (), li->l_prev));
1356 return 0;
1357 }
1358
1359 /* For SVR4 versions, the first entry in the link map is for the
1360 inferior executable, so we must ignore it. For some versions of
1361 SVR4, it has no name. For others (Solaris 2.3 for example), it
1362 does have a name, so we can no longer use a missing name to
1363 decide when to ignore it. */
1364 if (ignore_first && li->l_prev == 0)
1365 {
1366 struct svr4_info *info = get_svr4_info ();
1367
1368 first_l_name = li->l_name;
1369 info->main_lm_addr = li->lm_addr;
1370 continue;
1371 }
1372
1373 /* Extract this shared object's name. */
1374 target_read_string (li->l_name, &buffer, SO_NAME_MAX_PATH_SIZE - 1,
1375 &errcode);
1376 if (errcode != 0)
1377 {
1378 /* If this entry's l_name address matches that of the
1379 inferior executable, then this is not a normal shared
1380 object, but (most likely) a vDSO. In this case, silently
1381 skip it; otherwise emit a warning. */
1382 if (first_l_name == 0 || li->l_name != first_l_name)
1383 warning (_("Can't read pathname for load map: %s."),
1384 safe_strerror (errcode));
1385 continue;
1386 }
1387
1388 strncpy (newobj->so_name, buffer, SO_NAME_MAX_PATH_SIZE - 1);
1389 newobj->so_name[SO_NAME_MAX_PATH_SIZE - 1] = '\0';
1390 strcpy (newobj->so_original_name, newobj->so_name);
1391 xfree (buffer);
1392
1393 /* If this entry has no name, or its name matches the name
1394 for the main executable, don't include it in the list. */
1395 if (! newobj->so_name[0] || match_main (newobj->so_name))
1396 continue;
1397
1398 newobj->next = 0;
1399 /* Don't free it now. */
1400 **link_ptr_ptr = newobj.release ();
1401 *link_ptr_ptr = &(**link_ptr_ptr)->next;
1402 }
1403
1404 return 1;
1405 }
1406
1407 /* Read the full list of currently loaded shared objects directly
1408 from the inferior, without referring to any libraries read and
1409 stored by the probes interface. Handle special cases relating
1410 to the first elements of the list. */
1411
1412 static struct so_list *
1413 svr4_current_sos_direct (struct svr4_info *info)
1414 {
1415 CORE_ADDR lm;
1416 struct so_list *head = NULL;
1417 struct so_list **link_ptr = &head;
1418 struct cleanup *back_to;
1419 int ignore_first;
1420 struct svr4_library_list library_list;
1421
1422 /* Fall back to manual examination of the target if the packet is not
1423 supported or gdbserver failed to find DT_DEBUG. gdb.server/solib-list.exp
1424 tests a case where gdbserver cannot find the shared libraries list while
1425 GDB itself is able to find it via SYMFILE_OBJFILE.
1426
1427 Unfortunately statically linked inferiors will also fall back through this
1428 suboptimal code path. */
1429
1430 info->using_xfer = svr4_current_sos_via_xfer_libraries (&library_list,
1431 NULL);
1432 if (info->using_xfer)
1433 {
1434 if (library_list.main_lm)
1435 info->main_lm_addr = library_list.main_lm;
1436
1437 return library_list.head ? library_list.head : svr4_default_sos ();
1438 }
1439
1440 /* Always locate the debug struct, in case it has moved. */
1441 info->debug_base = 0;
1442 locate_base (info);
1443
1444 /* If we can't find the dynamic linker's base structure, this
1445 must not be a dynamically linked executable. Hmm. */
1446 if (! info->debug_base)
1447 return svr4_default_sos ();
1448
1449 /* Assume that everything is a library if the dynamic loader was loaded
1450 late by a static executable. */
1451 if (exec_bfd && bfd_get_section_by_name (exec_bfd, ".dynamic") == NULL)
1452 ignore_first = 0;
1453 else
1454 ignore_first = 1;
1455
1456 back_to = make_cleanup (svr4_free_library_list, &head);
1457
1458 /* Walk the inferior's link map list, and build our list of
1459 `struct so_list' nodes. */
1460 lm = solib_svr4_r_map (info);
1461 if (lm)
1462 svr4_read_so_list (lm, 0, &link_ptr, ignore_first);
1463
1464 /* On Solaris, the dynamic linker is not in the normal list of
1465 shared objects, so make sure we pick it up too. Having
1466 symbol information for the dynamic linker is quite crucial
1467 for skipping dynamic linker resolver code. */
1468 lm = solib_svr4_r_ldsomap (info);
1469 if (lm)
1470 svr4_read_so_list (lm, 0, &link_ptr, 0);
1471
1472 discard_cleanups (back_to);
1473
1474 if (head == NULL)
1475 return svr4_default_sos ();
1476
1477 return head;
1478 }
1479
1480 /* Implement the main part of the "current_sos" target_so_ops
1481 method. */
1482
1483 static struct so_list *
1484 svr4_current_sos_1 (void)
1485 {
1486 struct svr4_info *info = get_svr4_info ();
1487
1488 /* If the solib list has been read and stored by the probes
1489 interface then we return a copy of the stored list. */
1490 if (info->solib_list != NULL)
1491 return svr4_copy_library_list (info->solib_list);
1492
1493 /* Otherwise obtain the solib list directly from the inferior. */
1494 return svr4_current_sos_direct (info);
1495 }
1496
1497 /* Implement the "current_sos" target_so_ops method. */
1498
1499 static struct so_list *
1500 svr4_current_sos (void)
1501 {
1502 struct so_list *so_head = svr4_current_sos_1 ();
1503 struct mem_range vsyscall_range;
1504
1505 /* Filter out the vDSO module, if present. Its symbol file would
1506 not be found on disk. The vDSO/vsyscall's OBJFILE is instead
1507 managed by symfile-mem.c:add_vsyscall_page. */
1508 if (gdbarch_vsyscall_range (target_gdbarch (), &vsyscall_range)
1509 && vsyscall_range.length != 0)
1510 {
1511 struct so_list **sop;
1512
1513 sop = &so_head;
1514 while (*sop != NULL)
1515 {
1516 struct so_list *so = *sop;
1517
1518 /* We can't simply match the vDSO by starting address alone,
1519 because lm_info->l_addr_inferior (and also l_addr) do not
1520 necessarily represent the real starting address of the
1521 ELF if the vDSO's ELF itself is "prelinked". The l_ld
1522 field (the ".dynamic" section of the shared object)
1523 always points at the absolute/resolved address though.
1524 So check whether that address is inside the vDSO's
1525 mapping instead.
1526
1527 E.g., on Linux 3.16 (x86_64) the vDSO is a regular
1528 0-based ELF, and we see:
1529
1530 (gdb) info auxv
1531 33 AT_SYSINFO_EHDR System-supplied DSO's ELF header 0x7ffff7ffb000
1532 (gdb) p/x *_r_debug.r_map.l_next
1533 $1 = {l_addr = 0x7ffff7ffb000, ..., l_ld = 0x7ffff7ffb318, ...}
1534
1535 And on Linux 2.6.32 (x86_64) we see:
1536
1537 (gdb) info auxv
1538 33 AT_SYSINFO_EHDR System-supplied DSO's ELF header 0x7ffff7ffe000
1539 (gdb) p/x *_r_debug.r_map.l_next
1540 $5 = {l_addr = 0x7ffff88fe000, ..., l_ld = 0x7ffff7ffe580, ... }
1541
1542 Dumping that vDSO shows:
1543
1544 (gdb) info proc mappings
1545 0x7ffff7ffe000 0x7ffff7fff000 0x1000 0 [vdso]
1546 (gdb) dump memory vdso.bin 0x7ffff7ffe000 0x7ffff7fff000
1547 # readelf -Wa vdso.bin
1548 [...]
1549 Entry point address: 0xffffffffff700700
1550 [...]
1551 Section Headers:
1552 [Nr] Name Type Address Off Size
1553 [ 0] NULL 0000000000000000 000000 000000
1554 [ 1] .hash HASH ffffffffff700120 000120 000038
1555 [ 2] .dynsym DYNSYM ffffffffff700158 000158 0000d8
1556 [...]
1557 [ 9] .dynamic DYNAMIC ffffffffff700580 000580 0000f0
1558 */
1559
1560 lm_info_svr4 *li = (lm_info_svr4 *) so->lm_info;
1561
1562 if (address_in_mem_range (li->l_ld, &vsyscall_range))
1563 {
1564 *sop = so->next;
1565 free_so (so);
1566 break;
1567 }
1568
1569 sop = &so->next;
1570 }
1571 }
1572
1573 return so_head;
1574 }
1575
1576 /* Get the address of the link_map for a given OBJFILE. */
1577
1578 CORE_ADDR
1579 svr4_fetch_objfile_link_map (struct objfile *objfile)
1580 {
1581 struct so_list *so;
1582 struct svr4_info *info = get_svr4_info ();
1583
1584 /* Cause svr4_current_sos() to be run if it hasn't been already. */
1585 if (info->main_lm_addr == 0)
1586 solib_add (NULL, 0, auto_solib_add);
1587
1588 /* svr4_current_sos() will set main_lm_addr for the main executable. */
1589 if (objfile == symfile_objfile)
1590 return info->main_lm_addr;
1591
1592 /* The other link map addresses may be found by examining the list
1593 of shared libraries. */
1594 for (so = master_so_list (); so; so = so->next)
1595 if (so->objfile == objfile)
1596 {
1597 lm_info_svr4 *li = (lm_info_svr4 *) so->lm_info;
1598
1599 return li->lm_addr;
1600 }
1601
1602 /* Not found! */
1603 return 0;
1604 }
1605
1606 /* On some systems, the only way to recognize the link map entry for
1607 the main executable file is by looking at its name. Return
1608 non-zero iff SONAME matches one of the known main executable names. */
1609
1610 static int
1611 match_main (const char *soname)
1612 {
1613 const char * const *mainp;
1614
1615 for (mainp = main_name_list; *mainp != NULL; mainp++)
1616 {
1617 if (strcmp (soname, *mainp) == 0)
1618 return (1);
1619 }
1620
1621 return (0);
1622 }
1623
1624 /* Return 1 if PC lies in the dynamic symbol resolution code of the
1625 SVR4 run time loader. */
1626
1627 int
1628 svr4_in_dynsym_resolve_code (CORE_ADDR pc)
1629 {
1630 struct svr4_info *info = get_svr4_info ();
1631
1632 return ((pc >= info->interp_text_sect_low
1633 && pc < info->interp_text_sect_high)
1634 || (pc >= info->interp_plt_sect_low
1635 && pc < info->interp_plt_sect_high)
1636 || in_plt_section (pc)
1637 || in_gnu_ifunc_stub (pc));
1638 }
1639
1640 /* Given an executable's ABFD and target, compute the entry-point
1641 address. */
1642
1643 static CORE_ADDR
1644 exec_entry_point (struct bfd *abfd, struct target_ops *targ)
1645 {
1646 CORE_ADDR addr;
1647
1648 /* KevinB wrote ... for most targets, the address returned by
1649 bfd_get_start_address() is the entry point for the start
1650 function. But, for some targets, bfd_get_start_address() returns
1651 the address of a function descriptor from which the entry point
1652 address may be extracted. This address is extracted by
1653 gdbarch_convert_from_func_ptr_addr(). The method
1654 gdbarch_convert_from_func_ptr_addr() is the merely the identify
1655 function for targets which don't use function descriptors. */
1656 addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
1657 bfd_get_start_address (abfd),
1658 targ);
1659 return gdbarch_addr_bits_remove (target_gdbarch (), addr);
1660 }
1661
1662 /* A probe and its associated action. */
1663
1664 struct probe_and_action
1665 {
1666 /* The probe. */
1667 struct probe *probe;
1668
1669 /* The relocated address of the probe. */
1670 CORE_ADDR address;
1671
1672 /* The action. */
1673 enum probe_action action;
1674 };
1675
1676 /* Returns a hash code for the probe_and_action referenced by p. */
1677
1678 static hashval_t
1679 hash_probe_and_action (const void *p)
1680 {
1681 const struct probe_and_action *pa = (const struct probe_and_action *) p;
1682
1683 return (hashval_t) pa->address;
1684 }
1685
1686 /* Returns non-zero if the probe_and_actions referenced by p1 and p2
1687 are equal. */
1688
1689 static int
1690 equal_probe_and_action (const void *p1, const void *p2)
1691 {
1692 const struct probe_and_action *pa1 = (const struct probe_and_action *) p1;
1693 const struct probe_and_action *pa2 = (const struct probe_and_action *) p2;
1694
1695 return pa1->address == pa2->address;
1696 }
1697
1698 /* Register a solib event probe and its associated action in the
1699 probes table. */
1700
1701 static void
1702 register_solib_event_probe (struct probe *probe, CORE_ADDR address,
1703 enum probe_action action)
1704 {
1705 struct svr4_info *info = get_svr4_info ();
1706 struct probe_and_action lookup, *pa;
1707 void **slot;
1708
1709 /* Create the probes table, if necessary. */
1710 if (info->probes_table == NULL)
1711 info->probes_table = htab_create_alloc (1, hash_probe_and_action,
1712 equal_probe_and_action,
1713 xfree, xcalloc, xfree);
1714
1715 lookup.probe = probe;
1716 lookup.address = address;
1717 slot = htab_find_slot (info->probes_table, &lookup, INSERT);
1718 gdb_assert (*slot == HTAB_EMPTY_ENTRY);
1719
1720 pa = XCNEW (struct probe_and_action);
1721 pa->probe = probe;
1722 pa->address = address;
1723 pa->action = action;
1724
1725 *slot = pa;
1726 }
1727
1728 /* Get the solib event probe at the specified location, and the
1729 action associated with it. Returns NULL if no solib event probe
1730 was found. */
1731
1732 static struct probe_and_action *
1733 solib_event_probe_at (struct svr4_info *info, CORE_ADDR address)
1734 {
1735 struct probe_and_action lookup;
1736 void **slot;
1737
1738 lookup.address = address;
1739 slot = htab_find_slot (info->probes_table, &lookup, NO_INSERT);
1740
1741 if (slot == NULL)
1742 return NULL;
1743
1744 return (struct probe_and_action *) *slot;
1745 }
1746
1747 /* Decide what action to take when the specified solib event probe is
1748 hit. */
1749
1750 static enum probe_action
1751 solib_event_probe_action (struct probe_and_action *pa)
1752 {
1753 enum probe_action action;
1754 unsigned probe_argc = 0;
1755 struct frame_info *frame = get_current_frame ();
1756
1757 action = pa->action;
1758 if (action == DO_NOTHING || action == PROBES_INTERFACE_FAILED)
1759 return action;
1760
1761 gdb_assert (action == FULL_RELOAD || action == UPDATE_OR_RELOAD);
1762
1763 /* Check that an appropriate number of arguments has been supplied.
1764 We expect:
1765 arg0: Lmid_t lmid (mandatory)
1766 arg1: struct r_debug *debug_base (mandatory)
1767 arg2: struct link_map *new (optional, for incremental updates) */
1768 TRY
1769 {
1770 probe_argc = get_probe_argument_count (pa->probe, frame);
1771 }
1772 CATCH (ex, RETURN_MASK_ERROR)
1773 {
1774 exception_print (gdb_stderr, ex);
1775 probe_argc = 0;
1776 }
1777 END_CATCH
1778
1779 /* If get_probe_argument_count throws an exception, probe_argc will
1780 be set to zero. However, if pa->probe does not have arguments,
1781 then get_probe_argument_count will succeed but probe_argc will
1782 also be zero. The two cases have different causes, but
1783 they are treated equally here: action will be set to
1784 PROBES_INTERFACE_FAILED. */
1785 if (probe_argc == 2)
1786 action = FULL_RELOAD;
1787 else if (probe_argc < 2)
1788 action = PROBES_INTERFACE_FAILED;
1789
1790 return action;
1791 }
1792
1793 /* Populate the shared object list by reading the entire list of
1794 shared objects from the inferior. Handle special cases relating
1795 to the first elements of the list. Returns nonzero on success. */
1796
1797 static int
1798 solist_update_full (struct svr4_info *info)
1799 {
1800 free_solib_list (info);
1801 info->solib_list = svr4_current_sos_direct (info);
1802
1803 return 1;
1804 }
1805
1806 /* Update the shared object list starting from the link-map entry
1807 passed by the linker in the probe's third argument. Returns
1808 nonzero if the list was successfully updated, or zero to indicate
1809 failure. */
1810
1811 static int
1812 solist_update_incremental (struct svr4_info *info, CORE_ADDR lm)
1813 {
1814 struct so_list *tail;
1815 CORE_ADDR prev_lm;
1816
1817 /* svr4_current_sos_direct contains logic to handle a number of
1818 special cases relating to the first elements of the list. To
1819 avoid duplicating this logic we defer to solist_update_full
1820 if the list is empty. */
1821 if (info->solib_list == NULL)
1822 return 0;
1823
1824 /* Fall back to a full update if we are using a remote target
1825 that does not support incremental transfers. */
1826 if (info->using_xfer && !target_augmented_libraries_svr4_read ())
1827 return 0;
1828
1829 /* Walk to the end of the list. */
1830 for (tail = info->solib_list; tail->next != NULL; tail = tail->next)
1831 /* Nothing. */;
1832
1833 lm_info_svr4 *li = (lm_info_svr4 *) tail->lm_info;
1834 prev_lm = li->lm_addr;
1835
1836 /* Read the new objects. */
1837 if (info->using_xfer)
1838 {
1839 struct svr4_library_list library_list;
1840 char annex[64];
1841
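/* The annex requests an incremental qXfer:libraries-svr4 transfer:
START is the first new link-map entry to read and PREV is the entry
already at the tail of our list, so the target only returns entries
added since then.  */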
1842 xsnprintf (annex, sizeof (annex), "start=%s;prev=%s",
1843 phex_nz (lm, sizeof (lm)),
1844 phex_nz (prev_lm, sizeof (prev_lm)));
1845 if (!svr4_current_sos_via_xfer_libraries (&library_list, annex))
1846 return 0;
1847
1848 tail->next = library_list.head;
1849 }
1850 else
1851 {
1852 struct so_list **link = &tail->next;
1853
1854 /* IGNORE_FIRST may safely be set to zero here because the
1855 above check and deferral to solist_update_full ensures
1856 that this call to svr4_read_so_list will never see the
1857 first element. */
1858 if (!svr4_read_so_list (lm, prev_lm, &link, 0))
1859 return 0;
1860 }
1861
1862 return 1;
1863 }
1864
1865 /* Disable the probes-based linker interface and revert to the
1866 original interface. We don't reset the breakpoints as the
1867 ones set up for the probes-based interface are adequate. */
1868
1869 static void
1870 disable_probes_interface_cleanup (void *arg)
1871 {
1872 struct svr4_info *info = get_svr4_info ();
1873
1874 warning (_("Probes-based dynamic linker interface failed.\n"
1875 "Reverting to original interface.\n"));
1876
1877 free_probes_table (info);
1878 free_solib_list (info);
1879 }
1880
1881 /* Update the solib list as appropriate when using the
1882 probes-based linker interface. Do nothing if using the
1883 standard interface. */
1884
1885 static void
1886 svr4_handle_solib_event (void)
1887 {
1888 struct svr4_info *info = get_svr4_info ();
1889 struct probe_and_action *pa;
1890 enum probe_action action;
1891 struct cleanup *old_chain, *usm_chain;
1892 struct value *val = NULL;
1893 CORE_ADDR pc, debug_base, lm = 0;
1894 struct frame_info *frame = get_current_frame ();
1895
1896 /* Do nothing if not using the probes interface. */
1897 if (info->probes_table == NULL)
1898 return;
1899
1900 /* If anything goes wrong we revert to the original linker
1901 interface. */
1902 old_chain = make_cleanup (disable_probes_interface_cleanup, NULL);
1903
1904 pc = regcache_read_pc (get_current_regcache ());
1905 pa = solib_event_probe_at (info, pc);
1906 if (pa == NULL)
1907 {
1908 do_cleanups (old_chain);
1909 return;
1910 }
1911
1912 action = solib_event_probe_action (pa);
1913 if (action == PROBES_INTERFACE_FAILED)
1914 {
1915 do_cleanups (old_chain);
1916 return;
1917 }
1918
1919 if (action == DO_NOTHING)
1920 {
1921 discard_cleanups (old_chain);
1922 return;
1923 }
1924
1925 /* evaluate_probe_argument looks up symbols in the dynamic linker
1926 using find_pc_section. find_pc_section is accelerated by a cache
1927 called the section map. The section map is invalidated every
1928 time a shared library is loaded or unloaded, and if the inferior
1929 is generating a lot of shared library events then the section map
1930 will be updated every time svr4_handle_solib_event is called.
1931 We called find_pc_section in svr4_create_solib_event_breakpoints,
1932 so we can guarantee that the dynamic linker's sections are in the
1933 section map. We can therefore inhibit section map updates across
1934 these calls to evaluate_probe_argument and save a lot of time. */
1935 inhibit_section_map_updates (current_program_space);
1936 usm_chain = make_cleanup (resume_section_map_updates_cleanup,
1937 current_program_space);
1938
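/* Per the argument layout described in solib_event_probe_action,
argument 1 is the address of the r_debug structure for the namespace
in which the event occurred.  */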
1939 TRY
1940 {
1941 val = evaluate_probe_argument (pa->probe, 1, frame);
1942 }
1943 CATCH (ex, RETURN_MASK_ERROR)
1944 {
1945 exception_print (gdb_stderr, ex);
1946 val = NULL;
1947 }
1948 END_CATCH
1949
1950 if (val == NULL)
1951 {
1952 do_cleanups (old_chain);
1953 return;
1954 }
1955
1956 debug_base = value_as_address (val);
1957 if (debug_base == 0)
1958 {
1959 do_cleanups (old_chain);
1960 return;
1961 }
1962
1963 /* Always locate the debug struct, in case it moved. */
1964 info->debug_base = 0;
1965 if (locate_base (info) == 0)
1966 {
1967 do_cleanups (old_chain);
1968 return;
1969 }
1970
1971 /* GDB does not currently support libraries loaded via dlmopen
1972 into namespaces other than the initial one. We must ignore
1973 any namespace other than the initial namespace here until
1974 support for this is added to GDB. */
1975 if (debug_base != info->debug_base)
1976 action = DO_NOTHING;
1977
1978 if (action == UPDATE_OR_RELOAD)
1979 {
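/* Argument 2 is the optional link-map entry of the object being
added; if present, it lets us update the list incrementally.  */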
1980 TRY
1981 {
1982 val = evaluate_probe_argument (pa->probe, 2, frame);
1983 }
1984 CATCH (ex, RETURN_MASK_ERROR)
1985 {
1986 exception_print (gdb_stderr, ex);
1987 do_cleanups (old_chain);
1988 return;
1989 }
1990 END_CATCH
1991
1992 if (val != NULL)
1993 lm = value_as_address (val);
1994
1995 if (lm == 0)
1996 action = FULL_RELOAD;
1997 }
1998
1999 /* Resume section map updates. */
2000 do_cleanups (usm_chain);
2001
2002 if (action == UPDATE_OR_RELOAD)
2003 {
2004 if (!solist_update_incremental (info, lm))
2005 action = FULL_RELOAD;
2006 }
2007
2008 if (action == FULL_RELOAD)
2009 {
2010 if (!solist_update_full (info))
2011 {
2012 do_cleanups (old_chain);
2013 return;
2014 }
2015 }
2016
2017 discard_cleanups (old_chain);
2018 }
2019
2020 /* Helper function for svr4_update_solib_event_breakpoints. */
2021
2022 static int
2023 svr4_update_solib_event_breakpoint (struct breakpoint *b, void *arg)
2024 {
2025 struct bp_location *loc;
2026
2027 if (b->type != bp_shlib_event)
2028 {
2029 /* Continue iterating. */
2030 return 0;
2031 }
2032
2033 for (loc = b->loc; loc != NULL; loc = loc->next)
2034 {
2035 struct svr4_info *info;
2036 struct probe_and_action *pa;
2037
2038 info = ((struct svr4_info *)
2039 program_space_data (loc->pspace, solib_svr4_pspace_data));
2040 if (info == NULL || info->probes_table == NULL)
2041 continue;
2042
2043 pa = solib_event_probe_at (info, loc->address);
2044 if (pa == NULL)
2045 continue;
2046
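/* Breakpoints on DO_NOTHING probes are only needed when the user has
asked to stop on every solib event, so keep them enabled only while
stop_on_solib_events is set.  */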
2047 if (pa->action == DO_NOTHING)
2048 {
2049 if (b->enable_state == bp_disabled && stop_on_solib_events)
2050 enable_breakpoint (b);
2051 else if (b->enable_state == bp_enabled && !stop_on_solib_events)
2052 disable_breakpoint (b);
2053 }
2054
2055 break;
2056 }
2057
2058 /* Continue iterating. */
2059 return 0;
2060 }
2061
2062 /* Enable or disable optional solib event breakpoints as appropriate.
2063 Called whenever stop_on_solib_events is changed. */
2064
2065 static void
2066 svr4_update_solib_event_breakpoints (void)
2067 {
2068 iterate_over_breakpoints (svr4_update_solib_event_breakpoint, NULL);
2069 }
2070
2071 /* Create and register solib event breakpoints. PROBES is an array
2072 of NUM_PROBES elements, each of which is vector of probes. A
2073 solib event breakpoint will be created and registered for each
2074 probe. */
2075
2076 static void
2077 svr4_create_probe_breakpoints (struct gdbarch *gdbarch,
2078 VEC (probe_p) **probes,
2079 struct objfile *objfile)
2080 {
2081 int i;
2082
2083 for (i = 0; i < NUM_PROBES; i++)
2084 {
2085 enum probe_action action = probe_info[i].action;
2086 struct probe *probe;
2087 int ix;
2088
2089 for (ix = 0;
2090 VEC_iterate (probe_p, probes[i], ix, probe);
2091 ++ix)
2092 {
2093 CORE_ADDR address = get_probe_address (probe, objfile);
2094
2095 create_solib_event_breakpoint (gdbarch, address);
2096 register_solib_event_probe (probe, address, action);
2097 }
2098 }
2099
2100 svr4_update_solib_event_breakpoints ();
2101 }
2102
2103 /* Both the SunOS and the SVR4 dynamic linkers call a marker function
2104 before and after mapping and unmapping shared libraries. The sole
2105 purpose of this method is to allow debuggers to set a breakpoint so
2106 they can track these changes.
2107
2108 Some versions of the glibc dynamic linker contain named probes
2109 to allow more fine grained stopping. Given the address of the
2110 original marker function, this function attempts to find these
2111 probes, and if found, sets breakpoints on those instead. If the
2112 probes aren't found, a single breakpoint is set on the original
2113 marker function. */
2114
2115 static void
2116 svr4_create_solib_event_breakpoints (struct gdbarch *gdbarch,
2117 CORE_ADDR address)
2118 {
2119 struct obj_section *os;
2120
2121 os = find_pc_section (address);
2122 if (os != NULL)
2123 {
2124 int with_prefix;
2125
2126 for (with_prefix = 0; with_prefix <= 1; with_prefix++)
2127 {
2128 VEC (probe_p) *probes[NUM_PROBES];
2129 int all_probes_found = 1;
2130 int checked_can_use_probe_arguments = 0;
2131 int i;
2132
2133 memset (probes, 0, sizeof (probes));
2134 for (i = 0; i < NUM_PROBES; i++)
2135 {
2136 const char *name = probe_info[i].name;
2137 struct probe *p;
2138 char buf[32];
2139
2140 /* Fedora 17 and Red Hat Enterprise Linux 6.2-6.4
2141 shipped with an early version of the probes code in
2142 which the probes' names were prefixed with "rtld_"
2143 and the "map_failed" probe did not exist. The
2144 locations of the probes are otherwise the same, so
2145 we check for probes with prefixed names if probes
2146 with unprefixed names are not present. */
2147 if (with_prefix)
2148 {
2149 xsnprintf (buf, sizeof (buf), "rtld_%s", name);
2150 name = buf;
2151 }
2152
2153 probes[i] = find_probes_in_objfile (os->objfile, "rtld", name);
2154
2155 /* The "map_failed" probe did not exist in early
2156 versions of the probes code in which the probes'
2157 names were prefixed with "rtld_". */
2158 if (strcmp (name, "rtld_map_failed") == 0)
2159 continue;
2160
2161 if (VEC_empty (probe_p, probes[i]))
2162 {
2163 all_probes_found = 0;
2164 break;
2165 }
2166
2167 /* Ensure probe arguments can be evaluated. */
2168 if (!checked_can_use_probe_arguments)
2169 {
2170 p = VEC_index (probe_p, probes[i], 0);
2171 if (!can_evaluate_probe_arguments (p))
2172 {
2173 all_probes_found = 0;
2174 break;
2175 }
2176 checked_can_use_probe_arguments = 1;
2177 }
2178 }
2179
2180 if (all_probes_found)
2181 svr4_create_probe_breakpoints (gdbarch, probes, os->objfile);
2182
2183 for (i = 0; i < NUM_PROBES; i++)
2184 VEC_free (probe_p, probes[i]);
2185
2186 if (all_probes_found)
2187 return;
2188 }
2189 }
2190
2191 create_solib_event_breakpoint (gdbarch, address);
2192 }
2193
2194 /* Helper function for gdb_bfd_lookup_symbol. Matches SYM if its name equals DATA and SYM is defined in a code or data section. */
2195
2196 static int
2197 cmp_name_and_sec_flags (const asymbol *sym, const void *data)
2198 {
2199 return (strcmp (sym->name, (const char *) data) == 0
2200 && (sym->section->flags & (SEC_CODE | SEC_DATA)) != 0);
2201 }
2202 /* Arrange for dynamic linker to hit breakpoint.
2203
2204 Both the SunOS and the SVR4 dynamic linkers have, as part of their
2205 debugger interface, support for arranging for the inferior to hit
2206 a breakpoint after mapping in the shared libraries. This function
2207 enables that breakpoint.
2208
2209 For SunOS, there is a special flag location (in_debugger) which we
2210 set to 1. When the dynamic linker sees this flag set, it will set
2211 a breakpoint at a location known only to itself, after saving the
2212 original contents of that place and the breakpoint address itself,
2213 in its own internal structures. When we resume the inferior, it
2214 will eventually take a SIGTRAP when it runs into the breakpoint.
2215 We handle this (in a different place) by restoring the contents of
2216 the breakpointed location (which is only known after it stops),
2217 chasing around to locate the shared libraries that have been
2218 loaded, then resuming.
2219
2220 For SVR4, the debugger interface structure contains a member (r_brk)
2221 which is statically initialized at the time the shared library is
2222 built, to the offset of a function (_r_debug_state) which is
2223 guaranteed to be called once before mapping in a library, and again when
2224 the mapping is complete. At the time we are examining this member,
2225 it contains only the unrelocated offset of the function, so we have
2226 to do our own relocation. Later, when the dynamic linker actually
2227 runs, it relocates r_brk to be the actual address of _r_debug_state().
2228
2229 The debugger interface structure also contains an enumeration which
2230 is set to either RT_ADD or RT_DELETE prior to changing the mapping,
2231 depending upon whether the library is being mapped or unmapped,
2232 and then set to RT_CONSISTENT after the library is mapped/unmapped. */
2233
2234 static int
2235 enable_break (struct svr4_info *info, int from_tty)
2236 {
2237 struct bound_minimal_symbol msymbol;
2238 const char * const *bkpt_namep;
2239 asection *interp_sect;
2240 char *interp_name;
2241 CORE_ADDR sym_addr;
2242
2243 info->interp_text_sect_low = info->interp_text_sect_high = 0;
2244 info->interp_plt_sect_low = info->interp_plt_sect_high = 0;
2245
2246 /* If we already have a shared library list in the target, and
2247 r_debug contains r_brk, set the breakpoint there - this should
2248 mean r_brk has already been relocated. Assume the dynamic linker
2249 is the object containing r_brk. */
2250
2251 solib_add (NULL, from_tty, auto_solib_add);
2252 sym_addr = 0;
2253 if (info->debug_base && solib_svr4_r_map (info) != 0)
2254 sym_addr = solib_svr4_r_brk (info);
2255
2256 if (sym_addr != 0)
2257 {
2258 struct obj_section *os;
2259
2260 sym_addr = gdbarch_addr_bits_remove
2261 (target_gdbarch (), gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
2262 sym_addr,
2263 &current_target));
2264
2265 /* On at least some versions of Solaris there's a dynamic relocation
2266 on _r_debug.r_brk and SYM_ADDR may not be relocated yet, e.g., if
2267 we get control before the dynamic linker has self-relocated.
2268 Check if SYM_ADDR is in a known section; if it is, assume we can
2269 trust its value. This is just a heuristic though; it could go away
2270 or be replaced if it's getting in the way.
2271
2272 On ARM we need to know whether the ISA of rtld_db_dlactivity (or
2273 however it's spelled in your particular system) is ARM or Thumb.
2274 That knowledge is encoded in the address, if it's Thumb the low bit
2275 is 1. However, we've stripped that info above and it's not clear
2276 what all the consequences are of passing a non-addr_bits_remove'd
2277 address to svr4_create_solib_event_breakpoints. The call to
2278 find_pc_section verifies we know about the address and have some
2279 hope of computing the right kind of breakpoint to use (via
2280 symbol info). It does mean that GDB needs to be pointed at a
2281 non-stripped version of the dynamic linker in order to obtain
2282 information it already knows about. Sigh. */
2283
2284 os = find_pc_section (sym_addr);
2285 if (os != NULL)
2286 {
2287 /* Record the relocated start and end address of the dynamic linker
2288 text and plt section for svr4_in_dynsym_resolve_code. */
2289 bfd *tmp_bfd;
2290 CORE_ADDR load_addr;
2291
2292 tmp_bfd = os->objfile->obfd;
2293 load_addr = ANOFFSET (os->objfile->section_offsets,
2294 SECT_OFF_TEXT (os->objfile));
2295
2296 interp_sect = bfd_get_section_by_name (tmp_bfd, ".text");
2297 if (interp_sect)
2298 {
2299 info->interp_text_sect_low =
2300 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
2301 info->interp_text_sect_high =
2302 info->interp_text_sect_low
2303 + bfd_section_size (tmp_bfd, interp_sect);
2304 }
2305 interp_sect = bfd_get_section_by_name (tmp_bfd, ".plt");
2306 if (interp_sect)
2307 {
2308 info->interp_plt_sect_low =
2309 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
2310 info->interp_plt_sect_high =
2311 info->interp_plt_sect_low
2312 + bfd_section_size (tmp_bfd, interp_sect);
2313 }
2314
2315 svr4_create_solib_event_breakpoints (target_gdbarch (), sym_addr);
2316 return 1;
2317 }
2318 }
2319
2320 /* Find the program interpreter; if not found, warn the user and drop
2321 into the old breakpoint-at-symbol code. */
2322 interp_name = find_program_interpreter ();
2323 if (interp_name)
2324 {
2325 CORE_ADDR load_addr = 0;
2326 int load_addr_found = 0;
2327 int loader_found_in_list = 0;
2328 struct so_list *so;
2329 struct target_ops *tmp_bfd_target;
2330
2331 sym_addr = 0;
2332
2333 /* Now we need to figure out where the dynamic linker was
2334 loaded so that we can load its symbols and place a breakpoint
2335 in the dynamic linker itself.
2336
2337 This address is stored on the stack. However, I've been unable
2338 to find any magic formula to find it for Solaris (appears to
2339 be trivial on GNU/Linux). Therefore, we have to try an alternate
2340 mechanism to find the dynamic linker's base address. */
2341
2342 gdb_bfd_ref_ptr tmp_bfd;
2343 TRY
2344 {
2345 tmp_bfd = solib_bfd_open (interp_name);
2346 }
2347 CATCH (ex, RETURN_MASK_ALL)
2348 {
2349 }
2350 END_CATCH
2351
2352 if (tmp_bfd == NULL)
2353 goto bkpt_at_symbol;
2354
2355 /* Now convert the TMP_BFD into a target. That way both target
2356 and BFD operations can be used. target_bfd_reopen
2357 acquires its own reference. */
2358 tmp_bfd_target = target_bfd_reopen (tmp_bfd.get ());
2359
2360 /* On a running target, we can get the dynamic linker's base
2361 address from the shared library table. */
2362 so = master_so_list ();
2363 while (so)
2364 {
2365 if (svr4_same_1 (interp_name, so->so_original_name))
2366 {
2367 load_addr_found = 1;
2368 loader_found_in_list = 1;
2369 load_addr = lm_addr_check (so, tmp_bfd.get ());
2370 break;
2371 }
2372 so = so->next;
2373 }
2374
2375 /* If we were not able to find the base address of the loader
2376 from our so_list, then try using the AT_BASE auxiliary entry. */
2377 if (!load_addr_found)
2378 if (target_auxv_search (&current_target, AT_BASE, &load_addr) > 0)
2379 {
2380 int addr_bit = gdbarch_addr_bit (target_gdbarch ());
2381
2382 /* Ensure LOAD_ADDR has the proper sign in its upper bits so that
2383 `+ load_addr' wraps around within the CORE_ADDR width instead of
2384 creating invalid addresses like 0x101234567 for 32-bit inferiors
2385 on 64-bit GDB. */
2386
2387 if (addr_bit < (sizeof (CORE_ADDR) * HOST_CHAR_BIT))
2388 {
2389 CORE_ADDR space_size = (CORE_ADDR) 1 << addr_bit;
2390 CORE_ADDR tmp_entry_point = exec_entry_point (tmp_bfd.get (),
2391 tmp_bfd_target);
2392
2393 gdb_assert (load_addr < space_size);
2394
2395 /* TMP_ENTRY_POINT exceeding SPACE_SIZE would mean a prelinked
2396 64-bit ld.so with a 32-bit executable; it should not happen. */
2397
2398 if (tmp_entry_point < space_size
2399 && tmp_entry_point + load_addr >= space_size)
2400 load_addr -= space_size;
2401 }
2402
2403 load_addr_found = 1;
2404 }
2405
2406 /* Otherwise we find the dynamic linker's base address by examining
2407 the current pc (which should point at the entry point for the
2408 dynamic linker) and subtracting the offset of the entry point.
2409
2410 This is more fragile than the previous approaches, but is a good
2411 fallback method because it has actually been working well in
2412 most cases. */
2413 if (!load_addr_found)
2414 {
2415 struct regcache *regcache
2416 = get_thread_arch_regcache (inferior_ptid, target_gdbarch ());
2417
2418 load_addr = (regcache_read_pc (regcache)
2419 - exec_entry_point (tmp_bfd.get (), tmp_bfd_target));
2420 }
2421
2422 if (!loader_found_in_list)
2423 {
2424 info->debug_loader_name = xstrdup (interp_name);
2425 info->debug_loader_offset_p = 1;
2426 info->debug_loader_offset = load_addr;
2427 solib_add (NULL, from_tty, auto_solib_add);
2428 }
2429
2430 /* Record the relocated start and end address of the dynamic linker
2431 text and plt section for svr4_in_dynsym_resolve_code. */
2432 interp_sect = bfd_get_section_by_name (tmp_bfd.get (), ".text");
2433 if (interp_sect)
2434 {
2435 info->interp_text_sect_low =
2436 bfd_section_vma (tmp_bfd.get (), interp_sect) + load_addr;
2437 info->interp_text_sect_high =
2438 info->interp_text_sect_low
2439 + bfd_section_size (tmp_bfd.get (), interp_sect);
2440 }
2441 interp_sect = bfd_get_section_by_name (tmp_bfd.get (), ".plt");
2442 if (interp_sect)
2443 {
2444 info->interp_plt_sect_low =
2445 bfd_section_vma (tmp_bfd.get (), interp_sect) + load_addr;
2446 info->interp_plt_sect_high =
2447 info->interp_plt_sect_low
2448 + bfd_section_size (tmp_bfd.get (), interp_sect);
2449 }
2450
2451 /* Now try to set a breakpoint in the dynamic linker. */
2452 for (bkpt_namep = solib_break_names; *bkpt_namep != NULL; bkpt_namep++)
2453 {
2454 sym_addr = gdb_bfd_lookup_symbol (tmp_bfd.get (),
2455 cmp_name_and_sec_flags,
2456 *bkpt_namep);
2457 if (sym_addr != 0)
2458 break;
2459 }
2460
2461 if (sym_addr != 0)
2462 /* Convert 'sym_addr' from a function pointer to an address.
2463 Because we pass tmp_bfd_target instead of the current
2464 target, this will always produce an unrelocated value. */
2465 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
2466 sym_addr,
2467 tmp_bfd_target);
2468
2469 /* We're done with both the temporary bfd and target. Closing
2470 the target closes the underlying bfd, because it holds the
2471 only remaining reference. */
2472 target_close (tmp_bfd_target);
2473
2474 if (sym_addr != 0)
2475 {
2476 svr4_create_solib_event_breakpoints (target_gdbarch (),
2477 load_addr + sym_addr);
2478 xfree (interp_name);
2479 return 1;
2480 }
2481
2482 /* For whatever reason we couldn't set a breakpoint in the dynamic
2483 linker. Warn and drop into the old code. */
2484 bkpt_at_symbol:
2485 xfree (interp_name);
2486 warning (_("Unable to find dynamic linker breakpoint function.\n"
2487 "GDB will be unable to debug shared library initializers\n"
2488 "and track explicitly loaded dynamic code."));
2489 }
2490
2491 /* Scan through the lists of symbols, trying to look up the symbol and
2492 set a breakpoint there. Terminate the loop if and when we succeed. */
2493
2494 for (bkpt_namep = solib_break_names; *bkpt_namep != NULL; bkpt_namep++)
2495 {
2496 msymbol = lookup_minimal_symbol (*bkpt_namep, NULL, symfile_objfile);
2497 if ((msymbol.minsym != NULL)
2498 && (BMSYMBOL_VALUE_ADDRESS (msymbol) != 0))
2499 {
2500 sym_addr = BMSYMBOL_VALUE_ADDRESS (msymbol);
2501 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
2502 sym_addr,
2503 &current_target);
2504 svr4_create_solib_event_breakpoints (target_gdbarch (), sym_addr);
2505 return 1;
2506 }
2507 }
2508
2509 if (interp_name != NULL && !current_inferior ()->attach_flag)
2510 {
2511 for (bkpt_namep = bkpt_names; *bkpt_namep != NULL; bkpt_namep++)
2512 {
2513 msymbol = lookup_minimal_symbol (*bkpt_namep, NULL, symfile_objfile);
2514 if ((msymbol.minsym != NULL)
2515 && (BMSYMBOL_VALUE_ADDRESS (msymbol) != 0))
2516 {
2517 sym_addr = BMSYMBOL_VALUE_ADDRESS (msymbol);
2518 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
2519 sym_addr,
2520 &current_target);
2521 svr4_create_solib_event_breakpoints (target_gdbarch (), sym_addr);
2522 return 1;
2523 }
2524 }
2525 }
2526 return 0;
2527 }
2528
2529 /* Read the ELF program headers from ABFD. Return the contents and
2530 set *PHDRS_SIZE to the size of the program headers. */
2531
2532 static gdb_byte *
2533 read_program_headers_from_bfd (bfd *abfd, int *phdrs_size)
2534 {
2535 Elf_Internal_Ehdr *ehdr;
2536 gdb_byte *buf;
2537
2538 ehdr = elf_elfheader (abfd);
2539
2540 *phdrs_size = ehdr->e_phnum * ehdr->e_phentsize;
2541 if (*phdrs_size == 0)
2542 return NULL;
2543
2544 buf = (gdb_byte *) xmalloc (*phdrs_size);
2545 if (bfd_seek (abfd, ehdr->e_phoff, SEEK_SET) != 0
2546 || bfd_bread (buf, *phdrs_size, abfd) != *phdrs_size)
2547 {
2548 xfree (buf);
2549 return NULL;
2550 }
2551
2552 return buf;
2553 }
2554
2555 /* Return 1 and fill *DISPLACEMENTP with detected PIE offset of inferior
2556 exec_bfd. Otherwise return 0.
2557
2558 We relocate all of the sections by the same amount. This
2559 behavior is mandated by recent editions of the System V ABI.
2560 According to the System V Application Binary Interface,
2561 Edition 4.1, page 5-5:
2562
2563 ... Though the system chooses virtual addresses for
2564 individual processes, it maintains the segments' relative
2565 positions. Because position-independent code uses relative
2566 addressing between segments, the difference between
2567 virtual addresses in memory must match the difference
2568 between virtual addresses in the file. The difference
2569 between the virtual address of any segment in memory and
2570 the corresponding virtual address in the file is thus a
2571 single constant value for any one executable or shared
2572 object in a given process. This difference is the base
2573 address. One use of the base address is to relocate the
2574 memory image of the program during dynamic linking.
2575
2576 The same language also appears in Edition 4.0 of the System V
2577 ABI and is left unspecified in some of the earlier editions.
2578
2579 Decide if the objfile needs to be relocated. As indicated above, we will
2580 only be here when execution is stopped. But during attachment the PC can be at
2581 an arbitrary address, so regcache_read_pc can be misleading (contrary to
2582 the auxv AT_ENTRY value). Moreover, for an executable with an interpreter section,
2583 regcache_read_pc would point to the interpreter and not the main executable.
2584
2585 So, to summarize, relocations are necessary when the start address obtained
2586 from the executable is different from the address in auxv AT_ENTRY entry.
2587
2588 [ The astute reader will note that we also test to make sure that
2589 the executable in question has the DYNAMIC flag set. It is my
2590 opinion that this test is unnecessary (undesirable even). It
2591 was added to avoid inadvertent relocation of an executable
2592 whose e_type member in the ELF header is not ET_DYN. There may
2593 be a time in the future when it is desirable to do relocations
2594 on other types of files as well in which case this condition
2595 should either be removed or modified to accommodate the new file
2596 type. - Kevin, Nov 2000. ] */
2597
2598 static int
2599 svr4_exec_displacement (CORE_ADDR *displacementp)
2600 {
2601 /* ENTRY_POINT is a possible function descriptor - before
2602 a call to gdbarch_convert_from_func_ptr_addr. */
2603 CORE_ADDR entry_point, exec_displacement;
2604
2605 if (exec_bfd == NULL)
2606 return 0;
2607
2608 /* If the DYNAMIC flag is not set, the ELF file is ET_EXEC rather than
2609 ET_DYN and cannot be displaced. Both shared libraries being executed
2610 themselves and PIE (Position Independent Executable) executables are ET_DYN. */
2611
2612 if ((bfd_get_file_flags (exec_bfd) & DYNAMIC) == 0)
2613 return 0;
2614
2615 if (target_auxv_search (&current_target, AT_ENTRY, &entry_point) <= 0)
2616 return 0;
2617
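/* The candidate displacement is the difference between where the
inferior's entry point actually is (auxv AT_ENTRY) and where the
executable's ELF header says it should be.  */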
2618 exec_displacement = entry_point - bfd_get_start_address (exec_bfd);
2619
2620 /* Verify the EXEC_DISPLACEMENT candidate complies with the required page
2621 alignment. It is cheaper than the program headers comparison below. */
2622
2623 if (bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
2624 {
2625 const struct elf_backend_data *elf = get_elf_backend_data (exec_bfd);
2626
2627 /* p_align of PT_LOAD segments does not specify any alignment but
2628 only congruency of addresses:
2629 p_offset % p_align == p_vaddr % p_align
2630 The kernel is free to load the executable with a lower alignment. */
2631
2632 if ((exec_displacement & (elf->minpagesize - 1)) != 0)
2633 return 0;
2634 }
2635
2636 /* Verify that the auxiliary vector describes the same file as exec_bfd, by
2637 comparing their program headers. If the program headers in the auxiliary
2638 vector do not match the program headers in the executable, then we are
2639 looking at a different file than the one used by the kernel - for
2640 instance, "gdb program" connected to "gdbserver :PORT ld.so program". */
2641
2642 if (bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
2643 {
2644 /* Be optimistic and clear OK only if GDB was able to verify the headers
2645 really do not match. */
2646 int phdrs_size, phdrs2_size, ok = 1;
2647 gdb_byte *buf, *buf2;
2648 int arch_size;
2649
2650 buf = read_program_header (-1, &phdrs_size, &arch_size, NULL);
2651 buf2 = read_program_headers_from_bfd (exec_bfd, &phdrs2_size);
2652 if (buf != NULL && buf2 != NULL)
2653 {
2654 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
2655
2656 /* We are dealing with three different addresses. EXEC_BFD
2657 represents the addresses in the on-disk file. The target memory content
2658 may differ from EXEC_BFD, as the file may have been prelinked
2659 to a different address after the executable was loaded.
2660 Moreover the address of placement in target memory can be
2661 different from what the program headers in target memory say -
2662 this is the goal of PIE.
2663
2664 Detected DISPLACEMENT covers both the offsets of PIE placement and
2665 possible new prelink performed after start of the program. Here
2666 relocate BUF and BUF2 just by the EXEC_BFD vs. target memory
2667 content offset for the verification purpose. */
2668
2669 if (phdrs_size != phdrs2_size
2670 || bfd_get_arch_size (exec_bfd) != arch_size)
2671 ok = 0;
2672 else if (arch_size == 32
2673 && phdrs_size >= sizeof (Elf32_External_Phdr)
2674 && phdrs_size % sizeof (Elf32_External_Phdr) == 0)
2675 {
2676 Elf_Internal_Ehdr *ehdr2 = elf_tdata (exec_bfd)->elf_header;
2677 Elf_Internal_Phdr *phdr2 = elf_tdata (exec_bfd)->phdr;
2678 CORE_ADDR displacement = 0;
2679 int i;
2680
2681 /* DISPLACEMENT could be found more easily by the difference of
2682 ehdr2->e_entry. But we haven't read the ehdr yet, and we
2683 already have enough information to compute that displacement
2684 with what we've read. */
2685
2686 for (i = 0; i < ehdr2->e_phnum; i++)
2687 if (phdr2[i].p_type == PT_LOAD)
2688 {
2689 Elf32_External_Phdr *phdrp;
2690 gdb_byte *buf_vaddr_p, *buf_paddr_p;
2691 CORE_ADDR vaddr, paddr;
2692 CORE_ADDR displacement_vaddr = 0;
2693 CORE_ADDR displacement_paddr = 0;
2694
2695 phdrp = &((Elf32_External_Phdr *) buf)[i];
2696 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
2697 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
2698
2699 vaddr = extract_unsigned_integer (buf_vaddr_p, 4,
2700 byte_order);
2701 displacement_vaddr = vaddr - phdr2[i].p_vaddr;
2702
2703 paddr = extract_unsigned_integer (buf_paddr_p, 4,
2704 byte_order);
2705 displacement_paddr = paddr - phdr2[i].p_paddr;
2706
2707 if (displacement_vaddr == displacement_paddr)
2708 displacement = displacement_vaddr;
2709
2710 break;
2711 }
2712
2713 /* Now compare BUF and BUF2 with optional DISPLACEMENT. */
2714
2715 for (i = 0; i < phdrs_size / sizeof (Elf32_External_Phdr); i++)
2716 {
2717 Elf32_External_Phdr *phdrp;
2718 Elf32_External_Phdr *phdr2p;
2719 gdb_byte *buf_vaddr_p, *buf_paddr_p;
2720 CORE_ADDR vaddr, paddr;
2721 asection *plt2_asect;
2722
2723 phdrp = &((Elf32_External_Phdr *) buf)[i];
2724 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
2725 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
2726 phdr2p = &((Elf32_External_Phdr *) buf2)[i];
2727
2728 /* PT_GNU_STACK is an exception: it is never relocated by
2729 prelink, as its addresses are always zero. */
2730
2731 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2732 continue;
2733
2734 /* Check also other adjustment combinations - PR 11786. */
2735
2736 vaddr = extract_unsigned_integer (buf_vaddr_p, 4,
2737 byte_order);
2738 vaddr -= displacement;
2739 store_unsigned_integer (buf_vaddr_p, 4, byte_order, vaddr);
2740
2741 paddr = extract_unsigned_integer (buf_paddr_p, 4,
2742 byte_order);
2743 paddr -= displacement;
2744 store_unsigned_integer (buf_paddr_p, 4, byte_order, paddr);
2745
2746 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2747 continue;
2748
2749 /* Strip modifies the flags and alignment of PT_GNU_RELRO.
2750 CentOS-5 has problems with filesz, memsz as well.
2751 See PR 11786. */
2752 if (phdr2[i].p_type == PT_GNU_RELRO)
2753 {
2754 Elf32_External_Phdr tmp_phdr = *phdrp;
2755 Elf32_External_Phdr tmp_phdr2 = *phdr2p;
2756
2757 memset (tmp_phdr.p_filesz, 0, 4);
2758 memset (tmp_phdr.p_memsz, 0, 4);
2759 memset (tmp_phdr.p_flags, 0, 4);
2760 memset (tmp_phdr.p_align, 0, 4);
2761 memset (tmp_phdr2.p_filesz, 0, 4);
2762 memset (tmp_phdr2.p_memsz, 0, 4);
2763 memset (tmp_phdr2.p_flags, 0, 4);
2764 memset (tmp_phdr2.p_align, 0, 4);
2765
2766 if (memcmp (&tmp_phdr, &tmp_phdr2, sizeof (tmp_phdr))
2767 == 0)
2768 continue;
2769 }
2770
2771 /* prelink can convert .plt SHT_NOBITS to SHT_PROGBITS. */
2772 plt2_asect = bfd_get_section_by_name (exec_bfd, ".plt");
2773 if (plt2_asect)
2774 {
2775 int content2;
2776 gdb_byte *buf_filesz_p = (gdb_byte *) &phdrp->p_filesz;
2777 CORE_ADDR filesz;
2778
2779 content2 = (bfd_get_section_flags (exec_bfd, plt2_asect)
2780 & SEC_HAS_CONTENTS) != 0;
2781
2782 filesz = extract_unsigned_integer (buf_filesz_p, 4,
2783 byte_order);
2784
2785 /* PLT2_ASECT is from on-disk file (exec_bfd) while
2786 FILESZ is from the in-memory image. */
2787 if (content2)
2788 filesz += bfd_get_section_size (plt2_asect);
2789 else
2790 filesz -= bfd_get_section_size (plt2_asect);
2791
2792 store_unsigned_integer (buf_filesz_p, 4, byte_order,
2793 filesz);
2794
2795 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2796 continue;
2797 }
2798
2799 ok = 0;
2800 break;
2801 }
2802 }
2803 else if (arch_size == 64
2804 && phdrs_size >= sizeof (Elf64_External_Phdr)
2805 && phdrs_size % sizeof (Elf64_External_Phdr) == 0)
2806 {
2807 Elf_Internal_Ehdr *ehdr2 = elf_tdata (exec_bfd)->elf_header;
2808 Elf_Internal_Phdr *phdr2 = elf_tdata (exec_bfd)->phdr;
2809 CORE_ADDR displacement = 0;
2810 int i;
2811
2812 /* DISPLACEMENT could be found more easily by the difference of
2813 ehdr2->e_entry. But we haven't read the ehdr yet, and we
2814 already have enough information to compute that displacement
2815 with what we've read. */
2816
2817 for (i = 0; i < ehdr2->e_phnum; i++)
2818 if (phdr2[i].p_type == PT_LOAD)
2819 {
2820 Elf64_External_Phdr *phdrp;
2821 gdb_byte *buf_vaddr_p, *buf_paddr_p;
2822 CORE_ADDR vaddr, paddr;
2823 CORE_ADDR displacement_vaddr = 0;
2824 CORE_ADDR displacement_paddr = 0;
2825
2826 phdrp = &((Elf64_External_Phdr *) buf)[i];
2827 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
2828 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
2829
2830 vaddr = extract_unsigned_integer (buf_vaddr_p, 8,
2831 byte_order);
2832 displacement_vaddr = vaddr - phdr2[i].p_vaddr;
2833
2834 paddr = extract_unsigned_integer (buf_paddr_p, 8,
2835 byte_order);
2836 displacement_paddr = paddr - phdr2[i].p_paddr;
2837
2838 if (displacement_vaddr == displacement_paddr)
2839 displacement = displacement_vaddr;
2840
2841 break;
2842 }
2843
2844 /* Now compare BUF and BUF2 with optional DISPLACEMENT. */
2845
2846 for (i = 0; i < phdrs_size / sizeof (Elf64_External_Phdr); i++)
2847 {
2848 Elf64_External_Phdr *phdrp;
2849 Elf64_External_Phdr *phdr2p;
2850 gdb_byte *buf_vaddr_p, *buf_paddr_p;
2851 CORE_ADDR vaddr, paddr;
2852 asection *plt2_asect;
2853
2854 phdrp = &((Elf64_External_Phdr *) buf)[i];
2855 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
2856 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
2857 phdr2p = &((Elf64_External_Phdr *) buf2)[i];
2858
2859 /* PT_GNU_STACK is an exception: it is never relocated by
2860 prelink, as its addresses are always zero. */
2861
2862 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2863 continue;
2864
2865 /* Check also other adjustment combinations - PR 11786. */
2866
2867 vaddr = extract_unsigned_integer (buf_vaddr_p, 8,
2868 byte_order);
2869 vaddr -= displacement;
2870 store_unsigned_integer (buf_vaddr_p, 8, byte_order, vaddr);
2871
2872 paddr = extract_unsigned_integer (buf_paddr_p, 8,
2873 byte_order);
2874 paddr -= displacement;
2875 store_unsigned_integer (buf_paddr_p, 8, byte_order, paddr);
2876
2877 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2878 continue;
2879
2880 /* Strip modifies the flags and alignment of PT_GNU_RELRO.
2881 CentOS-5 has problems with filesz, memsz as well.
2882 See PR 11786. */
2883 if (phdr2[i].p_type == PT_GNU_RELRO)
2884 {
2885 Elf64_External_Phdr tmp_phdr = *phdrp;
2886 Elf64_External_Phdr tmp_phdr2 = *phdr2p;
2887
2888 memset (tmp_phdr.p_filesz, 0, 8);
2889 memset (tmp_phdr.p_memsz, 0, 8);
2890 memset (tmp_phdr.p_flags, 0, 4);
2891 memset (tmp_phdr.p_align, 0, 8);
2892 memset (tmp_phdr2.p_filesz, 0, 8);
2893 memset (tmp_phdr2.p_memsz, 0, 8);
2894 memset (tmp_phdr2.p_flags, 0, 4);
2895 memset (tmp_phdr2.p_align, 0, 8);
2896
2897 if (memcmp (&tmp_phdr, &tmp_phdr2, sizeof (tmp_phdr))
2898 == 0)
2899 continue;
2900 }
2901
2902 /* prelink can convert .plt SHT_NOBITS to SHT_PROGBITS. */
2903 plt2_asect = bfd_get_section_by_name (exec_bfd, ".plt");
2904 if (plt2_asect)
2905 {
2906 int content2;
2907 gdb_byte *buf_filesz_p = (gdb_byte *) &phdrp->p_filesz;
2908 CORE_ADDR filesz;
2909
2910 content2 = (bfd_get_section_flags (exec_bfd, plt2_asect)
2911 & SEC_HAS_CONTENTS) != 0;
2912
2913 filesz = extract_unsigned_integer (buf_filesz_p, 8,
2914 byte_order);
2915
2916 /* PLT2_ASECT is from on-disk file (exec_bfd) while
2917 FILESZ is from the in-memory image. */
2918 if (content2)
2919 filesz += bfd_get_section_size (plt2_asect);
2920 else
2921 filesz -= bfd_get_section_size (plt2_asect);
2922
2923 store_unsigned_integer (buf_filesz_p, 8, byte_order,
2924 filesz);
2925
2926 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2927 continue;
2928 }
2929
2930 ok = 0;
2931 break;
2932 }
2933 }
2934 else
2935 ok = 0;
2936 }
2937
2938 xfree (buf);
2939 xfree (buf2);
2940
2941 if (!ok)
2942 return 0;
2943 }
2944
2945 if (info_verbose)
2946 {
2947 /* This may be printed repeatedly, as there is no easy way to check
2948 whether the executable's symbols/file have already been relocated
2949 by the displacement. */
2950
2951 printf_unfiltered (_("Using PIE (Position Independent Executable) "
2952 "displacement %s for \"%s\".\n"),
2953 paddress (target_gdbarch (), exec_displacement),
2954 bfd_get_filename (exec_bfd));
2955 }
2956
2957 *displacementp = exec_displacement;
2958 return 1;
2959 }
2960
2961 /* Relocate the main executable. This function should be called upon
2962 stopping the inferior process at the entry point to the program.
2963 The entry point from BFD is compared to the AT_ENTRY of AUXV and if they are
2964 different, the main executable is relocated by the proper amount. */
2965
2966 static void
2967 svr4_relocate_main_executable (void)
2968 {
2969 CORE_ADDR displacement;
2970
2971 /* If we are re-running this executable, SYMFILE_OBJFILE->SECTION_OFFSETS
2972 probably contains the offsets computed using the PIE displacement
2973 from the previous run, which of course are irrelevant for this run.
2974 So we need to determine the new PIE displacement and recompute the
2975 section offsets accordingly, even if SYMFILE_OBJFILE->SECTION_OFFSETS
2976 already contains pre-computed offsets.
2977
2978 If we cannot compute the PIE displacement, either:
2979
2980 - The executable is not PIE.
2981
2982 - SYMFILE_OBJFILE does not match the executable started in the target.
2983 This can happen for main executable symbols loaded at the host while
2984 `ld.so --ld-args main-executable' is loaded in the target.
2985
2986 Then we leave the section offsets untouched and use them as is for
2987 this run. Either:
2988
2989 - These section offsets were properly reset earlier, and thus
2990 already contain the correct values. This can happen for instance
2991 when reconnecting via the remote protocol to a target that supports
2992 the `qOffsets' packet.
2993
2994 - The section offsets were not reset earlier, and the best we can
2995 hope is that the old offsets are still applicable to the new run. */
2996
2997 if (! svr4_exec_displacement (&displacement))
2998 return;
2999
3000 /* Even DISPLACEMENT 0 is a valid new difference of in-memory vs. in-file
3001 addresses. */
3002
3003 if (symfile_objfile)
3004 {
3005 struct section_offsets *new_offsets;
3006 int i;
3007
3008 new_offsets = XALLOCAVEC (struct section_offsets,
3009 symfile_objfile->num_sections);
3010
3011 for (i = 0; i < symfile_objfile->num_sections; i++)
3012 new_offsets->offsets[i] = displacement;
3013
3014 objfile_relocate (symfile_objfile, new_offsets);
3015 }
3016 else if (exec_bfd)
3017 {
3018 asection *asect;
3019
3020 for (asect = exec_bfd->sections; asect != NULL; asect = asect->next)
3021 exec_set_section_address (bfd_get_filename (exec_bfd), asect->index,
3022 (bfd_section_vma (exec_bfd, asect)
3023 + displacement));
3024 }
3025 }
3026
3027 /* Implement the "create_inferior_hook" target_solib_ops method.
3028
3029 For SVR4 executables, the inferior's first instruction is either the first
3030 instruction in the dynamic linker (for dynamically linked
3031 executables) or the instruction at "start" for statically linked
3032 executables. For dynamically linked executables, the system
3033 first exec's /lib/libc.so.N, which contains the dynamic linker,
3034 and starts it running. The dynamic linker maps in any needed
3035 shared libraries, maps in the actual user executable, and then
3036 jumps to "start" in the user executable.
3037
3038 We can arrange to cooperate with the dynamic linker to discover the
3039 names of shared libraries that are dynamically linked, and the base
3040 addresses to which they are linked.
3041
3042 This function is responsible for discovering those names and
3043 addresses, and saving sufficient information about them to allow
3044 their symbols to be read at a later time. */
3045
3046 static void
3047 svr4_solib_create_inferior_hook (int from_tty)
3048 {
3049 struct svr4_info *info;
3050
3051 info = get_svr4_info ();
3052
3053 /* Clear the probes-based interface's state. */
3054 free_probes_table (info);
3055 free_solib_list (info);
3056
3057 /* Relocate the main executable if necessary. */
3058 svr4_relocate_main_executable ();
3059
3060 /* No point setting a breakpoint in the dynamic linker if we can't
3061 hit it (e.g., a core file, or a trace file). */
3062 if (!target_has_execution)
3063 return;
3064
3065 if (!svr4_have_link_map_offsets ())
3066 return;
3067
3068 if (!enable_break (info, from_tty))
3069 return;
3070 }
3071
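/* Implement the "clear_solib" target_so_ops method: forget the cached
r_debug location and any recorded dynamic loader placement.  */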
3072 static void
3073 svr4_clear_solib (void)
3074 {
3075 struct svr4_info *info;
3076
3077 info = get_svr4_info ();
3078 info->debug_base = 0;
3079 info->debug_loader_offset_p = 0;
3080 info->debug_loader_offset = 0;
3081 xfree (info->debug_loader_name);
3082 info->debug_loader_name = NULL;
3083 }
3084
3085 /* Clear any bits of ADDR that wouldn't fit in a target-format
3086 data pointer. "Data pointer" here refers to whatever sort of
3087 address the dynamic linker uses to manage its sections. At the
3088 moment, we don't support shared libraries on any processors where
3089 code and data pointers are different sizes.
3090
3091 This isn't really the right solution. What we really need here is
3092 a way to do arithmetic on CORE_ADDR values that respects the
3093 natural pointer/address correspondence. (For example, on the MIPS,
3094 converting a 32-bit pointer to a 64-bit CORE_ADDR requires you to
3095 sign-extend the value. There, simply truncating the bits above
3096 gdbarch_ptr_bit, as we do below, is no good.) This should probably
3097 be a new gdbarch method or something. */
3098 static CORE_ADDR
3099 svr4_truncate_ptr (CORE_ADDR addr)
3100 {
3101 if (gdbarch_ptr_bit (target_gdbarch ()) == sizeof (CORE_ADDR) * 8)
3102 /* We don't need to truncate anything, and the bit twiddling below
3103 will fail due to overflow problems. */
3104 return addr;
3105 else
3106 return addr & (((CORE_ADDR) 1 << gdbarch_ptr_bit (target_gdbarch ())) - 1);
3107 }
3108
3109
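/* Implement the "relocate_section_addresses" target_so_ops method:
shift SEC's start and end addresses by the load address of SO.  */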
3110 static void
3111 svr4_relocate_section_addresses (struct so_list *so,
3112 struct target_section *sec)
3113 {
3114 bfd *abfd = sec->the_bfd_section->owner;
3115
3116 sec->addr = svr4_truncate_ptr (sec->addr + lm_addr_check (so, abfd));
3117 sec->endaddr = svr4_truncate_ptr (sec->endaddr + lm_addr_check (so, abfd));
3118 }
3119 \f
3120
3121 /* Architecture-specific operations. */
3122
3123 /* Per-architecture data key. */
3124 static struct gdbarch_data *solib_svr4_data;
3125
3126 struct solib_svr4_ops
3127 {
3128 /* Return a description of the layout of `struct link_map'. */
3129 struct link_map_offsets *(*fetch_link_map_offsets)(void);
3130 };
3131
3132 /* Return a default for the architecture-specific operations. */
3133
3134 static void *
3135 solib_svr4_init (struct obstack *obstack)
3136 {
3137 struct solib_svr4_ops *ops;
3138
3139 ops = OBSTACK_ZALLOC (obstack, struct solib_svr4_ops);
3140 ops->fetch_link_map_offsets = NULL;
3141 return ops;
3142 }
3143
3144 /* Set the architecture-specific `struct link_map_offsets' fetcher for
3145 GDBARCH to FLMO. Also, install SVR4 solib_ops into GDBARCH. */
3146
3147 void
3148 set_solib_svr4_fetch_link_map_offsets (struct gdbarch *gdbarch,
3149 struct link_map_offsets *(*flmo) (void))
3150 {
3151 struct solib_svr4_ops *ops
3152 = (struct solib_svr4_ops *) gdbarch_data (gdbarch, solib_svr4_data);
3153
3154 ops->fetch_link_map_offsets = flmo;
3155
3156 set_solib_ops (gdbarch, &svr4_so_ops);
3157 }
3158
3159 /* Fetch a link_map_offsets structure using the architecture-specific
3160 `struct link_map_offsets' fetcher. */
3161
3162 static struct link_map_offsets *
3163 svr4_fetch_link_map_offsets (void)
3164 {
3165 struct solib_svr4_ops *ops
3166 = (struct solib_svr4_ops *) gdbarch_data (target_gdbarch (),
3167 solib_svr4_data);
3168
3169 gdb_assert (ops->fetch_link_map_offsets);
3170 return ops->fetch_link_map_offsets ();
3171 }
3172
3173 /* Return 1 if a link map offset fetcher has been defined, 0 otherwise. */
3174
3175 static int
3176 svr4_have_link_map_offsets (void)
3177 {
3178 struct solib_svr4_ops *ops
3179 = (struct solib_svr4_ops *) gdbarch_data (target_gdbarch (),
3180 solib_svr4_data);
3181
3182 return (ops->fetch_link_map_offsets != NULL);
3183 }
3184 \f
3185
3186 /* Most OS'es that have SVR4-style ELF dynamic libraries define a
3187 `struct r_debug' and a `struct link_map' that are binary compatible
3188 with the original SVR4 implementation. */
3189
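/* For reference, the offsets below assume layouts along these lines
(a sketch; field names follow <link.h>, and r_ldsomap is a Solaris
extension):

     struct r_debug  { int r_version; struct link_map *r_map;
                       ElfW(Addr) r_brk; int r_state;
                       ElfW(Addr) r_ldbase; struct link_map *r_ldsomap; };

     struct link_map { ElfW(Addr) l_addr; char *l_name; ElfW(Dyn) *l_ld;
                       struct link_map *l_next, *l_prev; };

With 4-byte pointers and addresses this gives the ILP32 offsets; with
8-byte pointers, plus natural padding after r_version and r_state, it
gives the LP64 offsets.  */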
3190 /* Fetch (and possibly build) an appropriate `struct link_map_offsets'
3191 for an ILP32 SVR4 system. */
3192
3193 struct link_map_offsets *
3194 svr4_ilp32_fetch_link_map_offsets (void)
3195 {
3196 static struct link_map_offsets lmo;
3197 static struct link_map_offsets *lmp = NULL;
3198
3199 if (lmp == NULL)
3200 {
3201 lmp = &lmo;
3202
3203 lmo.r_version_offset = 0;
3204 lmo.r_version_size = 4;
3205 lmo.r_map_offset = 4;
3206 lmo.r_brk_offset = 8;
3207 lmo.r_ldsomap_offset = 20;
3208
3209 /* Everything we need is in the first 20 bytes. */
3210 lmo.link_map_size = 20;
3211 lmo.l_addr_offset = 0;
3212 lmo.l_name_offset = 4;
3213 lmo.l_ld_offset = 8;
3214 lmo.l_next_offset = 12;
3215 lmo.l_prev_offset = 16;
3216 }
3217
3218 return lmp;
3219 }
3220
3221 /* Fetch (and possibly build) an appropriate `struct link_map_offsets'
3222 for an LP64 SVR4 system. */
3223
3224 struct link_map_offsets *
3225 svr4_lp64_fetch_link_map_offsets (void)
3226 {
3227 static struct link_map_offsets lmo;
3228 static struct link_map_offsets *lmp = NULL;
3229
3230 if (lmp == NULL)
3231 {
3232 lmp = &lmo;
3233
3234 lmo.r_version_offset = 0;
3235 lmo.r_version_size = 4;
3236 lmo.r_map_offset = 8;
3237 lmo.r_brk_offset = 16;
3238 lmo.r_ldsomap_offset = 40;
3239
3240 /* Everything we need is in the first 40 bytes. */
3241 lmo.link_map_size = 40;
3242 lmo.l_addr_offset = 0;
3243 lmo.l_name_offset = 8;
3244 lmo.l_ld_offset = 16;
3245 lmo.l_next_offset = 24;
3246 lmo.l_prev_offset = 32;
3247 }
3248
3249 return lmp;
3250 }
3251 \f
3252
3253 struct target_so_ops svr4_so_ops;
3254
3255 /* Lookup global symbol for ELF DSOs linked with -Bsymbolic. Those DSOs have a
3256 different rule for symbol lookup. The lookup begins here in the DSO, not in
3257 the main executable. */
3258
3259 static struct block_symbol
3260 elf_lookup_lib_symbol (struct objfile *objfile,
3261 const char *name,
3262 const domain_enum domain)
3263 {
3264 bfd *abfd;
3265
3266 if (objfile == symfile_objfile)
3267 abfd = exec_bfd;
3268 else
3269 {
3270 /* OBJFILE should have been passed as the non-debug one. */
3271 gdb_assert (objfile->separate_debug_objfile_backlink == NULL);
3272
3273 abfd = objfile->obfd;
3274 }
3275
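/* Only DSOs linked with -Bsymbolic carry the DT_SYMBOLIC dynamic tag;
everything else keeps the normal lookup order starting at the main
executable.  */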
3276 if (abfd == NULL || scan_dyntag (DT_SYMBOLIC, abfd, NULL, NULL) != 1)
3277 return (struct block_symbol) {NULL, NULL};
3278
3279 return lookup_global_symbol_from_objfile (objfile, name, domain);
3280 }
3281
3282 void
3283 _initialize_svr4_solib (void)
3284 {
3285 solib_svr4_data = gdbarch_data_register_pre_init (solib_svr4_init);
3286 solib_svr4_pspace_data
3287 = register_program_space_data_with_cleanup (NULL, svr4_pspace_data_cleanup);
3288
3289 svr4_so_ops.relocate_section_addresses = svr4_relocate_section_addresses;
3290 svr4_so_ops.free_so = svr4_free_so;
3291 svr4_so_ops.clear_so = svr4_clear_so;
3292 svr4_so_ops.clear_solib = svr4_clear_solib;
3293 svr4_so_ops.solib_create_inferior_hook = svr4_solib_create_inferior_hook;
3294 svr4_so_ops.current_sos = svr4_current_sos;
3295 svr4_so_ops.open_symbol_file_object = open_symbol_file_object;
3296 svr4_so_ops.in_dynsym_resolve_code = svr4_in_dynsym_resolve_code;
3297 svr4_so_ops.bfd_open = solib_bfd_open;
3298 svr4_so_ops.lookup_lib_global_symbol = elf_lookup_lib_symbol;
3299 svr4_so_ops.same = svr4_same;
3300 svr4_so_ops.keep_data_in_core = svr4_keep_data_in_core;
3301 svr4_so_ops.update_breakpoints = svr4_update_solib_event_breakpoints;
3302 svr4_so_ops.handle_event = svr4_handle_solib_event;
3303 }