gdb/solib-svr4.c
1 /* Handle SVR4 shared libraries for GDB, the GNU Debugger.
2
3 Copyright (C) 1990-2013 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21
22 #include "elf/external.h"
23 #include "elf/common.h"
24 #include "elf/mips.h"
25
26 #include "symtab.h"
27 #include "bfd.h"
28 #include "symfile.h"
29 #include "objfiles.h"
30 #include "gdbcore.h"
31 #include "target.h"
32 #include "inferior.h"
33 #include "regcache.h"
34 #include "gdbthread.h"
35 #include "observer.h"
36
37 #include "gdb_assert.h"
38
39 #include "solist.h"
40 #include "solib.h"
41 #include "solib-svr4.h"
42
43 #include "bfd-target.h"
44 #include "elf-bfd.h"
45 #include "exec.h"
46 #include "auxv.h"
47 #include "exceptions.h"
48 #include "gdb_bfd.h"
49 #include "probe.h"
50
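/* Forward declarations of static functions defined below.  */
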
51 static struct link_map_offsets *svr4_fetch_link_map_offsets (void);
52 static int svr4_have_link_map_offsets (void);
53 static void svr4_relocate_main_executable (void);
54 static void svr4_free_library_list (void *p_list);
55
56 /* Link map info to include in an allocated so_list entry. */
57
58 struct lm_info
59 {
60 /* Amount by which addresses in the binary should be relocated to
61 match the inferior. The direct inferior value is L_ADDR_INFERIOR.
62 When prelinking is involved and the prelink base address changes,
63 we may need a different offset - the recomputed offset is in L_ADDR.
64 It is commonly the same value. It is cached as we want to warn about
65 the difference and compute it only once. L_ADDR is valid
66 iff L_ADDR_P. */
67 CORE_ADDR l_addr, l_addr_inferior;
68 unsigned int l_addr_p : 1;
69
70 /* The target location of lm. */
71 CORE_ADDR lm_addr;
72
73 /* Values read in from inferior's fields of the same name. */
74 CORE_ADDR l_ld, l_next, l_prev, l_name;
75 };
76
77 /* On SVR4 systems, a list of symbols in the dynamic linker where
78 GDB can try to place a breakpoint to monitor shared library
79 events.
80
81 If none of these symbols are found, or other errors occur, then
SVR4 systems will fall back to using one of the BKPT_NAMES symbols
below as the "startup mapping complete" breakpoint address. */
84
85 static const char * const solib_break_names[] =
86 {
87 "r_debug_state",
88 "_r_debug_state",
89 "_dl_debug_state",
90 "rtld_db_dlactivity",
91 "__dl_rtld_db_dlactivity",
92 "_rtld_debug_state",
93
94 NULL
95 };
96
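/* If a breakpoint cannot be placed in the dynamic linker itself, these
symbols in the main executable are used as fallback locations for the
shared library event breakpoint.  */
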
97 static const char * const bkpt_names[] =
98 {
99 "_start",
100 "__start",
101 "main",
102 NULL
103 };
104
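/* Link map entry names that identify the main executable itself; see
match_main below.  */
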
105 static const char * const main_name_list[] =
106 {
107 "main_$main",
108 NULL
109 };
110
111 /* What to do when a probe stop occurs. */
112
113 enum probe_action
114 {
115 /* Something went seriously wrong. Stop using probes and
116 revert to using the older interface. */
117 PROBES_INTERFACE_FAILED,
118
119 /* No action is required. The shared object list is still
120 valid. */
121 DO_NOTHING,
122
123 /* The shared object list should be reloaded entirely. */
124 FULL_RELOAD,
125
126 /* Attempt to incrementally update the shared object list. If
127 the update fails or is not possible, fall back to reloading
128 the list in full. */
129 UPDATE_OR_RELOAD,
130 };
131
132 /* A probe's name and its associated action. */
133
134 struct probe_info
135 {
136 /* The name of the probe. */
137 const char *name;
138
139 /* What to do when a probe stop occurs. */
140 enum probe_action action;
141 };
142
143 /* A list of named probes and their associated actions. If all
144 probes are present in the dynamic linker then the probes-based
145 interface will be used. */
146
147 static const struct probe_info probe_info[] =
148 {
149 { "init_start", DO_NOTHING },
150 { "init_complete", FULL_RELOAD },
151 { "map_start", DO_NOTHING },
152 { "map_failed", DO_NOTHING },
153 { "reloc_complete", UPDATE_OR_RELOAD },
154 { "unmap_start", DO_NOTHING },
155 { "unmap_complete", FULL_RELOAD },
156 };
157
158 #define NUM_PROBES ARRAY_SIZE (probe_info)
159
160 /* Return non-zero if GDB_SO_NAME and INFERIOR_SO_NAME represent
161 the same shared library. */
162
163 static int
164 svr4_same_1 (const char *gdb_so_name, const char *inferior_so_name)
165 {
166 if (strcmp (gdb_so_name, inferior_so_name) == 0)
167 return 1;
168
/* On Solaris, when starting the inferior we think that the dynamic
linker is /usr/lib/ld.so.1, but later on, the table of loaded shared
libraries contains /lib/ld.so.1.  Sometimes one file is a link to
the other, but sometimes they simply have identical content without
being linked to each other.  We don't restrict this check to
Solaris, but the chances of running into this situation elsewhere
are very low. */
175 if (strcmp (gdb_so_name, "/usr/lib/ld.so.1") == 0
176 && strcmp (inferior_so_name, "/lib/ld.so.1") == 0)
177 return 1;
178
179 /* Similarly, we observed the same issue with sparc64, but with
180 different locations. */
181 if (strcmp (gdb_so_name, "/usr/lib/sparcv9/ld.so.1") == 0
182 && strcmp (inferior_so_name, "/lib/sparcv9/ld.so.1") == 0)
183 return 1;
184
185 return 0;
186 }
187
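/* Return non-zero if GDB and INFERIOR represent the same shared
library, comparing their original (unresolved) names using
svr4_same_1 above.  */
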
188 static int
189 svr4_same (struct so_list *gdb, struct so_list *inferior)
190 {
191 return (svr4_same_1 (gdb->so_original_name, inferior->so_original_name));
192 }
193
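/* Read the link map entry at LM_ADDR in the inferior and return it as
a newly allocated lm_info, or NULL if the read fails.  The caller is
responsible for freeing the result.  */
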
194 static struct lm_info *
195 lm_info_read (CORE_ADDR lm_addr)
196 {
197 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
198 gdb_byte *lm;
199 struct lm_info *lm_info;
200 struct cleanup *back_to;
201
202 lm = xmalloc (lmo->link_map_size);
203 back_to = make_cleanup (xfree, lm);
204
205 if (target_read_memory (lm_addr, lm, lmo->link_map_size) != 0)
206 {
207 warning (_("Error reading shared library list entry at %s"),
paddress (target_gdbarch (), lm_addr));
lm_info = NULL;
210 }
211 else
212 {
213 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
214
215 lm_info = xzalloc (sizeof (*lm_info));
216 lm_info->lm_addr = lm_addr;
217
218 lm_info->l_addr_inferior = extract_typed_address (&lm[lmo->l_addr_offset],
219 ptr_type);
220 lm_info->l_ld = extract_typed_address (&lm[lmo->l_ld_offset], ptr_type);
221 lm_info->l_next = extract_typed_address (&lm[lmo->l_next_offset],
222 ptr_type);
223 lm_info->l_prev = extract_typed_address (&lm[lmo->l_prev_offset],
224 ptr_type);
225 lm_info->l_name = extract_typed_address (&lm[lmo->l_name_offset],
226 ptr_type);
227 }
228
229 do_cleanups (back_to);
230
231 return lm_info;
232 }
233
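/* Return non-zero if the link map format in use includes the l_ld
field, i.e. its offset within struct link_map is known.  */
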
234 static int
235 has_lm_dynamic_from_link_map (void)
236 {
237 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
238
239 return lmo->l_ld_offset >= 0;
240 }
241
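/* Return the relocation offset (l_addr) to use for SO, computing and
caching it on first use.  The value from the inferior's link map is
cross-checked against the address of ABFD's .dynamic section so that
a prelink-induced displacement can be detected and handled.  */
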
242 static CORE_ADDR
243 lm_addr_check (const struct so_list *so, bfd *abfd)
244 {
245 if (!so->lm_info->l_addr_p)
246 {
247 struct bfd_section *dyninfo_sect;
248 CORE_ADDR l_addr, l_dynaddr, dynaddr;
249
250 l_addr = so->lm_info->l_addr_inferior;
251
252 if (! abfd || ! has_lm_dynamic_from_link_map ())
253 goto set_addr;
254
255 l_dynaddr = so->lm_info->l_ld;
256
257 dyninfo_sect = bfd_get_section_by_name (abfd, ".dynamic");
258 if (dyninfo_sect == NULL)
259 goto set_addr;
260
261 dynaddr = bfd_section_vma (abfd, dyninfo_sect);
262
263 if (dynaddr + l_addr != l_dynaddr)
264 {
265 CORE_ADDR align = 0x1000;
266 CORE_ADDR minpagesize = align;
267
268 if (bfd_get_flavour (abfd) == bfd_target_elf_flavour)
269 {
270 Elf_Internal_Ehdr *ehdr = elf_tdata (abfd)->elf_header;
271 Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
272 int i;
273
274 align = 1;
275
276 for (i = 0; i < ehdr->e_phnum; i++)
277 if (phdr[i].p_type == PT_LOAD && phdr[i].p_align > align)
278 align = phdr[i].p_align;
279
280 minpagesize = get_elf_backend_data (abfd)->minpagesize;
281 }
282
283 /* Turn it into a mask. */
284 align--;
285
286 /* If the changes match the alignment requirements, we
287 assume we're using a core file that was generated by the
288 same binary, just prelinked with a different base offset.
289 If it doesn't match, we may have a different binary, the
290 same binary with the dynamic table loaded at an unrelated
291 location, or anything, really. To avoid regressions,
292 don't adjust the base offset in the latter case, although
293 odds are that, if things really changed, debugging won't
294 quite work.
295
One might rather expect the condition
297 ((l_addr & align) == 0 && ((l_dynaddr - dynaddr) & align) == 0)
298 but the one below is relaxed for PPC. The PPC kernel supports
299 either 4k or 64k page sizes. To be prepared for 64k pages,
300 PPC ELF files are built using an alignment requirement of 64k.
301 However, when running on a kernel supporting 4k pages, the memory
302 mapping of the library may not actually happen on a 64k boundary!
303
304 (In the usual case where (l_addr & align) == 0, this check is
305 equivalent to the possibly expected check above.)
306
307 Even on PPC it must be zero-aligned at least for MINPAGESIZE. */
308
309 l_addr = l_dynaddr - dynaddr;
310
311 if ((l_addr & (minpagesize - 1)) == 0
312 && (l_addr & align) == ((l_dynaddr - dynaddr) & align))
313 {
314 if (info_verbose)
315 printf_unfiltered (_("Using PIC (Position Independent Code) "
316 "prelink displacement %s for \"%s\".\n"),
317 paddress (target_gdbarch (), l_addr),
318 so->so_name);
319 }
320 else
321 {
/* There is no way to verify that the library file matches.  During
prelinking of an unprelinked file (or unprelinking of a prelinked
file), prelink can shift the DYNAMIC segment by an arbitrary offset
without any page size alignment.  There is also no way to read the
ELF header and/or Program Headers to verify, even in a limited way,
that they match.  One could verify the DYNAMIC segment instead.
Still, the address found here is the best one GDB could find. */
330
331 warning (_(".dynamic section for \"%s\" "
332 "is not at the expected address "
333 "(wrong library or version mismatch?)"), so->so_name);
334 }
335 }
336
337 set_addr:
338 so->lm_info->l_addr = l_addr;
339 so->lm_info->l_addr_p = 1;
340 }
341
342 return so->lm_info->l_addr;
343 }
344
345 /* Per pspace SVR4 specific data. */
346
347 struct svr4_info
348 {
349 CORE_ADDR debug_base; /* Base of dynamic linker structures. */
350
351 /* Validity flag for debug_loader_offset. */
352 int debug_loader_offset_p;
353
354 /* Load address for the dynamic linker, inferred. */
355 CORE_ADDR debug_loader_offset;
356
357 /* Name of the dynamic linker, valid if debug_loader_offset_p. */
358 char *debug_loader_name;
359
360 /* Load map address for the main executable. */
361 CORE_ADDR main_lm_addr;
362
363 CORE_ADDR interp_text_sect_low;
364 CORE_ADDR interp_text_sect_high;
365 CORE_ADDR interp_plt_sect_low;
366 CORE_ADDR interp_plt_sect_high;
367
368 /* Nonzero if the list of objects was last obtained from the target
369 via qXfer:libraries-svr4:read. */
370 int using_xfer;
371
372 /* Table of struct probe_and_action instances, used by the
373 probes-based interface to map breakpoint addresses to probes
374 and their associated actions. Lookup is performed using
375 probe_and_action->probe->address. */
376 htab_t probes_table;
377
378 /* List of objects loaded into the inferior, used by the probes-
379 based interface. */
380 struct so_list *solib_list;
381 };
382
383 /* Per-program-space data key. */
384 static const struct program_space_data *solib_svr4_pspace_data;
385
386 /* Free the probes table. */
387
388 static void
389 free_probes_table (struct svr4_info *info)
390 {
391 if (info->probes_table == NULL)
392 return;
393
394 htab_delete (info->probes_table);
395 info->probes_table = NULL;
396 }
397
398 /* Free the solib list. */
399
400 static void
401 free_solib_list (struct svr4_info *info)
402 {
403 svr4_free_library_list (&info->solib_list);
404 info->solib_list = NULL;
405 }
406
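/* Per-program-space data cleanup routine: free the svr4_info attached
to PSPACE, if any.  */
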
407 static void
408 svr4_pspace_data_cleanup (struct program_space *pspace, void *arg)
409 {
410 struct svr4_info *info;
411
412 info = program_space_data (pspace, solib_svr4_pspace_data);
413 if (info == NULL)
414 return;
415
416 free_probes_table (info);
417 free_solib_list (info);
418
419 xfree (info);
420 }
421
422 /* Get the current svr4 data. If none is found yet, add it now. This
423 function always returns a valid object. */
424
425 static struct svr4_info *
426 get_svr4_info (void)
427 {
428 struct svr4_info *info;
429
430 info = program_space_data (current_program_space, solib_svr4_pspace_data);
431 if (info != NULL)
432 return info;
433
434 info = XZALLOC (struct svr4_info);
435 set_program_space_data (current_program_space, solib_svr4_pspace_data, info);
436 return info;
437 }
438
439 /* Local function prototypes */
440
441 static int match_main (const char *);
442
443 /* Read program header TYPE from inferior memory. The header is found
by scanning the OS auxiliary vector.
445
446 If TYPE == -1, return the program headers instead of the contents of
447 one program header.
448
449 Return a pointer to allocated memory holding the program header contents,
or NULL on failure.  If successful, and unless P_SECT_SIZE is NULL, the
size of those contents is returned in *P_SECT_SIZE.  Likewise, the target
architecture size (32-bit or 64-bit) is returned in *P_ARCH_SIZE. */
453
454 static gdb_byte *
455 read_program_header (int type, int *p_sect_size, int *p_arch_size)
456 {
457 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
458 CORE_ADDR at_phdr, at_phent, at_phnum, pt_phdr = 0;
459 int arch_size, sect_size;
460 CORE_ADDR sect_addr;
461 gdb_byte *buf;
462 int pt_phdr_p = 0;
463
464 /* Get required auxv elements from target. */
465 if (target_auxv_search (&current_target, AT_PHDR, &at_phdr) <= 0)
466 return 0;
467 if (target_auxv_search (&current_target, AT_PHENT, &at_phent) <= 0)
468 return 0;
469 if (target_auxv_search (&current_target, AT_PHNUM, &at_phnum) <= 0)
470 return 0;
471 if (!at_phdr || !at_phnum)
472 return 0;
473
474 /* Determine ELF architecture type. */
475 if (at_phent == sizeof (Elf32_External_Phdr))
476 arch_size = 32;
477 else if (at_phent == sizeof (Elf64_External_Phdr))
478 arch_size = 64;
479 else
480 return 0;
481
482 /* Find the requested segment. */
483 if (type == -1)
484 {
485 sect_addr = at_phdr;
486 sect_size = at_phent * at_phnum;
487 }
488 else if (arch_size == 32)
489 {
490 Elf32_External_Phdr phdr;
491 int i;
492
493 /* Search for requested PHDR. */
494 for (i = 0; i < at_phnum; i++)
495 {
496 int p_type;
497
498 if (target_read_memory (at_phdr + i * sizeof (phdr),
499 (gdb_byte *)&phdr, sizeof (phdr)))
500 return 0;
501
502 p_type = extract_unsigned_integer ((gdb_byte *) phdr.p_type,
503 4, byte_order);
504
505 if (p_type == PT_PHDR)
506 {
507 pt_phdr_p = 1;
508 pt_phdr = extract_unsigned_integer ((gdb_byte *) phdr.p_vaddr,
509 4, byte_order);
510 }
511
512 if (p_type == type)
513 break;
514 }
515
516 if (i == at_phnum)
517 return 0;
518
519 /* Retrieve address and size. */
520 sect_addr = extract_unsigned_integer ((gdb_byte *)phdr.p_vaddr,
521 4, byte_order);
522 sect_size = extract_unsigned_integer ((gdb_byte *)phdr.p_memsz,
523 4, byte_order);
524 }
525 else
526 {
527 Elf64_External_Phdr phdr;
528 int i;
529
530 /* Search for requested PHDR. */
531 for (i = 0; i < at_phnum; i++)
532 {
533 int p_type;
534
535 if (target_read_memory (at_phdr + i * sizeof (phdr),
536 (gdb_byte *)&phdr, sizeof (phdr)))
537 return 0;
538
539 p_type = extract_unsigned_integer ((gdb_byte *) phdr.p_type,
540 4, byte_order);
541
542 if (p_type == PT_PHDR)
543 {
544 pt_phdr_p = 1;
545 pt_phdr = extract_unsigned_integer ((gdb_byte *) phdr.p_vaddr,
546 8, byte_order);
547 }
548
549 if (p_type == type)
550 break;
551 }
552
553 if (i == at_phnum)
554 return 0;
555
556 /* Retrieve address and size. */
557 sect_addr = extract_unsigned_integer ((gdb_byte *)phdr.p_vaddr,
558 8, byte_order);
559 sect_size = extract_unsigned_integer ((gdb_byte *)phdr.p_memsz,
560 8, byte_order);
561 }
562
563 /* PT_PHDR is optional, but we really need it
564 for PIE to make this work in general. */
565
566 if (pt_phdr_p)
567 {
/* at_phdr is the real address in memory; pt_phdr is what the program
header says it is.  The relocation offset is the difference between
the two. */
570 sect_addr = sect_addr + (at_phdr - pt_phdr);
571 }
572
573 /* Read in requested program header. */
574 buf = xmalloc (sect_size);
575 if (target_read_memory (sect_addr, buf, sect_size))
576 {
577 xfree (buf);
578 return NULL;
579 }
580
581 if (p_arch_size)
582 *p_arch_size = arch_size;
583 if (p_sect_size)
584 *p_sect_size = sect_size;
585
586 return buf;
587 }
588
589
/* Return the program interpreter string (xmalloc'd; the caller is
responsible for freeing it), or NULL if it cannot be found.  */
591 static char *
592 find_program_interpreter (void)
593 {
594 gdb_byte *buf = NULL;
595
596 /* If we have an exec_bfd, use its section table. */
597 if (exec_bfd
598 && bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
599 {
600 struct bfd_section *interp_sect;
601
602 interp_sect = bfd_get_section_by_name (exec_bfd, ".interp");
603 if (interp_sect != NULL)
604 {
605 int sect_size = bfd_section_size (exec_bfd, interp_sect);
606
607 buf = xmalloc (sect_size);
608 bfd_get_section_contents (exec_bfd, interp_sect, buf, 0, sect_size);
609 }
610 }
611
/* If we didn't find it, use the target auxiliary vector. */
613 if (!buf)
614 buf = read_program_header (PT_INTERP, NULL, NULL);
615
616 return (char *) buf;
617 }
618
619
/* Scan for DYNTAG in the .dynamic section of ABFD.  If DYNTAG is
found, 1 is returned and, if PTR is non-NULL, *PTR is set to the
entry's value (using the runtime value read from the inferior when
possible).  */
622
623 static int
624 scan_dyntag (int dyntag, bfd *abfd, CORE_ADDR *ptr)
625 {
626 int arch_size, step, sect_size;
627 long dyn_tag;
628 CORE_ADDR dyn_ptr, dyn_addr;
629 gdb_byte *bufend, *bufstart, *buf;
630 Elf32_External_Dyn *x_dynp_32;
631 Elf64_External_Dyn *x_dynp_64;
632 struct bfd_section *sect;
633 struct target_section *target_section;
634
635 if (abfd == NULL)
636 return 0;
637
638 if (bfd_get_flavour (abfd) != bfd_target_elf_flavour)
639 return 0;
640
641 arch_size = bfd_get_arch_size (abfd);
642 if (arch_size == -1)
643 return 0;
644
645 /* Find the start address of the .dynamic section. */
646 sect = bfd_get_section_by_name (abfd, ".dynamic");
647 if (sect == NULL)
648 return 0;
649
650 for (target_section = current_target_sections->sections;
651 target_section < current_target_sections->sections_end;
652 target_section++)
653 if (sect == target_section->the_bfd_section)
654 break;
655 if (target_section < current_target_sections->sections_end)
656 dyn_addr = target_section->addr;
657 else
658 {
/* ABFD may come from an OBJFILE acting only as a symbol file without
being loaded into the target (see add_symbol_file_command).  In that
case, fall back to the file VMA address, without the possibility of
having the section relocated to its actual in-memory address. */
663
664 dyn_addr = bfd_section_vma (abfd, sect);
665 }
666
667 /* Read in .dynamic from the BFD. We will get the actual value
668 from memory later. */
669 sect_size = bfd_section_size (abfd, sect);
670 buf = bufstart = alloca (sect_size);
671 if (!bfd_get_section_contents (abfd, sect,
672 buf, 0, sect_size))
673 return 0;
674
675 /* Iterate over BUF and scan for DYNTAG. If found, set PTR and return. */
676 step = (arch_size == 32) ? sizeof (Elf32_External_Dyn)
677 : sizeof (Elf64_External_Dyn);
678 for (bufend = buf + sect_size;
679 buf < bufend;
680 buf += step)
681 {
682 if (arch_size == 32)
683 {
684 x_dynp_32 = (Elf32_External_Dyn *) buf;
685 dyn_tag = bfd_h_get_32 (abfd, (bfd_byte *) x_dynp_32->d_tag);
686 dyn_ptr = bfd_h_get_32 (abfd, (bfd_byte *) x_dynp_32->d_un.d_ptr);
687 }
688 else
689 {
690 x_dynp_64 = (Elf64_External_Dyn *) buf;
691 dyn_tag = bfd_h_get_64 (abfd, (bfd_byte *) x_dynp_64->d_tag);
692 dyn_ptr = bfd_h_get_64 (abfd, (bfd_byte *) x_dynp_64->d_un.d_ptr);
693 }
694 if (dyn_tag == DT_NULL)
695 return 0;
696 if (dyn_tag == dyntag)
697 {
698 /* If requested, try to read the runtime value of this .dynamic
699 entry. */
700 if (ptr)
701 {
702 struct type *ptr_type;
703 gdb_byte ptr_buf[8];
704 CORE_ADDR ptr_addr;
705
706 ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
707 ptr_addr = dyn_addr + (buf - bufstart) + arch_size / 8;
708 if (target_read_memory (ptr_addr, ptr_buf, arch_size / 8) == 0)
709 dyn_ptr = extract_typed_address (ptr_buf, ptr_type);
710 *ptr = dyn_ptr;
711 }
712 return 1;
713 }
714 }
715
716 return 0;
717 }
718
/* Scan for DYNTAG in the .dynamic section of the target's main
executable, found by consulting the OS auxiliary vector.  If DYNTAG
is found, 1 is returned and, if PTR is non-NULL, *PTR is set to the
entry's value.  */
722
723 static int
724 scan_dyntag_auxv (int dyntag, CORE_ADDR *ptr)
725 {
726 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
727 int sect_size, arch_size, step;
728 long dyn_tag;
729 CORE_ADDR dyn_ptr;
730 gdb_byte *bufend, *bufstart, *buf;
731
732 /* Read in .dynamic section. */
733 buf = bufstart = read_program_header (PT_DYNAMIC, &sect_size, &arch_size);
734 if (!buf)
735 return 0;
736
737 /* Iterate over BUF and scan for DYNTAG. If found, set PTR and return. */
738 step = (arch_size == 32) ? sizeof (Elf32_External_Dyn)
739 : sizeof (Elf64_External_Dyn);
740 for (bufend = buf + sect_size;
741 buf < bufend;
742 buf += step)
743 {
744 if (arch_size == 32)
745 {
746 Elf32_External_Dyn *dynp = (Elf32_External_Dyn *) buf;
747
748 dyn_tag = extract_unsigned_integer ((gdb_byte *) dynp->d_tag,
749 4, byte_order);
750 dyn_ptr = extract_unsigned_integer ((gdb_byte *) dynp->d_un.d_ptr,
751 4, byte_order);
752 }
753 else
754 {
755 Elf64_External_Dyn *dynp = (Elf64_External_Dyn *) buf;
756
757 dyn_tag = extract_unsigned_integer ((gdb_byte *) dynp->d_tag,
758 8, byte_order);
759 dyn_ptr = extract_unsigned_integer ((gdb_byte *) dynp->d_un.d_ptr,
760 8, byte_order);
761 }
762 if (dyn_tag == DT_NULL)
763 break;
764
765 if (dyn_tag == dyntag)
766 {
767 if (ptr)
768 *ptr = dyn_ptr;
769
770 xfree (bufstart);
771 return 1;
772 }
773 }
774
775 xfree (bufstart);
776 return 0;
777 }
778
779 /* Locate the base address of dynamic linker structs for SVR4 elf
780 targets.
781
782 For SVR4 elf targets the address of the dynamic linker's runtime
783 structure is contained within the dynamic info section in the
784 executable file. The dynamic section is also mapped into the
785 inferior address space. Because the runtime loader fills in the
786 real address before starting the inferior, we have to read in the
787 dynamic info section from the inferior address space.
788 If there are any errors while trying to find the address, we
789 silently return 0, otherwise the found address is returned. */
790
791 static CORE_ADDR
792 elf_locate_base (void)
793 {
794 struct minimal_symbol *msymbol;
795 CORE_ADDR dyn_ptr;
796
797 /* Look for DT_MIPS_RLD_MAP first. MIPS executables use this
798 instead of DT_DEBUG, although they sometimes contain an unused
799 DT_DEBUG. */
800 if (scan_dyntag (DT_MIPS_RLD_MAP, exec_bfd, &dyn_ptr)
801 || scan_dyntag_auxv (DT_MIPS_RLD_MAP, &dyn_ptr))
802 {
803 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
804 gdb_byte *pbuf;
805 int pbuf_size = TYPE_LENGTH (ptr_type);
806
807 pbuf = alloca (pbuf_size);
808 /* DT_MIPS_RLD_MAP contains a pointer to the address
809 of the dynamic link structure. */
810 if (target_read_memory (dyn_ptr, pbuf, pbuf_size))
811 return 0;
812 return extract_typed_address (pbuf, ptr_type);
813 }
814
815 /* Find DT_DEBUG. */
816 if (scan_dyntag (DT_DEBUG, exec_bfd, &dyn_ptr)
817 || scan_dyntag_auxv (DT_DEBUG, &dyn_ptr))
818 return dyn_ptr;
819
820 /* This may be a static executable. Look for the symbol
821 conventionally named _r_debug, as a last resort. */
822 msymbol = lookup_minimal_symbol ("_r_debug", NULL, symfile_objfile);
823 if (msymbol != NULL)
824 return SYMBOL_VALUE_ADDRESS (msymbol);
825
826 /* DT_DEBUG entry not found. */
827 return 0;
828 }
829
830 /* Locate the base address of dynamic linker structs.
831
832 For both the SunOS and SVR4 shared library implementations, if the
833 inferior executable has been linked dynamically, there is a single
834 address somewhere in the inferior's data space which is the key to
835 locating all of the dynamic linker's runtime structures. This
836 address is the value of the debug base symbol. The job of this
837 function is to find and return that address, or to return 0 if there
838 is no such address (the executable is statically linked for example).
839
840 For SunOS, the job is almost trivial, since the dynamic linker and
all of its structures are statically linked to the executable at
842 link time. Thus the symbol for the address we are looking for has
843 already been added to the minimal symbol table for the executable's
844 objfile at the time the symbol file's symbols were read, and all we
845 have to do is look it up there. Note that we explicitly do NOT want
846 to find the copies in the shared library.
847
848 The SVR4 version is a bit more complicated because the address
849 is contained somewhere in the dynamic info section. We have to go
850 to a lot more work to discover the address of the debug base symbol.
851 Because of this complexity, we cache the value we find and return that
852 value on subsequent invocations. Note there is no copy in the
853 executable symbol tables. */
854
855 static CORE_ADDR
856 locate_base (struct svr4_info *info)
857 {
858 /* Check to see if we have a currently valid address, and if so, avoid
859 doing all this work again and just return the cached address. If
860 we have no cached address, try to locate it in the dynamic info
861 section for ELF executables. There's no point in doing any of this
862 though if we don't have some link map offsets to work with. */
863
864 if (info->debug_base == 0 && svr4_have_link_map_offsets ())
865 info->debug_base = elf_locate_base ();
866 return info->debug_base;
867 }
868
869 /* Find the first element in the inferior's dynamic link map, and
870 return its address in the inferior. Return zero if the address
871 could not be determined.
872
873 FIXME: Perhaps we should validate the info somehow, perhaps by
874 checking r_version for a known version number, or r_state for
875 RT_CONSISTENT. */
876
877 static CORE_ADDR
878 solib_svr4_r_map (struct svr4_info *info)
879 {
880 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
881 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
882 CORE_ADDR addr = 0;
883 volatile struct gdb_exception ex;
884
885 TRY_CATCH (ex, RETURN_MASK_ERROR)
886 {
887 addr = read_memory_typed_address (info->debug_base + lmo->r_map_offset,
888 ptr_type);
889 }
890 exception_print (gdb_stderr, ex);
891 return addr;
892 }
893
894 /* Find r_brk from the inferior's debug base. */
895
896 static CORE_ADDR
897 solib_svr4_r_brk (struct svr4_info *info)
898 {
899 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
900 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
901
902 return read_memory_typed_address (info->debug_base + lmo->r_brk_offset,
903 ptr_type);
904 }
905
906 /* Find the link map for the dynamic linker (if it is not in the
907 normal list of loaded shared objects). */
908
909 static CORE_ADDR
910 solib_svr4_r_ldsomap (struct svr4_info *info)
911 {
912 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
913 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
914 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
915 ULONGEST version;
916
917 /* Check version, and return zero if `struct r_debug' doesn't have
918 the r_ldsomap member. */
919 version
920 = read_memory_unsigned_integer (info->debug_base + lmo->r_version_offset,
921 lmo->r_version_size, byte_order);
922 if (version < 2 || lmo->r_ldsomap_offset == -1)
923 return 0;
924
925 return read_memory_typed_address (info->debug_base + lmo->r_ldsomap_offset,
926 ptr_type);
927 }
928
929 /* On Solaris systems with some versions of the dynamic linker,
930 ld.so's l_name pointer points to the SONAME in the string table
931 rather than into writable memory. So that GDB can find shared
932 libraries when loading a core file generated by gcore, ensure that
933 memory areas containing the l_name string are saved in the core
934 file. */
935
936 static int
937 svr4_keep_data_in_core (CORE_ADDR vaddr, unsigned long size)
938 {
939 struct svr4_info *info;
940 CORE_ADDR ldsomap;
941 struct so_list *new;
942 struct cleanup *old_chain;
943 CORE_ADDR name_lm;
944
945 info = get_svr4_info ();
946
947 info->debug_base = 0;
948 locate_base (info);
949 if (!info->debug_base)
950 return 0;
951
952 ldsomap = solib_svr4_r_ldsomap (info);
953 if (!ldsomap)
954 return 0;
955
956 new = XZALLOC (struct so_list);
957 old_chain = make_cleanup (xfree, new);
958 new->lm_info = lm_info_read (ldsomap);
959 make_cleanup (xfree, new->lm_info);
960 name_lm = new->lm_info ? new->lm_info->l_name : 0;
961 do_cleanups (old_chain);
962
963 return (name_lm >= vaddr && name_lm < vaddr + size);
964 }
965
966 /* Implement the "open_symbol_file_object" target_so_ops method.
967
968 If no open symbol file, attempt to locate and open the main symbol
969 file. On SVR4 systems, this is the first link map entry. If its
970 name is here, we can open it. Useful when attaching to a process
971 without first loading its symbol file. */
972
973 static int
974 open_symbol_file_object (void *from_ttyp)
975 {
976 CORE_ADDR lm, l_name;
977 char *filename;
978 int errcode;
979 int from_tty = *(int *)from_ttyp;
980 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
981 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
982 int l_name_size = TYPE_LENGTH (ptr_type);
983 gdb_byte *l_name_buf = xmalloc (l_name_size);
984 struct cleanup *cleanups = make_cleanup (xfree, l_name_buf);
985 struct svr4_info *info = get_svr4_info ();
986
987 if (symfile_objfile)
988 if (!query (_("Attempt to reload symbols from process? ")))
989 {
990 do_cleanups (cleanups);
991 return 0;
992 }
993
994 /* Always locate the debug struct, in case it has moved. */
995 info->debug_base = 0;
996 if (locate_base (info) == 0)
997 {
998 do_cleanups (cleanups);
999 return 0; /* failed somehow... */
1000 }
1001
1002 /* First link map member should be the executable. */
1003 lm = solib_svr4_r_map (info);
1004 if (lm == 0)
1005 {
1006 do_cleanups (cleanups);
1007 return 0; /* failed somehow... */
1008 }
1009
1010 /* Read address of name from target memory to GDB. */
1011 read_memory (lm + lmo->l_name_offset, l_name_buf, l_name_size);
1012
1013 /* Convert the address to host format. */
1014 l_name = extract_typed_address (l_name_buf, ptr_type);
1015
1016 if (l_name == 0)
1017 {
1018 do_cleanups (cleanups);
1019 return 0; /* No filename. */
1020 }
1021
1022 /* Now fetch the filename from target memory. */
1023 target_read_string (l_name, &filename, SO_NAME_MAX_PATH_SIZE - 1, &errcode);
1024 make_cleanup (xfree, filename);
1025
1026 if (errcode)
1027 {
1028 warning (_("failed to read exec filename from attached file: %s"),
1029 safe_strerror (errcode));
1030 do_cleanups (cleanups);
1031 return 0;
1032 }
1033
1034 /* Have a pathname: read the symbol file. */
1035 symbol_file_add_main (filename, from_tty);
1036
1037 do_cleanups (cleanups);
1038 return 1;
1039 }
1040
/* Data exchange structure for the XML parser, as used by
svr4_current_sos_via_xfer_libraries. */
1043
1044 struct svr4_library_list
1045 {
1046 struct so_list *head, **tailp;
1047
1048 /* Inferior address of struct link_map used for the main executable. It is
zero if not known. */
1050 CORE_ADDR main_lm;
1051 };
1052
1053 /* Implementation for target_so_ops.free_so. */
1054
1055 static void
1056 svr4_free_so (struct so_list *so)
1057 {
1058 xfree (so->lm_info);
1059 }
1060
1061 /* Implement target_so_ops.clear_so. */
1062
1063 static void
1064 svr4_clear_so (struct so_list *so)
1065 {
1066 if (so->lm_info != NULL)
1067 so->lm_info->l_addr_p = 0;
1068 }
1069
1070 /* Free so_list built so far (called via cleanup). */
1071
1072 static void
1073 svr4_free_library_list (void *p_list)
1074 {
1075 struct so_list *list = *(struct so_list **) p_list;
1076
1077 while (list != NULL)
1078 {
1079 struct so_list *next = list->next;
1080
1081 free_so (list);
1082 list = next;
1083 }
1084 }
1085
1086 /* Copy library list. */
1087
1088 static struct so_list *
1089 svr4_copy_library_list (struct so_list *src)
1090 {
1091 struct so_list *dst = NULL;
1092 struct so_list **link = &dst;
1093
1094 while (src != NULL)
1095 {
1096 struct so_list *new;
1097
1098 new = xmalloc (sizeof (struct so_list));
1099 memcpy (new, src, sizeof (struct so_list));
1100
1101 new->lm_info = xmalloc (sizeof (struct lm_info));
1102 memcpy (new->lm_info, src->lm_info, sizeof (struct lm_info));
1103
1104 new->next = NULL;
1105 *link = new;
1106 link = &new->next;
1107
1108 src = src->next;
1109 }
1110
1111 return dst;
1112 }
1113
1114 #ifdef HAVE_LIBEXPAT
1115
1116 #include "xml-support.h"
1117
1118 /* Handle the start of a <library> element. Note: new elements are added
1119 at the tail of the list, keeping the list in order. */
1120
1121 static void
1122 library_list_start_library (struct gdb_xml_parser *parser,
1123 const struct gdb_xml_element *element,
1124 void *user_data, VEC(gdb_xml_value_s) *attributes)
1125 {
1126 struct svr4_library_list *list = user_data;
1127 const char *name = xml_find_attribute (attributes, "name")->value;
1128 ULONGEST *lmp = xml_find_attribute (attributes, "lm")->value;
1129 ULONGEST *l_addrp = xml_find_attribute (attributes, "l_addr")->value;
1130 ULONGEST *l_ldp = xml_find_attribute (attributes, "l_ld")->value;
1131 struct so_list *new_elem;
1132
1133 new_elem = XZALLOC (struct so_list);
1134 new_elem->lm_info = XZALLOC (struct lm_info);
1135 new_elem->lm_info->lm_addr = *lmp;
1136 new_elem->lm_info->l_addr_inferior = *l_addrp;
1137 new_elem->lm_info->l_ld = *l_ldp;
1138
1139 strncpy (new_elem->so_name, name, sizeof (new_elem->so_name) - 1);
1140 new_elem->so_name[sizeof (new_elem->so_name) - 1] = 0;
1141 strcpy (new_elem->so_original_name, new_elem->so_name);
1142
1143 *list->tailp = new_elem;
1144 list->tailp = &new_elem->next;
1145 }
1146
1147 /* Handle the start of a <library-list-svr4> element. */
1148
1149 static void
1150 svr4_library_list_start_list (struct gdb_xml_parser *parser,
1151 const struct gdb_xml_element *element,
1152 void *user_data, VEC(gdb_xml_value_s) *attributes)
1153 {
1154 struct svr4_library_list *list = user_data;
1155 const char *version = xml_find_attribute (attributes, "version")->value;
1156 struct gdb_xml_value *main_lm = xml_find_attribute (attributes, "main-lm");
1157
1158 if (strcmp (version, "1.0") != 0)
1159 gdb_xml_error (parser,
1160 _("SVR4 Library list has unsupported version \"%s\""),
1161 version);
1162
1163 if (main_lm)
1164 list->main_lm = *(ULONGEST *) main_lm->value;
1165 }
1166
1167 /* The allowed elements and attributes for an XML library list.
The root element is a <library-list-svr4>. */
1169
1170 static const struct gdb_xml_attribute svr4_library_attributes[] =
1171 {
1172 { "name", GDB_XML_AF_NONE, NULL, NULL },
1173 { "lm", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1174 { "l_addr", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1175 { "l_ld", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1176 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1177 };
1178
1179 static const struct gdb_xml_element svr4_library_list_children[] =
1180 {
1181 {
1182 "library", svr4_library_attributes, NULL,
1183 GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL,
1184 library_list_start_library, NULL
1185 },
1186 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1187 };
1188
1189 static const struct gdb_xml_attribute svr4_library_list_attributes[] =
1190 {
1191 { "version", GDB_XML_AF_NONE, NULL, NULL },
1192 { "main-lm", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
1193 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1194 };
1195
1196 static const struct gdb_xml_element svr4_library_list_elements[] =
1197 {
1198 { "library-list-svr4", svr4_library_list_attributes, svr4_library_list_children,
1199 GDB_XML_EF_NONE, svr4_library_list_start_list, NULL },
1200 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1201 };
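
/* For reference, a reply parsed by the element and attribute tables
   above has roughly the following shape (addresses are illustrative
   only):

   <library-list-svr4 version="1.0" main-lm="0x7ffff7ffe190">
     <library name="/lib64/libc.so.6" lm="0x7ffff7ffe700"
              l_addr="0x7ffff7a15000" l_ld="0x7ffff7dbdb80"/>
   </library-list-svr4>  */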
1202
/* Parse the qXfer:libraries-svr4:read reply DOCUMENT into *LIST.

Return 0 if the document could not be parsed; *LIST is not modified
in that case.  Return 1 if *LIST contains the library list; it may be
empty, and the caller is responsible for freeing all its entries. */
1208
1209 static int
1210 svr4_parse_libraries (const char *document, struct svr4_library_list *list)
1211 {
1212 struct cleanup *back_to = make_cleanup (svr4_free_library_list,
1213 &list->head);
1214
1215 memset (list, 0, sizeof (*list));
1216 list->tailp = &list->head;
1217 if (gdb_xml_parse_quick (_("target library list"), "library-list.dtd",
1218 svr4_library_list_elements, document, list) == 0)
1219 {
1220 /* Parsed successfully, keep the result. */
1221 discard_cleanups (back_to);
1222 return 1;
1223 }
1224
1225 do_cleanups (back_to);
1226 return 0;
1227 }
1228
1229 /* Attempt to get so_list from target via qXfer:libraries-svr4:read packet.
1230
Return 0 if the packet is not supported; *LIST is not modified in
that case.  Return 1 if *LIST contains the library list; it may be
empty, and the caller is responsible for freeing all its entries.
1234
1235 Note that ANNEX must be NULL if the remote does not explicitly allow
1236 qXfer:libraries-svr4:read packets with non-empty annexes. Support for
1237 this can be checked using target_augmented_libraries_svr4_read (). */
1238
1239 static int
1240 svr4_current_sos_via_xfer_libraries (struct svr4_library_list *list,
1241 const char *annex)
1242 {
1243 char *svr4_library_document;
1244 int result;
1245 struct cleanup *back_to;
1246
1247 gdb_assert (annex == NULL || target_augmented_libraries_svr4_read ());
1248
1249 /* Fetch the list of shared libraries. */
1250 svr4_library_document = target_read_stralloc (&current_target,
1251 TARGET_OBJECT_LIBRARIES_SVR4,
1252 annex);
1253 if (svr4_library_document == NULL)
1254 return 0;
1255
1256 back_to = make_cleanup (xfree, svr4_library_document);
1257 result = svr4_parse_libraries (svr4_library_document, list);
1258 do_cleanups (back_to);
1259
1260 return result;
1261 }
1262
1263 #else
1264
1265 static int
1266 svr4_current_sos_via_xfer_libraries (struct svr4_library_list *list,
1267 const char *annex)
1268 {
1269 return 0;
1270 }
1271
1272 #endif
1273
1274 /* If no shared library information is available from the dynamic
1275 linker, build a fallback list from other sources. */
1276
1277 static struct so_list *
1278 svr4_default_sos (void)
1279 {
1280 struct svr4_info *info = get_svr4_info ();
1281 struct so_list *new;
1282
1283 if (!info->debug_loader_offset_p)
1284 return NULL;
1285
1286 new = XZALLOC (struct so_list);
1287
1288 new->lm_info = xzalloc (sizeof (struct lm_info));
1289
1290 /* Nothing will ever check the other fields if we set l_addr_p. */
1291 new->lm_info->l_addr = info->debug_loader_offset;
1292 new->lm_info->l_addr_p = 1;
1293
1294 strncpy (new->so_name, info->debug_loader_name, SO_NAME_MAX_PATH_SIZE - 1);
1295 new->so_name[SO_NAME_MAX_PATH_SIZE - 1] = '\0';
1296 strcpy (new->so_original_name, new->so_name);
1297
1298 return new;
1299 }
1300
1301 /* Read the whole inferior libraries chain starting at address LM.
Expect the first entry's l_prev field to be PREV_LM.
1303 Add the entries to the tail referenced by LINK_PTR_PTR. Ignore the
1304 first entry if IGNORE_FIRST and set global MAIN_LM_ADDR according
1305 to it. Returns nonzero upon success. If zero is returned the
1306 entries stored to LINK_PTR_PTR are still valid although they may
1307 represent only part of the inferior library list. */
1308
1309 static int
1310 svr4_read_so_list (CORE_ADDR lm, CORE_ADDR prev_lm,
1311 struct so_list ***link_ptr_ptr, int ignore_first)
1312 {
1313 struct so_list *first = NULL;
1314 CORE_ADDR next_lm;
1315
1316 for (; lm != 0; prev_lm = lm, lm = next_lm)
1317 {
1318 struct so_list *new;
1319 struct cleanup *old_chain;
1320 int errcode;
1321 char *buffer;
1322
1323 new = XZALLOC (struct so_list);
1324 old_chain = make_cleanup_free_so (new);
1325
1326 new->lm_info = lm_info_read (lm);
1327 if (new->lm_info == NULL)
1328 {
1329 do_cleanups (old_chain);
1330 return 0;
1331 }
1332
1333 next_lm = new->lm_info->l_next;
1334
1335 if (new->lm_info->l_prev != prev_lm)
1336 {
1337 warning (_("Corrupted shared library list: %s != %s"),
1338 paddress (target_gdbarch (), prev_lm),
1339 paddress (target_gdbarch (), new->lm_info->l_prev));
1340 do_cleanups (old_chain);
1341 return 0;
1342 }
1343
1344 /* For SVR4 versions, the first entry in the link map is for the
1345 inferior executable, so we must ignore it. For some versions of
1346 SVR4, it has no name. For others (Solaris 2.3 for example), it
1347 does have a name, so we can no longer use a missing name to
1348 decide when to ignore it. */
1349 if (ignore_first && new->lm_info->l_prev == 0)
1350 {
1351 struct svr4_info *info = get_svr4_info ();
1352
1353 first = new;
1354 info->main_lm_addr = new->lm_info->lm_addr;
1355 do_cleanups (old_chain);
1356 continue;
1357 }
1358
1359 /* Extract this shared object's name. */
1360 target_read_string (new->lm_info->l_name, &buffer,
1361 SO_NAME_MAX_PATH_SIZE - 1, &errcode);
1362 if (errcode != 0)
1363 {
1364 /* If this entry's l_name address matches that of the
1365 inferior executable, then this is not a normal shared
1366 object, but (most likely) a vDSO. In this case, silently
1367 skip it; otherwise emit a warning. */
1368 if (first == NULL
1369 || new->lm_info->l_name != first->lm_info->l_name)
1370 warning (_("Can't read pathname for load map: %s."),
1371 safe_strerror (errcode));
1372 do_cleanups (old_chain);
1373 continue;
1374 }
1375
1376 strncpy (new->so_name, buffer, SO_NAME_MAX_PATH_SIZE - 1);
1377 new->so_name[SO_NAME_MAX_PATH_SIZE - 1] = '\0';
1378 strcpy (new->so_original_name, new->so_name);
1379 xfree (buffer);
1380
1381 /* If this entry has no name, or its name matches the name
1382 for the main executable, don't include it in the list. */
1383 if (! new->so_name[0] || match_main (new->so_name))
1384 {
1385 do_cleanups (old_chain);
1386 continue;
1387 }
1388
1389 discard_cleanups (old_chain);
1390 new->next = 0;
1391 **link_ptr_ptr = new;
1392 *link_ptr_ptr = &new->next;
1393 }
1394
1395 return 1;
1396 }
1397
1398 /* Read the full list of currently loaded shared objects directly
1399 from the inferior, without referring to any libraries read and
1400 stored by the probes interface. Handle special cases relating
1401 to the first elements of the list. */
1402
1403 static struct so_list *
1404 svr4_current_sos_direct (struct svr4_info *info)
1405 {
1406 CORE_ADDR lm;
1407 struct so_list *head = NULL;
1408 struct so_list **link_ptr = &head;
1409 struct cleanup *back_to;
1410 int ignore_first;
1411 struct svr4_library_list library_list;
1412
1413 /* Fall back to manual examination of the target if the packet is not
1414 supported or gdbserver failed to find DT_DEBUG. gdb.server/solib-list.exp
1415 tests a case where gdbserver cannot find the shared libraries list while
1416 GDB itself is able to find it via SYMFILE_OBJFILE.
1417
1418 Unfortunately statically linked inferiors will also fall back through this
1419 suboptimal code path. */
1420
1421 info->using_xfer = svr4_current_sos_via_xfer_libraries (&library_list,
1422 NULL);
1423 if (info->using_xfer)
1424 {
1425 if (library_list.main_lm)
1426 info->main_lm_addr = library_list.main_lm;
1427
1428 return library_list.head ? library_list.head : svr4_default_sos ();
1429 }
1430
1431 /* Always locate the debug struct, in case it has moved. */
1432 info->debug_base = 0;
1433 locate_base (info);
1434
1435 /* If we can't find the dynamic linker's base structure, this
1436 must not be a dynamically linked executable. Hmm. */
1437 if (! info->debug_base)
1438 return svr4_default_sos ();
1439
1440 /* Assume that everything is a library if the dynamic loader was loaded
1441 late by a static executable. */
1442 if (exec_bfd && bfd_get_section_by_name (exec_bfd, ".dynamic") == NULL)
1443 ignore_first = 0;
1444 else
1445 ignore_first = 1;
1446
1447 back_to = make_cleanup (svr4_free_library_list, &head);
1448
1449 /* Walk the inferior's link map list, and build our list of
1450 `struct so_list' nodes. */
1451 lm = solib_svr4_r_map (info);
1452 if (lm)
1453 svr4_read_so_list (lm, 0, &link_ptr, ignore_first);
1454
1455 /* On Solaris, the dynamic linker is not in the normal list of
1456 shared objects, so make sure we pick it up too. Having
1457 symbol information for the dynamic linker is quite crucial
1458 for skipping dynamic linker resolver code. */
1459 lm = solib_svr4_r_ldsomap (info);
1460 if (lm)
1461 svr4_read_so_list (lm, 0, &link_ptr, 0);
1462
1463 discard_cleanups (back_to);
1464
1465 if (head == NULL)
1466 return svr4_default_sos ();
1467
1468 return head;
1469 }
1470
1471 /* Implement the "current_sos" target_so_ops method. */
1472
1473 static struct so_list *
1474 svr4_current_sos (void)
1475 {
1476 struct svr4_info *info = get_svr4_info ();
1477
1478 /* If the solib list has been read and stored by the probes
1479 interface then we return a copy of the stored list. */
1480 if (info->solib_list != NULL)
1481 return svr4_copy_library_list (info->solib_list);
1482
1483 /* Otherwise obtain the solib list directly from the inferior. */
1484 return svr4_current_sos_direct (info);
1485 }
1486
1487 /* Get the address of the link_map for a given OBJFILE. */
1488
1489 CORE_ADDR
1490 svr4_fetch_objfile_link_map (struct objfile *objfile)
1491 {
1492 struct so_list *so;
1493 struct svr4_info *info = get_svr4_info ();
1494
1495 /* Cause svr4_current_sos() to be run if it hasn't been already. */
1496 if (info->main_lm_addr == 0)
1497 solib_add (NULL, 0, &current_target, auto_solib_add);
1498
1499 /* svr4_current_sos() will set main_lm_addr for the main executable. */
1500 if (objfile == symfile_objfile)
1501 return info->main_lm_addr;
1502
1503 /* The other link map addresses may be found by examining the list
1504 of shared libraries. */
1505 for (so = master_so_list (); so; so = so->next)
1506 if (so->objfile == objfile)
1507 return so->lm_info->lm_addr;
1508
1509 /* Not found! */
1510 return 0;
1511 }
1512
1513 /* On some systems, the only way to recognize the link map entry for
1514 the main executable file is by looking at its name. Return
1515 non-zero iff SONAME matches one of the known main executable names. */
1516
1517 static int
1518 match_main (const char *soname)
1519 {
1520 const char * const *mainp;
1521
1522 for (mainp = main_name_list; *mainp != NULL; mainp++)
1523 {
1524 if (strcmp (soname, *mainp) == 0)
1525 return (1);
1526 }
1527
1528 return (0);
1529 }
1530
1531 /* Return 1 if PC lies in the dynamic symbol resolution code of the
1532 SVR4 run time loader. */
1533
1534 int
1535 svr4_in_dynsym_resolve_code (CORE_ADDR pc)
1536 {
1537 struct svr4_info *info = get_svr4_info ();
1538
1539 return ((pc >= info->interp_text_sect_low
1540 && pc < info->interp_text_sect_high)
1541 || (pc >= info->interp_plt_sect_low
1542 && pc < info->interp_plt_sect_high)
1543 || in_plt_section (pc)
1544 || in_gnu_ifunc_stub (pc));
1545 }
1546
1547 /* Given an executable's ABFD and target, compute the entry-point
1548 address. */
1549
1550 static CORE_ADDR
1551 exec_entry_point (struct bfd *abfd, struct target_ops *targ)
1552 {
1553 CORE_ADDR addr;
1554
1555 /* KevinB wrote ... for most targets, the address returned by
1556 bfd_get_start_address() is the entry point for the start
1557 function. But, for some targets, bfd_get_start_address() returns
1558 the address of a function descriptor from which the entry point
1559 address may be extracted. This address is extracted by
1560 gdbarch_convert_from_func_ptr_addr(). The method
gdbarch_convert_from_func_ptr_addr() is merely the identity
1562 function for targets which don't use function descriptors. */
1563 addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
1564 bfd_get_start_address (abfd),
1565 targ);
1566 return gdbarch_addr_bits_remove (target_gdbarch (), addr);
1567 }
1568
1569 /* A probe and its associated action. */
1570
1571 struct probe_and_action
1572 {
1573 /* The probe. */
1574 struct probe *probe;
1575
1576 /* The action. */
1577 enum probe_action action;
1578 };
1579
1580 /* Returns a hash code for the probe_and_action referenced by p. */
1581
1582 static hashval_t
1583 hash_probe_and_action (const void *p)
1584 {
1585 const struct probe_and_action *pa = p;
1586
1587 return (hashval_t) pa->probe->address;
1588 }
1589
1590 /* Returns non-zero if the probe_and_actions referenced by p1 and p2
1591 are equal. */
1592
1593 static int
1594 equal_probe_and_action (const void *p1, const void *p2)
1595 {
1596 const struct probe_and_action *pa1 = p1;
1597 const struct probe_and_action *pa2 = p2;
1598
1599 return pa1->probe->address == pa2->probe->address;
1600 }
1601
1602 /* Register a solib event probe and its associated action in the
1603 probes table. */
1604
1605 static void
1606 register_solib_event_probe (struct probe *probe, enum probe_action action)
1607 {
1608 struct svr4_info *info = get_svr4_info ();
1609 struct probe_and_action lookup, *pa;
1610 void **slot;
1611
1612 /* Create the probes table, if necessary. */
1613 if (info->probes_table == NULL)
1614 info->probes_table = htab_create_alloc (1, hash_probe_and_action,
1615 equal_probe_and_action,
1616 xfree, xcalloc, xfree);
1617
1618 lookup.probe = probe;
1619 slot = htab_find_slot (info->probes_table, &lookup, INSERT);
1620 gdb_assert (*slot == HTAB_EMPTY_ENTRY);
1621
1622 pa = XCNEW (struct probe_and_action);
1623 pa->probe = probe;
1624 pa->action = action;
1625
1626 *slot = pa;
1627 }
1628
1629 /* Get the solib event probe at the specified location, and the
1630 action associated with it. Returns NULL if no solib event probe
1631 was found. */
1632
1633 static struct probe_and_action *
1634 solib_event_probe_at (struct svr4_info *info, CORE_ADDR address)
1635 {
1636 struct probe lookup_probe;
1637 struct probe_and_action lookup;
1638 void **slot;
1639
1640 lookup_probe.address = address;
1641 lookup.probe = &lookup_probe;
1642 slot = htab_find_slot (info->probes_table, &lookup, NO_INSERT);
1643
1644 if (slot == NULL)
1645 return NULL;
1646
1647 return (struct probe_and_action *) *slot;
1648 }
1649
1650 /* Decide what action to take when the specified solib event probe is
1651 hit. */
1652
1653 static enum probe_action
1654 solib_event_probe_action (struct probe_and_action *pa)
1655 {
1656 enum probe_action action;
1657 unsigned probe_argc;
1658
1659 action = pa->action;
1660 if (action == DO_NOTHING || action == PROBES_INTERFACE_FAILED)
1661 return action;
1662
1663 gdb_assert (action == FULL_RELOAD || action == UPDATE_OR_RELOAD);
1664
1665 /* Check that an appropriate number of arguments has been supplied.
1666 We expect:
1667 arg0: Lmid_t lmid (mandatory)
1668 arg1: struct r_debug *debug_base (mandatory)
1669 arg2: struct link_map *new (optional, for incremental updates) */
1670 probe_argc = get_probe_argument_count (pa->probe);
1671 if (probe_argc == 2)
1672 action = FULL_RELOAD;
1673 else if (probe_argc < 2)
1674 action = PROBES_INTERFACE_FAILED;
1675
1676 return action;
1677 }
1678
1679 /* Populate the shared object list by reading the entire list of
1680 shared objects from the inferior. Handle special cases relating
1681 to the first elements of the list. Returns nonzero on success. */
1682
1683 static int
1684 solist_update_full (struct svr4_info *info)
1685 {
1686 free_solib_list (info);
1687 info->solib_list = svr4_current_sos_direct (info);
1688
1689 return 1;
1690 }
1691
1692 /* Update the shared object list starting from the link-map entry
1693 passed by the linker in the probe's third argument. Returns
1694 nonzero if the list was successfully updated, or zero to indicate
1695 failure. */
1696
1697 static int
1698 solist_update_incremental (struct svr4_info *info, CORE_ADDR lm)
1699 {
1700 struct so_list *tail;
1701 CORE_ADDR prev_lm;
1702
1703 /* svr4_current_sos_direct contains logic to handle a number of
1704 special cases relating to the first elements of the list. To
1705 avoid duplicating this logic we defer to solist_update_full
1706 if the list is empty. */
1707 if (info->solib_list == NULL)
1708 return 0;
1709
1710 /* Fall back to a full update if we are using a remote target
1711 that does not support incremental transfers. */
1712 if (info->using_xfer && !target_augmented_libraries_svr4_read ())
1713 return 0;
1714
1715 /* Walk to the end of the list. */
1716 for (tail = info->solib_list; tail->next != NULL; tail = tail->next)
1717 /* Nothing. */;
1718 prev_lm = tail->lm_info->lm_addr;
1719
1720 /* Read the new objects. */
1721 if (info->using_xfer)
1722 {
1723 struct svr4_library_list library_list;
1724 char annex[64];
1725
1726 xsnprintf (annex, sizeof (annex), "start=%s;prev=%s",
1727 phex_nz (lm, sizeof (lm)),
1728 phex_nz (prev_lm, sizeof (prev_lm)));
1729 if (!svr4_current_sos_via_xfer_libraries (&library_list, annex))
1730 return 0;
1731
1732 tail->next = library_list.head;
1733 }
1734 else
1735 {
1736 struct so_list **link = &tail->next;
1737
1738 /* IGNORE_FIRST may safely be set to zero here because the
1739 above check and deferral to solist_update_full ensures
1740 that this call to svr4_read_so_list will never see the
1741 first element. */
1742 if (!svr4_read_so_list (lm, prev_lm, &link, 0))
1743 return 0;
1744 }
1745
1746 return 1;
1747 }
1748
1749 /* Disable the probes-based linker interface and revert to the
1750 original interface. We don't reset the breakpoints as the
1751 ones set up for the probes-based interface are adequate. */
1752
1753 static void
1754 disable_probes_interface_cleanup (void *arg)
1755 {
1756 struct svr4_info *info = get_svr4_info ();
1757
1758 warning (_("Probes-based dynamic linker interface failed.\n"
1759 "Reverting to original interface.\n"));
1760
1761 free_probes_table (info);
1762 free_solib_list (info);
1763 }
1764
1765 /* Update the solib list as appropriate when using the
1766 probes-based linker interface. Do nothing if using the
1767 standard interface. */
1768
1769 static void
1770 svr4_handle_solib_event (void)
1771 {
1772 struct svr4_info *info = get_svr4_info ();
1773 struct probe_and_action *pa;
1774 enum probe_action action;
1775 struct cleanup *old_chain, *usm_chain;
1776 struct value *val;
1777 CORE_ADDR pc, debug_base, lm = 0;
1779
1780 /* Do nothing if not using the probes interface. */
1781 if (info->probes_table == NULL)
1782 return;
1783
1784 /* If anything goes wrong we revert to the original linker
1785 interface. */
1786 old_chain = make_cleanup (disable_probes_interface_cleanup, NULL);
1787
1788 pc = regcache_read_pc (get_current_regcache ());
1789 pa = solib_event_probe_at (info, pc);
1790 if (pa == NULL)
1791 {
1792 do_cleanups (old_chain);
1793 return;
1794 }
1795
1796 action = solib_event_probe_action (pa);
1797 if (action == PROBES_INTERFACE_FAILED)
1798 {
1799 do_cleanups (old_chain);
1800 return;
1801 }
1802
1803 if (action == DO_NOTHING)
1804 {
1805 discard_cleanups (old_chain);
1806 return;
1807 }
1808
1809 /* evaluate_probe_argument looks up symbols in the dynamic linker
1810 using find_pc_section. find_pc_section is accelerated by a cache
1811 called the section map. The section map is invalidated every
1812 time a shared library is loaded or unloaded, and if the inferior
1813 is generating a lot of shared library events then the section map
1814 will be updated every time svr4_handle_solib_event is called.
1815 We called find_pc_section in svr4_create_solib_event_breakpoints,
1816 so we can guarantee that the dynamic linker's sections are in the
1817 section map. We can therefore inhibit section map updates across
1818 these calls to evaluate_probe_argument and save a lot of time. */
1819 inhibit_section_map_updates (current_program_space);
1820 usm_chain = make_cleanup (resume_section_map_updates_cleanup,
1821 current_program_space);
1822
1823 val = evaluate_probe_argument (pa->probe, 1);
1824 if (val == NULL)
1825 {
1826 do_cleanups (old_chain);
1827 return;
1828 }
1829
1830 debug_base = value_as_address (val);
1831 if (debug_base == 0)
1832 {
1833 do_cleanups (old_chain);
1834 return;
1835 }
1836
1837 /* Always locate the debug struct, in case it moved. */
1838 info->debug_base = 0;
1839 if (locate_base (info) == 0)
1840 {
1841 do_cleanups (old_chain);
1842 return;
1843 }
1844
1845 /* GDB does not currently support libraries loaded via dlmopen
1846 into namespaces other than the initial one. We must ignore
1847 any namespace other than the initial namespace here until
1848 support for this is added to GDB. */
1849 if (debug_base != info->debug_base)
1850 action = DO_NOTHING;
1851
1852 if (action == UPDATE_OR_RELOAD)
1853 {
1854 val = evaluate_probe_argument (pa->probe, 2);
1855 if (val != NULL)
1856 lm = value_as_address (val);
1857
1858 if (lm == 0)
1859 action = FULL_RELOAD;
1860 }
1861
1862 /* Resume section map updates. */
1863 do_cleanups (usm_chain);
1864
1865 if (action == UPDATE_OR_RELOAD)
1866 {
1867 if (!solist_update_incremental (info, lm))
1868 action = FULL_RELOAD;
1869 }
1870
1871 if (action == FULL_RELOAD)
1872 {
1873 if (!solist_update_full (info))
1874 {
1875 do_cleanups (old_chain);
1876 return;
1877 }
1878 }
1879
1880 discard_cleanups (old_chain);
1881 }
1882
1883 /* Helper function for svr4_update_solib_event_breakpoints. */
1884
1885 static int
1886 svr4_update_solib_event_breakpoint (struct breakpoint *b, void *arg)
1887 {
1888 struct bp_location *loc;
1889
1890 if (b->type != bp_shlib_event)
1891 {
1892 /* Continue iterating. */
1893 return 0;
1894 }
1895
1896 for (loc = b->loc; loc != NULL; loc = loc->next)
1897 {
1898 struct svr4_info *info;
1899 struct probe_and_action *pa;
1900
1901 info = program_space_data (loc->pspace, solib_svr4_pspace_data);
1902 if (info == NULL || info->probes_table == NULL)
1903 continue;
1904
1905 pa = solib_event_probe_at (info, loc->address);
1906 if (pa == NULL)
1907 continue;
1908
1909 if (pa->action == DO_NOTHING)
1910 {
1911 if (b->enable_state == bp_disabled && stop_on_solib_events)
1912 enable_breakpoint (b);
1913 else if (b->enable_state == bp_enabled && !stop_on_solib_events)
1914 disable_breakpoint (b);
1915 }
1916
1917 break;
1918 }
1919
1920 /* Continue iterating. */
1921 return 0;
1922 }
1923
1924 /* Enable or disable optional solib event breakpoints as appropriate.
1925 Called whenever stop_on_solib_events is changed. */
1926
1927 static void
1928 svr4_update_solib_event_breakpoints (void)
1929 {
1930 iterate_over_breakpoints (svr4_update_solib_event_breakpoint, NULL);
1931 }
1932
1933 /* Create and register solib event breakpoints. PROBES is an array
1934 of NUM_PROBES elements, each of which is a vector of probes. A
1935 solib event breakpoint will be created and registered for each
1936 probe. */
1937
1938 static void
1939 svr4_create_probe_breakpoints (struct gdbarch *gdbarch,
1940 VEC (probe_p) **probes)
1941 {
1942 int i;
1943
1944 for (i = 0; i < NUM_PROBES; i++)
1945 {
1946 enum probe_action action = probe_info[i].action;
1947 struct probe *probe;
1948 int ix;
1949
1950 for (ix = 0;
1951 VEC_iterate (probe_p, probes[i], ix, probe);
1952 ++ix)
1953 {
1954 create_solib_event_breakpoint (gdbarch, probe->address);
1955 register_solib_event_probe (probe, action);
1956 }
1957 }
1958
1959 svr4_update_solib_event_breakpoints ();
1960 }
1961
1962 /* Both the SunOS and the SVR4 dynamic linkers call a marker function
1963 before and after mapping and unmapping shared libraries. The sole
1964 purpose of this method is to allow debuggers to set a breakpoint so
1965 they can track these changes.
1966
1967 Some versions of the glibc dynamic linker contain named probes
1968 to allow more fine-grained stopping. Given the address of the
1969 original marker function, this function attempts to find these
1970 probes, and if found, sets breakpoints on those instead. If the
1971 probes aren't found, a single breakpoint is set on the original
1972 marker function. */
1973
1974 static void
1975 svr4_create_solib_event_breakpoints (struct gdbarch *gdbarch,
1976 CORE_ADDR address)
1977 {
1978 struct obj_section *os;
1979
1980 os = find_pc_section (address);
1981 if (os != NULL)
1982 {
1983 int with_prefix;
1984
1985 for (with_prefix = 0; with_prefix <= 1; with_prefix++)
1986 {
1987 VEC (probe_p) *probes[NUM_PROBES];
1988 int all_probes_found = 1;
1989 int checked_can_use_probe_arguments = 0;
1990 int i;
1991
1992 memset (probes, 0, sizeof (probes));
1993 for (i = 0; i < NUM_PROBES; i++)
1994 {
1995 const char *name = probe_info[i].name;
1996 struct probe *p;
1997 char buf[32];
1998
1999 /* Fedora 17 and Red Hat Enterprise Linux 6.2-6.4
2000 shipped with an early version of the probes code in
2001 which the probes' names were prefixed with "rtld_"
2002 and the "map_failed" probe did not exist. The
2003 locations of the probes are otherwise the same, so
2004 we check for probes with prefixed names if probes
2005 with unprefixed names are not present. */
2006 if (with_prefix)
2007 {
2008 xsnprintf (buf, sizeof (buf), "rtld_%s", name);
2009 name = buf;
2010 }
2011
2012 probes[i] = find_probes_in_objfile (os->objfile, "rtld", name);
2013
2014 /* The "map_failed" probe did not exist in early
2015 versions of the probes code in which the probes'
2016 names were prefixed with "rtld_". */
2017 if (strcmp (name, "rtld_map_failed") == 0)
2018 continue;
2019
2020 if (VEC_empty (probe_p, probes[i]))
2021 {
2022 all_probes_found = 0;
2023 break;
2024 }
2025
2026 /* Ensure probe arguments can be evaluated. */
2027 if (!checked_can_use_probe_arguments)
2028 {
2029 p = VEC_index (probe_p, probes[i], 0);
2030 if (!can_evaluate_probe_arguments (p))
2031 {
2032 all_probes_found = 0;
2033 break;
2034 }
2035 checked_can_use_probe_arguments = 1;
2036 }
2037 }
2038
2039 if (all_probes_found)
2040 svr4_create_probe_breakpoints (gdbarch, probes);
2041
2042 for (i = 0; i < NUM_PROBES; i++)
2043 VEC_free (probe_p, probes[i]);
2044
2045 if (all_probes_found)
2046 return;
2047 }
2048 }
2049
2050 create_solib_event_breakpoint (gdbarch, address);
2051 }
2052
2053 /* Helper function for gdb_bfd_lookup_symbol. */
2054
2055 static int
2056 cmp_name_and_sec_flags (asymbol *sym, void *data)
2057 {
2058 return (strcmp (sym->name, (const char *) data) == 0
2059 && (sym->section->flags & (SEC_CODE | SEC_DATA)) != 0);
2060 }
2061 /* Arrange for the dynamic linker to hit a breakpoint.
2062
2063 Both the SunOS and the SVR4 dynamic linkers have, as part of their
2064 debugger interface, support for arranging for the inferior to hit
2065 a breakpoint after mapping in the shared libraries. This function
2066 enables that breakpoint.
2067
2068 For SunOS, there is a special flag location (in_debugger) which we
2069 set to 1. When the dynamic linker sees this flag set, it will set
2070 a breakpoint at a location known only to itself, after saving the
2071 original contents of that place and the breakpoint address itself,
2072 in its own internal structures. When we resume the inferior, it
2073 will eventually take a SIGTRAP when it runs into the breakpoint.
2074 We handle this (in a different place) by restoring the contents of
2075 the breakpointed location (which is only known after it stops),
2076 chasing around to locate the shared libraries that have been
2077 loaded, then resuming.
2078
2079 For SVR4, the debugger interface structure contains a member (r_brk)
2080 which is statically initialized at the time the shared library is
2081 built, to the offset of a function (_r_debug_state) which is
2082 guaranteed to be called once before mapping in a library, and again when
2083 the mapping is complete. At the time we are examining this member,
2084 it contains only the unrelocated offset of the function, so we have
2085 to do our own relocation. Later, when the dynamic linker actually
2086 runs, it relocates r_brk to be the actual address of _r_debug_state().
2087
2088 The debugger interface structure also contains an enumeration which
2089 is set to either RT_ADD or RT_DELETE prior to changing the mapping,
2090 depending upon whether the library is being mapped or unmapped,
2091 and then set to RT_CONSISTENT after the library is mapped/unmapped. */
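
/* For reference, an illustrative sketch of the debugger interface
   structure described above, modeled on the SVR4/glibc `struct
   r_debug' from <link.h>.  The tag and enumerator names are changed
   so this sketch cannot clash with system headers, and the field
   types are simplified to CORE_ADDR; nothing below uses this
   declaration.  */

struct svr4_r_debug_sketch
{
  int r_version;             /* Interface version.  */
  CORE_ADDR r_map;           /* Head of the chain of `struct link_map'.  */
  CORE_ADDR r_brk;           /* Address of _r_debug_state ().  */
  enum
    {
      SKETCH_RT_CONSISTENT,  /* Mapping change is complete.  */
      SKETCH_RT_ADD,         /* Beginning to add a new object.  */
      SKETCH_RT_DELETE       /* Beginning to remove an object.  */
    } r_state;
  CORE_ADDR r_ldbase;        /* Base address of the dynamic linker.  */
};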
2092
2093 static int
2094 enable_break (struct svr4_info *info, int from_tty)
2095 {
2096 struct minimal_symbol *msymbol;
2097 const char * const *bkpt_namep;
2098 asection *interp_sect;
2099 char *interp_name;
2100 CORE_ADDR sym_addr;
2101
2102 info->interp_text_sect_low = info->interp_text_sect_high = 0;
2103 info->interp_plt_sect_low = info->interp_plt_sect_high = 0;
2104
2105 /* If we already have a shared library list in the target, and
2106 r_debug contains r_brk, set the breakpoint there - this should
2107 mean r_brk has already been relocated. Assume the dynamic linker
2108 is the object containing r_brk. */
2109
2110 solib_add (NULL, from_tty, &current_target, auto_solib_add);
2111 sym_addr = 0;
2112 if (info->debug_base && solib_svr4_r_map (info) != 0)
2113 sym_addr = solib_svr4_r_brk (info);
2114
2115 if (sym_addr != 0)
2116 {
2117 struct obj_section *os;
2118
2119 sym_addr = gdbarch_addr_bits_remove
2120 (target_gdbarch (), gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
2121 sym_addr,
2122 &current_target));
2123
2124 /* On at least some versions of Solaris there's a dynamic relocation
2125 on _r_debug.r_brk and SYM_ADDR may not be relocated yet, e.g., if
2126 we get control before the dynamic linker has self-relocated.
2127 Check if SYM_ADDR is in a known section; if it is, assume we can
2128 trust its value. This is just a heuristic though; it could go away
2129 or be replaced if it's getting in the way.
2130
2131 On ARM we need to know whether the ISA of rtld_db_dlactivity (or
2132 however it's spelled in your particular system) is ARM or Thumb.
2133 That knowledge is encoded in the address, if it's Thumb the low bit
2134 is 1. However, we've stripped that info above and it's not clear
2135 what all the consequences are of passing a non-addr_bits_remove'd
2136 address to svr4_create_solib_event_breakpoints. The call to
2137 find_pc_section verifies we know about the address and have some
2138 hope of computing the right kind of breakpoint to use (via
2139 symbol info). It does mean that GDB needs to be pointed at a
2140 non-stripped version of the dynamic linker in order to obtain
2141 information it already knows about. Sigh. */
2142
2143 os = find_pc_section (sym_addr);
2144 if (os != NULL)
2145 {
2146 /* Record the relocated start and end address of the dynamic linker
2147 text and plt section for svr4_in_dynsym_resolve_code. */
2148 bfd *tmp_bfd;
2149 CORE_ADDR load_addr;
2150
2151 tmp_bfd = os->objfile->obfd;
2152 load_addr = ANOFFSET (os->objfile->section_offsets,
2153 SECT_OFF_TEXT (os->objfile));
2154
2155 interp_sect = bfd_get_section_by_name (tmp_bfd, ".text");
2156 if (interp_sect)
2157 {
2158 info->interp_text_sect_low =
2159 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
2160 info->interp_text_sect_high =
2161 info->interp_text_sect_low
2162 + bfd_section_size (tmp_bfd, interp_sect);
2163 }
2164 interp_sect = bfd_get_section_by_name (tmp_bfd, ".plt");
2165 if (interp_sect)
2166 {
2167 info->interp_plt_sect_low =
2168 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
2169 info->interp_plt_sect_high =
2170 info->interp_plt_sect_low
2171 + bfd_section_size (tmp_bfd, interp_sect);
2172 }
2173
2174 svr4_create_solib_event_breakpoints (target_gdbarch (), sym_addr);
2175 return 1;
2176 }
2177 }
2178
2179 /* Find the program interpreter; if not found, warn the user and drop
2180 into the old breakpoint-at-symbol code. */
2181 interp_name = find_program_interpreter ();
2182 if (interp_name)
2183 {
2184 CORE_ADDR load_addr = 0;
2185 int load_addr_found = 0;
2186 int loader_found_in_list = 0;
2187 struct so_list *so;
2188 bfd *tmp_bfd = NULL;
2189 struct target_ops *tmp_bfd_target;
2190 volatile struct gdb_exception ex;
2191
2192 sym_addr = 0;
2193
2194 /* Now we need to figure out where the dynamic linker was
2195 loaded so that we can load its symbols and place a breakpoint
2196 in the dynamic linker itself.
2197
2198 This address is stored on the stack. However, I've been unable
2199 to find any magic formula to find it for Solaris (appears to
2200 be trivial on GNU/Linux). Therefore, we have to try an alternate
2201 mechanism to find the dynamic linker's base address. */
2202
2203 TRY_CATCH (ex, RETURN_MASK_ALL)
2204 {
2205 tmp_bfd = solib_bfd_open (interp_name);
2206 }
2207 if (tmp_bfd == NULL)
2208 goto bkpt_at_symbol;
2209
2210 /* Now convert the TMP_BFD into a target. That way target operations,
2211 as well as BFD operations, can be used. */
2212 tmp_bfd_target = target_bfd_reopen (tmp_bfd);
2213 /* target_bfd_reopen acquired its own reference, so we can
2214 release ours now. */
2215 gdb_bfd_unref (tmp_bfd);
2216
2217 /* On a running target, we can get the dynamic linker's base
2218 address from the shared library table. */
2219 so = master_so_list ();
2220 while (so)
2221 {
2222 if (svr4_same_1 (interp_name, so->so_original_name))
2223 {
2224 load_addr_found = 1;
2225 loader_found_in_list = 1;
2226 load_addr = lm_addr_check (so, tmp_bfd);
2227 break;
2228 }
2229 so = so->next;
2230 }
2231
2232 /* If we were not able to find the base address of the loader
2233 from our so_list, then try using the AT_BASE auxiliary entry. */
2234 if (!load_addr_found)
2235 if (target_auxv_search (&current_target, AT_BASE, &load_addr) > 0)
2236 {
2237 int addr_bit = gdbarch_addr_bit (target_gdbarch ());
2238
2239 /* Ensure LOAD_ADDR has the proper sign in its possible upper bits so
2240 that `+ load_addr' will wrap around CORE_ADDR width rather than
2241 creating invalid addresses like 0x101234567 for 32-bit inferiors
2242 on 64-bit GDB. */
2243
2244 if (addr_bit < (sizeof (CORE_ADDR) * HOST_CHAR_BIT))
2245 {
2246 CORE_ADDR space_size = (CORE_ADDR) 1 << addr_bit;
2247 CORE_ADDR tmp_entry_point = exec_entry_point (tmp_bfd,
2248 tmp_bfd_target);
2249
2250 gdb_assert (load_addr < space_size);
2251
2252 /* TMP_ENTRY_POINT exceeding SPACE_SIZE would mean a prelinked
2253 64-bit ld.so with a 32-bit executable; it should not happen. */
2254
2255 if (tmp_entry_point < space_size
2256 && tmp_entry_point + load_addr >= space_size)
2257 load_addr -= space_size;
2258 }
2259
2260 load_addr_found = 1;
2261 }
2262
2263 /* Otherwise we find the dynamic linker's base address by examining
2264 the current pc (which should point at the entry point for the
2265 dynamic linker) and subtracting the offset of the entry point.
2266
2267 This is more fragile than the previous approaches, but is a good
2268 fallback method because it has actually been working well in
2269 most cases. */
2270 if (!load_addr_found)
2271 {
2272 struct regcache *regcache
2273 = get_thread_arch_regcache (inferior_ptid, target_gdbarch ());
2274
2275 load_addr = (regcache_read_pc (regcache)
2276 - exec_entry_point (tmp_bfd, tmp_bfd_target));
2277 }
2278
2279 if (!loader_found_in_list)
2280 {
2281 info->debug_loader_name = xstrdup (interp_name);
2282 info->debug_loader_offset_p = 1;
2283 info->debug_loader_offset = load_addr;
2284 solib_add (NULL, from_tty, &current_target, auto_solib_add);
2285 }
2286
2287 /* Record the relocated start and end address of the dynamic linker
2288 text and plt section for svr4_in_dynsym_resolve_code. */
2289 interp_sect = bfd_get_section_by_name (tmp_bfd, ".text");
2290 if (interp_sect)
2291 {
2292 info->interp_text_sect_low =
2293 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
2294 info->interp_text_sect_high =
2295 info->interp_text_sect_low
2296 + bfd_section_size (tmp_bfd, interp_sect);
2297 }
2298 interp_sect = bfd_get_section_by_name (tmp_bfd, ".plt");
2299 if (interp_sect)
2300 {
2301 info->interp_plt_sect_low =
2302 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
2303 info->interp_plt_sect_high =
2304 info->interp_plt_sect_low
2305 + bfd_section_size (tmp_bfd, interp_sect);
2306 }
2307
2308 /* Now try to set a breakpoint in the dynamic linker. */
2309 for (bkpt_namep = solib_break_names; *bkpt_namep != NULL; bkpt_namep++)
2310 {
2311 sym_addr = gdb_bfd_lookup_symbol (tmp_bfd, cmp_name_and_sec_flags,
2312 (void *) *bkpt_namep);
2313 if (sym_addr != 0)
2314 break;
2315 }
2316
2317 if (sym_addr != 0)
2318 /* Convert 'sym_addr' from a function pointer to an address.
2319 Because we pass tmp_bfd_target instead of the current
2320 target, this will always produce an unrelocated value. */
2321 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
2322 sym_addr,
2323 tmp_bfd_target);
2324
2325 /* We're done with both the temporary bfd and target. Closing
2326 the target closes the underlying bfd, because it holds the
2327 only remaining reference. */
2328 target_close (tmp_bfd_target);
2329
2330 if (sym_addr != 0)
2331 {
2332 svr4_create_solib_event_breakpoints (target_gdbarch (),
2333 load_addr + sym_addr);
2334 xfree (interp_name);
2335 return 1;
2336 }
2337
2338 /* For whatever reason we couldn't set a breakpoint in the dynamic
2339 linker. Warn and drop into the old code. */
2340 bkpt_at_symbol:
2341 xfree (interp_name);
2342 warning (_("Unable to find dynamic linker breakpoint function.\n"
2343 "GDB will be unable to debug shared library initializers\n"
2344 "and track explicitly loaded dynamic code."));
2345 }
2346
2347 /* Scan through the lists of symbols, trying to look up the symbol and
2348 set a breakpoint there. Terminate the loop if/when we succeed. */
2349
2350 for (bkpt_namep = solib_break_names; *bkpt_namep != NULL; bkpt_namep++)
2351 {
2352 msymbol = lookup_minimal_symbol (*bkpt_namep, NULL, symfile_objfile);
2353 if ((msymbol != NULL) && (SYMBOL_VALUE_ADDRESS (msymbol) != 0))
2354 {
2355 sym_addr = SYMBOL_VALUE_ADDRESS (msymbol);
2356 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
2357 sym_addr,
2358 &current_target);
2359 svr4_create_solib_event_breakpoints (target_gdbarch (), sym_addr);
2360 return 1;
2361 }
2362 }
2363
2364 if (interp_name != NULL && !current_inferior ()->attach_flag)
2365 {
2366 for (bkpt_namep = bkpt_names; *bkpt_namep != NULL; bkpt_namep++)
2367 {
2368 msymbol = lookup_minimal_symbol (*bkpt_namep, NULL, symfile_objfile);
2369 if ((msymbol != NULL) && (SYMBOL_VALUE_ADDRESS (msymbol) != 0))
2370 {
2371 sym_addr = SYMBOL_VALUE_ADDRESS (msymbol);
2372 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
2373 sym_addr,
2374 &current_target);
2375 svr4_create_solib_event_breakpoints (target_gdbarch (), sym_addr);
2376 return 1;
2377 }
2378 }
2379 }
2380 return 0;
2381 }
2382
2383 /* Implement the "special_symbol_handling" target_so_ops method. */
2384
2385 static void
2386 svr4_special_symbol_handling (void)
2387 {
2388 /* Nothing to do. */
2389 }
2390
2391 /* Read the ELF program headers from ABFD. Return the contents and
2392 set *PHDRS_SIZE to the size of the program headers. */
2393
2394 static gdb_byte *
2395 read_program_headers_from_bfd (bfd *abfd, int *phdrs_size)
2396 {
2397 Elf_Internal_Ehdr *ehdr;
2398 gdb_byte *buf;
2399
2400 ehdr = elf_elfheader (abfd);
2401
2402 *phdrs_size = ehdr->e_phnum * ehdr->e_phentsize;
2403 if (*phdrs_size == 0)
2404 return NULL;
2405
2406 buf = xmalloc (*phdrs_size);
2407 if (bfd_seek (abfd, ehdr->e_phoff, SEEK_SET) != 0
2408 || bfd_bread (buf, *phdrs_size, abfd) != *phdrs_size)
2409 {
2410 xfree (buf);
2411 return NULL;
2412 }
2413
2414 return buf;
2415 }
2416
2417 /* Return 1 and fill *DISPLACEMENTP with the detected PIE offset of the
2418 inferior's exec_bfd. Otherwise return 0.
2419
2420 We relocate all of the sections by the same amount. This
2421 behavior is mandated by recent editions of the System V ABI.
2422 According to the System V Application Binary Interface,
2423 Edition 4.1, page 5-5:
2424
2425 ... Though the system chooses virtual addresses for
2426 individual processes, it maintains the segments' relative
2427 positions. Because position-independent code uses relative
2428 addressing between segments, the difference between
2429 virtual addresses in memory must match the difference
2430 between virtual addresses in the file. The difference
2431 between the virtual address of any segment in memory and
2432 the corresponding virtual address in the file is thus a
2433 single constant value for any one executable or shared
2434 object in a given process. This difference is the base
2435 address. One use of the base address is to relocate the
2436 memory image of the program during dynamic linking.
2437
2438 The same language also appears in Edition 4.0 of the System V
2439 ABI and is left unspecified in some of the earlier editions.
2440
2441 Decide if the objfile needs to be relocated. As indicated above, we will
2442 only be here when execution is stopped. But during attachment the PC can be
2443 at an arbitrary address, so regcache_read_pc can be misleading (contrary to
2444 the auxv AT_ENTRY value). Moreover, for an executable with an interpreter
2445 section, regcache_read_pc would point to the interpreter and not the main executable.
2446
2447 So, to summarize, relocations are necessary when the start address obtained
2448 from the executable is different from the address in auxv AT_ENTRY entry.
2449
2450 [ The astute reader will note that we also test to make sure that
2451 the executable in question has the DYNAMIC flag set. It is my
2452 opinion that this test is unnecessary (undesirable even). It
2453 was added to avoid inadvertent relocation of an executable
2454 whose e_type member in the ELF header is not ET_DYN. There may
2455 be a time in the future when it is desirable to do relocations
2456 on other types of files as well in which case this condition
2457 should either be removed or modified to accommodate the new file
2458 type. - Kevin, Nov 2000. ] */
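
/* A concrete illustration of the check described above (the numbers
   are hypothetical): if the on-disk executable's entry point
   (bfd_get_start_address) is 0x400 and the auxv AT_ENTRY value
   reported by the kernel is 0x7f3a12000400, the candidate
   displacement is 0x7f3a12000400 - 0x400 = 0x7f3a12000000, which
   passes the page-alignment test below and is then cross-checked
   against the program headers.  A non-PIE executable loaded at its
   link-time address yields a displacement of 0.  */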
2459
2460 static int
2461 svr4_exec_displacement (CORE_ADDR *displacementp)
2462 {
2463 /* ENTRY_POINT is a possible function descriptor - before
2464 a call to gdbarch_convert_from_func_ptr_addr. */
2465 CORE_ADDR entry_point, displacement;
2466
2467 if (exec_bfd == NULL)
2468 return 0;
2469
2470 /* An executable without the DYNAMIC flag is ET_EXEC and not ET_DYN.
2471 Both shared libraries being executed themselves and PIE (Position
2472 Independent Executable) executables are ET_DYN. */
2473
2474 if ((bfd_get_file_flags (exec_bfd) & DYNAMIC) == 0)
2475 return 0;
2476
2477 if (target_auxv_search (&current_target, AT_ENTRY, &entry_point) <= 0)
2478 return 0;
2479
2480 displacement = entry_point - bfd_get_start_address (exec_bfd);
2481
2482 /* Verify the DISPLACEMENT candidate complies with the required page
2483 alignment. It is cheaper than the program headers comparison below. */
2484
2485 if (bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
2486 {
2487 const struct elf_backend_data *elf = get_elf_backend_data (exec_bfd);
2488
2489 /* p_align of PT_LOAD segments does not specify any alignment but
2490 only congruency of addresses:
2491 p_offset % p_align == p_vaddr % p_align
2492 The kernel is free to load the executable with lower alignment. */
2493
2494 if ((displacement & (elf->minpagesize - 1)) != 0)
2495 return 0;
2496 }
2497
2498 /* Verify that the auxiliary vector describes the same file as exec_bfd, by
2499 comparing their program headers. If the program headers in the auxiliary
2500 vector do not match the program headers in the executable, then we are
2501 looking at a different file than the one used by the kernel - for
2502 instance, "gdb program" connected to "gdbserver :PORT ld.so program". */
2503
2504 if (bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
2505 {
2506 /* Be optimistic and clear OK only if GDB was able to verify the headers
2507 really do not match. */
2508 int phdrs_size, phdrs2_size, ok = 1;
2509 gdb_byte *buf, *buf2;
2510 int arch_size;
2511
2512 buf = read_program_header (-1, &phdrs_size, &arch_size);
2513 buf2 = read_program_headers_from_bfd (exec_bfd, &phdrs2_size);
2514 if (buf != NULL && buf2 != NULL)
2515 {
2516 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
2517
2518 /* We are dealing with three different addresses. EXEC_BFD
2519 represents the addresses in the on-disk file. The target memory
2520 content may differ from EXEC_BFD, as the file may have been prelinked
2521 to a different address after the executable was loaded.
2522 Moreover the address of placement in target memory can be
2523 different from what the program headers in target memory say -
2524 this is the goal of PIE.
2525
2526 Detected DISPLACEMENT covers both the offsets of PIE placement and
2527 possible new prelink performed after start of the program. Here
2528 relocate BUF and BUF2 just by the EXEC_BFD vs. target memory
2529 content offset for the verification purpose. */
2530
2531 if (phdrs_size != phdrs2_size
2532 || bfd_get_arch_size (exec_bfd) != arch_size)
2533 ok = 0;
2534 else if (arch_size == 32
2535 && phdrs_size >= sizeof (Elf32_External_Phdr)
2536 && phdrs_size % sizeof (Elf32_External_Phdr) == 0)
2537 {
2538 Elf_Internal_Ehdr *ehdr2 = elf_tdata (exec_bfd)->elf_header;
2539 Elf_Internal_Phdr *phdr2 = elf_tdata (exec_bfd)->phdr;
2540 CORE_ADDR displacement = 0;
2541 int i;
2542
2543 /* DISPLACEMENT could be found more easily by the difference of
2544 ehdr2->e_entry. But we haven't read the ehdr yet, and we
2545 already have enough information to compute that displacement
2546 with what we've read. */
2547
2548 for (i = 0; i < ehdr2->e_phnum; i++)
2549 if (phdr2[i].p_type == PT_LOAD)
2550 {
2551 Elf32_External_Phdr *phdrp;
2552 gdb_byte *buf_vaddr_p, *buf_paddr_p;
2553 CORE_ADDR vaddr, paddr;
2554 CORE_ADDR displacement_vaddr = 0;
2555 CORE_ADDR displacement_paddr = 0;
2556
2557 phdrp = &((Elf32_External_Phdr *) buf)[i];
2558 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
2559 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
2560
2561 vaddr = extract_unsigned_integer (buf_vaddr_p, 4,
2562 byte_order);
2563 displacement_vaddr = vaddr - phdr2[i].p_vaddr;
2564
2565 paddr = extract_unsigned_integer (buf_paddr_p, 4,
2566 byte_order);
2567 displacement_paddr = paddr - phdr2[i].p_paddr;
2568
2569 if (displacement_vaddr == displacement_paddr)
2570 displacement = displacement_vaddr;
2571
2572 break;
2573 }
2574
2575 /* Now compare BUF and BUF2 with optional DISPLACEMENT. */
2576
2577 for (i = 0; i < phdrs_size / sizeof (Elf32_External_Phdr); i++)
2578 {
2579 Elf32_External_Phdr *phdrp;
2580 Elf32_External_Phdr *phdr2p;
2581 gdb_byte *buf_vaddr_p, *buf_paddr_p;
2582 CORE_ADDR vaddr, paddr;
2583 asection *plt2_asect;
2584
2585 phdrp = &((Elf32_External_Phdr *) buf)[i];
2586 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
2587 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
2588 phdr2p = &((Elf32_External_Phdr *) buf2)[i];
2589
2590 /* PT_GNU_STACK is an exception: it is never relocated by
2591 prelink as its addresses are always zero. */
2592
2593 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2594 continue;
2595
2596 /* Check also other adjustment combinations - PR 11786. */
2597
2598 vaddr = extract_unsigned_integer (buf_vaddr_p, 4,
2599 byte_order);
2600 vaddr -= displacement;
2601 store_unsigned_integer (buf_vaddr_p, 4, byte_order, vaddr);
2602
2603 paddr = extract_unsigned_integer (buf_paddr_p, 4,
2604 byte_order);
2605 paddr -= displacement;
2606 store_unsigned_integer (buf_paddr_p, 4, byte_order, paddr);
2607
2608 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2609 continue;
2610
2611 /* prelink can convert .plt SHT_NOBITS to SHT_PROGBITS. */
2612 plt2_asect = bfd_get_section_by_name (exec_bfd, ".plt");
2613 if (plt2_asect)
2614 {
2615 int content2;
2616 gdb_byte *buf_filesz_p = (gdb_byte *) &phdrp->p_filesz;
2617 CORE_ADDR filesz;
2618
2619 content2 = (bfd_get_section_flags (exec_bfd, plt2_asect)
2620 & SEC_HAS_CONTENTS) != 0;
2621
2622 filesz = extract_unsigned_integer (buf_filesz_p, 4,
2623 byte_order);
2624
2625 /* PLT2_ASECT is from on-disk file (exec_bfd) while
2626 FILESZ is from the in-memory image. */
2627 if (content2)
2628 filesz += bfd_get_section_size (plt2_asect);
2629 else
2630 filesz -= bfd_get_section_size (plt2_asect);
2631
2632 store_unsigned_integer (buf_filesz_p, 4, byte_order,
2633 filesz);
2634
2635 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2636 continue;
2637 }
2638
2639 ok = 0;
2640 break;
2641 }
2642 }
2643 else if (arch_size == 64
2644 && phdrs_size >= sizeof (Elf64_External_Phdr)
2645 && phdrs_size % sizeof (Elf64_External_Phdr) == 0)
2646 {
2647 Elf_Internal_Ehdr *ehdr2 = elf_tdata (exec_bfd)->elf_header;
2648 Elf_Internal_Phdr *phdr2 = elf_tdata (exec_bfd)->phdr;
2649 CORE_ADDR displacement = 0;
2650 int i;
2651
2652 /* DISPLACEMENT could be found more easily by the difference of
2653 ehdr2->e_entry. But we haven't read the ehdr yet, and we
2654 already have enough information to compute that displacement
2655 with what we've read. */
2656
2657 for (i = 0; i < ehdr2->e_phnum; i++)
2658 if (phdr2[i].p_type == PT_LOAD)
2659 {
2660 Elf64_External_Phdr *phdrp;
2661 gdb_byte *buf_vaddr_p, *buf_paddr_p;
2662 CORE_ADDR vaddr, paddr;
2663 CORE_ADDR displacement_vaddr = 0;
2664 CORE_ADDR displacement_paddr = 0;
2665
2666 phdrp = &((Elf64_External_Phdr *) buf)[i];
2667 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
2668 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
2669
2670 vaddr = extract_unsigned_integer (buf_vaddr_p, 8,
2671 byte_order);
2672 displacement_vaddr = vaddr - phdr2[i].p_vaddr;
2673
2674 paddr = extract_unsigned_integer (buf_paddr_p, 8,
2675 byte_order);
2676 displacement_paddr = paddr - phdr2[i].p_paddr;
2677
2678 if (displacement_vaddr == displacement_paddr)
2679 displacement = displacement_vaddr;
2680
2681 break;
2682 }
2683
2684 /* Now compare BUF and BUF2 with optional DISPLACEMENT. */
2685
2686 for (i = 0; i < phdrs_size / sizeof (Elf64_External_Phdr); i++)
2687 {
2688 Elf64_External_Phdr *phdrp;
2689 Elf64_External_Phdr *phdr2p;
2690 gdb_byte *buf_vaddr_p, *buf_paddr_p;
2691 CORE_ADDR vaddr, paddr;
2692 asection *plt2_asect;
2693
2694 phdrp = &((Elf64_External_Phdr *) buf)[i];
2695 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
2696 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
2697 phdr2p = &((Elf64_External_Phdr *) buf2)[i];
2698
2699 /* PT_GNU_STACK is an exception: it is never relocated by
2700 prelink as its addresses are always zero. */
2701
2702 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2703 continue;
2704
2705 /* Check also other adjustment combinations - PR 11786. */
2706
2707 vaddr = extract_unsigned_integer (buf_vaddr_p, 8,
2708 byte_order);
2709 vaddr -= displacement;
2710 store_unsigned_integer (buf_vaddr_p, 8, byte_order, vaddr);
2711
2712 paddr = extract_unsigned_integer (buf_paddr_p, 8,
2713 byte_order);
2714 paddr -= displacement;
2715 store_unsigned_integer (buf_paddr_p, 8, byte_order, paddr);
2716
2717 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2718 continue;
2719
2720 /* prelink can convert .plt SHT_NOBITS to SHT_PROGBITS. */
2721 plt2_asect = bfd_get_section_by_name (exec_bfd, ".plt");
2722 if (plt2_asect)
2723 {
2724 int content2;
2725 gdb_byte *buf_filesz_p = (gdb_byte *) &phdrp->p_filesz;
2726 CORE_ADDR filesz;
2727
2728 content2 = (bfd_get_section_flags (exec_bfd, plt2_asect)
2729 & SEC_HAS_CONTENTS) != 0;
2730
2731 filesz = extract_unsigned_integer (buf_filesz_p, 8,
2732 byte_order);
2733
2734 /* PLT2_ASECT is from on-disk file (exec_bfd) while
2735 FILESZ is from the in-memory image. */
2736 if (content2)
2737 filesz += bfd_get_section_size (plt2_asect);
2738 else
2739 filesz -= bfd_get_section_size (plt2_asect);
2740
2741 store_unsigned_integer (buf_filesz_p, 8, byte_order,
2742 filesz);
2743
2744 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2745 continue;
2746 }
2747
2748 ok = 0;
2749 break;
2750 }
2751 }
2752 else
2753 ok = 0;
2754 }
2755
2756 xfree (buf);
2757 xfree (buf2);
2758
2759 if (!ok)
2760 return 0;
2761 }
2762
2763 if (info_verbose)
2764 {
2765 /* This may be printed repeatedly, as there is no easy way to check
2766 whether the executable symbols/file have already been relocated by
2767 the displacement. */
2768
2769 printf_unfiltered (_("Using PIE (Position Independent Executable) "
2770 "displacement %s for \"%s\".\n"),
2771 paddress (target_gdbarch (), displacement),
2772 bfd_get_filename (exec_bfd));
2773 }
2774
2775 *displacementp = displacement;
2776 return 1;
2777 }
2778
2779 /* Relocate the main executable. This function should be called upon
2780 stopping the inferior process at the entry point to the program.
2781 The entry point from BFD is compared to the AT_ENTRY of AUXV and if they are
2782 different, the main executable is relocated by the proper amount. */
2783
2784 static void
2785 svr4_relocate_main_executable (void)
2786 {
2787 CORE_ADDR displacement;
2788
2789 /* If we are re-running this executable, SYMFILE_OBJFILE->SECTION_OFFSETS
2790 probably contains the offsets computed using the PIE displacement
2791 from the previous run, which of course are irrelevant for this run.
2792 So we need to determine the new PIE displacement and recompute the
2793 section offsets accordingly, even if SYMFILE_OBJFILE->SECTION_OFFSETS
2794 already contains pre-computed offsets.
2795
2796 If we cannot compute the PIE displacement, either:
2797
2798 - The executable is not PIE.
2799
2800 - SYMFILE_OBJFILE does not match the executable started in the target.
2801 This can happen for main executable symbols loaded at the host while
2802 `ld.so --ld-args main-executable' is loaded in the target.
2803
2804 Then we leave the section offsets untouched and use them as is for
2805 this run. Either:
2806
2807 - These section offsets were properly reset earlier, and thus
2808 already contain the correct values. This can happen for instance
2809 when reconnecting via the remote protocol to a target that supports
2810 the `qOffsets' packet.
2811
2812 - The section offsets were not reset earlier, and the best we can
2813 hope is that the old offsets are still applicable to the new run. */
2814
2815 if (! svr4_exec_displacement (&displacement))
2816 return;
2817
2818 /* Even DISPLACEMENT 0 is a valid new difference of in-memory vs. in-file
2819 addresses. */
2820
2821 if (symfile_objfile)
2822 {
2823 struct section_offsets *new_offsets;
2824 int i;
2825
2826 new_offsets = alloca (symfile_objfile->num_sections
2827 * sizeof (*new_offsets));
2828
2829 for (i = 0; i < symfile_objfile->num_sections; i++)
2830 new_offsets->offsets[i] = displacement;
2831
2832 objfile_relocate (symfile_objfile, new_offsets);
2833 }
2834 else if (exec_bfd)
2835 {
2836 asection *asect;
2837
2838 for (asect = exec_bfd->sections; asect != NULL; asect = asect->next)
2839 exec_set_section_address (bfd_get_filename (exec_bfd), asect->index,
2840 (bfd_section_vma (exec_bfd, asect)
2841 + displacement));
2842 }
2843 }
2844
2845 /* Implement the "create_inferior_hook" target_so_ops method.
2846
2847 For SVR4 executables, the first instruction executed is either the first
2848 instruction in the dynamic linker (for dynamically linked
2849 executables) or the instruction at "start" for statically linked
2850 executables. For dynamically linked executables, the system
2851 first exec's /lib/libc.so.N, which contains the dynamic linker,
2852 and starts it running. The dynamic linker maps in any needed
2853 shared libraries, maps in the actual user executable, and then
2854 jumps to "start" in the user executable.
2855
2856 We can arrange to cooperate with the dynamic linker to discover the
2857 names of shared libraries that are dynamically linked, and the base
2858 addresses to which they are linked.
2859
2860 This function is responsible for discovering those names and
2861 addresses, and saving sufficient information about them to allow
2862 their symbols to be read at a later time. */
2863
2864 static void
2865 svr4_solib_create_inferior_hook (int from_tty)
2866 {
2867 struct svr4_info *info;
2868
2869 info = get_svr4_info ();
2870
2871 /* Clear the probes-based interface's state. */
2872 free_probes_table (info);
2873 free_solib_list (info);
2874
2875 /* Relocate the main executable if necessary. */
2876 svr4_relocate_main_executable ();
2877
2878 /* No point setting a breakpoint in the dynamic linker if we can't
2879 hit it (e.g., a core file, or a trace file). */
2880 if (!target_has_execution)
2881 return;
2882
2883 if (!svr4_have_link_map_offsets ())
2884 return;
2885
2886 if (!enable_break (info, from_tty))
2887 return;
2888 }
2889
2890 static void
2891 svr4_clear_solib (void)
2892 {
2893 struct svr4_info *info;
2894
2895 info = get_svr4_info ();
2896 info->debug_base = 0;
2897 info->debug_loader_offset_p = 0;
2898 info->debug_loader_offset = 0;
2899 xfree (info->debug_loader_name);
2900 info->debug_loader_name = NULL;
2901 }
2902
2903 /* Clear any bits of ADDR that wouldn't fit in a target-format
2904 data pointer. "Data pointer" here refers to whatever sort of
2905 address the dynamic linker uses to manage its sections. At the
2906 moment, we don't support shared libraries on any processors where
2907 code and data pointers are different sizes.
2908
2909 This isn't really the right solution. What we really need here is
2910 a way to do arithmetic on CORE_ADDR values that respects the
2911 natural pointer/address correspondence. (For example, on the MIPS,
2912 converting a 32-bit pointer to a 64-bit CORE_ADDR requires you to
2913 sign-extend the value. There, simply truncating the bits above
2914 gdbarch_ptr_bit, as we do below, is no good.) This should probably
2915 be a new gdbarch method or something. */
2916 static CORE_ADDR
2917 svr4_truncate_ptr (CORE_ADDR addr)
2918 {
2919 if (gdbarch_ptr_bit (target_gdbarch ()) == sizeof (CORE_ADDR) * 8)
2920 /* We don't need to truncate anything, and the bit twiddling below
2921 will fail due to overflow problems. */
2922 return addr;
2923 else
2924 return addr & (((CORE_ADDR) 1 << gdbarch_ptr_bit (target_gdbarch ())) - 1);
2925 }
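
/* For example (an illustrative note only): when debugging a 32-bit
   target with a 64-bit GDB, gdbarch_ptr_bit () is 32 while CORE_ADDR
   is 64 bits wide, so svr4_truncate_ptr (0x100012345678) yields
   0x12345678; when the target's pointers are as wide as CORE_ADDR the
   address is returned unchanged.  */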
2926
2927
2928 static void
2929 svr4_relocate_section_addresses (struct so_list *so,
2930 struct target_section *sec)
2931 {
2932 bfd *abfd = sec->the_bfd_section->owner;
2933
2934 sec->addr = svr4_truncate_ptr (sec->addr + lm_addr_check (so, abfd));
2935 sec->endaddr = svr4_truncate_ptr (sec->endaddr + lm_addr_check (so, abfd));
2936 }
2937 \f
2938
2939 /* Architecture-specific operations. */
2940
2941 /* Per-architecture data key. */
2942 static struct gdbarch_data *solib_svr4_data;
2943
2944 struct solib_svr4_ops
2945 {
2946 /* Return a description of the layout of `struct link_map'. */
2947 struct link_map_offsets *(*fetch_link_map_offsets)(void);
2948 };
2949
2950 /* Return a default for the architecture-specific operations. */
2951
2952 static void *
2953 solib_svr4_init (struct obstack *obstack)
2954 {
2955 struct solib_svr4_ops *ops;
2956
2957 ops = OBSTACK_ZALLOC (obstack, struct solib_svr4_ops);
2958 ops->fetch_link_map_offsets = NULL;
2959 return ops;
2960 }
2961
2962 /* Set the architecture-specific `struct link_map_offsets' fetcher for
2963 GDBARCH to FLMO. Also, install SVR4 solib_ops into GDBARCH. */
2964
2965 void
2966 set_solib_svr4_fetch_link_map_offsets (struct gdbarch *gdbarch,
2967 struct link_map_offsets *(*flmo) (void))
2968 {
2969 struct solib_svr4_ops *ops = gdbarch_data (gdbarch, solib_svr4_data);
2970
2971 ops->fetch_link_map_offsets = flmo;
2972
2973 set_solib_ops (gdbarch, &svr4_so_ops);
2974 }
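
/* As a usage illustration (a sketch only; the initialization function
   name here is hypothetical), an architecture's *-tdep.c installs its
   fetcher from its gdbarch initialization routine, which as a side
   effect installs svr4_so_ops for that gdbarch:

     static void
     example_ilp32_linux_init_abi (struct gdbarch_info info,
                                   struct gdbarch *gdbarch)
     {
       set_solib_svr4_fetch_link_map_offsets
         (gdbarch, svr4_ilp32_fetch_link_map_offsets);
     }
*/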
2975
2976 /* Fetch a link_map_offsets structure using the architecture-specific
2977 `struct link_map_offsets' fetcher. */
2978
2979 static struct link_map_offsets *
2980 svr4_fetch_link_map_offsets (void)
2981 {
2982 struct solib_svr4_ops *ops = gdbarch_data (target_gdbarch (), solib_svr4_data);
2983
2984 gdb_assert (ops->fetch_link_map_offsets);
2985 return ops->fetch_link_map_offsets ();
2986 }
2987
2988 /* Return 1 if a link map offset fetcher has been defined, 0 otherwise. */
2989
2990 static int
2991 svr4_have_link_map_offsets (void)
2992 {
2993 struct solib_svr4_ops *ops = gdbarch_data (target_gdbarch (), solib_svr4_data);
2994
2995 return (ops->fetch_link_map_offsets != NULL);
2996 }
2997 \f
2998
2999 /* Most OSes that have SVR4-style ELF dynamic libraries define a
3000 `struct r_debug' and a `struct link_map' that are binary compatible
3001 with the original SVR4 implementation. */
3002
3003 /* Fetch (and possibly build) an appropriate `struct link_map_offsets'
3004 for an ILP32 SVR4 system. */
3005
3006 struct link_map_offsets *
3007 svr4_ilp32_fetch_link_map_offsets (void)
3008 {
3009 static struct link_map_offsets lmo;
3010 static struct link_map_offsets *lmp = NULL;
3011
3012 if (lmp == NULL)
3013 {
3014 lmp = &lmo;
3015
3016 lmo.r_version_offset = 0;
3017 lmo.r_version_size = 4;
3018 lmo.r_map_offset = 4;
3019 lmo.r_brk_offset = 8;
3020 lmo.r_ldsomap_offset = 20;
3021
3022 /* Everything we need is in the first 20 bytes. */
3023 lmo.link_map_size = 20;
3024 lmo.l_addr_offset = 0;
3025 lmo.l_name_offset = 4;
3026 lmo.l_ld_offset = 8;
3027 lmo.l_next_offset = 12;
3028 lmo.l_prev_offset = 16;
3029 }
3030
3031 return lmp;
3032 }
3033
3034 /* Fetch (and possibly build) an appropriate `struct link_map_offsets'
3035 for an LP64 SVR4 system. */
3036
3037 struct link_map_offsets *
3038 svr4_lp64_fetch_link_map_offsets (void)
3039 {
3040 static struct link_map_offsets lmo;
3041 static struct link_map_offsets *lmp = NULL;
3042
3043 if (lmp == NULL)
3044 {
3045 lmp = &lmo;
3046
3047 lmo.r_version_offset = 0;
3048 lmo.r_version_size = 4;
3049 lmo.r_map_offset = 8;
3050 lmo.r_brk_offset = 16;
3051 lmo.r_ldsomap_offset = 40;
3052
3053 /* Everything we need is in the first 40 bytes. */
3054 lmo.link_map_size = 40;
3055 lmo.l_addr_offset = 0;
3056 lmo.l_name_offset = 8;
3057 lmo.l_ld_offset = 16;
3058 lmo.l_next_offset = 24;
3059 lmo.l_prev_offset = 32;
3060 }
3061
3062 return lmp;
3063 }
3064 \f
3065
3066 struct target_so_ops svr4_so_ops;
3067
3068 /* Look up a global symbol for ELF DSOs linked with -Bsymbolic. Those DSOs have a
3069 different rule for symbol lookup. The lookup begins here in the DSO, not in
3070 the main executable. */
3071
3072 static struct symbol *
3073 elf_lookup_lib_symbol (const struct objfile *objfile,
3074 const char *name,
3075 const domain_enum domain)
3076 {
3077 bfd *abfd;
3078
3079 if (objfile == symfile_objfile)
3080 abfd = exec_bfd;
3081 else
3082 {
3083 /* OBJFILE should have been passed as the non-debug one. */
3084 gdb_assert (objfile->separate_debug_objfile_backlink == NULL);
3085
3086 abfd = objfile->obfd;
3087 }
3088
3089 if (abfd == NULL || scan_dyntag (DT_SYMBOLIC, abfd, NULL) != 1)
3090 return NULL;
3091
3092 return lookup_global_symbol_from_objfile (objfile, name, domain);
3093 }
3094
3095 extern initialize_file_ftype _initialize_svr4_solib; /* -Wmissing-prototypes */
3096
3097 void
3098 _initialize_svr4_solib (void)
3099 {
3100 solib_svr4_data = gdbarch_data_register_pre_init (solib_svr4_init);
3101 solib_svr4_pspace_data
3102 = register_program_space_data_with_cleanup (NULL, svr4_pspace_data_cleanup);
3103
3104 svr4_so_ops.relocate_section_addresses = svr4_relocate_section_addresses;
3105 svr4_so_ops.free_so = svr4_free_so;
3106 svr4_so_ops.clear_so = svr4_clear_so;
3107 svr4_so_ops.clear_solib = svr4_clear_solib;
3108 svr4_so_ops.solib_create_inferior_hook = svr4_solib_create_inferior_hook;
3109 svr4_so_ops.special_symbol_handling = svr4_special_symbol_handling;
3110 svr4_so_ops.current_sos = svr4_current_sos;
3111 svr4_so_ops.open_symbol_file_object = open_symbol_file_object;
3112 svr4_so_ops.in_dynsym_resolve_code = svr4_in_dynsym_resolve_code;
3113 svr4_so_ops.bfd_open = solib_bfd_open;
3114 svr4_so_ops.lookup_lib_global_symbol = elf_lookup_lib_symbol;
3115 svr4_so_ops.same = svr4_same;
3116 svr4_so_ops.keep_data_in_core = svr4_keep_data_in_core;
3117 svr4_so_ops.update_breakpoints = svr4_update_solib_event_breakpoints;
3118 svr4_so_ops.handle_event = svr4_handle_solib_event;
3119 }