gdb/solib-svr4.c (binutils-gdb, commit 3eea0570afc647a04f1308dea2b253cdf253e43f)
1 /* Handle SVR4 shared libraries for GDB, the GNU Debugger.
2
3 Copyright (C) 1990-2013 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21
22 #include "elf/external.h"
23 #include "elf/common.h"
24 #include "elf/mips.h"
25
26 #include "symtab.h"
27 #include "bfd.h"
28 #include "symfile.h"
29 #include "objfiles.h"
30 #include "gdbcore.h"
31 #include "target.h"
32 #include "inferior.h"
33 #include "regcache.h"
34 #include "gdbthread.h"
35 #include "observer.h"
36
37 #include "gdb_assert.h"
38
39 #include "solist.h"
40 #include "solib.h"
41 #include "solib-svr4.h"
42
43 #include "bfd-target.h"
44 #include "elf-bfd.h"
45 #include "exec.h"
46 #include "auxv.h"
47 #include "exceptions.h"
48 #include "gdb_bfd.h"
49 #include "probe.h"
50
51 static struct link_map_offsets *svr4_fetch_link_map_offsets (void);
52 static int svr4_have_link_map_offsets (void);
53 static void svr4_relocate_main_executable (void);
54 static void svr4_free_library_list (void *p_list);
55
56 /* Link map info to include in an allocated so_list entry. */
57
58 struct lm_info
59 {
60 /* Amount by which addresses in the binary should be relocated to
61 match the inferior. The direct inferior value is L_ADDR_INFERIOR.
62 When prelinking is involved and the prelink base address changes,
63 we may need a different offset - the recomputed offset is in L_ADDR.
64 The two are commonly the same value; it is cached because we want to
65 warn about the difference and compute it only once. L_ADDR is valid
66 iff L_ADDR_P. */
67 CORE_ADDR l_addr, l_addr_inferior;
68 unsigned int l_addr_p : 1;
69
70 /* The target location of lm. */
71 CORE_ADDR lm_addr;
72
73 /* Values read in from the inferior's fields of the same name. */
74 CORE_ADDR l_ld, l_next, l_prev, l_name;
75 };
76
77 /* On SVR4 systems, a list of symbols in the dynamic linker where
78 GDB can try to place a breakpoint to monitor shared library
79 events.
80
81 If none of these symbols are found, or other errors occur, then
82 SVR4 systems will fall back to using a symbol as the "startup
83 mapping complete" breakpoint address. */
84
85 static const char * const solib_break_names[] =
86 {
87 "r_debug_state",
88 "_r_debug_state",
89 "_dl_debug_state",
90 "rtld_db_dlactivity",
91 "__dl_rtld_db_dlactivity",
92 "_rtld_debug_state",
93
94 NULL
95 };
96
97 static const char * const bkpt_names[] =
98 {
99 "_start",
100 "__start",
101 "main",
102 NULL
103 };
104
105 static const char * const main_name_list[] =
106 {
107 "main_$main",
108 NULL
109 };
110
111 /* What to do when a probe stop occurs. */
112
113 enum probe_action
114 {
115 /* Something went seriously wrong. Stop using probes and
116 revert to using the older interface. */
117 PROBES_INTERFACE_FAILED,
118
119 /* No action is required. The shared object list is still
120 valid. */
121 DO_NOTHING,
122
123 /* The shared object list should be reloaded entirely. */
124 FULL_RELOAD,
125
126 /* Attempt to incrementally update the shared object list. If
127 the update fails or is not possible, fall back to reloading
128 the list in full. */
129 UPDATE_OR_RELOAD,
130 };
131
132 /* A probe's name and its associated action. */
133
134 struct probe_info
135 {
136 /* The name of the probe. */
137 const char *name;
138
139 /* What to do when a probe stop occurs. */
140 enum probe_action action;
141 };
142
143 /* A list of named probes and their associated actions. If all
144 probes are present in the dynamic linker then the probes-based
145 interface will be used. */
146
147 static const struct probe_info probe_info[] =
148 {
149 { "init_start", DO_NOTHING },
150 { "init_complete", FULL_RELOAD },
151 { "map_start", DO_NOTHING },
152 { "map_failed", DO_NOTHING },
153 { "reloc_complete", UPDATE_OR_RELOAD },
154 { "unmap_start", DO_NOTHING },
155 { "unmap_complete", FULL_RELOAD },
156 };
157
158 #define NUM_PROBES ARRAY_SIZE (probe_info)
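
/* A rough sketch of the resulting flow (see svr4_handle_solib_event
   below): hitting the breakpoint placed on, e.g., the "reloc_complete"
   probe triggers an incremental update of the cached shared object
   list, falling back to a full reload if that fails, and reverting to
   the original (non-probes) interface if anything goes seriously
   wrong.  */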
159
160 /* Return non-zero if GDB_SO_NAME and INFERIOR_SO_NAME represent
161 the same shared library. */
162
163 static int
164 svr4_same_1 (const char *gdb_so_name, const char *inferior_so_name)
165 {
166 if (strcmp (gdb_so_name, inferior_so_name) == 0)
167 return 1;
168
169 /* On Solaris, when starting the inferior we think the dynamic linker is
170 /usr/lib/ld.so.1, but later on the table of loaded shared libraries
171 contains /lib/ld.so.1. Sometimes one file is a link to the other, but
172 sometimes they have identical content without being linked to each
173 other. We don't restrict this check to Solaris, but the chances
174 of running into this situation elsewhere are very low. */
175 if (strcmp (gdb_so_name, "/usr/lib/ld.so.1") == 0
176 && strcmp (inferior_so_name, "/lib/ld.so.1") == 0)
177 return 1;
178
179 /* Similarly, we observed the same issue with sparc64, but with
180 different locations. */
181 if (strcmp (gdb_so_name, "/usr/lib/sparcv9/ld.so.1") == 0
182 && strcmp (inferior_so_name, "/lib/sparcv9/ld.so.1") == 0)
183 return 1;
184
185 return 0;
186 }
187
188 static int
189 svr4_same (struct so_list *gdb, struct so_list *inferior)
190 {
191 return (svr4_same_1 (gdb->so_original_name, inferior->so_original_name));
192 }
193
194 static struct lm_info *
195 lm_info_read (CORE_ADDR lm_addr)
196 {
197 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
198 gdb_byte *lm;
199 struct lm_info *lm_info;
200 struct cleanup *back_to;
201
202 lm = xmalloc (lmo->link_map_size);
203 back_to = make_cleanup (xfree, lm);
204
205 if (target_read_memory (lm_addr, lm, lmo->link_map_size) != 0)
206 {
207 warning (_("Error reading shared library list entry at %s"),
208 paddress (target_gdbarch (), lm_addr));
209 lm_info = NULL;
210 }
211 else
212 {
213 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
214
215 lm_info = xzalloc (sizeof (*lm_info));
216 lm_info->lm_addr = lm_addr;
217
218 lm_info->l_addr_inferior = extract_typed_address (&lm[lmo->l_addr_offset],
219 ptr_type);
220 lm_info->l_ld = extract_typed_address (&lm[lmo->l_ld_offset], ptr_type);
221 lm_info->l_next = extract_typed_address (&lm[lmo->l_next_offset],
222 ptr_type);
223 lm_info->l_prev = extract_typed_address (&lm[lmo->l_prev_offset],
224 ptr_type);
225 lm_info->l_name = extract_typed_address (&lm[lmo->l_name_offset],
226 ptr_type);
227 }
228
229 do_cleanups (back_to);
230
231 return lm_info;
232 }
233
234 static int
235 has_lm_dynamic_from_link_map (void)
236 {
237 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
238
239 return lmo->l_ld_offset >= 0;
240 }
241
242 static CORE_ADDR
243 lm_addr_check (const struct so_list *so, bfd *abfd)
244 {
245 if (!so->lm_info->l_addr_p)
246 {
247 struct bfd_section *dyninfo_sect;
248 CORE_ADDR l_addr, l_dynaddr, dynaddr;
249
250 l_addr = so->lm_info->l_addr_inferior;
251
252 if (! abfd || ! has_lm_dynamic_from_link_map ())
253 goto set_addr;
254
255 l_dynaddr = so->lm_info->l_ld;
256
257 dyninfo_sect = bfd_get_section_by_name (abfd, ".dynamic");
258 if (dyninfo_sect == NULL)
259 goto set_addr;
260
261 dynaddr = bfd_section_vma (abfd, dyninfo_sect);
262
263 if (dynaddr + l_addr != l_dynaddr)
264 {
265 CORE_ADDR align = 0x1000;
266 CORE_ADDR minpagesize = align;
267
268 if (bfd_get_flavour (abfd) == bfd_target_elf_flavour)
269 {
270 Elf_Internal_Ehdr *ehdr = elf_tdata (abfd)->elf_header;
271 Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
272 int i;
273
274 align = 1;
275
276 for (i = 0; i < ehdr->e_phnum; i++)
277 if (phdr[i].p_type == PT_LOAD && phdr[i].p_align > align)
278 align = phdr[i].p_align;
279
280 minpagesize = get_elf_backend_data (abfd)->minpagesize;
281 }
282
283 /* Turn it into a mask. */
284 align--;
285
286 /* If the changes match the alignment requirements, we
287 assume we're using a core file that was generated by the
288 same binary, just prelinked with a different base offset.
289 If it doesn't match, we may have a different binary, the
290 same binary with the dynamic table loaded at an unrelated
291 location, or anything, really. To avoid regressions,
292 don't adjust the base offset in the latter case, although
293 odds are that, if things really changed, debugging won't
294 quite work.
295
296 One might rather expect the condition
297 ((l_addr & align) == 0 && ((l_dynaddr - dynaddr) & align) == 0)
298 but the one below is relaxed for PPC. The PPC kernel supports
299 either 4k or 64k page sizes. To be prepared for 64k pages,
300 PPC ELF files are built using an alignment requirement of 64k.
301 However, when running on a kernel supporting 4k pages, the memory
302 mapping of the library may not actually happen on a 64k boundary!
303
304 (In the usual case where (l_addr & align) == 0, this check is
305 equivalent to the possibly expected check above.)
306
307 Even on PPC it must be zero-aligned at least for MINPAGESIZE. */
308
309 l_addr = l_dynaddr - dynaddr;
310
311 if ((l_addr & (minpagesize - 1)) == 0
312 && (l_addr & align) == ((l_dynaddr - dynaddr) & align))
313 {
314 if (info_verbose)
315 printf_unfiltered (_("Using PIC (Position Independent Code) "
316 "prelink displacement %s for \"%s\".\n"),
317 paddress (target_gdbarch (), l_addr),
318 so->so_name);
319 }
320 else
321 {
322 /* There is no way to verify that the library file matches. While
323 prelinking an unprelinked file (or unprelinking a prelinked
324 file), prelink can shift the DYNAMIC segment by an arbitrary
325 offset without any page-size alignment. There is also no way
326 to obtain the ELF header and/or Program Headers for even a
327 limited verification that they match. One could verify the
328 DYNAMIC segment itself. Still, the address found here is the
329 best one GDB could find. */
330
331 warning (_(".dynamic section for \"%s\" "
332 "is not at the expected address "
333 "(wrong library or version mismatch?)"), so->so_name);
334 }
335 }
336
337 set_addr:
338 so->lm_info->l_addr = l_addr;
339 so->lm_info->l_addr_p = 1;
340 }
341
342 return so->lm_info->l_addr;
343 }
344
345 /* Per pspace SVR4 specific data. */
346
347 struct svr4_info
348 {
349 CORE_ADDR debug_base; /* Base of dynamic linker structures. */
350
351 /* Validity flag for debug_loader_offset. */
352 int debug_loader_offset_p;
353
354 /* Load address for the dynamic linker, inferred. */
355 CORE_ADDR debug_loader_offset;
356
357 /* Name of the dynamic linker, valid if debug_loader_offset_p. */
358 char *debug_loader_name;
359
360 /* Load map address for the main executable. */
361 CORE_ADDR main_lm_addr;
362
363 CORE_ADDR interp_text_sect_low;
364 CORE_ADDR interp_text_sect_high;
365 CORE_ADDR interp_plt_sect_low;
366 CORE_ADDR interp_plt_sect_high;
367
368 /* Nonzero if the list of objects was last obtained from the target
369 via qXfer:libraries-svr4:read. */
370 int using_xfer;
371
372 /* Table of struct probe_and_action instances, used by the
373 probes-based interface to map breakpoint addresses to probes
374 and their associated actions. Lookup is performed using
375 probe_and_action->probe->address. */
376 htab_t probes_table;
377
378 /* List of objects loaded into the inferior, used by the probes-
379 based interface. */
380 struct so_list *solib_list;
381 };
382
383 /* Per-program-space data key. */
384 static const struct program_space_data *solib_svr4_pspace_data;
385
386 /* Free the probes table. */
387
388 static void
389 free_probes_table (struct svr4_info *info)
390 {
391 if (info->probes_table == NULL)
392 return;
393
394 htab_delete (info->probes_table);
395 info->probes_table = NULL;
396 }
397
398 /* Free the solib list. */
399
400 static void
401 free_solib_list (struct svr4_info *info)
402 {
403 svr4_free_library_list (&info->solib_list);
404 info->solib_list = NULL;
405 }
406
407 static void
408 svr4_pspace_data_cleanup (struct program_space *pspace, void *arg)
409 {
410 struct svr4_info *info = arg;
411
412 free_probes_table (info);
413 free_solib_list (info);
414
415 xfree (info);
416 }
417
418 /* Get the current svr4 data. If none is found yet, add it now. This
419 function always returns a valid object. */
420
421 static struct svr4_info *
422 get_svr4_info (void)
423 {
424 struct svr4_info *info;
425
426 info = program_space_data (current_program_space, solib_svr4_pspace_data);
427 if (info != NULL)
428 return info;
429
430 info = XZALLOC (struct svr4_info);
431 set_program_space_data (current_program_space, solib_svr4_pspace_data, info);
432 return info;
433 }
434
435 /* Local function prototypes */
436
437 static int match_main (const char *);
438
439 /* Read program header TYPE from inferior memory. The header is found
440 by scanning the OS auxiliary vector.
441
442 If TYPE == -1, return the program headers instead of the contents of
443 one program header.
444
445 Return a pointer to allocated memory holding the program header contents,
446 or NULL on failure. If successful, and unless P_SECT_SIZE is NULL, the
447 size of those contents is returned in *P_SECT_SIZE. Likewise, the target
448 architecture size (32-bit or 64-bit) is returned in *P_ARCH_SIZE. */
449
450 static gdb_byte *
451 read_program_header (int type, int *p_sect_size, int *p_arch_size)
452 {
453 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
454 CORE_ADDR at_phdr, at_phent, at_phnum, pt_phdr = 0;
455 int arch_size, sect_size;
456 CORE_ADDR sect_addr;
457 gdb_byte *buf;
458 int pt_phdr_p = 0;
459
460 /* Get required auxv elements from target. */
461 if (target_auxv_search (&current_target, AT_PHDR, &at_phdr) <= 0)
462 return 0;
463 if (target_auxv_search (&current_target, AT_PHENT, &at_phent) <= 0)
464 return 0;
465 if (target_auxv_search (&current_target, AT_PHNUM, &at_phnum) <= 0)
466 return 0;
467 if (!at_phdr || !at_phnum)
468 return 0;
469
470 /* Determine ELF architecture type. */
471 if (at_phent == sizeof (Elf32_External_Phdr))
472 arch_size = 32;
473 else if (at_phent == sizeof (Elf64_External_Phdr))
474 arch_size = 64;
475 else
476 return 0;
477
478 /* Find the requested segment. */
479 if (type == -1)
480 {
481 sect_addr = at_phdr;
482 sect_size = at_phent * at_phnum;
483 }
484 else if (arch_size == 32)
485 {
486 Elf32_External_Phdr phdr;
487 int i;
488
489 /* Search for requested PHDR. */
490 for (i = 0; i < at_phnum; i++)
491 {
492 int p_type;
493
494 if (target_read_memory (at_phdr + i * sizeof (phdr),
495 (gdb_byte *)&phdr, sizeof (phdr)))
496 return 0;
497
498 p_type = extract_unsigned_integer ((gdb_byte *) phdr.p_type,
499 4, byte_order);
500
501 if (p_type == PT_PHDR)
502 {
503 pt_phdr_p = 1;
504 pt_phdr = extract_unsigned_integer ((gdb_byte *) phdr.p_vaddr,
505 4, byte_order);
506 }
507
508 if (p_type == type)
509 break;
510 }
511
512 if (i == at_phnum)
513 return 0;
514
515 /* Retrieve address and size. */
516 sect_addr = extract_unsigned_integer ((gdb_byte *)phdr.p_vaddr,
517 4, byte_order);
518 sect_size = extract_unsigned_integer ((gdb_byte *)phdr.p_memsz,
519 4, byte_order);
520 }
521 else
522 {
523 Elf64_External_Phdr phdr;
524 int i;
525
526 /* Search for requested PHDR. */
527 for (i = 0; i < at_phnum; i++)
528 {
529 int p_type;
530
531 if (target_read_memory (at_phdr + i * sizeof (phdr),
532 (gdb_byte *)&phdr, sizeof (phdr)))
533 return 0;
534
535 p_type = extract_unsigned_integer ((gdb_byte *) phdr.p_type,
536 4, byte_order);
537
538 if (p_type == PT_PHDR)
539 {
540 pt_phdr_p = 1;
541 pt_phdr = extract_unsigned_integer ((gdb_byte *) phdr.p_vaddr,
542 8, byte_order);
543 }
544
545 if (p_type == type)
546 break;
547 }
548
549 if (i == at_phnum)
550 return 0;
551
552 /* Retrieve address and size. */
553 sect_addr = extract_unsigned_integer ((gdb_byte *)phdr.p_vaddr,
554 8, byte_order);
555 sect_size = extract_unsigned_integer ((gdb_byte *)phdr.p_memsz,
556 8, byte_order);
557 }
558
559 /* PT_PHDR is optional, but we really need it
560 for PIE to make this work in general. */
561
562 if (pt_phdr_p)
563 {
564 /* at_phdr is the real address in memory; pt_phdr is what the program
565 header says it should be. The relocation offset is the difference. */
566 sect_addr = sect_addr + (at_phdr - pt_phdr);
567 }
568
569 /* Read in requested program header. */
570 buf = xmalloc (sect_size);
571 if (target_read_memory (sect_addr, buf, sect_size))
572 {
573 xfree (buf);
574 return NULL;
575 }
576
577 if (p_arch_size)
578 *p_arch_size = arch_size;
579 if (p_sect_size)
580 *p_sect_size = sect_size;
581
582 return buf;
583 }
584
585
586 /* Return program interpreter string. */
587 static char *
588 find_program_interpreter (void)
589 {
590 gdb_byte *buf = NULL;
591
592 /* If we have an exec_bfd, use its section table. */
593 if (exec_bfd
594 && bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
595 {
596 struct bfd_section *interp_sect;
597
598 interp_sect = bfd_get_section_by_name (exec_bfd, ".interp");
599 if (interp_sect != NULL)
600 {
601 int sect_size = bfd_section_size (exec_bfd, interp_sect);
602
603 buf = xmalloc (sect_size);
604 bfd_get_section_contents (exec_bfd, interp_sect, buf, 0, sect_size);
605 }
606 }
607
608 /* If we didn't find it, use the target auxiliary vector. */
609 if (!buf)
610 buf = read_program_header (PT_INTERP, NULL, NULL);
611
612 return (char *) buf;
613 }
614
615
616 /* Scan for DYNTAG in the .dynamic section of ABFD. If DYNTAG is found, 1
617 is returned and the corresponding PTR is set. */
618
619 static int
620 scan_dyntag (int dyntag, bfd *abfd, CORE_ADDR *ptr)
621 {
622 int arch_size, step, sect_size;
623 long dyn_tag;
624 CORE_ADDR dyn_ptr, dyn_addr;
625 gdb_byte *bufend, *bufstart, *buf;
626 Elf32_External_Dyn *x_dynp_32;
627 Elf64_External_Dyn *x_dynp_64;
628 struct bfd_section *sect;
629 struct target_section *target_section;
630
631 if (abfd == NULL)
632 return 0;
633
634 if (bfd_get_flavour (abfd) != bfd_target_elf_flavour)
635 return 0;
636
637 arch_size = bfd_get_arch_size (abfd);
638 if (arch_size == -1)
639 return 0;
640
641 /* Find the start address of the .dynamic section. */
642 sect = bfd_get_section_by_name (abfd, ".dynamic");
643 if (sect == NULL)
644 return 0;
645
646 for (target_section = current_target_sections->sections;
647 target_section < current_target_sections->sections_end;
648 target_section++)
649 if (sect == target_section->the_bfd_section)
650 break;
651 if (target_section < current_target_sections->sections_end)
652 dyn_addr = target_section->addr;
653 else
654 {
655 /* ABFD may come from an OBJFILE acting only as a symbol file without
656 being loaded into the target (see add_symbol_file_command). In that
657 case fall back to the file VMA address, without the possibility of
658 the section having been relocated to its actual in-memory address. */
659
660 dyn_addr = bfd_section_vma (abfd, sect);
661 }
662
663 /* Read in .dynamic from the BFD. We will get the actual value
664 from memory later. */
665 sect_size = bfd_section_size (abfd, sect);
666 buf = bufstart = alloca (sect_size);
667 if (!bfd_get_section_contents (abfd, sect,
668 buf, 0, sect_size))
669 return 0;
670
671 /* Iterate over BUF and scan for DYNTAG. If found, set PTR and return. */
672 step = (arch_size == 32) ? sizeof (Elf32_External_Dyn)
673 : sizeof (Elf64_External_Dyn);
674 for (bufend = buf + sect_size;
675 buf < bufend;
676 buf += step)
677 {
678 if (arch_size == 32)
679 {
680 x_dynp_32 = (Elf32_External_Dyn *) buf;
681 dyn_tag = bfd_h_get_32 (abfd, (bfd_byte *) x_dynp_32->d_tag);
682 dyn_ptr = bfd_h_get_32 (abfd, (bfd_byte *) x_dynp_32->d_un.d_ptr);
683 }
684 else
685 {
686 x_dynp_64 = (Elf64_External_Dyn *) buf;
687 dyn_tag = bfd_h_get_64 (abfd, (bfd_byte *) x_dynp_64->d_tag);
688 dyn_ptr = bfd_h_get_64 (abfd, (bfd_byte *) x_dynp_64->d_un.d_ptr);
689 }
690 if (dyn_tag == DT_NULL)
691 return 0;
692 if (dyn_tag == dyntag)
693 {
694 /* If requested, try to read the runtime value of this .dynamic
695 entry. */
696 if (ptr)
697 {
698 struct type *ptr_type;
699 gdb_byte ptr_buf[8];
700 CORE_ADDR ptr_addr;
701
702 ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
703 ptr_addr = dyn_addr + (buf - bufstart) + arch_size / 8;
704 if (target_read_memory (ptr_addr, ptr_buf, arch_size / 8) == 0)
705 dyn_ptr = extract_typed_address (ptr_buf, ptr_type);
706 *ptr = dyn_ptr;
707 }
708 return 1;
709 }
710 }
711
712 return 0;
713 }
714
715 /* Scan for DYNTAG in the .dynamic section of the target's main executable,
716 found by consulting the OS auxiliary vector. If DYNTAG is found, 1 is
717 returned and the corresponding PTR is set. */
718
719 static int
720 scan_dyntag_auxv (int dyntag, CORE_ADDR *ptr)
721 {
722 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
723 int sect_size, arch_size, step;
724 long dyn_tag;
725 CORE_ADDR dyn_ptr;
726 gdb_byte *bufend, *bufstart, *buf;
727
728 /* Read in .dynamic section. */
729 buf = bufstart = read_program_header (PT_DYNAMIC, &sect_size, &arch_size);
730 if (!buf)
731 return 0;
732
733 /* Iterate over BUF and scan for DYNTAG. If found, set PTR and return. */
734 step = (arch_size == 32) ? sizeof (Elf32_External_Dyn)
735 : sizeof (Elf64_External_Dyn);
736 for (bufend = buf + sect_size;
737 buf < bufend;
738 buf += step)
739 {
740 if (arch_size == 32)
741 {
742 Elf32_External_Dyn *dynp = (Elf32_External_Dyn *) buf;
743
744 dyn_tag = extract_unsigned_integer ((gdb_byte *) dynp->d_tag,
745 4, byte_order);
746 dyn_ptr = extract_unsigned_integer ((gdb_byte *) dynp->d_un.d_ptr,
747 4, byte_order);
748 }
749 else
750 {
751 Elf64_External_Dyn *dynp = (Elf64_External_Dyn *) buf;
752
753 dyn_tag = extract_unsigned_integer ((gdb_byte *) dynp->d_tag,
754 8, byte_order);
755 dyn_ptr = extract_unsigned_integer ((gdb_byte *) dynp->d_un.d_ptr,
756 8, byte_order);
757 }
758 if (dyn_tag == DT_NULL)
759 break;
760
761 if (dyn_tag == dyntag)
762 {
763 if (ptr)
764 *ptr = dyn_ptr;
765
766 xfree (bufstart);
767 return 1;
768 }
769 }
770
771 xfree (bufstart);
772 return 0;
773 }
774
775 /* Locate the base address of dynamic linker structs for SVR4 elf
776 targets.
777
778 For SVR4 elf targets the address of the dynamic linker's runtime
779 structure is contained within the dynamic info section in the
780 executable file. The dynamic section is also mapped into the
781 inferior address space. Because the runtime loader fills in the
782 real address before starting the inferior, we have to read in the
783 dynamic info section from the inferior address space.
784 If there are any errors while trying to find the address, we
785 silently return 0, otherwise the found address is returned. */
786
787 static CORE_ADDR
788 elf_locate_base (void)
789 {
790 struct minimal_symbol *msymbol;
791 CORE_ADDR dyn_ptr;
792
793 /* Look for DT_MIPS_RLD_MAP first. MIPS executables use this
794 instead of DT_DEBUG, although they sometimes contain an unused
795 DT_DEBUG. */
796 if (scan_dyntag (DT_MIPS_RLD_MAP, exec_bfd, &dyn_ptr)
797 || scan_dyntag_auxv (DT_MIPS_RLD_MAP, &dyn_ptr))
798 {
799 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
800 gdb_byte *pbuf;
801 int pbuf_size = TYPE_LENGTH (ptr_type);
802
803 pbuf = alloca (pbuf_size);
804 /* DT_MIPS_RLD_MAP contains a pointer to the address
805 of the dynamic link structure. */
806 if (target_read_memory (dyn_ptr, pbuf, pbuf_size))
807 return 0;
808 return extract_typed_address (pbuf, ptr_type);
809 }
810
811 /* Find DT_DEBUG. */
812 if (scan_dyntag (DT_DEBUG, exec_bfd, &dyn_ptr)
813 || scan_dyntag_auxv (DT_DEBUG, &dyn_ptr))
814 return dyn_ptr;
815
816 /* This may be a static executable. Look for the symbol
817 conventionally named _r_debug, as a last resort. */
818 msymbol = lookup_minimal_symbol ("_r_debug", NULL, symfile_objfile);
819 if (msymbol != NULL)
820 return SYMBOL_VALUE_ADDRESS (msymbol);
821
822 /* DT_DEBUG entry not found. */
823 return 0;
824 }
825
826 /* Locate the base address of dynamic linker structs.
827
828 For both the SunOS and SVR4 shared library implementations, if the
829 inferior executable has been linked dynamically, there is a single
830 address somewhere in the inferior's data space which is the key to
831 locating all of the dynamic linker's runtime structures. This
832 address is the value of the debug base symbol. The job of this
833 function is to find and return that address, or to return 0 if there
834 is no such address (the executable is statically linked for example).
835
836 For SunOS, the job is almost trivial, since the dynamic linker and
837 all of its structures are statically linked to the executable at
838 link time. Thus the symbol for the address we are looking for has
839 already been added to the minimal symbol table for the executable's
840 objfile at the time the symbol file's symbols were read, and all we
841 have to do is look it up there. Note that we explicitly do NOT want
842 to find the copies in the shared library.
843
844 The SVR4 version is a bit more complicated because the address
845 is contained somewhere in the dynamic info section. We have to go
846 to a lot more work to discover the address of the debug base symbol.
847 Because of this complexity, we cache the value we find and return that
848 value on subsequent invocations. Note there is no copy in the
849 executable symbol tables. */
850
851 static CORE_ADDR
852 locate_base (struct svr4_info *info)
853 {
854 /* Check to see if we have a currently valid address, and if so, avoid
855 doing all this work again and just return the cached address. If
856 we have no cached address, try to locate it in the dynamic info
857 section for ELF executables. There's no point in doing any of this
858 though if we don't have some link map offsets to work with. */
859
860 if (info->debug_base == 0 && svr4_have_link_map_offsets ())
861 info->debug_base = elf_locate_base ();
862 return info->debug_base;
863 }
864
865 /* Find the first element in the inferior's dynamic link map, and
866 return its address in the inferior. Return zero if the address
867 could not be determined.
868
869 FIXME: Perhaps we should validate the info somehow, perhaps by
870 checking r_version for a known version number, or r_state for
871 RT_CONSISTENT. */
872
873 static CORE_ADDR
874 solib_svr4_r_map (struct svr4_info *info)
875 {
876 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
877 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
878 CORE_ADDR addr = 0;
879 volatile struct gdb_exception ex;
880
881 TRY_CATCH (ex, RETURN_MASK_ERROR)
882 {
883 addr = read_memory_typed_address (info->debug_base + lmo->r_map_offset,
884 ptr_type);
885 }
886 exception_print (gdb_stderr, ex);
887 return addr;
888 }
889
890 /* Find r_brk from the inferior's debug base. */
891
892 static CORE_ADDR
893 solib_svr4_r_brk (struct svr4_info *info)
894 {
895 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
896 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
897
898 return read_memory_typed_address (info->debug_base + lmo->r_brk_offset,
899 ptr_type);
900 }
901
902 /* Find the link map for the dynamic linker (if it is not in the
903 normal list of loaded shared objects). */
904
905 static CORE_ADDR
906 solib_svr4_r_ldsomap (struct svr4_info *info)
907 {
908 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
909 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
910 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
911 ULONGEST version;
912
913 /* Check version, and return zero if `struct r_debug' doesn't have
914 the r_ldsomap member. */
915 version
916 = read_memory_unsigned_integer (info->debug_base + lmo->r_version_offset,
917 lmo->r_version_size, byte_order);
918 if (version < 2 || lmo->r_ldsomap_offset == -1)
919 return 0;
920
921 return read_memory_typed_address (info->debug_base + lmo->r_ldsomap_offset,
922 ptr_type);
923 }
924
925 /* On Solaris systems with some versions of the dynamic linker,
926 ld.so's l_name pointer points to the SONAME in the string table
927 rather than into writable memory. So that GDB can find shared
928 libraries when loading a core file generated by gcore, ensure that
929 memory areas containing the l_name string are saved in the core
930 file. */
931
932 static int
933 svr4_keep_data_in_core (CORE_ADDR vaddr, unsigned long size)
934 {
935 struct svr4_info *info;
936 CORE_ADDR ldsomap;
937 struct so_list *new;
938 struct cleanup *old_chain;
939 CORE_ADDR name_lm;
940
941 info = get_svr4_info ();
942
943 info->debug_base = 0;
944 locate_base (info);
945 if (!info->debug_base)
946 return 0;
947
948 ldsomap = solib_svr4_r_ldsomap (info);
949 if (!ldsomap)
950 return 0;
951
952 new = XZALLOC (struct so_list);
953 old_chain = make_cleanup (xfree, new);
954 new->lm_info = lm_info_read (ldsomap);
955 make_cleanup (xfree, new->lm_info);
956 name_lm = new->lm_info ? new->lm_info->l_name : 0;
957 do_cleanups (old_chain);
958
959 return (name_lm >= vaddr && name_lm < vaddr + size);
960 }
961
962 /* Implement the "open_symbol_file_object" target_so_ops method.
963
964 If no open symbol file, attempt to locate and open the main symbol
965 file. On SVR4 systems, this is the first link map entry. If its
966 name is here, we can open it. Useful when attaching to a process
967 without first loading its symbol file. */
968
969 static int
970 open_symbol_file_object (void *from_ttyp)
971 {
972 CORE_ADDR lm, l_name;
973 char *filename;
974 int errcode;
975 int from_tty = *(int *)from_ttyp;
976 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
977 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
978 int l_name_size = TYPE_LENGTH (ptr_type);
979 gdb_byte *l_name_buf = xmalloc (l_name_size);
980 struct cleanup *cleanups = make_cleanup (xfree, l_name_buf);
981 struct svr4_info *info = get_svr4_info ();
982
983 if (symfile_objfile)
984 if (!query (_("Attempt to reload symbols from process? ")))
985 {
986 do_cleanups (cleanups);
987 return 0;
988 }
989
990 /* Always locate the debug struct, in case it has moved. */
991 info->debug_base = 0;
992 if (locate_base (info) == 0)
993 {
994 do_cleanups (cleanups);
995 return 0; /* failed somehow... */
996 }
997
998 /* First link map member should be the executable. */
999 lm = solib_svr4_r_map (info);
1000 if (lm == 0)
1001 {
1002 do_cleanups (cleanups);
1003 return 0; /* failed somehow... */
1004 }
1005
1006 /* Read address of name from target memory to GDB. */
1007 read_memory (lm + lmo->l_name_offset, l_name_buf, l_name_size);
1008
1009 /* Convert the address to host format. */
1010 l_name = extract_typed_address (l_name_buf, ptr_type);
1011
1012 if (l_name == 0)
1013 {
1014 do_cleanups (cleanups);
1015 return 0; /* No filename. */
1016 }
1017
1018 /* Now fetch the filename from target memory. */
1019 target_read_string (l_name, &filename, SO_NAME_MAX_PATH_SIZE - 1, &errcode);
1020 make_cleanup (xfree, filename);
1021
1022 if (errcode)
1023 {
1024 warning (_("failed to read exec filename from attached file: %s"),
1025 safe_strerror (errcode));
1026 do_cleanups (cleanups);
1027 return 0;
1028 }
1029
1030 /* Have a pathname: read the symbol file. */
1031 symbol_file_add_main (filename, from_tty);
1032
1033 do_cleanups (cleanups);
1034 return 1;
1035 }
1036
1037 /* Data exchange structure for the XML parser, filled in by
1038 svr4_current_sos_via_xfer_libraries. */
1039
1040 struct svr4_library_list
1041 {
1042 struct so_list *head, **tailp;
1043
1044 /* Inferior address of struct link_map used for the main executable. It is
1045 zero if not known. */
1046 CORE_ADDR main_lm;
1047 };
1048
1049 /* Implementation for target_so_ops.free_so. */
1050
1051 static void
1052 svr4_free_so (struct so_list *so)
1053 {
1054 xfree (so->lm_info);
1055 }
1056
1057 /* Implement target_so_ops.clear_so. */
1058
1059 static void
1060 svr4_clear_so (struct so_list *so)
1061 {
1062 if (so->lm_info != NULL)
1063 so->lm_info->l_addr_p = 0;
1064 }
1065
1066 /* Free so_list built so far (called via cleanup). */
1067
1068 static void
1069 svr4_free_library_list (void *p_list)
1070 {
1071 struct so_list *list = *(struct so_list **) p_list;
1072
1073 while (list != NULL)
1074 {
1075 struct so_list *next = list->next;
1076
1077 free_so (list);
1078 list = next;
1079 }
1080 }
1081
1082 /* Copy library list. */
1083
1084 static struct so_list *
1085 svr4_copy_library_list (struct so_list *src)
1086 {
1087 struct so_list *dst = NULL;
1088 struct so_list **link = &dst;
1089
1090 while (src != NULL)
1091 {
1092 struct so_list *new;
1093
1094 new = xmalloc (sizeof (struct so_list));
1095 memcpy (new, src, sizeof (struct so_list));
1096
1097 new->lm_info = xmalloc (sizeof (struct lm_info));
1098 memcpy (new->lm_info, src->lm_info, sizeof (struct lm_info));
1099
1100 new->next = NULL;
1101 *link = new;
1102 link = &new->next;
1103
1104 src = src->next;
1105 }
1106
1107 return dst;
1108 }
1109
1110 #ifdef HAVE_LIBEXPAT
1111
1112 #include "xml-support.h"
1113
1114 /* Handle the start of a <library> element. Note: new elements are added
1115 at the tail of the list, keeping the list in order. */
1116
1117 static void
1118 library_list_start_library (struct gdb_xml_parser *parser,
1119 const struct gdb_xml_element *element,
1120 void *user_data, VEC(gdb_xml_value_s) *attributes)
1121 {
1122 struct svr4_library_list *list = user_data;
1123 const char *name = xml_find_attribute (attributes, "name")->value;
1124 ULONGEST *lmp = xml_find_attribute (attributes, "lm")->value;
1125 ULONGEST *l_addrp = xml_find_attribute (attributes, "l_addr")->value;
1126 ULONGEST *l_ldp = xml_find_attribute (attributes, "l_ld")->value;
1127 struct so_list *new_elem;
1128
1129 new_elem = XZALLOC (struct so_list);
1130 new_elem->lm_info = XZALLOC (struct lm_info);
1131 new_elem->lm_info->lm_addr = *lmp;
1132 new_elem->lm_info->l_addr_inferior = *l_addrp;
1133 new_elem->lm_info->l_ld = *l_ldp;
1134
1135 strncpy (new_elem->so_name, name, sizeof (new_elem->so_name) - 1);
1136 new_elem->so_name[sizeof (new_elem->so_name) - 1] = 0;
1137 strcpy (new_elem->so_original_name, new_elem->so_name);
1138
1139 *list->tailp = new_elem;
1140 list->tailp = &new_elem->next;
1141 }
1142
1143 /* Handle the start of a <library-list-svr4> element. */
1144
1145 static void
1146 svr4_library_list_start_list (struct gdb_xml_parser *parser,
1147 const struct gdb_xml_element *element,
1148 void *user_data, VEC(gdb_xml_value_s) *attributes)
1149 {
1150 struct svr4_library_list *list = user_data;
1151 const char *version = xml_find_attribute (attributes, "version")->value;
1152 struct gdb_xml_value *main_lm = xml_find_attribute (attributes, "main-lm");
1153
1154 if (strcmp (version, "1.0") != 0)
1155 gdb_xml_error (parser,
1156 _("SVR4 Library list has unsupported version \"%s\""),
1157 version);
1158
1159 if (main_lm)
1160 list->main_lm = *(ULONGEST *) main_lm->value;
1161 }
1162
1163 /* The allowed elements and attributes for an XML library list.
1164 The root element is a <library-list-svr4>. */
1165
1166 static const struct gdb_xml_attribute svr4_library_attributes[] =
1167 {
1168 { "name", GDB_XML_AF_NONE, NULL, NULL },
1169 { "lm", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1170 { "l_addr", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1171 { "l_ld", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1172 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1173 };
1174
1175 static const struct gdb_xml_element svr4_library_list_children[] =
1176 {
1177 {
1178 "library", svr4_library_attributes, NULL,
1179 GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL,
1180 library_list_start_library, NULL
1181 },
1182 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1183 };
1184
1185 static const struct gdb_xml_attribute svr4_library_list_attributes[] =
1186 {
1187 { "version", GDB_XML_AF_NONE, NULL, NULL },
1188 { "main-lm", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
1189 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1190 };
1191
1192 static const struct gdb_xml_element svr4_library_list_elements[] =
1193 {
1194 { "library-list-svr4", svr4_library_list_attributes, svr4_library_list_children,
1195 GDB_XML_EF_NONE, svr4_library_list_start_list, NULL },
1196 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1197 };
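
/* For illustration only (the library name and addresses below are
   hypothetical), a document accepted by the tables above has the form:

     <library-list-svr4 version="1.0" main-lm="0x400158">
       <library name="/lib64/libc.so.6" lm="0x7ffff7ffe1c8"
                l_addr="0x7ffff7a0f000" l_ld="0x7ffff7dcdba0"/>
     </library-list-svr4>

   where "main-lm" is optional and <library> elements may repeat.  */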
1198
1199 /* Parse DOCUMENT, an XML library list, into *LIST.
1200
1201 Return 0 if DOCUMENT could not be parsed; any partially built list is
1202 freed in that case. Return 1 if *LIST contains the library list (it
1203 may be empty); the caller is responsible for freeing all its entries. */
1204
1205 static int
1206 svr4_parse_libraries (const char *document, struct svr4_library_list *list)
1207 {
1208 struct cleanup *back_to = make_cleanup (svr4_free_library_list,
1209 &list->head);
1210
1211 memset (list, 0, sizeof (*list));
1212 list->tailp = &list->head;
1213 if (gdb_xml_parse_quick (_("target library list"), "library-list.dtd",
1214 svr4_library_list_elements, document, list) == 0)
1215 {
1216 /* Parsed successfully, keep the result. */
1217 discard_cleanups (back_to);
1218 return 1;
1219 }
1220
1221 do_cleanups (back_to);
1222 return 0;
1223 }
1224
1225 /* Attempt to get so_list from target via qXfer:libraries-svr4:read packet.
1226
1227 Return 0 if the packet is not supported; *LIST is not modified in that
1228 case. Return 1 if *LIST contains the library list; it may be empty, and
1229 the caller is responsible for freeing all its entries.
1230
1231 Note that ANNEX must be NULL if the remote does not explicitly allow
1232 qXfer:libraries-svr4:read packets with non-empty annexes. Support for
1233 this can be checked using target_augmented_libraries_svr4_read (). */
1234
1235 static int
1236 svr4_current_sos_via_xfer_libraries (struct svr4_library_list *list,
1237 const char *annex)
1238 {
1239 char *svr4_library_document;
1240 int result;
1241 struct cleanup *back_to;
1242
1243 gdb_assert (annex == NULL || target_augmented_libraries_svr4_read ());
1244
1245 /* Fetch the list of shared libraries. */
1246 svr4_library_document = target_read_stralloc (&current_target,
1247 TARGET_OBJECT_LIBRARIES_SVR4,
1248 annex);
1249 if (svr4_library_document == NULL)
1250 return 0;
1251
1252 back_to = make_cleanup (xfree, svr4_library_document);
1253 result = svr4_parse_libraries (svr4_library_document, list);
1254 do_cleanups (back_to);
1255
1256 return result;
1257 }
1258
1259 #else
1260
1261 static int
1262 svr4_current_sos_via_xfer_libraries (struct svr4_library_list *list,
1263 const char *annex)
1264 {
1265 return 0;
1266 }
1267
1268 #endif
1269
1270 /* If no shared library information is available from the dynamic
1271 linker, build a fallback list from other sources. */
1272
1273 static struct so_list *
1274 svr4_default_sos (void)
1275 {
1276 struct svr4_info *info = get_svr4_info ();
1277 struct so_list *new;
1278
1279 if (!info->debug_loader_offset_p)
1280 return NULL;
1281
1282 new = XZALLOC (struct so_list);
1283
1284 new->lm_info = xzalloc (sizeof (struct lm_info));
1285
1286 /* Nothing will ever check the other fields if we set l_addr_p. */
1287 new->lm_info->l_addr = info->debug_loader_offset;
1288 new->lm_info->l_addr_p = 1;
1289
1290 strncpy (new->so_name, info->debug_loader_name, SO_NAME_MAX_PATH_SIZE - 1);
1291 new->so_name[SO_NAME_MAX_PATH_SIZE - 1] = '\0';
1292 strcpy (new->so_original_name, new->so_name);
1293
1294 return new;
1295 }
1296
1297 /* Read the whole inferior libraries chain starting at address LM.
1298 Expect the first entry in the chain to have PREV_LM as its l_prev.
1299 Add the entries to the tail referenced by LINK_PTR_PTR. Ignore the
1300 first entry if IGNORE_FIRST and set global MAIN_LM_ADDR according
1301 to it. Returns nonzero upon success. If zero is returned the
1302 entries stored to LINK_PTR_PTR are still valid although they may
1303 represent only part of the inferior library list. */
1304
1305 static int
1306 svr4_read_so_list (CORE_ADDR lm, CORE_ADDR prev_lm,
1307 struct so_list ***link_ptr_ptr, int ignore_first)
1308 {
1309 struct so_list *first = NULL;
1310 CORE_ADDR next_lm;
1311
1312 for (; lm != 0; prev_lm = lm, lm = next_lm)
1313 {
1314 struct so_list *new;
1315 struct cleanup *old_chain;
1316 int errcode;
1317 char *buffer;
1318
1319 new = XZALLOC (struct so_list);
1320 old_chain = make_cleanup_free_so (new);
1321
1322 new->lm_info = lm_info_read (lm);
1323 if (new->lm_info == NULL)
1324 {
1325 do_cleanups (old_chain);
1326 return 0;
1327 }
1328
1329 next_lm = new->lm_info->l_next;
1330
1331 if (new->lm_info->l_prev != prev_lm)
1332 {
1333 warning (_("Corrupted shared library list: %s != %s"),
1334 paddress (target_gdbarch (), prev_lm),
1335 paddress (target_gdbarch (), new->lm_info->l_prev));
1336 do_cleanups (old_chain);
1337 return 0;
1338 }
1339
1340 /* For SVR4 versions, the first entry in the link map is for the
1341 inferior executable, so we must ignore it. For some versions of
1342 SVR4, it has no name. For others (Solaris 2.3 for example), it
1343 does have a name, so we can no longer use a missing name to
1344 decide when to ignore it. */
1345 if (ignore_first && new->lm_info->l_prev == 0)
1346 {
1347 struct svr4_info *info = get_svr4_info ();
1348
1349 first = new;
1350 info->main_lm_addr = new->lm_info->lm_addr;
1351 do_cleanups (old_chain);
1352 continue;
1353 }
1354
1355 /* Extract this shared object's name. */
1356 target_read_string (new->lm_info->l_name, &buffer,
1357 SO_NAME_MAX_PATH_SIZE - 1, &errcode);
1358 if (errcode != 0)
1359 {
1360 /* If this entry's l_name address matches that of the
1361 inferior executable, then this is not a normal shared
1362 object, but (most likely) a vDSO. In this case, silently
1363 skip it; otherwise emit a warning. */
1364 if (first == NULL
1365 || new->lm_info->l_name != first->lm_info->l_name)
1366 warning (_("Can't read pathname for load map: %s."),
1367 safe_strerror (errcode));
1368 do_cleanups (old_chain);
1369 continue;
1370 }
1371
1372 strncpy (new->so_name, buffer, SO_NAME_MAX_PATH_SIZE - 1);
1373 new->so_name[SO_NAME_MAX_PATH_SIZE - 1] = '\0';
1374 strcpy (new->so_original_name, new->so_name);
1375 xfree (buffer);
1376
1377 /* If this entry has no name, or its name matches the name
1378 for the main executable, don't include it in the list. */
1379 if (! new->so_name[0] || match_main (new->so_name))
1380 {
1381 do_cleanups (old_chain);
1382 continue;
1383 }
1384
1385 discard_cleanups (old_chain);
1386 new->next = 0;
1387 **link_ptr_ptr = new;
1388 *link_ptr_ptr = &new->next;
1389 }
1390
1391 return 1;
1392 }
1393
1394 /* Read the full list of currently loaded shared objects directly
1395 from the inferior, without referring to any libraries read and
1396 stored by the probes interface. Handle special cases relating
1397 to the first elements of the list. */
1398
1399 static struct so_list *
1400 svr4_current_sos_direct (struct svr4_info *info)
1401 {
1402 CORE_ADDR lm;
1403 struct so_list *head = NULL;
1404 struct so_list **link_ptr = &head;
1405 struct cleanup *back_to;
1406 int ignore_first;
1407 struct svr4_library_list library_list;
1408
1409 /* Fall back to manual examination of the target if the packet is not
1410 supported or gdbserver failed to find DT_DEBUG. gdb.server/solib-list.exp
1411 tests a case where gdbserver cannot find the shared libraries list while
1412 GDB itself is able to find it via SYMFILE_OBJFILE.
1413
1414 Unfortunately statically linked inferiors will also fall back through this
1415 suboptimal code path. */
1416
1417 info->using_xfer = svr4_current_sos_via_xfer_libraries (&library_list,
1418 NULL);
1419 if (info->using_xfer)
1420 {
1421 if (library_list.main_lm)
1422 info->main_lm_addr = library_list.main_lm;
1423
1424 return library_list.head ? library_list.head : svr4_default_sos ();
1425 }
1426
1427 /* Always locate the debug struct, in case it has moved. */
1428 info->debug_base = 0;
1429 locate_base (info);
1430
1431 /* If we can't find the dynamic linker's base structure, this
1432 must not be a dynamically linked executable. Hmm. */
1433 if (! info->debug_base)
1434 return svr4_default_sos ();
1435
1436 /* Assume that everything is a library if the dynamic loader was loaded
1437 late by a static executable. */
1438 if (exec_bfd && bfd_get_section_by_name (exec_bfd, ".dynamic") == NULL)
1439 ignore_first = 0;
1440 else
1441 ignore_first = 1;
1442
1443 back_to = make_cleanup (svr4_free_library_list, &head);
1444
1445 /* Walk the inferior's link map list, and build our list of
1446 `struct so_list' nodes. */
1447 lm = solib_svr4_r_map (info);
1448 if (lm)
1449 svr4_read_so_list (lm, 0, &link_ptr, ignore_first);
1450
1451 /* On Solaris, the dynamic linker is not in the normal list of
1452 shared objects, so make sure we pick it up too. Having
1453 symbol information for the dynamic linker is quite crucial
1454 for skipping dynamic linker resolver code. */
1455 lm = solib_svr4_r_ldsomap (info);
1456 if (lm)
1457 svr4_read_so_list (lm, 0, &link_ptr, 0);
1458
1459 discard_cleanups (back_to);
1460
1461 if (head == NULL)
1462 return svr4_default_sos ();
1463
1464 return head;
1465 }
1466
1467 /* Implement the "current_sos" target_so_ops method. */
1468
1469 static struct so_list *
1470 svr4_current_sos (void)
1471 {
1472 struct svr4_info *info = get_svr4_info ();
1473
1474 /* If the solib list has been read and stored by the probes
1475 interface then we return a copy of the stored list. */
1476 if (info->solib_list != NULL)
1477 return svr4_copy_library_list (info->solib_list);
1478
1479 /* Otherwise obtain the solib list directly from the inferior. */
1480 return svr4_current_sos_direct (info);
1481 }
1482
1483 /* Get the address of the link_map for a given OBJFILE. */
1484
1485 CORE_ADDR
1486 svr4_fetch_objfile_link_map (struct objfile *objfile)
1487 {
1488 struct so_list *so;
1489 struct svr4_info *info = get_svr4_info ();
1490
1491 /* Cause svr4_current_sos() to be run if it hasn't been already. */
1492 if (info->main_lm_addr == 0)
1493 solib_add (NULL, 0, &current_target, auto_solib_add);
1494
1495 /* svr4_current_sos() will set main_lm_addr for the main executable. */
1496 if (objfile == symfile_objfile)
1497 return info->main_lm_addr;
1498
1499 /* The other link map addresses may be found by examining the list
1500 of shared libraries. */
1501 for (so = master_so_list (); so; so = so->next)
1502 if (so->objfile == objfile)
1503 return so->lm_info->lm_addr;
1504
1505 /* Not found! */
1506 return 0;
1507 }
1508
1509 /* On some systems, the only way to recognize the link map entry for
1510 the main executable file is by looking at its name. Return
1511 non-zero iff SONAME matches one of the known main executable names. */
1512
1513 static int
1514 match_main (const char *soname)
1515 {
1516 const char * const *mainp;
1517
1518 for (mainp = main_name_list; *mainp != NULL; mainp++)
1519 {
1520 if (strcmp (soname, *mainp) == 0)
1521 return (1);
1522 }
1523
1524 return (0);
1525 }
1526
1527 /* Return 1 if PC lies in the dynamic symbol resolution code of the
1528 SVR4 run time loader. */
1529
1530 int
1531 svr4_in_dynsym_resolve_code (CORE_ADDR pc)
1532 {
1533 struct svr4_info *info = get_svr4_info ();
1534
1535 return ((pc >= info->interp_text_sect_low
1536 && pc < info->interp_text_sect_high)
1537 || (pc >= info->interp_plt_sect_low
1538 && pc < info->interp_plt_sect_high)
1539 || in_plt_section (pc)
1540 || in_gnu_ifunc_stub (pc));
1541 }
1542
1543 /* Given an executable's ABFD and target, compute the entry-point
1544 address. */
1545
1546 static CORE_ADDR
1547 exec_entry_point (struct bfd *abfd, struct target_ops *targ)
1548 {
1549 CORE_ADDR addr;
1550
1551 /* KevinB wrote ... for most targets, the address returned by
1552 bfd_get_start_address() is the entry point for the start
1553 function. But, for some targets, bfd_get_start_address() returns
1554 the address of a function descriptor from which the entry point
1555 address may be extracted. This address is extracted by
1556 gdbarch_convert_from_func_ptr_addr(). The method
1557 gdbarch_convert_from_func_ptr_addr() is merely the identity
1558 function for targets which don't use function descriptors. */
1559 addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
1560 bfd_get_start_address (abfd),
1561 targ);
1562 return gdbarch_addr_bits_remove (target_gdbarch (), addr);
1563 }
1564
1565 /* A probe and its associated action. */
1566
1567 struct probe_and_action
1568 {
1569 /* The probe. */
1570 struct probe *probe;
1571
1572 /* The action. */
1573 enum probe_action action;
1574 };
1575
1576 /* Returns a hash code for the probe_and_action referenced by p. */
1577
1578 static hashval_t
1579 hash_probe_and_action (const void *p)
1580 {
1581 const struct probe_and_action *pa = p;
1582
1583 return (hashval_t) pa->probe->address;
1584 }
1585
1586 /* Returns non-zero if the probe_and_actions referenced by p1 and p2
1587 are equal. */
1588
1589 static int
1590 equal_probe_and_action (const void *p1, const void *p2)
1591 {
1592 const struct probe_and_action *pa1 = p1;
1593 const struct probe_and_action *pa2 = p2;
1594
1595 return pa1->probe->address == pa2->probe->address;
1596 }
1597
1598 /* Register a solib event probe and its associated action in the
1599 probes table. */
1600
1601 static void
1602 register_solib_event_probe (struct probe *probe, enum probe_action action)
1603 {
1604 struct svr4_info *info = get_svr4_info ();
1605 struct probe_and_action lookup, *pa;
1606 void **slot;
1607
1608 /* Create the probes table, if necessary. */
1609 if (info->probes_table == NULL)
1610 info->probes_table = htab_create_alloc (1, hash_probe_and_action,
1611 equal_probe_and_action,
1612 xfree, xcalloc, xfree);
1613
1614 lookup.probe = probe;
1615 slot = htab_find_slot (info->probes_table, &lookup, INSERT);
1616 gdb_assert (*slot == HTAB_EMPTY_ENTRY);
1617
1618 pa = XCNEW (struct probe_and_action);
1619 pa->probe = probe;
1620 pa->action = action;
1621
1622 *slot = pa;
1623 }
1624
1625 /* Get the solib event probe at the specified location, and the
1626 action associated with it. Returns NULL if no solib event probe
1627 was found. */
1628
1629 static struct probe_and_action *
1630 solib_event_probe_at (struct svr4_info *info, CORE_ADDR address)
1631 {
1632 struct probe lookup_probe;
1633 struct probe_and_action lookup;
1634 void **slot;
1635
1636 lookup_probe.address = address;
1637 lookup.probe = &lookup_probe;
1638 slot = htab_find_slot (info->probes_table, &lookup, NO_INSERT);
1639
1640 if (slot == NULL)
1641 return NULL;
1642
1643 return (struct probe_and_action *) *slot;
1644 }
1645
1646 /* Decide what action to take when the specified solib event probe is
1647 hit. */
1648
1649 static enum probe_action
1650 solib_event_probe_action (struct probe_and_action *pa)
1651 {
1652 enum probe_action action;
1653 unsigned probe_argc;
1654
1655 action = pa->action;
1656 if (action == DO_NOTHING || action == PROBES_INTERFACE_FAILED)
1657 return action;
1658
1659 gdb_assert (action == FULL_RELOAD || action == UPDATE_OR_RELOAD);
1660
1661 /* Check that an appropriate number of arguments has been supplied.
1662 We expect:
1663 arg0: Lmid_t lmid (mandatory)
1664 arg1: struct r_debug *debug_base (mandatory)
1665 arg2: struct link_map *new (optional, for incremental updates) */
1666 probe_argc = get_probe_argument_count (pa->probe);
1667 if (probe_argc == 2)
1668 action = FULL_RELOAD;
1669 else if (probe_argc < 2)
1670 action = PROBES_INTERFACE_FAILED;
1671
1672 return action;
1673 }
1674
1675 /* Populate the shared object list by reading the entire list of
1676 shared objects from the inferior. Handle special cases relating
1677 to the first elements of the list. Returns nonzero on success. */
1678
1679 static int
1680 solist_update_full (struct svr4_info *info)
1681 {
1682 free_solib_list (info);
1683 info->solib_list = svr4_current_sos_direct (info);
1684
1685 return 1;
1686 }
1687
1688 /* Update the shared object list starting from the link-map entry
1689 passed by the linker in the probe's third argument. Returns
1690 nonzero if the list was successfully updated, or zero to indicate
1691 failure. */
1692
1693 static int
1694 solist_update_incremental (struct svr4_info *info, CORE_ADDR lm)
1695 {
1696 struct so_list *tail;
1697 CORE_ADDR prev_lm;
1698
1699 /* svr4_current_sos_direct contains logic to handle a number of
1700 special cases relating to the first elements of the list. To
1701 avoid duplicating this logic we defer to solist_update_full
1702 if the list is empty. */
1703 if (info->solib_list == NULL)
1704 return 0;
1705
1706 /* Fall back to a full update if we are using a remote target
1707 that does not support incremental transfers. */
1708 if (info->using_xfer && !target_augmented_libraries_svr4_read ())
1709 return 0;
1710
1711 /* Walk to the end of the list. */
1712 for (tail = info->solib_list; tail->next != NULL; tail = tail->next)
1713 /* Nothing. */;
1714 prev_lm = tail->lm_info->lm_addr;
1715
1716 /* Read the new objects. */
1717 if (info->using_xfer)
1718 {
1719 struct svr4_library_list library_list;
1720 char annex[64];
1721
1722 xsnprintf (annex, sizeof (annex), "start=%s;prev=%s",
1723 phex_nz (lm, sizeof (lm)),
1724 phex_nz (prev_lm, sizeof (prev_lm)));
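/* The resulting annex looks like, e.g. (addresses hypothetical),
"start=7ffff7ff90f0;prev=7ffff7ffe1c8". */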
1725 if (!svr4_current_sos_via_xfer_libraries (&library_list, annex))
1726 return 0;
1727
1728 tail->next = library_list.head;
1729 }
1730 else
1731 {
1732 struct so_list **link = &tail->next;
1733
1734 /* IGNORE_FIRST may safely be set to zero here because the
1735 above check and deferral to solist_update_full ensures
1736 that this call to svr4_read_so_list will never see the
1737 first element. */
1738 if (!svr4_read_so_list (lm, prev_lm, &link, 0))
1739 return 0;
1740 }
1741
1742 return 1;
1743 }
1744
1745 /* Disable the probes-based linker interface and revert to the
1746 original interface. We don't reset the breakpoints as the
1747 ones set up for the probes-based interface are adequate. */
1748
1749 static void
1750 disable_probes_interface_cleanup (void *arg)
1751 {
1752 struct svr4_info *info = get_svr4_info ();
1753
1754 warning (_("Probes-based dynamic linker interface failed.\n"
1755 "Reverting to original interface.\n"));
1756
1757 free_probes_table (info);
1758 free_solib_list (info);
1759 }
1760
1761 /* Update the solib list as appropriate when using the
1762 probes-based linker interface. Do nothing if using the
1763 standard interface. */
1764
1765 static void
1766 svr4_handle_solib_event (void)
1767 {
1768 struct svr4_info *info = get_svr4_info ();
1769 struct probe_and_action *pa;
1770 enum probe_action action;
1771 struct cleanup *old_chain, *usm_chain;
1772 struct value *val;
1773 CORE_ADDR pc, debug_base, lm = 0;
1774 int is_initial_ns;
1775
1776 /* Do nothing if not using the probes interface. */
1777 if (info->probes_table == NULL)
1778 return;
1779
1780 /* If anything goes wrong we revert to the original linker
1781 interface. */
1782 old_chain = make_cleanup (disable_probes_interface_cleanup, NULL);
1783
1784 pc = regcache_read_pc (get_current_regcache ());
1785 pa = solib_event_probe_at (info, pc);
1786 if (pa == NULL)
1787 {
1788 do_cleanups (old_chain);
1789 return;
1790 }
1791
1792 action = solib_event_probe_action (pa);
1793 if (action == PROBES_INTERFACE_FAILED)
1794 {
1795 do_cleanups (old_chain);
1796 return;
1797 }
1798
1799 if (action == DO_NOTHING)
1800 {
1801 discard_cleanups (old_chain);
1802 return;
1803 }
1804
1805 /* evaluate_probe_argument looks up symbols in the dynamic linker
1806 using find_pc_section. find_pc_section is accelerated by a cache
1807 called the section map. The section map is invalidated every
1808 time a shared library is loaded or unloaded, and if the inferior
1809 is generating a lot of shared library events then the section map
1810 will be updated every time svr4_handle_solib_event is called.
1811 We called find_pc_section in svr4_create_solib_event_breakpoints,
1812 so we can guarantee that the dynamic linker's sections are in the
1813 section map. We can therefore inhibit section map updates across
1814 these calls to evaluate_probe_argument and save a lot of time. */
1815 inhibit_section_map_updates (current_program_space);
1816 usm_chain = make_cleanup (resume_section_map_updates_cleanup,
1817 current_program_space);
1818
1819 val = evaluate_probe_argument (pa->probe, 1);
1820 if (val == NULL)
1821 {
1822 do_cleanups (old_chain);
1823 return;
1824 }
1825
1826 debug_base = value_as_address (val);
1827 if (debug_base == 0)
1828 {
1829 do_cleanups (old_chain);
1830 return;
1831 }
1832
1833 /* Always locate the debug struct, in case it moved. */
1834 info->debug_base = 0;
1835 if (locate_base (info) == 0)
1836 {
1837 do_cleanups (old_chain);
1838 return;
1839 }
1840
1841 /* GDB does not currently support libraries loaded via dlmopen
1842 into namespaces other than the initial one. We must ignore
1843 any namespace other than the initial namespace here until
1844 support for this is added to GDB. */
1845 if (debug_base != info->debug_base)
1846 action = DO_NOTHING;
1847
1848 if (action == UPDATE_OR_RELOAD)
1849 {
1850 val = evaluate_probe_argument (pa->probe, 2);
1851 if (val != NULL)
1852 lm = value_as_address (val);
1853
1854 if (lm == 0)
1855 action = FULL_RELOAD;
1856 }
1857
1858 /* Resume section map updates. */
1859 do_cleanups (usm_chain);
1860
1861 if (action == UPDATE_OR_RELOAD)
1862 {
1863 if (!solist_update_incremental (info, lm))
1864 action = FULL_RELOAD;
1865 }
1866
1867 if (action == FULL_RELOAD)
1868 {
1869 if (!solist_update_full (info))
1870 {
1871 do_cleanups (old_chain);
1872 return;
1873 }
1874 }
1875
1876 discard_cleanups (old_chain);
1877 }
1878
1879 /* Helper function for svr4_update_solib_event_breakpoints. */
1880
1881 static int
1882 svr4_update_solib_event_breakpoint (struct breakpoint *b, void *arg)
1883 {
1884 struct bp_location *loc;
1885
1886 if (b->type != bp_shlib_event)
1887 {
1888 /* Continue iterating. */
1889 return 0;
1890 }
1891
1892 for (loc = b->loc; loc != NULL; loc = loc->next)
1893 {
1894 struct svr4_info *info;
1895 struct probe_and_action *pa;
1896
1897 info = program_space_data (loc->pspace, solib_svr4_pspace_data);
1898 if (info == NULL || info->probes_table == NULL)
1899 continue;
1900
1901 pa = solib_event_probe_at (info, loc->address);
1902 if (pa == NULL)
1903 continue;
1904
1905 if (pa->action == DO_NOTHING)
1906 {
1907 if (b->enable_state == bp_disabled && stop_on_solib_events)
1908 enable_breakpoint (b);
1909 else if (b->enable_state == bp_enabled && !stop_on_solib_events)
1910 disable_breakpoint (b);
1911 }
1912
1913 break;
1914 }
1915
1916 /* Continue iterating. */
1917 return 0;
1918 }
1919
1920 /* Enable or disable optional solib event breakpoints as appropriate.
1921 Called whenever stop_on_solib_events is changed. */
1922
1923 static void
1924 svr4_update_solib_event_breakpoints (void)
1925 {
1926 iterate_over_breakpoints (svr4_update_solib_event_breakpoint, NULL);
1927 }
1928
1929 /* Create and register solib event breakpoints. PROBES is an array
1930    of NUM_PROBES elements, each of which is a vector of probes.  A
1931 solib event breakpoint will be created and registered for each
1932 probe. */
1933
1934 static void
1935 svr4_create_probe_breakpoints (struct gdbarch *gdbarch,
1936 VEC (probe_p) **probes)
1937 {
1938 int i;
1939
1940 for (i = 0; i < NUM_PROBES; i++)
1941 {
1942 enum probe_action action = probe_info[i].action;
1943 struct probe *probe;
1944 int ix;
1945
1946 for (ix = 0;
1947 VEC_iterate (probe_p, probes[i], ix, probe);
1948 ++ix)
1949 {
1950 create_solib_event_breakpoint (gdbarch, probe->address);
1951 register_solib_event_probe (probe, action);
1952 }
1953 }
1954
1955 svr4_update_solib_event_breakpoints ();
1956 }
1957
1958 /* Both the SunOS and the SVR4 dynamic linkers call a marker function
1959 before and after mapping and unmapping shared libraries. The sole
1960 purpose of this method is to allow debuggers to set a breakpoint so
1961 they can track these changes.
1962
1963 Some versions of the glibc dynamic linker contain named probes
1964    to allow more fine-grained stopping.  Given the address of the
1965 original marker function, this function attempts to find these
1966 probes, and if found, sets breakpoints on those instead. If the
1967 probes aren't found, a single breakpoint is set on the original
1968 marker function. */
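
/* When the probes are present they can also be inspected from GDB itself
   with the "info probes" command; the dynamic linker's entries appear
   under provider "rtld" with names such as "init_complete" and
   "map_complete".  (The exact set of names varies by glibc version;
   this is an illustration, not an exhaustive list.)  */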
1969
1970 static void
1971 svr4_create_solib_event_breakpoints (struct gdbarch *gdbarch,
1972 CORE_ADDR address)
1973 {
1974 struct obj_section *os;
1975
1976 os = find_pc_section (address);
1977 if (os != NULL)
1978 {
1979 int with_prefix;
1980
1981 for (with_prefix = 0; with_prefix <= 1; with_prefix++)
1982 {
1983 VEC (probe_p) *probes[NUM_PROBES];
1984 int all_probes_found = 1;
1985 int checked_can_use_probe_arguments = 0;
1986 int i;
1987
1988 memset (probes, 0, sizeof (probes));
1989 for (i = 0; i < NUM_PROBES; i++)
1990 {
1991 const char *name = probe_info[i].name;
1992 struct probe *p;
1993 char buf[32];
1994
1995 /* Fedora 17 and Red Hat Enterprise Linux 6.2-6.4
1996 shipped with an early version of the probes code in
1997 which the probes' names were prefixed with "rtld_"
1998 and the "map_failed" probe did not exist. The
1999 locations of the probes are otherwise the same, so
2000 we check for probes with prefixed names if probes
2001 with unprefixed names are not present. */
2002 if (with_prefix)
2003 {
2004 xsnprintf (buf, sizeof (buf), "rtld_%s", name);
2005 name = buf;
2006 }
2007
2008 probes[i] = find_probes_in_objfile (os->objfile, "rtld", name);
2009
2010 /* The "map_failed" probe did not exist in early
2011 versions of the probes code in which the probes'
2012 names were prefixed with "rtld_". */
2013 if (strcmp (name, "rtld_map_failed") == 0)
2014 continue;
2015
2016 if (VEC_empty (probe_p, probes[i]))
2017 {
2018 all_probes_found = 0;
2019 break;
2020 }
2021
2022 /* Ensure probe arguments can be evaluated. */
2023 if (!checked_can_use_probe_arguments)
2024 {
2025 p = VEC_index (probe_p, probes[i], 0);
2026 if (!can_evaluate_probe_arguments (p))
2027 {
2028 all_probes_found = 0;
2029 break;
2030 }
2031 checked_can_use_probe_arguments = 1;
2032 }
2033 }
2034
2035 if (all_probes_found)
2036 svr4_create_probe_breakpoints (gdbarch, probes);
2037
2038 for (i = 0; i < NUM_PROBES; i++)
2039 VEC_free (probe_p, probes[i]);
2040
2041 if (all_probes_found)
2042 return;
2043 }
2044 }
2045
2046 create_solib_event_breakpoint (gdbarch, address);
2047 }
2048
2049 /* Helper function for gdb_bfd_lookup_symbol. */
2050
2051 static int
2052 cmp_name_and_sec_flags (asymbol *sym, void *data)
2053 {
2054 return (strcmp (sym->name, (const char *) data) == 0
2055 && (sym->section->flags & (SEC_CODE | SEC_DATA)) != 0);
2056 }

2057 /* Arrange for dynamic linker to hit breakpoint.
2058
2059 Both the SunOS and the SVR4 dynamic linkers have, as part of their
2060 debugger interface, support for arranging for the inferior to hit
2061 a breakpoint after mapping in the shared libraries. This function
2062 enables that breakpoint.
2063
2064 For SunOS, there is a special flag location (in_debugger) which we
2065 set to 1. When the dynamic linker sees this flag set, it will set
2066 a breakpoint at a location known only to itself, after saving the
2067 original contents of that place and the breakpoint address itself,
2068    in its own internal structures.  When we resume the inferior, it
2069 will eventually take a SIGTRAP when it runs into the breakpoint.
2070 We handle this (in a different place) by restoring the contents of
2071 the breakpointed location (which is only known after it stops),
2072 chasing around to locate the shared libraries that have been
2073 loaded, then resuming.
2074
2075 For SVR4, the debugger interface structure contains a member (r_brk)
2076 which is statically initialized at the time the shared library is
2077    built, to the offset of a function (_r_debug_state) which is
2078    guaranteed to be called once before mapping in a library, and again when
2079 the mapping is complete. At the time we are examining this member,
2080 it contains only the unrelocated offset of the function, so we have
2081 to do our own relocation. Later, when the dynamic linker actually
2082 runs, it relocates r_brk to be the actual address of _r_debug_state().
2083
2084 The debugger interface structure also contains an enumeration which
2085 is set to either RT_ADD or RT_DELETE prior to changing the mapping,
2086 depending upon whether or not the library is being mapped or unmapped,
2087 and then set to RT_CONSISTENT after the library is mapped/unmapped. */
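
/* For reference, a rough sketch of the debugger interface structure
   described above, modelled on glibc's <link.h> (the declaration is
   illustrative only; GDB reads these fields through the link_map_offsets
   tables defined later in this file rather than using it directly):

       struct r_debug
       {
         int r_version;            protocol version
         struct link_map *r_map;   head of the chain of loaded objects
         ElfW(Addr) r_brk;         address of _r_debug_state
         enum { RT_CONSISTENT, RT_ADD, RT_DELETE } r_state;
         ElfW(Addr) r_ldbase;      base address of the dynamic linker
       };  */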
2088
2089 static int
2090 enable_break (struct svr4_info *info, int from_tty)
2091 {
2092 struct minimal_symbol *msymbol;
2093 const char * const *bkpt_namep;
2094 asection *interp_sect;
2095 char *interp_name;
2096 CORE_ADDR sym_addr;
2097
2098 info->interp_text_sect_low = info->interp_text_sect_high = 0;
2099 info->interp_plt_sect_low = info->interp_plt_sect_high = 0;
2100
2101 /* If we already have a shared library list in the target, and
2102 r_debug contains r_brk, set the breakpoint there - this should
2103 mean r_brk has already been relocated. Assume the dynamic linker
2104 is the object containing r_brk. */
2105
2106 solib_add (NULL, from_tty, &current_target, auto_solib_add);
2107 sym_addr = 0;
2108 if (info->debug_base && solib_svr4_r_map (info) != 0)
2109 sym_addr = solib_svr4_r_brk (info);
2110
2111 if (sym_addr != 0)
2112 {
2113 struct obj_section *os;
2114
2115 sym_addr = gdbarch_addr_bits_remove
2116 (target_gdbarch (), gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
2117 sym_addr,
2118 &current_target));
2119
2120 /* On at least some versions of Solaris there's a dynamic relocation
2121 on _r_debug.r_brk and SYM_ADDR may not be relocated yet, e.g., if
2122 we get control before the dynamic linker has self-relocated.
2123 Check if SYM_ADDR is in a known section, if it is assume we can
2124 trust its value. This is just a heuristic though, it could go away
2125 or be replaced if it's getting in the way.
2126
2127 On ARM we need to know whether the ISA of rtld_db_dlactivity (or
2128 however it's spelled in your particular system) is ARM or Thumb.
2129 That knowledge is encoded in the address, if it's Thumb the low bit
2130 is 1. However, we've stripped that info above and it's not clear
2131 what all the consequences are of passing a non-addr_bits_remove'd
2132 address to svr4_create_solib_event_breakpoints. The call to
2133 find_pc_section verifies we know about the address and have some
2134 hope of computing the right kind of breakpoint to use (via
2135 symbol info). It does mean that GDB needs to be pointed at a
2136 non-stripped version of the dynamic linker in order to obtain
2137 information it already knows about. Sigh. */
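
   /* (Purely illustrative: a Thumb-mode _dl_debug_state whose function
      pointer reads 0x40008111 would arrive here as 0x40008110, the low
      bit having been stripped by gdbarch_addr_bits_remove above.)  */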
2138
2139 os = find_pc_section (sym_addr);
2140 if (os != NULL)
2141 {
2142 /* Record the relocated start and end address of the dynamic linker
2143 text and plt section for svr4_in_dynsym_resolve_code. */
2144 bfd *tmp_bfd;
2145 CORE_ADDR load_addr;
2146
2147 tmp_bfd = os->objfile->obfd;
2148 load_addr = ANOFFSET (os->objfile->section_offsets,
2149 SECT_OFF_TEXT (os->objfile));
2150
2151 interp_sect = bfd_get_section_by_name (tmp_bfd, ".text");
2152 if (interp_sect)
2153 {
2154 info->interp_text_sect_low =
2155 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
2156 info->interp_text_sect_high =
2157 info->interp_text_sect_low
2158 + bfd_section_size (tmp_bfd, interp_sect);
2159 }
2160 interp_sect = bfd_get_section_by_name (tmp_bfd, ".plt");
2161 if (interp_sect)
2162 {
2163 info->interp_plt_sect_low =
2164 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
2165 info->interp_plt_sect_high =
2166 info->interp_plt_sect_low
2167 + bfd_section_size (tmp_bfd, interp_sect);
2168 }
2169
2170 svr4_create_solib_event_breakpoints (target_gdbarch (), sym_addr);
2171 return 1;
2172 }
2173 }
2174
2175 /* Find the program interpreter; if not found, warn the user and drop
2176      into the old breakpoint-at-symbol code.  */
2177 interp_name = find_program_interpreter ();
2178 if (interp_name)
2179 {
2180 CORE_ADDR load_addr = 0;
2181 int load_addr_found = 0;
2182 int loader_found_in_list = 0;
2183 struct so_list *so;
2184 bfd *tmp_bfd = NULL;
2185 struct target_ops *tmp_bfd_target;
2186 volatile struct gdb_exception ex;
2187
2188 sym_addr = 0;
2189
2190 /* Now we need to figure out where the dynamic linker was
2191 loaded so that we can load its symbols and place a breakpoint
2192 in the dynamic linker itself.
2193
2194 This address is stored on the stack. However, I've been unable
2195 to find any magic formula to find it for Solaris (appears to
2196 be trivial on GNU/Linux). Therefore, we have to try an alternate
2197 mechanism to find the dynamic linker's base address. */
2198
2199 TRY_CATCH (ex, RETURN_MASK_ALL)
2200 {
2201 tmp_bfd = solib_bfd_open (interp_name);
2202 }
2203 if (tmp_bfd == NULL)
2204 goto bkpt_at_symbol;
2205
2206       /* Now convert the TMP_BFD into a target.  That way both target and
2207          BFD operations can be used.  */
2208 tmp_bfd_target = target_bfd_reopen (tmp_bfd);
2209 /* target_bfd_reopen acquired its own reference, so we can
2210 release ours now. */
2211 gdb_bfd_unref (tmp_bfd);
2212
2213 /* On a running target, we can get the dynamic linker's base
2214 address from the shared library table. */
2215 so = master_so_list ();
2216 while (so)
2217 {
2218 if (svr4_same_1 (interp_name, so->so_original_name))
2219 {
2220 load_addr_found = 1;
2221 loader_found_in_list = 1;
2222 load_addr = lm_addr_check (so, tmp_bfd);
2223 break;
2224 }
2225 so = so->next;
2226 }
2227
2228 /* If we were not able to find the base address of the loader
2229          from our so_list, then try using the AT_BASE auxiliary entry.  */
2230 if (!load_addr_found)
2231 if (target_auxv_search (&current_target, AT_BASE, &load_addr) > 0)
2232 {
2233 int addr_bit = gdbarch_addr_bit (target_gdbarch ());
2234
2235          /* Ensure LOAD_ADDR has the proper sign in its possible upper bits so
2236             that `+ load_addr' will wrap around within the CORE_ADDR width
2237             instead of creating invalid addresses like 0x101234567 for 32-bit
2238             inferiors on 64-bit GDB.  */
2239
2240 if (addr_bit < (sizeof (CORE_ADDR) * HOST_CHAR_BIT))
2241 {
2242 CORE_ADDR space_size = (CORE_ADDR) 1 << addr_bit;
2243 CORE_ADDR tmp_entry_point = exec_entry_point (tmp_bfd,
2244 tmp_bfd_target);
2245
2246 gdb_assert (load_addr < space_size);
2247
2248 /* TMP_ENTRY_POINT exceeding SPACE_SIZE would be for prelinked
2249                64-bit ld.so with a 32-bit executable; it should not happen.  */
2250
2251 if (tmp_entry_point < space_size
2252 && tmp_entry_point + load_addr >= space_size)
2253 load_addr -= space_size;
2254 }
2255
2256 load_addr_found = 1;
2257 }
2258
2259 /* Otherwise we find the dynamic linker's base address by examining
2260 the current pc (which should point at the entry point for the
2261 dynamic linker) and subtracting the offset of the entry point.
2262
2263 This is more fragile than the previous approaches, but is a good
2264 fallback method because it has actually been working well in
2265 most cases. */
2266 if (!load_addr_found)
2267 {
2268 struct regcache *regcache
2269 = get_thread_arch_regcache (inferior_ptid, target_gdbarch ());
2270
2271 load_addr = (regcache_read_pc (regcache)
2272 - exec_entry_point (tmp_bfd, tmp_bfd_target));
2273 }
2274
2275 if (!loader_found_in_list)
2276 {
2277 info->debug_loader_name = xstrdup (interp_name);
2278 info->debug_loader_offset_p = 1;
2279 info->debug_loader_offset = load_addr;
2280 solib_add (NULL, from_tty, &current_target, auto_solib_add);
2281 }
2282
2283 /* Record the relocated start and end address of the dynamic linker
2284 text and plt section for svr4_in_dynsym_resolve_code. */
2285 interp_sect = bfd_get_section_by_name (tmp_bfd, ".text");
2286 if (interp_sect)
2287 {
2288 info->interp_text_sect_low =
2289 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
2290 info->interp_text_sect_high =
2291 info->interp_text_sect_low
2292 + bfd_section_size (tmp_bfd, interp_sect);
2293 }
2294 interp_sect = bfd_get_section_by_name (tmp_bfd, ".plt");
2295 if (interp_sect)
2296 {
2297 info->interp_plt_sect_low =
2298 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
2299 info->interp_plt_sect_high =
2300 info->interp_plt_sect_low
2301 + bfd_section_size (tmp_bfd, interp_sect);
2302 }
2303
2304 /* Now try to set a breakpoint in the dynamic linker. */
2305 for (bkpt_namep = solib_break_names; *bkpt_namep != NULL; bkpt_namep++)
2306 {
2307 sym_addr = gdb_bfd_lookup_symbol (tmp_bfd, cmp_name_and_sec_flags,
2308 (void *) *bkpt_namep);
2309 if (sym_addr != 0)
2310 break;
2311 }
2312
2313 if (sym_addr != 0)
2314 /* Convert 'sym_addr' from a function pointer to an address.
2315 Because we pass tmp_bfd_target instead of the current
2316 target, this will always produce an unrelocated value. */
2317 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
2318 sym_addr,
2319 tmp_bfd_target);
2320
2321 /* We're done with both the temporary bfd and target. Closing
2322 the target closes the underlying bfd, because it holds the
2323 only remaining reference. */
2324 target_close (tmp_bfd_target);
2325
2326 if (sym_addr != 0)
2327 {
2328 svr4_create_solib_event_breakpoints (target_gdbarch (),
2329 load_addr + sym_addr);
2330 xfree (interp_name);
2331 return 1;
2332 }
2333
2334 /* For whatever reason we couldn't set a breakpoint in the dynamic
2335 linker. Warn and drop into the old code. */
2336 bkpt_at_symbol:
2337 xfree (interp_name);
2338 warning (_("Unable to find dynamic linker breakpoint function.\n"
2339 "GDB will be unable to debug shared library initializers\n"
2340 "and track explicitly loaded dynamic code."));
2341 }
2342
2343 /* Scan through the lists of symbols, trying to look up the symbol and
2344      set a breakpoint there.  Terminate the loop if and when we succeed.  */
2345
2346 for (bkpt_namep = solib_break_names; *bkpt_namep != NULL; bkpt_namep++)
2347 {
2348 msymbol = lookup_minimal_symbol (*bkpt_namep, NULL, symfile_objfile);
2349 if ((msymbol != NULL) && (SYMBOL_VALUE_ADDRESS (msymbol) != 0))
2350 {
2351 sym_addr = SYMBOL_VALUE_ADDRESS (msymbol);
2352 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
2353 sym_addr,
2354 &current_target);
2355 svr4_create_solib_event_breakpoints (target_gdbarch (), sym_addr);
2356 return 1;
2357 }
2358 }
2359
2360 if (interp_name != NULL && !current_inferior ()->attach_flag)
2361 {
2362 for (bkpt_namep = bkpt_names; *bkpt_namep != NULL; bkpt_namep++)
2363 {
2364 msymbol = lookup_minimal_symbol (*bkpt_namep, NULL, symfile_objfile);
2365 if ((msymbol != NULL) && (SYMBOL_VALUE_ADDRESS (msymbol) != 0))
2366 {
2367 sym_addr = SYMBOL_VALUE_ADDRESS (msymbol);
2368 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
2369 sym_addr,
2370 &current_target);
2371 svr4_create_solib_event_breakpoints (target_gdbarch (), sym_addr);
2372 return 1;
2373 }
2374 }
2375 }
2376 return 0;
2377 }
2378
2379 /* Implement the "special_symbol_handling" target_so_ops method. */
2380
2381 static void
2382 svr4_special_symbol_handling (void)
2383 {
2384 /* Nothing to do. */
2385 }
2386
2387 /* Read the ELF program headers from ABFD. Return the contents and
2388 set *PHDRS_SIZE to the size of the program headers. */
2389
2390 static gdb_byte *
2391 read_program_headers_from_bfd (bfd *abfd, int *phdrs_size)
2392 {
2393 Elf_Internal_Ehdr *ehdr;
2394 gdb_byte *buf;
2395
2396 ehdr = elf_elfheader (abfd);
2397
2398 *phdrs_size = ehdr->e_phnum * ehdr->e_phentsize;
2399 if (*phdrs_size == 0)
2400 return NULL;
2401
2402 buf = xmalloc (*phdrs_size);
2403 if (bfd_seek (abfd, ehdr->e_phoff, SEEK_SET) != 0
2404 || bfd_bread (buf, *phdrs_size, abfd) != *phdrs_size)
2405 {
2406 xfree (buf);
2407 return NULL;
2408 }
2409
2410 return buf;
2411 }
2412
2413 /* Return 1 and fill *DISPLACEMENTP with the detected PIE offset of the
2414    inferior's exec_bfd.  Otherwise return 0.
2415
2416 We relocate all of the sections by the same amount. This
2417 behavior is mandated by recent editions of the System V ABI.
2418 According to the System V Application Binary Interface,
2419 Edition 4.1, page 5-5:
2420
2421 ... Though the system chooses virtual addresses for
2422 individual processes, it maintains the segments' relative
2423 positions. Because position-independent code uses relative
2424      addressing between segments, the difference between
2425 virtual addresses in memory must match the difference
2426 between virtual addresses in the file. The difference
2427 between the virtual address of any segment in memory and
2428 the corresponding virtual address in the file is thus a
2429 single constant value for any one executable or shared
2430 object in a given process. This difference is the base
2431 address. One use of the base address is to relocate the
2432 memory image of the program during dynamic linking.
2433
2434 The same language also appears in Edition 4.0 of the System V
2435 ABI and is left unspecified in some of the earlier editions.
2436
2437 Decide if the objfile needs to be relocated. As indicated above, we will
2438    only be here when execution is stopped.  But during attachment the PC can be
2439    at an arbitrary address, so regcache_read_pc can be misleading (contrary to
2440    the auxv AT_ENTRY value).  Moreover, for an executable with an interpreter
2441    section, regcache_read_pc would point at the interpreter, not the main executable.
2442
2443 So, to summarize, relocations are necessary when the start address obtained
2444 from the executable is different from the address in auxv AT_ENTRY entry.
2445
2446 [ The astute reader will note that we also test to make sure that
2447 the executable in question has the DYNAMIC flag set. It is my
2448 opinion that this test is unnecessary (undesirable even). It
2449 was added to avoid inadvertent relocation of an executable
2450 whose e_type member in the ELF header is not ET_DYN. There may
2451 be a time in the future when it is desirable to do relocations
2452 on other types of files as well in which case this condition
2453      should either be removed or modified to accommodate the new file
2454 type. - Kevin, Nov 2000. ] */
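
/* As a hypothetical example of the computation below (addresses made up
   purely for illustration): if the PIE's on-disk ELF header records
   e_entry == 0x630 while the auxv AT_ENTRY entry reports 0x7f1234567630,
   the displacement works out to 0x7f1234567000, and every section of the
   executable is relocated by that one constant.  */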
2455
2456 static int
2457 svr4_exec_displacement (CORE_ADDR *displacementp)
2458 {
2459 /* ENTRY_POINT is a possible function descriptor - before
2460 a call to gdbarch_convert_from_func_ptr_addr. */
2461 CORE_ADDR entry_point, displacement;
2462
2463 if (exec_bfd == NULL)
2464 return 0;
2465
2466   /* Without the BFD DYNAMIC flag, an ELF file is ET_EXEC and not ET_DYN.
2467      Both shared libraries being executed themselves and PIE (Position
2468      Independent Executable) executables are ET_DYN.  */
2469
2470 if ((bfd_get_file_flags (exec_bfd) & DYNAMIC) == 0)
2471 return 0;
2472
2473 if (target_auxv_search (&current_target, AT_ENTRY, &entry_point) <= 0)
2474 return 0;
2475
2476 displacement = entry_point - bfd_get_start_address (exec_bfd);
2477
2478 /* Verify the DISPLACEMENT candidate complies with the required page
2479 alignment. It is cheaper than the program headers comparison below. */
2480
2481 if (bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
2482 {
2483 const struct elf_backend_data *elf = get_elf_backend_data (exec_bfd);
2484
2485 /* p_align of PT_LOAD segments does not specify any alignment but
2486 only congruency of addresses:
2487 p_offset % p_align == p_vaddr % p_align
2488          The kernel is free to load the executable with a lower alignment.  */
2489
2490 if ((displacement & (elf->minpagesize - 1)) != 0)
2491 return 0;
2492 }
2493
2494   /* Verify that the auxiliary vector describes the same file as exec_bfd, by
2495      comparing their program headers.  If the program headers in the auxiliary
2496 vector do not match the program headers in the executable, then we are
2497 looking at a different file than the one used by the kernel - for
2498 instance, "gdb program" connected to "gdbserver :PORT ld.so program". */
2499
2500 if (bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
2501 {
2502 /* Be optimistic and clear OK only if GDB was able to verify the headers
2503 really do not match. */
2504 int phdrs_size, phdrs2_size, ok = 1;
2505 gdb_byte *buf, *buf2;
2506 int arch_size;
2507
2508 buf = read_program_header (-1, &phdrs_size, &arch_size);
2509 buf2 = read_program_headers_from_bfd (exec_bfd, &phdrs2_size);
2510 if (buf != NULL && buf2 != NULL)
2511 {
2512 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
2513
2514 /* We are dealing with three different addresses. EXEC_BFD
2515             represents the addresses in the on-disk file.  The target memory
2516             content may be different from EXEC_BFD, as the file may have been
2517             prelinked to a different address after the executable was loaded.
2518 Moreover the address of placement in target memory can be
2519 different from what the program headers in target memory say -
2520 this is the goal of PIE.
2521
2522 Detected DISPLACEMENT covers both the offsets of PIE placement and
2523 possible new prelink performed after start of the program. Here
2524 relocate BUF and BUF2 just by the EXEC_BFD vs. target memory
2525 content offset for the verification purpose. */
2526
2527 if (phdrs_size != phdrs2_size
2528 || bfd_get_arch_size (exec_bfd) != arch_size)
2529 ok = 0;
2530 else if (arch_size == 32
2531 && phdrs_size >= sizeof (Elf32_External_Phdr)
2532 && phdrs_size % sizeof (Elf32_External_Phdr) == 0)
2533 {
2534 Elf_Internal_Ehdr *ehdr2 = elf_tdata (exec_bfd)->elf_header;
2535 Elf_Internal_Phdr *phdr2 = elf_tdata (exec_bfd)->phdr;
2536 CORE_ADDR displacement = 0;
2537 int i;
2538
2539 /* DISPLACEMENT could be found more easily by the difference of
2540 ehdr2->e_entry. But we haven't read the ehdr yet, and we
2541 already have enough information to compute that displacement
2542 with what we've read. */
2543
2544 for (i = 0; i < ehdr2->e_phnum; i++)
2545 if (phdr2[i].p_type == PT_LOAD)
2546 {
2547 Elf32_External_Phdr *phdrp;
2548 gdb_byte *buf_vaddr_p, *buf_paddr_p;
2549 CORE_ADDR vaddr, paddr;
2550 CORE_ADDR displacement_vaddr = 0;
2551 CORE_ADDR displacement_paddr = 0;
2552
2553 phdrp = &((Elf32_External_Phdr *) buf)[i];
2554 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
2555 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
2556
2557 vaddr = extract_unsigned_integer (buf_vaddr_p, 4,
2558 byte_order);
2559 displacement_vaddr = vaddr - phdr2[i].p_vaddr;
2560
2561 paddr = extract_unsigned_integer (buf_paddr_p, 4,
2562 byte_order);
2563 displacement_paddr = paddr - phdr2[i].p_paddr;
2564
2565 if (displacement_vaddr == displacement_paddr)
2566 displacement = displacement_vaddr;
2567
2568 break;
2569 }
2570
2571 /* Now compare BUF and BUF2 with optional DISPLACEMENT. */
2572
2573 for (i = 0; i < phdrs_size / sizeof (Elf32_External_Phdr); i++)
2574 {
2575 Elf32_External_Phdr *phdrp;
2576 Elf32_External_Phdr *phdr2p;
2577 gdb_byte *buf_vaddr_p, *buf_paddr_p;
2578 CORE_ADDR vaddr, paddr;
2579 asection *plt2_asect;
2580
2581 phdrp = &((Elf32_External_Phdr *) buf)[i];
2582 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
2583 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
2584 phdr2p = &((Elf32_External_Phdr *) buf2)[i];
2585
2586 /* PT_GNU_STACK is an exception by being never relocated by
2587 prelink as its addresses are always zero. */
2588
2589 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2590 continue;
2591
2592 /* Check also other adjustment combinations - PR 11786. */
2593
2594 vaddr = extract_unsigned_integer (buf_vaddr_p, 4,
2595 byte_order);
2596 vaddr -= displacement;
2597 store_unsigned_integer (buf_vaddr_p, 4, byte_order, vaddr);
2598
2599 paddr = extract_unsigned_integer (buf_paddr_p, 4,
2600 byte_order);
2601 paddr -= displacement;
2602 store_unsigned_integer (buf_paddr_p, 4, byte_order, paddr);
2603
2604 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2605 continue;
2606
2607 /* prelink can convert .plt SHT_NOBITS to SHT_PROGBITS. */
2608 plt2_asect = bfd_get_section_by_name (exec_bfd, ".plt");
2609 if (plt2_asect)
2610 {
2611 int content2;
2612 gdb_byte *buf_filesz_p = (gdb_byte *) &phdrp->p_filesz;
2613 CORE_ADDR filesz;
2614
2615 content2 = (bfd_get_section_flags (exec_bfd, plt2_asect)
2616 & SEC_HAS_CONTENTS) != 0;
2617
2618 filesz = extract_unsigned_integer (buf_filesz_p, 4,
2619 byte_order);
2620
2621 /* PLT2_ASECT is from on-disk file (exec_bfd) while
2622 FILESZ is from the in-memory image. */
2623 if (content2)
2624 filesz += bfd_get_section_size (plt2_asect);
2625 else
2626 filesz -= bfd_get_section_size (plt2_asect);
2627
2628 store_unsigned_integer (buf_filesz_p, 4, byte_order,
2629 filesz);
2630
2631 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2632 continue;
2633 }
2634
2635 ok = 0;
2636 break;
2637 }
2638 }
2639 else if (arch_size == 64
2640 && phdrs_size >= sizeof (Elf64_External_Phdr)
2641 && phdrs_size % sizeof (Elf64_External_Phdr) == 0)
2642 {
2643 Elf_Internal_Ehdr *ehdr2 = elf_tdata (exec_bfd)->elf_header;
2644 Elf_Internal_Phdr *phdr2 = elf_tdata (exec_bfd)->phdr;
2645 CORE_ADDR displacement = 0;
2646 int i;
2647
2648 /* DISPLACEMENT could be found more easily by the difference of
2649 ehdr2->e_entry. But we haven't read the ehdr yet, and we
2650 already have enough information to compute that displacement
2651 with what we've read. */
2652
2653 for (i = 0; i < ehdr2->e_phnum; i++)
2654 if (phdr2[i].p_type == PT_LOAD)
2655 {
2656 Elf64_External_Phdr *phdrp;
2657 gdb_byte *buf_vaddr_p, *buf_paddr_p;
2658 CORE_ADDR vaddr, paddr;
2659 CORE_ADDR displacement_vaddr = 0;
2660 CORE_ADDR displacement_paddr = 0;
2661
2662 phdrp = &((Elf64_External_Phdr *) buf)[i];
2663 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
2664 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
2665
2666 vaddr = extract_unsigned_integer (buf_vaddr_p, 8,
2667 byte_order);
2668 displacement_vaddr = vaddr - phdr2[i].p_vaddr;
2669
2670 paddr = extract_unsigned_integer (buf_paddr_p, 8,
2671 byte_order);
2672 displacement_paddr = paddr - phdr2[i].p_paddr;
2673
2674 if (displacement_vaddr == displacement_paddr)
2675 displacement = displacement_vaddr;
2676
2677 break;
2678 }
2679
2680 /* Now compare BUF and BUF2 with optional DISPLACEMENT. */
2681
2682 for (i = 0; i < phdrs_size / sizeof (Elf64_External_Phdr); i++)
2683 {
2684 Elf64_External_Phdr *phdrp;
2685 Elf64_External_Phdr *phdr2p;
2686 gdb_byte *buf_vaddr_p, *buf_paddr_p;
2687 CORE_ADDR vaddr, paddr;
2688 asection *plt2_asect;
2689
2690 phdrp = &((Elf64_External_Phdr *) buf)[i];
2691 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
2692 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
2693 phdr2p = &((Elf64_External_Phdr *) buf2)[i];
2694
2695 /* PT_GNU_STACK is an exception by being never relocated by
2696 prelink as its addresses are always zero. */
2697
2698 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2699 continue;
2700
2701 /* Check also other adjustment combinations - PR 11786. */
2702
2703 vaddr = extract_unsigned_integer (buf_vaddr_p, 8,
2704 byte_order);
2705 vaddr -= displacement;
2706 store_unsigned_integer (buf_vaddr_p, 8, byte_order, vaddr);
2707
2708 paddr = extract_unsigned_integer (buf_paddr_p, 8,
2709 byte_order);
2710 paddr -= displacement;
2711 store_unsigned_integer (buf_paddr_p, 8, byte_order, paddr);
2712
2713 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2714 continue;
2715
2716 /* prelink can convert .plt SHT_NOBITS to SHT_PROGBITS. */
2717 plt2_asect = bfd_get_section_by_name (exec_bfd, ".plt");
2718 if (plt2_asect)
2719 {
2720 int content2;
2721 gdb_byte *buf_filesz_p = (gdb_byte *) &phdrp->p_filesz;
2722 CORE_ADDR filesz;
2723
2724 content2 = (bfd_get_section_flags (exec_bfd, plt2_asect)
2725 & SEC_HAS_CONTENTS) != 0;
2726
2727 filesz = extract_unsigned_integer (buf_filesz_p, 8,
2728 byte_order);
2729
2730 /* PLT2_ASECT is from on-disk file (exec_bfd) while
2731 FILESZ is from the in-memory image. */
2732 if (content2)
2733 filesz += bfd_get_section_size (plt2_asect);
2734 else
2735 filesz -= bfd_get_section_size (plt2_asect);
2736
2737 store_unsigned_integer (buf_filesz_p, 8, byte_order,
2738 filesz);
2739
2740 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2741 continue;
2742 }
2743
2744 ok = 0;
2745 break;
2746 }
2747 }
2748 else
2749 ok = 0;
2750 }
2751
2752 xfree (buf);
2753 xfree (buf2);
2754
2755 if (!ok)
2756 return 0;
2757 }
2758
2759 if (info_verbose)
2760 {
2761       /* This can be printed repeatedly, as there is no easy way to check
2762          whether the executable symbols/file have already been relocated by
2763          this displacement.  */
2764
2765 printf_unfiltered (_("Using PIE (Position Independent Executable) "
2766 "displacement %s for \"%s\".\n"),
2767 paddress (target_gdbarch (), displacement),
2768 bfd_get_filename (exec_bfd));
2769 }
2770
2771 *displacementp = displacement;
2772 return 1;
2773 }
2774
2775 /* Relocate the main executable. This function should be called upon
2776 stopping the inferior process at the entry point to the program.
2777 The entry point from BFD is compared to the AT_ENTRY of AUXV and if they are
2778 different, the main executable is relocated by the proper amount. */
2779
2780 static void
2781 svr4_relocate_main_executable (void)
2782 {
2783 CORE_ADDR displacement;
2784
2785 /* If we are re-running this executable, SYMFILE_OBJFILE->SECTION_OFFSETS
2786 probably contains the offsets computed using the PIE displacement
2787 from the previous run, which of course are irrelevant for this run.
2788 So we need to determine the new PIE displacement and recompute the
2789 section offsets accordingly, even if SYMFILE_OBJFILE->SECTION_OFFSETS
2790 already contains pre-computed offsets.
2791
2792 If we cannot compute the PIE displacement, either:
2793
2794 - The executable is not PIE.
2795
2796 - SYMFILE_OBJFILE does not match the executable started in the target.
2797 This can happen for main executable symbols loaded at the host while
2798 `ld.so --ld-args main-executable' is loaded in the target.
2799
2800 Then we leave the section offsets untouched and use them as is for
2801 this run. Either:
2802
2803 - These section offsets were properly reset earlier, and thus
2804 already contain the correct values. This can happen for instance
2805 when reconnecting via the remote protocol to a target that supports
2806 the `qOffsets' packet.
2807
2808 - The section offsets were not reset earlier, and the best we can
2809 hope is that the old offsets are still applicable to the new run. */
2810
2811 if (! svr4_exec_displacement (&displacement))
2812 return;
2813
2814 /* Even DISPLACEMENT 0 is a valid new difference of in-memory vs. in-file
2815 addresses. */
2816
2817 if (symfile_objfile)
2818 {
2819 struct section_offsets *new_offsets;
2820 int i;
2821
2822 new_offsets = alloca (symfile_objfile->num_sections
2823 * sizeof (*new_offsets));
2824
2825 for (i = 0; i < symfile_objfile->num_sections; i++)
2826 new_offsets->offsets[i] = displacement;
2827
2828 objfile_relocate (symfile_objfile, new_offsets);
2829 }
2830 else if (exec_bfd)
2831 {
2832 asection *asect;
2833
2834 for (asect = exec_bfd->sections; asect != NULL; asect = asect->next)
2835 exec_set_section_address (bfd_get_filename (exec_bfd), asect->index,
2836 (bfd_section_vma (exec_bfd, asect)
2837 + displacement));
2838 }
2839 }
2840
2841 /* Implement the "solib_create_inferior_hook" target_so_ops method.
2842
2843    For SVR4 executables, the inferior's first instruction is either the first
2844 instruction in the dynamic linker (for dynamically linked
2845 executables) or the instruction at "start" for statically linked
2846 executables. For dynamically linked executables, the system
2847 first exec's /lib/libc.so.N, which contains the dynamic linker,
2848 and starts it running. The dynamic linker maps in any needed
2849 shared libraries, maps in the actual user executable, and then
2850 jumps to "start" in the user executable.
2851
2852 We can arrange to cooperate with the dynamic linker to discover the
2853 names of shared libraries that are dynamically linked, and the base
2854 addresses to which they are linked.
2855
2856 This function is responsible for discovering those names and
2857 addresses, and saving sufficient information about them to allow
2858 their symbols to be read at a later time. */
2859
2860 static void
2861 svr4_solib_create_inferior_hook (int from_tty)
2862 {
2863 struct svr4_info *info;
2864
2865 info = get_svr4_info ();
2866
2867 /* Clear the probes-based interface's state. */
2868 free_probes_table (info);
2869 free_solib_list (info);
2870
2871 /* Relocate the main executable if necessary. */
2872 svr4_relocate_main_executable ();
2873
2874 /* No point setting a breakpoint in the dynamic linker if we can't
2875 hit it (e.g., a core file, or a trace file). */
2876 if (!target_has_execution)
2877 return;
2878
2879 if (!svr4_have_link_map_offsets ())
2880 return;
2881
2882 if (!enable_break (info, from_tty))
2883 return;
2884 }
2885
2886 static void
2887 svr4_clear_solib (void)
2888 {
2889 struct svr4_info *info;
2890
2891 info = get_svr4_info ();
2892 info->debug_base = 0;
2893 info->debug_loader_offset_p = 0;
2894 info->debug_loader_offset = 0;
2895 xfree (info->debug_loader_name);
2896 info->debug_loader_name = NULL;
2897 }
2898
2899 /* Clear any bits of ADDR that wouldn't fit in a target-format
2900 data pointer. "Data pointer" here refers to whatever sort of
2901 address the dynamic linker uses to manage its sections. At the
2902 moment, we don't support shared libraries on any processors where
2903 code and data pointers are different sizes.
2904
2905 This isn't really the right solution. What we really need here is
2906 a way to do arithmetic on CORE_ADDR values that respects the
2907 natural pointer/address correspondence. (For example, on the MIPS,
2908 converting a 32-bit pointer to a 64-bit CORE_ADDR requires you to
2909 sign-extend the value. There, simply truncating the bits above
2910 gdbarch_ptr_bit, as we do below, is no good.) This should probably
2911 be a new gdbarch method or something. */
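/* (For example, with gdbarch_ptr_bit of 32 on a build whose CORE_ADDR is
   64 bits wide, an ADDR of 0x00000001f7a2c000 would be truncated to
   0xf7a2c000; the values are illustrative only.)  */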
2912 static CORE_ADDR
2913 svr4_truncate_ptr (CORE_ADDR addr)
2914 {
2915 if (gdbarch_ptr_bit (target_gdbarch ()) == sizeof (CORE_ADDR) * 8)
2916 /* We don't need to truncate anything, and the bit twiddling below
2917 will fail due to overflow problems. */
2918 return addr;
2919 else
2920 return addr & (((CORE_ADDR) 1 << gdbarch_ptr_bit (target_gdbarch ())) - 1);
2921 }
2922
2923
2924 static void
2925 svr4_relocate_section_addresses (struct so_list *so,
2926 struct target_section *sec)
2927 {
2928 bfd *abfd = sec->the_bfd_section->owner;
2929
2930 sec->addr = svr4_truncate_ptr (sec->addr + lm_addr_check (so, abfd));
2931 sec->endaddr = svr4_truncate_ptr (sec->endaddr + lm_addr_check (so, abfd));
2932 }
2933 \f
2934
2935 /* Architecture-specific operations. */
2936
2937 /* Per-architecture data key. */
2938 static struct gdbarch_data *solib_svr4_data;
2939
2940 struct solib_svr4_ops
2941 {
2942 /* Return a description of the layout of `struct link_map'. */
2943 struct link_map_offsets *(*fetch_link_map_offsets)(void);
2944 };
2945
2946 /* Return a default for the architecture-specific operations. */
2947
2948 static void *
2949 solib_svr4_init (struct obstack *obstack)
2950 {
2951 struct solib_svr4_ops *ops;
2952
2953 ops = OBSTACK_ZALLOC (obstack, struct solib_svr4_ops);
2954 ops->fetch_link_map_offsets = NULL;
2955 return ops;
2956 }
2957
2958 /* Set the architecture-specific `struct link_map_offsets' fetcher for
2959 GDBARCH to FLMO. Also, install SVR4 solib_ops into GDBARCH. */
2960
2961 void
2962 set_solib_svr4_fetch_link_map_offsets (struct gdbarch *gdbarch,
2963 struct link_map_offsets *(*flmo) (void))
2964 {
2965 struct solib_svr4_ops *ops = gdbarch_data (gdbarch, solib_svr4_data);
2966
2967 ops->fetch_link_map_offsets = flmo;
2968
2969 set_solib_ops (gdbarch, &svr4_so_ops);
2970 }
2971
2972 /* Fetch a link_map_offsets structure using the architecture-specific
2973 `struct link_map_offsets' fetcher. */
2974
2975 static struct link_map_offsets *
2976 svr4_fetch_link_map_offsets (void)
2977 {
2978 struct solib_svr4_ops *ops = gdbarch_data (target_gdbarch (), solib_svr4_data);
2979
2980 gdb_assert (ops->fetch_link_map_offsets);
2981 return ops->fetch_link_map_offsets ();
2982 }
2983
2984 /* Return 1 if a link map offset fetcher has been defined, 0 otherwise. */
2985
2986 static int
2987 svr4_have_link_map_offsets (void)
2988 {
2989 struct solib_svr4_ops *ops = gdbarch_data (target_gdbarch (), solib_svr4_data);
2990
2991 return (ops->fetch_link_map_offsets != NULL);
2992 }
2993 \f
2994
2995 /* Most OSes that have SVR4-style ELF dynamic libraries define a
2996    `struct r_debug' and a `struct link_map' that are binary compatible
2997    with the original SVR4 implementation.  */
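
/* As a reference for the offsets used below, a minimal ILP32 sketch of
   `struct link_map', modelled on glibc's <link.h> (illustrative only;
   GDB does not use this declaration):

       struct link_map
       {
         Elf32_Addr l_addr;          at offset 0, the load displacement
         char *l_name;               at offset 4, the file name
         Elf32_Dyn *l_ld;            at offset 8, the dynamic section
         struct link_map *l_next;    at offset 12
         struct link_map *l_prev;    at offset 16
       };

   which is why link_map_size is 20 and the l_*_offset values below run
   0, 4, 8, 12 and 16; the LP64 variant uses 8-byte fields, doubling the
   offsets.  */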
2998
2999 /* Fetch (and possibly build) an appropriate `struct link_map_offsets'
3000 for an ILP32 SVR4 system. */
3001
3002 struct link_map_offsets *
3003 svr4_ilp32_fetch_link_map_offsets (void)
3004 {
3005 static struct link_map_offsets lmo;
3006 static struct link_map_offsets *lmp = NULL;
3007
3008 if (lmp == NULL)
3009 {
3010 lmp = &lmo;
3011
3012 lmo.r_version_offset = 0;
3013 lmo.r_version_size = 4;
3014 lmo.r_map_offset = 4;
3015 lmo.r_brk_offset = 8;
3016 lmo.r_ldsomap_offset = 20;
3017
3018 /* Everything we need is in the first 20 bytes. */
3019 lmo.link_map_size = 20;
3020 lmo.l_addr_offset = 0;
3021 lmo.l_name_offset = 4;
3022 lmo.l_ld_offset = 8;
3023 lmo.l_next_offset = 12;
3024 lmo.l_prev_offset = 16;
3025 }
3026
3027 return lmp;
3028 }
3029
3030 /* Fetch (and possibly build) an appropriate `struct link_map_offsets'
3031 for an LP64 SVR4 system. */
3032
3033 struct link_map_offsets *
3034 svr4_lp64_fetch_link_map_offsets (void)
3035 {
3036 static struct link_map_offsets lmo;
3037 static struct link_map_offsets *lmp = NULL;
3038
3039 if (lmp == NULL)
3040 {
3041 lmp = &lmo;
3042
3043 lmo.r_version_offset = 0;
3044 lmo.r_version_size = 4;
3045 lmo.r_map_offset = 8;
3046 lmo.r_brk_offset = 16;
3047 lmo.r_ldsomap_offset = 40;
3048
3049 /* Everything we need is in the first 40 bytes. */
3050 lmo.link_map_size = 40;
3051 lmo.l_addr_offset = 0;
3052 lmo.l_name_offset = 8;
3053 lmo.l_ld_offset = 16;
3054 lmo.l_next_offset = 24;
3055 lmo.l_prev_offset = 32;
3056 }
3057
3058 return lmp;
3059 }
3060 \f
3061
3062 struct target_so_ops svr4_so_ops;
3063
3064 /* Lookup global symbol for ELF DSOs linked with -Bsymbolic. Those DSOs have a
3065 different rule for symbol lookup. The lookup begins here in the DSO, not in
3066 the main executable. */
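
/* (A DSO of this kind is typically produced with something like
   "gcc -shared -Wl,-Bsymbolic -o libfoo.so foo.c", a hypothetical command
   line shown only for illustration; with GNU ld that request normally
   results in a DT_SYMBOLIC entry in the dynamic section, which is what
   the scan_dyntag check below looks for.)  */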
3067
3068 static struct symbol *
3069 elf_lookup_lib_symbol (const struct objfile *objfile,
3070 const char *name,
3071 const domain_enum domain)
3072 {
3073 bfd *abfd;
3074
3075 if (objfile == symfile_objfile)
3076 abfd = exec_bfd;
3077 else
3078 {
3079 /* OBJFILE should have been passed as the non-debug one. */
3080 gdb_assert (objfile->separate_debug_objfile_backlink == NULL);
3081
3082 abfd = objfile->obfd;
3083 }
3084
3085 if (abfd == NULL || scan_dyntag (DT_SYMBOLIC, abfd, NULL) != 1)
3086 return NULL;
3087
3088 return lookup_global_symbol_from_objfile (objfile, name, domain);
3089 }
3090
3091 extern initialize_file_ftype _initialize_svr4_solib; /* -Wmissing-prototypes */
3092
3093 void
3094 _initialize_svr4_solib (void)
3095 {
3096 solib_svr4_data = gdbarch_data_register_pre_init (solib_svr4_init);
3097 solib_svr4_pspace_data
3098 = register_program_space_data_with_cleanup (NULL, svr4_pspace_data_cleanup);
3099
3100 svr4_so_ops.relocate_section_addresses = svr4_relocate_section_addresses;
3101 svr4_so_ops.free_so = svr4_free_so;
3102 svr4_so_ops.clear_so = svr4_clear_so;
3103 svr4_so_ops.clear_solib = svr4_clear_solib;
3104 svr4_so_ops.solib_create_inferior_hook = svr4_solib_create_inferior_hook;
3105 svr4_so_ops.special_symbol_handling = svr4_special_symbol_handling;
3106 svr4_so_ops.current_sos = svr4_current_sos;
3107 svr4_so_ops.open_symbol_file_object = open_symbol_file_object;
3108 svr4_so_ops.in_dynsym_resolve_code = svr4_in_dynsym_resolve_code;
3109 svr4_so_ops.bfd_open = solib_bfd_open;
3110 svr4_so_ops.lookup_lib_global_symbol = elf_lookup_lib_symbol;
3111 svr4_so_ops.same = svr4_same;
3112 svr4_so_ops.keep_data_in_core = svr4_keep_data_in_core;
3113 svr4_so_ops.update_breakpoints = svr4_update_solib_event_breakpoints;
3114 svr4_so_ops.handle_event = svr4_handle_solib_event;
3115 }