1 /* Handle SVR4 shared libraries for GDB, the GNU Debugger.
2
3 Copyright (C) 1990-2013 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21
22 #include "elf/external.h"
23 #include "elf/common.h"
24 #include "elf/mips.h"
25
26 #include "symtab.h"
27 #include "bfd.h"
28 #include "symfile.h"
29 #include "objfiles.h"
30 #include "gdbcore.h"
31 #include "target.h"
32 #include "inferior.h"
33 #include "regcache.h"
34 #include "gdbthread.h"
35 #include "observer.h"
36
37 #include "gdb_assert.h"
38
39 #include "solist.h"
40 #include "solib.h"
41 #include "solib-svr4.h"
42
43 #include "bfd-target.h"
44 #include "elf-bfd.h"
45 #include "exec.h"
46 #include "auxv.h"
47 #include "exceptions.h"
48 #include "gdb_bfd.h"
49 #include "probe.h"
50
51 static struct link_map_offsets *svr4_fetch_link_map_offsets (void);
52 static int svr4_have_link_map_offsets (void);
53 static void svr4_relocate_main_executable (void);
54 static void svr4_free_library_list (void *p_list);
55
56 /* Link map info to include in an allocated so_list entry. */
57
58 struct lm_info
59 {
60 /* Amount by which addresses in the binary should be relocated to
61 match the inferior. The direct inferior value is L_ADDR_INFERIOR.
62 When prelinking is involved and the prelink base address changes,
63 we may need a different offset - the recomputed offset is in L_ADDR.
64 It is commonly the same value. It is cached as we want to warn about
65 the difference and compute it only once. L_ADDR is valid
66 iff L_ADDR_P. */
67 CORE_ADDR l_addr, l_addr_inferior;
68 unsigned int l_addr_p : 1;
69
70 /* The target location of lm. */
71 CORE_ADDR lm_addr;
72
73 /* Values read in from inferior's fields of the same name. */
74 CORE_ADDR l_ld, l_next, l_prev, l_name;
75 };
76
77 /* On SVR4 systems, a list of symbols in the dynamic linker where
78 GDB can try to place a breakpoint to monitor shared library
79 events.
80
81 If none of these symbols are found, or other errors occur, then
82 SVR4 systems will fall back to using a symbol as the "startup
83 mapping complete" breakpoint address. */
84
85 static const char * const solib_break_names[] =
86 {
87 "r_debug_state",
88 "_r_debug_state",
89 "_dl_debug_state",
90 "rtld_db_dlactivity",
91 "__dl_rtld_db_dlactivity",
92 "_rtld_debug_state",
93
94 NULL
95 };
96
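/* If the dynamic linker breakpoint cannot be placed using the symbols
   above, these symbols in the main executable are used as fallback
   locations for the "startup mapping complete" breakpoint.  */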
97 static const char * const bkpt_names[] =
98 {
99 "_start",
100 "__start",
101 "main",
102 NULL
103 };
104
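/* Link map entry names that identify the main executable; used by
   match_main below to recognize (and skip) the executable's own entry
   in the shared library list.  */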
105 static const char * const main_name_list[] =
106 {
107 "main_$main",
108 NULL
109 };
110
111 /* What to do when a probe stop occurs. */
112
113 enum probe_action
114 {
115 /* Something went seriously wrong. Stop using probes and
116 revert to using the older interface. */
117 PROBES_INTERFACE_FAILED,
118
119 /* No action is required. The shared object list is still
120 valid. */
121 DO_NOTHING,
122
123 /* The shared object list should be reloaded entirely. */
124 FULL_RELOAD,
125
126 /* Attempt to incrementally update the shared object list. If
127 the update fails or is not possible, fall back to reloading
128 the list in full. */
129 UPDATE_OR_RELOAD,
130 };
131
132 /* A probe's name and its associated action. */
133
134 struct probe_info
135 {
136 /* The name of the probe. */
137 const char *name;
138
139 /* What to do when a probe stop occurs. */
140 enum probe_action action;
141 };
142
143 /* A list of named probes and their associated actions. If all
144 probes are present in the dynamic linker then the probes-based
145 interface will be used. */
146
147 static const struct probe_info probe_info[] =
148 {
149 { "init_start", DO_NOTHING },
150 { "init_complete", FULL_RELOAD },
151 { "map_start", DO_NOTHING },
152 { "map_failed", DO_NOTHING },
153 { "reloc_complete", UPDATE_OR_RELOAD },
154 { "unmap_start", DO_NOTHING },
155 { "unmap_complete", FULL_RELOAD },
156 };
157
158 #define NUM_PROBES ARRAY_SIZE (probe_info)
159
160 /* Return non-zero if GDB_SO_NAME and INFERIOR_SO_NAME represent
161 the same shared library. */
162
163 static int
164 svr4_same_1 (const char *gdb_so_name, const char *inferior_so_name)
165 {
166 if (strcmp (gdb_so_name, inferior_so_name) == 0)
167 return 1;
168
169   /* On Solaris, when starting the inferior we think that the dynamic
170      linker is /usr/lib/ld.so.1, but later on the table of loaded shared
171      libraries contains /lib/ld.so.1.  Sometimes one file is a link to the
172      other, and sometimes they have identical content but are not linked
173      to each other.  We don't restrict this check to Solaris, but the
174      chances of running into this situation elsewhere are very low. */
175 if (strcmp (gdb_so_name, "/usr/lib/ld.so.1") == 0
176 && strcmp (inferior_so_name, "/lib/ld.so.1") == 0)
177 return 1;
178
179 /* Similarly, we observed the same issue with sparc64, but with
180 different locations. */
181 if (strcmp (gdb_so_name, "/usr/lib/sparcv9/ld.so.1") == 0
182 && strcmp (inferior_so_name, "/lib/sparcv9/ld.so.1") == 0)
183 return 1;
184
185 return 0;
186 }
187
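/* Return non-zero if the so_list entries GDB and INFERIOR represent
   the same shared library.  */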
188 static int
189 svr4_same (struct so_list *gdb, struct so_list *inferior)
190 {
191 return (svr4_same_1 (gdb->so_original_name, inferior->so_original_name));
192 }
193
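/* Read the link map entry at LM_ADDR in the inferior and return a
   newly allocated lm_info describing it, or NULL if the entry could
   not be read.  The caller is responsible for freeing the result.  */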
194 static struct lm_info *
195 lm_info_read (CORE_ADDR lm_addr)
196 {
197 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
198 gdb_byte *lm;
199 struct lm_info *lm_info;
200 struct cleanup *back_to;
201
202 lm = xmalloc (lmo->link_map_size);
203 back_to = make_cleanup (xfree, lm);
204
205 if (target_read_memory (lm_addr, lm, lmo->link_map_size) != 0)
206 {
207       warning (_("Error reading shared library list entry at %s"),
208                paddress (target_gdbarch (), lm_addr));
209       lm_info = NULL;
210 }
211 else
212 {
213 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
214
215 lm_info = xzalloc (sizeof (*lm_info));
216 lm_info->lm_addr = lm_addr;
217
218 lm_info->l_addr_inferior = extract_typed_address (&lm[lmo->l_addr_offset],
219 ptr_type);
220 lm_info->l_ld = extract_typed_address (&lm[lmo->l_ld_offset], ptr_type);
221 lm_info->l_next = extract_typed_address (&lm[lmo->l_next_offset],
222 ptr_type);
223 lm_info->l_prev = extract_typed_address (&lm[lmo->l_prev_offset],
224 ptr_type);
225 lm_info->l_name = extract_typed_address (&lm[lmo->l_name_offset],
226 ptr_type);
227 }
228
229 do_cleanups (back_to);
230
231 return lm_info;
232 }
233
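/* Return non-zero if the architecture's link map offsets indicate
   that struct link_map provides the address of the object's dynamic
   section (the l_ld field).  */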
234 static int
235 has_lm_dynamic_from_link_map (void)
236 {
237 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
238
239 return lmo->l_ld_offset >= 0;
240 }
241
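/* Return the relocation offset to apply to addresses in SO, computing
   it on first use and caching it in SO->lm_info.  When ABFD is
   available, the inferior-supplied value is cross-checked against the
   .dynamic section address recorded in the file so that a prelink
   displacement can be detected and compensated for.  */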
242 static CORE_ADDR
243 lm_addr_check (const struct so_list *so, bfd *abfd)
244 {
245 if (!so->lm_info->l_addr_p)
246 {
247 struct bfd_section *dyninfo_sect;
248 CORE_ADDR l_addr, l_dynaddr, dynaddr;
249
250 l_addr = so->lm_info->l_addr_inferior;
251
252 if (! abfd || ! has_lm_dynamic_from_link_map ())
253 goto set_addr;
254
255 l_dynaddr = so->lm_info->l_ld;
256
257 dyninfo_sect = bfd_get_section_by_name (abfd, ".dynamic");
258 if (dyninfo_sect == NULL)
259 goto set_addr;
260
261 dynaddr = bfd_section_vma (abfd, dyninfo_sect);
262
263 if (dynaddr + l_addr != l_dynaddr)
264 {
265 CORE_ADDR align = 0x1000;
266 CORE_ADDR minpagesize = align;
267
268 if (bfd_get_flavour (abfd) == bfd_target_elf_flavour)
269 {
270 Elf_Internal_Ehdr *ehdr = elf_tdata (abfd)->elf_header;
271 Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
272 int i;
273
274 align = 1;
275
276 for (i = 0; i < ehdr->e_phnum; i++)
277 if (phdr[i].p_type == PT_LOAD && phdr[i].p_align > align)
278 align = phdr[i].p_align;
279
280 minpagesize = get_elf_backend_data (abfd)->minpagesize;
281 }
282
283 /* Turn it into a mask. */
284 align--;
285
286 /* If the changes match the alignment requirements, we
287 assume we're using a core file that was generated by the
288 same binary, just prelinked with a different base offset.
289 If it doesn't match, we may have a different binary, the
290 same binary with the dynamic table loaded at an unrelated
291 location, or anything, really. To avoid regressions,
292 don't adjust the base offset in the latter case, although
293 odds are that, if things really changed, debugging won't
294 quite work.
295
296 	     One might instead expect the condition
297 ((l_addr & align) == 0 && ((l_dynaddr - dynaddr) & align) == 0)
298 but the one below is relaxed for PPC. The PPC kernel supports
299 either 4k or 64k page sizes. To be prepared for 64k pages,
300 PPC ELF files are built using an alignment requirement of 64k.
301 However, when running on a kernel supporting 4k pages, the memory
302 mapping of the library may not actually happen on a 64k boundary!
303
304 (In the usual case where (l_addr & align) == 0, this check is
305 equivalent to the possibly expected check above.)
306
307 Even on PPC it must be zero-aligned at least for MINPAGESIZE. */
308
309 l_addr = l_dynaddr - dynaddr;
310
311 if ((l_addr & (minpagesize - 1)) == 0
312 && (l_addr & align) == ((l_dynaddr - dynaddr) & align))
313 {
314 if (info_verbose)
315 printf_unfiltered (_("Using PIC (Position Independent Code) "
316 "prelink displacement %s for \"%s\".\n"),
317 paddress (target_gdbarch (), l_addr),
318 so->so_name);
319 }
320 else
321 {
322 	      /* There is no way to verify that the library file matches.  While
323 		 prelinking an unprelinked file (or unprelinking a prelinked
324 		 file), prelink can shift the DYNAMIC segment by an arbitrary
325 		 offset without any page size alignment.  There is also no way
326 		 to read the ELF header and/or Program Headers of the in-memory
327 		 object for even a limited verification that they match.  One
328 		 could verify the DYNAMIC segment instead.  Still, the address
329 		 found here is the best one GDB could find.  */
330
331 warning (_(".dynamic section for \"%s\" "
332 "is not at the expected address "
333 "(wrong library or version mismatch?)"), so->so_name);
334 }
335 }
336
337 set_addr:
338 so->lm_info->l_addr = l_addr;
339 so->lm_info->l_addr_p = 1;
340 }
341
342 return so->lm_info->l_addr;
343 }
344
345 /* Per pspace SVR4 specific data. */
346
347 struct svr4_info
348 {
349 CORE_ADDR debug_base; /* Base of dynamic linker structures. */
350
351 /* Validity flag for debug_loader_offset. */
352 int debug_loader_offset_p;
353
354 /* Load address for the dynamic linker, inferred. */
355 CORE_ADDR debug_loader_offset;
356
357 /* Name of the dynamic linker, valid if debug_loader_offset_p. */
358 char *debug_loader_name;
359
360 /* Load map address for the main executable. */
361 CORE_ADDR main_lm_addr;
362
363 CORE_ADDR interp_text_sect_low;
364 CORE_ADDR interp_text_sect_high;
365 CORE_ADDR interp_plt_sect_low;
366 CORE_ADDR interp_plt_sect_high;
367
368 /* Nonzero if the list of objects was last obtained from the target
369 via qXfer:libraries-svr4:read. */
370 int using_xfer;
371
372 /* Table of struct probe_and_action instances, used by the
373 probes-based interface to map breakpoint addresses to probes
374 and their associated actions. Lookup is performed using
375 probe_and_action->probe->address. */
376 htab_t probes_table;
377
378 /* List of objects loaded into the inferior, used by the probes-
379 based interface. */
380 struct so_list *solib_list;
381 };
382
383 /* Per-program-space data key. */
384 static const struct program_space_data *solib_svr4_pspace_data;
385
386 /* Free the probes table. */
387
388 static void
389 free_probes_table (struct svr4_info *info)
390 {
391 if (info->probes_table == NULL)
392 return;
393
394 htab_delete (info->probes_table);
395 info->probes_table = NULL;
396 }
397
398 /* Free the solib list. */
399
400 static void
401 free_solib_list (struct svr4_info *info)
402 {
403 svr4_free_library_list (&info->solib_list);
404 info->solib_list = NULL;
405 }
406
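/* Per-program-space data cleanup routine: free the probes table, the
   cached solib list, and the svr4_info itself.  */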
407 static void
408 svr4_pspace_data_cleanup (struct program_space *pspace, void *arg)
409 {
410 struct svr4_info *info;
411
412 info = program_space_data (pspace, solib_svr4_pspace_data);
413 if (info == NULL)
414 return;
415
416 free_probes_table (info);
417 free_solib_list (info);
418
419 xfree (info);
420 }
421
422 /* Get the current svr4 data. If none is found yet, add it now. This
423 function always returns a valid object. */
424
425 static struct svr4_info *
426 get_svr4_info (void)
427 {
428 struct svr4_info *info;
429
430 info = program_space_data (current_program_space, solib_svr4_pspace_data);
431 if (info != NULL)
432 return info;
433
434 info = XZALLOC (struct svr4_info);
435 set_program_space_data (current_program_space, solib_svr4_pspace_data, info);
436 return info;
437 }
438
439 /* Local function prototypes */
440
441 static int match_main (const char *);
442
443 /* Read program header TYPE from inferior memory. The header is found
444    by scanning the OS auxiliary vector.
445
446 If TYPE == -1, return the program headers instead of the contents of
447 one program header.
448
449 Return a pointer to allocated memory holding the program header contents,
450    or NULL on failure.  If successful, and unless P_SECT_SIZE is NULL, the
451    size of those contents is returned in *P_SECT_SIZE.  Likewise, the target
452    architecture size (32-bit or 64-bit) is returned in *P_ARCH_SIZE.  */
453
454 static gdb_byte *
455 read_program_header (int type, int *p_sect_size, int *p_arch_size)
456 {
457 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
458 CORE_ADDR at_phdr, at_phent, at_phnum, pt_phdr = 0;
459 int arch_size, sect_size;
460 CORE_ADDR sect_addr;
461 gdb_byte *buf;
462 int pt_phdr_p = 0;
463
464 /* Get required auxv elements from target. */
465 if (target_auxv_search (&current_target, AT_PHDR, &at_phdr) <= 0)
466 return 0;
467 if (target_auxv_search (&current_target, AT_PHENT, &at_phent) <= 0)
468 return 0;
469 if (target_auxv_search (&current_target, AT_PHNUM, &at_phnum) <= 0)
470 return 0;
471 if (!at_phdr || !at_phnum)
472 return 0;
473
474 /* Determine ELF architecture type. */
475 if (at_phent == sizeof (Elf32_External_Phdr))
476 arch_size = 32;
477 else if (at_phent == sizeof (Elf64_External_Phdr))
478 arch_size = 64;
479 else
480 return 0;
481
482 /* Find the requested segment. */
483 if (type == -1)
484 {
485 sect_addr = at_phdr;
486 sect_size = at_phent * at_phnum;
487 }
488 else if (arch_size == 32)
489 {
490 Elf32_External_Phdr phdr;
491 int i;
492
493 /* Search for requested PHDR. */
494 for (i = 0; i < at_phnum; i++)
495 {
496 int p_type;
497
498 if (target_read_memory (at_phdr + i * sizeof (phdr),
499 (gdb_byte *)&phdr, sizeof (phdr)))
500 return 0;
501
502 p_type = extract_unsigned_integer ((gdb_byte *) phdr.p_type,
503 4, byte_order);
504
505 if (p_type == PT_PHDR)
506 {
507 pt_phdr_p = 1;
508 pt_phdr = extract_unsigned_integer ((gdb_byte *) phdr.p_vaddr,
509 4, byte_order);
510 }
511
512 if (p_type == type)
513 break;
514 }
515
516 if (i == at_phnum)
517 return 0;
518
519 /* Retrieve address and size. */
520 sect_addr = extract_unsigned_integer ((gdb_byte *)phdr.p_vaddr,
521 4, byte_order);
522 sect_size = extract_unsigned_integer ((gdb_byte *)phdr.p_memsz,
523 4, byte_order);
524 }
525 else
526 {
527 Elf64_External_Phdr phdr;
528 int i;
529
530 /* Search for requested PHDR. */
531 for (i = 0; i < at_phnum; i++)
532 {
533 int p_type;
534
535 if (target_read_memory (at_phdr + i * sizeof (phdr),
536 (gdb_byte *)&phdr, sizeof (phdr)))
537 return 0;
538
539 p_type = extract_unsigned_integer ((gdb_byte *) phdr.p_type,
540 4, byte_order);
541
542 if (p_type == PT_PHDR)
543 {
544 pt_phdr_p = 1;
545 pt_phdr = extract_unsigned_integer ((gdb_byte *) phdr.p_vaddr,
546 8, byte_order);
547 }
548
549 if (p_type == type)
550 break;
551 }
552
553 if (i == at_phnum)
554 return 0;
555
556 /* Retrieve address and size. */
557 sect_addr = extract_unsigned_integer ((gdb_byte *)phdr.p_vaddr,
558 8, byte_order);
559 sect_size = extract_unsigned_integer ((gdb_byte *)phdr.p_memsz,
560 8, byte_order);
561 }
562
563 /* PT_PHDR is optional, but we really need it
564 for PIE to make this work in general. */
565
566 if (pt_phdr_p)
567 {
568       /* at_phdr is the real address in memory; pt_phdr is what the program
569 	 header says it is.  The relocation offset is the difference between the two. */
570 sect_addr = sect_addr + (at_phdr - pt_phdr);
571 }
572
573 /* Read in requested program header. */
574 buf = xmalloc (sect_size);
575 if (target_read_memory (sect_addr, buf, sect_size))
576 {
577 xfree (buf);
578 return NULL;
579 }
580
581 if (p_arch_size)
582 *p_arch_size = arch_size;
583 if (p_sect_size)
584 *p_sect_size = sect_size;
585
586 return buf;
587 }
588
589
590 /* Return program interpreter string. */
591 static char *
592 find_program_interpreter (void)
593 {
594 gdb_byte *buf = NULL;
595
596 /* If we have an exec_bfd, use its section table. */
597 if (exec_bfd
598 && bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
599 {
600 struct bfd_section *interp_sect;
601
602 interp_sect = bfd_get_section_by_name (exec_bfd, ".interp");
603 if (interp_sect != NULL)
604 {
605 int sect_size = bfd_section_size (exec_bfd, interp_sect);
606
607 buf = xmalloc (sect_size);
608 bfd_get_section_contents (exec_bfd, interp_sect, buf, 0, sect_size);
609 }
610 }
611
612   /* If we didn't find it, use the target auxiliary vector.  */
613 if (!buf)
614 buf = read_program_header (PT_INTERP, NULL, NULL);
615
616 return (char *) buf;
617 }
618
619
620 /* Scan for DYNTAG in the .dynamic section of ABFD.  If DYNTAG is found,
621    return 1 and set the corresponding *PTR.  */
622
623 static int
624 scan_dyntag (int dyntag, bfd *abfd, CORE_ADDR *ptr)
625 {
626 int arch_size, step, sect_size;
627 long dyn_tag;
628 CORE_ADDR dyn_ptr, dyn_addr;
629 gdb_byte *bufend, *bufstart, *buf;
630 Elf32_External_Dyn *x_dynp_32;
631 Elf64_External_Dyn *x_dynp_64;
632 struct bfd_section *sect;
633 struct target_section *target_section;
634
635 if (abfd == NULL)
636 return 0;
637
638 if (bfd_get_flavour (abfd) != bfd_target_elf_flavour)
639 return 0;
640
641 arch_size = bfd_get_arch_size (abfd);
642 if (arch_size == -1)
643 return 0;
644
645 /* Find the start address of the .dynamic section. */
646 sect = bfd_get_section_by_name (abfd, ".dynamic");
647 if (sect == NULL)
648 return 0;
649
650 for (target_section = current_target_sections->sections;
651 target_section < current_target_sections->sections_end;
652 target_section++)
653 if (sect == target_section->the_bfd_section)
654 break;
655 if (target_section < current_target_sections->sections_end)
656 dyn_addr = target_section->addr;
657 else
658 {
659       /* ABFD may come from OBJFILE acting only as a symbol file without being
660 	 loaded into the target (see add_symbol_file_command).  In that case,
661 	 fall back to the file's VMA address; there is no way to relocate the
662 	 section to its actual in-memory address.  */
663
664 dyn_addr = bfd_section_vma (abfd, sect);
665 }
666
667 /* Read in .dynamic from the BFD. We will get the actual value
668 from memory later. */
669 sect_size = bfd_section_size (abfd, sect);
670 buf = bufstart = alloca (sect_size);
671 if (!bfd_get_section_contents (abfd, sect,
672 buf, 0, sect_size))
673 return 0;
674
675 /* Iterate over BUF and scan for DYNTAG. If found, set PTR and return. */
676 step = (arch_size == 32) ? sizeof (Elf32_External_Dyn)
677 : sizeof (Elf64_External_Dyn);
678 for (bufend = buf + sect_size;
679 buf < bufend;
680 buf += step)
681 {
682 if (arch_size == 32)
683 {
684 x_dynp_32 = (Elf32_External_Dyn *) buf;
685 dyn_tag = bfd_h_get_32 (abfd, (bfd_byte *) x_dynp_32->d_tag);
686 dyn_ptr = bfd_h_get_32 (abfd, (bfd_byte *) x_dynp_32->d_un.d_ptr);
687 }
688 else
689 {
690 x_dynp_64 = (Elf64_External_Dyn *) buf;
691 dyn_tag = bfd_h_get_64 (abfd, (bfd_byte *) x_dynp_64->d_tag);
692 dyn_ptr = bfd_h_get_64 (abfd, (bfd_byte *) x_dynp_64->d_un.d_ptr);
693 }
694 if (dyn_tag == DT_NULL)
695 return 0;
696 if (dyn_tag == dyntag)
697 {
698 /* If requested, try to read the runtime value of this .dynamic
699 entry. */
700 if (ptr)
701 {
702 struct type *ptr_type;
703 gdb_byte ptr_buf[8];
704 CORE_ADDR ptr_addr;
705
706 ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
707 ptr_addr = dyn_addr + (buf - bufstart) + arch_size / 8;
708 if (target_read_memory (ptr_addr, ptr_buf, arch_size / 8) == 0)
709 dyn_ptr = extract_typed_address (ptr_buf, ptr_type);
710 *ptr = dyn_ptr;
711 }
712 return 1;
713 }
714 }
715
716 return 0;
717 }
718
719 /* Scan for DYNTAG in .dynamic section of the target's main executable,
720    found by consulting the OS auxiliary vector.  If DYNTAG is found,
721    return 1 and set the corresponding *PTR.  */
722
723 static int
724 scan_dyntag_auxv (int dyntag, CORE_ADDR *ptr)
725 {
726 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
727 int sect_size, arch_size, step;
728 long dyn_tag;
729 CORE_ADDR dyn_ptr;
730 gdb_byte *bufend, *bufstart, *buf;
731
732 /* Read in .dynamic section. */
733 buf = bufstart = read_program_header (PT_DYNAMIC, &sect_size, &arch_size);
734 if (!buf)
735 return 0;
736
737 /* Iterate over BUF and scan for DYNTAG. If found, set PTR and return. */
738 step = (arch_size == 32) ? sizeof (Elf32_External_Dyn)
739 : sizeof (Elf64_External_Dyn);
740 for (bufend = buf + sect_size;
741 buf < bufend;
742 buf += step)
743 {
744 if (arch_size == 32)
745 {
746 Elf32_External_Dyn *dynp = (Elf32_External_Dyn *) buf;
747
748 dyn_tag = extract_unsigned_integer ((gdb_byte *) dynp->d_tag,
749 4, byte_order);
750 dyn_ptr = extract_unsigned_integer ((gdb_byte *) dynp->d_un.d_ptr,
751 4, byte_order);
752 }
753 else
754 {
755 Elf64_External_Dyn *dynp = (Elf64_External_Dyn *) buf;
756
757 dyn_tag = extract_unsigned_integer ((gdb_byte *) dynp->d_tag,
758 8, byte_order);
759 dyn_ptr = extract_unsigned_integer ((gdb_byte *) dynp->d_un.d_ptr,
760 8, byte_order);
761 }
762 if (dyn_tag == DT_NULL)
763 break;
764
765 if (dyn_tag == dyntag)
766 {
767 if (ptr)
768 *ptr = dyn_ptr;
769
770 xfree (bufstart);
771 return 1;
772 }
773 }
774
775 xfree (bufstart);
776 return 0;
777 }
778
779 /* Locate the base address of dynamic linker structs for SVR4 ELF
780    targets.
781 
782    For SVR4 ELF targets, the address of the dynamic linker's runtime
783 structure is contained within the dynamic info section in the
784 executable file. The dynamic section is also mapped into the
785 inferior address space. Because the runtime loader fills in the
786 real address before starting the inferior, we have to read in the
787 dynamic info section from the inferior address space.
788 If there are any errors while trying to find the address, we
789 silently return 0, otherwise the found address is returned. */
790
791 static CORE_ADDR
792 elf_locate_base (void)
793 {
794 struct minimal_symbol *msymbol;
795 CORE_ADDR dyn_ptr;
796
797 /* Look for DT_MIPS_RLD_MAP first. MIPS executables use this
798 instead of DT_DEBUG, although they sometimes contain an unused
799 DT_DEBUG. */
800 if (scan_dyntag (DT_MIPS_RLD_MAP, exec_bfd, &dyn_ptr)
801 || scan_dyntag_auxv (DT_MIPS_RLD_MAP, &dyn_ptr))
802 {
803 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
804 gdb_byte *pbuf;
805 int pbuf_size = TYPE_LENGTH (ptr_type);
806
807 pbuf = alloca (pbuf_size);
808 /* DT_MIPS_RLD_MAP contains a pointer to the address
809 of the dynamic link structure. */
810 if (target_read_memory (dyn_ptr, pbuf, pbuf_size))
811 return 0;
812 return extract_typed_address (pbuf, ptr_type);
813 }
814
815 /* Find DT_DEBUG. */
816 if (scan_dyntag (DT_DEBUG, exec_bfd, &dyn_ptr)
817 || scan_dyntag_auxv (DT_DEBUG, &dyn_ptr))
818 return dyn_ptr;
819
820 /* This may be a static executable. Look for the symbol
821 conventionally named _r_debug, as a last resort. */
822 msymbol = lookup_minimal_symbol ("_r_debug", NULL, symfile_objfile);
823 if (msymbol != NULL)
824 return SYMBOL_VALUE_ADDRESS (msymbol);
825
826 /* DT_DEBUG entry not found. */
827 return 0;
828 }
829
830 /* Locate the base address of dynamic linker structs.
831
832 For both the SunOS and SVR4 shared library implementations, if the
833 inferior executable has been linked dynamically, there is a single
834 address somewhere in the inferior's data space which is the key to
835 locating all of the dynamic linker's runtime structures. This
836 address is the value of the debug base symbol. The job of this
837 function is to find and return that address, or to return 0 if there
838 is no such address (the executable is statically linked for example).
839
840 For SunOS, the job is almost trivial, since the dynamic linker and
841    all of its structures are statically linked to the executable at
842 link time. Thus the symbol for the address we are looking for has
843 already been added to the minimal symbol table for the executable's
844 objfile at the time the symbol file's symbols were read, and all we
845 have to do is look it up there. Note that we explicitly do NOT want
846 to find the copies in the shared library.
847
848 The SVR4 version is a bit more complicated because the address
849 is contained somewhere in the dynamic info section. We have to go
850 to a lot more work to discover the address of the debug base symbol.
851 Because of this complexity, we cache the value we find and return that
852 value on subsequent invocations. Note there is no copy in the
853 executable symbol tables. */
854
855 static CORE_ADDR
856 locate_base (struct svr4_info *info)
857 {
858 /* Check to see if we have a currently valid address, and if so, avoid
859 doing all this work again and just return the cached address. If
860 we have no cached address, try to locate it in the dynamic info
861 section for ELF executables. There's no point in doing any of this
862 though if we don't have some link map offsets to work with. */
863
864 if (info->debug_base == 0 && svr4_have_link_map_offsets ())
865 info->debug_base = elf_locate_base ();
866 return info->debug_base;
867 }
868
869 /* Find the first element in the inferior's dynamic link map, and
870 return its address in the inferior. Return zero if the address
871 could not be determined.
872
873 FIXME: Perhaps we should validate the info somehow, perhaps by
874 checking r_version for a known version number, or r_state for
875 RT_CONSISTENT. */
876
877 static CORE_ADDR
878 solib_svr4_r_map (struct svr4_info *info)
879 {
880 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
881 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
882 CORE_ADDR addr = 0;
883 volatile struct gdb_exception ex;
884
885 TRY_CATCH (ex, RETURN_MASK_ERROR)
886 {
887 addr = read_memory_typed_address (info->debug_base + lmo->r_map_offset,
888 ptr_type);
889 }
890 exception_print (gdb_stderr, ex);
891 return addr;
892 }
893
894 /* Find r_brk from the inferior's debug base. */
895
896 static CORE_ADDR
897 solib_svr4_r_brk (struct svr4_info *info)
898 {
899 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
900 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
901
902 return read_memory_typed_address (info->debug_base + lmo->r_brk_offset,
903 ptr_type);
904 }
905
906 /* Find the link map for the dynamic linker (if it is not in the
907 normal list of loaded shared objects). */
908
909 static CORE_ADDR
910 solib_svr4_r_ldsomap (struct svr4_info *info)
911 {
912 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
913 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
914 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
915 ULONGEST version;
916
917 /* Check version, and return zero if `struct r_debug' doesn't have
918 the r_ldsomap member. */
919 version
920 = read_memory_unsigned_integer (info->debug_base + lmo->r_version_offset,
921 lmo->r_version_size, byte_order);
922 if (version < 2 || lmo->r_ldsomap_offset == -1)
923 return 0;
924
925 return read_memory_typed_address (info->debug_base + lmo->r_ldsomap_offset,
926 ptr_type);
927 }
928
929 /* On Solaris systems with some versions of the dynamic linker,
930 ld.so's l_name pointer points to the SONAME in the string table
931 rather than into writable memory. So that GDB can find shared
932 libraries when loading a core file generated by gcore, ensure that
933 memory areas containing the l_name string are saved in the core
934 file. */
935
936 static int
937 svr4_keep_data_in_core (CORE_ADDR vaddr, unsigned long size)
938 {
939 struct svr4_info *info;
940 CORE_ADDR ldsomap;
941 struct so_list *new;
942 struct cleanup *old_chain;
943 CORE_ADDR name_lm;
944
945 info = get_svr4_info ();
946
947 info->debug_base = 0;
948 locate_base (info);
949 if (!info->debug_base)
950 return 0;
951
952 ldsomap = solib_svr4_r_ldsomap (info);
953 if (!ldsomap)
954 return 0;
955
956 new = XZALLOC (struct so_list);
957 old_chain = make_cleanup (xfree, new);
958 new->lm_info = lm_info_read (ldsomap);
959 make_cleanup (xfree, new->lm_info);
960 name_lm = new->lm_info ? new->lm_info->l_name : 0;
961 do_cleanups (old_chain);
962
963 return (name_lm >= vaddr && name_lm < vaddr + size);
964 }
965
966 /* Implement the "open_symbol_file_object" target_so_ops method.
967
968 If no open symbol file, attempt to locate and open the main symbol
969 file. On SVR4 systems, this is the first link map entry. If its
970 name is here, we can open it. Useful when attaching to a process
971 without first loading its symbol file. */
972
973 static int
974 open_symbol_file_object (void *from_ttyp)
975 {
976 CORE_ADDR lm, l_name;
977 char *filename;
978 int errcode;
979 int from_tty = *(int *)from_ttyp;
980 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
981 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
982 int l_name_size = TYPE_LENGTH (ptr_type);
983 gdb_byte *l_name_buf = xmalloc (l_name_size);
984 struct cleanup *cleanups = make_cleanup (xfree, l_name_buf);
985 struct svr4_info *info = get_svr4_info ();
986
987 if (symfile_objfile)
988 if (!query (_("Attempt to reload symbols from process? ")))
989 {
990 do_cleanups (cleanups);
991 return 0;
992 }
993
994 /* Always locate the debug struct, in case it has moved. */
995 info->debug_base = 0;
996 if (locate_base (info) == 0)
997 {
998 do_cleanups (cleanups);
999 return 0; /* failed somehow... */
1000 }
1001
1002 /* First link map member should be the executable. */
1003 lm = solib_svr4_r_map (info);
1004 if (lm == 0)
1005 {
1006 do_cleanups (cleanups);
1007 return 0; /* failed somehow... */
1008 }
1009
1010 /* Read address of name from target memory to GDB. */
1011 read_memory (lm + lmo->l_name_offset, l_name_buf, l_name_size);
1012
1013 /* Convert the address to host format. */
1014 l_name = extract_typed_address (l_name_buf, ptr_type);
1015
1016 if (l_name == 0)
1017 {
1018 do_cleanups (cleanups);
1019 return 0; /* No filename. */
1020 }
1021
1022 /* Now fetch the filename from target memory. */
1023 target_read_string (l_name, &filename, SO_NAME_MAX_PATH_SIZE - 1, &errcode);
1024 make_cleanup (xfree, filename);
1025
1026 if (errcode)
1027 {
1028 warning (_("failed to read exec filename from attached file: %s"),
1029 safe_strerror (errcode));
1030 do_cleanups (cleanups);
1031 return 0;
1032 }
1033
1034 /* Have a pathname: read the symbol file. */
1035 symbol_file_add_main (filename, from_tty);
1036
1037 do_cleanups (cleanups);
1038 return 1;
1039 }
1040
1041 /* Data exchange structure for the XML parser as returned by
1042 svr4_current_sos_via_xfer_libraries. */
1043
1044 struct svr4_library_list
1045 {
1046 struct so_list *head, **tailp;
1047
1048 /* Inferior address of struct link_map used for the main executable. It is
1049 NULL if not known. */
1050 CORE_ADDR main_lm;
1051 };
1052
1053 /* Implementation for target_so_ops.free_so. */
1054
1055 static void
1056 svr4_free_so (struct so_list *so)
1057 {
1058 xfree (so->lm_info);
1059 }
1060
1061 /* Implement target_so_ops.clear_so. */
1062
1063 static void
1064 svr4_clear_so (struct so_list *so)
1065 {
1066 if (so->lm_info != NULL)
1067 so->lm_info->l_addr_p = 0;
1068 }
1069
1070 /* Free so_list built so far (called via cleanup). */
1071
1072 static void
1073 svr4_free_library_list (void *p_list)
1074 {
1075 struct so_list *list = *(struct so_list **) p_list;
1076
1077 while (list != NULL)
1078 {
1079 struct so_list *next = list->next;
1080
1081 free_so (list);
1082 list = next;
1083 }
1084 }
1085
1086 /* Copy library list. */
1087
1088 static struct so_list *
1089 svr4_copy_library_list (struct so_list *src)
1090 {
1091 struct so_list *dst = NULL;
1092 struct so_list **link = &dst;
1093
1094 while (src != NULL)
1095 {
1096 struct so_list *new;
1097
1098 new = xmalloc (sizeof (struct so_list));
1099 memcpy (new, src, sizeof (struct so_list));
1100
1101 new->lm_info = xmalloc (sizeof (struct lm_info));
1102 memcpy (new->lm_info, src->lm_info, sizeof (struct lm_info));
1103
1104 new->next = NULL;
1105 *link = new;
1106 link = &new->next;
1107
1108 src = src->next;
1109 }
1110
1111 return dst;
1112 }
1113
1114 #ifdef HAVE_LIBEXPAT
1115
1116 #include "xml-support.h"
1117
1118 /* Handle the start of a <library> element. Note: new elements are added
1119 at the tail of the list, keeping the list in order. */
1120
1121 static void
1122 library_list_start_library (struct gdb_xml_parser *parser,
1123 const struct gdb_xml_element *element,
1124 void *user_data, VEC(gdb_xml_value_s) *attributes)
1125 {
1126 struct svr4_library_list *list = user_data;
1127 const char *name = xml_find_attribute (attributes, "name")->value;
1128 ULONGEST *lmp = xml_find_attribute (attributes, "lm")->value;
1129 ULONGEST *l_addrp = xml_find_attribute (attributes, "l_addr")->value;
1130 ULONGEST *l_ldp = xml_find_attribute (attributes, "l_ld")->value;
1131 struct so_list *new_elem;
1132
1133 new_elem = XZALLOC (struct so_list);
1134 new_elem->lm_info = XZALLOC (struct lm_info);
1135 new_elem->lm_info->lm_addr = *lmp;
1136 new_elem->lm_info->l_addr_inferior = *l_addrp;
1137 new_elem->lm_info->l_ld = *l_ldp;
1138
1139 strncpy (new_elem->so_name, name, sizeof (new_elem->so_name) - 1);
1140 new_elem->so_name[sizeof (new_elem->so_name) - 1] = 0;
1141 strcpy (new_elem->so_original_name, new_elem->so_name);
1142
1143 *list->tailp = new_elem;
1144 list->tailp = &new_elem->next;
1145 }
1146
1147 /* Handle the start of a <library-list-svr4> element. */
1148
1149 static void
1150 svr4_library_list_start_list (struct gdb_xml_parser *parser,
1151 const struct gdb_xml_element *element,
1152 void *user_data, VEC(gdb_xml_value_s) *attributes)
1153 {
1154 struct svr4_library_list *list = user_data;
1155 const char *version = xml_find_attribute (attributes, "version")->value;
1156 struct gdb_xml_value *main_lm = xml_find_attribute (attributes, "main-lm");
1157
1158 if (strcmp (version, "1.0") != 0)
1159 gdb_xml_error (parser,
1160 _("SVR4 Library list has unsupported version \"%s\""),
1161 version);
1162
1163 if (main_lm)
1164 list->main_lm = *(ULONGEST *) main_lm->value;
1165 }
1166
1167 /* The allowed elements and attributes for an XML library list.
1168    The root element is a <library-list-svr4>.  */
1169
1170 static const struct gdb_xml_attribute svr4_library_attributes[] =
1171 {
1172 { "name", GDB_XML_AF_NONE, NULL, NULL },
1173 { "lm", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1174 { "l_addr", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1175 { "l_ld", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1176 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1177 };
1178
1179 static const struct gdb_xml_element svr4_library_list_children[] =
1180 {
1181 {
1182 "library", svr4_library_attributes, NULL,
1183 GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL,
1184 library_list_start_library, NULL
1185 },
1186 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1187 };
1188
1189 static const struct gdb_xml_attribute svr4_library_list_attributes[] =
1190 {
1191 { "version", GDB_XML_AF_NONE, NULL, NULL },
1192 { "main-lm", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
1193 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1194 };
1195
1196 static const struct gdb_xml_element svr4_library_list_elements[] =
1197 {
1198 { "library-list-svr4", svr4_library_list_attributes, svr4_library_list_children,
1199 GDB_XML_EF_NONE, svr4_library_list_start_list, NULL },
1200 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1201 };
1202
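/* For illustration only, a hypothetical document in the format described
   by the tables above (all addresses are invented):

     <library-list-svr4 version="1.0" main-lm="0x7f0000001000">
       <library name="/lib/libfoo.so.1" lm="0x7f0000002000"
                l_addr="0x7f0000100000" l_ld="0x7f0000180000"/>
     </library-list-svr4>

   Such a document is produced by the target in response to a
   qXfer:libraries-svr4:read request.  */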
1203 /* Parse the qXfer:libraries-svr4:read XML document DOCUMENT into *LIST.
1204 
1205    Return 0 if the document could not be parsed.  Return 1 if *LIST
1206    contains the library list; it may be empty, and the caller is
1207    responsible for freeing all its entries.  */
1208
1209 static int
1210 svr4_parse_libraries (const char *document, struct svr4_library_list *list)
1211 {
1212 struct cleanup *back_to = make_cleanup (svr4_free_library_list,
1213 &list->head);
1214
1215 memset (list, 0, sizeof (*list));
1216 list->tailp = &list->head;
1217 if (gdb_xml_parse_quick (_("target library list"), "library-list.dtd",
1218 svr4_library_list_elements, document, list) == 0)
1219 {
1220 /* Parsed successfully, keep the result. */
1221 discard_cleanups (back_to);
1222 return 1;
1223 }
1224
1225 do_cleanups (back_to);
1226 return 0;
1227 }
1228
1229 /* Attempt to get so_list from target via qXfer:libraries-svr4:read packet.
1230
1231    Return 0 if the packet is not supported; *LIST is not modified in
1232    that case.  Return 1 if *LIST contains the library list; it may be
1233    empty, and the caller is responsible for freeing all its entries.
1234
1235 Note that ANNEX must be NULL if the remote does not explicitly allow
1236 qXfer:libraries-svr4:read packets with non-empty annexes. Support for
1237 this can be checked using target_augmented_libraries_svr4_read (). */
1238
1239 static int
1240 svr4_current_sos_via_xfer_libraries (struct svr4_library_list *list,
1241 const char *annex)
1242 {
1243 char *svr4_library_document;
1244 int result;
1245 struct cleanup *back_to;
1246
1247 gdb_assert (annex == NULL || target_augmented_libraries_svr4_read ());
1248
1249 /* Fetch the list of shared libraries. */
1250 svr4_library_document = target_read_stralloc (&current_target,
1251 TARGET_OBJECT_LIBRARIES_SVR4,
1252 annex);
1253 if (svr4_library_document == NULL)
1254 return 0;
1255
1256 back_to = make_cleanup (xfree, svr4_library_document);
1257 result = svr4_parse_libraries (svr4_library_document, list);
1258 do_cleanups (back_to);
1259
1260 return result;
1261 }
1262
1263 #else
1264
1265 static int
1266 svr4_current_sos_via_xfer_libraries (struct svr4_library_list *list,
1267 const char *annex)
1268 {
1269 return 0;
1270 }
1271
1272 #endif
1273
1274 /* If no shared library information is available from the dynamic
1275 linker, build a fallback list from other sources. */
1276
1277 static struct so_list *
1278 svr4_default_sos (void)
1279 {
1280 struct svr4_info *info = get_svr4_info ();
1281 struct so_list *new;
1282
1283 if (!info->debug_loader_offset_p)
1284 return NULL;
1285
1286 new = XZALLOC (struct so_list);
1287
1288 new->lm_info = xzalloc (sizeof (struct lm_info));
1289
1290 /* Nothing will ever check the other fields if we set l_addr_p. */
1291 new->lm_info->l_addr = info->debug_loader_offset;
1292 new->lm_info->l_addr_p = 1;
1293
1294 strncpy (new->so_name, info->debug_loader_name, SO_NAME_MAX_PATH_SIZE - 1);
1295 new->so_name[SO_NAME_MAX_PATH_SIZE - 1] = '\0';
1296 strcpy (new->so_original_name, new->so_name);
1297
1298 return new;
1299 }
1300
1301 /* Read the whole inferior libraries chain starting at address LM.
1302    Expect the l_prev field of the first entry to be PREV_LM.
1303 Add the entries to the tail referenced by LINK_PTR_PTR. Ignore the
1304 first entry if IGNORE_FIRST and set global MAIN_LM_ADDR according
1305 to it. Returns nonzero upon success. If zero is returned the
1306 entries stored to LINK_PTR_PTR are still valid although they may
1307 represent only part of the inferior library list. */
1308
1309 static int
1310 svr4_read_so_list (CORE_ADDR lm, CORE_ADDR prev_lm,
1311 struct so_list ***link_ptr_ptr, int ignore_first)
1312 {
1313 CORE_ADDR next_lm;
1314
1315 for (; lm != 0; prev_lm = lm, lm = next_lm)
1316 {
1317 struct so_list *new;
1318 struct cleanup *old_chain;
1319 int errcode;
1320 char *buffer;
1321
1322 new = XZALLOC (struct so_list);
1323 old_chain = make_cleanup_free_so (new);
1324
1325 new->lm_info = lm_info_read (lm);
1326 if (new->lm_info == NULL)
1327 {
1328 do_cleanups (old_chain);
1329 return 0;
1330 }
1331
1332 next_lm = new->lm_info->l_next;
1333
1334 if (new->lm_info->l_prev != prev_lm)
1335 {
1336 warning (_("Corrupted shared library list: %s != %s"),
1337 paddress (target_gdbarch (), prev_lm),
1338 paddress (target_gdbarch (), new->lm_info->l_prev));
1339 do_cleanups (old_chain);
1340 return 0;
1341 }
1342
1343 /* For SVR4 versions, the first entry in the link map is for the
1344 inferior executable, so we must ignore it. For some versions of
1345 SVR4, it has no name. For others (Solaris 2.3 for example), it
1346 does have a name, so we can no longer use a missing name to
1347 decide when to ignore it. */
1348 if (ignore_first && new->lm_info->l_prev == 0)
1349 {
1350 struct svr4_info *info = get_svr4_info ();
1351
1352 info->main_lm_addr = new->lm_info->lm_addr;
1353 do_cleanups (old_chain);
1354 continue;
1355 }
1356
1357 /* Extract this shared object's name. */
1358 target_read_string (new->lm_info->l_name, &buffer,
1359 SO_NAME_MAX_PATH_SIZE - 1, &errcode);
1360 if (errcode != 0)
1361 {
1362 warning (_("Can't read pathname for load map: %s."),
1363 safe_strerror (errcode));
1364 do_cleanups (old_chain);
1365 continue;
1366 }
1367
1368 strncpy (new->so_name, buffer, SO_NAME_MAX_PATH_SIZE - 1);
1369 new->so_name[SO_NAME_MAX_PATH_SIZE - 1] = '\0';
1370 strcpy (new->so_original_name, new->so_name);
1371 xfree (buffer);
1372
1373 /* If this entry has no name, or its name matches the name
1374 for the main executable, don't include it in the list. */
1375 if (! new->so_name[0] || match_main (new->so_name))
1376 {
1377 do_cleanups (old_chain);
1378 continue;
1379 }
1380
1381 discard_cleanups (old_chain);
1382 new->next = 0;
1383 **link_ptr_ptr = new;
1384 *link_ptr_ptr = &new->next;
1385 }
1386
1387 return 1;
1388 }
1389
1390 /* Read the full list of currently loaded shared objects directly
1391 from the inferior, without referring to any libraries read and
1392 stored by the probes interface. Handle special cases relating
1393 to the first elements of the list. */
1394
1395 static struct so_list *
1396 svr4_current_sos_direct (struct svr4_info *info)
1397 {
1398 CORE_ADDR lm;
1399 struct so_list *head = NULL;
1400 struct so_list **link_ptr = &head;
1401 struct cleanup *back_to;
1402 int ignore_first;
1403 struct svr4_library_list library_list;
1404
1405 /* Fall back to manual examination of the target if the packet is not
1406 supported or gdbserver failed to find DT_DEBUG. gdb.server/solib-list.exp
1407 tests a case where gdbserver cannot find the shared libraries list while
1408 GDB itself is able to find it via SYMFILE_OBJFILE.
1409
1410 Unfortunately statically linked inferiors will also fall back through this
1411 suboptimal code path. */
1412
1413 info->using_xfer = svr4_current_sos_via_xfer_libraries (&library_list,
1414 NULL);
1415 if (info->using_xfer)
1416 {
1417 if (library_list.main_lm)
1418 info->main_lm_addr = library_list.main_lm;
1419
1420 return library_list.head ? library_list.head : svr4_default_sos ();
1421 }
1422
1423 /* Always locate the debug struct, in case it has moved. */
1424 info->debug_base = 0;
1425 locate_base (info);
1426
1427 /* If we can't find the dynamic linker's base structure, this
1428 must not be a dynamically linked executable. Hmm. */
1429 if (! info->debug_base)
1430 return svr4_default_sos ();
1431
1432 /* Assume that everything is a library if the dynamic loader was loaded
1433 late by a static executable. */
1434 if (exec_bfd && bfd_get_section_by_name (exec_bfd, ".dynamic") == NULL)
1435 ignore_first = 0;
1436 else
1437 ignore_first = 1;
1438
1439 back_to = make_cleanup (svr4_free_library_list, &head);
1440
1441 /* Walk the inferior's link map list, and build our list of
1442 `struct so_list' nodes. */
1443 lm = solib_svr4_r_map (info);
1444 if (lm)
1445 svr4_read_so_list (lm, 0, &link_ptr, ignore_first);
1446
1447 /* On Solaris, the dynamic linker is not in the normal list of
1448 shared objects, so make sure we pick it up too. Having
1449 symbol information for the dynamic linker is quite crucial
1450 for skipping dynamic linker resolver code. */
1451 lm = solib_svr4_r_ldsomap (info);
1452 if (lm)
1453 svr4_read_so_list (lm, 0, &link_ptr, 0);
1454
1455 discard_cleanups (back_to);
1456
1457 if (head == NULL)
1458 return svr4_default_sos ();
1459
1460 return head;
1461 }
1462
1463 /* Implement the "current_sos" target_so_ops method. */
1464
1465 static struct so_list *
1466 svr4_current_sos (void)
1467 {
1468 struct svr4_info *info = get_svr4_info ();
1469
1470 /* If the solib list has been read and stored by the probes
1471 interface then we return a copy of the stored list. */
1472 if (info->solib_list != NULL)
1473 return svr4_copy_library_list (info->solib_list);
1474
1475 /* Otherwise obtain the solib list directly from the inferior. */
1476 return svr4_current_sos_direct (info);
1477 }
1478
1479 /* Get the address of the link_map for a given OBJFILE. */
1480
1481 CORE_ADDR
1482 svr4_fetch_objfile_link_map (struct objfile *objfile)
1483 {
1484 struct so_list *so;
1485 struct svr4_info *info = get_svr4_info ();
1486
1487 /* Cause svr4_current_sos() to be run if it hasn't been already. */
1488 if (info->main_lm_addr == 0)
1489 solib_add (NULL, 0, &current_target, auto_solib_add);
1490
1491 /* svr4_current_sos() will set main_lm_addr for the main executable. */
1492 if (objfile == symfile_objfile)
1493 return info->main_lm_addr;
1494
1495 /* The other link map addresses may be found by examining the list
1496 of shared libraries. */
1497 for (so = master_so_list (); so; so = so->next)
1498 if (so->objfile == objfile)
1499 return so->lm_info->lm_addr;
1500
1501 /* Not found! */
1502 return 0;
1503 }
1504
1505 /* On some systems, the only way to recognize the link map entry for
1506 the main executable file is by looking at its name. Return
1507 non-zero iff SONAME matches one of the known main executable names. */
1508
1509 static int
1510 match_main (const char *soname)
1511 {
1512 const char * const *mainp;
1513
1514 for (mainp = main_name_list; *mainp != NULL; mainp++)
1515 {
1516 if (strcmp (soname, *mainp) == 0)
1517 return (1);
1518 }
1519
1520 return (0);
1521 }
1522
1523 /* Return 1 if PC lies in the dynamic symbol resolution code of the
1524 SVR4 run time loader. */
1525
1526 int
1527 svr4_in_dynsym_resolve_code (CORE_ADDR pc)
1528 {
1529 struct svr4_info *info = get_svr4_info ();
1530
1531 return ((pc >= info->interp_text_sect_low
1532 && pc < info->interp_text_sect_high)
1533 || (pc >= info->interp_plt_sect_low
1534 && pc < info->interp_plt_sect_high)
1535 || in_plt_section (pc, NULL)
1536 || in_gnu_ifunc_stub (pc));
1537 }
1538
1539 /* Given an executable's ABFD and target, compute the entry-point
1540 address. */
1541
1542 static CORE_ADDR
1543 exec_entry_point (struct bfd *abfd, struct target_ops *targ)
1544 {
1545 CORE_ADDR addr;
1546
1547 /* KevinB wrote ... for most targets, the address returned by
1548 bfd_get_start_address() is the entry point for the start
1549 function. But, for some targets, bfd_get_start_address() returns
1550 the address of a function descriptor from which the entry point
1551 address may be extracted. This address is extracted by
1552 gdbarch_convert_from_func_ptr_addr(). The method
1553 gdbarch_convert_from_func_ptr_addr() is the merely the identify
1554 function for targets which don't use function descriptors. */
1555 addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
1556 bfd_get_start_address (abfd),
1557 targ);
1558 return gdbarch_addr_bits_remove (target_gdbarch (), addr);
1559 }
1560
1561 /* A probe and its associated action. */
1562
1563 struct probe_and_action
1564 {
1565 /* The probe. */
1566 struct probe *probe;
1567
1568 /* The action. */
1569 enum probe_action action;
1570 };
1571
1572 /* Returns a hash code for the probe_and_action referenced by p. */
1573
1574 static hashval_t
1575 hash_probe_and_action (const void *p)
1576 {
1577 const struct probe_and_action *pa = p;
1578
1579 return (hashval_t) pa->probe->address;
1580 }
1581
1582 /* Returns non-zero if the probe_and_actions referenced by p1 and p2
1583 are equal. */
1584
1585 static int
1586 equal_probe_and_action (const void *p1, const void *p2)
1587 {
1588 const struct probe_and_action *pa1 = p1;
1589 const struct probe_and_action *pa2 = p2;
1590
1591 return pa1->probe->address == pa2->probe->address;
1592 }
1593
1594 /* Register a solib event probe and its associated action in the
1595 probes table. */
1596
1597 static void
1598 register_solib_event_probe (struct probe *probe, enum probe_action action)
1599 {
1600 struct svr4_info *info = get_svr4_info ();
1601 struct probe_and_action lookup, *pa;
1602 void **slot;
1603
1604 /* Create the probes table, if necessary. */
1605 if (info->probes_table == NULL)
1606 info->probes_table = htab_create_alloc (1, hash_probe_and_action,
1607 equal_probe_and_action,
1608 xfree, xcalloc, xfree);
1609
1610 lookup.probe = probe;
1611 slot = htab_find_slot (info->probes_table, &lookup, INSERT);
1612 gdb_assert (*slot == HTAB_EMPTY_ENTRY);
1613
1614 pa = XCNEW (struct probe_and_action);
1615 pa->probe = probe;
1616 pa->action = action;
1617
1618 *slot = pa;
1619 }
1620
1621 /* Get the solib event probe at the specified location, and the
1622 action associated with it. Returns NULL if no solib event probe
1623 was found. */
1624
1625 static struct probe_and_action *
1626 solib_event_probe_at (struct svr4_info *info, CORE_ADDR address)
1627 {
1628 struct probe lookup_probe;
1629 struct probe_and_action lookup;
1630 void **slot;
1631
1632 lookup_probe.address = address;
1633 lookup.probe = &lookup_probe;
1634 slot = htab_find_slot (info->probes_table, &lookup, NO_INSERT);
1635
1636 if (slot == NULL)
1637 return NULL;
1638
1639 return (struct probe_and_action *) *slot;
1640 }
1641
1642 /* Decide what action to take when the specified solib event probe is
1643 hit. */
1644
1645 static enum probe_action
1646 solib_event_probe_action (struct probe_and_action *pa)
1647 {
1648 enum probe_action action;
1649 unsigned probe_argc;
1650
1651 action = pa->action;
1652 if (action == DO_NOTHING || action == PROBES_INTERFACE_FAILED)
1653 return action;
1654
1655 gdb_assert (action == FULL_RELOAD || action == UPDATE_OR_RELOAD);
1656
1657 /* Check that an appropriate number of arguments has been supplied.
1658 We expect:
1659 arg0: Lmid_t lmid (mandatory)
1660 arg1: struct r_debug *debug_base (mandatory)
1661 arg2: struct link_map *new (optional, for incremental updates) */
1662 probe_argc = get_probe_argument_count (pa->probe);
1663 if (probe_argc == 2)
1664 action = FULL_RELOAD;
1665 else if (probe_argc < 2)
1666 action = PROBES_INTERFACE_FAILED;
1667
1668 return action;
1669 }
1670
1671 /* Populate the shared object list by reading the entire list of
1672 shared objects from the inferior. Handle special cases relating
1673 to the first elements of the list. Returns nonzero on success. */
1674
1675 static int
1676 solist_update_full (struct svr4_info *info)
1677 {
1678 free_solib_list (info);
1679 info->solib_list = svr4_current_sos_direct (info);
1680
1681 return 1;
1682 }
1683
1684 /* Update the shared object list starting from the link-map entry
1685 passed by the linker in the probe's third argument. Returns
1686 nonzero if the list was successfully updated, or zero to indicate
1687 failure. */
1688
1689 static int
1690 solist_update_incremental (struct svr4_info *info, CORE_ADDR lm)
1691 {
1692 struct so_list *tail;
1693 CORE_ADDR prev_lm;
1694
1695 /* svr4_current_sos_direct contains logic to handle a number of
1696 special cases relating to the first elements of the list. To
1697 avoid duplicating this logic we defer to solist_update_full
1698 if the list is empty. */
1699 if (info->solib_list == NULL)
1700 return 0;
1701
1702 /* Fall back to a full update if we are using a remote target
1703 that does not support incremental transfers. */
1704 if (info->using_xfer && !target_augmented_libraries_svr4_read ())
1705 return 0;
1706
1707 /* Walk to the end of the list. */
1708 for (tail = info->solib_list; tail->next != NULL; tail = tail->next)
1709 /* Nothing. */;
1710 prev_lm = tail->lm_info->lm_addr;
1711
1712 /* Read the new objects. */
1713 if (info->using_xfer)
1714 {
1715 struct svr4_library_list library_list;
1716 char annex[64];
1717
1718 xsnprintf (annex, sizeof (annex), "start=%s;prev=%s",
1719 phex_nz (lm, sizeof (lm)),
1720 phex_nz (prev_lm, sizeof (prev_lm)));
1721 if (!svr4_current_sos_via_xfer_libraries (&library_list, annex))
1722 return 0;
1723
1724 tail->next = library_list.head;
1725 }
1726 else
1727 {
1728 struct so_list **link = &tail->next;
1729
1730 /* IGNORE_FIRST may safely be set to zero here because the
1731 above check and deferral to solist_update_full ensures
1732 that this call to svr4_read_so_list will never see the
1733 first element. */
1734 if (!svr4_read_so_list (lm, prev_lm, &link, 0))
1735 return 0;
1736 }
1737
1738 return 1;
1739 }
1740
1741 /* Disable the probes-based linker interface and revert to the
1742 original interface. We don't reset the breakpoints as the
1743 ones set up for the probes-based interface are adequate. */
1744
1745 static void
1746 disable_probes_interface_cleanup (void *arg)
1747 {
1748 struct svr4_info *info = get_svr4_info ();
1749
1750 warning (_("Probes-based dynamic linker interface failed.\n"
1751 "Reverting to original interface.\n"));
1752
1753 free_probes_table (info);
1754 free_solib_list (info);
1755 }
1756
1757 /* Update the solib list as appropriate when using the
1758 probes-based linker interface. Do nothing if using the
1759 standard interface. */
1760
1761 static void
1762 svr4_handle_solib_event (void)
1763 {
1764 struct svr4_info *info = get_svr4_info ();
1765 struct probe_and_action *pa;
1766 enum probe_action action;
1767 struct cleanup *old_chain, *usm_chain;
1768 struct value *val;
1769 CORE_ADDR pc, debug_base, lm = 0;
1770 int is_initial_ns;
1771
1772 /* Do nothing if not using the probes interface. */
1773 if (info->probes_table == NULL)
1774 return;
1775
1776 /* If anything goes wrong we revert to the original linker
1777 interface. */
1778 old_chain = make_cleanup (disable_probes_interface_cleanup, NULL);
1779
1780 pc = regcache_read_pc (get_current_regcache ());
1781 pa = solib_event_probe_at (info, pc);
1782 if (pa == NULL)
1783 {
1784 do_cleanups (old_chain);
1785 return;
1786 }
1787
1788 action = solib_event_probe_action (pa);
1789 if (action == PROBES_INTERFACE_FAILED)
1790 {
1791 do_cleanups (old_chain);
1792 return;
1793 }
1794
1795 if (action == DO_NOTHING)
1796 {
1797 discard_cleanups (old_chain);
1798 return;
1799 }
1800
1801 /* evaluate_probe_argument looks up symbols in the dynamic linker
1802 using find_pc_section. find_pc_section is accelerated by a cache
1803 called the section map. The section map is invalidated every
1804 time a shared library is loaded or unloaded, and if the inferior
1805 is generating a lot of shared library events then the section map
1806 will be updated every time svr4_handle_solib_event is called.
1807 We called find_pc_section in svr4_create_solib_event_breakpoints,
1808 so we can guarantee that the dynamic linker's sections are in the
1809 section map. We can therefore inhibit section map updates across
1810 these calls to evaluate_probe_argument and save a lot of time. */
1811 inhibit_section_map_updates (current_program_space);
1812 usm_chain = make_cleanup (resume_section_map_updates_cleanup,
1813 current_program_space);
1814
1815 val = evaluate_probe_argument (pa->probe, 1);
1816 if (val == NULL)
1817 {
1818 do_cleanups (old_chain);
1819 return;
1820 }
1821
1822 debug_base = value_as_address (val);
1823 if (debug_base == 0)
1824 {
1825 do_cleanups (old_chain);
1826 return;
1827 }
1828
1829 /* Always locate the debug struct, in case it moved. */
1830 info->debug_base = 0;
1831 if (locate_base (info) == 0)
1832 {
1833 do_cleanups (old_chain);
1834 return;
1835 }
1836
1837 /* GDB does not currently support libraries loaded via dlmopen
1838 into namespaces other than the initial one. We must ignore
1839 any namespace other than the initial namespace here until
1840 support for this is added to GDB. */
1841 if (debug_base != info->debug_base)
1842 action = DO_NOTHING;
1843
1844 if (action == UPDATE_OR_RELOAD)
1845 {
1846 val = evaluate_probe_argument (pa->probe, 2);
1847 if (val != NULL)
1848 lm = value_as_address (val);
1849
1850 if (lm == 0)
1851 action = FULL_RELOAD;
1852 }
1853
1854 /* Resume section map updates. */
1855 do_cleanups (usm_chain);
1856
1857 if (action == UPDATE_OR_RELOAD)
1858 {
1859 if (!solist_update_incremental (info, lm))
1860 action = FULL_RELOAD;
1861 }
1862
1863 if (action == FULL_RELOAD)
1864 {
1865 if (!solist_update_full (info))
1866 {
1867 do_cleanups (old_chain);
1868 return;
1869 }
1870 }
1871
1872 discard_cleanups (old_chain);
1873 }
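/* Illustration only, not part of the original file: a minimal sketch of
   the two cleanup chains used by svr4_handle_solib_event above.
   OLD_CHAIN reverts to the original linker interface if anything fails;
   USM_CHAIN merely resumes section map updates once the probe arguments
   have been read.  The "evaluate_something" call is a hypothetical
   stand-in for the evaluate_probe_argument calls; the sketch is guarded
   out so it does not affect the build.  */
#if 0
static void
example_probe_event_cleanups (void)
{
  struct cleanup *old_chain, *usm_chain;

  old_chain = make_cleanup (disable_probes_interface_cleanup, NULL);

  inhibit_section_map_updates (current_program_space);
  usm_chain = make_cleanup (resume_section_map_updates_cleanup,
			    current_program_space);

  if (!evaluate_something ())
    {
      /* Failure: run every cleanup, resuming section map updates and
	 disabling the probes interface.  */
      do_cleanups (old_chain);
      return;
    }

  /* Success: resume section map updates only...  */
  do_cleanups (usm_chain);
  /* ...and keep the probes interface by discarding what is left.  */
  discard_cleanups (old_chain);
}
#endif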
1874
1875 /* Helper function for svr4_update_solib_event_breakpoints. */
1876
1877 static int
1878 svr4_update_solib_event_breakpoint (struct breakpoint *b, void *arg)
1879 {
1880 struct bp_location *loc;
1881
1882 if (b->type != bp_shlib_event)
1883 {
1884 /* Continue iterating. */
1885 return 0;
1886 }
1887
1888 for (loc = b->loc; loc != NULL; loc = loc->next)
1889 {
1890 struct svr4_info *info;
1891 struct probe_and_action *pa;
1892
1893 info = program_space_data (loc->pspace, solib_svr4_pspace_data);
1894 if (info == NULL || info->probes_table == NULL)
1895 continue;
1896
1897 pa = solib_event_probe_at (info, loc->address);
1898 if (pa == NULL)
1899 continue;
1900
1901 if (pa->action == DO_NOTHING)
1902 {
1903 if (b->enable_state == bp_disabled && stop_on_solib_events)
1904 enable_breakpoint (b);
1905 else if (b->enable_state == bp_enabled && !stop_on_solib_events)
1906 disable_breakpoint (b);
1907 }
1908
1909 break;
1910 }
1911
1912 /* Continue iterating. */
1913 return 0;
1914 }
1915
1916 /* Enable or disable optional solib event breakpoints as appropriate.
1917 Called whenever stop_on_solib_events is changed. */
1918
1919 static void
1920 svr4_update_solib_event_breakpoints (void)
1921 {
1922 iterate_over_breakpoints (svr4_update_solib_event_breakpoint, NULL);
1923 }
1924
1925 /* Create and register solib event breakpoints. PROBES is an array
 1926 of NUM_PROBES elements, each of which is a vector of probes. A
1927 solib event breakpoint will be created and registered for each
1928 probe. */
1929
1930 static void
1931 svr4_create_probe_breakpoints (struct gdbarch *gdbarch,
1932 VEC (probe_p) **probes)
1933 {
1934 int i;
1935
1936 for (i = 0; i < NUM_PROBES; i++)
1937 {
1938 enum probe_action action = probe_info[i].action;
1939 struct probe *probe;
1940 int ix;
1941
1942 for (ix = 0;
1943 VEC_iterate (probe_p, probes[i], ix, probe);
1944 ++ix)
1945 {
1946 create_solib_event_breakpoint (gdbarch, probe->address);
1947 register_solib_event_probe (probe, action);
1948 }
1949 }
1950
1951 svr4_update_solib_event_breakpoints ();
1952 }
1953
1954 /* Both the SunOS and the SVR4 dynamic linkers call a marker function
1955 before and after mapping and unmapping shared libraries. The sole
1956 purpose of this method is to allow debuggers to set a breakpoint so
1957 they can track these changes.
1958
1959 Some versions of the glibc dynamic linker contain named probes
1960 to allow more fine grained stopping. Given the address of the
1961 original marker function, this function attempts to find these
1962 probes, and if found, sets breakpoints on those instead. If the
1963 probes aren't found, a single breakpoint is set on the original
1964 marker function. */
1965
1966 static void
1967 svr4_create_solib_event_breakpoints (struct gdbarch *gdbarch,
1968 CORE_ADDR address)
1969 {
1970 struct obj_section *os;
1971
1972 os = find_pc_section (address);
1973 if (os != NULL)
1974 {
1975 int with_prefix;
1976
1977 for (with_prefix = 0; with_prefix <= 1; with_prefix++)
1978 {
1979 VEC (probe_p) *probes[NUM_PROBES];
1980 int all_probes_found = 1;
1981 int i;
1982
1983 memset (probes, 0, sizeof (probes));
1984 for (i = 0; i < NUM_PROBES; i++)
1985 {
1986 const char *name = probe_info[i].name;
1987 char buf[32];
1988
1989 /* Fedora 17 and Red Hat Enterprise Linux 6.2-6.4
1990 shipped with an early version of the probes code in
1991 which the probes' names were prefixed with "rtld_"
1992 and the "map_failed" probe did not exist. The
1993 locations of the probes are otherwise the same, so
1994 we check for probes with prefixed names if probes
1995 with unprefixed names are not present. */
1996 if (with_prefix)
1997 {
1998 xsnprintf (buf, sizeof (buf), "rtld_%s", name);
1999 name = buf;
2000 }
2001
2002 probes[i] = find_probes_in_objfile (os->objfile, "rtld", name);
2003
2004 /* The "map_failed" probe did not exist in early
2005 versions of the probes code in which the probes'
2006 names were prefixed with "rtld_". */
2007 if (strcmp (name, "rtld_map_failed") == 0)
2008 continue;
2009
2010 if (VEC_empty (probe_p, probes[i]))
2011 {
2012 all_probes_found = 0;
2013 break;
2014 }
2015 }
2016
2017 if (all_probes_found)
2018 svr4_create_probe_breakpoints (gdbarch, probes);
2019
2020 for (i = 0; i < NUM_PROBES; i++)
2021 VEC_free (probe_p, probes[i]);
2022
2023 if (all_probes_found)
2024 return;
2025 }
2026 }
2027
2028 create_solib_event_breakpoint (gdbarch, address);
2029 }
2030
2031 /* Helper function for gdb_bfd_lookup_symbol. */
2032
2033 static int
2034 cmp_name_and_sec_flags (asymbol *sym, void *data)
2035 {
2036 return (strcmp (sym->name, (const char *) data) == 0
2037 && (sym->section->flags & (SEC_CODE | SEC_DATA)) != 0);
2038 }
2039 /* Arrange for dynamic linker to hit breakpoint.
2040
2041 Both the SunOS and the SVR4 dynamic linkers have, as part of their
2042 debugger interface, support for arranging for the inferior to hit
2043 a breakpoint after mapping in the shared libraries. This function
2044 enables that breakpoint.
2045
2046 For SunOS, there is a special flag location (in_debugger) which we
2047 set to 1. When the dynamic linker sees this flag set, it will set
2048 a breakpoint at a location known only to itself, after saving the
2049 original contents of that place and the breakpoint address itself,
 2050 in its own internal structures. When we resume the inferior, it
2051 will eventually take a SIGTRAP when it runs into the breakpoint.
2052 We handle this (in a different place) by restoring the contents of
2053 the breakpointed location (which is only known after it stops),
2054 chasing around to locate the shared libraries that have been
2055 loaded, then resuming.
2056
2057 For SVR4, the debugger interface structure contains a member (r_brk)
2058 which is statically initialized at the time the shared library is
 2059 built, to the offset of a function (_r_debug_state) which is
 2060 guaranteed to be called once before mapping in a library, and again when
2061 the mapping is complete. At the time we are examining this member,
2062 it contains only the unrelocated offset of the function, so we have
2063 to do our own relocation. Later, when the dynamic linker actually
2064 runs, it relocates r_brk to be the actual address of _r_debug_state().
2065
2066 The debugger interface structure also contains an enumeration which
2067 is set to either RT_ADD or RT_DELETE prior to changing the mapping,
 2068 depending upon whether the library is being mapped or unmapped,
2069 and then set to RT_CONSISTENT after the library is mapped/unmapped. */
2070
2071 static int
2072 enable_break (struct svr4_info *info, int from_tty)
2073 {
2074 struct minimal_symbol *msymbol;
2075 const char * const *bkpt_namep;
2076 asection *interp_sect;
2077 char *interp_name;
2078 CORE_ADDR sym_addr;
2079
2080 info->interp_text_sect_low = info->interp_text_sect_high = 0;
2081 info->interp_plt_sect_low = info->interp_plt_sect_high = 0;
2082
2083 /* If we already have a shared library list in the target, and
2084 r_debug contains r_brk, set the breakpoint there - this should
2085 mean r_brk has already been relocated. Assume the dynamic linker
2086 is the object containing r_brk. */
2087
2088 solib_add (NULL, from_tty, &current_target, auto_solib_add);
2089 sym_addr = 0;
2090 if (info->debug_base && solib_svr4_r_map (info) != 0)
2091 sym_addr = solib_svr4_r_brk (info);
2092
2093 if (sym_addr != 0)
2094 {
2095 struct obj_section *os;
2096
2097 sym_addr = gdbarch_addr_bits_remove
2098 (target_gdbarch (), gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
2099 sym_addr,
2100 &current_target));
2101
2102 /* On at least some versions of Solaris there's a dynamic relocation
2103 on _r_debug.r_brk and SYM_ADDR may not be relocated yet, e.g., if
2104 we get control before the dynamic linker has self-relocated.
 2105 Check if SYM_ADDR is in a known section; if it is, assume we can
2106 trust its value. This is just a heuristic though, it could go away
2107 or be replaced if it's getting in the way.
2108
2109 On ARM we need to know whether the ISA of rtld_db_dlactivity (or
2110 however it's spelled in your particular system) is ARM or Thumb.
2111 That knowledge is encoded in the address, if it's Thumb the low bit
2112 is 1. However, we've stripped that info above and it's not clear
2113 what all the consequences are of passing a non-addr_bits_remove'd
2114 address to svr4_create_solib_event_breakpoints. The call to
2115 find_pc_section verifies we know about the address and have some
2116 hope of computing the right kind of breakpoint to use (via
2117 symbol info). It does mean that GDB needs to be pointed at a
2118 non-stripped version of the dynamic linker in order to obtain
2119 information it already knows about. Sigh. */
2120
2121 os = find_pc_section (sym_addr);
2122 if (os != NULL)
2123 {
2124 /* Record the relocated start and end address of the dynamic linker
2125 text and plt section for svr4_in_dynsym_resolve_code. */
2126 bfd *tmp_bfd;
2127 CORE_ADDR load_addr;
2128
2129 tmp_bfd = os->objfile->obfd;
2130 load_addr = ANOFFSET (os->objfile->section_offsets,
2131 SECT_OFF_TEXT (os->objfile));
2132
2133 interp_sect = bfd_get_section_by_name (tmp_bfd, ".text");
2134 if (interp_sect)
2135 {
2136 info->interp_text_sect_low =
2137 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
2138 info->interp_text_sect_high =
2139 info->interp_text_sect_low
2140 + bfd_section_size (tmp_bfd, interp_sect);
2141 }
2142 interp_sect = bfd_get_section_by_name (tmp_bfd, ".plt");
2143 if (interp_sect)
2144 {
2145 info->interp_plt_sect_low =
2146 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
2147 info->interp_plt_sect_high =
2148 info->interp_plt_sect_low
2149 + bfd_section_size (tmp_bfd, interp_sect);
2150 }
2151
2152 svr4_create_solib_event_breakpoints (target_gdbarch (), sym_addr);
2153 return 1;
2154 }
2155 }
2156
2157 /* Find the program interpreter; if not found, warn the user and drop
 2158 back to the old breakpoint-at-symbol code. */
2159 interp_name = find_program_interpreter ();
2160 if (interp_name)
2161 {
2162 CORE_ADDR load_addr = 0;
2163 int load_addr_found = 0;
2164 int loader_found_in_list = 0;
2165 struct so_list *so;
2166 bfd *tmp_bfd = NULL;
2167 struct target_ops *tmp_bfd_target;
2168 volatile struct gdb_exception ex;
2169
2170 sym_addr = 0;
2171
2172 /* Now we need to figure out where the dynamic linker was
2173 loaded so that we can load its symbols and place a breakpoint
2174 in the dynamic linker itself.
2175
2176 This address is stored on the stack. However, I've been unable
2177 to find any magic formula to find it for Solaris (appears to
2178 be trivial on GNU/Linux). Therefore, we have to try an alternate
2179 mechanism to find the dynamic linker's base address. */
2180
2181 TRY_CATCH (ex, RETURN_MASK_ALL)
2182 {
2183 tmp_bfd = solib_bfd_open (interp_name);
2184 }
2185 if (tmp_bfd == NULL)
2186 goto bkpt_at_symbol;
2187
 2188 /* Now convert the TMP_BFD into a target. That way both target and
 2189 BFD operations can be used. */
2190 tmp_bfd_target = target_bfd_reopen (tmp_bfd);
2191 /* target_bfd_reopen acquired its own reference, so we can
2192 release ours now. */
2193 gdb_bfd_unref (tmp_bfd);
2194
2195 /* On a running target, we can get the dynamic linker's base
2196 address from the shared library table. */
2197 so = master_so_list ();
2198 while (so)
2199 {
2200 if (svr4_same_1 (interp_name, so->so_original_name))
2201 {
2202 load_addr_found = 1;
2203 loader_found_in_list = 1;
2204 load_addr = lm_addr_check (so, tmp_bfd);
2205 break;
2206 }
2207 so = so->next;
2208 }
2209
2210 /* If we were not able to find the base address of the loader
 2211 from our so_list, then try using the AT_BASE auxiliary entry. */
2212 if (!load_addr_found)
2213 if (target_auxv_search (&current_target, AT_BASE, &load_addr) > 0)
2214 {
2215 int addr_bit = gdbarch_addr_bit (target_gdbarch ());
2216
 2217 /* Ensure LOAD_ADDR has the proper sign in its possible upper bits so
 2218 that `+ load_addr' will overflow the CORE_ADDR width rather than create
 2219 invalid addresses like 0x101234567 for 32-bit inferiors on 64-bit
 2220 GDB. */
2221
2222 if (addr_bit < (sizeof (CORE_ADDR) * HOST_CHAR_BIT))
2223 {
2224 CORE_ADDR space_size = (CORE_ADDR) 1 << addr_bit;
2225 CORE_ADDR tmp_entry_point = exec_entry_point (tmp_bfd,
2226 tmp_bfd_target);
2227
2228 gdb_assert (load_addr < space_size);
2229
2230 /* TMP_ENTRY_POINT exceeding SPACE_SIZE would be for prelinked
 2231 64-bit ld.so with a 32-bit executable; it should not happen. */
2232
2233 if (tmp_entry_point < space_size
2234 && tmp_entry_point + load_addr >= space_size)
2235 load_addr -= space_size;
2236 }
2237
2238 load_addr_found = 1;
2239 }
2240
2241 /* Otherwise we find the dynamic linker's base address by examining
2242 the current pc (which should point at the entry point for the
2243 dynamic linker) and subtracting the offset of the entry point.
2244
2245 This is more fragile than the previous approaches, but is a good
2246 fallback method because it has actually been working well in
2247 most cases. */
2248 if (!load_addr_found)
2249 {
2250 struct regcache *regcache
2251 = get_thread_arch_regcache (inferior_ptid, target_gdbarch ());
2252
2253 load_addr = (regcache_read_pc (regcache)
2254 - exec_entry_point (tmp_bfd, tmp_bfd_target));
2255 }
2256
2257 if (!loader_found_in_list)
2258 {
2259 info->debug_loader_name = xstrdup (interp_name);
2260 info->debug_loader_offset_p = 1;
2261 info->debug_loader_offset = load_addr;
2262 solib_add (NULL, from_tty, &current_target, auto_solib_add);
2263 }
2264
2265 /* Record the relocated start and end address of the dynamic linker
2266 text and plt section for svr4_in_dynsym_resolve_code. */
2267 interp_sect = bfd_get_section_by_name (tmp_bfd, ".text");
2268 if (interp_sect)
2269 {
2270 info->interp_text_sect_low =
2271 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
2272 info->interp_text_sect_high =
2273 info->interp_text_sect_low
2274 + bfd_section_size (tmp_bfd, interp_sect);
2275 }
2276 interp_sect = bfd_get_section_by_name (tmp_bfd, ".plt");
2277 if (interp_sect)
2278 {
2279 info->interp_plt_sect_low =
2280 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
2281 info->interp_plt_sect_high =
2282 info->interp_plt_sect_low
2283 + bfd_section_size (tmp_bfd, interp_sect);
2284 }
2285
2286 /* Now try to set a breakpoint in the dynamic linker. */
2287 for (bkpt_namep = solib_break_names; *bkpt_namep != NULL; bkpt_namep++)
2288 {
2289 sym_addr = gdb_bfd_lookup_symbol (tmp_bfd, cmp_name_and_sec_flags,
2290 (void *) *bkpt_namep);
2291 if (sym_addr != 0)
2292 break;
2293 }
2294
2295 if (sym_addr != 0)
2296 /* Convert 'sym_addr' from a function pointer to an address.
2297 Because we pass tmp_bfd_target instead of the current
2298 target, this will always produce an unrelocated value. */
2299 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
2300 sym_addr,
2301 tmp_bfd_target);
2302
2303 /* We're done with both the temporary bfd and target. Closing
2304 the target closes the underlying bfd, because it holds the
2305 only remaining reference. */
2306 target_close (tmp_bfd_target);
2307
2308 if (sym_addr != 0)
2309 {
2310 svr4_create_solib_event_breakpoints (target_gdbarch (),
2311 load_addr + sym_addr);
2312 xfree (interp_name);
2313 return 1;
2314 }
2315
2316 /* For whatever reason we couldn't set a breakpoint in the dynamic
2317 linker. Warn and drop into the old code. */
2318 bkpt_at_symbol:
2319 xfree (interp_name);
2320 warning (_("Unable to find dynamic linker breakpoint function.\n"
2321 "GDB will be unable to debug shared library initializers\n"
2322 "and track explicitly loaded dynamic code."));
2323 }
2324
2325 /* Scan through the lists of symbols, trying to look up the symbol and
 2326 set a breakpoint there. Terminate the loop if/when we succeed. */
2327
2328 for (bkpt_namep = solib_break_names; *bkpt_namep != NULL; bkpt_namep++)
2329 {
2330 msymbol = lookup_minimal_symbol (*bkpt_namep, NULL, symfile_objfile);
2331 if ((msymbol != NULL) && (SYMBOL_VALUE_ADDRESS (msymbol) != 0))
2332 {
2333 sym_addr = SYMBOL_VALUE_ADDRESS (msymbol);
2334 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
2335 sym_addr,
2336 &current_target);
2337 svr4_create_solib_event_breakpoints (target_gdbarch (), sym_addr);
2338 return 1;
2339 }
2340 }
2341
2342 if (interp_name != NULL && !current_inferior ()->attach_flag)
2343 {
2344 for (bkpt_namep = bkpt_names; *bkpt_namep != NULL; bkpt_namep++)
2345 {
2346 msymbol = lookup_minimal_symbol (*bkpt_namep, NULL, symfile_objfile);
2347 if ((msymbol != NULL) && (SYMBOL_VALUE_ADDRESS (msymbol) != 0))
2348 {
2349 sym_addr = SYMBOL_VALUE_ADDRESS (msymbol);
2350 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
2351 sym_addr,
2352 &current_target);
2353 svr4_create_solib_event_breakpoints (target_gdbarch (), sym_addr);
2354 return 1;
2355 }
2356 }
2357 }
2358 return 0;
2359 }
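/* Illustration only, not part of the original file: the SVR4/glibc
   `struct r_debug' that the comment above enable_break describes,
   sketched roughly as it appears in glibc's <link.h>.  GDB never uses
   this declaration directly; it reads the fields through `struct
   link_map_offsets' instead.  Guarded out so it does not affect the
   build.  */
#if 0
struct example_r_debug
{
  int r_version;		/* Protocol version.  */
  struct link_map *r_map;	/* Head of the chain of loaded objects.  */
  ElfW (Addr) r_brk;		/* Address of the marker function; GDB
				   places its breakpoint here.  */
  enum
    {
      RT_CONSISTENT,		/* Mapping change is complete.  */
      RT_ADD,			/* Beginning to add a new object.  */
      RT_DELETE			/* Beginning to remove an object.  */
    } r_state;
  ElfW (Addr) r_ldbase;		/* Base address of the dynamic linker.  */
};
#endif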
2360
2361 /* Implement the "special_symbol_handling" target_so_ops method. */
2362
2363 static void
2364 svr4_special_symbol_handling (void)
2365 {
2366 /* Nothing to do. */
2367 }
2368
2369 /* Read the ELF program headers from ABFD. Return the contents and
2370 set *PHDRS_SIZE to the size of the program headers. */
2371
2372 static gdb_byte *
2373 read_program_headers_from_bfd (bfd *abfd, int *phdrs_size)
2374 {
2375 Elf_Internal_Ehdr *ehdr;
2376 gdb_byte *buf;
2377
2378 ehdr = elf_elfheader (abfd);
2379
2380 *phdrs_size = ehdr->e_phnum * ehdr->e_phentsize;
2381 if (*phdrs_size == 0)
2382 return NULL;
2383
2384 buf = xmalloc (*phdrs_size);
2385 if (bfd_seek (abfd, ehdr->e_phoff, SEEK_SET) != 0
2386 || bfd_bread (buf, *phdrs_size, abfd) != *phdrs_size)
2387 {
2388 xfree (buf);
2389 return NULL;
2390 }
2391
2392 return buf;
2393 }
2394
 2395 /* Return 1 and fill *DISPLACEMENTP with the detected PIE displacement of
 2396 the inferior's exec_bfd. Otherwise return 0.
2397
2398 We relocate all of the sections by the same amount. This
2399 behavior is mandated by recent editions of the System V ABI.
2400 According to the System V Application Binary Interface,
2401 Edition 4.1, page 5-5:
2402
2403 ... Though the system chooses virtual addresses for
2404 individual processes, it maintains the segments' relative
2405 positions. Because position-independent code uses relative
 2406 addressing between segments, the difference between
2407 virtual addresses in memory must match the difference
2408 between virtual addresses in the file. The difference
2409 between the virtual address of any segment in memory and
2410 the corresponding virtual address in the file is thus a
2411 single constant value for any one executable or shared
2412 object in a given process. This difference is the base
2413 address. One use of the base address is to relocate the
2414 memory image of the program during dynamic linking.
2415
2416 The same language also appears in Edition 4.0 of the System V
2417 ABI and is left unspecified in some of the earlier editions.
2418
2419 Decide if the objfile needs to be relocated. As indicated above, we will
 2420 only be here when execution is stopped. But during attachment the PC can
 2421 be at an arbitrary address, so regcache_read_pc can be misleading (contrary
 2422 to the auxv AT_ENTRY value). Moreover, for an executable with an interpreter
 2423 section, regcache_read_pc would point to the interpreter, not the main executable.
2424
2425 So, to summarize, relocations are necessary when the start address obtained
 2426 from the executable is different from the address in the auxv AT_ENTRY entry.
2427
2428 [ The astute reader will note that we also test to make sure that
2429 the executable in question has the DYNAMIC flag set. It is my
2430 opinion that this test is unnecessary (undesirable even). It
2431 was added to avoid inadvertent relocation of an executable
2432 whose e_type member in the ELF header is not ET_DYN. There may
2433 be a time in the future when it is desirable to do relocations
2434 on other types of files as well in which case this condition
 2435 should either be removed or modified to accommodate the new file
2436 type. - Kevin, Nov 2000. ] */
2437
2438 static int
2439 svr4_exec_displacement (CORE_ADDR *displacementp)
2440 {
2441 /* ENTRY_POINT is a possible function descriptor - before
2442 a call to gdbarch_convert_from_func_ptr_addr. */
2443 CORE_ADDR entry_point, displacement;
2444
2445 if (exec_bfd == NULL)
2446 return 0;
2447
 2448 /* If the DYNAMIC flag is unset, an ELF executable is ET_EXEC rather than
 2449 ET_DYN. Both shared libraries being executed themselves and PIE (Position
 2450 Independent Executable) executables are ET_DYN and carry that flag. */
2451
2452 if ((bfd_get_file_flags (exec_bfd) & DYNAMIC) == 0)
2453 return 0;
2454
2455 if (target_auxv_search (&current_target, AT_ENTRY, &entry_point) <= 0)
2456 return 0;
2457
2458 displacement = entry_point - bfd_get_start_address (exec_bfd);
2459
2460 /* Verify the DISPLACEMENT candidate complies with the required page
2461 alignment. It is cheaper than the program headers comparison below. */
2462
2463 if (bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
2464 {
2465 const struct elf_backend_data *elf = get_elf_backend_data (exec_bfd);
2466
2467 /* p_align of PT_LOAD segments does not specify any alignment but
2468 only congruency of addresses:
2469 p_offset % p_align == p_vaddr % p_align
 2470 The kernel is free to load the executable with lower alignment. */
2471
2472 if ((displacement & (elf->minpagesize - 1)) != 0)
2473 return 0;
2474 }
2475
 2476 /* Verify that the auxiliary vector describes the same file as exec_bfd, by
 2477 comparing their program headers. If the program headers in the auxiliary
2478 vector do not match the program headers in the executable, then we are
2479 looking at a different file than the one used by the kernel - for
2480 instance, "gdb program" connected to "gdbserver :PORT ld.so program". */
2481
2482 if (bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
2483 {
2484 /* Be optimistic and clear OK only if GDB was able to verify the headers
2485 really do not match. */
2486 int phdrs_size, phdrs2_size, ok = 1;
2487 gdb_byte *buf, *buf2;
2488 int arch_size;
2489
2490 buf = read_program_header (-1, &phdrs_size, &arch_size);
2491 buf2 = read_program_headers_from_bfd (exec_bfd, &phdrs2_size);
2492 if (buf != NULL && buf2 != NULL)
2493 {
2494 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
2495
2496 /* We are dealing with three different addresses. EXEC_BFD
 2497 represents the current addresses in the on-disk file. The target memory
 2498 content may differ from EXEC_BFD, as the file may have been prelinked
 2499 to a different address after the executable was loaded.
2500 Moreover the address of placement in target memory can be
2501 different from what the program headers in target memory say -
2502 this is the goal of PIE.
2503
2504 Detected DISPLACEMENT covers both the offsets of PIE placement and
2505 possible new prelink performed after start of the program. Here
2506 relocate BUF and BUF2 just by the EXEC_BFD vs. target memory
2507 content offset for the verification purpose. */
2508
2509 if (phdrs_size != phdrs2_size
2510 || bfd_get_arch_size (exec_bfd) != arch_size)
2511 ok = 0;
2512 else if (arch_size == 32
2513 && phdrs_size >= sizeof (Elf32_External_Phdr)
2514 && phdrs_size % sizeof (Elf32_External_Phdr) == 0)
2515 {
2516 Elf_Internal_Ehdr *ehdr2 = elf_tdata (exec_bfd)->elf_header;
2517 Elf_Internal_Phdr *phdr2 = elf_tdata (exec_bfd)->phdr;
2518 CORE_ADDR displacement = 0;
2519 int i;
2520
2521 /* DISPLACEMENT could be found more easily by the difference of
2522 ehdr2->e_entry. But we haven't read the ehdr yet, and we
2523 already have enough information to compute that displacement
2524 with what we've read. */
2525
2526 for (i = 0; i < ehdr2->e_phnum; i++)
2527 if (phdr2[i].p_type == PT_LOAD)
2528 {
2529 Elf32_External_Phdr *phdrp;
2530 gdb_byte *buf_vaddr_p, *buf_paddr_p;
2531 CORE_ADDR vaddr, paddr;
2532 CORE_ADDR displacement_vaddr = 0;
2533 CORE_ADDR displacement_paddr = 0;
2534
2535 phdrp = &((Elf32_External_Phdr *) buf)[i];
2536 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
2537 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
2538
2539 vaddr = extract_unsigned_integer (buf_vaddr_p, 4,
2540 byte_order);
2541 displacement_vaddr = vaddr - phdr2[i].p_vaddr;
2542
2543 paddr = extract_unsigned_integer (buf_paddr_p, 4,
2544 byte_order);
2545 displacement_paddr = paddr - phdr2[i].p_paddr;
2546
2547 if (displacement_vaddr == displacement_paddr)
2548 displacement = displacement_vaddr;
2549
2550 break;
2551 }
2552
2553 /* Now compare BUF and BUF2 with optional DISPLACEMENT. */
2554
2555 for (i = 0; i < phdrs_size / sizeof (Elf32_External_Phdr); i++)
2556 {
2557 Elf32_External_Phdr *phdrp;
2558 Elf32_External_Phdr *phdr2p;
2559 gdb_byte *buf_vaddr_p, *buf_paddr_p;
2560 CORE_ADDR vaddr, paddr;
2561 asection *plt2_asect;
2562
2563 phdrp = &((Elf32_External_Phdr *) buf)[i];
2564 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
2565 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
2566 phdr2p = &((Elf32_External_Phdr *) buf2)[i];
2567
 2568 /* PT_GNU_STACK is an exception: it is never relocated by
 2569 prelink, as its addresses are always zero. */
2570
2571 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2572 continue;
2573
2574 /* Check also other adjustment combinations - PR 11786. */
2575
2576 vaddr = extract_unsigned_integer (buf_vaddr_p, 4,
2577 byte_order);
2578 vaddr -= displacement;
2579 store_unsigned_integer (buf_vaddr_p, 4, byte_order, vaddr);
2580
2581 paddr = extract_unsigned_integer (buf_paddr_p, 4,
2582 byte_order);
2583 paddr -= displacement;
2584 store_unsigned_integer (buf_paddr_p, 4, byte_order, paddr);
2585
2586 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2587 continue;
2588
2589 /* prelink can convert .plt SHT_NOBITS to SHT_PROGBITS. */
2590 plt2_asect = bfd_get_section_by_name (exec_bfd, ".plt");
2591 if (plt2_asect)
2592 {
2593 int content2;
2594 gdb_byte *buf_filesz_p = (gdb_byte *) &phdrp->p_filesz;
2595 CORE_ADDR filesz;
2596
2597 content2 = (bfd_get_section_flags (exec_bfd, plt2_asect)
2598 & SEC_HAS_CONTENTS) != 0;
2599
2600 filesz = extract_unsigned_integer (buf_filesz_p, 4,
2601 byte_order);
2602
2603 /* PLT2_ASECT is from on-disk file (exec_bfd) while
2604 FILESZ is from the in-memory image. */
2605 if (content2)
2606 filesz += bfd_get_section_size (plt2_asect);
2607 else
2608 filesz -= bfd_get_section_size (plt2_asect);
2609
2610 store_unsigned_integer (buf_filesz_p, 4, byte_order,
2611 filesz);
2612
2613 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2614 continue;
2615 }
2616
2617 ok = 0;
2618 break;
2619 }
2620 }
2621 else if (arch_size == 64
2622 && phdrs_size >= sizeof (Elf64_External_Phdr)
2623 && phdrs_size % sizeof (Elf64_External_Phdr) == 0)
2624 {
2625 Elf_Internal_Ehdr *ehdr2 = elf_tdata (exec_bfd)->elf_header;
2626 Elf_Internal_Phdr *phdr2 = elf_tdata (exec_bfd)->phdr;
2627 CORE_ADDR displacement = 0;
2628 int i;
2629
2630 /* DISPLACEMENT could be found more easily by the difference of
2631 ehdr2->e_entry. But we haven't read the ehdr yet, and we
2632 already have enough information to compute that displacement
2633 with what we've read. */
2634
2635 for (i = 0; i < ehdr2->e_phnum; i++)
2636 if (phdr2[i].p_type == PT_LOAD)
2637 {
2638 Elf64_External_Phdr *phdrp;
2639 gdb_byte *buf_vaddr_p, *buf_paddr_p;
2640 CORE_ADDR vaddr, paddr;
2641 CORE_ADDR displacement_vaddr = 0;
2642 CORE_ADDR displacement_paddr = 0;
2643
2644 phdrp = &((Elf64_External_Phdr *) buf)[i];
2645 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
2646 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
2647
2648 vaddr = extract_unsigned_integer (buf_vaddr_p, 8,
2649 byte_order);
2650 displacement_vaddr = vaddr - phdr2[i].p_vaddr;
2651
2652 paddr = extract_unsigned_integer (buf_paddr_p, 8,
2653 byte_order);
2654 displacement_paddr = paddr - phdr2[i].p_paddr;
2655
2656 if (displacement_vaddr == displacement_paddr)
2657 displacement = displacement_vaddr;
2658
2659 break;
2660 }
2661
2662 /* Now compare BUF and BUF2 with optional DISPLACEMENT. */
2663
2664 for (i = 0; i < phdrs_size / sizeof (Elf64_External_Phdr); i++)
2665 {
2666 Elf64_External_Phdr *phdrp;
2667 Elf64_External_Phdr *phdr2p;
2668 gdb_byte *buf_vaddr_p, *buf_paddr_p;
2669 CORE_ADDR vaddr, paddr;
2670 asection *plt2_asect;
2671
2672 phdrp = &((Elf64_External_Phdr *) buf)[i];
2673 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
2674 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
2675 phdr2p = &((Elf64_External_Phdr *) buf2)[i];
2676
 2677 /* PT_GNU_STACK is an exception: it is never relocated by
 2678 prelink, as its addresses are always zero. */
2679
2680 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2681 continue;
2682
2683 /* Check also other adjustment combinations - PR 11786. */
2684
2685 vaddr = extract_unsigned_integer (buf_vaddr_p, 8,
2686 byte_order);
2687 vaddr -= displacement;
2688 store_unsigned_integer (buf_vaddr_p, 8, byte_order, vaddr);
2689
2690 paddr = extract_unsigned_integer (buf_paddr_p, 8,
2691 byte_order);
2692 paddr -= displacement;
2693 store_unsigned_integer (buf_paddr_p, 8, byte_order, paddr);
2694
2695 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2696 continue;
2697
2698 /* prelink can convert .plt SHT_NOBITS to SHT_PROGBITS. */
2699 plt2_asect = bfd_get_section_by_name (exec_bfd, ".plt");
2700 if (plt2_asect)
2701 {
2702 int content2;
2703 gdb_byte *buf_filesz_p = (gdb_byte *) &phdrp->p_filesz;
2704 CORE_ADDR filesz;
2705
2706 content2 = (bfd_get_section_flags (exec_bfd, plt2_asect)
2707 & SEC_HAS_CONTENTS) != 0;
2708
2709 filesz = extract_unsigned_integer (buf_filesz_p, 8,
2710 byte_order);
2711
2712 /* PLT2_ASECT is from on-disk file (exec_bfd) while
2713 FILESZ is from the in-memory image. */
2714 if (content2)
2715 filesz += bfd_get_section_size (plt2_asect);
2716 else
2717 filesz -= bfd_get_section_size (plt2_asect);
2718
2719 store_unsigned_integer (buf_filesz_p, 8, byte_order,
2720 filesz);
2721
2722 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2723 continue;
2724 }
2725
2726 ok = 0;
2727 break;
2728 }
2729 }
2730 else
2731 ok = 0;
2732 }
2733
2734 xfree (buf);
2735 xfree (buf2);
2736
2737 if (!ok)
2738 return 0;
2739 }
2740
2741 if (info_verbose)
2742 {
 2743 /* This may be printed repeatedly, as there is no easy way to check
 2744 whether the executable's symbols/file have already been relocated by
 2745 the displacement. */
2746
2747 printf_unfiltered (_("Using PIE (Position Independent Executable) "
2748 "displacement %s for \"%s\".\n"),
2749 paddress (target_gdbarch (), displacement),
2750 bfd_get_filename (exec_bfd));
2751 }
2752
2753 *displacementp = displacement;
2754 return 1;
2755 }
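/* Illustration only, not part of the original file: the core of the
   displacement detection above, with hypothetical numbers.  For a PIE
   whose on-disk e_entry is 0x740 and whose auxv AT_ENTRY is
   0x555555554740, the candidate displacement is 0x555555554000, which
   passes the page-alignment test for a 4 KiB minimum page size.
   Guarded out so it does not affect the build.  */
#if 0
static int
example_exec_displacement (CORE_ADDR at_entry, CORE_ADDR file_entry,
			   CORE_ADDR minpagesize, CORE_ADDR *displacementp)
{
  CORE_ADDR displacement = at_entry - file_entry;

  /* A candidate that is not congruent modulo the page size cannot be a
     load bias chosen by the kernel.  */
  if ((displacement & (minpagesize - 1)) != 0)
    return 0;

  *displacementp = displacement;
  return 1;
}
#endif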
2756
2757 /* Relocate the main executable. This function should be called upon
2758 stopping the inferior process at the entry point to the program.
2759 The entry point from BFD is compared to the AT_ENTRY of AUXV and if they are
2760 different, the main executable is relocated by the proper amount. */
2761
2762 static void
2763 svr4_relocate_main_executable (void)
2764 {
2765 CORE_ADDR displacement;
2766
2767 /* If we are re-running this executable, SYMFILE_OBJFILE->SECTION_OFFSETS
2768 probably contains the offsets computed using the PIE displacement
2769 from the previous run, which of course are irrelevant for this run.
2770 So we need to determine the new PIE displacement and recompute the
2771 section offsets accordingly, even if SYMFILE_OBJFILE->SECTION_OFFSETS
2772 already contains pre-computed offsets.
2773
2774 If we cannot compute the PIE displacement, either:
2775
2776 - The executable is not PIE.
2777
2778 - SYMFILE_OBJFILE does not match the executable started in the target.
2779 This can happen for main executable symbols loaded at the host while
2780 `ld.so --ld-args main-executable' is loaded in the target.
2781
2782 Then we leave the section offsets untouched and use them as is for
2783 this run. Either:
2784
2785 - These section offsets were properly reset earlier, and thus
2786 already contain the correct values. This can happen for instance
2787 when reconnecting via the remote protocol to a target that supports
2788 the `qOffsets' packet.
2789
2790 - The section offsets were not reset earlier, and the best we can
2791 hope is that the old offsets are still applicable to the new run. */
2792
2793 if (! svr4_exec_displacement (&displacement))
2794 return;
2795
2796 /* Even DISPLACEMENT 0 is a valid new difference of in-memory vs. in-file
2797 addresses. */
2798
2799 if (symfile_objfile)
2800 {
2801 struct section_offsets *new_offsets;
2802 int i;
2803
2804 new_offsets = alloca (symfile_objfile->num_sections
2805 * sizeof (*new_offsets));
2806
2807 for (i = 0; i < symfile_objfile->num_sections; i++)
2808 new_offsets->offsets[i] = displacement;
2809
2810 objfile_relocate (symfile_objfile, new_offsets);
2811 }
2812 else if (exec_bfd)
2813 {
2814 asection *asect;
2815
2816 for (asect = exec_bfd->sections; asect != NULL; asect = asect->next)
2817 exec_set_section_address (bfd_get_filename (exec_bfd), asect->index,
2818 (bfd_section_vma (exec_bfd, asect)
2819 + displacement));
2820 }
2821 }
2822
 2823 /* Implement the "create_inferior_hook" target_so_ops method.
2824
 2825 For SVR4 executables, the first instruction executed is either the first
2826 instruction in the dynamic linker (for dynamically linked
2827 executables) or the instruction at "start" for statically linked
2828 executables. For dynamically linked executables, the system
2829 first exec's /lib/libc.so.N, which contains the dynamic linker,
2830 and starts it running. The dynamic linker maps in any needed
2831 shared libraries, maps in the actual user executable, and then
2832 jumps to "start" in the user executable.
2833
2834 We can arrange to cooperate with the dynamic linker to discover the
2835 names of shared libraries that are dynamically linked, and the base
2836 addresses to which they are linked.
2837
2838 This function is responsible for discovering those names and
2839 addresses, and saving sufficient information about them to allow
2840 their symbols to be read at a later time. */
2841
2842 static void
2843 svr4_solib_create_inferior_hook (int from_tty)
2844 {
2845 struct svr4_info *info;
2846
2847 info = get_svr4_info ();
2848
2849 /* Clear the probes-based interface's state. */
2850 free_probes_table (info);
2851 free_solib_list (info);
2852
2853 /* Relocate the main executable if necessary. */
2854 svr4_relocate_main_executable ();
2855
2856 /* No point setting a breakpoint in the dynamic linker if we can't
2857 hit it (e.g., a core file, or a trace file). */
2858 if (!target_has_execution)
2859 return;
2860
2861 if (!svr4_have_link_map_offsets ())
2862 return;
2863
2864 if (!enable_break (info, from_tty))
2865 return;
2866 }
2867
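/* Implement the "clear_solib" target_so_ops method.  */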
2868 static void
2869 svr4_clear_solib (void)
2870 {
2871 struct svr4_info *info;
2872
2873 info = get_svr4_info ();
2874 info->debug_base = 0;
2875 info->debug_loader_offset_p = 0;
2876 info->debug_loader_offset = 0;
2877 xfree (info->debug_loader_name);
2878 info->debug_loader_name = NULL;
2879 }
2880
2881 /* Clear any bits of ADDR that wouldn't fit in a target-format
2882 data pointer. "Data pointer" here refers to whatever sort of
2883 address the dynamic linker uses to manage its sections. At the
2884 moment, we don't support shared libraries on any processors where
2885 code and data pointers are different sizes.
2886
2887 This isn't really the right solution. What we really need here is
2888 a way to do arithmetic on CORE_ADDR values that respects the
2889 natural pointer/address correspondence. (For example, on the MIPS,
2890 converting a 32-bit pointer to a 64-bit CORE_ADDR requires you to
2891 sign-extend the value. There, simply truncating the bits above
2892 gdbarch_ptr_bit, as we do below, is no good.) This should probably
2893 be a new gdbarch method or something. */
2894 static CORE_ADDR
2895 svr4_truncate_ptr (CORE_ADDR addr)
2896 {
2897 if (gdbarch_ptr_bit (target_gdbarch ()) == sizeof (CORE_ADDR) * 8)
2898 /* We don't need to truncate anything, and the bit twiddling below
2899 will fail due to overflow problems. */
2900 return addr;
2901 else
2902 return addr & (((CORE_ADDR) 1 << gdbarch_ptr_bit (target_gdbarch ())) - 1);
2903 }
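/* Illustration only, not part of the original file, assuming a target
   whose gdbarch_ptr_bit is 32 while the host CORE_ADDR is 64 bits wide:
   svr4_truncate_ptr simply masks away the bits a target data pointer
   cannot hold.  Guarded out so it does not affect the build.  */
#if 0
static void
example_truncate_ptr (void)
{
  /* 0x1234567890 & 0xffffffff == 0x34567890.  */
  gdb_assert (svr4_truncate_ptr ((CORE_ADDR) 0x1234567890ULL) == 0x34567890);
}
#endif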
2904
2905
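/* Implement the "relocate_section_addresses" target_so_ops method.  */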
2906 static void
2907 svr4_relocate_section_addresses (struct so_list *so,
2908 struct target_section *sec)
2909 {
2910 sec->addr = svr4_truncate_ptr (sec->addr + lm_addr_check (so,
2911 sec->bfd));
2912 sec->endaddr = svr4_truncate_ptr (sec->endaddr + lm_addr_check (so,
2913 sec->bfd));
2914 }
2915 \f
2916
2917 /* Architecture-specific operations. */
2918
2919 /* Per-architecture data key. */
2920 static struct gdbarch_data *solib_svr4_data;
2921
2922 struct solib_svr4_ops
2923 {
2924 /* Return a description of the layout of `struct link_map'. */
2925 struct link_map_offsets *(*fetch_link_map_offsets)(void);
2926 };
2927
2928 /* Return a default for the architecture-specific operations. */
2929
2930 static void *
2931 solib_svr4_init (struct obstack *obstack)
2932 {
2933 struct solib_svr4_ops *ops;
2934
2935 ops = OBSTACK_ZALLOC (obstack, struct solib_svr4_ops);
2936 ops->fetch_link_map_offsets = NULL;
2937 return ops;
2938 }
2939
2940 /* Set the architecture-specific `struct link_map_offsets' fetcher for
2941 GDBARCH to FLMO. Also, install SVR4 solib_ops into GDBARCH. */
2942
2943 void
2944 set_solib_svr4_fetch_link_map_offsets (struct gdbarch *gdbarch,
2945 struct link_map_offsets *(*flmo) (void))
2946 {
2947 struct solib_svr4_ops *ops = gdbarch_data (gdbarch, solib_svr4_data);
2948
2949 ops->fetch_link_map_offsets = flmo;
2950
2951 set_solib_ops (gdbarch, &svr4_so_ops);
2952 }
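/* Illustration only, not part of the original file: how an
   architecture's init code is expected to use the function above.
   "example_ilp32_init_abi" is a hypothetical gdbarch init hook;
   existing ports call set_solib_svr4_fetch_link_map_offsets in the
   same way, passing either the ILP32 or LP64 fetcher defined below.
   Guarded out so it does not affect the build.  */
#if 0
static void
example_ilp32_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
{
  /* Install `struct link_map' offsets and thereby the SVR4 so_ops.  */
  set_solib_svr4_fetch_link_map_offsets (gdbarch,
					 svr4_ilp32_fetch_link_map_offsets);
}
#endif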
2953
2954 /* Fetch a link_map_offsets structure using the architecture-specific
2955 `struct link_map_offsets' fetcher. */
2956
2957 static struct link_map_offsets *
2958 svr4_fetch_link_map_offsets (void)
2959 {
2960 struct solib_svr4_ops *ops = gdbarch_data (target_gdbarch (), solib_svr4_data);
2961
2962 gdb_assert (ops->fetch_link_map_offsets);
2963 return ops->fetch_link_map_offsets ();
2964 }
2965
2966 /* Return 1 if a link map offset fetcher has been defined, 0 otherwise. */
2967
2968 static int
2969 svr4_have_link_map_offsets (void)
2970 {
2971 struct solib_svr4_ops *ops = gdbarch_data (target_gdbarch (), solib_svr4_data);
2972
2973 return (ops->fetch_link_map_offsets != NULL);
2974 }
2975 \f
2976
2977 /* Most OS'es that have SVR4-style ELF dynamic libraries define a
2978 `struct r_debug' and a `struct link_map' that are binary compatible
 2979 with the original SVR4 implementation. */
2980
2981 /* Fetch (and possibly build) an appropriate `struct link_map_offsets'
2982 for an ILP32 SVR4 system. */
2983
2984 struct link_map_offsets *
2985 svr4_ilp32_fetch_link_map_offsets (void)
2986 {
2987 static struct link_map_offsets lmo;
2988 static struct link_map_offsets *lmp = NULL;
2989
2990 if (lmp == NULL)
2991 {
2992 lmp = &lmo;
2993
2994 lmo.r_version_offset = 0;
2995 lmo.r_version_size = 4;
2996 lmo.r_map_offset = 4;
2997 lmo.r_brk_offset = 8;
2998 lmo.r_ldsomap_offset = 20;
2999
3000 /* Everything we need is in the first 20 bytes. */
3001 lmo.link_map_size = 20;
3002 lmo.l_addr_offset = 0;
3003 lmo.l_name_offset = 4;
3004 lmo.l_ld_offset = 8;
3005 lmo.l_next_offset = 12;
3006 lmo.l_prev_offset = 16;
3007 }
3008
3009 return lmp;
3010 }
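/* Illustration only, not part of the original file: an ILP32 `struct
   link_map' whose field offsets match the numbers filled in above:
   l_addr at 0, l_name at 4, l_ld at 8, l_next at 12, l_prev at 16,
   20 bytes in total.  uint32_t stands in for the target's 32-bit
   pointers.  Guarded out so it does not affect the build.  */
#if 0
struct example_ilp32_link_map
{
  uint32_t l_addr;	/* Difference between link-time and load addresses.  */
  uint32_t l_name;	/* Target pointer to the object's name string.  */
  uint32_t l_ld;	/* Target pointer to its dynamic section.  */
  uint32_t l_next;	/* Next entry in the chain.  */
  uint32_t l_prev;	/* Previous entry in the chain.  */
};
#endif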
3011
3012 /* Fetch (and possibly build) an appropriate `struct link_map_offsets'
3013 for an LP64 SVR4 system. */
3014
3015 struct link_map_offsets *
3016 svr4_lp64_fetch_link_map_offsets (void)
3017 {
3018 static struct link_map_offsets lmo;
3019 static struct link_map_offsets *lmp = NULL;
3020
3021 if (lmp == NULL)
3022 {
3023 lmp = &lmo;
3024
3025 lmo.r_version_offset = 0;
3026 lmo.r_version_size = 4;
3027 lmo.r_map_offset = 8;
3028 lmo.r_brk_offset = 16;
3029 lmo.r_ldsomap_offset = 40;
3030
3031 /* Everything we need is in the first 40 bytes. */
3032 lmo.link_map_size = 40;
3033 lmo.l_addr_offset = 0;
3034 lmo.l_name_offset = 8;
3035 lmo.l_ld_offset = 16;
3036 lmo.l_next_offset = 24;
3037 lmo.l_prev_offset = 32;
3038 }
3039
3040 return lmp;
3041 }
3042 \f
3043
3044 struct target_so_ops svr4_so_ops;
3045
 3046 /* Look up a global symbol for ELF DSOs linked with -Bsymbolic. Those DSOs have a
3047 different rule for symbol lookup. The lookup begins here in the DSO, not in
3048 the main executable. */
3049
3050 static struct symbol *
3051 elf_lookup_lib_symbol (const struct objfile *objfile,
3052 const char *name,
3053 const domain_enum domain)
3054 {
3055 bfd *abfd;
3056
3057 if (objfile == symfile_objfile)
3058 abfd = exec_bfd;
3059 else
3060 {
3061 /* OBJFILE should have been passed as the non-debug one. */
3062 gdb_assert (objfile->separate_debug_objfile_backlink == NULL);
3063
3064 abfd = objfile->obfd;
3065 }
3066
3067 if (abfd == NULL || scan_dyntag (DT_SYMBOLIC, abfd, NULL) != 1)
3068 return NULL;
3069
3070 return lookup_global_symbol_from_objfile (objfile, name, domain);
3071 }
3072
3073 extern initialize_file_ftype _initialize_svr4_solib; /* -Wmissing-prototypes */
3074
3075 void
3076 _initialize_svr4_solib (void)
3077 {
3078 solib_svr4_data = gdbarch_data_register_pre_init (solib_svr4_init);
3079 solib_svr4_pspace_data
3080 = register_program_space_data_with_cleanup (NULL, svr4_pspace_data_cleanup);
3081
3082 svr4_so_ops.relocate_section_addresses = svr4_relocate_section_addresses;
3083 svr4_so_ops.free_so = svr4_free_so;
3084 svr4_so_ops.clear_so = svr4_clear_so;
3085 svr4_so_ops.clear_solib = svr4_clear_solib;
3086 svr4_so_ops.solib_create_inferior_hook = svr4_solib_create_inferior_hook;
3087 svr4_so_ops.special_symbol_handling = svr4_special_symbol_handling;
3088 svr4_so_ops.current_sos = svr4_current_sos;
3089 svr4_so_ops.open_symbol_file_object = open_symbol_file_object;
3090 svr4_so_ops.in_dynsym_resolve_code = svr4_in_dynsym_resolve_code;
3091 svr4_so_ops.bfd_open = solib_bfd_open;
3092 svr4_so_ops.lookup_lib_global_symbol = elf_lookup_lib_symbol;
3093 svr4_so_ops.same = svr4_same;
3094 svr4_so_ops.keep_data_in_core = svr4_keep_data_in_core;
3095 svr4_so_ops.update_breakpoints = svr4_update_solib_event_breakpoints;
3096 svr4_so_ops.handle_event = svr4_handle_solib_event;
3097 }