* elf32-cris.c (cris_elf_gc_sweep_hook) <R_CRIS_16_GOTPLT>
[deliverable/binutils-gdb.git] / gdb / solib-svr4.c
... / ...
CommitLineData
1/* Handle SVR4 shared libraries for GDB, the GNU Debugger.
2
3 Copyright (C) 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000,
4 2001, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
5 Free Software Foundation, Inc.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22#include "defs.h"
23
24#include "elf/external.h"
25#include "elf/common.h"
26#include "elf/mips.h"
27
28#include "symtab.h"
29#include "bfd.h"
30#include "symfile.h"
31#include "objfiles.h"
32#include "gdbcore.h"
33#include "target.h"
34#include "inferior.h"
35#include "regcache.h"
36#include "gdbthread.h"
37#include "observer.h"
38
39#include "gdb_assert.h"
40
41#include "solist.h"
42#include "solib.h"
43#include "solib-svr4.h"
44
45#include "bfd-target.h"
46#include "elf-bfd.h"
47#include "exec.h"
48#include "auxv.h"
49#include "exceptions.h"
50
51static struct link_map_offsets *svr4_fetch_link_map_offsets (void);
52static int svr4_have_link_map_offsets (void);
53static void svr4_relocate_main_executable (void);
54
55/* Link map info to include in an allocated so_list entry. */
56
57struct lm_info
58 {
59 /* Pointer to copy of link map from inferior. The type is char *
60 rather than void *, so that we may use byte offsets to find the
61 various fields without the need for a cast. */
62 gdb_byte *lm;
63
64 /* Amount by which addresses in the binary should be relocated to
65 match the inferior. This could most often be taken directly
66 from lm, but when prelinking is involved and the prelink base
67 address changes, we may need a different offset, we want to
68 warn about the difference and compute it only once. */
69 CORE_ADDR l_addr;
70
71 /* The target location of lm. */
72 CORE_ADDR lm_addr;
73 };
74
75/* On SVR4 systems, a list of symbols in the dynamic linker where
76 GDB can try to place a breakpoint to monitor shared library
77 events.
78
79 If none of these symbols are found, or other errors occur, then
80 SVR4 systems will fall back to using a symbol as the "startup
81 mapping complete" breakpoint address. */
82
83static const char * const solib_break_names[] =
84{
85 "r_debug_state",
86 "_r_debug_state",
87 "_dl_debug_state",
88 "rtld_db_dlactivity",
89 "__dl_rtld_db_dlactivity",
90 "_rtld_debug_state",
91
92 NULL
93};
94
95static const char * const bkpt_names[] =
96{
97 "_start",
98 "__start",
99 "main",
100 NULL
101};
102
103static const char * const main_name_list[] =
104{
105 "main_$main",
106 NULL
107};
108
109/* Return non-zero if GDB_SO_NAME and INFERIOR_SO_NAME represent
110 the same shared library. */
111
112static int
113svr4_same_1 (const char *gdb_so_name, const char *inferior_so_name)
114{
115 if (strcmp (gdb_so_name, inferior_so_name) == 0)
116 return 1;
117
118 /* On Solaris, when starting inferior we think that dynamic linker is
119 /usr/lib/ld.so.1, but later on, the table of loaded shared libraries
120 contains /lib/ld.so.1. Sometimes one file is a link to another, but
121 sometimes they have identical content, but are not linked to each
122 other. We don't restrict this check for Solaris, but the chances
123 of running into this situation elsewhere are very low. */
124 if (strcmp (gdb_so_name, "/usr/lib/ld.so.1") == 0
125 && strcmp (inferior_so_name, "/lib/ld.so.1") == 0)
126 return 1;
127
128 /* Similarly, we observed the same issue with sparc64, but with
129 different locations. */
130 if (strcmp (gdb_so_name, "/usr/lib/sparcv9/ld.so.1") == 0
131 && strcmp (inferior_so_name, "/lib/sparcv9/ld.so.1") == 0)
132 return 1;
133
134 return 0;
135}
136
137static int
138svr4_same (struct so_list *gdb, struct so_list *inferior)
139{
140 return (svr4_same_1 (gdb->so_original_name, inferior->so_original_name));
141}
142
143/* link map access functions. */
144
145static CORE_ADDR
146lm_addr_from_link_map (struct so_list *so)
147{
148 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
149 struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
150
151 return extract_typed_address (so->lm_info->lm + lmo->l_addr_offset,
152 ptr_type);
153}
154
155static int
156has_lm_dynamic_from_link_map (void)
157{
158 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
159
160 return lmo->l_ld_offset >= 0;
161}
162
163static CORE_ADDR
164lm_dynamic_from_link_map (struct so_list *so)
165{
166 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
167 struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
168
169 return extract_typed_address (so->lm_info->lm + lmo->l_ld_offset,
170 ptr_type);
171}
172
173static CORE_ADDR
174lm_addr_check (struct so_list *so, bfd *abfd)
175{
176 if (so->lm_info->l_addr == (CORE_ADDR)-1)
177 {
178 struct bfd_section *dyninfo_sect;
179 CORE_ADDR l_addr, l_dynaddr, dynaddr;
180
181 l_addr = lm_addr_from_link_map (so);
182
183 if (! abfd || ! has_lm_dynamic_from_link_map ())
184 goto set_addr;
185
186 l_dynaddr = lm_dynamic_from_link_map (so);
187
188 dyninfo_sect = bfd_get_section_by_name (abfd, ".dynamic");
189 if (dyninfo_sect == NULL)
190 goto set_addr;
191
192 dynaddr = bfd_section_vma (abfd, dyninfo_sect);
193
194 if (dynaddr + l_addr != l_dynaddr)
195 {
196 CORE_ADDR align = 0x1000;
197 CORE_ADDR minpagesize = align;
198
199 if (bfd_get_flavour (abfd) == bfd_target_elf_flavour)
200 {
201 Elf_Internal_Ehdr *ehdr = elf_tdata (abfd)->elf_header;
202 Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
203 int i;
204
205 align = 1;
206
207 for (i = 0; i < ehdr->e_phnum; i++)
208 if (phdr[i].p_type == PT_LOAD && phdr[i].p_align > align)
209 align = phdr[i].p_align;
210
211 minpagesize = get_elf_backend_data (abfd)->minpagesize;
212 }
213
214 /* Turn it into a mask. */
215 align--;
216
217 /* If the changes match the alignment requirements, we
218 assume we're using a core file that was generated by the
219 same binary, just prelinked with a different base offset.
220 If it doesn't match, we may have a different binary, the
221 same binary with the dynamic table loaded at an unrelated
222 location, or anything, really. To avoid regressions,
223 don't adjust the base offset in the latter case, although
224 odds are that, if things really changed, debugging won't
225 quite work.
226
227 One could expect more the condition
228 ((l_addr & align) == 0 && ((l_dynaddr - dynaddr) & align) == 0)
229 but the one below is relaxed for PPC. The PPC kernel supports
230 either 4k or 64k page sizes. To be prepared for 64k pages,
231 PPC ELF files are built using an alignment requirement of 64k.
232 However, when running on a kernel supporting 4k pages, the memory
233 mapping of the library may not actually happen on a 64k boundary!
234
235 (In the usual case where (l_addr & align) == 0, this check is
236 equivalent to the possibly expected check above.)
237
238 Even on PPC it must be zero-aligned at least for MINPAGESIZE. */
239
240 l_addr = l_dynaddr - dynaddr;
241
242 if ((l_addr & (minpagesize - 1)) == 0
243 && (l_addr & align) == ((l_dynaddr - dynaddr) & align))
244 {
245 if (info_verbose)
246 printf_unfiltered (_("Using PIC (Position Independent Code) "
247 "prelink displacement %s for \"%s\".\n"),
248 paddress (target_gdbarch, l_addr),
249 so->so_name);
250 }
251 else
252 {
253 /* There is no way to verify the library file matches. prelink
254 can during prelinking of an unprelinked file (or unprelinking
255 of a prelinked file) shift the DYNAMIC segment by arbitrary
256 offset without any page size alignment. There is no way to
257 find out the ELF header and/or Program Headers for a limited
258 verification if it they match. One could do a verification
259 of the DYNAMIC segment. Still the found address is the best
260 one GDB could find. */
261
262 warning (_(".dynamic section for \"%s\" "
263 "is not at the expected address "
264 "(wrong library or version mismatch?)"), so->so_name);
265 }
266 }
267
268 set_addr:
269 so->lm_info->l_addr = l_addr;
270 }
271
272 return so->lm_info->l_addr;
273}
274
275static CORE_ADDR
276lm_next (struct so_list *so)
277{
278 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
279 struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
280
281 return extract_typed_address (so->lm_info->lm + lmo->l_next_offset,
282 ptr_type);
283}
284
285static CORE_ADDR
286lm_prev (struct so_list *so)
287{
288 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
289 struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
290
291 return extract_typed_address (so->lm_info->lm + lmo->l_prev_offset,
292 ptr_type);
293}
294
295static CORE_ADDR
296lm_name (struct so_list *so)
297{
298 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
299 struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
300
301 return extract_typed_address (so->lm_info->lm + lmo->l_name_offset,
302 ptr_type);
303}
304
305static int
306ignore_first_link_map_entry (struct so_list *so)
307{
308 /* Assume that everything is a library if the dynamic loader was loaded
309 late by a static executable. */
310 if (exec_bfd && bfd_get_section_by_name (exec_bfd, ".dynamic") == NULL)
311 return 0;
312
313 return lm_prev (so) == 0;
314}
315
316/* Per pspace SVR4 specific data. */
317
318struct svr4_info
319{
320 CORE_ADDR debug_base; /* Base of dynamic linker structures. */
321
322 /* Validity flag for debug_loader_offset. */
323 int debug_loader_offset_p;
324
325 /* Load address for the dynamic linker, inferred. */
326 CORE_ADDR debug_loader_offset;
327
328 /* Name of the dynamic linker, valid if debug_loader_offset_p. */
329 char *debug_loader_name;
330
331 /* Load map address for the main executable. */
332 CORE_ADDR main_lm_addr;
333
334 CORE_ADDR interp_text_sect_low;
335 CORE_ADDR interp_text_sect_high;
336 CORE_ADDR interp_plt_sect_low;
337 CORE_ADDR interp_plt_sect_high;
338};
339
340/* Per-program-space data key. */
341static const struct program_space_data *solib_svr4_pspace_data;
342
343static void
344svr4_pspace_data_cleanup (struct program_space *pspace, void *arg)
345{
346 struct svr4_info *info;
347
348 info = program_space_data (pspace, solib_svr4_pspace_data);
349 xfree (info);
350}
351
352/* Get the current svr4 data. If none is found yet, add it now. This
353 function always returns a valid object. */
354
355static struct svr4_info *
356get_svr4_info (void)
357{
358 struct svr4_info *info;
359
360 info = program_space_data (current_program_space, solib_svr4_pspace_data);
361 if (info != NULL)
362 return info;
363
364 info = XZALLOC (struct svr4_info);
365 set_program_space_data (current_program_space, solib_svr4_pspace_data, info);
366 return info;
367}
368
369/* Local function prototypes */
370
371static int match_main (const char *);
372
373/* Read program header TYPE from inferior memory. The header is found
374 by scanning the OS auxillary vector.
375
376 If TYPE == -1, return the program headers instead of the contents of
377 one program header.
378
379 Return a pointer to allocated memory holding the program header contents,
380 or NULL on failure. If sucessful, and unless P_SECT_SIZE is NULL, the
381 size of those contents is returned to P_SECT_SIZE. Likewise, the target
382 architecture size (32-bit or 64-bit) is returned to P_ARCH_SIZE. */
383
384static gdb_byte *
385read_program_header (int type, int *p_sect_size, int *p_arch_size)
386{
387 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch);
388 CORE_ADDR at_phdr, at_phent, at_phnum;
389 int arch_size, sect_size;
390 CORE_ADDR sect_addr;
391 gdb_byte *buf;
392
393 /* Get required auxv elements from target. */
394 if (target_auxv_search (&current_target, AT_PHDR, &at_phdr) <= 0)
395 return 0;
396 if (target_auxv_search (&current_target, AT_PHENT, &at_phent) <= 0)
397 return 0;
398 if (target_auxv_search (&current_target, AT_PHNUM, &at_phnum) <= 0)
399 return 0;
400 if (!at_phdr || !at_phnum)
401 return 0;
402
403 /* Determine ELF architecture type. */
404 if (at_phent == sizeof (Elf32_External_Phdr))
405 arch_size = 32;
406 else if (at_phent == sizeof (Elf64_External_Phdr))
407 arch_size = 64;
408 else
409 return 0;
410
411 /* Find the requested segment. */
412 if (type == -1)
413 {
414 sect_addr = at_phdr;
415 sect_size = at_phent * at_phnum;
416 }
417 else if (arch_size == 32)
418 {
419 Elf32_External_Phdr phdr;
420 int i;
421
422 /* Search for requested PHDR. */
423 for (i = 0; i < at_phnum; i++)
424 {
425 if (target_read_memory (at_phdr + i * sizeof (phdr),
426 (gdb_byte *)&phdr, sizeof (phdr)))
427 return 0;
428
429 if (extract_unsigned_integer ((gdb_byte *)phdr.p_type,
430 4, byte_order) == type)
431 break;
432 }
433
434 if (i == at_phnum)
435 return 0;
436
437 /* Retrieve address and size. */
438 sect_addr = extract_unsigned_integer ((gdb_byte *)phdr.p_vaddr,
439 4, byte_order);
440 sect_size = extract_unsigned_integer ((gdb_byte *)phdr.p_memsz,
441 4, byte_order);
442 }
443 else
444 {
445 Elf64_External_Phdr phdr;
446 int i;
447
448 /* Search for requested PHDR. */
449 for (i = 0; i < at_phnum; i++)
450 {
451 if (target_read_memory (at_phdr + i * sizeof (phdr),
452 (gdb_byte *)&phdr, sizeof (phdr)))
453 return 0;
454
455 if (extract_unsigned_integer ((gdb_byte *)phdr.p_type,
456 4, byte_order) == type)
457 break;
458 }
459
460 if (i == at_phnum)
461 return 0;
462
463 /* Retrieve address and size. */
464 sect_addr = extract_unsigned_integer ((gdb_byte *)phdr.p_vaddr,
465 8, byte_order);
466 sect_size = extract_unsigned_integer ((gdb_byte *)phdr.p_memsz,
467 8, byte_order);
468 }
469
470 /* Read in requested program header. */
471 buf = xmalloc (sect_size);
472 if (target_read_memory (sect_addr, buf, sect_size))
473 {
474 xfree (buf);
475 return NULL;
476 }
477
478 if (p_arch_size)
479 *p_arch_size = arch_size;
480 if (p_sect_size)
481 *p_sect_size = sect_size;
482
483 return buf;
484}
485
486
487/* Return program interpreter string. */
488static gdb_byte *
489find_program_interpreter (void)
490{
491 gdb_byte *buf = NULL;
492
493 /* If we have an exec_bfd, use its section table. */
494 if (exec_bfd
495 && bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
496 {
497 struct bfd_section *interp_sect;
498
499 interp_sect = bfd_get_section_by_name (exec_bfd, ".interp");
500 if (interp_sect != NULL)
501 {
502 int sect_size = bfd_section_size (exec_bfd, interp_sect);
503
504 buf = xmalloc (sect_size);
505 bfd_get_section_contents (exec_bfd, interp_sect, buf, 0, sect_size);
506 }
507 }
508
509 /* If we didn't find it, use the target auxillary vector. */
510 if (!buf)
511 buf = read_program_header (PT_INTERP, NULL, NULL);
512
513 return buf;
514}
515
516
517/* Scan for DYNTAG in .dynamic section of ABFD. If DYNTAG is found 1 is
518 returned and the corresponding PTR is set. */
519
520static int
521scan_dyntag (int dyntag, bfd *abfd, CORE_ADDR *ptr)
522{
523 int arch_size, step, sect_size;
524 long dyn_tag;
525 CORE_ADDR dyn_ptr, dyn_addr;
526 gdb_byte *bufend, *bufstart, *buf;
527 Elf32_External_Dyn *x_dynp_32;
528 Elf64_External_Dyn *x_dynp_64;
529 struct bfd_section *sect;
530 struct target_section *target_section;
531
532 if (abfd == NULL)
533 return 0;
534
535 if (bfd_get_flavour (abfd) != bfd_target_elf_flavour)
536 return 0;
537
538 arch_size = bfd_get_arch_size (abfd);
539 if (arch_size == -1)
540 return 0;
541
542 /* Find the start address of the .dynamic section. */
543 sect = bfd_get_section_by_name (abfd, ".dynamic");
544 if (sect == NULL)
545 return 0;
546
547 for (target_section = current_target_sections->sections;
548 target_section < current_target_sections->sections_end;
549 target_section++)
550 if (sect == target_section->the_bfd_section)
551 break;
552 if (target_section < current_target_sections->sections_end)
553 dyn_addr = target_section->addr;
554 else
555 {
556 /* ABFD may come from OBJFILE acting only as a symbol file without being
557 loaded into the target (see add_symbol_file_command). This case is
558 such fallback to the file VMA address without the possibility of
559 having the section relocated to its actual in-memory address. */
560
561 dyn_addr = bfd_section_vma (abfd, sect);
562 }
563
564 /* Read in .dynamic from the BFD. We will get the actual value
565 from memory later. */
566 sect_size = bfd_section_size (abfd, sect);
567 buf = bufstart = alloca (sect_size);
568 if (!bfd_get_section_contents (abfd, sect,
569 buf, 0, sect_size))
570 return 0;
571
572 /* Iterate over BUF and scan for DYNTAG. If found, set PTR and return. */
573 step = (arch_size == 32) ? sizeof (Elf32_External_Dyn)
574 : sizeof (Elf64_External_Dyn);
575 for (bufend = buf + sect_size;
576 buf < bufend;
577 buf += step)
578 {
579 if (arch_size == 32)
580 {
581 x_dynp_32 = (Elf32_External_Dyn *) buf;
582 dyn_tag = bfd_h_get_32 (abfd, (bfd_byte *) x_dynp_32->d_tag);
583 dyn_ptr = bfd_h_get_32 (abfd, (bfd_byte *) x_dynp_32->d_un.d_ptr);
584 }
585 else
586 {
587 x_dynp_64 = (Elf64_External_Dyn *) buf;
588 dyn_tag = bfd_h_get_64 (abfd, (bfd_byte *) x_dynp_64->d_tag);
589 dyn_ptr = bfd_h_get_64 (abfd, (bfd_byte *) x_dynp_64->d_un.d_ptr);
590 }
591 if (dyn_tag == DT_NULL)
592 return 0;
593 if (dyn_tag == dyntag)
594 {
595 /* If requested, try to read the runtime value of this .dynamic
596 entry. */
597 if (ptr)
598 {
599 struct type *ptr_type;
600 gdb_byte ptr_buf[8];
601 CORE_ADDR ptr_addr;
602
603 ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
604 ptr_addr = dyn_addr + (buf - bufstart) + arch_size / 8;
605 if (target_read_memory (ptr_addr, ptr_buf, arch_size / 8) == 0)
606 dyn_ptr = extract_typed_address (ptr_buf, ptr_type);
607 *ptr = dyn_ptr;
608 }
609 return 1;
610 }
611 }
612
613 return 0;
614}
615
616/* Scan for DYNTAG in .dynamic section of the target's main executable,
617 found by consulting the OS auxillary vector. If DYNTAG is found 1 is
618 returned and the corresponding PTR is set. */
619
620static int
621scan_dyntag_auxv (int dyntag, CORE_ADDR *ptr)
622{
623 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch);
624 int sect_size, arch_size, step;
625 long dyn_tag;
626 CORE_ADDR dyn_ptr;
627 gdb_byte *bufend, *bufstart, *buf;
628
629 /* Read in .dynamic section. */
630 buf = bufstart = read_program_header (PT_DYNAMIC, &sect_size, &arch_size);
631 if (!buf)
632 return 0;
633
634 /* Iterate over BUF and scan for DYNTAG. If found, set PTR and return. */
635 step = (arch_size == 32) ? sizeof (Elf32_External_Dyn)
636 : sizeof (Elf64_External_Dyn);
637 for (bufend = buf + sect_size;
638 buf < bufend;
639 buf += step)
640 {
641 if (arch_size == 32)
642 {
643 Elf32_External_Dyn *dynp = (Elf32_External_Dyn *) buf;
644
645 dyn_tag = extract_unsigned_integer ((gdb_byte *) dynp->d_tag,
646 4, byte_order);
647 dyn_ptr = extract_unsigned_integer ((gdb_byte *) dynp->d_un.d_ptr,
648 4, byte_order);
649 }
650 else
651 {
652 Elf64_External_Dyn *dynp = (Elf64_External_Dyn *) buf;
653
654 dyn_tag = extract_unsigned_integer ((gdb_byte *) dynp->d_tag,
655 8, byte_order);
656 dyn_ptr = extract_unsigned_integer ((gdb_byte *) dynp->d_un.d_ptr,
657 8, byte_order);
658 }
659 if (dyn_tag == DT_NULL)
660 break;
661
662 if (dyn_tag == dyntag)
663 {
664 if (ptr)
665 *ptr = dyn_ptr;
666
667 xfree (bufstart);
668 return 1;
669 }
670 }
671
672 xfree (bufstart);
673 return 0;
674}
675
676/* Locate the base address of dynamic linker structs for SVR4 elf
677 targets.
678
679 For SVR4 elf targets the address of the dynamic linker's runtime
680 structure is contained within the dynamic info section in the
681 executable file. The dynamic section is also mapped into the
682 inferior address space. Because the runtime loader fills in the
683 real address before starting the inferior, we have to read in the
684 dynamic info section from the inferior address space.
685 If there are any errors while trying to find the address, we
686 silently return 0, otherwise the found address is returned. */
687
688static CORE_ADDR
689elf_locate_base (void)
690{
691 struct minimal_symbol *msymbol;
692 CORE_ADDR dyn_ptr;
693
694 /* Look for DT_MIPS_RLD_MAP first. MIPS executables use this
695 instead of DT_DEBUG, although they sometimes contain an unused
696 DT_DEBUG. */
697 if (scan_dyntag (DT_MIPS_RLD_MAP, exec_bfd, &dyn_ptr)
698 || scan_dyntag_auxv (DT_MIPS_RLD_MAP, &dyn_ptr))
699 {
700 struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
701 gdb_byte *pbuf;
702 int pbuf_size = TYPE_LENGTH (ptr_type);
703
704 pbuf = alloca (pbuf_size);
705 /* DT_MIPS_RLD_MAP contains a pointer to the address
706 of the dynamic link structure. */
707 if (target_read_memory (dyn_ptr, pbuf, pbuf_size))
708 return 0;
709 return extract_typed_address (pbuf, ptr_type);
710 }
711
712 /* Find DT_DEBUG. */
713 if (scan_dyntag (DT_DEBUG, exec_bfd, &dyn_ptr)
714 || scan_dyntag_auxv (DT_DEBUG, &dyn_ptr))
715 return dyn_ptr;
716
717 /* This may be a static executable. Look for the symbol
718 conventionally named _r_debug, as a last resort. */
719 msymbol = lookup_minimal_symbol ("_r_debug", NULL, symfile_objfile);
720 if (msymbol != NULL)
721 return SYMBOL_VALUE_ADDRESS (msymbol);
722
723 /* DT_DEBUG entry not found. */
724 return 0;
725}
726
727/* Locate the base address of dynamic linker structs.
728
729 For both the SunOS and SVR4 shared library implementations, if the
730 inferior executable has been linked dynamically, there is a single
731 address somewhere in the inferior's data space which is the key to
732 locating all of the dynamic linker's runtime structures. This
733 address is the value of the debug base symbol. The job of this
734 function is to find and return that address, or to return 0 if there
735 is no such address (the executable is statically linked for example).
736
737 For SunOS, the job is almost trivial, since the dynamic linker and
738 all of it's structures are statically linked to the executable at
739 link time. Thus the symbol for the address we are looking for has
740 already been added to the minimal symbol table for the executable's
741 objfile at the time the symbol file's symbols were read, and all we
742 have to do is look it up there. Note that we explicitly do NOT want
743 to find the copies in the shared library.
744
745 The SVR4 version is a bit more complicated because the address
746 is contained somewhere in the dynamic info section. We have to go
747 to a lot more work to discover the address of the debug base symbol.
748 Because of this complexity, we cache the value we find and return that
749 value on subsequent invocations. Note there is no copy in the
750 executable symbol tables. */
751
752static CORE_ADDR
753locate_base (struct svr4_info *info)
754{
755 /* Check to see if we have a currently valid address, and if so, avoid
756 doing all this work again and just return the cached address. If
757 we have no cached address, try to locate it in the dynamic info
758 section for ELF executables. There's no point in doing any of this
759 though if we don't have some link map offsets to work with. */
760
761 if (info->debug_base == 0 && svr4_have_link_map_offsets ())
762 info->debug_base = elf_locate_base ();
763 return info->debug_base;
764}
765
766/* Find the first element in the inferior's dynamic link map, and
767 return its address in the inferior. Return zero if the address
768 could not be determined.
769
770 FIXME: Perhaps we should validate the info somehow, perhaps by
771 checking r_version for a known version number, or r_state for
772 RT_CONSISTENT. */
773
774static CORE_ADDR
775solib_svr4_r_map (struct svr4_info *info)
776{
777 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
778 struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
779 CORE_ADDR addr = 0;
780 volatile struct gdb_exception ex;
781
782 TRY_CATCH (ex, RETURN_MASK_ERROR)
783 {
784 addr = read_memory_typed_address (info->debug_base + lmo->r_map_offset,
785 ptr_type);
786 }
787 exception_print (gdb_stderr, ex);
788 return addr;
789}
790
791/* Find r_brk from the inferior's debug base. */
792
793static CORE_ADDR
794solib_svr4_r_brk (struct svr4_info *info)
795{
796 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
797 struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
798
799 return read_memory_typed_address (info->debug_base + lmo->r_brk_offset,
800 ptr_type);
801}
802
803/* Find the link map for the dynamic linker (if it is not in the
804 normal list of loaded shared objects). */
805
806static CORE_ADDR
807solib_svr4_r_ldsomap (struct svr4_info *info)
808{
809 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
810 struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
811 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch);
812 ULONGEST version;
813
814 /* Check version, and return zero if `struct r_debug' doesn't have
815 the r_ldsomap member. */
816 version
817 = read_memory_unsigned_integer (info->debug_base + lmo->r_version_offset,
818 lmo->r_version_size, byte_order);
819 if (version < 2 || lmo->r_ldsomap_offset == -1)
820 return 0;
821
822 return read_memory_typed_address (info->debug_base + lmo->r_ldsomap_offset,
823 ptr_type);
824}
825
826/* On Solaris systems with some versions of the dynamic linker,
827 ld.so's l_name pointer points to the SONAME in the string table
828 rather than into writable memory. So that GDB can find shared
829 libraries when loading a core file generated by gcore, ensure that
830 memory areas containing the l_name string are saved in the core
831 file. */
832
833static int
834svr4_keep_data_in_core (CORE_ADDR vaddr, unsigned long size)
835{
836 struct svr4_info *info;
837 CORE_ADDR ldsomap;
838 struct so_list *new;
839 struct cleanup *old_chain;
840 struct link_map_offsets *lmo;
841 CORE_ADDR name_lm;
842
843 info = get_svr4_info ();
844
845 info->debug_base = 0;
846 locate_base (info);
847 if (!info->debug_base)
848 return 0;
849
850 ldsomap = solib_svr4_r_ldsomap (info);
851 if (!ldsomap)
852 return 0;
853
854 lmo = svr4_fetch_link_map_offsets ();
855 new = XZALLOC (struct so_list);
856 old_chain = make_cleanup (xfree, new);
857 new->lm_info = xmalloc (sizeof (struct lm_info));
858 make_cleanup (xfree, new->lm_info);
859 new->lm_info->l_addr = (CORE_ADDR)-1;
860 new->lm_info->lm_addr = ldsomap;
861 new->lm_info->lm = xzalloc (lmo->link_map_size);
862 make_cleanup (xfree, new->lm_info->lm);
863 read_memory (ldsomap, new->lm_info->lm, lmo->link_map_size);
864 name_lm = lm_name (new);
865 do_cleanups (old_chain);
866
867 return (name_lm >= vaddr && name_lm < vaddr + size);
868}
869
870/* Implement the "open_symbol_file_object" target_so_ops method.
871
872 If no open symbol file, attempt to locate and open the main symbol
873 file. On SVR4 systems, this is the first link map entry. If its
874 name is here, we can open it. Useful when attaching to a process
875 without first loading its symbol file. */
876
877static int
878open_symbol_file_object (void *from_ttyp)
879{
880 CORE_ADDR lm, l_name;
881 char *filename;
882 int errcode;
883 int from_tty = *(int *)from_ttyp;
884 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
885 struct type *ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
886 int l_name_size = TYPE_LENGTH (ptr_type);
887 gdb_byte *l_name_buf = xmalloc (l_name_size);
888 struct cleanup *cleanups = make_cleanup (xfree, l_name_buf);
889 struct svr4_info *info = get_svr4_info ();
890
891 if (symfile_objfile)
892 if (!query (_("Attempt to reload symbols from process? ")))
893 {
894 do_cleanups (cleanups);
895 return 0;
896 }
897
898 /* Always locate the debug struct, in case it has moved. */
899 info->debug_base = 0;
900 if (locate_base (info) == 0)
901 {
902 do_cleanups (cleanups);
903 return 0; /* failed somehow... */
904 }
905
906 /* First link map member should be the executable. */
907 lm = solib_svr4_r_map (info);
908 if (lm == 0)
909 {
910 do_cleanups (cleanups);
911 return 0; /* failed somehow... */
912 }
913
914 /* Read address of name from target memory to GDB. */
915 read_memory (lm + lmo->l_name_offset, l_name_buf, l_name_size);
916
917 /* Convert the address to host format. */
918 l_name = extract_typed_address (l_name_buf, ptr_type);
919
920 if (l_name == 0)
921 {
922 do_cleanups (cleanups);
923 return 0; /* No filename. */
924 }
925
926 /* Now fetch the filename from target memory. */
927 target_read_string (l_name, &filename, SO_NAME_MAX_PATH_SIZE - 1, &errcode);
928 make_cleanup (xfree, filename);
929
930 if (errcode)
931 {
932 warning (_("failed to read exec filename from attached file: %s"),
933 safe_strerror (errcode));
934 do_cleanups (cleanups);
935 return 0;
936 }
937
938 /* Have a pathname: read the symbol file. */
939 symbol_file_add_main (filename, from_tty);
940
941 do_cleanups (cleanups);
942 return 1;
943}
944
945/* If no shared library information is available from the dynamic
946 linker, build a fallback list from other sources. */
947
948static struct so_list *
949svr4_default_sos (void)
950{
951 struct svr4_info *info = get_svr4_info ();
952 struct so_list *new;
953
954 if (!info->debug_loader_offset_p)
955 return NULL;
956
957 new = XZALLOC (struct so_list);
958
959 new->lm_info = xmalloc (sizeof (struct lm_info));
960
961 /* Nothing will ever check the cached copy of the link
962 map if we set l_addr. */
963 new->lm_info->l_addr = info->debug_loader_offset;
964 new->lm_info->lm_addr = 0;
965 new->lm_info->lm = NULL;
966
967 strncpy (new->so_name, info->debug_loader_name, SO_NAME_MAX_PATH_SIZE - 1);
968 new->so_name[SO_NAME_MAX_PATH_SIZE - 1] = '\0';
969 strcpy (new->so_original_name, new->so_name);
970
971 return new;
972}
973
974/* Implement the "current_sos" target_so_ops method. */
975
976static struct so_list *
977svr4_current_sos (void)
978{
979 CORE_ADDR lm, prev_lm;
980 struct so_list *head = 0;
981 struct so_list **link_ptr = &head;
982 CORE_ADDR ldsomap = 0;
983 struct svr4_info *info;
984
985 info = get_svr4_info ();
986
987 /* Always locate the debug struct, in case it has moved. */
988 info->debug_base = 0;
989 locate_base (info);
990
991 /* If we can't find the dynamic linker's base structure, this
992 must not be a dynamically linked executable. Hmm. */
993 if (! info->debug_base)
994 return svr4_default_sos ();
995
996 /* Walk the inferior's link map list, and build our list of
997 `struct so_list' nodes. */
998 prev_lm = 0;
999 lm = solib_svr4_r_map (info);
1000
1001 while (lm)
1002 {
1003 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
1004 struct so_list *new = XZALLOC (struct so_list);
1005 struct cleanup *old_chain = make_cleanup (xfree, new);
1006 CORE_ADDR next_lm;
1007
1008 new->lm_info = xmalloc (sizeof (struct lm_info));
1009 make_cleanup (xfree, new->lm_info);
1010
1011 new->lm_info->l_addr = (CORE_ADDR)-1;
1012 new->lm_info->lm_addr = lm;
1013 new->lm_info->lm = xzalloc (lmo->link_map_size);
1014 make_cleanup (xfree, new->lm_info->lm);
1015
1016 read_memory (lm, new->lm_info->lm, lmo->link_map_size);
1017
1018 next_lm = lm_next (new);
1019
1020 if (lm_prev (new) != prev_lm)
1021 {
1022 warning (_("Corrupted shared library list"));
1023 free_so (new);
1024 next_lm = 0;
1025 }
1026
1027 /* For SVR4 versions, the first entry in the link map is for the
1028 inferior executable, so we must ignore it. For some versions of
1029 SVR4, it has no name. For others (Solaris 2.3 for example), it
1030 does have a name, so we can no longer use a missing name to
1031 decide when to ignore it. */
1032 else if (ignore_first_link_map_entry (new) && ldsomap == 0)
1033 {
1034 info->main_lm_addr = new->lm_info->lm_addr;
1035 free_so (new);
1036 }
1037 else
1038 {
1039 int errcode;
1040 char *buffer;
1041
1042 /* Extract this shared object's name. */
1043 target_read_string (lm_name (new), &buffer,
1044 SO_NAME_MAX_PATH_SIZE - 1, &errcode);
1045 if (errcode != 0)
1046 warning (_("Can't read pathname for load map: %s."),
1047 safe_strerror (errcode));
1048 else
1049 {
1050 strncpy (new->so_name, buffer, SO_NAME_MAX_PATH_SIZE - 1);
1051 new->so_name[SO_NAME_MAX_PATH_SIZE - 1] = '\0';
1052 strcpy (new->so_original_name, new->so_name);
1053 }
1054 xfree (buffer);
1055
1056 /* If this entry has no name, or its name matches the name
1057 for the main executable, don't include it in the list. */
1058 if (! new->so_name[0]
1059 || match_main (new->so_name))
1060 free_so (new);
1061 else
1062 {
1063 new->next = 0;
1064 *link_ptr = new;
1065 link_ptr = &new->next;
1066 }
1067 }
1068
1069 prev_lm = lm;
1070 lm = next_lm;
1071
1072 /* On Solaris, the dynamic linker is not in the normal list of
1073 shared objects, so make sure we pick it up too. Having
1074 symbol information for the dynamic linker is quite crucial
1075 for skipping dynamic linker resolver code. */
1076 if (lm == 0 && ldsomap == 0)
1077 {
1078 lm = ldsomap = solib_svr4_r_ldsomap (info);
1079 prev_lm = 0;
1080 }
1081
1082 discard_cleanups (old_chain);
1083 }
1084
1085 if (head == NULL)
1086 return svr4_default_sos ();
1087
1088 return head;
1089}
1090
1091/* Get the address of the link_map for a given OBJFILE. */
1092
1093CORE_ADDR
1094svr4_fetch_objfile_link_map (struct objfile *objfile)
1095{
1096 struct so_list *so;
1097 struct svr4_info *info = get_svr4_info ();
1098
1099 /* Cause svr4_current_sos() to be run if it hasn't been already. */
1100 if (info->main_lm_addr == 0)
1101 solib_add (NULL, 0, &current_target, auto_solib_add);
1102
1103 /* svr4_current_sos() will set main_lm_addr for the main executable. */
1104 if (objfile == symfile_objfile)
1105 return info->main_lm_addr;
1106
1107 /* The other link map addresses may be found by examining the list
1108 of shared libraries. */
1109 for (so = master_so_list (); so; so = so->next)
1110 if (so->objfile == objfile)
1111 return so->lm_info->lm_addr;
1112
1113 /* Not found! */
1114 return 0;
1115}
1116
1117/* On some systems, the only way to recognize the link map entry for
1118 the main executable file is by looking at its name. Return
1119 non-zero iff SONAME matches one of the known main executable names. */
1120
1121static int
1122match_main (const char *soname)
1123{
1124 const char * const *mainp;
1125
1126 for (mainp = main_name_list; *mainp != NULL; mainp++)
1127 {
1128 if (strcmp (soname, *mainp) == 0)
1129 return (1);
1130 }
1131
1132 return (0);
1133}
1134
1135/* Return 1 if PC lies in the dynamic symbol resolution code of the
1136 SVR4 run time loader. */
1137
1138int
1139svr4_in_dynsym_resolve_code (CORE_ADDR pc)
1140{
1141 struct svr4_info *info = get_svr4_info ();
1142
1143 return ((pc >= info->interp_text_sect_low
1144 && pc < info->interp_text_sect_high)
1145 || (pc >= info->interp_plt_sect_low
1146 && pc < info->interp_plt_sect_high)
1147 || in_plt_section (pc, NULL)
1148 || in_gnu_ifunc_stub (pc));
1149}
1150
1151/* Given an executable's ABFD and target, compute the entry-point
1152 address. */
1153
1154static CORE_ADDR
1155exec_entry_point (struct bfd *abfd, struct target_ops *targ)
1156{
1157 /* KevinB wrote ... for most targets, the address returned by
1158 bfd_get_start_address() is the entry point for the start
1159 function. But, for some targets, bfd_get_start_address() returns
1160 the address of a function descriptor from which the entry point
1161 address may be extracted. This address is extracted by
1162 gdbarch_convert_from_func_ptr_addr(). The method
1163 gdbarch_convert_from_func_ptr_addr() is the merely the identify
1164 function for targets which don't use function descriptors. */
1165 return gdbarch_convert_from_func_ptr_addr (target_gdbarch,
1166 bfd_get_start_address (abfd),
1167 targ);
1168}
1169
1170/* Helper function for gdb_bfd_lookup_symbol. */
1171
1172static int
1173cmp_name_and_sec_flags (asymbol *sym, void *data)
1174{
1175 return (strcmp (sym->name, (const char *) data) == 0
1176 && (sym->section->flags & (SEC_CODE | SEC_DATA)) != 0);
1177}
1178/* Arrange for dynamic linker to hit breakpoint.
1179
1180 Both the SunOS and the SVR4 dynamic linkers have, as part of their
1181 debugger interface, support for arranging for the inferior to hit
1182 a breakpoint after mapping in the shared libraries. This function
1183 enables that breakpoint.
1184
1185 For SunOS, there is a special flag location (in_debugger) which we
1186 set to 1. When the dynamic linker sees this flag set, it will set
1187 a breakpoint at a location known only to itself, after saving the
1188 original contents of that place and the breakpoint address itself,
1189 in it's own internal structures. When we resume the inferior, it
1190 will eventually take a SIGTRAP when it runs into the breakpoint.
1191 We handle this (in a different place) by restoring the contents of
1192 the breakpointed location (which is only known after it stops),
1193 chasing around to locate the shared libraries that have been
1194 loaded, then resuming.
1195
1196 For SVR4, the debugger interface structure contains a member (r_brk)
1197 which is statically initialized at the time the shared library is
1198 built, to the offset of a function (_r_debug_state) which is guaran-
1199 teed to be called once before mapping in a library, and again when
1200 the mapping is complete. At the time we are examining this member,
1201 it contains only the unrelocated offset of the function, so we have
1202 to do our own relocation. Later, when the dynamic linker actually
1203 runs, it relocates r_brk to be the actual address of _r_debug_state().
1204
1205 The debugger interface structure also contains an enumeration which
1206 is set to either RT_ADD or RT_DELETE prior to changing the mapping,
1207 depending upon whether or not the library is being mapped or unmapped,
1208 and then set to RT_CONSISTENT after the library is mapped/unmapped. */
1209
1210static int
1211enable_break (struct svr4_info *info, int from_tty)
1212{
1213 struct minimal_symbol *msymbol;
1214 const char * const *bkpt_namep;
1215 asection *interp_sect;
1216 gdb_byte *interp_name;
1217 CORE_ADDR sym_addr;
1218
1219 info->interp_text_sect_low = info->interp_text_sect_high = 0;
1220 info->interp_plt_sect_low = info->interp_plt_sect_high = 0;
1221
1222 /* If we already have a shared library list in the target, and
1223 r_debug contains r_brk, set the breakpoint there - this should
1224 mean r_brk has already been relocated. Assume the dynamic linker
1225 is the object containing r_brk. */
1226
1227 solib_add (NULL, from_tty, &current_target, auto_solib_add);
1228 sym_addr = 0;
1229 if (info->debug_base && solib_svr4_r_map (info) != 0)
1230 sym_addr = solib_svr4_r_brk (info);
1231
1232 if (sym_addr != 0)
1233 {
1234 struct obj_section *os;
1235
1236 sym_addr = gdbarch_addr_bits_remove
1237 (target_gdbarch, gdbarch_convert_from_func_ptr_addr (target_gdbarch,
1238 sym_addr,
1239 &current_target));
1240
1241 /* On at least some versions of Solaris there's a dynamic relocation
1242 on _r_debug.r_brk and SYM_ADDR may not be relocated yet, e.g., if
1243 we get control before the dynamic linker has self-relocated.
1244 Check if SYM_ADDR is in a known section, if it is assume we can
1245 trust its value. This is just a heuristic though, it could go away
1246 or be replaced if it's getting in the way.
1247
1248 On ARM we need to know whether the ISA of rtld_db_dlactivity (or
1249 however it's spelled in your particular system) is ARM or Thumb.
1250 That knowledge is encoded in the address, if it's Thumb the low bit
1251 is 1. However, we've stripped that info above and it's not clear
1252 what all the consequences are of passing a non-addr_bits_remove'd
1253 address to create_solib_event_breakpoint. The call to
1254 find_pc_section verifies we know about the address and have some
1255 hope of computing the right kind of breakpoint to use (via
1256 symbol info). It does mean that GDB needs to be pointed at a
1257 non-stripped version of the dynamic linker in order to obtain
1258 information it already knows about. Sigh. */
1259
1260 os = find_pc_section (sym_addr);
1261 if (os != NULL)
1262 {
1263 /* Record the relocated start and end address of the dynamic linker
1264 text and plt section for svr4_in_dynsym_resolve_code. */
1265 bfd *tmp_bfd;
1266 CORE_ADDR load_addr;
1267
1268 tmp_bfd = os->objfile->obfd;
1269 load_addr = ANOFFSET (os->objfile->section_offsets,
1270 os->objfile->sect_index_text);
1271
1272 interp_sect = bfd_get_section_by_name (tmp_bfd, ".text");
1273 if (interp_sect)
1274 {
1275 info->interp_text_sect_low =
1276 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
1277 info->interp_text_sect_high =
1278 info->interp_text_sect_low
1279 + bfd_section_size (tmp_bfd, interp_sect);
1280 }
1281 interp_sect = bfd_get_section_by_name (tmp_bfd, ".plt");
1282 if (interp_sect)
1283 {
1284 info->interp_plt_sect_low =
1285 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
1286 info->interp_plt_sect_high =
1287 info->interp_plt_sect_low
1288 + bfd_section_size (tmp_bfd, interp_sect);
1289 }
1290
1291 create_solib_event_breakpoint (target_gdbarch, sym_addr);
1292 return 1;
1293 }
1294 }
1295
1296 /* Find the program interpreter; if not found, warn the user and drop
1297 into the old breakpoint at symbol code. */
1298 interp_name = find_program_interpreter ();
1299 if (interp_name)
1300 {
1301 CORE_ADDR load_addr = 0;
1302 int load_addr_found = 0;
1303 int loader_found_in_list = 0;
1304 struct so_list *so;
1305 bfd *tmp_bfd = NULL;
1306 struct target_ops *tmp_bfd_target;
1307 volatile struct gdb_exception ex;
1308
1309 sym_addr = 0;
1310
1311 /* Now we need to figure out where the dynamic linker was
1312 loaded so that we can load its symbols and place a breakpoint
1313 in the dynamic linker itself.
1314
1315 This address is stored on the stack. However, I've been unable
1316 to find any magic formula to find it for Solaris (appears to
1317 be trivial on GNU/Linux). Therefore, we have to try an alternate
1318 mechanism to find the dynamic linker's base address. */
1319
1320 TRY_CATCH (ex, RETURN_MASK_ALL)
1321 {
1322 tmp_bfd = solib_bfd_open (interp_name);
1323 }
1324 if (tmp_bfd == NULL)
1325 goto bkpt_at_symbol;
1326
1327 /* Now convert the TMP_BFD into a target. That way target, as
1328 well as BFD operations can be used. Note that closing the
1329 target will also close the underlying bfd. */
1330 tmp_bfd_target = target_bfd_reopen (tmp_bfd);
1331
1332 /* On a running target, we can get the dynamic linker's base
1333 address from the shared library table. */
1334 so = master_so_list ();
1335 while (so)
1336 {
1337 if (svr4_same_1 (interp_name, so->so_original_name))
1338 {
1339 load_addr_found = 1;
1340 loader_found_in_list = 1;
1341 load_addr = lm_addr_check (so, tmp_bfd);
1342 break;
1343 }
1344 so = so->next;
1345 }
1346
1347 /* If we were not able to find the base address of the loader
1348 from our so_list, then try using the AT_BASE auxilliary entry. */
1349 if (!load_addr_found)
1350 if (target_auxv_search (&current_target, AT_BASE, &load_addr) > 0)
1351 {
1352 int addr_bit = gdbarch_addr_bit (target_gdbarch);
1353
1354 /* Ensure LOAD_ADDR has proper sign in its possible upper bits so
1355 that `+ load_addr' will overflow CORE_ADDR width not creating
1356 invalid addresses like 0x101234567 for 32bit inferiors on 64bit
1357 GDB. */
1358
1359 if (addr_bit < (sizeof (CORE_ADDR) * HOST_CHAR_BIT))
1360 {
1361 CORE_ADDR space_size = (CORE_ADDR) 1 << addr_bit;
1362 CORE_ADDR tmp_entry_point = exec_entry_point (tmp_bfd,
1363 tmp_bfd_target);
1364
1365 gdb_assert (load_addr < space_size);
1366
1367 /* TMP_ENTRY_POINT exceeding SPACE_SIZE would be for prelinked
1368 64bit ld.so with 32bit executable, it should not happen. */
1369
1370 if (tmp_entry_point < space_size
1371 && tmp_entry_point + load_addr >= space_size)
1372 load_addr -= space_size;
1373 }
1374
1375 load_addr_found = 1;
1376 }
1377
1378 /* Otherwise we find the dynamic linker's base address by examining
1379 the current pc (which should point at the entry point for the
1380 dynamic linker) and subtracting the offset of the entry point.
1381
1382 This is more fragile than the previous approaches, but is a good
1383 fallback method because it has actually been working well in
1384 most cases. */
1385 if (!load_addr_found)
1386 {
1387 struct regcache *regcache
1388 = get_thread_arch_regcache (inferior_ptid, target_gdbarch);
1389
1390 load_addr = (regcache_read_pc (regcache)
1391 - exec_entry_point (tmp_bfd, tmp_bfd_target));
1392 }
1393
1394 if (!loader_found_in_list)
1395 {
1396 info->debug_loader_name = xstrdup (interp_name);
1397 info->debug_loader_offset_p = 1;
1398 info->debug_loader_offset = load_addr;
1399 solib_add (NULL, from_tty, &current_target, auto_solib_add);
1400 }
1401
1402 /* Record the relocated start and end address of the dynamic linker
1403 text and plt section for svr4_in_dynsym_resolve_code. */
1404 interp_sect = bfd_get_section_by_name (tmp_bfd, ".text");
1405 if (interp_sect)
1406 {
1407 info->interp_text_sect_low =
1408 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
1409 info->interp_text_sect_high =
1410 info->interp_text_sect_low
1411 + bfd_section_size (tmp_bfd, interp_sect);
1412 }
1413 interp_sect = bfd_get_section_by_name (tmp_bfd, ".plt");
1414 if (interp_sect)
1415 {
1416 info->interp_plt_sect_low =
1417 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
1418 info->interp_plt_sect_high =
1419 info->interp_plt_sect_low
1420 + bfd_section_size (tmp_bfd, interp_sect);
1421 }
1422
1423 /* Now try to set a breakpoint in the dynamic linker. */
1424 for (bkpt_namep = solib_break_names; *bkpt_namep != NULL; bkpt_namep++)
1425 {
1426 sym_addr = gdb_bfd_lookup_symbol (tmp_bfd, cmp_name_and_sec_flags,
1427 (void *) *bkpt_namep);
1428 if (sym_addr != 0)
1429 break;
1430 }
1431
1432 if (sym_addr != 0)
1433 /* Convert 'sym_addr' from a function pointer to an address.
1434 Because we pass tmp_bfd_target instead of the current
1435 target, this will always produce an unrelocated value. */
1436 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch,
1437 sym_addr,
1438 tmp_bfd_target);
1439
1440 /* We're done with both the temporary bfd and target. Remember,
1441 closing the target closes the underlying bfd. */
1442 target_close (tmp_bfd_target, 0);
1443
1444 if (sym_addr != 0)
1445 {
1446 create_solib_event_breakpoint (target_gdbarch, load_addr + sym_addr);
1447 xfree (interp_name);
1448 return 1;
1449 }
1450
1451 /* For whatever reason we couldn't set a breakpoint in the dynamic
1452 linker. Warn and drop into the old code. */
1453 bkpt_at_symbol:
1454 xfree (interp_name);
1455 warning (_("Unable to find dynamic linker breakpoint function.\n"
1456 "GDB will be unable to debug shared library initializers\n"
1457 "and track explicitly loaded dynamic code."));
1458 }
1459
1460 /* Scan through the lists of symbols, trying to look up the symbol and
1461 set a breakpoint there. Terminate loop when we/if we succeed. */
1462
1463 for (bkpt_namep = solib_break_names; *bkpt_namep != NULL; bkpt_namep++)
1464 {
1465 msymbol = lookup_minimal_symbol (*bkpt_namep, NULL, symfile_objfile);
1466 if ((msymbol != NULL) && (SYMBOL_VALUE_ADDRESS (msymbol) != 0))
1467 {
1468 sym_addr = SYMBOL_VALUE_ADDRESS (msymbol);
1469 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch,
1470 sym_addr,
1471 &current_target);
1472 create_solib_event_breakpoint (target_gdbarch, sym_addr);
1473 return 1;
1474 }
1475 }
1476
1477 if (!current_inferior ()->attach_flag)
1478 {
1479 for (bkpt_namep = bkpt_names; *bkpt_namep != NULL; bkpt_namep++)
1480 {
1481 msymbol = lookup_minimal_symbol (*bkpt_namep, NULL, symfile_objfile);
1482 if ((msymbol != NULL) && (SYMBOL_VALUE_ADDRESS (msymbol) != 0))
1483 {
1484 sym_addr = SYMBOL_VALUE_ADDRESS (msymbol);
1485 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch,
1486 sym_addr,
1487 &current_target);
1488 create_solib_event_breakpoint (target_gdbarch, sym_addr);
1489 return 1;
1490 }
1491 }
1492 }
1493 return 0;
1494}
1495
1496/* Implement the "special_symbol_handling" target_so_ops method. */
1497
1498static void
1499svr4_special_symbol_handling (void)
1500{
1501 /* Nothing to do. */
1502}
1503
1504/* Read the ELF program headers from ABFD. Return the contents and
1505 set *PHDRS_SIZE to the size of the program headers. */
1506
1507static gdb_byte *
1508read_program_headers_from_bfd (bfd *abfd, int *phdrs_size)
1509{
1510 Elf_Internal_Ehdr *ehdr;
1511 gdb_byte *buf;
1512
1513 ehdr = elf_elfheader (abfd);
1514
1515 *phdrs_size = ehdr->e_phnum * ehdr->e_phentsize;
1516 if (*phdrs_size == 0)
1517 return NULL;
1518
1519 buf = xmalloc (*phdrs_size);
1520 if (bfd_seek (abfd, ehdr->e_phoff, SEEK_SET) != 0
1521 || bfd_bread (buf, *phdrs_size, abfd) != *phdrs_size)
1522 {
1523 xfree (buf);
1524 return NULL;
1525 }
1526
1527 return buf;
1528}
1529
1530/* Return 1 and fill *DISPLACEMENTP with detected PIE offset of inferior
1531 exec_bfd. Otherwise return 0.
1532
1533 We relocate all of the sections by the same amount. This
1534 behavior is mandated by recent editions of the System V ABI.
1535 According to the System V Application Binary Interface,
1536 Edition 4.1, page 5-5:
1537
1538 ... Though the system chooses virtual addresses for
1539 individual processes, it maintains the segments' relative
1540 positions. Because position-independent code uses relative
1541 addressesing between segments, the difference between
1542 virtual addresses in memory must match the difference
1543 between virtual addresses in the file. The difference
1544 between the virtual address of any segment in memory and
1545 the corresponding virtual address in the file is thus a
1546 single constant value for any one executable or shared
1547 object in a given process. This difference is the base
1548 address. One use of the base address is to relocate the
1549 memory image of the program during dynamic linking.
1550
1551 The same language also appears in Edition 4.0 of the System V
1552 ABI and is left unspecified in some of the earlier editions.
1553
1554 Decide if the objfile needs to be relocated. As indicated above, we will
1555 only be here when execution is stopped. But during attachment PC can be at
1556 arbitrary address therefore regcache_read_pc can be misleading (contrary to
1557 the auxv AT_ENTRY value). Moreover for executable with interpreter section
1558 regcache_read_pc would point to the interpreter and not the main executable.
1559
1560 So, to summarize, relocations are necessary when the start address obtained
1561 from the executable is different from the address in auxv AT_ENTRY entry.
1562
1563 [ The astute reader will note that we also test to make sure that
1564 the executable in question has the DYNAMIC flag set. It is my
1565 opinion that this test is unnecessary (undesirable even). It
1566 was added to avoid inadvertent relocation of an executable
1567 whose e_type member in the ELF header is not ET_DYN. There may
1568 be a time in the future when it is desirable to do relocations
1569 on other types of files as well in which case this condition
1570 should either be removed or modified to accomodate the new file
1571 type. - Kevin, Nov 2000. ] */
1572
1573static int
1574svr4_exec_displacement (CORE_ADDR *displacementp)
1575{
1576 /* ENTRY_POINT is a possible function descriptor - before
1577 a call to gdbarch_convert_from_func_ptr_addr. */
1578 CORE_ADDR entry_point, displacement;
1579
1580 if (exec_bfd == NULL)
1581 return 0;
1582
1583 /* Therefore for ELF it is ET_EXEC and not ET_DYN. Both shared libraries
1584 being executed themselves and PIE (Position Independent Executable)
1585 executables are ET_DYN. */
1586
1587 if ((bfd_get_file_flags (exec_bfd) & DYNAMIC) == 0)
1588 return 0;
1589
1590 if (target_auxv_search (&current_target, AT_ENTRY, &entry_point) <= 0)
1591 return 0;
1592
1593 displacement = entry_point - bfd_get_start_address (exec_bfd);
1594
1595 /* Verify the DISPLACEMENT candidate complies with the required page
1596 alignment. It is cheaper than the program headers comparison below. */
1597
1598 if (bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
1599 {
1600 const struct elf_backend_data *elf = get_elf_backend_data (exec_bfd);
1601
1602 /* p_align of PT_LOAD segments does not specify any alignment but
1603 only congruency of addresses:
1604 p_offset % p_align == p_vaddr % p_align
1605 Kernel is free to load the executable with lower alignment. */
1606
1607 if ((displacement & (elf->minpagesize - 1)) != 0)
1608 return 0;
1609 }
1610
1611 /* Verify that the auxilliary vector describes the same file as exec_bfd, by
1612 comparing their program headers. If the program headers in the auxilliary
1613 vector do not match the program headers in the executable, then we are
1614 looking at a different file than the one used by the kernel - for
1615 instance, "gdb program" connected to "gdbserver :PORT ld.so program". */
1616
1617 if (bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
1618 {
1619 /* Be optimistic and clear OK only if GDB was able to verify the headers
1620 really do not match. */
1621 int phdrs_size, phdrs2_size, ok = 1;
1622 gdb_byte *buf, *buf2;
1623 int arch_size;
1624
1625 buf = read_program_header (-1, &phdrs_size, &arch_size);
1626 buf2 = read_program_headers_from_bfd (exec_bfd, &phdrs2_size);
1627 if (buf != NULL && buf2 != NULL)
1628 {
1629 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch);
1630
1631 /* We are dealing with three different addresses. EXEC_BFD
1632 represents current address in on-disk file. target memory content
1633 may be different from EXEC_BFD as the file may have been prelinked
1634 to a different address after the executable has been loaded.
1635 Moreover the address of placement in target memory can be
1636 different from what the program headers in target memory say -
1637 this is the goal of PIE.
1638
1639 Detected DISPLACEMENT covers both the offsets of PIE placement and
1640 possible new prelink performed after start of the program. Here
1641 relocate BUF and BUF2 just by the EXEC_BFD vs. target memory
1642 content offset for the verification purpose. */
1643
1644 if (phdrs_size != phdrs2_size
1645 || bfd_get_arch_size (exec_bfd) != arch_size)
1646 ok = 0;
1647 else if (arch_size == 32
1648 && phdrs_size >= sizeof (Elf32_External_Phdr)
1649 && phdrs_size % sizeof (Elf32_External_Phdr) == 0)
1650 {
1651 Elf_Internal_Ehdr *ehdr2 = elf_tdata (exec_bfd)->elf_header;
1652 Elf_Internal_Phdr *phdr2 = elf_tdata (exec_bfd)->phdr;
1653 CORE_ADDR displacement = 0;
1654 int i;
1655
1656 /* DISPLACEMENT could be found more easily by the difference of
1657 ehdr2->e_entry. But we haven't read the ehdr yet, and we
1658 already have enough information to compute that displacement
1659 with what we've read. */
1660
1661 for (i = 0; i < ehdr2->e_phnum; i++)
1662 if (phdr2[i].p_type == PT_LOAD)
1663 {
1664 Elf32_External_Phdr *phdrp;
1665 gdb_byte *buf_vaddr_p, *buf_paddr_p;
1666 CORE_ADDR vaddr, paddr;
1667 CORE_ADDR displacement_vaddr = 0;
1668 CORE_ADDR displacement_paddr = 0;
1669
1670 phdrp = &((Elf32_External_Phdr *) buf)[i];
1671 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
1672 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
1673
1674 vaddr = extract_unsigned_integer (buf_vaddr_p, 4,
1675 byte_order);
1676 displacement_vaddr = vaddr - phdr2[i].p_vaddr;
1677
1678 paddr = extract_unsigned_integer (buf_paddr_p, 4,
1679 byte_order);
1680 displacement_paddr = paddr - phdr2[i].p_paddr;
1681
1682 if (displacement_vaddr == displacement_paddr)
1683 displacement = displacement_vaddr;
1684
1685 break;
1686 }
1687
1688 /* Now compare BUF and BUF2 with optional DISPLACEMENT. */
1689
1690 for (i = 0; i < phdrs_size / sizeof (Elf32_External_Phdr); i++)
1691 {
1692 Elf32_External_Phdr *phdrp;
1693 Elf32_External_Phdr *phdr2p;
1694 gdb_byte *buf_vaddr_p, *buf_paddr_p;
1695 CORE_ADDR vaddr, paddr;
1696 asection *plt2_asect;
1697
1698 phdrp = &((Elf32_External_Phdr *) buf)[i];
1699 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
1700 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
1701 phdr2p = &((Elf32_External_Phdr *) buf2)[i];
1702
1703 /* PT_GNU_STACK is an exception by being never relocated by
1704 prelink as its addresses are always zero. */
1705
1706 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
1707 continue;
1708
1709 /* Check also other adjustment combinations - PR 11786. */
1710
1711 vaddr = extract_unsigned_integer (buf_vaddr_p, 4,
1712 byte_order);
1713 vaddr -= displacement;
1714 store_unsigned_integer (buf_vaddr_p, 4, byte_order, vaddr);
1715
1716 paddr = extract_unsigned_integer (buf_paddr_p, 4,
1717 byte_order);
1718 paddr -= displacement;
1719 store_unsigned_integer (buf_paddr_p, 4, byte_order, paddr);
1720
1721 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
1722 continue;
1723
1724 /* prelink can convert .plt SHT_NOBITS to SHT_PROGBITS. */
1725 plt2_asect = bfd_get_section_by_name (exec_bfd, ".plt");
1726 if (plt2_asect)
1727 {
1728 int content2;
1729 gdb_byte *buf_filesz_p = (gdb_byte *) &phdrp->p_filesz;
1730 CORE_ADDR filesz;
1731
1732 content2 = (bfd_get_section_flags (exec_bfd, plt2_asect)
1733 & SEC_HAS_CONTENTS) != 0;
1734
1735 filesz = extract_unsigned_integer (buf_filesz_p, 4,
1736 byte_order);
1737
1738 /* PLT2_ASECT is from on-disk file (exec_bfd) while
1739 FILESZ is from the in-memory image. */
1740 if (content2)
1741 filesz += bfd_get_section_size (plt2_asect);
1742 else
1743 filesz -= bfd_get_section_size (plt2_asect);
1744
1745 store_unsigned_integer (buf_filesz_p, 4, byte_order,
1746 filesz);
1747
1748 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
1749 continue;
1750 }
1751
1752 ok = 0;
1753 break;
1754 }
1755 }
1756 else if (arch_size == 64
1757 && phdrs_size >= sizeof (Elf64_External_Phdr)
1758 && phdrs_size % sizeof (Elf64_External_Phdr) == 0)
1759 {
1760 Elf_Internal_Ehdr *ehdr2 = elf_tdata (exec_bfd)->elf_header;
1761 Elf_Internal_Phdr *phdr2 = elf_tdata (exec_bfd)->phdr;
1762 CORE_ADDR displacement = 0;
1763 int i;
1764
1765 /* DISPLACEMENT could be found more easily by the difference of
1766 ehdr2->e_entry. But we haven't read the ehdr yet, and we
1767 already have enough information to compute that displacement
1768 with what we've read. */
1769
1770 for (i = 0; i < ehdr2->e_phnum; i++)
1771 if (phdr2[i].p_type == PT_LOAD)
1772 {
1773 Elf64_External_Phdr *phdrp;
1774 gdb_byte *buf_vaddr_p, *buf_paddr_p;
1775 CORE_ADDR vaddr, paddr;
1776 CORE_ADDR displacement_vaddr = 0;
1777 CORE_ADDR displacement_paddr = 0;
1778
1779 phdrp = &((Elf64_External_Phdr *) buf)[i];
1780 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
1781 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
1782
1783 vaddr = extract_unsigned_integer (buf_vaddr_p, 8,
1784 byte_order);
1785 displacement_vaddr = vaddr - phdr2[i].p_vaddr;
1786
1787 paddr = extract_unsigned_integer (buf_paddr_p, 8,
1788 byte_order);
1789 displacement_paddr = paddr - phdr2[i].p_paddr;
1790
1791 if (displacement_vaddr == displacement_paddr)
1792 displacement = displacement_vaddr;
1793
1794 break;
1795 }
1796
1797 /* Now compare BUF and BUF2 with optional DISPLACEMENT. */
1798
1799 for (i = 0; i < phdrs_size / sizeof (Elf64_External_Phdr); i++)
1800 {
1801 Elf64_External_Phdr *phdrp;
1802 Elf64_External_Phdr *phdr2p;
1803 gdb_byte *buf_vaddr_p, *buf_paddr_p;
1804 CORE_ADDR vaddr, paddr;
1805 asection *plt2_asect;
1806
1807 phdrp = &((Elf64_External_Phdr *) buf)[i];
1808 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
1809 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
1810 phdr2p = &((Elf64_External_Phdr *) buf2)[i];
1811
1812 /* PT_GNU_STACK is an exception by being never relocated by
1813 prelink as its addresses are always zero. */
1814
1815 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
1816 continue;
1817
1818 /* Check also other adjustment combinations - PR 11786. */
1819
1820 vaddr = extract_unsigned_integer (buf_vaddr_p, 8,
1821 byte_order);
1822 vaddr -= displacement;
1823 store_unsigned_integer (buf_vaddr_p, 8, byte_order, vaddr);
1824
1825 paddr = extract_unsigned_integer (buf_paddr_p, 8,
1826 byte_order);
1827 paddr -= displacement;
1828 store_unsigned_integer (buf_paddr_p, 8, byte_order, paddr);
1829
1830 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
1831 continue;
1832
1833 /* prelink can convert .plt SHT_NOBITS to SHT_PROGBITS. */
1834 plt2_asect = bfd_get_section_by_name (exec_bfd, ".plt");
1835 if (plt2_asect)
1836 {
1837 int content2;
1838 gdb_byte *buf_filesz_p = (gdb_byte *) &phdrp->p_filesz;
1839 CORE_ADDR filesz;
1840
1841 content2 = (bfd_get_section_flags (exec_bfd, plt2_asect)
1842 & SEC_HAS_CONTENTS) != 0;
1843
1844 filesz = extract_unsigned_integer (buf_filesz_p, 8,
1845 byte_order);
1846
1847 /* PLT2_ASECT is from on-disk file (exec_bfd) while
1848 FILESZ is from the in-memory image. */
1849 if (content2)
1850 filesz += bfd_get_section_size (plt2_asect);
1851 else
1852 filesz -= bfd_get_section_size (plt2_asect);
1853
1854 store_unsigned_integer (buf_filesz_p, 8, byte_order,
1855 filesz);
1856
1857 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
1858 continue;
1859 }
1860
1861 ok = 0;
1862 break;
1863 }
1864 }
1865 else
1866 ok = 0;
1867 }
1868
1869 xfree (buf);
1870 xfree (buf2);
1871
1872 if (!ok)
1873 return 0;
1874 }
1875
1876 if (info_verbose)
1877 {
1878 /* It can be printed repeatedly as there is no easy way to check
1879 the executable symbols/file has been already relocated to
1880 displacement. */
1881
1882 printf_unfiltered (_("Using PIE (Position Independent Executable) "
1883 "displacement %s for \"%s\".\n"),
1884 paddress (target_gdbarch, displacement),
1885 bfd_get_filename (exec_bfd));
1886 }
1887
1888 *displacementp = displacement;
1889 return 1;
1890}
1891
1892/* Relocate the main executable. This function should be called upon
1893 stopping the inferior process at the entry point to the program.
1894 The entry point from BFD is compared to the AT_ENTRY of AUXV and if they are
1895 different, the main executable is relocated by the proper amount. */
1896
1897static void
1898svr4_relocate_main_executable (void)
1899{
1900 CORE_ADDR displacement;
1901
1902 /* If we are re-running this executable, SYMFILE_OBJFILE->SECTION_OFFSETS
1903 probably contains the offsets computed using the PIE displacement
1904 from the previous run, which of course are irrelevant for this run.
1905 So we need to determine the new PIE displacement and recompute the
1906 section offsets accordingly, even if SYMFILE_OBJFILE->SECTION_OFFSETS
1907 already contains pre-computed offsets.
1908
1909 If we cannot compute the PIE displacement, either:
1910
1911 - The executable is not PIE.
1912
1913 - SYMFILE_OBJFILE does not match the executable started in the target.
1914 This can happen for main executable symbols loaded at the host while
1915 `ld.so --ld-args main-executable' is loaded in the target.
1916
1917 Then we leave the section offsets untouched and use them as is for
1918 this run. Either:
1919
1920 - These section offsets were properly reset earlier, and thus
1921 already contain the correct values. This can happen for instance
1922 when reconnecting via the remote protocol to a target that supports
1923 the `qOffsets' packet.
1924
1925 - The section offsets were not reset earlier, and the best we can
1926 hope is that the old offsets are still applicable to the new run. */
1927
1928 if (! svr4_exec_displacement (&displacement))
1929 return;
1930
1931 /* Even DISPLACEMENT 0 is a valid new difference of in-memory vs. in-file
1932 addresses. */
1933
1934 if (symfile_objfile)
1935 {
1936 struct section_offsets *new_offsets;
1937 int i;
1938
1939 new_offsets = alloca (symfile_objfile->num_sections
1940 * sizeof (*new_offsets));
1941
1942 for (i = 0; i < symfile_objfile->num_sections; i++)
1943 new_offsets->offsets[i] = displacement;
1944
1945 objfile_relocate (symfile_objfile, new_offsets);
1946 }
1947 else if (exec_bfd)
1948 {
1949 asection *asect;
1950
1951 for (asect = exec_bfd->sections; asect != NULL; asect = asect->next)
1952 exec_set_section_address (bfd_get_filename (exec_bfd), asect->index,
1953 (bfd_section_vma (exec_bfd, asect)
1954 + displacement));
1955 }
1956}
1957
1958/* Implement the "create_inferior_hook" target_solib_ops method.
1959
1960 For SVR4 executables, this first instruction is either the first
1961 instruction in the dynamic linker (for dynamically linked
1962 executables) or the instruction at "start" for statically linked
1963 executables. For dynamically linked executables, the system
1964 first exec's /lib/libc.so.N, which contains the dynamic linker,
1965 and starts it running. The dynamic linker maps in any needed
1966 shared libraries, maps in the actual user executable, and then
1967 jumps to "start" in the user executable.
1968
1969 We can arrange to cooperate with the dynamic linker to discover the
1970 names of shared libraries that are dynamically linked, and the base
1971 addresses to which they are linked.
1972
1973 This function is responsible for discovering those names and
1974 addresses, and saving sufficient information about them to allow
1975 their symbols to be read at a later time.
1976
1977 FIXME
1978
1979 Between enable_break() and disable_break(), this code does not
1980 properly handle hitting breakpoints which the user might have
1981 set in the startup code or in the dynamic linker itself. Proper
1982 handling will probably have to wait until the implementation is
1983 changed to use the "breakpoint handler function" method.
1984
1985 Also, what if child has exit()ed? Must exit loop somehow. */
1986
1987static void
1988svr4_solib_create_inferior_hook (int from_tty)
1989{
1990#if defined(_SCO_DS)
1991 struct inferior *inf;
1992 struct thread_info *tp;
1993#endif /* defined(_SCO_DS) */
1994 struct svr4_info *info;
1995
1996 info = get_svr4_info ();
1997
1998 /* Relocate the main executable if necessary. */
1999 svr4_relocate_main_executable ();
2000
2001 /* No point setting a breakpoint in the dynamic linker if we can't
2002 hit it (e.g., a core file, or a trace file). */
2003 if (!target_has_execution)
2004 return;
2005
2006 if (!svr4_have_link_map_offsets ())
2007 return;
2008
2009 if (!enable_break (info, from_tty))
2010 return;
2011
2012#if defined(_SCO_DS)
2013 /* SCO needs the loop below, other systems should be using the
2014 special shared library breakpoints and the shared library breakpoint
2015 service routine.
2016
2017 Now run the target. It will eventually hit the breakpoint, at
2018 which point all of the libraries will have been mapped in and we
2019 can go groveling around in the dynamic linker structures to find
2020 out what we need to know about them. */
2021
2022 inf = current_inferior ();
2023 tp = inferior_thread ();
2024
2025 clear_proceed_status ();
2026 inf->control.stop_soon = STOP_QUIETLY;
2027 tp->suspend.stop_signal = TARGET_SIGNAL_0;
2028 do
2029 {
2030 target_resume (pid_to_ptid (-1), 0, tp->suspend.stop_signal);
2031 wait_for_inferior ();
2032 }
2033 while (tp->suspend.stop_signal != TARGET_SIGNAL_TRAP);
2034 inf->control.stop_soon = NO_STOP_QUIETLY;
2035#endif /* defined(_SCO_DS) */
2036}
2037
2038static void
2039svr4_clear_solib (void)
2040{
2041 struct svr4_info *info;
2042
2043 info = get_svr4_info ();
2044 info->debug_base = 0;
2045 info->debug_loader_offset_p = 0;
2046 info->debug_loader_offset = 0;
2047 xfree (info->debug_loader_name);
2048 info->debug_loader_name = NULL;
2049}
2050
2051static void
2052svr4_free_so (struct so_list *so)
2053{
2054 xfree (so->lm_info->lm);
2055 xfree (so->lm_info);
2056}
2057
2058
2059/* Clear any bits of ADDR that wouldn't fit in a target-format
2060 data pointer. "Data pointer" here refers to whatever sort of
2061 address the dynamic linker uses to manage its sections. At the
2062 moment, we don't support shared libraries on any processors where
2063 code and data pointers are different sizes.
2064
2065 This isn't really the right solution. What we really need here is
2066 a way to do arithmetic on CORE_ADDR values that respects the
2067 natural pointer/address correspondence. (For example, on the MIPS,
2068 converting a 32-bit pointer to a 64-bit CORE_ADDR requires you to
2069 sign-extend the value. There, simply truncating the bits above
2070 gdbarch_ptr_bit, as we do below, is no good.) This should probably
2071 be a new gdbarch method or something. */
2072static CORE_ADDR
2073svr4_truncate_ptr (CORE_ADDR addr)
2074{
2075 if (gdbarch_ptr_bit (target_gdbarch) == sizeof (CORE_ADDR) * 8)
2076 /* We don't need to truncate anything, and the bit twiddling below
2077 will fail due to overflow problems. */
2078 return addr;
2079 else
2080 return addr & (((CORE_ADDR) 1 << gdbarch_ptr_bit (target_gdbarch)) - 1);
2081}
2082
2083
2084static void
2085svr4_relocate_section_addresses (struct so_list *so,
2086 struct target_section *sec)
2087{
2088 sec->addr = svr4_truncate_ptr (sec->addr + lm_addr_check (so,
2089 sec->bfd));
2090 sec->endaddr = svr4_truncate_ptr (sec->endaddr + lm_addr_check (so,
2091 sec->bfd));
2092}
2093\f
2094
2095/* Architecture-specific operations. */
2096
2097/* Per-architecture data key. */
2098static struct gdbarch_data *solib_svr4_data;
2099
2100struct solib_svr4_ops
2101{
2102 /* Return a description of the layout of `struct link_map'. */
2103 struct link_map_offsets *(*fetch_link_map_offsets)(void);
2104};
2105
2106/* Return a default for the architecture-specific operations. */
2107
2108static void *
2109solib_svr4_init (struct obstack *obstack)
2110{
2111 struct solib_svr4_ops *ops;
2112
2113 ops = OBSTACK_ZALLOC (obstack, struct solib_svr4_ops);
2114 ops->fetch_link_map_offsets = NULL;
2115 return ops;
2116}
2117
2118/* Set the architecture-specific `struct link_map_offsets' fetcher for
2119 GDBARCH to FLMO. Also, install SVR4 solib_ops into GDBARCH. */
2120
2121void
2122set_solib_svr4_fetch_link_map_offsets (struct gdbarch *gdbarch,
2123 struct link_map_offsets *(*flmo) (void))
2124{
2125 struct solib_svr4_ops *ops = gdbarch_data (gdbarch, solib_svr4_data);
2126
2127 ops->fetch_link_map_offsets = flmo;
2128
2129 set_solib_ops (gdbarch, &svr4_so_ops);
2130}
2131
2132/* Fetch a link_map_offsets structure using the architecture-specific
2133 `struct link_map_offsets' fetcher. */
2134
2135static struct link_map_offsets *
2136svr4_fetch_link_map_offsets (void)
2137{
2138 struct solib_svr4_ops *ops = gdbarch_data (target_gdbarch, solib_svr4_data);
2139
2140 gdb_assert (ops->fetch_link_map_offsets);
2141 return ops->fetch_link_map_offsets ();
2142}
2143
2144/* Return 1 if a link map offset fetcher has been defined, 0 otherwise. */
2145
2146static int
2147svr4_have_link_map_offsets (void)
2148{
2149 struct solib_svr4_ops *ops = gdbarch_data (target_gdbarch, solib_svr4_data);
2150
2151 return (ops->fetch_link_map_offsets != NULL);
2152}
2153\f
2154
2155/* Most OS'es that have SVR4-style ELF dynamic libraries define a
2156 `struct r_debug' and a `struct link_map' that are binary compatible
2157 with the origional SVR4 implementation. */
2158
2159/* Fetch (and possibly build) an appropriate `struct link_map_offsets'
2160 for an ILP32 SVR4 system. */
2161
2162struct link_map_offsets *
2163svr4_ilp32_fetch_link_map_offsets (void)
2164{
2165 static struct link_map_offsets lmo;
2166 static struct link_map_offsets *lmp = NULL;
2167
2168 if (lmp == NULL)
2169 {
2170 lmp = &lmo;
2171
2172 lmo.r_version_offset = 0;
2173 lmo.r_version_size = 4;
2174 lmo.r_map_offset = 4;
2175 lmo.r_brk_offset = 8;
2176 lmo.r_ldsomap_offset = 20;
2177
2178 /* Everything we need is in the first 20 bytes. */
2179 lmo.link_map_size = 20;
2180 lmo.l_addr_offset = 0;
2181 lmo.l_name_offset = 4;
2182 lmo.l_ld_offset = 8;
2183 lmo.l_next_offset = 12;
2184 lmo.l_prev_offset = 16;
2185 }
2186
2187 return lmp;
2188}
2189
2190/* Fetch (and possibly build) an appropriate `struct link_map_offsets'
2191 for an LP64 SVR4 system. */
2192
2193struct link_map_offsets *
2194svr4_lp64_fetch_link_map_offsets (void)
2195{
2196 static struct link_map_offsets lmo;
2197 static struct link_map_offsets *lmp = NULL;
2198
2199 if (lmp == NULL)
2200 {
2201 lmp = &lmo;
2202
2203 lmo.r_version_offset = 0;
2204 lmo.r_version_size = 4;
2205 lmo.r_map_offset = 8;
2206 lmo.r_brk_offset = 16;
2207 lmo.r_ldsomap_offset = 40;
2208
2209 /* Everything we need is in the first 40 bytes. */
2210 lmo.link_map_size = 40;
2211 lmo.l_addr_offset = 0;
2212 lmo.l_name_offset = 8;
2213 lmo.l_ld_offset = 16;
2214 lmo.l_next_offset = 24;
2215 lmo.l_prev_offset = 32;
2216 }
2217
2218 return lmp;
2219}
2220\f
2221
2222struct target_so_ops svr4_so_ops;
2223
2224/* Lookup global symbol for ELF DSOs linked with -Bsymbolic. Those DSOs have a
2225 different rule for symbol lookup. The lookup begins here in the DSO, not in
2226 the main executable. */
2227
2228static struct symbol *
2229elf_lookup_lib_symbol (const struct objfile *objfile,
2230 const char *name,
2231 const domain_enum domain)
2232{
2233 bfd *abfd;
2234
2235 if (objfile == symfile_objfile)
2236 abfd = exec_bfd;
2237 else
2238 {
2239 /* OBJFILE should have been passed as the non-debug one. */
2240 gdb_assert (objfile->separate_debug_objfile_backlink == NULL);
2241
2242 abfd = objfile->obfd;
2243 }
2244
2245 if (abfd == NULL || scan_dyntag (DT_SYMBOLIC, abfd, NULL) != 1)
2246 return NULL;
2247
2248 return lookup_global_symbol_from_objfile (objfile, name, domain);
2249}
2250
2251extern initialize_file_ftype _initialize_svr4_solib; /* -Wmissing-prototypes */
2252
2253void
2254_initialize_svr4_solib (void)
2255{
2256 solib_svr4_data = gdbarch_data_register_pre_init (solib_svr4_init);
2257 solib_svr4_pspace_data
2258 = register_program_space_data_with_cleanup (svr4_pspace_data_cleanup);
2259
2260 svr4_so_ops.relocate_section_addresses = svr4_relocate_section_addresses;
2261 svr4_so_ops.free_so = svr4_free_so;
2262 svr4_so_ops.clear_solib = svr4_clear_solib;
2263 svr4_so_ops.solib_create_inferior_hook = svr4_solib_create_inferior_hook;
2264 svr4_so_ops.special_symbol_handling = svr4_special_symbol_handling;
2265 svr4_so_ops.current_sos = svr4_current_sos;
2266 svr4_so_ops.open_symbol_file_object = open_symbol_file_object;
2267 svr4_so_ops.in_dynsym_resolve_code = svr4_in_dynsym_resolve_code;
2268 svr4_so_ops.bfd_open = solib_bfd_open;
2269 svr4_so_ops.lookup_lib_global_symbol = elf_lookup_lib_symbol;
2270 svr4_so_ops.same = svr4_same;
2271 svr4_so_ops.keep_data_in_core = svr4_keep_data_in_core;
2272}
This page took 0.033133 seconds and 4 git commands to generate.