1 /* Handle SVR4 shared libraries for GDB, the GNU Debugger.
2
3 Copyright (C) 1990-1996, 1998-2001, 2003-2012 Free Software
4 Foundation, Inc.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include "defs.h"
22
23 #include "elf/external.h"
24 #include "elf/common.h"
25 #include "elf/mips.h"
26
27 #include "symtab.h"
28 #include "bfd.h"
29 #include "symfile.h"
30 #include "objfiles.h"
31 #include "gdbcore.h"
32 #include "target.h"
33 #include "inferior.h"
34 #include "regcache.h"
35 #include "gdbthread.h"
36 #include "observer.h"
37
38 #include "gdb_assert.h"
39
40 #include "solist.h"
41 #include "solib.h"
42 #include "solib-svr4.h"
43
44 #include "bfd-target.h"
45 #include "elf-bfd.h"
46 #include "exec.h"
47 #include "auxv.h"
48 #include "exceptions.h"
49 #include "gdb_bfd.h"
50
51 static struct link_map_offsets *svr4_fetch_link_map_offsets (void);
52 static int svr4_have_link_map_offsets (void);
53 static void svr4_relocate_main_executable (void);
54
55 /* Link map info to include in an allocated so_list entry. */
56
57 struct lm_info
58 {
59 /* Amount by which addresses in the binary should be relocated to
60 match the inferior. The direct inferior value is L_ADDR_INFERIOR.
61 When prelinking is involved and the prelink base address changes,
62 we may need a different offset - the recomputed offset is in L_ADDR.
63 It is commonly the same value. It is cached as we want to warn about
64 the difference and compute it only once. L_ADDR is valid
65 iff L_ADDR_P. */
66 CORE_ADDR l_addr, l_addr_inferior;
67 unsigned int l_addr_p : 1;
68
69 /* The target location of lm. */
70 CORE_ADDR lm_addr;
71
72 /* Values read in from inferior's fields of the same name. */
73 CORE_ADDR l_ld, l_next, l_prev, l_name;
74 };
75
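/* The values above mirror fields of the inferior's own `struct link_map'
   entries.  As a reference only, a glibc-style <link.h> declaration looks
   roughly like the sketch below; the exact layout is target-specific, which
   is why it is always read through svr4_fetch_link_map_offsets instead of a
   host-side declaration.  */
#if 0
struct link_map
  {
    ElfW(Addr) l_addr;		/* Difference between the address in the ELF
				   file and the address in memory.  */
    char *l_name;		/* Absolute pathname where object was found.  */
    ElfW(Dyn) *l_ld;		/* Dynamic section of the shared object.  */
    struct link_map *l_next, *l_prev;	/* Chain of loaded objects.  */
  };
#endif
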
76 /* On SVR4 systems, a list of symbols in the dynamic linker where
77 GDB can try to place a breakpoint to monitor shared library
78 events.
79
80 If none of these symbols are found, or other errors occur, then
81 SVR4 systems will fall back to using a symbol as the "startup
82 mapping complete" breakpoint address. */
83
84 static const char * const solib_break_names[] =
85 {
86 "r_debug_state",
87 "_r_debug_state",
88 "_dl_debug_state",
89 "rtld_db_dlactivity",
90 "__dl_rtld_db_dlactivity",
91 "_rtld_debug_state",
92
93 NULL
94 };
95
96 static const char * const bkpt_names[] =
97 {
98 "_start",
99 "__start",
100 "main",
101 NULL
102 };
103
104 static const char * const main_name_list[] =
105 {
106 "main_$main",
107 NULL
108 };
109
110 /* Return non-zero if GDB_SO_NAME and INFERIOR_SO_NAME represent
111 the same shared library. */
112
113 static int
114 svr4_same_1 (const char *gdb_so_name, const char *inferior_so_name)
115 {
116 if (strcmp (gdb_so_name, inferior_so_name) == 0)
117 return 1;
118
119 /* On Solaris, when starting the inferior we think that the dynamic linker
120 is /usr/lib/ld.so.1, but later on, the table of loaded shared libraries
121 contains /lib/ld.so.1. Sometimes one file is a link to the other, and
122 sometimes they have identical content but are not linked to each
123 other. We don't restrict this check to Solaris, but the chances
124 of running into this situation elsewhere are very low. */
125 if (strcmp (gdb_so_name, "/usr/lib/ld.so.1") == 0
126 && strcmp (inferior_so_name, "/lib/ld.so.1") == 0)
127 return 1;
128
129 /* Similarly, we observed the same issue with sparc64, but with
130 different locations. */
131 if (strcmp (gdb_so_name, "/usr/lib/sparcv9/ld.so.1") == 0
132 && strcmp (inferior_so_name, "/lib/sparcv9/ld.so.1") == 0)
133 return 1;
134
135 return 0;
136 }
137
138 static int
139 svr4_same (struct so_list *gdb, struct so_list *inferior)
140 {
141 return (svr4_same_1 (gdb->so_original_name, inferior->so_original_name));
142 }
143
144 static struct lm_info *
145 lm_info_read (CORE_ADDR lm_addr)
146 {
147 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
148 gdb_byte *lm;
149 struct lm_info *lm_info;
150 struct cleanup *back_to;
151
152 lm = xmalloc (lmo->link_map_size);
153 back_to = make_cleanup (xfree, lm);
154
155 if (target_read_memory (lm_addr, lm, lmo->link_map_size) != 0)
156 {
157 warning (_("Error reading shared library list entry at %s"),
158 paddress (target_gdbarch (), lm_addr));
159 lm_info = NULL;
160 }
161 else
162 {
163 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
164
165 lm_info = xzalloc (sizeof (*lm_info));
166 lm_info->lm_addr = lm_addr;
167
168 lm_info->l_addr_inferior = extract_typed_address (&lm[lmo->l_addr_offset],
169 ptr_type);
170 lm_info->l_ld = extract_typed_address (&lm[lmo->l_ld_offset], ptr_type);
171 lm_info->l_next = extract_typed_address (&lm[lmo->l_next_offset],
172 ptr_type);
173 lm_info->l_prev = extract_typed_address (&lm[lmo->l_prev_offset],
174 ptr_type);
175 lm_info->l_name = extract_typed_address (&lm[lmo->l_name_offset],
176 ptr_type);
177 }
178
179 do_cleanups (back_to);
180
181 return lm_info;
182 }
183
184 static int
185 has_lm_dynamic_from_link_map (void)
186 {
187 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
188
189 return lmo->l_ld_offset >= 0;
190 }
191
192 static CORE_ADDR
193 lm_addr_check (struct so_list *so, bfd *abfd)
194 {
195 if (!so->lm_info->l_addr_p)
196 {
197 struct bfd_section *dyninfo_sect;
198 CORE_ADDR l_addr, l_dynaddr, dynaddr;
199
200 l_addr = so->lm_info->l_addr_inferior;
201
202 if (! abfd || ! has_lm_dynamic_from_link_map ())
203 goto set_addr;
204
205 l_dynaddr = so->lm_info->l_ld;
206
207 dyninfo_sect = bfd_get_section_by_name (abfd, ".dynamic");
208 if (dyninfo_sect == NULL)
209 goto set_addr;
210
211 dynaddr = bfd_section_vma (abfd, dyninfo_sect);
212
213 if (dynaddr + l_addr != l_dynaddr)
214 {
215 CORE_ADDR align = 0x1000;
216 CORE_ADDR minpagesize = align;
217
218 if (bfd_get_flavour (abfd) == bfd_target_elf_flavour)
219 {
220 Elf_Internal_Ehdr *ehdr = elf_tdata (abfd)->elf_header;
221 Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
222 int i;
223
224 align = 1;
225
226 for (i = 0; i < ehdr->e_phnum; i++)
227 if (phdr[i].p_type == PT_LOAD && phdr[i].p_align > align)
228 align = phdr[i].p_align;
229
230 minpagesize = get_elf_backend_data (abfd)->minpagesize;
231 }
232
233 /* Turn it into a mask. */
234 align--;
235
236 /* If the changes match the alignment requirements, we
237 assume we're using a core file that was generated by the
238 same binary, just prelinked with a different base offset.
239 If it doesn't match, we may have a different binary, the
240 same binary with the dynamic table loaded at an unrelated
241 location, or anything, really. To avoid regressions,
242 don't adjust the base offset in the latter case, although
243 odds are that, if things really changed, debugging won't
244 quite work.
245
246 One could rather expect the condition
247 ((l_addr & align) == 0 && ((l_dynaddr - dynaddr) & align) == 0)
248 but the one below is relaxed for PPC. The PPC kernel supports
249 either 4k or 64k page sizes. To be prepared for 64k pages,
250 PPC ELF files are built using an alignment requirement of 64k.
251 However, when running on a kernel supporting 4k pages, the memory
252 mapping of the library may not actually happen on a 64k boundary!
253
254 (In the usual case where (l_addr & align) == 0, this check is
255 equivalent to the possibly expected check above.)
256
257 Even on PPC it must be zero-aligned at least for MINPAGESIZE. */
258
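/* As an illustration (made-up numbers): if the .dynamic section's file
   VMA is 0x3e028 and the inferior's l_ld reads 0x7ffff7a3e028, the
   candidate displacement computed below is 0x7ffff7a00000, which passes
   the minpagesize (e.g. 0x1000) alignment check and is accepted.  */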
259 l_addr = l_dynaddr - dynaddr;
260
261 if ((l_addr & (minpagesize - 1)) == 0
262 && (l_addr & align) == ((l_dynaddr - dynaddr) & align))
263 {
264 if (info_verbose)
265 printf_unfiltered (_("Using PIC (Position Independent Code) "
266 "prelink displacement %s for \"%s\".\n"),
267 paddress (target_gdbarch (), l_addr),
268 so->so_name);
269 }
270 else
271 {
272 /* There is no way to verify that the library file matches. prelink
273 can, while prelinking an unprelinked file (or unprelinking a
274 prelinked file), shift the DYNAMIC segment by an arbitrary offset
275 without any page size alignment. There is also no way to retrieve
276 the ELF header and/or Program Headers for even a limited
277 verification that they match. One could do a verification
278 of the DYNAMIC segment itself. Still, the found address is the best
279 one GDB could find. */
280
281 warning (_(".dynamic section for \"%s\" "
282 "is not at the expected address "
283 "(wrong library or version mismatch?)"), so->so_name);
284 }
285 }
286
287 set_addr:
288 so->lm_info->l_addr = l_addr;
289 so->lm_info->l_addr_p = 1;
290 }
291
292 return so->lm_info->l_addr;
293 }
294
295 /* Per pspace SVR4 specific data. */
296
297 struct svr4_info
298 {
299 CORE_ADDR debug_base; /* Base of dynamic linker structures. */
300
301 /* Validity flag for debug_loader_offset. */
302 int debug_loader_offset_p;
303
304 /* Load address for the dynamic linker, inferred. */
305 CORE_ADDR debug_loader_offset;
306
307 /* Name of the dynamic linker, valid if debug_loader_offset_p. */
308 char *debug_loader_name;
309
310 /* Load map address for the main executable. */
311 CORE_ADDR main_lm_addr;
312
313 CORE_ADDR interp_text_sect_low;
314 CORE_ADDR interp_text_sect_high;
315 CORE_ADDR interp_plt_sect_low;
316 CORE_ADDR interp_plt_sect_high;
317 };
318
319 /* Per-program-space data key. */
320 static const struct program_space_data *solib_svr4_pspace_data;
321
322 static void
323 svr4_pspace_data_cleanup (struct program_space *pspace, void *arg)
324 {
325 struct svr4_info *info;
326
327 info = program_space_data (pspace, solib_svr4_pspace_data);
328 xfree (info);
329 }
330
331 /* Get the current svr4 data. If none is found yet, add it now. This
332 function always returns a valid object. */
333
334 static struct svr4_info *
335 get_svr4_info (void)
336 {
337 struct svr4_info *info;
338
339 info = program_space_data (current_program_space, solib_svr4_pspace_data);
340 if (info != NULL)
341 return info;
342
343 info = XZALLOC (struct svr4_info);
344 set_program_space_data (current_program_space, solib_svr4_pspace_data, info);
345 return info;
346 }
347
348 /* Local function prototypes */
349
350 static int match_main (const char *);
351
352 /* Read program header TYPE from inferior memory. The header is found
353 by scanning the OS auxiliary vector.
354
355 If TYPE == -1, return the program headers instead of the contents of
356 one program header.
357
358 Return a pointer to allocated memory holding the program header contents,
359 or NULL on failure. If successful, and unless P_SECT_SIZE is NULL, the
360 size of those contents is returned in *P_SECT_SIZE. Likewise, the target
361 architecture size (32-bit or 64-bit) is returned in *P_ARCH_SIZE. */
362
363 static gdb_byte *
364 read_program_header (int type, int *p_sect_size, int *p_arch_size)
365 {
366 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
367 CORE_ADDR at_phdr, at_phent, at_phnum, pt_phdr = 0;
368 int arch_size, sect_size;
369 CORE_ADDR sect_addr;
370 gdb_byte *buf;
371 int pt_phdr_p = 0;
372
373 /* Get required auxv elements from target. */
374 if (target_auxv_search (&current_target, AT_PHDR, &at_phdr) <= 0)
375 return 0;
376 if (target_auxv_search (&current_target, AT_PHENT, &at_phent) <= 0)
377 return 0;
378 if (target_auxv_search (&current_target, AT_PHNUM, &at_phnum) <= 0)
379 return 0;
380 if (!at_phdr || !at_phnum)
381 return 0;
382
383 /* Determine ELF architecture type. */
384 if (at_phent == sizeof (Elf32_External_Phdr))
385 arch_size = 32;
386 else if (at_phent == sizeof (Elf64_External_Phdr))
387 arch_size = 64;
388 else
389 return 0;
390
391 /* Find the requested segment. */
392 if (type == -1)
393 {
394 sect_addr = at_phdr;
395 sect_size = at_phent * at_phnum;
396 }
397 else if (arch_size == 32)
398 {
399 Elf32_External_Phdr phdr;
400 int i;
401
402 /* Search for requested PHDR. */
403 for (i = 0; i < at_phnum; i++)
404 {
405 int p_type;
406
407 if (target_read_memory (at_phdr + i * sizeof (phdr),
408 (gdb_byte *)&phdr, sizeof (phdr)))
409 return 0;
410
411 p_type = extract_unsigned_integer ((gdb_byte *) phdr.p_type,
412 4, byte_order);
413
414 if (p_type == PT_PHDR)
415 {
416 pt_phdr_p = 1;
417 pt_phdr = extract_unsigned_integer ((gdb_byte *) phdr.p_vaddr,
418 4, byte_order);
419 }
420
421 if (p_type == type)
422 break;
423 }
424
425 if (i == at_phnum)
426 return 0;
427
428 /* Retrieve address and size. */
429 sect_addr = extract_unsigned_integer ((gdb_byte *)phdr.p_vaddr,
430 4, byte_order);
431 sect_size = extract_unsigned_integer ((gdb_byte *)phdr.p_memsz,
432 4, byte_order);
433 }
434 else
435 {
436 Elf64_External_Phdr phdr;
437 int i;
438
439 /* Search for requested PHDR. */
440 for (i = 0; i < at_phnum; i++)
441 {
442 int p_type;
443
444 if (target_read_memory (at_phdr + i * sizeof (phdr),
445 (gdb_byte *)&phdr, sizeof (phdr)))
446 return 0;
447
448 p_type = extract_unsigned_integer ((gdb_byte *) phdr.p_type,
449 4, byte_order);
450
451 if (p_type == PT_PHDR)
452 {
453 pt_phdr_p = 1;
454 pt_phdr = extract_unsigned_integer ((gdb_byte *) phdr.p_vaddr,
455 8, byte_order);
456 }
457
458 if (p_type == type)
459 break;
460 }
461
462 if (i == at_phnum)
463 return 0;
464
465 /* Retrieve address and size. */
466 sect_addr = extract_unsigned_integer ((gdb_byte *)phdr.p_vaddr,
467 8, byte_order);
468 sect_size = extract_unsigned_integer ((gdb_byte *)phdr.p_memsz,
469 8, byte_order);
470 }
471
472 /* PT_PHDR is optional, but we really need it
473 for PIE to make this work in general. */
474
475 if (pt_phdr_p)
476 {
477 /* AT_PHDR is the real address in memory; PT_PHDR is what the program
478 header says it is. The relocation offset is the difference between the two. */
479 sect_addr = sect_addr + (at_phdr - pt_phdr);
480 }
481
482 /* Read in requested program header. */
483 buf = xmalloc (sect_size);
484 if (target_read_memory (sect_addr, buf, sect_size))
485 {
486 xfree (buf);
487 return NULL;
488 }
489
490 if (p_arch_size)
491 *p_arch_size = arch_size;
492 if (p_sect_size)
493 *p_sect_size = sect_size;
494
495 return buf;
496 }
497
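/* A minimal usage sketch (EXAMPLE_READ_PHDRS is hypothetical and not part
   of GDB; find_program_interpreter below is a real in-tree caller):  */
#if 0
static void
example_read_phdrs (void)
{
  int phdrs_size, arch_size;
  gdb_byte *phdrs = read_program_header (-1, &phdrs_size, &arch_size);

  if (phdrs != NULL)
    {
      /* Inspect the raw program header table read from the inferior.  */
      xfree (phdrs);
    }
}
#endif
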
498
499 /* Return program interpreter string. */
500 static gdb_byte *
501 find_program_interpreter (void)
502 {
503 gdb_byte *buf = NULL;
504
505 /* If we have an exec_bfd, use its section table. */
506 if (exec_bfd
507 && bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
508 {
509 struct bfd_section *interp_sect;
510
511 interp_sect = bfd_get_section_by_name (exec_bfd, ".interp");
512 if (interp_sect != NULL)
513 {
514 int sect_size = bfd_section_size (exec_bfd, interp_sect);
515
516 buf = xmalloc (sect_size);
517 bfd_get_section_contents (exec_bfd, interp_sect, buf, 0, sect_size);
518 }
519 }
520
521 /* If we didn't find it, use the target auxiliary vector. */
522 if (!buf)
523 buf = read_program_header (PT_INTERP, NULL, NULL);
524
525 return buf;
526 }
527
528
529 /* Scan for DYNTAG in the .dynamic section of ABFD. If DYNTAG is found, 1 is
530 returned and, if PTR is non-NULL, *PTR is set to its value. */
531
532 static int
533 scan_dyntag (int dyntag, bfd *abfd, CORE_ADDR *ptr)
534 {
535 int arch_size, step, sect_size;
536 long dyn_tag;
537 CORE_ADDR dyn_ptr, dyn_addr;
538 gdb_byte *bufend, *bufstart, *buf;
539 Elf32_External_Dyn *x_dynp_32;
540 Elf64_External_Dyn *x_dynp_64;
541 struct bfd_section *sect;
542 struct target_section *target_section;
543
544 if (abfd == NULL)
545 return 0;
546
547 if (bfd_get_flavour (abfd) != bfd_target_elf_flavour)
548 return 0;
549
550 arch_size = bfd_get_arch_size (abfd);
551 if (arch_size == -1)
552 return 0;
553
554 /* Find the start address of the .dynamic section. */
555 sect = bfd_get_section_by_name (abfd, ".dynamic");
556 if (sect == NULL)
557 return 0;
558
559 for (target_section = current_target_sections->sections;
560 target_section < current_target_sections->sections_end;
561 target_section++)
562 if (sect == target_section->the_bfd_section)
563 break;
564 if (target_section < current_target_sections->sections_end)
565 dyn_addr = target_section->addr;
566 else
567 {
568 /* ABFD may come from OBJFILE acting only as a symbol file without being
569 loaded into the target (see add_symbol_file_command). In that case,
570 fall back to the file VMA address, without the possibility of
571 having the section relocated to its actual in-memory address. */
572
573 dyn_addr = bfd_section_vma (abfd, sect);
574 }
575
576 /* Read in .dynamic from the BFD. We will get the actual value
577 from memory later. */
578 sect_size = bfd_section_size (abfd, sect);
579 buf = bufstart = alloca (sect_size);
580 if (!bfd_get_section_contents (abfd, sect,
581 buf, 0, sect_size))
582 return 0;
583
584 /* Iterate over BUF and scan for DYNTAG. If found, set PTR and return. */
585 step = (arch_size == 32) ? sizeof (Elf32_External_Dyn)
586 : sizeof (Elf64_External_Dyn);
587 for (bufend = buf + sect_size;
588 buf < bufend;
589 buf += step)
590 {
591 if (arch_size == 32)
592 {
593 x_dynp_32 = (Elf32_External_Dyn *) buf;
594 dyn_tag = bfd_h_get_32 (abfd, (bfd_byte *) x_dynp_32->d_tag);
595 dyn_ptr = bfd_h_get_32 (abfd, (bfd_byte *) x_dynp_32->d_un.d_ptr);
596 }
597 else
598 {
599 x_dynp_64 = (Elf64_External_Dyn *) buf;
600 dyn_tag = bfd_h_get_64 (abfd, (bfd_byte *) x_dynp_64->d_tag);
601 dyn_ptr = bfd_h_get_64 (abfd, (bfd_byte *) x_dynp_64->d_un.d_ptr);
602 }
603 if (dyn_tag == DT_NULL)
604 return 0;
605 if (dyn_tag == dyntag)
606 {
607 /* If requested, try to read the runtime value of this .dynamic
608 entry. */
609 if (ptr)
610 {
611 struct type *ptr_type;
612 gdb_byte ptr_buf[8];
613 CORE_ADDR ptr_addr;
614
615 ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
616 ptr_addr = dyn_addr + (buf - bufstart) + arch_size / 8;
617 if (target_read_memory (ptr_addr, ptr_buf, arch_size / 8) == 0)
618 dyn_ptr = extract_typed_address (ptr_buf, ptr_type);
619 *ptr = dyn_ptr;
620 }
621 return 1;
622 }
623 }
624
625 return 0;
626 }
627
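/* Both the BFD-based scan above and the auxv-based scan below walk raw
   Elf32_External_Dyn / Elf64_External_Dyn byte images.  The internal form
   of each entry is the standard ELF tag/value pair, roughly (64-bit variant
   shown as a sketch):  */
#if 0
typedef struct
{
  Elf64_Sxword d_tag;		/* Entry type, e.g. DT_DEBUG or DT_NULL.  */
  union
  {
    Elf64_Xword d_val;		/* Integer value.  */
    Elf64_Addr d_ptr;		/* Address value; for DT_DEBUG the runtime
				   loader stores the r_debug address here.  */
  } d_un;
} Elf64_Dyn;
#endif
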
628 /* Scan for DYNTAG in .dynamic section of the target's main executable,
629 found by consulting the OS auxiliary vector. If DYNTAG is found, 1 is
630 returned and, if PTR is non-NULL, *PTR is set to its value. */
631
632 static int
633 scan_dyntag_auxv (int dyntag, CORE_ADDR *ptr)
634 {
635 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
636 int sect_size, arch_size, step;
637 long dyn_tag;
638 CORE_ADDR dyn_ptr;
639 gdb_byte *bufend, *bufstart, *buf;
640
641 /* Read in .dynamic section. */
642 buf = bufstart = read_program_header (PT_DYNAMIC, &sect_size, &arch_size);
643 if (!buf)
644 return 0;
645
646 /* Iterate over BUF and scan for DYNTAG. If found, set PTR and return. */
647 step = (arch_size == 32) ? sizeof (Elf32_External_Dyn)
648 : sizeof (Elf64_External_Dyn);
649 for (bufend = buf + sect_size;
650 buf < bufend;
651 buf += step)
652 {
653 if (arch_size == 32)
654 {
655 Elf32_External_Dyn *dynp = (Elf32_External_Dyn *) buf;
656
657 dyn_tag = extract_unsigned_integer ((gdb_byte *) dynp->d_tag,
658 4, byte_order);
659 dyn_ptr = extract_unsigned_integer ((gdb_byte *) dynp->d_un.d_ptr,
660 4, byte_order);
661 }
662 else
663 {
664 Elf64_External_Dyn *dynp = (Elf64_External_Dyn *) buf;
665
666 dyn_tag = extract_unsigned_integer ((gdb_byte *) dynp->d_tag,
667 8, byte_order);
668 dyn_ptr = extract_unsigned_integer ((gdb_byte *) dynp->d_un.d_ptr,
669 8, byte_order);
670 }
671 if (dyn_tag == DT_NULL)
672 break;
673
674 if (dyn_tag == dyntag)
675 {
676 if (ptr)
677 *ptr = dyn_ptr;
678
679 xfree (bufstart);
680 return 1;
681 }
682 }
683
684 xfree (bufstart);
685 return 0;
686 }
687
688 /* Locate the base address of dynamic linker structs for SVR4 elf
689 targets.
690
691 For SVR4 elf targets the address of the dynamic linker's runtime
692 structure is contained within the dynamic info section in the
693 executable file. The dynamic section is also mapped into the
694 inferior address space. Because the runtime loader fills in the
695 real address before starting the inferior, we have to read in the
696 dynamic info section from the inferior address space.
697 If there are any errors while trying to find the address, we
698 silently return 0, otherwise the found address is returned. */
699
700 static CORE_ADDR
701 elf_locate_base (void)
702 {
703 struct minimal_symbol *msymbol;
704 CORE_ADDR dyn_ptr;
705
706 /* Look for DT_MIPS_RLD_MAP first. MIPS executables use this
707 instead of DT_DEBUG, although they sometimes contain an unused
708 DT_DEBUG. */
709 if (scan_dyntag (DT_MIPS_RLD_MAP, exec_bfd, &dyn_ptr)
710 || scan_dyntag_auxv (DT_MIPS_RLD_MAP, &dyn_ptr))
711 {
712 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
713 gdb_byte *pbuf;
714 int pbuf_size = TYPE_LENGTH (ptr_type);
715
716 pbuf = alloca (pbuf_size);
717 /* DT_MIPS_RLD_MAP contains a pointer to the address
718 of the dynamic link structure. */
719 if (target_read_memory (dyn_ptr, pbuf, pbuf_size))
720 return 0;
721 return extract_typed_address (pbuf, ptr_type);
722 }
723
724 /* Find DT_DEBUG. */
725 if (scan_dyntag (DT_DEBUG, exec_bfd, &dyn_ptr)
726 || scan_dyntag_auxv (DT_DEBUG, &dyn_ptr))
727 return dyn_ptr;
728
729 /* This may be a static executable. Look for the symbol
730 conventionally named _r_debug, as a last resort. */
731 msymbol = lookup_minimal_symbol ("_r_debug", NULL, symfile_objfile);
732 if (msymbol != NULL)
733 return SYMBOL_VALUE_ADDRESS (msymbol);
734
735 /* DT_DEBUG entry not found. */
736 return 0;
737 }
738
739 /* Locate the base address of dynamic linker structs.
740
741 For both the SunOS and SVR4 shared library implementations, if the
742 inferior executable has been linked dynamically, there is a single
743 address somewhere in the inferior's data space which is the key to
744 locating all of the dynamic linker's runtime structures. This
745 address is the value of the debug base symbol. The job of this
746 function is to find and return that address, or to return 0 if there
747 is no such address (the executable is statically linked for example).
748
749 For SunOS, the job is almost trivial, since the dynamic linker and
750 all of its structures are statically linked to the executable at
751 link time. Thus the symbol for the address we are looking for has
752 already been added to the minimal symbol table for the executable's
753 objfile at the time the symbol file's symbols were read, and all we
754 have to do is look it up there. Note that we explicitly do NOT want
755 to find the copies in the shared library.
756
757 The SVR4 version is a bit more complicated because the address
758 is contained somewhere in the dynamic info section. We have to go
759 to a lot more work to discover the address of the debug base symbol.
760 Because of this complexity, we cache the value we find and return that
761 value on subsequent invocations. Note there is no copy in the
762 executable symbol tables. */
763
764 static CORE_ADDR
765 locate_base (struct svr4_info *info)
766 {
767 /* Check to see if we have a currently valid address, and if so, avoid
768 doing all this work again and just return the cached address. If
769 we have no cached address, try to locate it in the dynamic info
770 section for ELF executables. There's no point in doing any of this
771 though if we don't have some link map offsets to work with. */
772
773 if (info->debug_base == 0 && svr4_have_link_map_offsets ())
774 info->debug_base = elf_locate_base ();
775 return info->debug_base;
776 }
777
778 /* Find the first element in the inferior's dynamic link map, and
779 return its address in the inferior. Return zero if the address
780 could not be determined.
781
782 FIXME: Perhaps we should validate the info somehow, perhaps by
783 checking r_version for a known version number, or r_state for
784 RT_CONSISTENT. */
785
786 static CORE_ADDR
787 solib_svr4_r_map (struct svr4_info *info)
788 {
789 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
790 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
791 CORE_ADDR addr = 0;
792 volatile struct gdb_exception ex;
793
794 TRY_CATCH (ex, RETURN_MASK_ERROR)
795 {
796 addr = read_memory_typed_address (info->debug_base + lmo->r_map_offset,
797 ptr_type);
798 }
799 exception_print (gdb_stderr, ex);
800 return addr;
801 }
802
803 /* Find r_brk from the inferior's debug base. */
804
805 static CORE_ADDR
806 solib_svr4_r_brk (struct svr4_info *info)
807 {
808 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
809 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
810
811 return read_memory_typed_address (info->debug_base + lmo->r_brk_offset,
812 ptr_type);
813 }
814
815 /* Find the link map for the dynamic linker (if it is not in the
816 normal list of loaded shared objects). */
817
818 static CORE_ADDR
819 solib_svr4_r_ldsomap (struct svr4_info *info)
820 {
821 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
822 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
823 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
824 ULONGEST version;
825
826 /* Check version, and return zero if `struct r_debug' doesn't have
827 the r_ldsomap member. */
828 version
829 = read_memory_unsigned_integer (info->debug_base + lmo->r_version_offset,
830 lmo->r_version_size, byte_order);
831 if (version < 2 || lmo->r_ldsomap_offset == -1)
832 return 0;
833
834 return read_memory_typed_address (info->debug_base + lmo->r_ldsomap_offset,
835 ptr_type);
836 }
837
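/* The r_version, r_map, r_brk and r_ldsomap offsets used above index into
   the inferior's `struct r_debug'.  As a reference only, a glibc-style
   <link.h> declaration looks roughly like the sketch below; the layout is
   target-specific, which is why it is always accessed through
   link_map_offsets rather than a host-side declaration.  Solaris's r_debug
   additionally carries the r_ldsomap member read above (hence the
   r_version >= 2 check).  */
#if 0
struct r_debug
  {
    int r_version;		/* Version number of the protocol.  */
    struct link_map *r_map;	/* Head of the chain of loaded objects.  */
    ElfW(Addr) r_brk;		/* Address the debugger breakpoints to be
				   notified of map additions and removals.  */
    enum { RT_CONSISTENT, RT_ADD, RT_DELETE } r_state;
    ElfW(Addr) r_ldbase;	/* Base address the linker is loaded at.  */
  };
#endif
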
838 /* On Solaris systems with some versions of the dynamic linker,
839 ld.so's l_name pointer points to the SONAME in the string table
840 rather than into writable memory. So that GDB can find shared
841 libraries when loading a core file generated by gcore, ensure that
842 memory areas containing the l_name string are saved in the core
843 file. */
844
845 static int
846 svr4_keep_data_in_core (CORE_ADDR vaddr, unsigned long size)
847 {
848 struct svr4_info *info;
849 CORE_ADDR ldsomap;
850 struct so_list *new;
851 struct cleanup *old_chain;
852 struct link_map_offsets *lmo;
853 CORE_ADDR name_lm;
854
855 info = get_svr4_info ();
856
857 info->debug_base = 0;
858 locate_base (info);
859 if (!info->debug_base)
860 return 0;
861
862 ldsomap = solib_svr4_r_ldsomap (info);
863 if (!ldsomap)
864 return 0;
865
866 lmo = svr4_fetch_link_map_offsets ();
867 new = XZALLOC (struct so_list);
868 old_chain = make_cleanup (xfree, new);
869 new->lm_info = lm_info_read (ldsomap);
870 make_cleanup (xfree, new->lm_info);
871 name_lm = new->lm_info ? new->lm_info->l_name : 0;
872 do_cleanups (old_chain);
873
874 return (name_lm >= vaddr && name_lm < vaddr + size);
875 }
876
877 /* Implement the "open_symbol_file_object" target_so_ops method.
878
879 If no open symbol file, attempt to locate and open the main symbol
880 file. On SVR4 systems, this is the first link map entry. If its
881 name is here, we can open it. Useful when attaching to a process
882 without first loading its symbol file. */
883
884 static int
885 open_symbol_file_object (void *from_ttyp)
886 {
887 CORE_ADDR lm, l_name;
888 char *filename;
889 int errcode;
890 int from_tty = *(int *)from_ttyp;
891 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
892 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
893 int l_name_size = TYPE_LENGTH (ptr_type);
894 gdb_byte *l_name_buf = xmalloc (l_name_size);
895 struct cleanup *cleanups = make_cleanup (xfree, l_name_buf);
896 struct svr4_info *info = get_svr4_info ();
897
898 if (symfile_objfile)
899 if (!query (_("Attempt to reload symbols from process? ")))
900 {
901 do_cleanups (cleanups);
902 return 0;
903 }
904
905 /* Always locate the debug struct, in case it has moved. */
906 info->debug_base = 0;
907 if (locate_base (info) == 0)
908 {
909 do_cleanups (cleanups);
910 return 0; /* failed somehow... */
911 }
912
913 /* First link map member should be the executable. */
914 lm = solib_svr4_r_map (info);
915 if (lm == 0)
916 {
917 do_cleanups (cleanups);
918 return 0; /* failed somehow... */
919 }
920
921 /* Read address of name from target memory to GDB. */
922 read_memory (lm + lmo->l_name_offset, l_name_buf, l_name_size);
923
924 /* Convert the address to host format. */
925 l_name = extract_typed_address (l_name_buf, ptr_type);
926
927 if (l_name == 0)
928 {
929 do_cleanups (cleanups);
930 return 0; /* No filename. */
931 }
932
933 /* Now fetch the filename from target memory. */
934 target_read_string (l_name, &filename, SO_NAME_MAX_PATH_SIZE - 1, &errcode);
935 make_cleanup (xfree, filename);
936
937 if (errcode)
938 {
939 warning (_("failed to read exec filename from attached file: %s"),
940 safe_strerror (errcode));
941 do_cleanups (cleanups);
942 return 0;
943 }
944
945 /* Have a pathname: read the symbol file. */
946 symbol_file_add_main (filename, from_tty);
947
948 do_cleanups (cleanups);
949 return 1;
950 }
951
952 /* Data exchange structure for the XML parser as returned by
953 svr4_current_sos_via_xfer_libraries. */
954
955 struct svr4_library_list
956 {
957 struct so_list *head, **tailp;
958
959 /* Inferior address of struct link_map used for the main executable. It is
960 NULL if not known. */
961 CORE_ADDR main_lm;
962 };
963
964 /* Implementation for target_so_ops.free_so. */
965
966 static void
967 svr4_free_so (struct so_list *so)
968 {
969 xfree (so->lm_info);
970 }
971
972 /* Free so_list built so far (called via cleanup). */
973
974 static void
975 svr4_free_library_list (void *p_list)
976 {
977 struct so_list *list = *(struct so_list **) p_list;
978
979 while (list != NULL)
980 {
981 struct so_list *next = list->next;
982
983 free_so (list);
984 list = next;
985 }
986 }
987
988 #ifdef HAVE_LIBEXPAT
989
990 #include "xml-support.h"
991
992 /* Handle the start of a <library> element. Note: new elements are added
993 at the tail of the list, keeping the list in order. */
994
995 static void
996 library_list_start_library (struct gdb_xml_parser *parser,
997 const struct gdb_xml_element *element,
998 void *user_data, VEC(gdb_xml_value_s) *attributes)
999 {
1000 struct svr4_library_list *list = user_data;
1001 const char *name = xml_find_attribute (attributes, "name")->value;
1002 ULONGEST *lmp = xml_find_attribute (attributes, "lm")->value;
1003 ULONGEST *l_addrp = xml_find_attribute (attributes, "l_addr")->value;
1004 ULONGEST *l_ldp = xml_find_attribute (attributes, "l_ld")->value;
1005 struct so_list *new_elem;
1006
1007 new_elem = XZALLOC (struct so_list);
1008 new_elem->lm_info = XZALLOC (struct lm_info);
1009 new_elem->lm_info->lm_addr = *lmp;
1010 new_elem->lm_info->l_addr_inferior = *l_addrp;
1011 new_elem->lm_info->l_ld = *l_ldp;
1012
1013 strncpy (new_elem->so_name, name, sizeof (new_elem->so_name) - 1);
1014 new_elem->so_name[sizeof (new_elem->so_name) - 1] = 0;
1015 strcpy (new_elem->so_original_name, new_elem->so_name);
1016
1017 *list->tailp = new_elem;
1018 list->tailp = &new_elem->next;
1019 }
1020
1021 /* Handle the start of a <library-list-svr4> element. */
1022
1023 static void
1024 svr4_library_list_start_list (struct gdb_xml_parser *parser,
1025 const struct gdb_xml_element *element,
1026 void *user_data, VEC(gdb_xml_value_s) *attributes)
1027 {
1028 struct svr4_library_list *list = user_data;
1029 const char *version = xml_find_attribute (attributes, "version")->value;
1030 struct gdb_xml_value *main_lm = xml_find_attribute (attributes, "main-lm");
1031
1032 if (strcmp (version, "1.0") != 0)
1033 gdb_xml_error (parser,
1034 _("SVR4 Library list has unsupported version \"%s\""),
1035 version);
1036
1037 if (main_lm)
1038 list->main_lm = *(ULONGEST *) main_lm->value;
1039 }
1040
1041 /* The allowed elements and attributes for an XML library list.
1042 The root element is a <library-list>. */
1043
1044 static const struct gdb_xml_attribute svr4_library_attributes[] =
1045 {
1046 { "name", GDB_XML_AF_NONE, NULL, NULL },
1047 { "lm", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1048 { "l_addr", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1049 { "l_ld", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1050 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1051 };
1052
1053 static const struct gdb_xml_element svr4_library_list_children[] =
1054 {
1055 {
1056 "library", svr4_library_attributes, NULL,
1057 GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL,
1058 library_list_start_library, NULL
1059 },
1060 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1061 };
1062
1063 static const struct gdb_xml_attribute svr4_library_list_attributes[] =
1064 {
1065 { "version", GDB_XML_AF_NONE, NULL, NULL },
1066 { "main-lm", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
1067 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1068 };
1069
1070 static const struct gdb_xml_element svr4_library_list_elements[] =
1071 {
1072 { "library-list-svr4", svr4_library_list_attributes, svr4_library_list_children,
1073 GDB_XML_EF_NONE, svr4_library_list_start_list, NULL },
1074 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1075 };
1076
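/* For illustration, a document accepted by this parser (as produced for
   qXfer:libraries-svr4:read) would look roughly like the following; the
   addresses are made-up example values:

     <library-list-svr4 version="1.0" main-lm="0x7ffff7ffe190">
       <library name="/lib64/libc.so.6" lm="0x7ffff7ffe700"
                l_addr="0x7ffff7a1a000" l_ld="0x7ffff7dbdb80"/>
     </library-list-svr4>
*/
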
1077 /* Parse a qXfer:libraries-svr4:read document into *LIST.
1078
1079 Return 0 if the document could not be parsed; *LIST is not modified in
1080 that case. Return 1 if *LIST contains the library list, which may be
1081 empty; the caller is responsible for freeing all its entries. */
1082
1083 static int
1084 svr4_parse_libraries (const char *document, struct svr4_library_list *list)
1085 {
1086 struct cleanup *back_to = make_cleanup (svr4_free_library_list,
1087 &list->head);
1088
1089 memset (list, 0, sizeof (*list));
1090 list->tailp = &list->head;
1091 if (gdb_xml_parse_quick (_("target library list"), "library-list.dtd",
1092 svr4_library_list_elements, document, list) == 0)
1093 {
1094 /* Parsed successfully, keep the result. */
1095 discard_cleanups (back_to);
1096 return 1;
1097 }
1098
1099 do_cleanups (back_to);
1100 return 0;
1101 }
1102
1103 /* Attempt to get the so_list from the target via the
1104 qXfer:libraries-svr4:read packet. Return 0 if the packet is not
1105 supported; *LIST is not modified in that case. Return 1 if *LIST
1106 contains the library list, which may be empty; the caller is
1107 responsible for freeing all its entries. */
1108
1109 static int
1110 svr4_current_sos_via_xfer_libraries (struct svr4_library_list *list)
1111 {
1112 char *svr4_library_document;
1113 int result;
1114 struct cleanup *back_to;
1115
1116 /* Fetch the list of shared libraries. */
1117 svr4_library_document = target_read_stralloc (&current_target,
1118 TARGET_OBJECT_LIBRARIES_SVR4,
1119 NULL);
1120 if (svr4_library_document == NULL)
1121 return 0;
1122
1123 back_to = make_cleanup (xfree, svr4_library_document);
1124 result = svr4_parse_libraries (svr4_library_document, list);
1125 do_cleanups (back_to);
1126
1127 return result;
1128 }
1129
1130 #else
1131
1132 static int
1133 svr4_current_sos_via_xfer_libraries (struct svr4_library_list *list)
1134 {
1135 return 0;
1136 }
1137
1138 #endif
1139
1140 /* If no shared library information is available from the dynamic
1141 linker, build a fallback list from other sources. */
1142
1143 static struct so_list *
1144 svr4_default_sos (void)
1145 {
1146 struct svr4_info *info = get_svr4_info ();
1147 struct so_list *new;
1148
1149 if (!info->debug_loader_offset_p)
1150 return NULL;
1151
1152 new = XZALLOC (struct so_list);
1153
1154 new->lm_info = xzalloc (sizeof (struct lm_info));
1155
1156 /* Nothing will ever check the other fields if we set l_addr_p. */
1157 new->lm_info->l_addr = info->debug_loader_offset;
1158 new->lm_info->l_addr_p = 1;
1159
1160 strncpy (new->so_name, info->debug_loader_name, SO_NAME_MAX_PATH_SIZE - 1);
1161 new->so_name[SO_NAME_MAX_PATH_SIZE - 1] = '\0';
1162 strcpy (new->so_original_name, new->so_name);
1163
1164 return new;
1165 }
1166
1167 /* Read the whole inferior libraries chain starting at address LM. Add the
1168 entries to the tail referenced by LINK_PTR_PTR. Ignore the first entry if
1169 IGNORE_FIRST and set global MAIN_LM_ADDR according to it. */
1170
1171 static void
1172 svr4_read_so_list (CORE_ADDR lm, struct so_list ***link_ptr_ptr,
1173 int ignore_first)
1174 {
1175 CORE_ADDR prev_lm = 0, next_lm;
1176
1177 for (; lm != 0; prev_lm = lm, lm = next_lm)
1178 {
1179 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
1180 struct so_list *new;
1181 struct cleanup *old_chain;
1182 int errcode;
1183 char *buffer;
1184
1185 new = XZALLOC (struct so_list);
1186 old_chain = make_cleanup_free_so (new);
1187
1188 new->lm_info = lm_info_read (lm);
1189 if (new->lm_info == NULL)
1190 {
1191 do_cleanups (old_chain);
1192 break;
1193 }
1194
1195 next_lm = new->lm_info->l_next;
1196
1197 if (new->lm_info->l_prev != prev_lm)
1198 {
1199 warning (_("Corrupted shared library list: %s != %s"),
1200 paddress (target_gdbarch (), prev_lm),
1201 paddress (target_gdbarch (), new->lm_info->l_prev));
1202 do_cleanups (old_chain);
1203 break;
1204 }
1205
1206 /* For SVR4 versions, the first entry in the link map is for the
1207 inferior executable, so we must ignore it. For some versions of
1208 SVR4, it has no name. For others (Solaris 2.3 for example), it
1209 does have a name, so we can no longer use a missing name to
1210 decide when to ignore it. */
1211 if (ignore_first && new->lm_info->l_prev == 0)
1212 {
1213 struct svr4_info *info = get_svr4_info ();
1214
1215 info->main_lm_addr = new->lm_info->lm_addr;
1216 do_cleanups (old_chain);
1217 continue;
1218 }
1219
1220 /* Extract this shared object's name. */
1221 target_read_string (new->lm_info->l_name, &buffer,
1222 SO_NAME_MAX_PATH_SIZE - 1, &errcode);
1223 if (errcode != 0)
1224 {
1225 warning (_("Can't read pathname for load map: %s."),
1226 safe_strerror (errcode));
1227 do_cleanups (old_chain);
1228 continue;
1229 }
1230
1231 strncpy (new->so_name, buffer, SO_NAME_MAX_PATH_SIZE - 1);
1232 new->so_name[SO_NAME_MAX_PATH_SIZE - 1] = '\0';
1233 strcpy (new->so_original_name, new->so_name);
1234 xfree (buffer);
1235
1236 /* If this entry has no name, or its name matches the name
1237 for the main executable, don't include it in the list. */
1238 if (! new->so_name[0] || match_main (new->so_name))
1239 {
1240 do_cleanups (old_chain);
1241 continue;
1242 }
1243
1244 discard_cleanups (old_chain);
1245 new->next = 0;
1246 **link_ptr_ptr = new;
1247 *link_ptr_ptr = &new->next;
1248 }
1249 }
1250
1251 /* Implement the "current_sos" target_so_ops method. */
1252
1253 static struct so_list *
1254 svr4_current_sos (void)
1255 {
1256 CORE_ADDR lm;
1257 struct so_list *head = NULL;
1258 struct so_list **link_ptr = &head;
1259 struct svr4_info *info;
1260 struct cleanup *back_to;
1261 int ignore_first;
1262 struct svr4_library_list library_list;
1263
1264 /* Fall back to manual examination of the target if the packet is not
1265 supported or gdbserver failed to find DT_DEBUG. gdb.server/solib-list.exp
1266 tests a case where gdbserver cannot find the shared libraries list while
1267 GDB itself is able to find it via SYMFILE_OBJFILE.
1268
1269 Unfortunately statically linked inferiors will also fall back through this
1270 suboptimal code path. */
1271
1272 if (svr4_current_sos_via_xfer_libraries (&library_list))
1273 {
1274 if (library_list.main_lm)
1275 {
1276 info = get_svr4_info ();
1277 info->main_lm_addr = library_list.main_lm;
1278 }
1279
1280 return library_list.head ? library_list.head : svr4_default_sos ();
1281 }
1282
1283 info = get_svr4_info ();
1284
1285 /* Always locate the debug struct, in case it has moved. */
1286 info->debug_base = 0;
1287 locate_base (info);
1288
1289 /* If we can't find the dynamic linker's base structure, this
1290 must not be a dynamically linked executable. Hmm. */
1291 if (! info->debug_base)
1292 return svr4_default_sos ();
1293
1294 /* Assume that everything is a library if the dynamic loader was loaded
1295 late by a static executable. */
1296 if (exec_bfd && bfd_get_section_by_name (exec_bfd, ".dynamic") == NULL)
1297 ignore_first = 0;
1298 else
1299 ignore_first = 1;
1300
1301 back_to = make_cleanup (svr4_free_library_list, &head);
1302
1303 /* Walk the inferior's link map list, and build our list of
1304 `struct so_list' nodes. */
1305 lm = solib_svr4_r_map (info);
1306 if (lm)
1307 svr4_read_so_list (lm, &link_ptr, ignore_first);
1308
1309 /* On Solaris, the dynamic linker is not in the normal list of
1310 shared objects, so make sure we pick it up too. Having
1311 symbol information for the dynamic linker is quite crucial
1312 for skipping dynamic linker resolver code. */
1313 lm = solib_svr4_r_ldsomap (info);
1314 if (lm)
1315 svr4_read_so_list (lm, &link_ptr, 0);
1316
1317 discard_cleanups (back_to);
1318
1319 if (head == NULL)
1320 return svr4_default_sos ();
1321
1322 return head;
1323 }
1324
1325 /* Get the address of the link_map for a given OBJFILE. */
1326
1327 CORE_ADDR
1328 svr4_fetch_objfile_link_map (struct objfile *objfile)
1329 {
1330 struct so_list *so;
1331 struct svr4_info *info = get_svr4_info ();
1332
1333 /* Cause svr4_current_sos() to be run if it hasn't been already. */
1334 if (info->main_lm_addr == 0)
1335 solib_add (NULL, 0, &current_target, auto_solib_add);
1336
1337 /* svr4_current_sos() will set main_lm_addr for the main executable. */
1338 if (objfile == symfile_objfile)
1339 return info->main_lm_addr;
1340
1341 /* The other link map addresses may be found by examining the list
1342 of shared libraries. */
1343 for (so = master_so_list (); so; so = so->next)
1344 if (so->objfile == objfile)
1345 return so->lm_info->lm_addr;
1346
1347 /* Not found! */
1348 return 0;
1349 }
1350
1351 /* On some systems, the only way to recognize the link map entry for
1352 the main executable file is by looking at its name. Return
1353 non-zero iff SONAME matches one of the known main executable names. */
1354
1355 static int
1356 match_main (const char *soname)
1357 {
1358 const char * const *mainp;
1359
1360 for (mainp = main_name_list; *mainp != NULL; mainp++)
1361 {
1362 if (strcmp (soname, *mainp) == 0)
1363 return (1);
1364 }
1365
1366 return (0);
1367 }
1368
1369 /* Return 1 if PC lies in the dynamic symbol resolution code of the
1370 SVR4 run time loader. */
1371
1372 int
1373 svr4_in_dynsym_resolve_code (CORE_ADDR pc)
1374 {
1375 struct svr4_info *info = get_svr4_info ();
1376
1377 return ((pc >= info->interp_text_sect_low
1378 && pc < info->interp_text_sect_high)
1379 || (pc >= info->interp_plt_sect_low
1380 && pc < info->interp_plt_sect_high)
1381 || in_plt_section (pc, NULL)
1382 || in_gnu_ifunc_stub (pc));
1383 }
1384
1385 /* Given an executable's ABFD and target, compute the entry-point
1386 address. */
1387
1388 static CORE_ADDR
1389 exec_entry_point (struct bfd *abfd, struct target_ops *targ)
1390 {
1391 /* KevinB wrote ... for most targets, the address returned by
1392 bfd_get_start_address() is the entry point for the start
1393 function. But, for some targets, bfd_get_start_address() returns
1394 the address of a function descriptor from which the entry point
1395 address may be extracted. This address is extracted by
1396 gdbarch_convert_from_func_ptr_addr(). The method
1397 gdbarch_convert_from_func_ptr_addr() is merely the identity
1398 function for targets which don't use function descriptors. */
1399 return gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
1400 bfd_get_start_address (abfd),
1401 targ);
1402 }
1403
1404 /* Helper function for gdb_bfd_lookup_symbol. */
1405
1406 static int
1407 cmp_name_and_sec_flags (asymbol *sym, void *data)
1408 {
1409 return (strcmp (sym->name, (const char *) data) == 0
1410 && (sym->section->flags & (SEC_CODE | SEC_DATA)) != 0);
1411 }
1412 /* Arrange for dynamic linker to hit breakpoint.
1413
1414 Both the SunOS and the SVR4 dynamic linkers have, as part of their
1415 debugger interface, support for arranging for the inferior to hit
1416 a breakpoint after mapping in the shared libraries. This function
1417 enables that breakpoint.
1418
1419 For SunOS, there is a special flag location (in_debugger) which we
1420 set to 1. When the dynamic linker sees this flag set, it will set
1421 a breakpoint at a location known only to itself, after saving the
1422 original contents of that place and the breakpoint address itself,
1423 in its own internal structures. When we resume the inferior, it
1424 will eventually take a SIGTRAP when it runs into the breakpoint.
1425 We handle this (in a different place) by restoring the contents of
1426 the breakpointed location (which is only known after it stops),
1427 chasing around to locate the shared libraries that have been
1428 loaded, then resuming.
1429
1430 For SVR4, the debugger interface structure contains a member (r_brk)
1431 which is statically initialized at the time the shared library is
1432 built, to the offset of a function (_r_debug_state) which is
1433 guaranteed to be called once before mapping in a library, and again when
1434 the mapping is complete. At the time we are examining this member,
1435 it contains only the unrelocated offset of the function, so we have
1436 to do our own relocation. Later, when the dynamic linker actually
1437 runs, it relocates r_brk to be the actual address of _r_debug_state().
1438
1439 The debugger interface structure also contains an enumeration which
1440 is set to either RT_ADD or RT_DELETE prior to changing the mapping,
1441 depending upon whether or not the library is being mapped or unmapped,
1442 and then set to RT_CONSISTENT after the library is mapped/unmapped. */
1443
1444 static int
1445 enable_break (struct svr4_info *info, int from_tty)
1446 {
1447 struct minimal_symbol *msymbol;
1448 const char * const *bkpt_namep;
1449 asection *interp_sect;
1450 gdb_byte *interp_name;
1451 CORE_ADDR sym_addr;
1452
1453 info->interp_text_sect_low = info->interp_text_sect_high = 0;
1454 info->interp_plt_sect_low = info->interp_plt_sect_high = 0;
1455
1456 /* If we already have a shared library list in the target, and
1457 r_debug contains r_brk, set the breakpoint there - this should
1458 mean r_brk has already been relocated. Assume the dynamic linker
1459 is the object containing r_brk. */
1460
1461 solib_add (NULL, from_tty, &current_target, auto_solib_add);
1462 sym_addr = 0;
1463 if (info->debug_base && solib_svr4_r_map (info) != 0)
1464 sym_addr = solib_svr4_r_brk (info);
1465
1466 if (sym_addr != 0)
1467 {
1468 struct obj_section *os;
1469
1470 sym_addr = gdbarch_addr_bits_remove
1471 (target_gdbarch (), gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
1472 sym_addr,
1473 &current_target));
1474
1475 /* On at least some versions of Solaris there's a dynamic relocation
1476 on _r_debug.r_brk and SYM_ADDR may not be relocated yet, e.g., if
1477 we get control before the dynamic linker has self-relocated.
1478 Check if SYM_ADDR is in a known section; if it is, assume we can
1479 trust its value. This is just a heuristic though; it could go away
1480 or be replaced if it's getting in the way.
1481
1482 On ARM we need to know whether the ISA of rtld_db_dlactivity (or
1483 however it's spelled in your particular system) is ARM or Thumb.
1484 That knowledge is encoded in the address: if it's Thumb, the low bit
1485 is 1. However, we've stripped that info above and it's not clear
1486 what all the consequences are of passing a non-addr_bits_remove'd
1487 address to create_solib_event_breakpoint. The call to
1488 find_pc_section verifies we know about the address and have some
1489 hope of computing the right kind of breakpoint to use (via
1490 symbol info). It does mean that GDB needs to be pointed at a
1491 non-stripped version of the dynamic linker in order to obtain
1492 information it already knows about. Sigh. */
1493
1494 os = find_pc_section (sym_addr);
1495 if (os != NULL)
1496 {
1497 /* Record the relocated start and end address of the dynamic linker
1498 text and plt section for svr4_in_dynsym_resolve_code. */
1499 bfd *tmp_bfd;
1500 CORE_ADDR load_addr;
1501
1502 tmp_bfd = os->objfile->obfd;
1503 load_addr = ANOFFSET (os->objfile->section_offsets,
1504 os->objfile->sect_index_text);
1505
1506 interp_sect = bfd_get_section_by_name (tmp_bfd, ".text");
1507 if (interp_sect)
1508 {
1509 info->interp_text_sect_low =
1510 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
1511 info->interp_text_sect_high =
1512 info->interp_text_sect_low
1513 + bfd_section_size (tmp_bfd, interp_sect);
1514 }
1515 interp_sect = bfd_get_section_by_name (tmp_bfd, ".plt");
1516 if (interp_sect)
1517 {
1518 info->interp_plt_sect_low =
1519 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
1520 info->interp_plt_sect_high =
1521 info->interp_plt_sect_low
1522 + bfd_section_size (tmp_bfd, interp_sect);
1523 }
1524
1525 create_solib_event_breakpoint (target_gdbarch (), sym_addr);
1526 return 1;
1527 }
1528 }
1529
1530 /* Find the program interpreter; if not found, warn the user and drop
1531 into the old breakpoint at symbol code. */
1532 interp_name = find_program_interpreter ();
1533 if (interp_name)
1534 {
1535 CORE_ADDR load_addr = 0;
1536 int load_addr_found = 0;
1537 int loader_found_in_list = 0;
1538 struct so_list *so;
1539 bfd *tmp_bfd = NULL;
1540 struct target_ops *tmp_bfd_target;
1541 volatile struct gdb_exception ex;
1542
1543 sym_addr = 0;
1544
1545 /* Now we need to figure out where the dynamic linker was
1546 loaded so that we can load its symbols and place a breakpoint
1547 in the dynamic linker itself.
1548
1549 This address is stored on the stack. However, I've been unable
1550 to find any magic formula to find it for Solaris (appears to
1551 be trivial on GNU/Linux). Therefore, we have to try an alternate
1552 mechanism to find the dynamic linker's base address. */
1553
1554 TRY_CATCH (ex, RETURN_MASK_ALL)
1555 {
1556 tmp_bfd = solib_bfd_open (interp_name);
1557 }
1558 if (tmp_bfd == NULL)
1559 goto bkpt_at_symbol;
1560
1561 /* Now convert the TMP_BFD into a target. That way target, as
1562 well as BFD operations can be used. */
1563 tmp_bfd_target = target_bfd_reopen (tmp_bfd);
1564 /* target_bfd_reopen acquired its own reference, so we can
1565 release ours now. */
1566 gdb_bfd_unref (tmp_bfd);
1567
1568 /* On a running target, we can get the dynamic linker's base
1569 address from the shared library table. */
1570 so = master_so_list ();
1571 while (so)
1572 {
1573 if (svr4_same_1 (interp_name, so->so_original_name))
1574 {
1575 load_addr_found = 1;
1576 loader_found_in_list = 1;
1577 load_addr = lm_addr_check (so, tmp_bfd);
1578 break;
1579 }
1580 so = so->next;
1581 }
1582
1583 /* If we were not able to find the base address of the loader
1584 from our so_list, then try using the AT_BASE auxiliary entry. */
1585 if (!load_addr_found)
1586 if (target_auxv_search (&current_target, AT_BASE, &load_addr) > 0)
1587 {
1588 int addr_bit = gdbarch_addr_bit (target_gdbarch ());
1589
1590 /* Ensure LOAD_ADDR has proper sign in its possible upper bits so
1591 that `+ load_addr' will overflow CORE_ADDR width not creating
1592 invalid addresses like 0x101234567 for 32bit inferiors on 64bit
1593 GDB. */
1594
1595 if (addr_bit < (sizeof (CORE_ADDR) * HOST_CHAR_BIT))
1596 {
1597 CORE_ADDR space_size = (CORE_ADDR) 1 << addr_bit;
1598 CORE_ADDR tmp_entry_point = exec_entry_point (tmp_bfd,
1599 tmp_bfd_target);
1600
1601 gdb_assert (load_addr < space_size);
1602
1603 /* TMP_ENTRY_POINT exceeding SPACE_SIZE would be for prelinked
1604 64bit ld.so with a 32bit executable; it should not happen. */
1605
1606 if (tmp_entry_point < space_size
1607 && tmp_entry_point + load_addr >= space_size)
1608 load_addr -= space_size;
1609 }
1610
1611 load_addr_found = 1;
1612 }
1613
1614 /* Otherwise we find the dynamic linker's base address by examining
1615 the current pc (which should point at the entry point for the
1616 dynamic linker) and subtracting the offset of the entry point.
1617
1618 This is more fragile than the previous approaches, but is a good
1619 fallback method because it has actually been working well in
1620 most cases. */
1621 if (!load_addr_found)
1622 {
1623 struct regcache *regcache
1624 = get_thread_arch_regcache (inferior_ptid, target_gdbarch ());
1625
1626 load_addr = (regcache_read_pc (regcache)
1627 - exec_entry_point (tmp_bfd, tmp_bfd_target));
1628 }
1629
1630 if (!loader_found_in_list)
1631 {
1632 info->debug_loader_name = xstrdup (interp_name);
1633 info->debug_loader_offset_p = 1;
1634 info->debug_loader_offset = load_addr;
1635 solib_add (NULL, from_tty, &current_target, auto_solib_add);
1636 }
1637
1638 /* Record the relocated start and end address of the dynamic linker
1639 text and plt section for svr4_in_dynsym_resolve_code. */
1640 interp_sect = bfd_get_section_by_name (tmp_bfd, ".text");
1641 if (interp_sect)
1642 {
1643 info->interp_text_sect_low =
1644 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
1645 info->interp_text_sect_high =
1646 info->interp_text_sect_low
1647 + bfd_section_size (tmp_bfd, interp_sect);
1648 }
1649 interp_sect = bfd_get_section_by_name (tmp_bfd, ".plt");
1650 if (interp_sect)
1651 {
1652 info->interp_plt_sect_low =
1653 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
1654 info->interp_plt_sect_high =
1655 info->interp_plt_sect_low
1656 + bfd_section_size (tmp_bfd, interp_sect);
1657 }
1658
1659 /* Now try to set a breakpoint in the dynamic linker. */
1660 for (bkpt_namep = solib_break_names; *bkpt_namep != NULL; bkpt_namep++)
1661 {
1662 sym_addr = gdb_bfd_lookup_symbol (tmp_bfd, cmp_name_and_sec_flags,
1663 (void *) *bkpt_namep);
1664 if (sym_addr != 0)
1665 break;
1666 }
1667
1668 if (sym_addr != 0)
1669 /* Convert 'sym_addr' from a function pointer to an address.
1670 Because we pass tmp_bfd_target instead of the current
1671 target, this will always produce an unrelocated value. */
1672 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
1673 sym_addr,
1674 tmp_bfd_target);
1675
1676 /* We're done with both the temporary bfd and target. Closing
1677 the target closes the underlying bfd, because it holds the
1678 only remaining reference. */
1679 target_close (tmp_bfd_target, 0);
1680
1681 if (sym_addr != 0)
1682 {
1683 create_solib_event_breakpoint (target_gdbarch (), load_addr + sym_addr);
1684 xfree (interp_name);
1685 return 1;
1686 }
1687
1688 /* For whatever reason we couldn't set a breakpoint in the dynamic
1689 linker. Warn and drop into the old code. */
1690 bkpt_at_symbol:
1691 xfree (interp_name);
1692 warning (_("Unable to find dynamic linker breakpoint function.\n"
1693 "GDB will be unable to debug shared library initializers\n"
1694 "and track explicitly loaded dynamic code."));
1695 }
1696
1697 /* Scan through the lists of symbols, trying to look up the symbol and
1698 set a breakpoint there. Terminate loop when we/if we succeed. */
1699
1700 for (bkpt_namep = solib_break_names; *bkpt_namep != NULL; bkpt_namep++)
1701 {
1702 msymbol = lookup_minimal_symbol (*bkpt_namep, NULL, symfile_objfile);
1703 if ((msymbol != NULL) && (SYMBOL_VALUE_ADDRESS (msymbol) != 0))
1704 {
1705 sym_addr = SYMBOL_VALUE_ADDRESS (msymbol);
1706 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
1707 sym_addr,
1708 &current_target);
1709 create_solib_event_breakpoint (target_gdbarch (), sym_addr);
1710 return 1;
1711 }
1712 }
1713
1714 if (interp_name != NULL && !current_inferior ()->attach_flag)
1715 {
1716 for (bkpt_namep = bkpt_names; *bkpt_namep != NULL; bkpt_namep++)
1717 {
1718 msymbol = lookup_minimal_symbol (*bkpt_namep, NULL, symfile_objfile);
1719 if ((msymbol != NULL) && (SYMBOL_VALUE_ADDRESS (msymbol) != 0))
1720 {
1721 sym_addr = SYMBOL_VALUE_ADDRESS (msymbol);
1722 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
1723 sym_addr,
1724 &current_target);
1725 create_solib_event_breakpoint (target_gdbarch (), sym_addr);
1726 return 1;
1727 }
1728 }
1729 }
1730 return 0;
1731 }
1732
1733 /* Implement the "special_symbol_handling" target_so_ops method. */
1734
1735 static void
1736 svr4_special_symbol_handling (void)
1737 {
1738 /* Nothing to do. */
1739 }
1740
1741 /* Read the ELF program headers from ABFD. Return the contents and
1742 set *PHDRS_SIZE to the size of the program headers. */
1743
1744 static gdb_byte *
1745 read_program_headers_from_bfd (bfd *abfd, int *phdrs_size)
1746 {
1747 Elf_Internal_Ehdr *ehdr;
1748 gdb_byte *buf;
1749
1750 ehdr = elf_elfheader (abfd);
1751
1752 *phdrs_size = ehdr->e_phnum * ehdr->e_phentsize;
1753 if (*phdrs_size == 0)
1754 return NULL;
1755
1756 buf = xmalloc (*phdrs_size);
1757 if (bfd_seek (abfd, ehdr->e_phoff, SEEK_SET) != 0
1758 || bfd_bread (buf, *phdrs_size, abfd) != *phdrs_size)
1759 {
1760 xfree (buf);
1761 return NULL;
1762 }
1763
1764 return buf;
1765 }
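
/* Note on the function above: the returned buffer is the raw, on-disk
   program header table - an array of Elf32_External_Phdr or
   Elf64_External_Phdr records depending on the file - allocated with
   xmalloc; the caller (see svr4_exec_displacement below) is responsible
   for releasing it with xfree.  */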
1766
1767 /* Return 1 and fill *DISPLACEMENTP with the detected PIE offset of the
1768    inferior's exec_bfd.  Otherwise return 0.
1769
1770 We relocate all of the sections by the same amount. This
1771 behavior is mandated by recent editions of the System V ABI.
1772 According to the System V Application Binary Interface,
1773 Edition 4.1, page 5-5:
1774
1775 ... Though the system chooses virtual addresses for
1776 individual processes, it maintains the segments' relative
1777 positions. Because position-independent code uses relative
1778    addressing between segments, the difference between
1779 virtual addresses in memory must match the difference
1780 between virtual addresses in the file. The difference
1781 between the virtual address of any segment in memory and
1782 the corresponding virtual address in the file is thus a
1783 single constant value for any one executable or shared
1784 object in a given process. This difference is the base
1785 address. One use of the base address is to relocate the
1786 memory image of the program during dynamic linking.
1787
1788 The same language also appears in Edition 4.0 of the System V
1789 ABI and is left unspecified in some of the earlier editions.
1790
1791    Decide if the objfile needs to be relocated.  As indicated above, we will
1792    only be here when execution is stopped.  But during attachment the PC can
1793    be at an arbitrary address, so regcache_read_pc can be misleading (unlike
1794    the auxv AT_ENTRY value).  Moreover, for an executable with an interpreter
1795    section, regcache_read_pc would point into the interpreter, not the main executable.
1796
1797 So, to summarize, relocations are necessary when the start address obtained
1798 from the executable is different from the address in auxv AT_ENTRY entry.
1799
1800 [ The astute reader will note that we also test to make sure that
1801 the executable in question has the DYNAMIC flag set. It is my
1802 opinion that this test is unnecessary (undesirable even). It
1803 was added to avoid inadvertent relocation of an executable
1804 whose e_type member in the ELF header is not ET_DYN. There may
1805 be a time in the future when it is desirable to do relocations
1806 on other types of files as well in which case this condition
1807    should either be removed or modified to accommodate the new file
1808 type. - Kevin, Nov 2000. ] */
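
/* For illustration only (hypothetical values): if bfd_get_start_address
   on the executable returns 0x740 while the auxv AT_ENTRY entry reports
   0x555555554740, the displacement computed below is 0x555555554000, and
   svr4_relocate_main_executable later shifts every section offset of the
   main objfile by that single constant.  */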
1809
1810 static int
1811 svr4_exec_displacement (CORE_ADDR *displacementp)
1812 {
1813 /* ENTRY_POINT is a possible function descriptor - before
1814 a call to gdbarch_convert_from_func_ptr_addr. */
1815 CORE_ADDR entry_point, displacement;
1816
1817 if (exec_bfd == NULL)
1818 return 0;
1819
1820   /* Without the DYNAMIC flag, an ELF file is ET_EXEC rather than ET_DYN,
1821      and no displacement applies.  Both shared libraries being executed
1822      directly and PIE (Position Independent Executable) executables are ET_DYN.  */
1823
1824 if ((bfd_get_file_flags (exec_bfd) & DYNAMIC) == 0)
1825 return 0;
1826
1827 if (target_auxv_search (&current_target, AT_ENTRY, &entry_point) <= 0)
1828 return 0;
1829
1830 displacement = entry_point - bfd_get_start_address (exec_bfd);
1831
1832   /* Verify that the DISPLACEMENT candidate complies with the required page
1833      alignment.  This check is cheaper than the program header comparison below.  */
1834
1835 if (bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
1836 {
1837 const struct elf_backend_data *elf = get_elf_backend_data (exec_bfd);
1838
1839       /* The p_align of PT_LOAD segments does not mandate any particular load
1840          alignment, only congruency of addresses:
1841            p_offset % p_align == p_vaddr % p_align
1842          The kernel is free to load the executable with lower alignment.  */
1843
1844 if ((displacement & (elf->minpagesize - 1)) != 0)
1845 return 0;
1846 }
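
  /* As a hypothetical example of the check above: with a minimum page size
     of 0x1000, a candidate displacement of 0x555555554000 is accepted,
     while 0x555555554200 would be rejected, since a valid load bias must
     preserve the page-offset congruency described above.  */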
1847
1848   /* Verify that the auxiliary vector describes the same file as exec_bfd, by
1849      comparing their program headers.  If the program headers in the auxiliary
1850      vector do not match the program headers in the executable, then we are
1851      looking at a different file than the one used by the kernel - for
1852      instance, "gdb program" connected to "gdbserver :PORT ld.so program".  */
1853
1854 if (bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
1855 {
1856 /* Be optimistic and clear OK only if GDB was able to verify the headers
1857 really do not match. */
1858 int phdrs_size, phdrs2_size, ok = 1;
1859 gdb_byte *buf, *buf2;
1860 int arch_size;
1861
1862 buf = read_program_header (-1, &phdrs_size, &arch_size);
1863 buf2 = read_program_headers_from_bfd (exec_bfd, &phdrs2_size);
1864 if (buf != NULL && buf2 != NULL)
1865 {
1866 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
1867
1868 	  /* We are dealing with three different addresses.  EXEC_BFD
1869 	     represents the current addresses in the on-disk file.  The target
1870 	     memory content may differ from EXEC_BFD, as the file may have been
1871 	     prelinked to a different address after the executable was loaded.
1872 	     Moreover, the address at which the file is placed in target memory
1873 	     can differ from what the program headers in target memory say -
1874 	     this is the point of PIE.
1875
1876 	     The detected DISPLACEMENT covers both the offset of the PIE
1877 	     placement and any new prelink performed after the program was
1878 	     started.  Here we relocate BUF and BUF2 only by the EXEC_BFD
1879 	     vs. target memory content offset, for verification purposes.  */
1880
1881 if (phdrs_size != phdrs2_size
1882 || bfd_get_arch_size (exec_bfd) != arch_size)
1883 ok = 0;
1884 else if (arch_size == 32
1885 && phdrs_size >= sizeof (Elf32_External_Phdr)
1886 && phdrs_size % sizeof (Elf32_External_Phdr) == 0)
1887 {
1888 Elf_Internal_Ehdr *ehdr2 = elf_tdata (exec_bfd)->elf_header;
1889 Elf_Internal_Phdr *phdr2 = elf_tdata (exec_bfd)->phdr;
1890 CORE_ADDR displacement = 0;
1891 int i;
1892
1893 /* DISPLACEMENT could be found more easily by the difference of
1894 ehdr2->e_entry. But we haven't read the ehdr yet, and we
1895 already have enough information to compute that displacement
1896 with what we've read. */
1897
1898 for (i = 0; i < ehdr2->e_phnum; i++)
1899 if (phdr2[i].p_type == PT_LOAD)
1900 {
1901 Elf32_External_Phdr *phdrp;
1902 gdb_byte *buf_vaddr_p, *buf_paddr_p;
1903 CORE_ADDR vaddr, paddr;
1904 CORE_ADDR displacement_vaddr = 0;
1905 CORE_ADDR displacement_paddr = 0;
1906
1907 phdrp = &((Elf32_External_Phdr *) buf)[i];
1908 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
1909 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
1910
1911 vaddr = extract_unsigned_integer (buf_vaddr_p, 4,
1912 byte_order);
1913 displacement_vaddr = vaddr - phdr2[i].p_vaddr;
1914
1915 paddr = extract_unsigned_integer (buf_paddr_p, 4,
1916 byte_order);
1917 displacement_paddr = paddr - phdr2[i].p_paddr;
1918
1919 if (displacement_vaddr == displacement_paddr)
1920 displacement = displacement_vaddr;
1921
1922 break;
1923 }
1924
1925 /* Now compare BUF and BUF2 with optional DISPLACEMENT. */
1926
1927 for (i = 0; i < phdrs_size / sizeof (Elf32_External_Phdr); i++)
1928 {
1929 Elf32_External_Phdr *phdrp;
1930 Elf32_External_Phdr *phdr2p;
1931 gdb_byte *buf_vaddr_p, *buf_paddr_p;
1932 CORE_ADDR vaddr, paddr;
1933 asection *plt2_asect;
1934
1935 phdrp = &((Elf32_External_Phdr *) buf)[i];
1936 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
1937 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
1938 phdr2p = &((Elf32_External_Phdr *) buf2)[i];
1939
1940 		  /* PT_GNU_STACK is an exception in that it is never relocated
1941 		     by prelink, as its addresses are always zero.  */
1942
1943 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
1944 continue;
1945
1946 /* Check also other adjustment combinations - PR 11786. */
1947
1948 vaddr = extract_unsigned_integer (buf_vaddr_p, 4,
1949 byte_order);
1950 vaddr -= displacement;
1951 store_unsigned_integer (buf_vaddr_p, 4, byte_order, vaddr);
1952
1953 paddr = extract_unsigned_integer (buf_paddr_p, 4,
1954 byte_order);
1955 paddr -= displacement;
1956 store_unsigned_integer (buf_paddr_p, 4, byte_order, paddr);
1957
1958 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
1959 continue;
1960
1961 /* prelink can convert .plt SHT_NOBITS to SHT_PROGBITS. */
1962 plt2_asect = bfd_get_section_by_name (exec_bfd, ".plt");
1963 if (plt2_asect)
1964 {
1965 int content2;
1966 gdb_byte *buf_filesz_p = (gdb_byte *) &phdrp->p_filesz;
1967 CORE_ADDR filesz;
1968
1969 content2 = (bfd_get_section_flags (exec_bfd, plt2_asect)
1970 & SEC_HAS_CONTENTS) != 0;
1971
1972 filesz = extract_unsigned_integer (buf_filesz_p, 4,
1973 byte_order);
1974
1975 /* PLT2_ASECT is from on-disk file (exec_bfd) while
1976 FILESZ is from the in-memory image. */
1977 if (content2)
1978 filesz += bfd_get_section_size (plt2_asect);
1979 else
1980 filesz -= bfd_get_section_size (plt2_asect);
1981
1982 store_unsigned_integer (buf_filesz_p, 4, byte_order,
1983 filesz);
1984
1985 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
1986 continue;
1987 }
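
		  /* The .plt adjustment above grows or shrinks the in-memory
		     p_filesz by the size of .plt, so that a difference caused
		     solely by prelink's SHT_NOBITS -> SHT_PROGBITS conversion
		     does not make otherwise matching headers appear
		     different.  */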
1988
1989 ok = 0;
1990 break;
1991 }
1992 }
1993 else if (arch_size == 64
1994 && phdrs_size >= sizeof (Elf64_External_Phdr)
1995 && phdrs_size % sizeof (Elf64_External_Phdr) == 0)
1996 {
1997 Elf_Internal_Ehdr *ehdr2 = elf_tdata (exec_bfd)->elf_header;
1998 Elf_Internal_Phdr *phdr2 = elf_tdata (exec_bfd)->phdr;
1999 CORE_ADDR displacement = 0;
2000 int i;
2001
2002 /* DISPLACEMENT could be found more easily by the difference of
2003 ehdr2->e_entry. But we haven't read the ehdr yet, and we
2004 already have enough information to compute that displacement
2005 with what we've read. */
2006
2007 for (i = 0; i < ehdr2->e_phnum; i++)
2008 if (phdr2[i].p_type == PT_LOAD)
2009 {
2010 Elf64_External_Phdr *phdrp;
2011 gdb_byte *buf_vaddr_p, *buf_paddr_p;
2012 CORE_ADDR vaddr, paddr;
2013 CORE_ADDR displacement_vaddr = 0;
2014 CORE_ADDR displacement_paddr = 0;
2015
2016 phdrp = &((Elf64_External_Phdr *) buf)[i];
2017 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
2018 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
2019
2020 vaddr = extract_unsigned_integer (buf_vaddr_p, 8,
2021 byte_order);
2022 displacement_vaddr = vaddr - phdr2[i].p_vaddr;
2023
2024 paddr = extract_unsigned_integer (buf_paddr_p, 8,
2025 byte_order);
2026 displacement_paddr = paddr - phdr2[i].p_paddr;
2027
2028 if (displacement_vaddr == displacement_paddr)
2029 displacement = displacement_vaddr;
2030
2031 break;
2032 }
2033
2034 /* Now compare BUF and BUF2 with optional DISPLACEMENT. */
2035
2036 for (i = 0; i < phdrs_size / sizeof (Elf64_External_Phdr); i++)
2037 {
2038 Elf64_External_Phdr *phdrp;
2039 Elf64_External_Phdr *phdr2p;
2040 gdb_byte *buf_vaddr_p, *buf_paddr_p;
2041 CORE_ADDR vaddr, paddr;
2042 asection *plt2_asect;
2043
2044 phdrp = &((Elf64_External_Phdr *) buf)[i];
2045 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
2046 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
2047 phdr2p = &((Elf64_External_Phdr *) buf2)[i];
2048
2049 		  /* PT_GNU_STACK is an exception in that it is never relocated
2050 		     by prelink, as its addresses are always zero.  */
2051
2052 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2053 continue;
2054
2055 /* Check also other adjustment combinations - PR 11786. */
2056
2057 vaddr = extract_unsigned_integer (buf_vaddr_p, 8,
2058 byte_order);
2059 vaddr -= displacement;
2060 store_unsigned_integer (buf_vaddr_p, 8, byte_order, vaddr);
2061
2062 paddr = extract_unsigned_integer (buf_paddr_p, 8,
2063 byte_order);
2064 paddr -= displacement;
2065 store_unsigned_integer (buf_paddr_p, 8, byte_order, paddr);
2066
2067 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2068 continue;
2069
2070 /* prelink can convert .plt SHT_NOBITS to SHT_PROGBITS. */
2071 plt2_asect = bfd_get_section_by_name (exec_bfd, ".plt");
2072 if (plt2_asect)
2073 {
2074 int content2;
2075 gdb_byte *buf_filesz_p = (gdb_byte *) &phdrp->p_filesz;
2076 CORE_ADDR filesz;
2077
2078 content2 = (bfd_get_section_flags (exec_bfd, plt2_asect)
2079 & SEC_HAS_CONTENTS) != 0;
2080
2081 filesz = extract_unsigned_integer (buf_filesz_p, 8,
2082 byte_order);
2083
2084 /* PLT2_ASECT is from on-disk file (exec_bfd) while
2085 FILESZ is from the in-memory image. */
2086 if (content2)
2087 filesz += bfd_get_section_size (plt2_asect);
2088 else
2089 filesz -= bfd_get_section_size (plt2_asect);
2090
2091 store_unsigned_integer (buf_filesz_p, 8, byte_order,
2092 filesz);
2093
2094 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2095 continue;
2096 }
2097
2098 ok = 0;
2099 break;
2100 }
2101 }
2102 else
2103 ok = 0;
2104 }
2105
2106 xfree (buf);
2107 xfree (buf2);
2108
2109 if (!ok)
2110 return 0;
2111 }
2112
2113 if (info_verbose)
2114 {
2115       /* This may be printed repeatedly, as there is no easy way to check
2116          whether the executable's symbols/file have already been relocated
2117          by this displacement.  */
2118
2119 printf_unfiltered (_("Using PIE (Position Independent Executable) "
2120 "displacement %s for \"%s\".\n"),
2121 paddress (target_gdbarch (), displacement),
2122 bfd_get_filename (exec_bfd));
2123 }
2124
2125 *displacementp = displacement;
2126 return 1;
2127 }
2128
2129 /* Relocate the main executable. This function should be called upon
2130 stopping the inferior process at the entry point to the program.
2131 The entry point from BFD is compared to the AT_ENTRY of AUXV and if they are
2132 different, the main executable is relocated by the proper amount. */
2133
2134 static void
2135 svr4_relocate_main_executable (void)
2136 {
2137 CORE_ADDR displacement;
2138
2139 /* If we are re-running this executable, SYMFILE_OBJFILE->SECTION_OFFSETS
2140 probably contains the offsets computed using the PIE displacement
2141 from the previous run, which of course are irrelevant for this run.
2142 So we need to determine the new PIE displacement and recompute the
2143 section offsets accordingly, even if SYMFILE_OBJFILE->SECTION_OFFSETS
2144 already contains pre-computed offsets.
2145
2146 If we cannot compute the PIE displacement, either:
2147
2148 - The executable is not PIE.
2149
2150 - SYMFILE_OBJFILE does not match the executable started in the target.
2151 This can happen for main executable symbols loaded at the host while
2152 `ld.so --ld-args main-executable' is loaded in the target.
2153
2154 Then we leave the section offsets untouched and use them as is for
2155 this run. Either:
2156
2157 - These section offsets were properly reset earlier, and thus
2158 already contain the correct values. This can happen for instance
2159 when reconnecting via the remote protocol to a target that supports
2160 the `qOffsets' packet.
2161
2162 - The section offsets were not reset earlier, and the best we can
2163 hope is that the old offsets are still applicable to the new run. */
2164
2165 if (! svr4_exec_displacement (&displacement))
2166 return;
2167
2168 /* Even DISPLACEMENT 0 is a valid new difference of in-memory vs. in-file
2169 addresses. */
2170
2171 if (symfile_objfile)
2172 {
2173 struct section_offsets *new_offsets;
2174 int i;
2175
2176 new_offsets = alloca (symfile_objfile->num_sections
2177 * sizeof (*new_offsets));
2178
2179 for (i = 0; i < symfile_objfile->num_sections; i++)
2180 new_offsets->offsets[i] = displacement;
2181
2182 objfile_relocate (symfile_objfile, new_offsets);
2183 }
2184 else if (exec_bfd)
2185 {
2186 asection *asect;
2187
2188 for (asect = exec_bfd->sections; asect != NULL; asect = asect->next)
2189 exec_set_section_address (bfd_get_filename (exec_bfd), asect->index,
2190 (bfd_section_vma (exec_bfd, asect)
2191 + displacement));
2192 }
2193 }
2194
2195 /* Implement the "solib_create_inferior_hook" target_so_ops method.
2196
2197    For SVR4 executables, the first instruction the inferior executes is
2198    either the first instruction in the dynamic linker (for dynamically
2199    linked executables) or the instruction at "start" for statically
2200    linked executables.  For dynamically linked executables, the system
2201 first exec's /lib/libc.so.N, which contains the dynamic linker,
2202 and starts it running. The dynamic linker maps in any needed
2203 shared libraries, maps in the actual user executable, and then
2204 jumps to "start" in the user executable.
2205
2206 We can arrange to cooperate with the dynamic linker to discover the
2207 names of shared libraries that are dynamically linked, and the base
2208 addresses to which they are linked.
2209
2210 This function is responsible for discovering those names and
2211 addresses, and saving sufficient information about them to allow
2212 their symbols to be read at a later time.
2213
2214 FIXME
2215
2216 Between enable_break() and disable_break(), this code does not
2217 properly handle hitting breakpoints which the user might have
2218 set in the startup code or in the dynamic linker itself. Proper
2219 handling will probably have to wait until the implementation is
2220 changed to use the "breakpoint handler function" method.
2221
2222 Also, what if child has exit()ed? Must exit loop somehow. */
2223
2224 static void
2225 svr4_solib_create_inferior_hook (int from_tty)
2226 {
2227 #if defined(_SCO_DS)
2228 struct inferior *inf;
2229 struct thread_info *tp;
2230 #endif /* defined(_SCO_DS) */
2231 struct svr4_info *info;
2232
2233 info = get_svr4_info ();
2234
2235 /* Relocate the main executable if necessary. */
2236 svr4_relocate_main_executable ();
2237
2238 /* No point setting a breakpoint in the dynamic linker if we can't
2239 hit it (e.g., a core file, or a trace file). */
2240 if (!target_has_execution)
2241 return;
2242
2243 if (!svr4_have_link_map_offsets ())
2244 return;
2245
2246 if (!enable_break (info, from_tty))
2247 return;
2248
2249 #if defined(_SCO_DS)
2250 /* SCO needs the loop below, other systems should be using the
2251 special shared library breakpoints and the shared library breakpoint
2252 service routine.
2253
2254 Now run the target. It will eventually hit the breakpoint, at
2255 which point all of the libraries will have been mapped in and we
2256 can go groveling around in the dynamic linker structures to find
2257 out what we need to know about them. */
2258
2259 inf = current_inferior ();
2260 tp = inferior_thread ();
2261
2262 clear_proceed_status ();
2263 inf->control.stop_soon = STOP_QUIETLY;
2264 tp->suspend.stop_signal = GDB_SIGNAL_0;
2265 do
2266 {
2267 target_resume (pid_to_ptid (-1), 0, tp->suspend.stop_signal);
2268 wait_for_inferior ();
2269 }
2270 while (tp->suspend.stop_signal != GDB_SIGNAL_TRAP);
2271 inf->control.stop_soon = NO_STOP_QUIETLY;
2272 #endif /* defined(_SCO_DS) */
2273 }
2274
2275 static void
2276 svr4_clear_solib (void)
2277 {
2278 struct svr4_info *info;
2279
2280 info = get_svr4_info ();
2281 info->debug_base = 0;
2282 info->debug_loader_offset_p = 0;
2283 info->debug_loader_offset = 0;
2284 xfree (info->debug_loader_name);
2285 info->debug_loader_name = NULL;
2286 }
2287
2288 /* Clear any bits of ADDR that wouldn't fit in a target-format
2289 data pointer. "Data pointer" here refers to whatever sort of
2290 address the dynamic linker uses to manage its sections. At the
2291 moment, we don't support shared libraries on any processors where
2292 code and data pointers are different sizes.
2293
2294 This isn't really the right solution. What we really need here is
2295 a way to do arithmetic on CORE_ADDR values that respects the
2296 natural pointer/address correspondence. (For example, on the MIPS,
2297 converting a 32-bit pointer to a 64-bit CORE_ADDR requires you to
2298 sign-extend the value. There, simply truncating the bits above
2299 gdbarch_ptr_bit, as we do below, is no good.) This should probably
2300 be a new gdbarch method or something. */
2301 static CORE_ADDR
2302 svr4_truncate_ptr (CORE_ADDR addr)
2303 {
2304 if (gdbarch_ptr_bit (target_gdbarch ()) == sizeof (CORE_ADDR) * 8)
2305 /* We don't need to truncate anything, and the bit twiddling below
2306 will fail due to overflow problems. */
2307 return addr;
2308 else
2309 return addr & (((CORE_ADDR) 1 << gdbarch_ptr_bit (target_gdbarch ())) - 1);
2310 }
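
/* For example (hypothetical values): on a target with 32-bit pointers but
   a 64-bit CORE_ADDR, svr4_truncate_ptr (0xffffffff80001234) yields
   0x80001234, while on a target whose pointer width already equals the
   width of CORE_ADDR the address is returned unchanged.  */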
2311
2312
2313 static void
2314 svr4_relocate_section_addresses (struct so_list *so,
2315 struct target_section *sec)
2316 {
2317 sec->addr = svr4_truncate_ptr (sec->addr + lm_addr_check (so,
2318 sec->bfd));
2319 sec->endaddr = svr4_truncate_ptr (sec->endaddr + lm_addr_check (so,
2320 sec->bfd));
2321 }
2322 \f
2323
2324 /* Architecture-specific operations. */
2325
2326 /* Per-architecture data key. */
2327 static struct gdbarch_data *solib_svr4_data;
2328
2329 struct solib_svr4_ops
2330 {
2331 /* Return a description of the layout of `struct link_map'. */
2332 struct link_map_offsets *(*fetch_link_map_offsets)(void);
2333 };
2334
2335 /* Return a default for the architecture-specific operations. */
2336
2337 static void *
2338 solib_svr4_init (struct obstack *obstack)
2339 {
2340 struct solib_svr4_ops *ops;
2341
2342 ops = OBSTACK_ZALLOC (obstack, struct solib_svr4_ops);
2343 ops->fetch_link_map_offsets = NULL;
2344 return ops;
2345 }
2346
2347 /* Set the architecture-specific `struct link_map_offsets' fetcher for
2348 GDBARCH to FLMO. Also, install SVR4 solib_ops into GDBARCH. */
2349
2350 void
2351 set_solib_svr4_fetch_link_map_offsets (struct gdbarch *gdbarch,
2352 struct link_map_offsets *(*flmo) (void))
2353 {
2354 struct solib_svr4_ops *ops = gdbarch_data (gdbarch, solib_svr4_data);
2355
2356 ops->fetch_link_map_offsets = flmo;
2357
2358 set_solib_ops (gdbarch, &svr4_so_ops);
2359 }
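
/* As a sketch of the expected use (the particular fetcher chosen here is
   illustrative), an SVR4-based gdbarch init routine would typically call:

     set_solib_svr4_fetch_link_map_offsets
       (gdbarch, svr4_ilp32_fetch_link_map_offsets);

   which both records the offsets fetcher used by
   svr4_fetch_link_map_offsets below and installs svr4_so_ops as the
   architecture's shared library handling.  */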
2360
2361 /* Fetch a link_map_offsets structure using the architecture-specific
2362 `struct link_map_offsets' fetcher. */
2363
2364 static struct link_map_offsets *
2365 svr4_fetch_link_map_offsets (void)
2366 {
2367 struct solib_svr4_ops *ops = gdbarch_data (target_gdbarch (), solib_svr4_data);
2368
2369 gdb_assert (ops->fetch_link_map_offsets);
2370 return ops->fetch_link_map_offsets ();
2371 }
2372
2373 /* Return 1 if a link map offset fetcher has been defined, 0 otherwise. */
2374
2375 static int
2376 svr4_have_link_map_offsets (void)
2377 {
2378 struct solib_svr4_ops *ops = gdbarch_data (target_gdbarch (), solib_svr4_data);
2379
2380 return (ops->fetch_link_map_offsets != NULL);
2381 }
2382 \f
2383
2384 /* Most OSes that have SVR4-style ELF dynamic libraries define a
2385    `struct r_debug' and a `struct link_map' that are binary compatible
2386    with the original SVR4 implementation.  */
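
/* As a rough sketch (for illustration only, not declarations GDB compiles
   against), the ILP32 offsets below correspond to a layout along the
   lines of:

     struct r_debug
     {
       int r_version;                      offset 0, size 4
       struct link_map *r_map;             offset 4
       Elf32_Addr r_brk;                   offset 8
       ... further fields up to r_ldsomap at offset 20 ...
     };

     struct link_map
     {
       Elf32_Addr l_addr;                  offset 0
       char *l_name;                       offset 4
       Elf32_Dyn *l_ld;                    offset 8
       struct link_map *l_next;            offset 12
       struct link_map *l_prev;            offset 16
     };

   with the LP64 variant widening the pointer-sized fields to 8 bytes.  */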
2387
2388 /* Fetch (and possibly build) an appropriate `struct link_map_offsets'
2389 for an ILP32 SVR4 system. */
2390
2391 struct link_map_offsets *
2392 svr4_ilp32_fetch_link_map_offsets (void)
2393 {
2394 static struct link_map_offsets lmo;
2395 static struct link_map_offsets *lmp = NULL;
2396
2397 if (lmp == NULL)
2398 {
2399 lmp = &lmo;
2400
2401 lmo.r_version_offset = 0;
2402 lmo.r_version_size = 4;
2403 lmo.r_map_offset = 4;
2404 lmo.r_brk_offset = 8;
2405 lmo.r_ldsomap_offset = 20;
2406
2407 /* Everything we need is in the first 20 bytes. */
2408 lmo.link_map_size = 20;
2409 lmo.l_addr_offset = 0;
2410 lmo.l_name_offset = 4;
2411 lmo.l_ld_offset = 8;
2412 lmo.l_next_offset = 12;
2413 lmo.l_prev_offset = 16;
2414 }
2415
2416 return lmp;
2417 }
2418
2419 /* Fetch (and possibly build) an appropriate `struct link_map_offsets'
2420 for an LP64 SVR4 system. */
2421
2422 struct link_map_offsets *
2423 svr4_lp64_fetch_link_map_offsets (void)
2424 {
2425 static struct link_map_offsets lmo;
2426 static struct link_map_offsets *lmp = NULL;
2427
2428 if (lmp == NULL)
2429 {
2430 lmp = &lmo;
2431
2432 lmo.r_version_offset = 0;
2433 lmo.r_version_size = 4;
2434 lmo.r_map_offset = 8;
2435 lmo.r_brk_offset = 16;
2436 lmo.r_ldsomap_offset = 40;
2437
2438 /* Everything we need is in the first 40 bytes. */
2439 lmo.link_map_size = 40;
2440 lmo.l_addr_offset = 0;
2441 lmo.l_name_offset = 8;
2442 lmo.l_ld_offset = 16;
2443 lmo.l_next_offset = 24;
2444 lmo.l_prev_offset = 32;
2445 }
2446
2447 return lmp;
2448 }
2449 \f
2450
2451 struct target_so_ops svr4_so_ops;
2452
2453 /* Look up a global symbol for ELF DSOs linked with -Bsymbolic.  Those DSOs
2454    have a different rule for symbol lookup: the lookup begins here in the
2455    DSO, not in the main executable.  */
2456
2457 static struct symbol *
2458 elf_lookup_lib_symbol (const struct objfile *objfile,
2459 const char *name,
2460 const domain_enum domain)
2461 {
2462 bfd *abfd;
2463
2464 if (objfile == symfile_objfile)
2465 abfd = exec_bfd;
2466 else
2467 {
2468 /* OBJFILE should have been passed as the non-debug one. */
2469 gdb_assert (objfile->separate_debug_objfile_backlink == NULL);
2470
2471 abfd = objfile->obfd;
2472 }
2473
2474 if (abfd == NULL || scan_dyntag (DT_SYMBOLIC, abfd, NULL) != 1)
2475 return NULL;
2476
2477 return lookup_global_symbol_from_objfile (objfile, name, domain);
2478 }
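
/* Note: a DSO linked with `-Wl,-Bsymbolic' carries a DT_SYMBOLIC entry in
   its dynamic section, which is what the scan_dyntag check above detects;
   for ordinary DSOs the function returns NULL and the default lookup
   order, starting at the main executable, applies.  */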
2479
2480 extern initialize_file_ftype _initialize_svr4_solib; /* -Wmissing-prototypes */
2481
2482 void
2483 _initialize_svr4_solib (void)
2484 {
2485 solib_svr4_data = gdbarch_data_register_pre_init (solib_svr4_init);
2486 solib_svr4_pspace_data
2487 = register_program_space_data_with_cleanup (NULL, svr4_pspace_data_cleanup);
2488
2489 svr4_so_ops.relocate_section_addresses = svr4_relocate_section_addresses;
2490 svr4_so_ops.free_so = svr4_free_so;
2491 svr4_so_ops.clear_solib = svr4_clear_solib;
2492 svr4_so_ops.solib_create_inferior_hook = svr4_solib_create_inferior_hook;
2493 svr4_so_ops.special_symbol_handling = svr4_special_symbol_handling;
2494 svr4_so_ops.current_sos = svr4_current_sos;
2495 svr4_so_ops.open_symbol_file_object = open_symbol_file_object;
2496 svr4_so_ops.in_dynsym_resolve_code = svr4_in_dynsym_resolve_code;
2497 svr4_so_ops.bfd_open = solib_bfd_open;
2498 svr4_so_ops.lookup_lib_global_symbol = elf_lookup_lib_symbol;
2499 svr4_so_ops.same = svr4_same;
2500 svr4_so_ops.keep_data_in_core = svr4_keep_data_in_core;
2501 }