4e094728e89ec6f0a82f6418f0c003d88ba5ec89
[deliverable/binutils-gdb.git] / gdb / solib-svr4.c
1 /* Handle SVR4 shared libraries for GDB, the GNU Debugger.
2
3 Copyright (C) 1990-2013 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21
22 #include "elf/external.h"
23 #include "elf/common.h"
24 #include "elf/mips.h"
25
26 #include "symtab.h"
27 #include "bfd.h"
28 #include "symfile.h"
29 #include "objfiles.h"
30 #include "gdbcore.h"
31 #include "target.h"
32 #include "inferior.h"
33 #include "regcache.h"
34 #include "gdbthread.h"
35 #include "observer.h"
36
37 #include "gdb_assert.h"
38
39 #include "solist.h"
40 #include "solib.h"
41 #include "solib-svr4.h"
42
43 #include "bfd-target.h"
44 #include "elf-bfd.h"
45 #include "exec.h"
46 #include "auxv.h"
47 #include "exceptions.h"
48 #include "gdb_bfd.h"
49
50 static struct link_map_offsets *svr4_fetch_link_map_offsets (void);
51 static int svr4_have_link_map_offsets (void);
52 static void svr4_relocate_main_executable (void);
53
54 /* Link map info to include in an allocated so_list entry. */
55
56 struct lm_info
57 {
58 /* Amount by which addresses in the binary should be relocated to
59 match the inferior. The direct inferior value is L_ADDR_INFERIOR.
60 When prelinking is involved and the prelink base address changes,
61 we may need a different offset - the recomputed offset is in L_ADDR.
62 It is commonly the same value. It is cached as we want to warn about
63 the difference and compute it only once. L_ADDR is valid
64 iff L_ADDR_P. */
65 CORE_ADDR l_addr, l_addr_inferior;
66 unsigned int l_addr_p : 1;
67
68 /* The target location of lm. */
69 CORE_ADDR lm_addr;
70
71 /* Values read in from inferior's fields of the same name. */
72 CORE_ADDR l_ld, l_next, l_prev, l_name;
73 };
74
75 /* On SVR4 systems, a list of symbols in the dynamic linker where
76 GDB can try to place a breakpoint to monitor shared library
77 events.
78
79 If none of these symbols are found, or other errors occur, then
80 SVR4 systems will fall back to using a symbol as the "startup
81 mapping complete" breakpoint address. */
82
83 static const char * const solib_break_names[] =
84 {
85 "r_debug_state",
86 "_r_debug_state",
87 "_dl_debug_state",
88 "rtld_db_dlactivity",
89 "__dl_rtld_db_dlactivity",
90 "_rtld_debug_state",
91
92 NULL
93 };
94
95 static const char * const bkpt_names[] =
96 {
97 "_start",
98 "__start",
99 "main",
100 NULL
101 };
102
103 static const char * const main_name_list[] =
104 {
105 "main_$main",
106 NULL
107 };
108
109 /* Return non-zero if GDB_SO_NAME and INFERIOR_SO_NAME represent
110 the same shared library. */
111
112 static int
113 svr4_same_1 (const char *gdb_so_name, const char *inferior_so_name)
114 {
115 if (strcmp (gdb_so_name, inferior_so_name) == 0)
116 return 1;
117
118 /* On Solaris, when starting inferior we think that dynamic linker is
119 /usr/lib/ld.so.1, but later on, the table of loaded shared libraries
120 contains /lib/ld.so.1. Sometimes one file is a link to another, but
121 sometimes they have identical content, but are not linked to each
122 other. We don't restrict this check for Solaris, but the chances
123 of running into this situation elsewhere are very low. */
124 if (strcmp (gdb_so_name, "/usr/lib/ld.so.1") == 0
125 && strcmp (inferior_so_name, "/lib/ld.so.1") == 0)
126 return 1;
127
128 /* Similarly, we observed the same issue with sparc64, but with
129 different locations. */
130 if (strcmp (gdb_so_name, "/usr/lib/sparcv9/ld.so.1") == 0
131 && strcmp (inferior_so_name, "/lib/sparcv9/ld.so.1") == 0)
132 return 1;
133
134 return 0;
135 }
136
137 static int
138 svr4_same (struct so_list *gdb, struct so_list *inferior)
139 {
140 return (svr4_same_1 (gdb->so_original_name, inferior->so_original_name));
141 }
142
143 static struct lm_info *
144 lm_info_read (CORE_ADDR lm_addr)
145 {
146 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
147 gdb_byte *lm;
148 struct lm_info *lm_info;
149 struct cleanup *back_to;
150
151 lm = xmalloc (lmo->link_map_size);
152 back_to = make_cleanup (xfree, lm);
153
154 if (target_read_memory (lm_addr, lm, lmo->link_map_size) != 0)
155 {
156 warning (_("Error reading shared library list entry at %s"),
157 paddress (target_gdbarch (), lm_addr)),
158 lm_info = NULL;
159 }
160 else
161 {
162 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
163
164 lm_info = xzalloc (sizeof (*lm_info));
165 lm_info->lm_addr = lm_addr;
166
167 lm_info->l_addr_inferior = extract_typed_address (&lm[lmo->l_addr_offset],
168 ptr_type);
169 lm_info->l_ld = extract_typed_address (&lm[lmo->l_ld_offset], ptr_type);
170 lm_info->l_next = extract_typed_address (&lm[lmo->l_next_offset],
171 ptr_type);
172 lm_info->l_prev = extract_typed_address (&lm[lmo->l_prev_offset],
173 ptr_type);
174 lm_info->l_name = extract_typed_address (&lm[lmo->l_name_offset],
175 ptr_type);
176 }
177
178 do_cleanups (back_to);
179
180 return lm_info;
181 }
182
183 static int
184 has_lm_dynamic_from_link_map (void)
185 {
186 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
187
188 return lmo->l_ld_offset >= 0;
189 }
190
191 static CORE_ADDR
192 lm_addr_check (const struct so_list *so, bfd *abfd)
193 {
194 if (!so->lm_info->l_addr_p)
195 {
196 struct bfd_section *dyninfo_sect;
197 CORE_ADDR l_addr, l_dynaddr, dynaddr;
198
199 l_addr = so->lm_info->l_addr_inferior;
200
201 if (! abfd || ! has_lm_dynamic_from_link_map ())
202 goto set_addr;
203
204 l_dynaddr = so->lm_info->l_ld;
205
206 dyninfo_sect = bfd_get_section_by_name (abfd, ".dynamic");
207 if (dyninfo_sect == NULL)
208 goto set_addr;
209
210 dynaddr = bfd_section_vma (abfd, dyninfo_sect);
211
212 if (dynaddr + l_addr != l_dynaddr)
213 {
214 CORE_ADDR align = 0x1000;
215 CORE_ADDR minpagesize = align;
216
217 if (bfd_get_flavour (abfd) == bfd_target_elf_flavour)
218 {
219 Elf_Internal_Ehdr *ehdr = elf_tdata (abfd)->elf_header;
220 Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
221 int i;
222
223 align = 1;
224
225 for (i = 0; i < ehdr->e_phnum; i++)
226 if (phdr[i].p_type == PT_LOAD && phdr[i].p_align > align)
227 align = phdr[i].p_align;
228
229 minpagesize = get_elf_backend_data (abfd)->minpagesize;
230 }
231
232 /* Turn it into a mask. */
233 align--;
234
235 /* If the changes match the alignment requirements, we
236 assume we're using a core file that was generated by the
237 same binary, just prelinked with a different base offset.
238 If it doesn't match, we may have a different binary, the
239 same binary with the dynamic table loaded at an unrelated
240 location, or anything, really. To avoid regressions,
241 don't adjust the base offset in the latter case, although
242 odds are that, if things really changed, debugging won't
243 quite work.
244
245 One could expect more the condition
246 ((l_addr & align) == 0 && ((l_dynaddr - dynaddr) & align) == 0)
247 but the one below is relaxed for PPC. The PPC kernel supports
248 either 4k or 64k page sizes. To be prepared for 64k pages,
249 PPC ELF files are built using an alignment requirement of 64k.
250 However, when running on a kernel supporting 4k pages, the memory
251 mapping of the library may not actually happen on a 64k boundary!
252
253 (In the usual case where (l_addr & align) == 0, this check is
254 equivalent to the possibly expected check above.)
255
256 Even on PPC it must be zero-aligned at least for MINPAGESIZE. */
257
258 l_addr = l_dynaddr - dynaddr;
259
260 if ((l_addr & (minpagesize - 1)) == 0
261 && (l_addr & align) == ((l_dynaddr - dynaddr) & align))
262 {
263 if (info_verbose)
264 printf_unfiltered (_("Using PIC (Position Independent Code) "
265 "prelink displacement %s for \"%s\".\n"),
266 paddress (target_gdbarch (), l_addr),
267 so->so_name);
268 }
269 else
270 {
271 /* There is no way to verify the library file matches. prelink
272 can during prelinking of an unprelinked file (or unprelinking
273 of a prelinked file) shift the DYNAMIC segment by arbitrary
274 offset without any page size alignment. There is no way to
275 find out the ELF header and/or Program Headers for a limited
276 verification if it they match. One could do a verification
277 of the DYNAMIC segment. Still the found address is the best
278 one GDB could find. */
279
280 warning (_(".dynamic section for \"%s\" "
281 "is not at the expected address "
282 "(wrong library or version mismatch?)"), so->so_name);
283 }
284 }
285
286 set_addr:
287 so->lm_info->l_addr = l_addr;
288 so->lm_info->l_addr_p = 1;
289 }
290
291 return so->lm_info->l_addr;
292 }
293
294 /* Per pspace SVR4 specific data. */
295
296 struct svr4_info
297 {
298 CORE_ADDR debug_base; /* Base of dynamic linker structures. */
299
300 /* Validity flag for debug_loader_offset. */
301 int debug_loader_offset_p;
302
303 /* Load address for the dynamic linker, inferred. */
304 CORE_ADDR debug_loader_offset;
305
306 /* Name of the dynamic linker, valid if debug_loader_offset_p. */
307 char *debug_loader_name;
308
309 /* Load map address for the main executable. */
310 CORE_ADDR main_lm_addr;
311
312 CORE_ADDR interp_text_sect_low;
313 CORE_ADDR interp_text_sect_high;
314 CORE_ADDR interp_plt_sect_low;
315 CORE_ADDR interp_plt_sect_high;
316 };
317
318 /* Per-program-space data key. */
319 static const struct program_space_data *solib_svr4_pspace_data;
320
321 static void
322 svr4_pspace_data_cleanup (struct program_space *pspace, void *arg)
323 {
324 struct svr4_info *info;
325
326 info = program_space_data (pspace, solib_svr4_pspace_data);
327 xfree (info);
328 }
329
330 /* Get the current svr4 data. If none is found yet, add it now. This
331 function always returns a valid object. */
332
333 static struct svr4_info *
334 get_svr4_info (void)
335 {
336 struct svr4_info *info;
337
338 info = program_space_data (current_program_space, solib_svr4_pspace_data);
339 if (info != NULL)
340 return info;
341
342 info = XZALLOC (struct svr4_info);
343 set_program_space_data (current_program_space, solib_svr4_pspace_data, info);
344 return info;
345 }
346
347 /* Local function prototypes */
348
349 static int match_main (const char *);
350
351 /* Read program header TYPE from inferior memory. The header is found
352 by scanning the OS auxillary vector.
353
354 If TYPE == -1, return the program headers instead of the contents of
355 one program header.
356
357 Return a pointer to allocated memory holding the program header contents,
358 or NULL on failure. If sucessful, and unless P_SECT_SIZE is NULL, the
359 size of those contents is returned to P_SECT_SIZE. Likewise, the target
360 architecture size (32-bit or 64-bit) is returned to P_ARCH_SIZE. */
361
362 static gdb_byte *
363 read_program_header (int type, int *p_sect_size, int *p_arch_size)
364 {
365 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
366 CORE_ADDR at_phdr, at_phent, at_phnum, pt_phdr = 0;
367 int arch_size, sect_size;
368 CORE_ADDR sect_addr;
369 gdb_byte *buf;
370 int pt_phdr_p = 0;
371
372 /* Get required auxv elements from target. */
373 if (target_auxv_search (&current_target, AT_PHDR, &at_phdr) <= 0)
374 return 0;
375 if (target_auxv_search (&current_target, AT_PHENT, &at_phent) <= 0)
376 return 0;
377 if (target_auxv_search (&current_target, AT_PHNUM, &at_phnum) <= 0)
378 return 0;
379 if (!at_phdr || !at_phnum)
380 return 0;
381
382 /* Determine ELF architecture type. */
383 if (at_phent == sizeof (Elf32_External_Phdr))
384 arch_size = 32;
385 else if (at_phent == sizeof (Elf64_External_Phdr))
386 arch_size = 64;
387 else
388 return 0;
389
390 /* Find the requested segment. */
391 if (type == -1)
392 {
393 sect_addr = at_phdr;
394 sect_size = at_phent * at_phnum;
395 }
396 else if (arch_size == 32)
397 {
398 Elf32_External_Phdr phdr;
399 int i;
400
401 /* Search for requested PHDR. */
402 for (i = 0; i < at_phnum; i++)
403 {
404 int p_type;
405
406 if (target_read_memory (at_phdr + i * sizeof (phdr),
407 (gdb_byte *)&phdr, sizeof (phdr)))
408 return 0;
409
410 p_type = extract_unsigned_integer ((gdb_byte *) phdr.p_type,
411 4, byte_order);
412
413 if (p_type == PT_PHDR)
414 {
415 pt_phdr_p = 1;
416 pt_phdr = extract_unsigned_integer ((gdb_byte *) phdr.p_vaddr,
417 4, byte_order);
418 }
419
420 if (p_type == type)
421 break;
422 }
423
424 if (i == at_phnum)
425 return 0;
426
427 /* Retrieve address and size. */
428 sect_addr = extract_unsigned_integer ((gdb_byte *)phdr.p_vaddr,
429 4, byte_order);
430 sect_size = extract_unsigned_integer ((gdb_byte *)phdr.p_memsz,
431 4, byte_order);
432 }
433 else
434 {
435 Elf64_External_Phdr phdr;
436 int i;
437
438 /* Search for requested PHDR. */
439 for (i = 0; i < at_phnum; i++)
440 {
441 int p_type;
442
443 if (target_read_memory (at_phdr + i * sizeof (phdr),
444 (gdb_byte *)&phdr, sizeof (phdr)))
445 return 0;
446
447 p_type = extract_unsigned_integer ((gdb_byte *) phdr.p_type,
448 4, byte_order);
449
450 if (p_type == PT_PHDR)
451 {
452 pt_phdr_p = 1;
453 pt_phdr = extract_unsigned_integer ((gdb_byte *) phdr.p_vaddr,
454 8, byte_order);
455 }
456
457 if (p_type == type)
458 break;
459 }
460
461 if (i == at_phnum)
462 return 0;
463
464 /* Retrieve address and size. */
465 sect_addr = extract_unsigned_integer ((gdb_byte *)phdr.p_vaddr,
466 8, byte_order);
467 sect_size = extract_unsigned_integer ((gdb_byte *)phdr.p_memsz,
468 8, byte_order);
469 }
470
471 /* PT_PHDR is optional, but we really need it
472 for PIE to make this work in general. */
473
474 if (pt_phdr_p)
475 {
476 /* at_phdr is real address in memory. pt_phdr is what pheader says it is.
477 Relocation offset is the difference between the two. */
478 sect_addr = sect_addr + (at_phdr - pt_phdr);
479 }
480
481 /* Read in requested program header. */
482 buf = xmalloc (sect_size);
483 if (target_read_memory (sect_addr, buf, sect_size))
484 {
485 xfree (buf);
486 return NULL;
487 }
488
489 if (p_arch_size)
490 *p_arch_size = arch_size;
491 if (p_sect_size)
492 *p_sect_size = sect_size;
493
494 return buf;
495 }
496
497
498 /* Return program interpreter string. */
499 static char *
500 find_program_interpreter (void)
501 {
502 gdb_byte *buf = NULL;
503
504 /* If we have an exec_bfd, use its section table. */
505 if (exec_bfd
506 && bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
507 {
508 struct bfd_section *interp_sect;
509
510 interp_sect = bfd_get_section_by_name (exec_bfd, ".interp");
511 if (interp_sect != NULL)
512 {
513 int sect_size = bfd_section_size (exec_bfd, interp_sect);
514
515 buf = xmalloc (sect_size);
516 bfd_get_section_contents (exec_bfd, interp_sect, buf, 0, sect_size);
517 }
518 }
519
520 /* If we didn't find it, use the target auxillary vector. */
521 if (!buf)
522 buf = read_program_header (PT_INTERP, NULL, NULL);
523
524 return (char *) buf;
525 }
526
527
528 /* Scan for DYNTAG in .dynamic section of ABFD. If DYNTAG is found 1 is
529 returned and the corresponding PTR is set. */
530
531 static int
532 scan_dyntag (int dyntag, bfd *abfd, CORE_ADDR *ptr)
533 {
534 int arch_size, step, sect_size;
535 long dyn_tag;
536 CORE_ADDR dyn_ptr, dyn_addr;
537 gdb_byte *bufend, *bufstart, *buf;
538 Elf32_External_Dyn *x_dynp_32;
539 Elf64_External_Dyn *x_dynp_64;
540 struct bfd_section *sect;
541 struct target_section *target_section;
542
543 if (abfd == NULL)
544 return 0;
545
546 if (bfd_get_flavour (abfd) != bfd_target_elf_flavour)
547 return 0;
548
549 arch_size = bfd_get_arch_size (abfd);
550 if (arch_size == -1)
551 return 0;
552
553 /* Find the start address of the .dynamic section. */
554 sect = bfd_get_section_by_name (abfd, ".dynamic");
555 if (sect == NULL)
556 return 0;
557
558 for (target_section = current_target_sections->sections;
559 target_section < current_target_sections->sections_end;
560 target_section++)
561 if (sect == target_section->the_bfd_section)
562 break;
563 if (target_section < current_target_sections->sections_end)
564 dyn_addr = target_section->addr;
565 else
566 {
567 /* ABFD may come from OBJFILE acting only as a symbol file without being
568 loaded into the target (see add_symbol_file_command). This case is
569 such fallback to the file VMA address without the possibility of
570 having the section relocated to its actual in-memory address. */
571
572 dyn_addr = bfd_section_vma (abfd, sect);
573 }
574
575 /* Read in .dynamic from the BFD. We will get the actual value
576 from memory later. */
577 sect_size = bfd_section_size (abfd, sect);
578 buf = bufstart = alloca (sect_size);
579 if (!bfd_get_section_contents (abfd, sect,
580 buf, 0, sect_size))
581 return 0;
582
583 /* Iterate over BUF and scan for DYNTAG. If found, set PTR and return. */
584 step = (arch_size == 32) ? sizeof (Elf32_External_Dyn)
585 : sizeof (Elf64_External_Dyn);
586 for (bufend = buf + sect_size;
587 buf < bufend;
588 buf += step)
589 {
590 if (arch_size == 32)
591 {
592 x_dynp_32 = (Elf32_External_Dyn *) buf;
593 dyn_tag = bfd_h_get_32 (abfd, (bfd_byte *) x_dynp_32->d_tag);
594 dyn_ptr = bfd_h_get_32 (abfd, (bfd_byte *) x_dynp_32->d_un.d_ptr);
595 }
596 else
597 {
598 x_dynp_64 = (Elf64_External_Dyn *) buf;
599 dyn_tag = bfd_h_get_64 (abfd, (bfd_byte *) x_dynp_64->d_tag);
600 dyn_ptr = bfd_h_get_64 (abfd, (bfd_byte *) x_dynp_64->d_un.d_ptr);
601 }
602 if (dyn_tag == DT_NULL)
603 return 0;
604 if (dyn_tag == dyntag)
605 {
606 /* If requested, try to read the runtime value of this .dynamic
607 entry. */
608 if (ptr)
609 {
610 struct type *ptr_type;
611 gdb_byte ptr_buf[8];
612 CORE_ADDR ptr_addr;
613
614 ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
615 ptr_addr = dyn_addr + (buf - bufstart) + arch_size / 8;
616 if (target_read_memory (ptr_addr, ptr_buf, arch_size / 8) == 0)
617 dyn_ptr = extract_typed_address (ptr_buf, ptr_type);
618 *ptr = dyn_ptr;
619 }
620 return 1;
621 }
622 }
623
624 return 0;
625 }
626
627 /* Scan for DYNTAG in .dynamic section of the target's main executable,
628 found by consulting the OS auxillary vector. If DYNTAG is found 1 is
629 returned and the corresponding PTR is set. */
630
631 static int
632 scan_dyntag_auxv (int dyntag, CORE_ADDR *ptr)
633 {
634 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
635 int sect_size, arch_size, step;
636 long dyn_tag;
637 CORE_ADDR dyn_ptr;
638 gdb_byte *bufend, *bufstart, *buf;
639
640 /* Read in .dynamic section. */
641 buf = bufstart = read_program_header (PT_DYNAMIC, &sect_size, &arch_size);
642 if (!buf)
643 return 0;
644
645 /* Iterate over BUF and scan for DYNTAG. If found, set PTR and return. */
646 step = (arch_size == 32) ? sizeof (Elf32_External_Dyn)
647 : sizeof (Elf64_External_Dyn);
648 for (bufend = buf + sect_size;
649 buf < bufend;
650 buf += step)
651 {
652 if (arch_size == 32)
653 {
654 Elf32_External_Dyn *dynp = (Elf32_External_Dyn *) buf;
655
656 dyn_tag = extract_unsigned_integer ((gdb_byte *) dynp->d_tag,
657 4, byte_order);
658 dyn_ptr = extract_unsigned_integer ((gdb_byte *) dynp->d_un.d_ptr,
659 4, byte_order);
660 }
661 else
662 {
663 Elf64_External_Dyn *dynp = (Elf64_External_Dyn *) buf;
664
665 dyn_tag = extract_unsigned_integer ((gdb_byte *) dynp->d_tag,
666 8, byte_order);
667 dyn_ptr = extract_unsigned_integer ((gdb_byte *) dynp->d_un.d_ptr,
668 8, byte_order);
669 }
670 if (dyn_tag == DT_NULL)
671 break;
672
673 if (dyn_tag == dyntag)
674 {
675 if (ptr)
676 *ptr = dyn_ptr;
677
678 xfree (bufstart);
679 return 1;
680 }
681 }
682
683 xfree (bufstart);
684 return 0;
685 }
686
687 /* Locate the base address of dynamic linker structs for SVR4 elf
688 targets.
689
690 For SVR4 elf targets the address of the dynamic linker's runtime
691 structure is contained within the dynamic info section in the
692 executable file. The dynamic section is also mapped into the
693 inferior address space. Because the runtime loader fills in the
694 real address before starting the inferior, we have to read in the
695 dynamic info section from the inferior address space.
696 If there are any errors while trying to find the address, we
697 silently return 0, otherwise the found address is returned. */
698
699 static CORE_ADDR
700 elf_locate_base (void)
701 {
702 struct minimal_symbol *msymbol;
703 CORE_ADDR dyn_ptr;
704
705 /* Look for DT_MIPS_RLD_MAP first. MIPS executables use this
706 instead of DT_DEBUG, although they sometimes contain an unused
707 DT_DEBUG. */
708 if (scan_dyntag (DT_MIPS_RLD_MAP, exec_bfd, &dyn_ptr)
709 || scan_dyntag_auxv (DT_MIPS_RLD_MAP, &dyn_ptr))
710 {
711 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
712 gdb_byte *pbuf;
713 int pbuf_size = TYPE_LENGTH (ptr_type);
714
715 pbuf = alloca (pbuf_size);
716 /* DT_MIPS_RLD_MAP contains a pointer to the address
717 of the dynamic link structure. */
718 if (target_read_memory (dyn_ptr, pbuf, pbuf_size))
719 return 0;
720 return extract_typed_address (pbuf, ptr_type);
721 }
722
723 /* Find DT_DEBUG. */
724 if (scan_dyntag (DT_DEBUG, exec_bfd, &dyn_ptr)
725 || scan_dyntag_auxv (DT_DEBUG, &dyn_ptr))
726 return dyn_ptr;
727
728 /* This may be a static executable. Look for the symbol
729 conventionally named _r_debug, as a last resort. */
730 msymbol = lookup_minimal_symbol ("_r_debug", NULL, symfile_objfile);
731 if (msymbol != NULL)
732 return SYMBOL_VALUE_ADDRESS (msymbol);
733
734 /* DT_DEBUG entry not found. */
735 return 0;
736 }
737
738 /* Locate the base address of dynamic linker structs.
739
740 For both the SunOS and SVR4 shared library implementations, if the
741 inferior executable has been linked dynamically, there is a single
742 address somewhere in the inferior's data space which is the key to
743 locating all of the dynamic linker's runtime structures. This
744 address is the value of the debug base symbol. The job of this
745 function is to find and return that address, or to return 0 if there
746 is no such address (the executable is statically linked for example).
747
748 For SunOS, the job is almost trivial, since the dynamic linker and
749 all of it's structures are statically linked to the executable at
750 link time. Thus the symbol for the address we are looking for has
751 already been added to the minimal symbol table for the executable's
752 objfile at the time the symbol file's symbols were read, and all we
753 have to do is look it up there. Note that we explicitly do NOT want
754 to find the copies in the shared library.
755
756 The SVR4 version is a bit more complicated because the address
757 is contained somewhere in the dynamic info section. We have to go
758 to a lot more work to discover the address of the debug base symbol.
759 Because of this complexity, we cache the value we find and return that
760 value on subsequent invocations. Note there is no copy in the
761 executable symbol tables. */
762
763 static CORE_ADDR
764 locate_base (struct svr4_info *info)
765 {
766 /* Check to see if we have a currently valid address, and if so, avoid
767 doing all this work again and just return the cached address. If
768 we have no cached address, try to locate it in the dynamic info
769 section for ELF executables. There's no point in doing any of this
770 though if we don't have some link map offsets to work with. */
771
772 if (info->debug_base == 0 && svr4_have_link_map_offsets ())
773 info->debug_base = elf_locate_base ();
774 return info->debug_base;
775 }
776
777 /* Find the first element in the inferior's dynamic link map, and
778 return its address in the inferior. Return zero if the address
779 could not be determined.
780
781 FIXME: Perhaps we should validate the info somehow, perhaps by
782 checking r_version for a known version number, or r_state for
783 RT_CONSISTENT. */
784
785 static CORE_ADDR
786 solib_svr4_r_map (struct svr4_info *info)
787 {
788 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
789 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
790 CORE_ADDR addr = 0;
791 volatile struct gdb_exception ex;
792
793 TRY_CATCH (ex, RETURN_MASK_ERROR)
794 {
795 addr = read_memory_typed_address (info->debug_base + lmo->r_map_offset,
796 ptr_type);
797 }
798 exception_print (gdb_stderr, ex);
799 return addr;
800 }
801
802 /* Find r_brk from the inferior's debug base. */
803
804 static CORE_ADDR
805 solib_svr4_r_brk (struct svr4_info *info)
806 {
807 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
808 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
809
810 return read_memory_typed_address (info->debug_base + lmo->r_brk_offset,
811 ptr_type);
812 }
813
814 /* Find the link map for the dynamic linker (if it is not in the
815 normal list of loaded shared objects). */
816
817 static CORE_ADDR
818 solib_svr4_r_ldsomap (struct svr4_info *info)
819 {
820 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
821 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
822 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
823 ULONGEST version;
824
825 /* Check version, and return zero if `struct r_debug' doesn't have
826 the r_ldsomap member. */
827 version
828 = read_memory_unsigned_integer (info->debug_base + lmo->r_version_offset,
829 lmo->r_version_size, byte_order);
830 if (version < 2 || lmo->r_ldsomap_offset == -1)
831 return 0;
832
833 return read_memory_typed_address (info->debug_base + lmo->r_ldsomap_offset,
834 ptr_type);
835 }
836
837 /* On Solaris systems with some versions of the dynamic linker,
838 ld.so's l_name pointer points to the SONAME in the string table
839 rather than into writable memory. So that GDB can find shared
840 libraries when loading a core file generated by gcore, ensure that
841 memory areas containing the l_name string are saved in the core
842 file. */
843
844 static int
845 svr4_keep_data_in_core (CORE_ADDR vaddr, unsigned long size)
846 {
847 struct svr4_info *info;
848 CORE_ADDR ldsomap;
849 struct so_list *new;
850 struct cleanup *old_chain;
851 CORE_ADDR name_lm;
852
853 info = get_svr4_info ();
854
855 info->debug_base = 0;
856 locate_base (info);
857 if (!info->debug_base)
858 return 0;
859
860 ldsomap = solib_svr4_r_ldsomap (info);
861 if (!ldsomap)
862 return 0;
863
864 new = XZALLOC (struct so_list);
865 old_chain = make_cleanup (xfree, new);
866 new->lm_info = lm_info_read (ldsomap);
867 make_cleanup (xfree, new->lm_info);
868 name_lm = new->lm_info ? new->lm_info->l_name : 0;
869 do_cleanups (old_chain);
870
871 return (name_lm >= vaddr && name_lm < vaddr + size);
872 }
873
874 /* Implement the "open_symbol_file_object" target_so_ops method.
875
876 If no open symbol file, attempt to locate and open the main symbol
877 file. On SVR4 systems, this is the first link map entry. If its
878 name is here, we can open it. Useful when attaching to a process
879 without first loading its symbol file. */
880
881 static int
882 open_symbol_file_object (void *from_ttyp)
883 {
884 CORE_ADDR lm, l_name;
885 char *filename;
886 int errcode;
887 int from_tty = *(int *)from_ttyp;
888 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
889 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
890 int l_name_size = TYPE_LENGTH (ptr_type);
891 gdb_byte *l_name_buf = xmalloc (l_name_size);
892 struct cleanup *cleanups = make_cleanup (xfree, l_name_buf);
893 struct svr4_info *info = get_svr4_info ();
894
895 if (symfile_objfile)
896 if (!query (_("Attempt to reload symbols from process? ")))
897 {
898 do_cleanups (cleanups);
899 return 0;
900 }
901
902 /* Always locate the debug struct, in case it has moved. */
903 info->debug_base = 0;
904 if (locate_base (info) == 0)
905 {
906 do_cleanups (cleanups);
907 return 0; /* failed somehow... */
908 }
909
910 /* First link map member should be the executable. */
911 lm = solib_svr4_r_map (info);
912 if (lm == 0)
913 {
914 do_cleanups (cleanups);
915 return 0; /* failed somehow... */
916 }
917
918 /* Read address of name from target memory to GDB. */
919 read_memory (lm + lmo->l_name_offset, l_name_buf, l_name_size);
920
921 /* Convert the address to host format. */
922 l_name = extract_typed_address (l_name_buf, ptr_type);
923
924 if (l_name == 0)
925 {
926 do_cleanups (cleanups);
927 return 0; /* No filename. */
928 }
929
930 /* Now fetch the filename from target memory. */
931 target_read_string (l_name, &filename, SO_NAME_MAX_PATH_SIZE - 1, &errcode);
932 make_cleanup (xfree, filename);
933
934 if (errcode)
935 {
936 warning (_("failed to read exec filename from attached file: %s"),
937 safe_strerror (errcode));
938 do_cleanups (cleanups);
939 return 0;
940 }
941
942 /* Have a pathname: read the symbol file. */
943 symbol_file_add_main (filename, from_tty);
944
945 do_cleanups (cleanups);
946 return 1;
947 }
948
949 /* Data exchange structure for the XML parser as returned by
950 svr4_current_sos_via_xfer_libraries. */
951
952 struct svr4_library_list
953 {
954 struct so_list *head, **tailp;
955
956 /* Inferior address of struct link_map used for the main executable. It is
957 NULL if not known. */
958 CORE_ADDR main_lm;
959 };
960
961 /* Implementation for target_so_ops.free_so. */
962
963 static void
964 svr4_free_so (struct so_list *so)
965 {
966 xfree (so->lm_info);
967 }
968
969 /* Implement target_so_ops.clear_so. */
970
971 static void
972 svr4_clear_so (struct so_list *so)
973 {
974 so->lm_info->l_addr_p = 0;
975 }
976
977 /* Free so_list built so far (called via cleanup). */
978
979 static void
980 svr4_free_library_list (void *p_list)
981 {
982 struct so_list *list = *(struct so_list **) p_list;
983
984 while (list != NULL)
985 {
986 struct so_list *next = list->next;
987
988 free_so (list);
989 list = next;
990 }
991 }
992
993 #ifdef HAVE_LIBEXPAT
994
995 #include "xml-support.h"
996
997 /* Handle the start of a <library> element. Note: new elements are added
998 at the tail of the list, keeping the list in order. */
999
1000 static void
1001 library_list_start_library (struct gdb_xml_parser *parser,
1002 const struct gdb_xml_element *element,
1003 void *user_data, VEC(gdb_xml_value_s) *attributes)
1004 {
1005 struct svr4_library_list *list = user_data;
1006 const char *name = xml_find_attribute (attributes, "name")->value;
1007 ULONGEST *lmp = xml_find_attribute (attributes, "lm")->value;
1008 ULONGEST *l_addrp = xml_find_attribute (attributes, "l_addr")->value;
1009 ULONGEST *l_ldp = xml_find_attribute (attributes, "l_ld")->value;
1010 struct so_list *new_elem;
1011
1012 new_elem = XZALLOC (struct so_list);
1013 new_elem->lm_info = XZALLOC (struct lm_info);
1014 new_elem->lm_info->lm_addr = *lmp;
1015 new_elem->lm_info->l_addr_inferior = *l_addrp;
1016 new_elem->lm_info->l_ld = *l_ldp;
1017
1018 strncpy (new_elem->so_name, name, sizeof (new_elem->so_name) - 1);
1019 new_elem->so_name[sizeof (new_elem->so_name) - 1] = 0;
1020 strcpy (new_elem->so_original_name, new_elem->so_name);
1021
1022 *list->tailp = new_elem;
1023 list->tailp = &new_elem->next;
1024 }
1025
1026 /* Handle the start of a <library-list-svr4> element. */
1027
1028 static void
1029 svr4_library_list_start_list (struct gdb_xml_parser *parser,
1030 const struct gdb_xml_element *element,
1031 void *user_data, VEC(gdb_xml_value_s) *attributes)
1032 {
1033 struct svr4_library_list *list = user_data;
1034 const char *version = xml_find_attribute (attributes, "version")->value;
1035 struct gdb_xml_value *main_lm = xml_find_attribute (attributes, "main-lm");
1036
1037 if (strcmp (version, "1.0") != 0)
1038 gdb_xml_error (parser,
1039 _("SVR4 Library list has unsupported version \"%s\""),
1040 version);
1041
1042 if (main_lm)
1043 list->main_lm = *(ULONGEST *) main_lm->value;
1044 }
1045
1046 /* The allowed elements and attributes for an XML library list.
1047 The root element is a <library-list>. */
1048
1049 static const struct gdb_xml_attribute svr4_library_attributes[] =
1050 {
1051 { "name", GDB_XML_AF_NONE, NULL, NULL },
1052 { "lm", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1053 { "l_addr", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1054 { "l_ld", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1055 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1056 };
1057
1058 static const struct gdb_xml_element svr4_library_list_children[] =
1059 {
1060 {
1061 "library", svr4_library_attributes, NULL,
1062 GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL,
1063 library_list_start_library, NULL
1064 },
1065 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1066 };
1067
1068 static const struct gdb_xml_attribute svr4_library_list_attributes[] =
1069 {
1070 { "version", GDB_XML_AF_NONE, NULL, NULL },
1071 { "main-lm", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
1072 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1073 };
1074
1075 static const struct gdb_xml_element svr4_library_list_elements[] =
1076 {
1077 { "library-list-svr4", svr4_library_list_attributes, svr4_library_list_children,
1078 GDB_XML_EF_NONE, svr4_library_list_start_list, NULL },
1079 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1080 };
1081
1082 /* Parse qXfer:libraries:read packet into *SO_LIST_RETURN. Return 1 if
1083
1084 Return 0 if packet not supported, *SO_LIST_RETURN is not modified in such
1085 case. Return 1 if *SO_LIST_RETURN contains the library list, it may be
1086 empty, caller is responsible for freeing all its entries. */
1087
1088 static int
1089 svr4_parse_libraries (const char *document, struct svr4_library_list *list)
1090 {
1091 struct cleanup *back_to = make_cleanup (svr4_free_library_list,
1092 &list->head);
1093
1094 memset (list, 0, sizeof (*list));
1095 list->tailp = &list->head;
1096 if (gdb_xml_parse_quick (_("target library list"), "library-list.dtd",
1097 svr4_library_list_elements, document, list) == 0)
1098 {
1099 /* Parsed successfully, keep the result. */
1100 discard_cleanups (back_to);
1101 return 1;
1102 }
1103
1104 do_cleanups (back_to);
1105 return 0;
1106 }
1107
1108 /* Attempt to get so_list from target via qXfer:libraries:read packet.
1109
1110 Return 0 if packet not supported, *SO_LIST_RETURN is not modified in such
1111 case. Return 1 if *SO_LIST_RETURN contains the library list, it may be
1112 empty, caller is responsible for freeing all its entries. */
1113
1114 static int
1115 svr4_current_sos_via_xfer_libraries (struct svr4_library_list *list)
1116 {
1117 char *svr4_library_document;
1118 int result;
1119 struct cleanup *back_to;
1120
1121 /* Fetch the list of shared libraries. */
1122 svr4_library_document = target_read_stralloc (&current_target,
1123 TARGET_OBJECT_LIBRARIES_SVR4,
1124 NULL);
1125 if (svr4_library_document == NULL)
1126 return 0;
1127
1128 back_to = make_cleanup (xfree, svr4_library_document);
1129 result = svr4_parse_libraries (svr4_library_document, list);
1130 do_cleanups (back_to);
1131
1132 return result;
1133 }
1134
1135 #else
1136
1137 static int
1138 svr4_current_sos_via_xfer_libraries (struct svr4_library_list *list)
1139 {
1140 return 0;
1141 }
1142
1143 #endif
1144
1145 /* If no shared library information is available from the dynamic
1146 linker, build a fallback list from other sources. */
1147
1148 static struct so_list *
1149 svr4_default_sos (void)
1150 {
1151 struct svr4_info *info = get_svr4_info ();
1152 struct so_list *new;
1153
1154 if (!info->debug_loader_offset_p)
1155 return NULL;
1156
1157 new = XZALLOC (struct so_list);
1158
1159 new->lm_info = xzalloc (sizeof (struct lm_info));
1160
1161 /* Nothing will ever check the other fields if we set l_addr_p. */
1162 new->lm_info->l_addr = info->debug_loader_offset;
1163 new->lm_info->l_addr_p = 1;
1164
1165 strncpy (new->so_name, info->debug_loader_name, SO_NAME_MAX_PATH_SIZE - 1);
1166 new->so_name[SO_NAME_MAX_PATH_SIZE - 1] = '\0';
1167 strcpy (new->so_original_name, new->so_name);
1168
1169 return new;
1170 }
1171
1172 /* Read the whole inferior libraries chain starting at address LM. Add the
1173 entries to the tail referenced by LINK_PTR_PTR. Ignore the first entry if
1174 IGNORE_FIRST and set global MAIN_LM_ADDR according to it. */
1175
1176 static void
1177 svr4_read_so_list (CORE_ADDR lm, struct so_list ***link_ptr_ptr,
1178 int ignore_first)
1179 {
1180 CORE_ADDR prev_lm = 0, next_lm;
1181
1182 for (; lm != 0; prev_lm = lm, lm = next_lm)
1183 {
1184 struct so_list *new;
1185 struct cleanup *old_chain;
1186 int errcode;
1187 char *buffer;
1188
1189 new = XZALLOC (struct so_list);
1190 old_chain = make_cleanup_free_so (new);
1191
1192 new->lm_info = lm_info_read (lm);
1193 if (new->lm_info == NULL)
1194 {
1195 do_cleanups (old_chain);
1196 break;
1197 }
1198
1199 next_lm = new->lm_info->l_next;
1200
1201 if (new->lm_info->l_prev != prev_lm)
1202 {
1203 warning (_("Corrupted shared library list: %s != %s"),
1204 paddress (target_gdbarch (), prev_lm),
1205 paddress (target_gdbarch (), new->lm_info->l_prev));
1206 do_cleanups (old_chain);
1207 break;
1208 }
1209
1210 /* For SVR4 versions, the first entry in the link map is for the
1211 inferior executable, so we must ignore it. For some versions of
1212 SVR4, it has no name. For others (Solaris 2.3 for example), it
1213 does have a name, so we can no longer use a missing name to
1214 decide when to ignore it. */
1215 if (ignore_first && new->lm_info->l_prev == 0)
1216 {
1217 struct svr4_info *info = get_svr4_info ();
1218
1219 info->main_lm_addr = new->lm_info->lm_addr;
1220 do_cleanups (old_chain);
1221 continue;
1222 }
1223
1224 /* Extract this shared object's name. */
1225 target_read_string (new->lm_info->l_name, &buffer,
1226 SO_NAME_MAX_PATH_SIZE - 1, &errcode);
1227 if (errcode != 0)
1228 {
1229 warning (_("Can't read pathname for load map: %s."),
1230 safe_strerror (errcode));
1231 do_cleanups (old_chain);
1232 continue;
1233 }
1234
1235 strncpy (new->so_name, buffer, SO_NAME_MAX_PATH_SIZE - 1);
1236 new->so_name[SO_NAME_MAX_PATH_SIZE - 1] = '\0';
1237 strcpy (new->so_original_name, new->so_name);
1238 xfree (buffer);
1239
1240 /* If this entry has no name, or its name matches the name
1241 for the main executable, don't include it in the list. */
1242 if (! new->so_name[0] || match_main (new->so_name))
1243 {
1244 do_cleanups (old_chain);
1245 continue;
1246 }
1247
1248 discard_cleanups (old_chain);
1249 new->next = 0;
1250 **link_ptr_ptr = new;
1251 *link_ptr_ptr = &new->next;
1252 }
1253 }
1254
1255 /* Implement the "current_sos" target_so_ops method. */
1256
1257 static struct so_list *
1258 svr4_current_sos (void)
1259 {
1260 CORE_ADDR lm;
1261 struct so_list *head = NULL;
1262 struct so_list **link_ptr = &head;
1263 struct svr4_info *info;
1264 struct cleanup *back_to;
1265 int ignore_first;
1266 struct svr4_library_list library_list;
1267
1268 /* Fall back to manual examination of the target if the packet is not
1269 supported or gdbserver failed to find DT_DEBUG. gdb.server/solib-list.exp
1270 tests a case where gdbserver cannot find the shared libraries list while
1271 GDB itself is able to find it via SYMFILE_OBJFILE.
1272
1273 Unfortunately statically linked inferiors will also fall back through this
1274 suboptimal code path. */
1275
1276 if (svr4_current_sos_via_xfer_libraries (&library_list))
1277 {
1278 if (library_list.main_lm)
1279 {
1280 info = get_svr4_info ();
1281 info->main_lm_addr = library_list.main_lm;
1282 }
1283
1284 return library_list.head ? library_list.head : svr4_default_sos ();
1285 }
1286
1287 info = get_svr4_info ();
1288
1289 /* Always locate the debug struct, in case it has moved. */
1290 info->debug_base = 0;
1291 locate_base (info);
1292
1293 /* If we can't find the dynamic linker's base structure, this
1294 must not be a dynamically linked executable. Hmm. */
1295 if (! info->debug_base)
1296 return svr4_default_sos ();
1297
1298 /* Assume that everything is a library if the dynamic loader was loaded
1299 late by a static executable. */
1300 if (exec_bfd && bfd_get_section_by_name (exec_bfd, ".dynamic") == NULL)
1301 ignore_first = 0;
1302 else
1303 ignore_first = 1;
1304
1305 back_to = make_cleanup (svr4_free_library_list, &head);
1306
1307 /* Walk the inferior's link map list, and build our list of
1308 `struct so_list' nodes. */
1309 lm = solib_svr4_r_map (info);
1310 if (lm)
1311 svr4_read_so_list (lm, &link_ptr, ignore_first);
1312
1313 /* On Solaris, the dynamic linker is not in the normal list of
1314 shared objects, so make sure we pick it up too. Having
1315 symbol information for the dynamic linker is quite crucial
1316 for skipping dynamic linker resolver code. */
1317 lm = solib_svr4_r_ldsomap (info);
1318 if (lm)
1319 svr4_read_so_list (lm, &link_ptr, 0);
1320
1321 discard_cleanups (back_to);
1322
1323 if (head == NULL)
1324 return svr4_default_sos ();
1325
1326 return head;
1327 }
1328
1329 /* Get the address of the link_map for a given OBJFILE. */
1330
1331 CORE_ADDR
1332 svr4_fetch_objfile_link_map (struct objfile *objfile)
1333 {
1334 struct so_list *so;
1335 struct svr4_info *info = get_svr4_info ();
1336
1337 /* Cause svr4_current_sos() to be run if it hasn't been already. */
1338 if (info->main_lm_addr == 0)
1339 solib_add (NULL, 0, &current_target, auto_solib_add);
1340
1341 /* svr4_current_sos() will set main_lm_addr for the main executable. */
1342 if (objfile == symfile_objfile)
1343 return info->main_lm_addr;
1344
1345 /* The other link map addresses may be found by examining the list
1346 of shared libraries. */
1347 for (so = master_so_list (); so; so = so->next)
1348 if (so->objfile == objfile)
1349 return so->lm_info->lm_addr;
1350
1351 /* Not found! */
1352 return 0;
1353 }
1354
1355 /* On some systems, the only way to recognize the link map entry for
1356 the main executable file is by looking at its name. Return
1357 non-zero iff SONAME matches one of the known main executable names. */
1358
1359 static int
1360 match_main (const char *soname)
1361 {
1362 const char * const *mainp;
1363
1364 for (mainp = main_name_list; *mainp != NULL; mainp++)
1365 {
1366 if (strcmp (soname, *mainp) == 0)
1367 return (1);
1368 }
1369
1370 return (0);
1371 }
1372
1373 /* Return 1 if PC lies in the dynamic symbol resolution code of the
1374 SVR4 run time loader. */
1375
1376 int
1377 svr4_in_dynsym_resolve_code (CORE_ADDR pc)
1378 {
1379 struct svr4_info *info = get_svr4_info ();
1380
1381 return ((pc >= info->interp_text_sect_low
1382 && pc < info->interp_text_sect_high)
1383 || (pc >= info->interp_plt_sect_low
1384 && pc < info->interp_plt_sect_high)
1385 || in_plt_section (pc, NULL)
1386 || in_gnu_ifunc_stub (pc));
1387 }
1388
1389 /* Given an executable's ABFD and target, compute the entry-point
1390 address. */
1391
1392 static CORE_ADDR
1393 exec_entry_point (struct bfd *abfd, struct target_ops *targ)
1394 {
1395 CORE_ADDR addr;
1396
1397 /* KevinB wrote ... for most targets, the address returned by
1398 bfd_get_start_address() is the entry point for the start
1399 function. But, for some targets, bfd_get_start_address() returns
1400 the address of a function descriptor from which the entry point
1401 address may be extracted. This address is extracted by
1402 gdbarch_convert_from_func_ptr_addr(). The method
1403 gdbarch_convert_from_func_ptr_addr() is the merely the identify
1404 function for targets which don't use function descriptors. */
1405 addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
1406 bfd_get_start_address (abfd),
1407 targ);
1408 return gdbarch_addr_bits_remove (target_gdbarch (), addr);
1409 }
1410
1411 /* Helper function for gdb_bfd_lookup_symbol. */
1412
1413 static int
1414 cmp_name_and_sec_flags (asymbol *sym, void *data)
1415 {
1416 return (strcmp (sym->name, (const char *) data) == 0
1417 && (sym->section->flags & (SEC_CODE | SEC_DATA)) != 0);
1418 }
1419 /* Arrange for dynamic linker to hit breakpoint.
1420
1421 Both the SunOS and the SVR4 dynamic linkers have, as part of their
1422 debugger interface, support for arranging for the inferior to hit
1423 a breakpoint after mapping in the shared libraries. This function
1424 enables that breakpoint.
1425
1426 For SunOS, there is a special flag location (in_debugger) which we
1427 set to 1. When the dynamic linker sees this flag set, it will set
1428 a breakpoint at a location known only to itself, after saving the
1429 original contents of that place and the breakpoint address itself,
1430 in it's own internal structures. When we resume the inferior, it
1431 will eventually take a SIGTRAP when it runs into the breakpoint.
1432 We handle this (in a different place) by restoring the contents of
1433 the breakpointed location (which is only known after it stops),
1434 chasing around to locate the shared libraries that have been
1435 loaded, then resuming.
1436
1437 For SVR4, the debugger interface structure contains a member (r_brk)
1438 which is statically initialized at the time the shared library is
1439 built, to the offset of a function (_r_debug_state) which is guaran-
1440 teed to be called once before mapping in a library, and again when
1441 the mapping is complete. At the time we are examining this member,
1442 it contains only the unrelocated offset of the function, so we have
1443 to do our own relocation. Later, when the dynamic linker actually
1444 runs, it relocates r_brk to be the actual address of _r_debug_state().
1445
1446 The debugger interface structure also contains an enumeration which
1447 is set to either RT_ADD or RT_DELETE prior to changing the mapping,
1448 depending upon whether or not the library is being mapped or unmapped,
1449 and then set to RT_CONSISTENT after the library is mapped/unmapped. */
1450
1451 static int
1452 enable_break (struct svr4_info *info, int from_tty)
1453 {
1454 struct minimal_symbol *msymbol;
1455 const char * const *bkpt_namep;
1456 asection *interp_sect;
1457 char *interp_name;
1458 CORE_ADDR sym_addr;
1459
1460 info->interp_text_sect_low = info->interp_text_sect_high = 0;
1461 info->interp_plt_sect_low = info->interp_plt_sect_high = 0;
1462
1463 /* If we already have a shared library list in the target, and
1464 r_debug contains r_brk, set the breakpoint there - this should
1465 mean r_brk has already been relocated. Assume the dynamic linker
1466 is the object containing r_brk. */
1467
1468 solib_add (NULL, from_tty, &current_target, auto_solib_add);
1469 sym_addr = 0;
1470 if (info->debug_base && solib_svr4_r_map (info) != 0)
1471 sym_addr = solib_svr4_r_brk (info);
1472
1473 if (sym_addr != 0)
1474 {
1475 struct obj_section *os;
1476
1477 sym_addr = gdbarch_addr_bits_remove
1478 (target_gdbarch (), gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
1479 sym_addr,
1480 &current_target));
1481
1482 /* On at least some versions of Solaris there's a dynamic relocation
1483 on _r_debug.r_brk and SYM_ADDR may not be relocated yet, e.g., if
1484 we get control before the dynamic linker has self-relocated.
1485 Check if SYM_ADDR is in a known section, if it is assume we can
1486 trust its value. This is just a heuristic though, it could go away
1487 or be replaced if it's getting in the way.
1488
1489 On ARM we need to know whether the ISA of rtld_db_dlactivity (or
1490 however it's spelled in your particular system) is ARM or Thumb.
1491 That knowledge is encoded in the address, if it's Thumb the low bit
1492 is 1. However, we've stripped that info above and it's not clear
1493 what all the consequences are of passing a non-addr_bits_remove'd
1494 address to create_solib_event_breakpoint. The call to
1495 find_pc_section verifies we know about the address and have some
1496 hope of computing the right kind of breakpoint to use (via
1497 symbol info). It does mean that GDB needs to be pointed at a
1498 non-stripped version of the dynamic linker in order to obtain
1499 information it already knows about. Sigh. */
1500
1501 os = find_pc_section (sym_addr);
1502 if (os != NULL)
1503 {
1504 /* Record the relocated start and end address of the dynamic linker
1505 text and plt section for svr4_in_dynsym_resolve_code. */
1506 bfd *tmp_bfd;
1507 CORE_ADDR load_addr;
1508
1509 tmp_bfd = os->objfile->obfd;
1510 load_addr = ANOFFSET (os->objfile->section_offsets,
1511 SECT_OFF_TEXT (os->objfile));
1512
1513 interp_sect = bfd_get_section_by_name (tmp_bfd, ".text");
1514 if (interp_sect)
1515 {
1516 info->interp_text_sect_low =
1517 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
1518 info->interp_text_sect_high =
1519 info->interp_text_sect_low
1520 + bfd_section_size (tmp_bfd, interp_sect);
1521 }
1522 interp_sect = bfd_get_section_by_name (tmp_bfd, ".plt");
1523 if (interp_sect)
1524 {
1525 info->interp_plt_sect_low =
1526 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
1527 info->interp_plt_sect_high =
1528 info->interp_plt_sect_low
1529 + bfd_section_size (tmp_bfd, interp_sect);
1530 }
1531
1532 create_solib_event_breakpoint (target_gdbarch (), sym_addr);
1533 return 1;
1534 }
1535 }
1536
1537 /* Find the program interpreter; if not found, warn the user and drop
1538 into the old breakpoint at symbol code. */
1539 interp_name = find_program_interpreter ();
1540 if (interp_name)
1541 {
1542 CORE_ADDR load_addr = 0;
1543 int load_addr_found = 0;
1544 int loader_found_in_list = 0;
1545 struct so_list *so;
1546 bfd *tmp_bfd = NULL;
1547 struct target_ops *tmp_bfd_target;
1548 volatile struct gdb_exception ex;
1549
1550 sym_addr = 0;
1551
1552 /* Now we need to figure out where the dynamic linker was
1553 loaded so that we can load its symbols and place a breakpoint
1554 in the dynamic linker itself.
1555
1556 This address is stored on the stack. However, I've been unable
1557 to find any magic formula to find it for Solaris (appears to
1558 be trivial on GNU/Linux). Therefore, we have to try an alternate
1559 mechanism to find the dynamic linker's base address. */
1560
1561 TRY_CATCH (ex, RETURN_MASK_ALL)
1562 {
1563 tmp_bfd = solib_bfd_open (interp_name);
1564 }
1565 if (tmp_bfd == NULL)
1566 goto bkpt_at_symbol;
1567
1568 /* Now convert the TMP_BFD into a target. That way target, as
1569 well as BFD operations can be used. */
1570 tmp_bfd_target = target_bfd_reopen (tmp_bfd);
1571 /* target_bfd_reopen acquired its own reference, so we can
1572 release ours now. */
1573 gdb_bfd_unref (tmp_bfd);
1574
1575 /* On a running target, we can get the dynamic linker's base
1576 address from the shared library table. */
1577 so = master_so_list ();
1578 while (so)
1579 {
1580 if (svr4_same_1 (interp_name, so->so_original_name))
1581 {
1582 load_addr_found = 1;
1583 loader_found_in_list = 1;
1584 load_addr = lm_addr_check (so, tmp_bfd);
1585 break;
1586 }
1587 so = so->next;
1588 }
1589
1590 /* If we were not able to find the base address of the loader
1591 from our so_list, then try using the AT_BASE auxilliary entry. */
1592 if (!load_addr_found)
1593 if (target_auxv_search (&current_target, AT_BASE, &load_addr) > 0)
1594 {
1595 int addr_bit = gdbarch_addr_bit (target_gdbarch ());
1596
1597 /* Ensure LOAD_ADDR has proper sign in its possible upper bits so
1598 that `+ load_addr' will overflow CORE_ADDR width not creating
1599 invalid addresses like 0x101234567 for 32bit inferiors on 64bit
1600 GDB. */
1601
1602 if (addr_bit < (sizeof (CORE_ADDR) * HOST_CHAR_BIT))
1603 {
1604 CORE_ADDR space_size = (CORE_ADDR) 1 << addr_bit;
1605 CORE_ADDR tmp_entry_point = exec_entry_point (tmp_bfd,
1606 tmp_bfd_target);
1607
1608 gdb_assert (load_addr < space_size);
1609
1610 /* TMP_ENTRY_POINT exceeding SPACE_SIZE would be for prelinked
1611 64bit ld.so with 32bit executable, it should not happen. */
1612
1613 if (tmp_entry_point < space_size
1614 && tmp_entry_point + load_addr >= space_size)
1615 load_addr -= space_size;
1616 }
1617
1618 load_addr_found = 1;
1619 }
1620
1621 /* Otherwise we find the dynamic linker's base address by examining
1622 the current pc (which should point at the entry point for the
1623 dynamic linker) and subtracting the offset of the entry point.
1624
1625 This is more fragile than the previous approaches, but is a good
1626 fallback method because it has actually been working well in
1627 most cases. */
1628 if (!load_addr_found)
1629 {
1630 struct regcache *regcache
1631 = get_thread_arch_regcache (inferior_ptid, target_gdbarch ());
1632
1633 load_addr = (regcache_read_pc (regcache)
1634 - exec_entry_point (tmp_bfd, tmp_bfd_target));
1635 }
1636
1637 if (!loader_found_in_list)
1638 {
1639 info->debug_loader_name = xstrdup (interp_name);
1640 info->debug_loader_offset_p = 1;
1641 info->debug_loader_offset = load_addr;
1642 solib_add (NULL, from_tty, &current_target, auto_solib_add);
1643 }
1644
1645 /* Record the relocated start and end address of the dynamic linker
1646 text and plt section for svr4_in_dynsym_resolve_code. */
1647 interp_sect = bfd_get_section_by_name (tmp_bfd, ".text");
1648 if (interp_sect)
1649 {
1650 info->interp_text_sect_low =
1651 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
1652 info->interp_text_sect_high =
1653 info->interp_text_sect_low
1654 + bfd_section_size (tmp_bfd, interp_sect);
1655 }
1656 interp_sect = bfd_get_section_by_name (tmp_bfd, ".plt");
1657 if (interp_sect)
1658 {
1659 info->interp_plt_sect_low =
1660 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
1661 info->interp_plt_sect_high =
1662 info->interp_plt_sect_low
1663 + bfd_section_size (tmp_bfd, interp_sect);
1664 }
1665
1666 /* Now try to set a breakpoint in the dynamic linker. */
1667 for (bkpt_namep = solib_break_names; *bkpt_namep != NULL; bkpt_namep++)
1668 {
1669 sym_addr = gdb_bfd_lookup_symbol (tmp_bfd, cmp_name_and_sec_flags,
1670 (void *) *bkpt_namep);
1671 if (sym_addr != 0)
1672 break;
1673 }
1674
1675 if (sym_addr != 0)
1676 /* Convert 'sym_addr' from a function pointer to an address.
1677 Because we pass tmp_bfd_target instead of the current
1678 target, this will always produce an unrelocated value. */
1679 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
1680 sym_addr,
1681 tmp_bfd_target);
1682
1683 /* We're done with both the temporary bfd and target. Closing
1684 the target closes the underlying bfd, because it holds the
1685 only remaining reference. */
1686 target_close (tmp_bfd_target);
1687
1688 if (sym_addr != 0)
1689 {
1690 create_solib_event_breakpoint (target_gdbarch (), load_addr + sym_addr);
1691 xfree (interp_name);
1692 return 1;
1693 }
1694
1695 /* For whatever reason we couldn't set a breakpoint in the dynamic
1696 linker. Warn and drop into the old code. */
1697 bkpt_at_symbol:
1698 xfree (interp_name);
1699 warning (_("Unable to find dynamic linker breakpoint function.\n"
1700 "GDB will be unable to debug shared library initializers\n"
1701 "and track explicitly loaded dynamic code."));
1702 }
1703
1704 /* Scan through the lists of symbols, trying to look up the symbol and
1705 set a breakpoint there. Terminate loop when we/if we succeed. */
1706
1707 for (bkpt_namep = solib_break_names; *bkpt_namep != NULL; bkpt_namep++)
1708 {
1709 msymbol = lookup_minimal_symbol (*bkpt_namep, NULL, symfile_objfile);
1710 if ((msymbol != NULL) && (SYMBOL_VALUE_ADDRESS (msymbol) != 0))
1711 {
1712 sym_addr = SYMBOL_VALUE_ADDRESS (msymbol);
1713 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
1714 sym_addr,
1715 &current_target);
1716 create_solib_event_breakpoint (target_gdbarch (), sym_addr);
1717 return 1;
1718 }
1719 }
1720
1721 if (interp_name != NULL && !current_inferior ()->attach_flag)
1722 {
1723 for (bkpt_namep = bkpt_names; *bkpt_namep != NULL; bkpt_namep++)
1724 {
1725 msymbol = lookup_minimal_symbol (*bkpt_namep, NULL, symfile_objfile);
1726 if ((msymbol != NULL) && (SYMBOL_VALUE_ADDRESS (msymbol) != 0))
1727 {
1728 sym_addr = SYMBOL_VALUE_ADDRESS (msymbol);
1729 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
1730 sym_addr,
1731 &current_target);
1732 create_solib_event_breakpoint (target_gdbarch (), sym_addr);
1733 return 1;
1734 }
1735 }
1736 }
1737 return 0;
1738 }
1739
1740 /* Implement the "special_symbol_handling" target_so_ops method. */
1741
1742 static void
1743 svr4_special_symbol_handling (void)
1744 {
1745 /* Nothing to do. */
1746 }
1747
1748 /* Read the ELF program headers from ABFD. Return the contents and
1749 set *PHDRS_SIZE to the size of the program headers. */
1750
1751 static gdb_byte *
1752 read_program_headers_from_bfd (bfd *abfd, int *phdrs_size)
1753 {
1754 Elf_Internal_Ehdr *ehdr;
1755 gdb_byte *buf;
1756
1757 ehdr = elf_elfheader (abfd);
1758
1759 *phdrs_size = ehdr->e_phnum * ehdr->e_phentsize;
1760 if (*phdrs_size == 0)
1761 return NULL;
1762
1763 buf = xmalloc (*phdrs_size);
1764 if (bfd_seek (abfd, ehdr->e_phoff, SEEK_SET) != 0
1765 || bfd_bread (buf, *phdrs_size, abfd) != *phdrs_size)
1766 {
1767 xfree (buf);
1768 return NULL;
1769 }
1770
1771 return buf;
1772 }
1773
1774 /* Return 1 and fill *DISPLACEMENTP with detected PIE offset of inferior
1775 exec_bfd. Otherwise return 0.
1776
1777 We relocate all of the sections by the same amount. This
1778 behavior is mandated by recent editions of the System V ABI.
1779 According to the System V Application Binary Interface,
1780 Edition 4.1, page 5-5:
1781
1782 ... Though the system chooses virtual addresses for
1783 individual processes, it maintains the segments' relative
1784 positions. Because position-independent code uses relative
1785 addressesing between segments, the difference between
1786 virtual addresses in memory must match the difference
1787 between virtual addresses in the file. The difference
1788 between the virtual address of any segment in memory and
1789 the corresponding virtual address in the file is thus a
1790 single constant value for any one executable or shared
1791 object in a given process. This difference is the base
1792 address. One use of the base address is to relocate the
1793 memory image of the program during dynamic linking.
1794
1795 The same language also appears in Edition 4.0 of the System V
1796 ABI and is left unspecified in some of the earlier editions.
1797
1798 Decide if the objfile needs to be relocated. As indicated above, we will
1799 only be here when execution is stopped. But during attachment PC can be at
1800 arbitrary address therefore regcache_read_pc can be misleading (contrary to
1801 the auxv AT_ENTRY value). Moreover for executable with interpreter section
1802 regcache_read_pc would point to the interpreter and not the main executable.
1803
1804 So, to summarize, relocations are necessary when the start address obtained
1805 from the executable is different from the address in auxv AT_ENTRY entry.
1806
1807 [ The astute reader will note that we also test to make sure that
1808 the executable in question has the DYNAMIC flag set. It is my
1809 opinion that this test is unnecessary (undesirable even). It
1810 was added to avoid inadvertent relocation of an executable
1811 whose e_type member in the ELF header is not ET_DYN. There may
1812 be a time in the future when it is desirable to do relocations
1813 on other types of files as well in which case this condition
1814 should either be removed or modified to accomodate the new file
1815 type. - Kevin, Nov 2000. ] */
1816
1817 static int
1818 svr4_exec_displacement (CORE_ADDR *displacementp)
1819 {
1820 /* ENTRY_POINT is a possible function descriptor - before
1821 a call to gdbarch_convert_from_func_ptr_addr. */
1822 CORE_ADDR entry_point, displacement;
1823
1824 if (exec_bfd == NULL)
1825 return 0;
1826
1827 /* Therefore for ELF it is ET_EXEC and not ET_DYN. Both shared libraries
1828 being executed themselves and PIE (Position Independent Executable)
1829 executables are ET_DYN. */
1830
1831 if ((bfd_get_file_flags (exec_bfd) & DYNAMIC) == 0)
1832 return 0;
1833
1834 if (target_auxv_search (&current_target, AT_ENTRY, &entry_point) <= 0)
1835 return 0;
1836
1837 displacement = entry_point - bfd_get_start_address (exec_bfd);
1838
1839 /* Verify the DISPLACEMENT candidate complies with the required page
1840 alignment. It is cheaper than the program headers comparison below. */
1841
1842 if (bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
1843 {
1844 const struct elf_backend_data *elf = get_elf_backend_data (exec_bfd);
1845
1846 /* p_align of PT_LOAD segments does not specify any alignment but
1847 only congruency of addresses:
1848 p_offset % p_align == p_vaddr % p_align
1849 Kernel is free to load the executable with lower alignment. */
1850
1851 if ((displacement & (elf->minpagesize - 1)) != 0)
1852 return 0;
1853 }
1854
1855 /* Verify that the auxilliary vector describes the same file as exec_bfd, by
1856 comparing their program headers. If the program headers in the auxilliary
1857 vector do not match the program headers in the executable, then we are
1858 looking at a different file than the one used by the kernel - for
1859 instance, "gdb program" connected to "gdbserver :PORT ld.so program". */
1860
1861 if (bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
1862 {
1863 /* Be optimistic and clear OK only if GDB was able to verify the headers
1864 really do not match. */
1865 int phdrs_size, phdrs2_size, ok = 1;
1866 gdb_byte *buf, *buf2;
1867 int arch_size;
1868
1869 buf = read_program_header (-1, &phdrs_size, &arch_size);
1870 buf2 = read_program_headers_from_bfd (exec_bfd, &phdrs2_size);
1871 if (buf != NULL && buf2 != NULL)
1872 {
1873 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
1874
1875 /* We are dealing with three different addresses. EXEC_BFD
1876 represents current address in on-disk file. target memory content
1877 may be different from EXEC_BFD as the file may have been prelinked
1878 to a different address after the executable has been loaded.
1879 Moreover the address of placement in target memory can be
1880 different from what the program headers in target memory say -
1881 this is the goal of PIE.
1882
1883 Detected DISPLACEMENT covers both the offsets of PIE placement and
1884 possible new prelink performed after start of the program. Here
1885 relocate BUF and BUF2 just by the EXEC_BFD vs. target memory
1886 content offset for the verification purpose. */
1887
1888 if (phdrs_size != phdrs2_size
1889 || bfd_get_arch_size (exec_bfd) != arch_size)
1890 ok = 0;
1891 else if (arch_size == 32
1892 && phdrs_size >= sizeof (Elf32_External_Phdr)
1893 && phdrs_size % sizeof (Elf32_External_Phdr) == 0)
1894 {
1895 Elf_Internal_Ehdr *ehdr2 = elf_tdata (exec_bfd)->elf_header;
1896 Elf_Internal_Phdr *phdr2 = elf_tdata (exec_bfd)->phdr;
1897 CORE_ADDR displacement = 0;
1898 int i;
1899
1900 /* DISPLACEMENT could be found more easily by the difference of
1901 ehdr2->e_entry. But we haven't read the ehdr yet, and we
1902 already have enough information to compute that displacement
1903 with what we've read. */
1904
1905 for (i = 0; i < ehdr2->e_phnum; i++)
1906 if (phdr2[i].p_type == PT_LOAD)
1907 {
1908 Elf32_External_Phdr *phdrp;
1909 gdb_byte *buf_vaddr_p, *buf_paddr_p;
1910 CORE_ADDR vaddr, paddr;
1911 CORE_ADDR displacement_vaddr = 0;
1912 CORE_ADDR displacement_paddr = 0;
1913
1914 phdrp = &((Elf32_External_Phdr *) buf)[i];
1915 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
1916 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
1917
1918 vaddr = extract_unsigned_integer (buf_vaddr_p, 4,
1919 byte_order);
1920 displacement_vaddr = vaddr - phdr2[i].p_vaddr;
1921
1922 paddr = extract_unsigned_integer (buf_paddr_p, 4,
1923 byte_order);
1924 displacement_paddr = paddr - phdr2[i].p_paddr;
1925
1926 if (displacement_vaddr == displacement_paddr)
1927 displacement = displacement_vaddr;
1928
1929 break;
1930 }
1931
1932 /* Now compare BUF and BUF2 with optional DISPLACEMENT. */
1933
1934 for (i = 0; i < phdrs_size / sizeof (Elf32_External_Phdr); i++)
1935 {
1936 Elf32_External_Phdr *phdrp;
1937 Elf32_External_Phdr *phdr2p;
1938 gdb_byte *buf_vaddr_p, *buf_paddr_p;
1939 CORE_ADDR vaddr, paddr;
1940 asection *plt2_asect;
1941
1942 phdrp = &((Elf32_External_Phdr *) buf)[i];
1943 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
1944 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
1945 phdr2p = &((Elf32_External_Phdr *) buf2)[i];
1946
1947 /* PT_GNU_STACK is an exception by being never relocated by
1948 prelink as its addresses are always zero. */
1949
1950 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
1951 continue;
1952
1953 /* Check also other adjustment combinations - PR 11786. */
1954
1955 vaddr = extract_unsigned_integer (buf_vaddr_p, 4,
1956 byte_order);
1957 vaddr -= displacement;
1958 store_unsigned_integer (buf_vaddr_p, 4, byte_order, vaddr);
1959
1960 paddr = extract_unsigned_integer (buf_paddr_p, 4,
1961 byte_order);
1962 paddr -= displacement;
1963 store_unsigned_integer (buf_paddr_p, 4, byte_order, paddr);
1964
1965 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
1966 continue;
1967
1968 /* prelink can convert .plt SHT_NOBITS to SHT_PROGBITS. */
1969 plt2_asect = bfd_get_section_by_name (exec_bfd, ".plt");
1970 if (plt2_asect)
1971 {
1972 int content2;
1973 gdb_byte *buf_filesz_p = (gdb_byte *) &phdrp->p_filesz;
1974 CORE_ADDR filesz;
1975
1976 content2 = (bfd_get_section_flags (exec_bfd, plt2_asect)
1977 & SEC_HAS_CONTENTS) != 0;
1978
1979 filesz = extract_unsigned_integer (buf_filesz_p, 4,
1980 byte_order);
1981
1982 /* PLT2_ASECT is from on-disk file (exec_bfd) while
1983 FILESZ is from the in-memory image. */
1984 if (content2)
1985 filesz += bfd_get_section_size (plt2_asect);
1986 else
1987 filesz -= bfd_get_section_size (plt2_asect);
1988
1989 store_unsigned_integer (buf_filesz_p, 4, byte_order,
1990 filesz);
1991
1992 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
1993 continue;
1994 }
1995
1996 ok = 0;
1997 break;
1998 }
1999 }
2000 else if (arch_size == 64
2001 && phdrs_size >= sizeof (Elf64_External_Phdr)
2002 && phdrs_size % sizeof (Elf64_External_Phdr) == 0)
2003 {
2004 Elf_Internal_Ehdr *ehdr2 = elf_tdata (exec_bfd)->elf_header;
2005 Elf_Internal_Phdr *phdr2 = elf_tdata (exec_bfd)->phdr;
2006 CORE_ADDR displacement = 0;
2007 int i;
2008
2009 /* DISPLACEMENT could be found more easily by the difference of
2010 ehdr2->e_entry. But we haven't read the ehdr yet, and we
2011 already have enough information to compute that displacement
2012 with what we've read. */
2013
2014 for (i = 0; i < ehdr2->e_phnum; i++)
2015 if (phdr2[i].p_type == PT_LOAD)
2016 {
2017 Elf64_External_Phdr *phdrp;
2018 gdb_byte *buf_vaddr_p, *buf_paddr_p;
2019 CORE_ADDR vaddr, paddr;
2020 CORE_ADDR displacement_vaddr = 0;
2021 CORE_ADDR displacement_paddr = 0;
2022
2023 phdrp = &((Elf64_External_Phdr *) buf)[i];
2024 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
2025 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
2026
2027 vaddr = extract_unsigned_integer (buf_vaddr_p, 8,
2028 byte_order);
2029 displacement_vaddr = vaddr - phdr2[i].p_vaddr;
2030
2031 paddr = extract_unsigned_integer (buf_paddr_p, 8,
2032 byte_order);
2033 displacement_paddr = paddr - phdr2[i].p_paddr;
2034
2035 if (displacement_vaddr == displacement_paddr)
2036 displacement = displacement_vaddr;
2037
2038 break;
2039 }
2040
2041 /* Now compare BUF and BUF2 with optional DISPLACEMENT. */
2042
2043 for (i = 0; i < phdrs_size / sizeof (Elf64_External_Phdr); i++)
2044 {
2045 Elf64_External_Phdr *phdrp;
2046 Elf64_External_Phdr *phdr2p;
2047 gdb_byte *buf_vaddr_p, *buf_paddr_p;
2048 CORE_ADDR vaddr, paddr;
2049 asection *plt2_asect;
2050
2051 phdrp = &((Elf64_External_Phdr *) buf)[i];
2052 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
2053 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
2054 phdr2p = &((Elf64_External_Phdr *) buf2)[i];
2055
2056 /* PT_GNU_STACK is an exception by being never relocated by
2057 prelink as its addresses are always zero. */
2058
2059 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2060 continue;
2061
2062 /* Check also other adjustment combinations - PR 11786. */
2063
2064 vaddr = extract_unsigned_integer (buf_vaddr_p, 8,
2065 byte_order);
2066 vaddr -= displacement;
2067 store_unsigned_integer (buf_vaddr_p, 8, byte_order, vaddr);
2068
2069 paddr = extract_unsigned_integer (buf_paddr_p, 8,
2070 byte_order);
2071 paddr -= displacement;
2072 store_unsigned_integer (buf_paddr_p, 8, byte_order, paddr);
2073
2074 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2075 continue;
2076
2077 /* prelink can convert .plt SHT_NOBITS to SHT_PROGBITS. */
2078 plt2_asect = bfd_get_section_by_name (exec_bfd, ".plt");
2079 if (plt2_asect)
2080 {
2081 int content2;
2082 gdb_byte *buf_filesz_p = (gdb_byte *) &phdrp->p_filesz;
2083 CORE_ADDR filesz;
2084
2085 content2 = (bfd_get_section_flags (exec_bfd, plt2_asect)
2086 & SEC_HAS_CONTENTS) != 0;
2087
2088 filesz = extract_unsigned_integer (buf_filesz_p, 8,
2089 byte_order);
2090
2091 /* PLT2_ASECT is from on-disk file (exec_bfd) while
2092 FILESZ is from the in-memory image. */
2093 if (content2)
2094 filesz += bfd_get_section_size (plt2_asect);
2095 else
2096 filesz -= bfd_get_section_size (plt2_asect);
2097
2098 store_unsigned_integer (buf_filesz_p, 8, byte_order,
2099 filesz);
2100
2101 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2102 continue;
2103 }
2104
2105 ok = 0;
2106 break;
2107 }
2108 }
2109 else
2110 ok = 0;
2111 }
2112
2113 xfree (buf);
2114 xfree (buf2);
2115
2116 if (!ok)
2117 return 0;
2118 }
2119
2120 if (info_verbose)
2121 {
2122 /* It can be printed repeatedly as there is no easy way to check
2123 the executable symbols/file has been already relocated to
2124 displacement. */
2125
2126 printf_unfiltered (_("Using PIE (Position Independent Executable) "
2127 "displacement %s for \"%s\".\n"),
2128 paddress (target_gdbarch (), displacement),
2129 bfd_get_filename (exec_bfd));
2130 }
2131
2132 *displacementp = displacement;
2133 return 1;
2134 }
2135
2136 /* Relocate the main executable. This function should be called upon
2137 stopping the inferior process at the entry point to the program.
2138 The entry point from BFD is compared to the AT_ENTRY of AUXV and if they are
2139 different, the main executable is relocated by the proper amount. */
2140
2141 static void
2142 svr4_relocate_main_executable (void)
2143 {
2144 CORE_ADDR displacement;
2145
2146 /* If we are re-running this executable, SYMFILE_OBJFILE->SECTION_OFFSETS
2147 probably contains the offsets computed using the PIE displacement
2148 from the previous run, which of course are irrelevant for this run.
2149 So we need to determine the new PIE displacement and recompute the
2150 section offsets accordingly, even if SYMFILE_OBJFILE->SECTION_OFFSETS
2151 already contains pre-computed offsets.
2152
2153 If we cannot compute the PIE displacement, either:
2154
2155 - The executable is not PIE.
2156
2157 - SYMFILE_OBJFILE does not match the executable started in the target.
2158 This can happen for main executable symbols loaded at the host while
2159 `ld.so --ld-args main-executable' is loaded in the target.
2160
2161 Then we leave the section offsets untouched and use them as is for
2162 this run. Either:
2163
2164 - These section offsets were properly reset earlier, and thus
2165 already contain the correct values. This can happen for instance
2166 when reconnecting via the remote protocol to a target that supports
2167 the `qOffsets' packet.
2168
2169 - The section offsets were not reset earlier, and the best we can
2170 hope is that the old offsets are still applicable to the new run. */
2171
2172 if (! svr4_exec_displacement (&displacement))
2173 return;
2174
2175 /* Even DISPLACEMENT 0 is a valid new difference of in-memory vs. in-file
2176 addresses. */
2177
2178 if (symfile_objfile)
2179 {
2180 struct section_offsets *new_offsets;
2181 int i;
2182
2183 new_offsets = alloca (symfile_objfile->num_sections
2184 * sizeof (*new_offsets));
2185
2186 for (i = 0; i < symfile_objfile->num_sections; i++)
2187 new_offsets->offsets[i] = displacement;
2188
2189 objfile_relocate (symfile_objfile, new_offsets);
2190 }
2191 else if (exec_bfd)
2192 {
2193 asection *asect;
2194
2195 for (asect = exec_bfd->sections; asect != NULL; asect = asect->next)
2196 exec_set_section_address (bfd_get_filename (exec_bfd), asect->index,
2197 (bfd_section_vma (exec_bfd, asect)
2198 + displacement));
2199 }
2200 }
2201
2202 /* Implement the "create_inferior_hook" target_solib_ops method.
2203
2204 For SVR4 executables, this first instruction is either the first
2205 instruction in the dynamic linker (for dynamically linked
2206 executables) or the instruction at "start" for statically linked
2207 executables. For dynamically linked executables, the system
2208 first exec's /lib/libc.so.N, which contains the dynamic linker,
2209 and starts it running. The dynamic linker maps in any needed
2210 shared libraries, maps in the actual user executable, and then
2211 jumps to "start" in the user executable.
2212
2213 We can arrange to cooperate with the dynamic linker to discover the
2214 names of shared libraries that are dynamically linked, and the base
2215 addresses to which they are linked.
2216
2217 This function is responsible for discovering those names and
2218 addresses, and saving sufficient information about them to allow
2219 their symbols to be read at a later time. */
2220
2221 static void
2222 svr4_solib_create_inferior_hook (int from_tty)
2223 {
2224 struct svr4_info *info;
2225
2226 info = get_svr4_info ();
2227
2228 /* Relocate the main executable if necessary. */
2229 svr4_relocate_main_executable ();
2230
2231 /* No point setting a breakpoint in the dynamic linker if we can't
2232 hit it (e.g., a core file, or a trace file). */
2233 if (!target_has_execution)
2234 return;
2235
2236 if (!svr4_have_link_map_offsets ())
2237 return;
2238
2239 if (!enable_break (info, from_tty))
2240 return;
2241 }
2242
2243 static void
2244 svr4_clear_solib (void)
2245 {
2246 struct svr4_info *info;
2247
2248 info = get_svr4_info ();
2249 info->debug_base = 0;
2250 info->debug_loader_offset_p = 0;
2251 info->debug_loader_offset = 0;
2252 xfree (info->debug_loader_name);
2253 info->debug_loader_name = NULL;
2254 }
2255
2256 /* Clear any bits of ADDR that wouldn't fit in a target-format
2257 data pointer. "Data pointer" here refers to whatever sort of
2258 address the dynamic linker uses to manage its sections. At the
2259 moment, we don't support shared libraries on any processors where
2260 code and data pointers are different sizes.
2261
2262 This isn't really the right solution. What we really need here is
2263 a way to do arithmetic on CORE_ADDR values that respects the
2264 natural pointer/address correspondence. (For example, on the MIPS,
2265 converting a 32-bit pointer to a 64-bit CORE_ADDR requires you to
2266 sign-extend the value. There, simply truncating the bits above
2267 gdbarch_ptr_bit, as we do below, is no good.) This should probably
2268 be a new gdbarch method or something. */
2269 static CORE_ADDR
2270 svr4_truncate_ptr (CORE_ADDR addr)
2271 {
2272 if (gdbarch_ptr_bit (target_gdbarch ()) == sizeof (CORE_ADDR) * 8)
2273 /* We don't need to truncate anything, and the bit twiddling below
2274 will fail due to overflow problems. */
2275 return addr;
2276 else
2277 return addr & (((CORE_ADDR) 1 << gdbarch_ptr_bit (target_gdbarch ())) - 1);
2278 }
2279
2280
2281 static void
2282 svr4_relocate_section_addresses (struct so_list *so,
2283 struct target_section *sec)
2284 {
2285 sec->addr = svr4_truncate_ptr (sec->addr + lm_addr_check (so,
2286 sec->bfd));
2287 sec->endaddr = svr4_truncate_ptr (sec->endaddr + lm_addr_check (so,
2288 sec->bfd));
2289 }
2290 \f
2291
2292 /* Architecture-specific operations. */
2293
2294 /* Per-architecture data key. */
2295 static struct gdbarch_data *solib_svr4_data;
2296
2297 struct solib_svr4_ops
2298 {
2299 /* Return a description of the layout of `struct link_map'. */
2300 struct link_map_offsets *(*fetch_link_map_offsets)(void);
2301 };
2302
2303 /* Return a default for the architecture-specific operations. */
2304
2305 static void *
2306 solib_svr4_init (struct obstack *obstack)
2307 {
2308 struct solib_svr4_ops *ops;
2309
2310 ops = OBSTACK_ZALLOC (obstack, struct solib_svr4_ops);
2311 ops->fetch_link_map_offsets = NULL;
2312 return ops;
2313 }
2314
2315 /* Set the architecture-specific `struct link_map_offsets' fetcher for
2316 GDBARCH to FLMO. Also, install SVR4 solib_ops into GDBARCH. */
2317
2318 void
2319 set_solib_svr4_fetch_link_map_offsets (struct gdbarch *gdbarch,
2320 struct link_map_offsets *(*flmo) (void))
2321 {
2322 struct solib_svr4_ops *ops = gdbarch_data (gdbarch, solib_svr4_data);
2323
2324 ops->fetch_link_map_offsets = flmo;
2325
2326 set_solib_ops (gdbarch, &svr4_so_ops);
2327 }
2328
2329 /* Fetch a link_map_offsets structure using the architecture-specific
2330 `struct link_map_offsets' fetcher. */
2331
2332 static struct link_map_offsets *
2333 svr4_fetch_link_map_offsets (void)
2334 {
2335 struct solib_svr4_ops *ops = gdbarch_data (target_gdbarch (), solib_svr4_data);
2336
2337 gdb_assert (ops->fetch_link_map_offsets);
2338 return ops->fetch_link_map_offsets ();
2339 }
2340
2341 /* Return 1 if a link map offset fetcher has been defined, 0 otherwise. */
2342
2343 static int
2344 svr4_have_link_map_offsets (void)
2345 {
2346 struct solib_svr4_ops *ops = gdbarch_data (target_gdbarch (), solib_svr4_data);
2347
2348 return (ops->fetch_link_map_offsets != NULL);
2349 }
2350 \f
2351
2352 /* Most OS'es that have SVR4-style ELF dynamic libraries define a
2353 `struct r_debug' and a `struct link_map' that are binary compatible
2354 with the origional SVR4 implementation. */
2355
2356 /* Fetch (and possibly build) an appropriate `struct link_map_offsets'
2357 for an ILP32 SVR4 system. */
2358
2359 struct link_map_offsets *
2360 svr4_ilp32_fetch_link_map_offsets (void)
2361 {
2362 static struct link_map_offsets lmo;
2363 static struct link_map_offsets *lmp = NULL;
2364
2365 if (lmp == NULL)
2366 {
2367 lmp = &lmo;
2368
2369 lmo.r_version_offset = 0;
2370 lmo.r_version_size = 4;
2371 lmo.r_map_offset = 4;
2372 lmo.r_brk_offset = 8;
2373 lmo.r_ldsomap_offset = 20;
2374
2375 /* Everything we need is in the first 20 bytes. */
2376 lmo.link_map_size = 20;
2377 lmo.l_addr_offset = 0;
2378 lmo.l_name_offset = 4;
2379 lmo.l_ld_offset = 8;
2380 lmo.l_next_offset = 12;
2381 lmo.l_prev_offset = 16;
2382 }
2383
2384 return lmp;
2385 }
2386
2387 /* Fetch (and possibly build) an appropriate `struct link_map_offsets'
2388 for an LP64 SVR4 system. */
2389
2390 struct link_map_offsets *
2391 svr4_lp64_fetch_link_map_offsets (void)
2392 {
2393 static struct link_map_offsets lmo;
2394 static struct link_map_offsets *lmp = NULL;
2395
2396 if (lmp == NULL)
2397 {
2398 lmp = &lmo;
2399
2400 lmo.r_version_offset = 0;
2401 lmo.r_version_size = 4;
2402 lmo.r_map_offset = 8;
2403 lmo.r_brk_offset = 16;
2404 lmo.r_ldsomap_offset = 40;
2405
2406 /* Everything we need is in the first 40 bytes. */
2407 lmo.link_map_size = 40;
2408 lmo.l_addr_offset = 0;
2409 lmo.l_name_offset = 8;
2410 lmo.l_ld_offset = 16;
2411 lmo.l_next_offset = 24;
2412 lmo.l_prev_offset = 32;
2413 }
2414
2415 return lmp;
2416 }
2417 \f
2418
2419 struct target_so_ops svr4_so_ops;
2420
2421 /* Lookup global symbol for ELF DSOs linked with -Bsymbolic. Those DSOs have a
2422 different rule for symbol lookup. The lookup begins here in the DSO, not in
2423 the main executable. */
2424
2425 static struct symbol *
2426 elf_lookup_lib_symbol (const struct objfile *objfile,
2427 const char *name,
2428 const domain_enum domain)
2429 {
2430 bfd *abfd;
2431
2432 if (objfile == symfile_objfile)
2433 abfd = exec_bfd;
2434 else
2435 {
2436 /* OBJFILE should have been passed as the non-debug one. */
2437 gdb_assert (objfile->separate_debug_objfile_backlink == NULL);
2438
2439 abfd = objfile->obfd;
2440 }
2441
2442 if (abfd == NULL || scan_dyntag (DT_SYMBOLIC, abfd, NULL) != 1)
2443 return NULL;
2444
2445 return lookup_global_symbol_from_objfile (objfile, name, domain);
2446 }
2447
2448 extern initialize_file_ftype _initialize_svr4_solib; /* -Wmissing-prototypes */
2449
2450 void
2451 _initialize_svr4_solib (void)
2452 {
2453 solib_svr4_data = gdbarch_data_register_pre_init (solib_svr4_init);
2454 solib_svr4_pspace_data
2455 = register_program_space_data_with_cleanup (NULL, svr4_pspace_data_cleanup);
2456
2457 svr4_so_ops.relocate_section_addresses = svr4_relocate_section_addresses;
2458 svr4_so_ops.free_so = svr4_free_so;
2459 svr4_so_ops.clear_so = svr4_clear_so;
2460 svr4_so_ops.clear_solib = svr4_clear_solib;
2461 svr4_so_ops.solib_create_inferior_hook = svr4_solib_create_inferior_hook;
2462 svr4_so_ops.special_symbol_handling = svr4_special_symbol_handling;
2463 svr4_so_ops.current_sos = svr4_current_sos;
2464 svr4_so_ops.open_symbol_file_object = open_symbol_file_object;
2465 svr4_so_ops.in_dynsym_resolve_code = svr4_in_dynsym_resolve_code;
2466 svr4_so_ops.bfd_open = solib_bfd_open;
2467 svr4_so_ops.lookup_lib_global_symbol = elf_lookup_lib_symbol;
2468 svr4_so_ops.same = svr4_same;
2469 svr4_so_ops.keep_data_in_core = svr4_keep_data_in_core;
2470 }
This page took 0.079493 seconds and 4 git commands to generate.