2012-12-10 Paul Koning <paul_koning@dell.com>
[deliverable/binutils-gdb.git] / gdb / solib-svr4.c
1 /* Handle SVR4 shared libraries for GDB, the GNU Debugger.
2
3 Copyright (C) 1990-1996, 1998-2001, 2003-2012 Free Software
4 Foundation, Inc.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include "defs.h"
22
23 #include "elf/external.h"
24 #include "elf/common.h"
25 #include "elf/mips.h"
26
27 #include "symtab.h"
28 #include "bfd.h"
29 #include "symfile.h"
30 #include "objfiles.h"
31 #include "gdbcore.h"
32 #include "target.h"
33 #include "inferior.h"
34 #include "regcache.h"
35 #include "gdbthread.h"
36 #include "observer.h"
37
38 #include "gdb_assert.h"
39
40 #include "solist.h"
41 #include "solib.h"
42 #include "solib-svr4.h"
43
44 #include "bfd-target.h"
45 #include "elf-bfd.h"
46 #include "exec.h"
47 #include "auxv.h"
48 #include "exceptions.h"
49 #include "gdb_bfd.h"
50
51 static struct link_map_offsets *svr4_fetch_link_map_offsets (void);
52 static int svr4_have_link_map_offsets (void);
53 static void svr4_relocate_main_executable (void);
54
/* Link map info to include in an allocated so_list entry.  This is
   GDB's cached copy of the interesting fields of the inferior's
   `struct link_map' entry for one shared object.  */

struct lm_info
{
  /* Amount by which addresses in the binary should be relocated to
     match the inferior.  The direct inferior value is L_ADDR_INFERIOR.
     When prelinking is involved and the prelink base address changes,
     we may need a different offset - the recomputed offset is in L_ADDR.
     It is commonly the same value.  It is cached as we want to warn about
     the difference and compute it only once.  L_ADDR is valid
     iff L_ADDR_P.  */
  CORE_ADDR l_addr, l_addr_inferior;
  /* Nonzero once L_ADDR has been computed and cached.  */
  unsigned int l_addr_p : 1;

  /* The target location of lm.  */
  CORE_ADDR lm_addr;

  /* Values read in from inferior's fields of the same name.  */
  CORE_ADDR l_ld, l_next, l_prev, l_name;
};
75
/* On SVR4 systems, a list of symbols in the dynamic linker where
   GDB can try to place a breakpoint to monitor shared library
   events.

   If none of these symbols are found, or other errors occur, then
   SVR4 systems will fall back to using a symbol as the "startup
   mapping complete" breakpoint address.  */

static const char * const solib_break_names[] =
{
  "r_debug_state",
  "_r_debug_state",
  "_dl_debug_state",
  "rtld_db_dlactivity",
  "__dl_rtld_db_dlactivity",
  "_rtld_debug_state",

  /* The list is NULL-terminated.  */
  NULL
};
95
/* Fallback symbols for the "startup mapping complete" breakpoint
   mentioned above, tried in order; NULL-terminated.  */

static const char * const bkpt_names[] =
{
  "_start",
  "__start",
  "main",
  NULL
};
103
/* Additional symbol names that may denote the main program entry
   point (presumably consulted by match_main below — confirm against
   its definition); NULL-terminated.  */

static const char * const main_name_list[] =
{
  "main_$main",
  NULL
};
109
/* Return non-zero if GDB_SO_NAME and INFERIOR_SO_NAME represent
   the same shared library.  */

static int
svr4_same_1 (const char *gdb_so_name, const char *inferior_so_name)
{
  /* Known pairs of distinct paths that nevertheless denote the same
     dynamic linker.  On Solaris, when starting the inferior we think
     that the dynamic linker is /usr/lib/ld.so.1, but later on, the
     table of loaded shared libraries contains /lib/ld.so.1.  Sometimes
     one file is a link to another, but sometimes they have identical
     content, but are not linked to each other.  We don't restrict this
     check to Solaris, but the chances of running into this situation
     elsewhere are very low.  The sparc64 entry covers the same issue
     observed there with different locations.  */
  static const char * const aliases[][2] =
    {
      { "/usr/lib/ld.so.1", "/lib/ld.so.1" },
      { "/usr/lib/sparcv9/ld.so.1", "/lib/sparcv9/ld.so.1" },
    };
  size_t ix;

  if (strcmp (gdb_so_name, inferior_so_name) == 0)
    return 1;

  for (ix = 0; ix < sizeof (aliases) / sizeof (aliases[0]); ix++)
    if (strcmp (gdb_so_name, aliases[ix][0]) == 0
	&& strcmp (inferior_so_name, aliases[ix][1]) == 0)
      return 1;

  return 0;
}
137
/* Return non-zero if the so_list entries GDB and INFERIOR represent
   the same shared library, comparing their original (as first seen)
   names.  */

static int
svr4_same (struct so_list *gdb, struct so_list *inferior)
{
  return (svr4_same_1 (gdb->so_original_name, inferior->so_original_name));
}
143
144 static struct lm_info *
145 lm_info_read (CORE_ADDR lm_addr)
146 {
147 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
148 gdb_byte *lm;
149 struct lm_info *lm_info;
150 struct cleanup *back_to;
151
152 lm = xmalloc (lmo->link_map_size);
153 back_to = make_cleanup (xfree, lm);
154
155 if (target_read_memory (lm_addr, lm, lmo->link_map_size) != 0)
156 {
157 warning (_("Error reading shared library list entry at %s"),
158 paddress (target_gdbarch (), lm_addr)),
159 lm_info = NULL;
160 }
161 else
162 {
163 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
164
165 lm_info = xzalloc (sizeof (*lm_info));
166 lm_info->lm_addr = lm_addr;
167
168 lm_info->l_addr_inferior = extract_typed_address (&lm[lmo->l_addr_offset],
169 ptr_type);
170 lm_info->l_ld = extract_typed_address (&lm[lmo->l_ld_offset], ptr_type);
171 lm_info->l_next = extract_typed_address (&lm[lmo->l_next_offset],
172 ptr_type);
173 lm_info->l_prev = extract_typed_address (&lm[lmo->l_prev_offset],
174 ptr_type);
175 lm_info->l_name = extract_typed_address (&lm[lmo->l_name_offset],
176 ptr_type);
177 }
178
179 do_cleanups (back_to);
180
181 return lm_info;
182 }
183
/* Return non-zero if the target's link map layout provides the l_ld
   member (a negative l_ld_offset marks it as unavailable).  */

static int
has_lm_dynamic_from_link_map (void)
{
  struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();

  return lmo->l_ld_offset >= 0;
}
191
/* Return the relocation offset to apply to addresses of shared object
   SO, computing and caching it in SO->lm_info on first use.  ABFD, if
   non-NULL, is the BFD for the library file; it is used to compare the
   file's .dynamic address against the inferior's l_ld so that a
   changed prelink base address can be detected and compensated.  */

static CORE_ADDR
lm_addr_check (struct so_list *so, bfd *abfd)
{
  if (!so->lm_info->l_addr_p)
    {
      struct bfd_section *dyninfo_sect;
      CORE_ADDR l_addr, l_dynaddr, dynaddr;

      /* Default to the offset the inferior's link map reports.  */
      l_addr = so->lm_info->l_addr_inferior;

      /* Without a BFD or an l_ld field there is nothing to
	 cross-check against; accept the inferior's value.  */
      if (! abfd || ! has_lm_dynamic_from_link_map ())
	goto set_addr;

      l_dynaddr = so->lm_info->l_ld;

      dyninfo_sect = bfd_get_section_by_name (abfd, ".dynamic");
      if (dyninfo_sect == NULL)
	goto set_addr;

      dynaddr = bfd_section_vma (abfd, dyninfo_sect);

      if (dynaddr + l_addr != l_dynaddr)
	{
	  CORE_ADDR align = 0x1000;
	  CORE_ADDR minpagesize = align;

	  if (bfd_get_flavour (abfd) == bfd_target_elf_flavour)
	    {
	      Elf_Internal_Ehdr *ehdr = elf_tdata (abfd)->elf_header;
	      Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
	      int i;

	      align = 1;

	      /* A plausible load bias must be a multiple of the
		 largest PT_LOAD segment alignment.  */
	      for (i = 0; i < ehdr->e_phnum; i++)
		if (phdr[i].p_type == PT_LOAD && phdr[i].p_align > align)
		  align = phdr[i].p_align;

	      minpagesize = get_elf_backend_data (abfd)->minpagesize;
	    }

	  /* Turn it into a mask.  */
	  align--;

	  /* If the changes match the alignment requirements, we
	     assume we're using a core file that was generated by the
	     same binary, just prelinked with a different base offset.
	     If it doesn't match, we may have a different binary, the
	     same binary with the dynamic table loaded at an unrelated
	     location, or anything, really.  To avoid regressions,
	     don't adjust the base offset in the latter case, although
	     odds are that, if things really changed, debugging won't
	     quite work.

	     One could expect more the condition
	       ((l_addr & align) == 0 && ((l_dynaddr - dynaddr) & align) == 0)
	     but the one below is relaxed for PPC.  The PPC kernel supports
	     either 4k or 64k page sizes.  To be prepared for 64k pages,
	     PPC ELF files are built using an alignment requirement of 64k.
	     However, when running on a kernel supporting 4k pages, the memory
	     mapping of the library may not actually happen on a 64k boundary!

	     (In the usual case where (l_addr & align) == 0, this check is
	     equivalent to the possibly expected check above.)

	     Even on PPC it must be zero-aligned at least for MINPAGESIZE.  */

	  l_addr = l_dynaddr - dynaddr;

	  if ((l_addr & (minpagesize - 1)) == 0
	      && (l_addr & align) == ((l_dynaddr - dynaddr) & align))
	    {
	      if (info_verbose)
		printf_unfiltered (_("Using PIC (Position Independent Code) "
				     "prelink displacement %s for \"%s\".\n"),
				   paddress (target_gdbarch (), l_addr),
				   so->so_name);
	    }
	  else
	    {
	      /* There is no way to verify the library file matches.  prelink
		 can during prelinking of an unprelinked file (or unprelinking
		 of a prelinked file) shift the DYNAMIC segment by arbitrary
		 offset without any page size alignment.  There is no way to
		 find out the ELF header and/or Program Headers for a limited
		 verification if it they match.  One could do a verification
		 of the DYNAMIC segment.  Still the found address is the best
		 one GDB could find.  */

	      warning (_(".dynamic section for \"%s\" "
			 "is not at the expected address "
			 "(wrong library or version mismatch?)"), so->so_name);
	    }
	}

    set_addr:
      so->lm_info->l_addr = l_addr;
      so->lm_info->l_addr_p = 1;
    }

  return so->lm_info->l_addr;
}
294
/* Per pspace SVR4 specific data.  */

struct svr4_info
{
  CORE_ADDR debug_base;	/* Base of dynamic linker structures
			   (inferior address of struct r_debug);
			   0 until located.  */

  /* Validity flag for debug_loader_offset.  */
  int debug_loader_offset_p;

  /* Load address for the dynamic linker, inferred.  */
  CORE_ADDR debug_loader_offset;

  /* Name of the dynamic linker, valid if debug_loader_offset_p.  */
  char *debug_loader_name;

  /* Load map address for the main executable.  */
  CORE_ADDR main_lm_addr;

  /* Address bounds of the dynamic linker's text and PLT sections
     (presumably used to recognize PC values inside the dynamic
     linker — confirm against the code that sets them).  */
  CORE_ADDR interp_text_sect_low;
  CORE_ADDR interp_text_sect_high;
  CORE_ADDR interp_plt_sect_low;
  CORE_ADDR interp_plt_sect_high;
};
318
319 /* Per-program-space data key. */
320 static const struct program_space_data *solib_svr4_pspace_data;
321
322 static void
323 svr4_pspace_data_cleanup (struct program_space *pspace, void *arg)
324 {
325 struct svr4_info *info;
326
327 info = program_space_data (pspace, solib_svr4_pspace_data);
328 xfree (info);
329 }
330
331 /* Get the current svr4 data. If none is found yet, add it now. This
332 function always returns a valid object. */
333
334 static struct svr4_info *
335 get_svr4_info (void)
336 {
337 struct svr4_info *info;
338
339 info = program_space_data (current_program_space, solib_svr4_pspace_data);
340 if (info != NULL)
341 return info;
342
343 info = XZALLOC (struct svr4_info);
344 set_program_space_data (current_program_space, solib_svr4_pspace_data, info);
345 return info;
346 }
347
348 /* Local function prototypes */
349
350 static int match_main (const char *);
351
/* Read program header TYPE from inferior memory.  The header is found
   by scanning the OS auxiliary vector.

   If TYPE == -1, return the program headers instead of the contents of
   one program header.

   Return a pointer to allocated memory holding the program header contents,
   or NULL on failure.  If successful, and unless P_SECT_SIZE is NULL, the
   size of those contents is returned to P_SECT_SIZE.  Likewise, the target
   architecture size (32-bit or 64-bit) is returned to P_ARCH_SIZE.  */

static gdb_byte *
read_program_header (int type, int *p_sect_size, int *p_arch_size)
{
  enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
  CORE_ADDR at_phdr, at_phent, at_phnum, pt_phdr = 0;
  int arch_size, sect_size;
  CORE_ADDR sect_addr;
  gdb_byte *buf;
  int pt_phdr_p = 0;

  /* Get required auxv elements from target.  */
  if (target_auxv_search (&current_target, AT_PHDR, &at_phdr) <= 0)
    return 0;
  if (target_auxv_search (&current_target, AT_PHENT, &at_phent) <= 0)
    return 0;
  if (target_auxv_search (&current_target, AT_PHNUM, &at_phnum) <= 0)
    return 0;
  if (!at_phdr || !at_phnum)
    return 0;

  /* Determine ELF architecture type from the size of one program
     header entry.  */
  if (at_phent == sizeof (Elf32_External_Phdr))
    arch_size = 32;
  else if (at_phent == sizeof (Elf64_External_Phdr))
    arch_size = 64;
  else
    return 0;

  /* Find the requested segment.  */
  if (type == -1)
    {
      /* Caller wants the whole program header table.  */
      sect_addr = at_phdr;
      sect_size = at_phent * at_phnum;
    }
  else if (arch_size == 32)
    {
      Elf32_External_Phdr phdr;
      int i;

      /* Search for requested PHDR.  */
      for (i = 0; i < at_phnum; i++)
	{
	  int p_type;

	  if (target_read_memory (at_phdr + i * sizeof (phdr),
				  (gdb_byte *)&phdr, sizeof (phdr)))
	    return 0;

	  p_type = extract_unsigned_integer ((gdb_byte *) phdr.p_type,
					     4, byte_order);

	  /* Remember PT_PHDR's link-time address for the relocation
	     computation below.  */
	  if (p_type == PT_PHDR)
	    {
	      pt_phdr_p = 1;
	      pt_phdr = extract_unsigned_integer ((gdb_byte *) phdr.p_vaddr,
						  4, byte_order);
	    }

	  if (p_type == type)
	    break;
	}

      if (i == at_phnum)
	return 0;

      /* Retrieve address and size.  */
      sect_addr = extract_unsigned_integer ((gdb_byte *)phdr.p_vaddr,
					    4, byte_order);
      sect_size = extract_unsigned_integer ((gdb_byte *)phdr.p_memsz,
					    4, byte_order);
    }
  else
    {
      /* Same search as above, for 64-bit program headers.  */
      Elf64_External_Phdr phdr;
      int i;

      /* Search for requested PHDR.  */
      for (i = 0; i < at_phnum; i++)
	{
	  int p_type;

	  if (target_read_memory (at_phdr + i * sizeof (phdr),
				  (gdb_byte *)&phdr, sizeof (phdr)))
	    return 0;

	  p_type = extract_unsigned_integer ((gdb_byte *) phdr.p_type,
					     4, byte_order);

	  if (p_type == PT_PHDR)
	    {
	      pt_phdr_p = 1;
	      pt_phdr = extract_unsigned_integer ((gdb_byte *) phdr.p_vaddr,
						  8, byte_order);
	    }

	  if (p_type == type)
	    break;
	}

      if (i == at_phnum)
	return 0;

      /* Retrieve address and size.  */
      sect_addr = extract_unsigned_integer ((gdb_byte *)phdr.p_vaddr,
					    8, byte_order);
      sect_size = extract_unsigned_integer ((gdb_byte *)phdr.p_memsz,
					    8, byte_order);
    }

  /* PT_PHDR is optional, but we really need it
     for PIE to make this work in general.  */

  if (pt_phdr_p)
    {
      /* at_phdr is real address in memory.  pt_phdr is what pheader says it is.
	 Relocation offset is the difference between the two.  */
      sect_addr = sect_addr + (at_phdr - pt_phdr);
    }

  /* Read in requested program header.  */
  buf = xmalloc (sect_size);
  if (target_read_memory (sect_addr, buf, sect_size))
    {
      xfree (buf);
      return NULL;
    }

  if (p_arch_size)
    *p_arch_size = arch_size;
  if (p_sect_size)
    *p_sect_size = sect_size;

  return buf;
}
497
498
499 /* Return program interpreter string. */
500 static gdb_byte *
501 find_program_interpreter (void)
502 {
503 gdb_byte *buf = NULL;
504
505 /* If we have an exec_bfd, use its section table. */
506 if (exec_bfd
507 && bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
508 {
509 struct bfd_section *interp_sect;
510
511 interp_sect = bfd_get_section_by_name (exec_bfd, ".interp");
512 if (interp_sect != NULL)
513 {
514 int sect_size = bfd_section_size (exec_bfd, interp_sect);
515
516 buf = xmalloc (sect_size);
517 bfd_get_section_contents (exec_bfd, interp_sect, buf, 0, sect_size);
518 }
519 }
520
521 /* If we didn't find it, use the target auxillary vector. */
522 if (!buf)
523 buf = read_program_header (PT_INTERP, NULL, NULL);
524
525 return buf;
526 }
527
528
/* Scan for DYNTAG in .dynamic section of ABFD.  If DYNTAG is found 1 is
   returned and the corresponding PTR is set.  Returns 0 if ABFD is
   NULL, is not ELF, or the tag is not present.  */

static int
scan_dyntag (int dyntag, bfd *abfd, CORE_ADDR *ptr)
{
  int arch_size, step, sect_size;
  long dyn_tag;
  CORE_ADDR dyn_ptr, dyn_addr;
  gdb_byte *bufend, *bufstart, *buf;
  Elf32_External_Dyn *x_dynp_32;
  Elf64_External_Dyn *x_dynp_64;
  struct bfd_section *sect;
  struct target_section *target_section;

  if (abfd == NULL)
    return 0;

  if (bfd_get_flavour (abfd) != bfd_target_elf_flavour)
    return 0;

  arch_size = bfd_get_arch_size (abfd);
  if (arch_size == -1)
    return 0;

  /* Find the start address of the .dynamic section.  */
  sect = bfd_get_section_by_name (abfd, ".dynamic");
  if (sect == NULL)
    return 0;

  /* Prefer the relocated in-memory address from the target section
     table, if this BFD section is mapped there.  */
  for (target_section = current_target_sections->sections;
       target_section < current_target_sections->sections_end;
       target_section++)
    if (sect == target_section->the_bfd_section)
      break;
  if (target_section < current_target_sections->sections_end)
    dyn_addr = target_section->addr;
  else
    {
      /* ABFD may come from OBJFILE acting only as a symbol file without being
	 loaded into the target (see add_symbol_file_command).  This case is
	 such fallback to the file VMA address without the possibility of
	 having the section relocated to its actual in-memory address.  */

      dyn_addr = bfd_section_vma (abfd, sect);
    }

  /* Read in .dynamic from the BFD.  We will get the actual value
     from memory later.  */
  sect_size = bfd_section_size (abfd, sect);
  /* NOTE(review): alloca of the full section size — assumes .dynamic
     is small enough for the stack; confirm no pathological inputs.  */
  buf = bufstart = alloca (sect_size);
  if (!bfd_get_section_contents (abfd, sect,
				 buf, 0, sect_size))
    return 0;

  /* Iterate over BUF and scan for DYNTAG.  If found, set PTR and return.  */
  step = (arch_size == 32) ? sizeof (Elf32_External_Dyn)
			   : sizeof (Elf64_External_Dyn);
  for (bufend = buf + sect_size;
       buf < bufend;
       buf += step)
  {
    if (arch_size == 32)
      {
	x_dynp_32 = (Elf32_External_Dyn *) buf;
	dyn_tag = bfd_h_get_32 (abfd, (bfd_byte *) x_dynp_32->d_tag);
	dyn_ptr = bfd_h_get_32 (abfd, (bfd_byte *) x_dynp_32->d_un.d_ptr);
      }
    else
      {
	x_dynp_64 = (Elf64_External_Dyn *) buf;
	dyn_tag = bfd_h_get_64 (abfd, (bfd_byte *) x_dynp_64->d_tag);
	dyn_ptr = bfd_h_get_64 (abfd, (bfd_byte *) x_dynp_64->d_un.d_ptr);
      }
    /* DT_NULL terminates the dynamic array.  */
    if (dyn_tag == DT_NULL)
      return 0;
    if (dyn_tag == dyntag)
      {
	/* If requested, try to read the runtime value of this .dynamic
	   entry.  */
	if (ptr)
	  {
	    struct type *ptr_type;
	    gdb_byte ptr_buf[8];
	    CORE_ADDR ptr_addr;

	    ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
	    /* d_un follows the tag word, hence the arch_size / 8
	       byte offset into the entry.  */
	    ptr_addr = dyn_addr + (buf - bufstart) + arch_size / 8;
	    if (target_read_memory (ptr_addr, ptr_buf, arch_size / 8) == 0)
	      dyn_ptr = extract_typed_address (ptr_buf, ptr_type);
	    *ptr = dyn_ptr;
	  }
	return 1;
      }
  }

  return 0;
}
627
/* Scan for DYNTAG in .dynamic section of the target's main executable,
   found by consulting the OS auxiliary vector.  If DYNTAG is found 1 is
   returned and the corresponding PTR is set.  */

static int
scan_dyntag_auxv (int dyntag, CORE_ADDR *ptr)
{
  enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
  int sect_size, arch_size, step;
  long dyn_tag;
  CORE_ADDR dyn_ptr;
  gdb_byte *bufend, *bufstart, *buf;

  /* Read in .dynamic section.  BUFSTART is xmalloc'd by
     read_program_header and must be freed on every exit path.  */
  buf = bufstart = read_program_header (PT_DYNAMIC, &sect_size, &arch_size);
  if (!buf)
    return 0;

  /* Iterate over BUF and scan for DYNTAG.  If found, set PTR and return.  */
  step = (arch_size == 32) ? sizeof (Elf32_External_Dyn)
			   : sizeof (Elf64_External_Dyn);
  for (bufend = buf + sect_size;
       buf < bufend;
       buf += step)
  {
    if (arch_size == 32)
      {
	Elf32_External_Dyn *dynp = (Elf32_External_Dyn *) buf;

	dyn_tag = extract_unsigned_integer ((gdb_byte *) dynp->d_tag,
					    4, byte_order);
	dyn_ptr = extract_unsigned_integer ((gdb_byte *) dynp->d_un.d_ptr,
					    4, byte_order);
      }
    else
      {
	Elf64_External_Dyn *dynp = (Elf64_External_Dyn *) buf;

	dyn_tag = extract_unsigned_integer ((gdb_byte *) dynp->d_tag,
					    8, byte_order);
	dyn_ptr = extract_unsigned_integer ((gdb_byte *) dynp->d_un.d_ptr,
					    8, byte_order);
      }
    /* DT_NULL terminates the dynamic array.  */
    if (dyn_tag == DT_NULL)
      break;

    if (dyn_tag == dyntag)
      {
	if (ptr)
	  *ptr = dyn_ptr;

	xfree (bufstart);
	return 1;
      }
  }

  xfree (bufstart);
  return 0;
}
687
/* Locate the base address of dynamic linker structs for SVR4 elf
   targets.

   For SVR4 elf targets the address of the dynamic linker's runtime
   structure is contained within the dynamic info section in the
   executable file.  The dynamic section is also mapped into the
   inferior address space.  Because the runtime loader fills in the
   real address before starting the inferior, we have to read in the
   dynamic info section from the inferior address space.
   If there are any errors while trying to find the address, we
   silently return 0, otherwise the found address is returned.  */

static CORE_ADDR
elf_locate_base (void)
{
  struct minimal_symbol *msymbol;
  CORE_ADDR dyn_ptr;

  /* Look for DT_MIPS_RLD_MAP first.  MIPS executables use this
     instead of DT_DEBUG, although they sometimes contain an unused
     DT_DEBUG.  */
  if (scan_dyntag (DT_MIPS_RLD_MAP, exec_bfd, &dyn_ptr)
      || scan_dyntag_auxv (DT_MIPS_RLD_MAP, &dyn_ptr))
    {
      struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
      gdb_byte *pbuf;
      int pbuf_size = TYPE_LENGTH (ptr_type);

      pbuf = alloca (pbuf_size);
      /* DT_MIPS_RLD_MAP contains a pointer to the address
	 of the dynamic link structure, so one more dereference
	 through inferior memory is needed.  */
      if (target_read_memory (dyn_ptr, pbuf, pbuf_size))
	return 0;
      return extract_typed_address (pbuf, ptr_type);
    }

  /* Find DT_DEBUG.  Its value is the r_debug address directly.  */
  if (scan_dyntag (DT_DEBUG, exec_bfd, &dyn_ptr)
      || scan_dyntag_auxv (DT_DEBUG, &dyn_ptr))
    return dyn_ptr;

  /* This may be a static executable.  Look for the symbol
     conventionally named _r_debug, as a last resort.  */
  msymbol = lookup_minimal_symbol ("_r_debug", NULL, symfile_objfile);
  if (msymbol != NULL)
    return SYMBOL_VALUE_ADDRESS (msymbol);

  /* DT_DEBUG entry not found.  */
  return 0;
}
738
/* Locate the base address of dynamic linker structs.

   For both the SunOS and SVR4 shared library implementations, if the
   inferior executable has been linked dynamically, there is a single
   address somewhere in the inferior's data space which is the key to
   locating all of the dynamic linker's runtime structures.  This
   address is the value of the debug base symbol.  The job of this
   function is to find and return that address, or to return 0 if there
   is no such address (the executable is statically linked for example).

   For SunOS, the job is almost trivial, since the dynamic linker and
   all of it's structures are statically linked to the executable at
   link time.  Thus the symbol for the address we are looking for has
   already been added to the minimal symbol table for the executable's
   objfile at the time the symbol file's symbols were read, and all we
   have to do is look it up there.  Note that we explicitly do NOT want
   to find the copies in the shared library.

   The SVR4 version is a bit more complicated because the address
   is contained somewhere in the dynamic info section.  We have to go
   to a lot more work to discover the address of the debug base symbol.
   Because of this complexity, we cache the value we find and return that
   value on subsequent invocations.  Note there is no copy in the
   executable symbol tables.

   INFO is the per-program-space SVR4 data; the result is cached in
   INFO->debug_base.  */

static CORE_ADDR
locate_base (struct svr4_info *info)
{
  /* Check to see if we have a currently valid address, and if so, avoid
     doing all this work again and just return the cached address.  If
     we have no cached address, try to locate it in the dynamic info
     section for ELF executables.  There's no point in doing any of this
     though if we don't have some link map offsets to work with.  */

  if (info->debug_base == 0 && svr4_have_link_map_offsets ())
    info->debug_base = elf_locate_base ();
  return info->debug_base;
}
777
/* Find the first element in the inferior's dynamic link map, and
   return its address in the inferior.  Return zero if the address
   could not be determined (memory-read errors are caught, printed,
   and converted into a zero return).

   FIXME: Perhaps we should validate the info somehow, perhaps by
   checking r_version for a known version number, or r_state for
   RT_CONSISTENT.  */

static CORE_ADDR
solib_svr4_r_map (struct svr4_info *info)
{
  struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
  struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
  CORE_ADDR addr = 0;
  volatile struct gdb_exception ex;

  TRY_CATCH (ex, RETURN_MASK_ERROR)
    {
      addr = read_memory_typed_address (info->debug_base + lmo->r_map_offset,
					ptr_type);
    }
  exception_print (gdb_stderr, ex);
  return addr;
}
802
/* Find r_brk from the inferior's debug base.  Unlike solib_svr4_r_map
   this does not catch memory-read errors; they propagate to the
   caller.  */

static CORE_ADDR
solib_svr4_r_brk (struct svr4_info *info)
{
  struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
  struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;

  return read_memory_typed_address (info->debug_base + lmo->r_brk_offset,
				    ptr_type);
}
814
/* Find the link map for the dynamic linker (if it is not in the
   normal list of loaded shared objects).  Returns zero when the
   target's `struct r_debug' is too old to carry r_ldsomap.  */

static CORE_ADDR
solib_svr4_r_ldsomap (struct svr4_info *info)
{
  struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
  struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
  enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
  ULONGEST version;

  /* Check version, and return zero if `struct r_debug' doesn't have
     the r_ldsomap member.  */
  version
    = read_memory_unsigned_integer (info->debug_base + lmo->r_version_offset,
				    lmo->r_version_size, byte_order);
  if (version < 2 || lmo->r_ldsomap_offset == -1)
    return 0;

  return read_memory_typed_address (info->debug_base + lmo->r_ldsomap_offset,
				    ptr_type);
}
837
838 /* On Solaris systems with some versions of the dynamic linker,
839 ld.so's l_name pointer points to the SONAME in the string table
840 rather than into writable memory. So that GDB can find shared
841 libraries when loading a core file generated by gcore, ensure that
842 memory areas containing the l_name string are saved in the core
843 file. */
844
845 static int
846 svr4_keep_data_in_core (CORE_ADDR vaddr, unsigned long size)
847 {
848 struct svr4_info *info;
849 CORE_ADDR ldsomap;
850 struct so_list *new;
851 struct cleanup *old_chain;
852 struct link_map_offsets *lmo;
853 CORE_ADDR name_lm;
854
855 info = get_svr4_info ();
856
857 info->debug_base = 0;
858 locate_base (info);
859 if (!info->debug_base)
860 return 0;
861
862 ldsomap = solib_svr4_r_ldsomap (info);
863 if (!ldsomap)
864 return 0;
865
866 lmo = svr4_fetch_link_map_offsets ();
867 new = XZALLOC (struct so_list);
868 old_chain = make_cleanup (xfree, new);
869 new->lm_info = lm_info_read (ldsomap);
870 make_cleanup (xfree, new->lm_info);
871 name_lm = new->lm_info ? new->lm_info->l_name : 0;
872 do_cleanups (old_chain);
873
874 return (name_lm >= vaddr && name_lm < vaddr + size);
875 }
876
/* Implement the "open_symbol_file_object" target_so_ops method.

   If no open symbol file, attempt to locate and open the main symbol
   file.  On SVR4 systems, this is the first link map entry.  If its
   name is here, we can open it.  Useful when attaching to a process
   without first loading its symbol file.

   FROM_TTYP points to an int holding the from_tty flag.  Returns 1 on
   success, 0 on any failure or user refusal.  */

static int
open_symbol_file_object (void *from_ttyp)
{
  CORE_ADDR lm, l_name;
  char *filename;
  int errcode;
  int from_tty = *(int *)from_ttyp;
  struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
  struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
  int l_name_size = TYPE_LENGTH (ptr_type);
  gdb_byte *l_name_buf = xmalloc (l_name_size);
  struct cleanup *cleanups = make_cleanup (xfree, l_name_buf);
  struct svr4_info *info = get_svr4_info ();

  if (symfile_objfile)
    if (!query (_("Attempt to reload symbols from process? ")))
      {
	do_cleanups (cleanups);
	return 0;
      }

  /* Always locate the debug struct, in case it has moved.  */
  info->debug_base = 0;
  if (locate_base (info) == 0)
    {
      do_cleanups (cleanups);
      return 0;	/* failed somehow...  */
    }

  /* First link map member should be the executable.  */
  lm = solib_svr4_r_map (info);
  if (lm == 0)
    {
      do_cleanups (cleanups);
      return 0;	/* failed somehow...  */
    }

  /* Read address of name from target memory to GDB.  */
  read_memory (lm + lmo->l_name_offset, l_name_buf, l_name_size);

  /* Convert the address to host format.  */
  l_name = extract_typed_address (l_name_buf, ptr_type);

  if (l_name == 0)
    {
      do_cleanups (cleanups);
      return 0;	/* No filename.  */
    }

  /* Now fetch the filename from target memory.  */
  target_read_string (l_name, &filename, SO_NAME_MAX_PATH_SIZE - 1, &errcode);
  make_cleanup (xfree, filename);

  if (errcode)
    {
      warning (_("failed to read exec filename from attached file: %s"),
	       safe_strerror (errcode));
      do_cleanups (cleanups);
      return 0;
    }

  /* Have a pathname: read the symbol file.  */
  symbol_file_add_main (filename, from_tty);

  do_cleanups (cleanups);
  return 1;
}
951
/* Data exchange structure for the XML parser as returned by
   svr4_current_sos_via_xfer_libraries.  */

struct svr4_library_list
{
  /* List being accumulated; TAILP points at the terminating NULL link
     so entries can be appended in document order.  */
  struct so_list *head, **tailp;

  /* Inferior address of struct link_map used for the main executable.  It is
     NULL if not known.  */
  CORE_ADDR main_lm;
};
963
/* Implementation for target_so_ops.free_so.  Release the
   svr4-specific lm_info attached to SO.  */

static void
svr4_free_so (struct so_list *so)
{
  xfree (so->lm_info);
}
971
972 /* Free so_list built so far (called via cleanup). */
973
974 static void
975 svr4_free_library_list (void *p_list)
976 {
977 struct so_list *list = *(struct so_list **) p_list;
978
979 while (list != NULL)
980 {
981 struct so_list *next = list->next;
982
983 free_so (list);
984 list = next;
985 }
986 }
987
988 #ifdef HAVE_LIBEXPAT
989
990 #include "xml-support.h"
991
/* Handle the start of a <library> element.  Note: new elements are added
   at the tail of the list, keeping the list in order.  USER_DATA is the
   struct svr4_library_list being built; the name, lm, l_addr and l_ld
   attributes populate a fresh so_list entry.  */

static void
library_list_start_library (struct gdb_xml_parser *parser,
			    const struct gdb_xml_element *element,
			    void *user_data, VEC(gdb_xml_value_s) *attributes)
{
  struct svr4_library_list *list = user_data;
  const char *name = xml_find_attribute (attributes, "name")->value;
  ULONGEST *lmp = xml_find_attribute (attributes, "lm")->value;
  ULONGEST *l_addrp = xml_find_attribute (attributes, "l_addr")->value;
  ULONGEST *l_ldp = xml_find_attribute (attributes, "l_ld")->value;
  struct so_list *new_elem;

  new_elem = XZALLOC (struct so_list);
  new_elem->lm_info = XZALLOC (struct lm_info);
  new_elem->lm_info->lm_addr = *lmp;
  new_elem->lm_info->l_addr_inferior = *l_addrp;
  new_elem->lm_info->l_ld = *l_ldp;

  /* Copy the name, guaranteeing NUL termination by hand since
     strncpy does not.  */
  strncpy (new_elem->so_name, name, sizeof (new_elem->so_name) - 1);
  new_elem->so_name[sizeof (new_elem->so_name) - 1] = 0;
  strcpy (new_elem->so_original_name, new_elem->so_name);

  /* Append at the tail to preserve document order.  */
  *list->tailp = new_elem;
  list->tailp = &new_elem->next;
}
1020
/* Handle the start of a <library-list-svr4> element.  Rejects any
   version other than "1.0" and records the optional main-lm
   attribute into the result structure.  */

static void
svr4_library_list_start_list (struct gdb_xml_parser *parser,
			      const struct gdb_xml_element *element,
			      void *user_data, VEC(gdb_xml_value_s) *attributes)
{
  struct svr4_library_list *list = user_data;
  const char *version = xml_find_attribute (attributes, "version")->value;
  struct gdb_xml_value *main_lm = xml_find_attribute (attributes, "main-lm");

  if (strcmp (version, "1.0") != 0)
    gdb_xml_error (parser,
		   _("SVR4 Library list has unsupported version \"%s\""),
		   version);

  /* "main-lm" is optional; leave list->main_lm untouched if absent.  */
  if (main_lm)
    list->main_lm = *(ULONGEST *) main_lm->value;
}
1040
1041 /* The allowed elements and attributes for an XML library list.
1042 The root element is a <library-list>. */
1043
static const struct gdb_xml_attribute svr4_library_attributes[] =
{
  /* All four attributes are required on each <library> element; the
     three addresses are parsed as ULONGESTs.  */
  { "name", GDB_XML_AF_NONE, NULL, NULL },
  { "lm", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "l_addr", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "l_ld", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};
1052
static const struct gdb_xml_element svr4_library_list_children[] =
{
  {
    /* <library> may appear any number of times, or not at all.  */
    "library", svr4_library_attributes, NULL,
    GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL,
    library_list_start_library, NULL
  },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};
1062
static const struct gdb_xml_attribute svr4_library_list_attributes[] =
{
  { "version", GDB_XML_AF_NONE, NULL, NULL },
  /* "main-lm" is optional; see svr4_library_list_start_list.  */
  { "main-lm", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};
1069
static const struct gdb_xml_element svr4_library_list_elements[] =
{
  /* The document's root element is <library-list-svr4>.  */
  { "library-list-svr4", svr4_library_list_attributes, svr4_library_list_children,
    GDB_XML_EF_NONE, svr4_library_list_start_list, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};
1076
/* Parse DOCUMENT, the XML text of a qXfer:libraries-svr4:read packet,
   into *LIST.

   Return 0 if DOCUMENT could not be parsed; any partially-built entries
   in *LIST are freed in that case.  Return 1 if *LIST contains the
   library list; it may be empty, and the caller is responsible for
   freeing all its entries.  */
1082
1083 static int
1084 svr4_parse_libraries (const char *document, struct svr4_library_list *list)
1085 {
1086 struct cleanup *back_to = make_cleanup (svr4_free_library_list,
1087 &list->head);
1088
1089 memset (list, 0, sizeof (*list));
1090 list->tailp = &list->head;
1091 if (gdb_xml_parse_quick (_("target library list"), "library-list.dtd",
1092 svr4_library_list_elements, document, list) == 0)
1093 {
1094 /* Parsed successfully, keep the result. */
1095 discard_cleanups (back_to);
1096 return 1;
1097 }
1098
1099 do_cleanups (back_to);
1100 return 0;
1101 }
1102
1103 /* Attempt to get so_list from target via qXfer:libraries:read packet.
1104
1105 Return 0 if packet not supported, *SO_LIST_RETURN is not modified in such
1106 case. Return 1 if *SO_LIST_RETURN contains the library list, it may be
1107 empty, caller is responsible for freeing all its entries. */
1108
1109 static int
1110 svr4_current_sos_via_xfer_libraries (struct svr4_library_list *list)
1111 {
1112 char *svr4_library_document;
1113 int result;
1114 struct cleanup *back_to;
1115
1116 /* Fetch the list of shared libraries. */
1117 svr4_library_document = target_read_stralloc (&current_target,
1118 TARGET_OBJECT_LIBRARIES_SVR4,
1119 NULL);
1120 if (svr4_library_document == NULL)
1121 return 0;
1122
1123 back_to = make_cleanup (xfree, svr4_library_document);
1124 result = svr4_parse_libraries (svr4_library_document, list);
1125 do_cleanups (back_to);
1126
1127 return result;
1128 }
1129
1130 #else
1131
static int
svr4_current_sos_via_xfer_libraries (struct svr4_library_list *list)
{
  /* Built without libexpat, so there is no XML parser available;
     report "not supported" and let the caller fall back to reading
     the inferior's link map directly.  */
  return 0;
}
1137
1138 #endif
1139
1140 /* If no shared library information is available from the dynamic
1141 linker, build a fallback list from other sources. */
1142
1143 static struct so_list *
1144 svr4_default_sos (void)
1145 {
1146 struct svr4_info *info = get_svr4_info ();
1147 struct so_list *new;
1148
1149 if (!info->debug_loader_offset_p)
1150 return NULL;
1151
1152 new = XZALLOC (struct so_list);
1153
1154 new->lm_info = xzalloc (sizeof (struct lm_info));
1155
1156 /* Nothing will ever check the other fields if we set l_addr_p. */
1157 new->lm_info->l_addr = info->debug_loader_offset;
1158 new->lm_info->l_addr_p = 1;
1159
1160 strncpy (new->so_name, info->debug_loader_name, SO_NAME_MAX_PATH_SIZE - 1);
1161 new->so_name[SO_NAME_MAX_PATH_SIZE - 1] = '\0';
1162 strcpy (new->so_original_name, new->so_name);
1163
1164 return new;
1165 }
1166
1167 /* Read the whole inferior libraries chain starting at address LM. Add the
1168 entries to the tail referenced by LINK_PTR_PTR. Ignore the first entry if
1169 IGNORE_FIRST and set global MAIN_LM_ADDR according to it. */
1170
static void
svr4_read_so_list (CORE_ADDR lm, struct so_list ***link_ptr_ptr,
		   int ignore_first)
{
  CORE_ADDR prev_lm = 0, next_lm;

  /* Walk the l_next chain starting at LM, appending one so_list node
     per entry at the tail referenced by LINK_PTR_PTR.  */
  for (; lm != 0; prev_lm = lm, lm = next_lm)
    {
      /* NOTE(review): LMO appears to be unused in this loop body --
	 candidate for removal, provided svr4_fetch_link_map_offsets
	 has no side effect relied upon here; confirm before deleting.  */
      struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
      struct so_list *new;
      struct cleanup *old_chain;
      int errcode;
      char *buffer;

      new = XZALLOC (struct so_list);
      /* Free NEW on any early exit from this iteration.  */
      old_chain = make_cleanup_free_so (new);

      new->lm_info = lm_info_read (lm);
      if (new->lm_info == NULL)
	{
	  do_cleanups (old_chain);
	  break;
	}

      next_lm = new->lm_info->l_next;

      /* Sanity-check the doubly-linked list: this entry's l_prev must
	 point back at the entry just processed; otherwise the list in
	 the inferior is corrupt and we stop here.  */
      if (new->lm_info->l_prev != prev_lm)
	{
	  warning (_("Corrupted shared library list: %s != %s"),
		   paddress (target_gdbarch (), prev_lm),
		   paddress (target_gdbarch (), new->lm_info->l_prev));
	  do_cleanups (old_chain);
	  break;
	}

      /* For SVR4 versions, the first entry in the link map is for the
	 inferior executable, so we must ignore it.  For some versions of
	 SVR4, it has no name.  For others (Solaris 2.3 for example), it
	 does have a name, so we can no longer use a missing name to
	 decide when to ignore it.  */
      if (ignore_first && new->lm_info->l_prev == 0)
	{
	  struct svr4_info *info = get_svr4_info ();

	  /* Remember the main executable's link map address globally,
	     then drop this entry.  */
	  info->main_lm_addr = new->lm_info->lm_addr;
	  do_cleanups (old_chain);
	  continue;
	}

      /* Extract this shared object's name.  */
      target_read_string (new->lm_info->l_name, &buffer,
			  SO_NAME_MAX_PATH_SIZE - 1, &errcode);
      if (errcode != 0)
	{
	  warning (_("Can't read pathname for load map: %s."),
		   safe_strerror (errcode));
	  do_cleanups (old_chain);
	  continue;
	}

      /* BUFFER was allocated by target_read_string; copy it into the
	 fixed-size so_name field (NUL-terminated) and release it.  */
      strncpy (new->so_name, buffer, SO_NAME_MAX_PATH_SIZE - 1);
      new->so_name[SO_NAME_MAX_PATH_SIZE - 1] = '\0';
      strcpy (new->so_original_name, new->so_name);
      xfree (buffer);

      /* If this entry has no name, or its name matches the name
	 for the main executable, don't include it in the list.  */
      if (! new->so_name[0] || match_main (new->so_name))
	{
	  do_cleanups (old_chain);
	  continue;
	}

      /* Keep NEW: append it at the tail and advance the tail pointer.  */
      discard_cleanups (old_chain);
      new->next = 0;
      **link_ptr_ptr = new;
      *link_ptr_ptr = &new->next;
    }
}
1250
1251 /* Implement the "current_sos" target_so_ops method. */
1252
static struct so_list *
svr4_current_sos (void)
{
  CORE_ADDR lm;
  struct so_list *head = NULL;
  /* Tail pointer used by svr4_read_so_list to append entries.  */
  struct so_list **link_ptr = &head;
  struct svr4_info *info;
  struct cleanup *back_to;
  int ignore_first;
  struct svr4_library_list library_list;

  /* Fall back to manual examination of the target if the packet is not
     supported or gdbserver failed to find DT_DEBUG.  gdb.server/solib-list.exp
     tests a case where gdbserver cannot find the shared libraries list while
     GDB itself is able to find it via SYMFILE_OBJFILE.

     Unfortunately statically linked inferiors will also fall back through this
     suboptimal code path.  */

  if (svr4_current_sos_via_xfer_libraries (&library_list))
    {
      if (library_list.main_lm)
	{
	  info = get_svr4_info ();
	  info->main_lm_addr = library_list.main_lm;
	}

      /* An empty XML list still counts as an answer; substitute the
	 loader-only default list in that case.  */
      return library_list.head ? library_list.head : svr4_default_sos ();
    }

  info = get_svr4_info ();

  /* Always locate the debug struct, in case it has moved.  */
  info->debug_base = 0;
  locate_base (info);

  /* If we can't find the dynamic linker's base structure, this
     must not be a dynamically linked executable.  Hmm.  */
  if (! info->debug_base)
    return svr4_default_sos ();

  /* Assume that everything is a library if the dynamic loader was loaded
     late by a static executable.  */
  if (exec_bfd && bfd_get_section_by_name (exec_bfd, ".dynamic") == NULL)
    ignore_first = 0;
  else
    ignore_first = 1;

  /* Free the partial list if reading the link map errors out below.  */
  back_to = make_cleanup (svr4_free_library_list, &head);

  /* Walk the inferior's link map list, and build our list of
     `struct so_list' nodes.  */
  lm = solib_svr4_r_map (info);
  if (lm)
    svr4_read_so_list (lm, &link_ptr, ignore_first);

  /* On Solaris, the dynamic linker is not in the normal list of
     shared objects, so make sure we pick it up too.  Having
     symbol information for the dynamic linker is quite crucial
     for skipping dynamic linker resolver code.  */
  lm = solib_svr4_r_ldsomap (info);
  if (lm)
    svr4_read_so_list (lm, &link_ptr, 0);

  /* List built successfully; keep it.  */
  discard_cleanups (back_to);

  if (head == NULL)
    return svr4_default_sos ();

  return head;
}
1324
1325 /* Get the address of the link_map for a given OBJFILE. */
1326
1327 CORE_ADDR
1328 svr4_fetch_objfile_link_map (struct objfile *objfile)
1329 {
1330 struct so_list *so;
1331 struct svr4_info *info = get_svr4_info ();
1332
1333 /* Cause svr4_current_sos() to be run if it hasn't been already. */
1334 if (info->main_lm_addr == 0)
1335 solib_add (NULL, 0, &current_target, auto_solib_add);
1336
1337 /* svr4_current_sos() will set main_lm_addr for the main executable. */
1338 if (objfile == symfile_objfile)
1339 return info->main_lm_addr;
1340
1341 /* The other link map addresses may be found by examining the list
1342 of shared libraries. */
1343 for (so = master_so_list (); so; so = so->next)
1344 if (so->objfile == objfile)
1345 return so->lm_info->lm_addr;
1346
1347 /* Not found! */
1348 return 0;
1349 }
1350
1351 /* On some systems, the only way to recognize the link map entry for
1352 the main executable file is by looking at its name. Return
1353 non-zero iff SONAME matches one of the known main executable names. */
1354
1355 static int
1356 match_main (const char *soname)
1357 {
1358 const char * const *mainp;
1359
1360 for (mainp = main_name_list; *mainp != NULL; mainp++)
1361 {
1362 if (strcmp (soname, *mainp) == 0)
1363 return (1);
1364 }
1365
1366 return (0);
1367 }
1368
1369 /* Return 1 if PC lies in the dynamic symbol resolution code of the
1370 SVR4 run time loader. */
1371
1372 int
1373 svr4_in_dynsym_resolve_code (CORE_ADDR pc)
1374 {
1375 struct svr4_info *info = get_svr4_info ();
1376
1377 return ((pc >= info->interp_text_sect_low
1378 && pc < info->interp_text_sect_high)
1379 || (pc >= info->interp_plt_sect_low
1380 && pc < info->interp_plt_sect_high)
1381 || in_plt_section (pc, NULL)
1382 || in_gnu_ifunc_stub (pc));
1383 }
1384
1385 /* Given an executable's ABFD and target, compute the entry-point
1386 address. */
1387
static CORE_ADDR
exec_entry_point (struct bfd *abfd, struct target_ops *targ)
{
  CORE_ADDR addr;

  /* KevinB wrote ... for most targets, the address returned by
     bfd_get_start_address() is the entry point for the start
     function.  But, for some targets, bfd_get_start_address() returns
     the address of a function descriptor from which the entry point
     address may be extracted.  This address is extracted by
     gdbarch_convert_from_func_ptr_addr().  The method
     gdbarch_convert_from_func_ptr_addr() is merely the identity
     function for targets which don't use function descriptors.  */
  addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
					     bfd_get_start_address (abfd),
					     targ);
  /* Strip any target-specific non-address bits from the result.  */
  return gdbarch_addr_bits_remove (target_gdbarch (), addr);
}
1406
1407 /* Helper function for gdb_bfd_lookup_symbol. */
1408
1409 static int
1410 cmp_name_and_sec_flags (asymbol *sym, void *data)
1411 {
1412 return (strcmp (sym->name, (const char *) data) == 0
1413 && (sym->section->flags & (SEC_CODE | SEC_DATA)) != 0);
1414 }
1415 /* Arrange for dynamic linker to hit breakpoint.
1416
1417 Both the SunOS and the SVR4 dynamic linkers have, as part of their
1418 debugger interface, support for arranging for the inferior to hit
1419 a breakpoint after mapping in the shared libraries. This function
1420 enables that breakpoint.
1421
1422 For SunOS, there is a special flag location (in_debugger) which we
1423 set to 1. When the dynamic linker sees this flag set, it will set
1424 a breakpoint at a location known only to itself, after saving the
1425 original contents of that place and the breakpoint address itself,
   in its own internal structures.  When we resume the inferior, it
1427 will eventually take a SIGTRAP when it runs into the breakpoint.
1428 We handle this (in a different place) by restoring the contents of
1429 the breakpointed location (which is only known after it stops),
1430 chasing around to locate the shared libraries that have been
1431 loaded, then resuming.
1432
1433 For SVR4, the debugger interface structure contains a member (r_brk)
1434 which is statically initialized at the time the shared library is
1435 built, to the offset of a function (_r_debug_state) which is guaran-
1436 teed to be called once before mapping in a library, and again when
1437 the mapping is complete. At the time we are examining this member,
1438 it contains only the unrelocated offset of the function, so we have
1439 to do our own relocation. Later, when the dynamic linker actually
1440 runs, it relocates r_brk to be the actual address of _r_debug_state().
1441
1442 The debugger interface structure also contains an enumeration which
1443 is set to either RT_ADD or RT_DELETE prior to changing the mapping,
1444 depending upon whether or not the library is being mapped or unmapped,
1445 and then set to RT_CONSISTENT after the library is mapped/unmapped. */
1446
/* Returns nonzero iff a solib event breakpoint was successfully
   created; zero means no breakpoint could be placed.  */
static int
enable_break (struct svr4_info *info, int from_tty)
{
  struct minimal_symbol *msymbol;
  const char * const *bkpt_namep;
  asection *interp_sect;
  gdb_byte *interp_name;
  CORE_ADDR sym_addr;

  /* Reset the recorded dynamic-linker section ranges; they are
     re-filled below by whichever strategy succeeds.  */
  info->interp_text_sect_low = info->interp_text_sect_high = 0;
  info->interp_plt_sect_low = info->interp_plt_sect_high = 0;

  /* If we already have a shared library list in the target, and
     r_debug contains r_brk, set the breakpoint there - this should
     mean r_brk has already been relocated.  Assume the dynamic linker
     is the object containing r_brk.  */

  solib_add (NULL, from_tty, &current_target, auto_solib_add);
  sym_addr = 0;
  if (info->debug_base && solib_svr4_r_map (info) != 0)
    sym_addr = solib_svr4_r_brk (info);

  if (sym_addr != 0)
    {
      struct obj_section *os;

      sym_addr = gdbarch_addr_bits_remove
	(target_gdbarch (),
	 gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
					     sym_addr,
					     &current_target));

      /* On at least some versions of Solaris there's a dynamic relocation
	 on _r_debug.r_brk and SYM_ADDR may not be relocated yet, e.g., if
	 we get control before the dynamic linker has self-relocated.
	 Check if SYM_ADDR is in a known section, if it is assume we can
	 trust its value.  This is just a heuristic though, it could go away
	 or be replaced if it's getting in the way.

	 On ARM we need to know whether the ISA of rtld_db_dlactivity (or
	 however it's spelled in your particular system) is ARM or Thumb.
	 That knowledge is encoded in the address, if it's Thumb the low bit
	 is 1.  However, we've stripped that info above and it's not clear
	 what all the consequences are of passing a non-addr_bits_remove'd
	 address to create_solib_event_breakpoint.  The call to
	 find_pc_section verifies we know about the address and have some
	 hope of computing the right kind of breakpoint to use (via
	 symbol info).  It does mean that GDB needs to be pointed at a
	 non-stripped version of the dynamic linker in order to obtain
	 information it already knows about.  Sigh.  */

      os = find_pc_section (sym_addr);
      if (os != NULL)
	{
	  /* Record the relocated start and end address of the dynamic linker
	     text and plt section for svr4_in_dynsym_resolve_code.  */
	  bfd *tmp_bfd;
	  CORE_ADDR load_addr;

	  tmp_bfd = os->objfile->obfd;
	  load_addr = ANOFFSET (os->objfile->section_offsets,
				os->objfile->sect_index_text);

	  interp_sect = bfd_get_section_by_name (tmp_bfd, ".text");
	  if (interp_sect)
	    {
	      info->interp_text_sect_low =
		bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
	      info->interp_text_sect_high =
		info->interp_text_sect_low
		+ bfd_section_size (tmp_bfd, interp_sect);
	    }
	  interp_sect = bfd_get_section_by_name (tmp_bfd, ".plt");
	  if (interp_sect)
	    {
	      info->interp_plt_sect_low =
		bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
	      info->interp_plt_sect_high =
		info->interp_plt_sect_low
		+ bfd_section_size (tmp_bfd, interp_sect);
	    }

	  create_solib_event_breakpoint (target_gdbarch (), sym_addr);
	  return 1;
	}
    }

  /* Find the program interpreter; if not found, warn the user and drop
     into the old breakpoint at symbol code.  */
  interp_name = find_program_interpreter ();
  if (interp_name)
    {
      CORE_ADDR load_addr = 0;
      int load_addr_found = 0;
      int loader_found_in_list = 0;
      struct so_list *so;
      bfd *tmp_bfd = NULL;
      struct target_ops *tmp_bfd_target;
      volatile struct gdb_exception ex;

      sym_addr = 0;

      /* Now we need to figure out where the dynamic linker was
	 loaded so that we can load its symbols and place a breakpoint
	 in the dynamic linker itself.

	 This address is stored on the stack.  However, I've been unable
	 to find any magic formula to find it for Solaris (appears to
	 be trivial on GNU/Linux).  Therefore, we have to try an alternate
	 mechanism to find the dynamic linker's base address.  */

      /* Errors opening the interpreter's BFD are swallowed here; a
	 NULL TMP_BFD below routes to the fallback path.  */
      TRY_CATCH (ex, RETURN_MASK_ALL)
	{
	  tmp_bfd = solib_bfd_open (interp_name);
	}
      if (tmp_bfd == NULL)
	goto bkpt_at_symbol;

      /* Now convert the TMP_BFD into a target.  That way target, as
	 well as BFD operations can be used.  */
      tmp_bfd_target = target_bfd_reopen (tmp_bfd);
      /* target_bfd_reopen acquired its own reference, so we can
	 release ours now.  */
      gdb_bfd_unref (tmp_bfd);

      /* On a running target, we can get the dynamic linker's base
	 address from the shared library table.  */
      so = master_so_list ();
      while (so)
	{
	  if (svr4_same_1 (interp_name, so->so_original_name))
	    {
	      load_addr_found = 1;
	      loader_found_in_list = 1;
	      load_addr = lm_addr_check (so, tmp_bfd);
	      break;
	    }
	  so = so->next;
	}

      /* If we were not able to find the base address of the loader
	 from our so_list, then try using the AT_BASE auxiliary entry.  */
      if (!load_addr_found)
	if (target_auxv_search (&current_target, AT_BASE, &load_addr) > 0)
	  {
	    int addr_bit = gdbarch_addr_bit (target_gdbarch ());

	    /* Ensure LOAD_ADDR has proper sign in its possible upper bits so
	       that `+ load_addr' will overflow CORE_ADDR width not creating
	       invalid addresses like 0x101234567 for 32bit inferiors on 64bit
	       GDB.  */

	    if (addr_bit < (sizeof (CORE_ADDR) * HOST_CHAR_BIT))
	      {
		CORE_ADDR space_size = (CORE_ADDR) 1 << addr_bit;
		CORE_ADDR tmp_entry_point = exec_entry_point (tmp_bfd,
							      tmp_bfd_target);

		gdb_assert (load_addr < space_size);

		/* TMP_ENTRY_POINT exceeding SPACE_SIZE would be for prelinked
		   64bit ld.so with 32bit executable, it should not happen.  */

		if (tmp_entry_point < space_size
		    && tmp_entry_point + load_addr >= space_size)
		  load_addr -= space_size;
	      }

	    load_addr_found = 1;
	  }

      /* Otherwise we find the dynamic linker's base address by examining
	 the current pc (which should point at the entry point for the
	 dynamic linker) and subtracting the offset of the entry point.

	 This is more fragile than the previous approaches, but is a good
	 fallback method because it has actually been working well in
	 most cases.  */
      if (!load_addr_found)
	{
	  struct regcache *regcache
	    = get_thread_arch_regcache (inferior_ptid, target_gdbarch ());

	  load_addr = (regcache_read_pc (regcache)
		       - exec_entry_point (tmp_bfd, tmp_bfd_target));
	}

      /* Record the loader's base so svr4_default_sos can synthesize an
	 entry for it if the normal list never mentions it.  */
      if (!loader_found_in_list)
	{
	  info->debug_loader_name = xstrdup (interp_name);
	  info->debug_loader_offset_p = 1;
	  info->debug_loader_offset = load_addr;
	  solib_add (NULL, from_tty, &current_target, auto_solib_add);
	}

      /* Record the relocated start and end address of the dynamic linker
	 text and plt section for svr4_in_dynsym_resolve_code.  */
      interp_sect = bfd_get_section_by_name (tmp_bfd, ".text");
      if (interp_sect)
	{
	  info->interp_text_sect_low =
	    bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
	  info->interp_text_sect_high =
	    info->interp_text_sect_low
	    + bfd_section_size (tmp_bfd, interp_sect);
	}
      interp_sect = bfd_get_section_by_name (tmp_bfd, ".plt");
      if (interp_sect)
	{
	  info->interp_plt_sect_low =
	    bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
	  info->interp_plt_sect_high =
	    info->interp_plt_sect_low
	    + bfd_section_size (tmp_bfd, interp_sect);
	}

      /* Now try to set a breakpoint in the dynamic linker.  */
      for (bkpt_namep = solib_break_names; *bkpt_namep != NULL; bkpt_namep++)
	{
	  sym_addr = gdb_bfd_lookup_symbol (tmp_bfd, cmp_name_and_sec_flags,
					    (void *) *bkpt_namep);
	  if (sym_addr != 0)
	    break;
	}

      if (sym_addr != 0)
	/* Convert 'sym_addr' from a function pointer to an address.
	   Because we pass tmp_bfd_target instead of the current
	   target, this will always produce an unrelocated value.  */
	sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
						       sym_addr,
						       tmp_bfd_target);

      /* We're done with both the temporary bfd and target.  Closing
	 the target closes the underlying bfd, because it holds the
	 only remaining reference.  */
      target_close (tmp_bfd_target, 0);

      if (sym_addr != 0)
	{
	  create_solib_event_breakpoint (target_gdbarch (),
					 load_addr + sym_addr);
	  xfree (interp_name);
	  return 1;
	}

      /* For whatever reason we couldn't set a breakpoint in the dynamic
	 linker.  Warn and drop into the old code.  */
    bkpt_at_symbol:
      xfree (interp_name);
      warning (_("Unable to find dynamic linker breakpoint function.\n"
	       "GDB will be unable to debug shared library initializers\n"
	       "and track explicitly loaded dynamic code."));
    }

  /* Scan through the lists of symbols, trying to look up the symbol and
     set a breakpoint there.  Terminate loop when we/if we succeed.  */

  for (bkpt_namep = solib_break_names; *bkpt_namep != NULL; bkpt_namep++)
    {
      msymbol = lookup_minimal_symbol (*bkpt_namep, NULL, symfile_objfile);
      if ((msymbol != NULL) && (SYMBOL_VALUE_ADDRESS (msymbol) != 0))
	{
	  sym_addr = SYMBOL_VALUE_ADDRESS (msymbol);
	  sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
							 sym_addr,
							 &current_target);
	  create_solib_event_breakpoint (target_gdbarch (), sym_addr);
	  return 1;
	}
    }

  /* NOTE(review): if control reached here via the bkpt_at_symbol label,
     INTERP_NAME was already xfree'd above; only its stale (non-NULL)
     pointer value is tested here, never dereferenced.  Consider using a
     separate flag instead of the freed pointer -- confirm.  */
  if (interp_name != NULL && !current_inferior ()->attach_flag)
    {
      for (bkpt_namep = bkpt_names; *bkpt_namep != NULL; bkpt_namep++)
	{
	  msymbol = lookup_minimal_symbol (*bkpt_namep, NULL, symfile_objfile);
	  if ((msymbol != NULL) && (SYMBOL_VALUE_ADDRESS (msymbol) != 0))
	    {
	      sym_addr = SYMBOL_VALUE_ADDRESS (msymbol);
	      sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
							     sym_addr,
							     &current_target);
	      create_solib_event_breakpoint (target_gdbarch (), sym_addr);
	      return 1;
	    }
	}
    }
  return 0;
}
1735
1736 /* Implement the "special_symbol_handling" target_so_ops method. */
1737
static void
svr4_special_symbol_handling (void)
{
  /* SVR4 targets need no extra symbol handling after loading shared
     library symbols; this hook is intentionally a no-op.  */
}
1743
1744 /* Read the ELF program headers from ABFD. Return the contents and
1745 set *PHDRS_SIZE to the size of the program headers. */
1746
1747 static gdb_byte *
1748 read_program_headers_from_bfd (bfd *abfd, int *phdrs_size)
1749 {
1750 Elf_Internal_Ehdr *ehdr;
1751 gdb_byte *buf;
1752
1753 ehdr = elf_elfheader (abfd);
1754
1755 *phdrs_size = ehdr->e_phnum * ehdr->e_phentsize;
1756 if (*phdrs_size == 0)
1757 return NULL;
1758
1759 buf = xmalloc (*phdrs_size);
1760 if (bfd_seek (abfd, ehdr->e_phoff, SEEK_SET) != 0
1761 || bfd_bread (buf, *phdrs_size, abfd) != *phdrs_size)
1762 {
1763 xfree (buf);
1764 return NULL;
1765 }
1766
1767 return buf;
1768 }
1769
1770 /* Return 1 and fill *DISPLACEMENTP with detected PIE offset of inferior
1771 exec_bfd. Otherwise return 0.
1772
1773 We relocate all of the sections by the same amount. This
1774 behavior is mandated by recent editions of the System V ABI.
1775 According to the System V Application Binary Interface,
1776 Edition 4.1, page 5-5:
1777
1778 ... Though the system chooses virtual addresses for
1779 individual processes, it maintains the segments' relative
1780 positions. Because position-independent code uses relative
     addressing between segments, the difference between
1782 virtual addresses in memory must match the difference
1783 between virtual addresses in the file. The difference
1784 between the virtual address of any segment in memory and
1785 the corresponding virtual address in the file is thus a
1786 single constant value for any one executable or shared
1787 object in a given process. This difference is the base
1788 address. One use of the base address is to relocate the
1789 memory image of the program during dynamic linking.
1790
1791 The same language also appears in Edition 4.0 of the System V
1792 ABI and is left unspecified in some of the earlier editions.
1793
1794 Decide if the objfile needs to be relocated. As indicated above, we will
1795 only be here when execution is stopped. But during attachment PC can be at
1796 arbitrary address therefore regcache_read_pc can be misleading (contrary to
1797 the auxv AT_ENTRY value). Moreover for executable with interpreter section
1798 regcache_read_pc would point to the interpreter and not the main executable.
1799
1800 So, to summarize, relocations are necessary when the start address obtained
1801 from the executable is different from the address in auxv AT_ENTRY entry.
1802
1803 [ The astute reader will note that we also test to make sure that
1804 the executable in question has the DYNAMIC flag set. It is my
1805 opinion that this test is unnecessary (undesirable even). It
1806 was added to avoid inadvertent relocation of an executable
1807 whose e_type member in the ELF header is not ET_DYN. There may
1808 be a time in the future when it is desirable to do relocations
1809 on other types of files as well in which case this condition
   should either be removed or modified to accommodate the new file
1811 type. - Kevin, Nov 2000. ] */
1812
1813 static int
1814 svr4_exec_displacement (CORE_ADDR *displacementp)
1815 {
1816 /* ENTRY_POINT is a possible function descriptor - before
1817 a call to gdbarch_convert_from_func_ptr_addr. */
1818 CORE_ADDR entry_point, displacement;
1819
1820 if (exec_bfd == NULL)
1821 return 0;
1822
1823 /* Therefore for ELF it is ET_EXEC and not ET_DYN. Both shared libraries
1824 being executed themselves and PIE (Position Independent Executable)
1825 executables are ET_DYN. */
1826
1827 if ((bfd_get_file_flags (exec_bfd) & DYNAMIC) == 0)
1828 return 0;
1829
1830 if (target_auxv_search (&current_target, AT_ENTRY, &entry_point) <= 0)
1831 return 0;
1832
1833 displacement = entry_point - bfd_get_start_address (exec_bfd);
1834
1835 /* Verify the DISPLACEMENT candidate complies with the required page
1836 alignment. It is cheaper than the program headers comparison below. */
1837
1838 if (bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
1839 {
1840 const struct elf_backend_data *elf = get_elf_backend_data (exec_bfd);
1841
1842 /* p_align of PT_LOAD segments does not specify any alignment but
1843 only congruency of addresses:
1844 p_offset % p_align == p_vaddr % p_align
1845 Kernel is free to load the executable with lower alignment. */
1846
1847 if ((displacement & (elf->minpagesize - 1)) != 0)
1848 return 0;
1849 }
1850
1851 /* Verify that the auxilliary vector describes the same file as exec_bfd, by
1852 comparing their program headers. If the program headers in the auxilliary
1853 vector do not match the program headers in the executable, then we are
1854 looking at a different file than the one used by the kernel - for
1855 instance, "gdb program" connected to "gdbserver :PORT ld.so program". */
1856
1857 if (bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
1858 {
1859 /* Be optimistic and clear OK only if GDB was able to verify the headers
1860 really do not match. */
1861 int phdrs_size, phdrs2_size, ok = 1;
1862 gdb_byte *buf, *buf2;
1863 int arch_size;
1864
1865 buf = read_program_header (-1, &phdrs_size, &arch_size);
1866 buf2 = read_program_headers_from_bfd (exec_bfd, &phdrs2_size);
1867 if (buf != NULL && buf2 != NULL)
1868 {
1869 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
1870
1871 /* We are dealing with three different addresses. EXEC_BFD
1872 represents current address in on-disk file. target memory content
1873 may be different from EXEC_BFD as the file may have been prelinked
1874 to a different address after the executable has been loaded.
1875 Moreover the address of placement in target memory can be
1876 different from what the program headers in target memory say -
1877 this is the goal of PIE.
1878
1879 Detected DISPLACEMENT covers both the offsets of PIE placement and
1880 possible new prelink performed after start of the program. Here
1881 relocate BUF and BUF2 just by the EXEC_BFD vs. target memory
1882 content offset for the verification purpose. */
1883
1884 if (phdrs_size != phdrs2_size
1885 || bfd_get_arch_size (exec_bfd) != arch_size)
1886 ok = 0;
1887 else if (arch_size == 32
1888 && phdrs_size >= sizeof (Elf32_External_Phdr)
1889 && phdrs_size % sizeof (Elf32_External_Phdr) == 0)
1890 {
1891 Elf_Internal_Ehdr *ehdr2 = elf_tdata (exec_bfd)->elf_header;
1892 Elf_Internal_Phdr *phdr2 = elf_tdata (exec_bfd)->phdr;
1893 CORE_ADDR displacement = 0;
1894 int i;
1895
1896 /* DISPLACEMENT could be found more easily by the difference of
1897 ehdr2->e_entry. But we haven't read the ehdr yet, and we
1898 already have enough information to compute that displacement
1899 with what we've read. */
1900
1901 for (i = 0; i < ehdr2->e_phnum; i++)
1902 if (phdr2[i].p_type == PT_LOAD)
1903 {
1904 Elf32_External_Phdr *phdrp;
1905 gdb_byte *buf_vaddr_p, *buf_paddr_p;
1906 CORE_ADDR vaddr, paddr;
1907 CORE_ADDR displacement_vaddr = 0;
1908 CORE_ADDR displacement_paddr = 0;
1909
1910 phdrp = &((Elf32_External_Phdr *) buf)[i];
1911 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
1912 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
1913
1914 vaddr = extract_unsigned_integer (buf_vaddr_p, 4,
1915 byte_order);
1916 displacement_vaddr = vaddr - phdr2[i].p_vaddr;
1917
1918 paddr = extract_unsigned_integer (buf_paddr_p, 4,
1919 byte_order);
1920 displacement_paddr = paddr - phdr2[i].p_paddr;
1921
1922 if (displacement_vaddr == displacement_paddr)
1923 displacement = displacement_vaddr;
1924
1925 break;
1926 }
1927
1928 /* Now compare BUF and BUF2 with optional DISPLACEMENT. */
1929
1930 for (i = 0; i < phdrs_size / sizeof (Elf32_External_Phdr); i++)
1931 {
1932 Elf32_External_Phdr *phdrp;
1933 Elf32_External_Phdr *phdr2p;
1934 gdb_byte *buf_vaddr_p, *buf_paddr_p;
1935 CORE_ADDR vaddr, paddr;
1936 asection *plt2_asect;
1937
1938 phdrp = &((Elf32_External_Phdr *) buf)[i];
1939 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
1940 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
1941 phdr2p = &((Elf32_External_Phdr *) buf2)[i];
1942
1943 /* PT_GNU_STACK is an exception by being never relocated by
1944 prelink as its addresses are always zero. */
1945
1946 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
1947 continue;
1948
1949 /* Check also other adjustment combinations - PR 11786. */
1950
1951 vaddr = extract_unsigned_integer (buf_vaddr_p, 4,
1952 byte_order);
1953 vaddr -= displacement;
1954 store_unsigned_integer (buf_vaddr_p, 4, byte_order, vaddr);
1955
1956 paddr = extract_unsigned_integer (buf_paddr_p, 4,
1957 byte_order);
1958 paddr -= displacement;
1959 store_unsigned_integer (buf_paddr_p, 4, byte_order, paddr);
1960
1961 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
1962 continue;
1963
1964 /* prelink can convert .plt SHT_NOBITS to SHT_PROGBITS. */
1965 plt2_asect = bfd_get_section_by_name (exec_bfd, ".plt");
1966 if (plt2_asect)
1967 {
1968 int content2;
1969 gdb_byte *buf_filesz_p = (gdb_byte *) &phdrp->p_filesz;
1970 CORE_ADDR filesz;
1971
1972 content2 = (bfd_get_section_flags (exec_bfd, plt2_asect)
1973 & SEC_HAS_CONTENTS) != 0;
1974
1975 filesz = extract_unsigned_integer (buf_filesz_p, 4,
1976 byte_order);
1977
1978 /* PLT2_ASECT is from on-disk file (exec_bfd) while
1979 FILESZ is from the in-memory image. */
1980 if (content2)
1981 filesz += bfd_get_section_size (plt2_asect);
1982 else
1983 filesz -= bfd_get_section_size (plt2_asect);
1984
1985 store_unsigned_integer (buf_filesz_p, 4, byte_order,
1986 filesz);
1987
1988 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
1989 continue;
1990 }
1991
1992 ok = 0;
1993 break;
1994 }
1995 }
1996 else if (arch_size == 64
1997 && phdrs_size >= sizeof (Elf64_External_Phdr)
1998 && phdrs_size % sizeof (Elf64_External_Phdr) == 0)
1999 {
2000 Elf_Internal_Ehdr *ehdr2 = elf_tdata (exec_bfd)->elf_header;
2001 Elf_Internal_Phdr *phdr2 = elf_tdata (exec_bfd)->phdr;
2002 CORE_ADDR displacement = 0;
2003 int i;
2004
2005 /* DISPLACEMENT could be found more easily by the difference of
2006 ehdr2->e_entry. But we haven't read the ehdr yet, and we
2007 already have enough information to compute that displacement
2008 with what we've read. */
2009
2010 for (i = 0; i < ehdr2->e_phnum; i++)
2011 if (phdr2[i].p_type == PT_LOAD)
2012 {
2013 Elf64_External_Phdr *phdrp;
2014 gdb_byte *buf_vaddr_p, *buf_paddr_p;
2015 CORE_ADDR vaddr, paddr;
2016 CORE_ADDR displacement_vaddr = 0;
2017 CORE_ADDR displacement_paddr = 0;
2018
2019 phdrp = &((Elf64_External_Phdr *) buf)[i];
2020 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
2021 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
2022
2023 vaddr = extract_unsigned_integer (buf_vaddr_p, 8,
2024 byte_order);
2025 displacement_vaddr = vaddr - phdr2[i].p_vaddr;
2026
2027 paddr = extract_unsigned_integer (buf_paddr_p, 8,
2028 byte_order);
2029 displacement_paddr = paddr - phdr2[i].p_paddr;
2030
2031 if (displacement_vaddr == displacement_paddr)
2032 displacement = displacement_vaddr;
2033
2034 break;
2035 }
2036
2037 /* Now compare BUF and BUF2 with optional DISPLACEMENT. */
2038
2039 for (i = 0; i < phdrs_size / sizeof (Elf64_External_Phdr); i++)
2040 {
2041 Elf64_External_Phdr *phdrp;
2042 Elf64_External_Phdr *phdr2p;
2043 gdb_byte *buf_vaddr_p, *buf_paddr_p;
2044 CORE_ADDR vaddr, paddr;
2045 asection *plt2_asect;
2046
2047 phdrp = &((Elf64_External_Phdr *) buf)[i];
2048 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
2049 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
2050 phdr2p = &((Elf64_External_Phdr *) buf2)[i];
2051
2052 /* PT_GNU_STACK is an exception by being never relocated by
2053 prelink as its addresses are always zero. */
2054
2055 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2056 continue;
2057
2058 /* Check also other adjustment combinations - PR 11786. */
2059
2060 vaddr = extract_unsigned_integer (buf_vaddr_p, 8,
2061 byte_order);
2062 vaddr -= displacement;
2063 store_unsigned_integer (buf_vaddr_p, 8, byte_order, vaddr);
2064
2065 paddr = extract_unsigned_integer (buf_paddr_p, 8,
2066 byte_order);
2067 paddr -= displacement;
2068 store_unsigned_integer (buf_paddr_p, 8, byte_order, paddr);
2069
2070 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2071 continue;
2072
2073 /* prelink can convert .plt SHT_NOBITS to SHT_PROGBITS. */
2074 plt2_asect = bfd_get_section_by_name (exec_bfd, ".plt");
2075 if (plt2_asect)
2076 {
2077 int content2;
2078 gdb_byte *buf_filesz_p = (gdb_byte *) &phdrp->p_filesz;
2079 CORE_ADDR filesz;
2080
2081 content2 = (bfd_get_section_flags (exec_bfd, plt2_asect)
2082 & SEC_HAS_CONTENTS) != 0;
2083
2084 filesz = extract_unsigned_integer (buf_filesz_p, 8,
2085 byte_order);
2086
2087 /* PLT2_ASECT is from on-disk file (exec_bfd) while
2088 FILESZ is from the in-memory image. */
2089 if (content2)
2090 filesz += bfd_get_section_size (plt2_asect);
2091 else
2092 filesz -= bfd_get_section_size (plt2_asect);
2093
2094 store_unsigned_integer (buf_filesz_p, 8, byte_order,
2095 filesz);
2096
2097 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2098 continue;
2099 }
2100
2101 ok = 0;
2102 break;
2103 }
2104 }
2105 else
2106 ok = 0;
2107 }
2108
2109 xfree (buf);
2110 xfree (buf2);
2111
2112 if (!ok)
2113 return 0;
2114 }
2115
2116 if (info_verbose)
2117 {
2118 /* It can be printed repeatedly as there is no easy way to check
2119 the executable symbols/file has been already relocated to
2120 displacement. */
2121
2122 printf_unfiltered (_("Using PIE (Position Independent Executable) "
2123 "displacement %s for \"%s\".\n"),
2124 paddress (target_gdbarch (), displacement),
2125 bfd_get_filename (exec_bfd));
2126 }
2127
2128 *displacementp = displacement;
2129 return 1;
2130 }
2131
/* Relocate the main executable.  This function should be called upon
   stopping the inferior process at the entry point to the program.
   The entry point from BFD is compared to the AT_ENTRY of AUXV and if they
   are different, the main executable is relocated by the proper amount.  */

static void
svr4_relocate_main_executable (void)
{
  CORE_ADDR displacement;

  /* If we are re-running this executable, SYMFILE_OBJFILE->SECTION_OFFSETS
     probably contains the offsets computed using the PIE displacement
     from the previous run, which of course are irrelevant for this run.
     So we need to determine the new PIE displacement and recompute the
     section offsets accordingly, even if SYMFILE_OBJFILE->SECTION_OFFSETS
     already contains pre-computed offsets.

     If we cannot compute the PIE displacement, either:

       - The executable is not PIE.

       - SYMFILE_OBJFILE does not match the executable started in the target.
	 This can happen for main executable symbols loaded at the host while
	 `ld.so --ld-args main-executable' is loaded in the target.

     Then we leave the section offsets untouched and use them as is for
     this run.  Either:

       - These section offsets were properly reset earlier, and thus
	 already contain the correct values.  This can happen for instance
	 when reconnecting via the remote protocol to a target that supports
	 the `qOffsets' packet.

       - The section offsets were not reset earlier, and the best we can
	 hope is that the old offsets are still applicable to the new run.  */

  if (! svr4_exec_displacement (&displacement))
    return;

  /* Even DISPLACEMENT 0 is a valid new difference of in-memory vs. in-file
     addresses, so apply it unconditionally below.  */

  if (symfile_objfile)
    {
      struct section_offsets *new_offsets;
      int i;

      /* Build a throwaway offsets table (one slot per objfile section,
	 sized the same way SIZEOF_N_SECTION_OFFSETS would) holding the
	 same displacement for every section, then relocate the loaded
	 symbols with it.  */
      new_offsets = alloca (symfile_objfile->num_sections
			    * sizeof (*new_offsets));

      for (i = 0; i < symfile_objfile->num_sections; i++)
	new_offsets->offsets[i] = displacement;

      objfile_relocate (symfile_objfile, new_offsets);
    }
  else if (exec_bfd)
    {
      asection *asect;

      /* No symbols are loaded; shift the exec target's own section
	 table instead so address translation still works.  */
      for (asect = exec_bfd->sections; asect != NULL; asect = asect->next)
	exec_set_section_address (bfd_get_filename (exec_bfd), asect->index,
				  (bfd_section_vma (exec_bfd, asect)
				   + displacement));
    }
}
2197
2198 /* Implement the "create_inferior_hook" target_solib_ops method.
2199
2200 For SVR4 executables, this first instruction is either the first
2201 instruction in the dynamic linker (for dynamically linked
2202 executables) or the instruction at "start" for statically linked
2203 executables. For dynamically linked executables, the system
2204 first exec's /lib/libc.so.N, which contains the dynamic linker,
2205 and starts it running. The dynamic linker maps in any needed
2206 shared libraries, maps in the actual user executable, and then
2207 jumps to "start" in the user executable.
2208
2209 We can arrange to cooperate with the dynamic linker to discover the
2210 names of shared libraries that are dynamically linked, and the base
2211 addresses to which they are linked.
2212
2213 This function is responsible for discovering those names and
2214 addresses, and saving sufficient information about them to allow
2215 their symbols to be read at a later time. */
2216
2217 static void
2218 svr4_solib_create_inferior_hook (int from_tty)
2219 {
2220 struct svr4_info *info;
2221
2222 info = get_svr4_info ();
2223
2224 /* Relocate the main executable if necessary. */
2225 svr4_relocate_main_executable ();
2226
2227 /* No point setting a breakpoint in the dynamic linker if we can't
2228 hit it (e.g., a core file, or a trace file). */
2229 if (!target_has_execution)
2230 return;
2231
2232 if (!svr4_have_link_map_offsets ())
2233 return;
2234
2235 if (!enable_break (info, from_tty))
2236 return;
2237 }
2238
2239 static void
2240 svr4_clear_solib (void)
2241 {
2242 struct svr4_info *info;
2243
2244 info = get_svr4_info ();
2245 info->debug_base = 0;
2246 info->debug_loader_offset_p = 0;
2247 info->debug_loader_offset = 0;
2248 xfree (info->debug_loader_name);
2249 info->debug_loader_name = NULL;
2250 }
2251
2252 /* Clear any bits of ADDR that wouldn't fit in a target-format
2253 data pointer. "Data pointer" here refers to whatever sort of
2254 address the dynamic linker uses to manage its sections. At the
2255 moment, we don't support shared libraries on any processors where
2256 code and data pointers are different sizes.
2257
2258 This isn't really the right solution. What we really need here is
2259 a way to do arithmetic on CORE_ADDR values that respects the
2260 natural pointer/address correspondence. (For example, on the MIPS,
2261 converting a 32-bit pointer to a 64-bit CORE_ADDR requires you to
2262 sign-extend the value. There, simply truncating the bits above
2263 gdbarch_ptr_bit, as we do below, is no good.) This should probably
2264 be a new gdbarch method or something. */
2265 static CORE_ADDR
2266 svr4_truncate_ptr (CORE_ADDR addr)
2267 {
2268 if (gdbarch_ptr_bit (target_gdbarch ()) == sizeof (CORE_ADDR) * 8)
2269 /* We don't need to truncate anything, and the bit twiddling below
2270 will fail due to overflow problems. */
2271 return addr;
2272 else
2273 return addr & (((CORE_ADDR) 1 << gdbarch_ptr_bit (target_gdbarch ())) - 1);
2274 }
2275
2276
2277 static void
2278 svr4_relocate_section_addresses (struct so_list *so,
2279 struct target_section *sec)
2280 {
2281 sec->addr = svr4_truncate_ptr (sec->addr + lm_addr_check (so,
2282 sec->bfd));
2283 sec->endaddr = svr4_truncate_ptr (sec->endaddr + lm_addr_check (so,
2284 sec->bfd));
2285 }
2286 \f
2287
/* Architecture-specific operations.  */

/* Per-architecture data key, registered in _initialize_svr4_solib;
   the per-gdbarch value is a `struct solib_svr4_ops'.  */
static struct gdbarch_data *solib_svr4_data;

/* Hooks an architecture supplies to customize SVR4 solib handling.  */

struct solib_svr4_ops
{
  /* Return a description of the layout of `struct link_map'.
     NULL (the default set by solib_svr4_init) means no fetcher has
     been registered; see svr4_have_link_map_offsets.  */
  struct link_map_offsets *(*fetch_link_map_offsets)(void);
};
2298
2299 /* Return a default for the architecture-specific operations. */
2300
2301 static void *
2302 solib_svr4_init (struct obstack *obstack)
2303 {
2304 struct solib_svr4_ops *ops;
2305
2306 ops = OBSTACK_ZALLOC (obstack, struct solib_svr4_ops);
2307 ops->fetch_link_map_offsets = NULL;
2308 return ops;
2309 }
2310
2311 /* Set the architecture-specific `struct link_map_offsets' fetcher for
2312 GDBARCH to FLMO. Also, install SVR4 solib_ops into GDBARCH. */
2313
2314 void
2315 set_solib_svr4_fetch_link_map_offsets (struct gdbarch *gdbarch,
2316 struct link_map_offsets *(*flmo) (void))
2317 {
2318 struct solib_svr4_ops *ops = gdbarch_data (gdbarch, solib_svr4_data);
2319
2320 ops->fetch_link_map_offsets = flmo;
2321
2322 set_solib_ops (gdbarch, &svr4_so_ops);
2323 }
2324
2325 /* Fetch a link_map_offsets structure using the architecture-specific
2326 `struct link_map_offsets' fetcher. */
2327
2328 static struct link_map_offsets *
2329 svr4_fetch_link_map_offsets (void)
2330 {
2331 struct solib_svr4_ops *ops = gdbarch_data (target_gdbarch (), solib_svr4_data);
2332
2333 gdb_assert (ops->fetch_link_map_offsets);
2334 return ops->fetch_link_map_offsets ();
2335 }
2336
2337 /* Return 1 if a link map offset fetcher has been defined, 0 otherwise. */
2338
2339 static int
2340 svr4_have_link_map_offsets (void)
2341 {
2342 struct solib_svr4_ops *ops = gdbarch_data (target_gdbarch (), solib_svr4_data);
2343
2344 return (ops->fetch_link_map_offsets != NULL);
2345 }
2346 \f
2347
/* Most OS'es that have SVR4-style ELF dynamic libraries define a
   `struct r_debug' and a `struct link_map' that are binary compatible
   with the original SVR4 implementation.  */
2351
2352 /* Fetch (and possibly build) an appropriate `struct link_map_offsets'
2353 for an ILP32 SVR4 system. */
2354
2355 struct link_map_offsets *
2356 svr4_ilp32_fetch_link_map_offsets (void)
2357 {
2358 static struct link_map_offsets lmo;
2359 static struct link_map_offsets *lmp = NULL;
2360
2361 if (lmp == NULL)
2362 {
2363 lmp = &lmo;
2364
2365 lmo.r_version_offset = 0;
2366 lmo.r_version_size = 4;
2367 lmo.r_map_offset = 4;
2368 lmo.r_brk_offset = 8;
2369 lmo.r_ldsomap_offset = 20;
2370
2371 /* Everything we need is in the first 20 bytes. */
2372 lmo.link_map_size = 20;
2373 lmo.l_addr_offset = 0;
2374 lmo.l_name_offset = 4;
2375 lmo.l_ld_offset = 8;
2376 lmo.l_next_offset = 12;
2377 lmo.l_prev_offset = 16;
2378 }
2379
2380 return lmp;
2381 }
2382
2383 /* Fetch (and possibly build) an appropriate `struct link_map_offsets'
2384 for an LP64 SVR4 system. */
2385
2386 struct link_map_offsets *
2387 svr4_lp64_fetch_link_map_offsets (void)
2388 {
2389 static struct link_map_offsets lmo;
2390 static struct link_map_offsets *lmp = NULL;
2391
2392 if (lmp == NULL)
2393 {
2394 lmp = &lmo;
2395
2396 lmo.r_version_offset = 0;
2397 lmo.r_version_size = 4;
2398 lmo.r_map_offset = 8;
2399 lmo.r_brk_offset = 16;
2400 lmo.r_ldsomap_offset = 40;
2401
2402 /* Everything we need is in the first 40 bytes. */
2403 lmo.link_map_size = 40;
2404 lmo.l_addr_offset = 0;
2405 lmo.l_name_offset = 8;
2406 lmo.l_ld_offset = 16;
2407 lmo.l_next_offset = 24;
2408 lmo.l_prev_offset = 32;
2409 }
2410
2411 return lmp;
2412 }
2413 \f
2414
/* The target_so_ops vector for SVR4 systems; its hooks are installed
   by _initialize_svr4_solib below.  */
struct target_so_ops svr4_so_ops;
2416
2417 /* Lookup global symbol for ELF DSOs linked with -Bsymbolic. Those DSOs have a
2418 different rule for symbol lookup. The lookup begins here in the DSO, not in
2419 the main executable. */
2420
2421 static struct symbol *
2422 elf_lookup_lib_symbol (const struct objfile *objfile,
2423 const char *name,
2424 const domain_enum domain)
2425 {
2426 bfd *abfd;
2427
2428 if (objfile == symfile_objfile)
2429 abfd = exec_bfd;
2430 else
2431 {
2432 /* OBJFILE should have been passed as the non-debug one. */
2433 gdb_assert (objfile->separate_debug_objfile_backlink == NULL);
2434
2435 abfd = objfile->obfd;
2436 }
2437
2438 if (abfd == NULL || scan_dyntag (DT_SYMBOLIC, abfd, NULL) != 1)
2439 return NULL;
2440
2441 return lookup_global_symbol_from_objfile (objfile, name, domain);
2442 }
2443
/* Module initializer: register per-architecture and per-program-space
   data keys, and fill in the SVR4 target_so_ops vector.  */

extern initialize_file_ftype _initialize_svr4_solib; /* -Wmissing-prototypes */

void
_initialize_svr4_solib (void)
{
  /* Per-architecture hook table, default-built by solib_svr4_init.  */
  solib_svr4_data = gdbarch_data_register_pre_init (solib_svr4_init);
  /* Per-program-space state, released by svr4_pspace_data_cleanup.  */
  solib_svr4_pspace_data
    = register_program_space_data_with_cleanup (NULL, svr4_pspace_data_cleanup);

  /* Wire up the SVR4 implementations of the target_so_ops hooks.  */
  svr4_so_ops.relocate_section_addresses = svr4_relocate_section_addresses;
  svr4_so_ops.free_so = svr4_free_so;
  svr4_so_ops.clear_solib = svr4_clear_solib;
  svr4_so_ops.solib_create_inferior_hook = svr4_solib_create_inferior_hook;
  svr4_so_ops.special_symbol_handling = svr4_special_symbol_handling;
  svr4_so_ops.current_sos = svr4_current_sos;
  svr4_so_ops.open_symbol_file_object = open_symbol_file_object;
  svr4_so_ops.in_dynsym_resolve_code = svr4_in_dynsym_resolve_code;
  svr4_so_ops.bfd_open = solib_bfd_open;
  svr4_so_ops.lookup_lib_global_symbol = elf_lookup_lib_symbol;
  svr4_so_ops.same = svr4_same;
  svr4_so_ops.keep_data_in_core = svr4_keep_data_in_core;
}
This page took 0.090132 seconds and 4 git commands to generate.