binutils-gdb.git: gdb/solib-svr4.c
1 /* Handle SVR4 shared libraries for GDB, the GNU Debugger.
2
3 Copyright (C) 1990-2013 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21
22 #include "elf/external.h"
23 #include "elf/common.h"
24 #include "elf/mips.h"
25
26 #include "symtab.h"
27 #include "bfd.h"
28 #include "symfile.h"
29 #include "objfiles.h"
30 #include "gdbcore.h"
31 #include "target.h"
32 #include "inferior.h"
33 #include "regcache.h"
34 #include "gdbthread.h"
35 #include "observer.h"
36
37 #include "gdb_assert.h"
38
39 #include "solist.h"
40 #include "solib.h"
41 #include "solib-svr4.h"
42
43 #include "bfd-target.h"
44 #include "elf-bfd.h"
45 #include "exec.h"
46 #include "auxv.h"
47 #include "exceptions.h"
48 #include "gdb_bfd.h"
49
50 static struct link_map_offsets *svr4_fetch_link_map_offsets (void);
51 static int svr4_have_link_map_offsets (void);
52 static void svr4_relocate_main_executable (void);
53
54 /* Link map info to include in an allocated so_list entry. */
55
56 struct lm_info
57 {
58 /* Amount by which addresses in the binary should be relocated to
59 match the inferior. The direct inferior value is L_ADDR_INFERIOR.
60 When prelinking is involved and the prelink base address changes,
61 we may need a different offset - the recomputed offset is in L_ADDR.
62 It is commonly the same value. It is cached as we want to warn about
63 the difference and compute it only once. L_ADDR is valid
64 iff L_ADDR_P. */
65 CORE_ADDR l_addr, l_addr_inferior;
66 unsigned int l_addr_p : 1;
67
68 /* The target location of lm. */
69 CORE_ADDR lm_addr;
70
71 /* Values read in from inferior's fields of the same name. */
72 CORE_ADDR l_ld, l_next, l_prev, l_name;
73 };
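
/* For reference only: the fields above mirror the dynamic linker's own
   list element.  On glibc-based systems <link.h> declares it roughly as
   sketched below; the exact layout varies between targets, which is why
   GDB reads the fields through link_map_offsets instead of relying on
   any one declaration.

       struct link_map
       {
         ElfW(Addr) l_addr;          // Difference between file and memory addresses.
         char *l_name;               // Absolute pathname of the object.
         ElfW(Dyn) *l_ld;            // Runtime address of the dynamic section.
         struct link_map *l_next, *l_prev;   // Doubly linked chain of objects.
       };
*/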
74
75 /* On SVR4 systems, a list of symbols in the dynamic linker where
76 GDB can try to place a breakpoint to monitor shared library
77 events.
78
79 If none of these symbols are found, or other errors occur, then
80 SVR4 systems will fall back to using a symbol as the "startup
81 mapping complete" breakpoint address. */
82
83 static const char * const solib_break_names[] =
84 {
85 "r_debug_state",
86 "_r_debug_state",
87 "_dl_debug_state",
88 "rtld_db_dlactivity",
89 "__dl_rtld_db_dlactivity",
90 "_rtld_debug_state",
91
92 NULL
93 };
94
95 static const char * const bkpt_names[] =
96 {
97 "_start",
98 "__start",
99 "main",
100 NULL
101 };
102
103 static const char * const main_name_list[] =
104 {
105 "main_$main",
106 NULL
107 };
108
109 /* Return non-zero if GDB_SO_NAME and INFERIOR_SO_NAME represent
110 the same shared library. */
111
112 static int
113 svr4_same_1 (const char *gdb_so_name, const char *inferior_so_name)
114 {
115 if (strcmp (gdb_so_name, inferior_so_name) == 0)
116 return 1;
117
118 /* On Solaris, when starting the inferior we think that the dynamic linker
119 is /usr/lib/ld.so.1, but later on the table of loaded shared libraries
120 contains /lib/ld.so.1 instead. Sometimes one file is a link to the other,
121 and sometimes they have identical content without being linked to each
122 other. We don't restrict this check to Solaris, but the chances
123 of running into this situation elsewhere are very low. */
124 if (strcmp (gdb_so_name, "/usr/lib/ld.so.1") == 0
125 && strcmp (inferior_so_name, "/lib/ld.so.1") == 0)
126 return 1;
127
128 /* Similarly, we observed the same issue with sparc64, but with
129 different locations. */
130 if (strcmp (gdb_so_name, "/usr/lib/sparcv9/ld.so.1") == 0
131 && strcmp (inferior_so_name, "/lib/sparcv9/ld.so.1") == 0)
132 return 1;
133
134 return 0;
135 }
136
137 static int
138 svr4_same (struct so_list *gdb, struct so_list *inferior)
139 {
140 return (svr4_same_1 (gdb->so_original_name, inferior->so_original_name));
141 }
142
143 static struct lm_info *
144 lm_info_read (CORE_ADDR lm_addr)
145 {
146 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
147 gdb_byte *lm;
148 struct lm_info *lm_info;
149 struct cleanup *back_to;
150
151 lm = xmalloc (lmo->link_map_size);
152 back_to = make_cleanup (xfree, lm);
153
154 if (target_read_memory (lm_addr, lm, lmo->link_map_size) != 0)
155 {
156 warning (_("Error reading shared library list entry at %s"),
157 paddress (target_gdbarch (), lm_addr));
158 lm_info = NULL;
159 }
160 else
161 {
162 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
163
164 lm_info = xzalloc (sizeof (*lm_info));
165 lm_info->lm_addr = lm_addr;
166
167 lm_info->l_addr_inferior = extract_typed_address (&lm[lmo->l_addr_offset],
168 ptr_type);
169 lm_info->l_ld = extract_typed_address (&lm[lmo->l_ld_offset], ptr_type);
170 lm_info->l_next = extract_typed_address (&lm[lmo->l_next_offset],
171 ptr_type);
172 lm_info->l_prev = extract_typed_address (&lm[lmo->l_prev_offset],
173 ptr_type);
174 lm_info->l_name = extract_typed_address (&lm[lmo->l_name_offset],
175 ptr_type);
176 }
177
178 do_cleanups (back_to);
179
180 return lm_info;
181 }
182
183 static int
184 has_lm_dynamic_from_link_map (void)
185 {
186 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
187
188 return lmo->l_ld_offset >= 0;
189 }
190
191 static CORE_ADDR
192 lm_addr_check (const struct so_list *so, bfd *abfd)
193 {
194 if (!so->lm_info->l_addr_p)
195 {
196 struct bfd_section *dyninfo_sect;
197 CORE_ADDR l_addr, l_dynaddr, dynaddr;
198
199 l_addr = so->lm_info->l_addr_inferior;
200
201 if (! abfd || ! has_lm_dynamic_from_link_map ())
202 goto set_addr;
203
204 l_dynaddr = so->lm_info->l_ld;
205
206 dyninfo_sect = bfd_get_section_by_name (abfd, ".dynamic");
207 if (dyninfo_sect == NULL)
208 goto set_addr;
209
210 dynaddr = bfd_section_vma (abfd, dyninfo_sect);
211
212 if (dynaddr + l_addr != l_dynaddr)
213 {
214 CORE_ADDR align = 0x1000;
215 CORE_ADDR minpagesize = align;
216
217 if (bfd_get_flavour (abfd) == bfd_target_elf_flavour)
218 {
219 Elf_Internal_Ehdr *ehdr = elf_tdata (abfd)->elf_header;
220 Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
221 int i;
222
223 align = 1;
224
225 for (i = 0; i < ehdr->e_phnum; i++)
226 if (phdr[i].p_type == PT_LOAD && phdr[i].p_align > align)
227 align = phdr[i].p_align;
228
229 minpagesize = get_elf_backend_data (abfd)->minpagesize;
230 }
231
232 /* Turn it into a mask. */
233 align--;
234
235 /* If the changes match the alignment requirements, we
236 assume we're using a core file that was generated by the
237 same binary, just prelinked with a different base offset.
238 If it doesn't match, we may have a different binary, the
239 same binary with the dynamic table loaded at an unrelated
240 location, or anything, really. To avoid regressions,
241 don't adjust the base offset in the latter case, although
242 odds are that, if things really changed, debugging won't
243 quite work.
244
245 One might rather expect the condition
246 ((l_addr & align) == 0 && ((l_dynaddr - dynaddr) & align) == 0)
247 but the one below is relaxed for PPC. The PPC kernel supports
248 either 4k or 64k page sizes. To be prepared for 64k pages,
249 PPC ELF files are built using an alignment requirement of 64k.
250 However, when running on a kernel supporting 4k pages, the memory
251 mapping of the library may not actually happen on a 64k boundary!
252
253 (In the usual case where (l_addr & align) == 0, this check is
254 equivalent to the possibly expected check above.)
255
256 Even on PPC it must be zero-aligned at least for MINPAGESIZE. */
257
258 l_addr = l_dynaddr - dynaddr;
259
260 if ((l_addr & (minpagesize - 1)) == 0
261 && (l_addr & align) == ((l_dynaddr - dynaddr) & align))
262 {
263 if (info_verbose)
264 printf_unfiltered (_("Using PIC (Position Independent Code) "
265 "prelink displacement %s for \"%s\".\n"),
266 paddress (target_gdbarch (), l_addr),
267 so->so_name);
268 }
269 else
270 {
271 /* There is no way to verify that the library file matches. While
272 prelinking an unprelinked file (or unprelinking a prelinked one),
273 prelink can shift the DYNAMIC segment by an arbitrary offset
274 without any page size alignment. There is also no way to read the
275 ELF header and/or Program Headers here for even a limited
276 verification that they match. One could verify the DYNAMIC
277 segment. Still, the found address is the best
278 one GDB could find. */
279
280 warning (_(".dynamic section for \"%s\" "
281 "is not at the expected address "
282 "(wrong library or version mismatch?)"), so->so_name);
283 }
284 }
285
286 set_addr:
287 so->lm_info->l_addr = l_addr;
288 so->lm_info->l_addr_p = 1;
289 }
290
291 return so->lm_info->l_addr;
292 }
293
294 /* Per pspace SVR4 specific data. */
295
296 struct svr4_info
297 {
298 CORE_ADDR debug_base; /* Base of dynamic linker structures. */
299
300 /* Validity flag for debug_loader_offset. */
301 int debug_loader_offset_p;
302
303 /* Load address for the dynamic linker, inferred. */
304 CORE_ADDR debug_loader_offset;
305
306 /* Name of the dynamic linker, valid if debug_loader_offset_p. */
307 char *debug_loader_name;
308
309 /* Load map address for the main executable. */
310 CORE_ADDR main_lm_addr;
311
312 CORE_ADDR interp_text_sect_low;
313 CORE_ADDR interp_text_sect_high;
314 CORE_ADDR interp_plt_sect_low;
315 CORE_ADDR interp_plt_sect_high;
316 };
317
318 /* Per-program-space data key. */
319 static const struct program_space_data *solib_svr4_pspace_data;
320
321 static void
322 svr4_pspace_data_cleanup (struct program_space *pspace, void *arg)
323 {
324 struct svr4_info *info;
325
326 info = program_space_data (pspace, solib_svr4_pspace_data);
327 xfree (info);
328 }
329
330 /* Get the current svr4 data. If none is found yet, add it now. This
331 function always returns a valid object. */
332
333 static struct svr4_info *
334 get_svr4_info (void)
335 {
336 struct svr4_info *info;
337
338 info = program_space_data (current_program_space, solib_svr4_pspace_data);
339 if (info != NULL)
340 return info;
341
342 info = XZALLOC (struct svr4_info);
343 set_program_space_data (current_program_space, solib_svr4_pspace_data, info);
344 return info;
345 }
346
347 /* Local function prototypes */
348
349 static int match_main (const char *);
350
351 /* Read program header TYPE from inferior memory. The header is found
352 by scanning the OS auxiliary vector.
353
354 If TYPE == -1, return the program headers instead of the contents of
355 one program header.
356
357 Return a pointer to allocated memory holding the program header contents,
358 or NULL on failure. If successful, and unless P_SECT_SIZE is NULL, the
359 size of those contents is returned in *P_SECT_SIZE. Likewise, the target
360 architecture size (32-bit or 64-bit) is returned in *P_ARCH_SIZE. */
361
362 static gdb_byte *
363 read_program_header (int type, int *p_sect_size, int *p_arch_size)
364 {
365 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
366 CORE_ADDR at_phdr, at_phent, at_phnum, pt_phdr = 0;
367 int arch_size, sect_size;
368 CORE_ADDR sect_addr;
369 gdb_byte *buf;
370 int pt_phdr_p = 0;
371
372 /* Get required auxv elements from target. */
373 if (target_auxv_search (&current_target, AT_PHDR, &at_phdr) <= 0)
374 return 0;
375 if (target_auxv_search (&current_target, AT_PHENT, &at_phent) <= 0)
376 return 0;
377 if (target_auxv_search (&current_target, AT_PHNUM, &at_phnum) <= 0)
378 return 0;
379 if (!at_phdr || !at_phnum)
380 return 0;
381
382 /* Determine ELF architecture type. */
383 if (at_phent == sizeof (Elf32_External_Phdr))
384 arch_size = 32;
385 else if (at_phent == sizeof (Elf64_External_Phdr))
386 arch_size = 64;
387 else
388 return 0;
389
390 /* Find the requested segment. */
391 if (type == -1)
392 {
393 sect_addr = at_phdr;
394 sect_size = at_phent * at_phnum;
395 }
396 else if (arch_size == 32)
397 {
398 Elf32_External_Phdr phdr;
399 int i;
400
401 /* Search for requested PHDR. */
402 for (i = 0; i < at_phnum; i++)
403 {
404 int p_type;
405
406 if (target_read_memory (at_phdr + i * sizeof (phdr),
407 (gdb_byte *)&phdr, sizeof (phdr)))
408 return 0;
409
410 p_type = extract_unsigned_integer ((gdb_byte *) phdr.p_type,
411 4, byte_order);
412
413 if (p_type == PT_PHDR)
414 {
415 pt_phdr_p = 1;
416 pt_phdr = extract_unsigned_integer ((gdb_byte *) phdr.p_vaddr,
417 4, byte_order);
418 }
419
420 if (p_type == type)
421 break;
422 }
423
424 if (i == at_phnum)
425 return 0;
426
427 /* Retrieve address and size. */
428 sect_addr = extract_unsigned_integer ((gdb_byte *)phdr.p_vaddr,
429 4, byte_order);
430 sect_size = extract_unsigned_integer ((gdb_byte *)phdr.p_memsz,
431 4, byte_order);
432 }
433 else
434 {
435 Elf64_External_Phdr phdr;
436 int i;
437
438 /* Search for requested PHDR. */
439 for (i = 0; i < at_phnum; i++)
440 {
441 int p_type;
442
443 if (target_read_memory (at_phdr + i * sizeof (phdr),
444 (gdb_byte *)&phdr, sizeof (phdr)))
445 return 0;
446
447 p_type = extract_unsigned_integer ((gdb_byte *) phdr.p_type,
448 4, byte_order);
449
450 if (p_type == PT_PHDR)
451 {
452 pt_phdr_p = 1;
453 pt_phdr = extract_unsigned_integer ((gdb_byte *) phdr.p_vaddr,
454 8, byte_order);
455 }
456
457 if (p_type == type)
458 break;
459 }
460
461 if (i == at_phnum)
462 return 0;
463
464 /* Retrieve address and size. */
465 sect_addr = extract_unsigned_integer ((gdb_byte *)phdr.p_vaddr,
466 8, byte_order);
467 sect_size = extract_unsigned_integer ((gdb_byte *)phdr.p_memsz,
468 8, byte_order);
469 }
470
471 /* PT_PHDR is optional, but we really need it
472 for PIE to make this work in general. */
473
474 if (pt_phdr_p)
475 {
476 /* AT_PHDR is the real address in memory; PT_PHDR is what the program header
477 says it is. The relocation offset is the difference between the two. */
478 sect_addr = sect_addr + (at_phdr - pt_phdr);
479 }
480
481 /* Read in requested program header. */
482 buf = xmalloc (sect_size);
483 if (target_read_memory (sect_addr, buf, sect_size))
484 {
485 xfree (buf);
486 return NULL;
487 }
488
489 if (p_arch_size)
490 *p_arch_size = arch_size;
491 if (p_sect_size)
492 *p_sect_size = sect_size;
493
494 return buf;
495 }
496
497
498 /* Return program interpreter string. */
499 static char *
500 find_program_interpreter (void)
501 {
502 gdb_byte *buf = NULL;
503
504 /* If we have an exec_bfd, use its section table. */
505 if (exec_bfd
506 && bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
507 {
508 struct bfd_section *interp_sect;
509
510 interp_sect = bfd_get_section_by_name (exec_bfd, ".interp");
511 if (interp_sect != NULL)
512 {
513 int sect_size = bfd_section_size (exec_bfd, interp_sect);
514
515 buf = xmalloc (sect_size);
516 bfd_get_section_contents (exec_bfd, interp_sect, buf, 0, sect_size);
517 }
518 }
519
520 /* If we didn't find it, use the target auxiliary vector. */
521 if (!buf)
522 buf = read_program_header (PT_INTERP, NULL, NULL);
523
524 return (char *) buf;
525 }
526
527
528 /* Scan for DYNTAG in .dynamic section of ABFD. If DYNTAG is found 1 is
529 returned and the corresponding PTR is set. */
530
531 static int
532 scan_dyntag (int dyntag, bfd *abfd, CORE_ADDR *ptr)
533 {
534 int arch_size, step, sect_size;
535 long dyn_tag;
536 CORE_ADDR dyn_ptr, dyn_addr;
537 gdb_byte *bufend, *bufstart, *buf;
538 Elf32_External_Dyn *x_dynp_32;
539 Elf64_External_Dyn *x_dynp_64;
540 struct bfd_section *sect;
541 struct target_section *target_section;
542
543 if (abfd == NULL)
544 return 0;
545
546 if (bfd_get_flavour (abfd) != bfd_target_elf_flavour)
547 return 0;
548
549 arch_size = bfd_get_arch_size (abfd);
550 if (arch_size == -1)
551 return 0;
552
553 /* Find the start address of the .dynamic section. */
554 sect = bfd_get_section_by_name (abfd, ".dynamic");
555 if (sect == NULL)
556 return 0;
557
558 for (target_section = current_target_sections->sections;
559 target_section < current_target_sections->sections_end;
560 target_section++)
561 if (sect == target_section->the_bfd_section)
562 break;
563 if (target_section < current_target_sections->sections_end)
564 dyn_addr = target_section->addr;
565 else
566 {
567 /* ABFD may come from OBJFILE acting only as a symbol file without being
568 loaded into the target (see add_symbol_file_command). In that case,
569 fall back to the file VMA address, without the possibility of
570 having the section relocated to its actual in-memory address. */
571
572 dyn_addr = bfd_section_vma (abfd, sect);
573 }
574
575 /* Read in .dynamic from the BFD. We will get the actual value
576 from memory later. */
577 sect_size = bfd_section_size (abfd, sect);
578 buf = bufstart = alloca (sect_size);
579 if (!bfd_get_section_contents (abfd, sect,
580 buf, 0, sect_size))
581 return 0;
582
583 /* Iterate over BUF and scan for DYNTAG. If found, set PTR and return. */
584 step = (arch_size == 32) ? sizeof (Elf32_External_Dyn)
585 : sizeof (Elf64_External_Dyn);
586 for (bufend = buf + sect_size;
587 buf < bufend;
588 buf += step)
589 {
590 if (arch_size == 32)
591 {
592 x_dynp_32 = (Elf32_External_Dyn *) buf;
593 dyn_tag = bfd_h_get_32 (abfd, (bfd_byte *) x_dynp_32->d_tag);
594 dyn_ptr = bfd_h_get_32 (abfd, (bfd_byte *) x_dynp_32->d_un.d_ptr);
595 }
596 else
597 {
598 x_dynp_64 = (Elf64_External_Dyn *) buf;
599 dyn_tag = bfd_h_get_64 (abfd, (bfd_byte *) x_dynp_64->d_tag);
600 dyn_ptr = bfd_h_get_64 (abfd, (bfd_byte *) x_dynp_64->d_un.d_ptr);
601 }
602 if (dyn_tag == DT_NULL)
603 return 0;
604 if (dyn_tag == dyntag)
605 {
606 /* If requested, try to read the runtime value of this .dynamic
607 entry. */
608 if (ptr)
609 {
610 struct type *ptr_type;
611 gdb_byte ptr_buf[8];
612 CORE_ADDR ptr_addr;
613
614 ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
615 ptr_addr = dyn_addr + (buf - bufstart) + arch_size / 8;
616 if (target_read_memory (ptr_addr, ptr_buf, arch_size / 8) == 0)
617 dyn_ptr = extract_typed_address (ptr_buf, ptr_type);
618 *ptr = dyn_ptr;
619 }
620 return 1;
621 }
622 }
623
624 return 0;
625 }
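
/* For reference only: scan_dyntag above decodes the external (on-disk)
   representation from elf/external.h, but each entry it walks has the
   same shape as the familiar <elf.h> in-memory form, roughly:

       typedef struct
       {
         Elf64_Sxword d_tag;         // Entry type, e.g. DT_DEBUG or DT_NULL.
         union
         {
           Elf64_Xword d_val;        // Integer value.
           Elf64_Addr  d_ptr;        // Address value (what PTR receives).
         } d_un;
       } Elf64_Dyn;

   The 32-bit variant is the same except that every field is 4 bytes wide,
   which is what the ARCH_SIZE checks above select between.  */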
626
627 /* Scan for DYNTAG in .dynamic section of the target's main executable,
628 found by consulting the OS auxiliary vector. If DYNTAG is found, 1 is
629 returned and the corresponding PTR is set. */
630
631 static int
632 scan_dyntag_auxv (int dyntag, CORE_ADDR *ptr)
633 {
634 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
635 int sect_size, arch_size, step;
636 long dyn_tag;
637 CORE_ADDR dyn_ptr;
638 gdb_byte *bufend, *bufstart, *buf;
639
640 /* Read in .dynamic section. */
641 buf = bufstart = read_program_header (PT_DYNAMIC, &sect_size, &arch_size);
642 if (!buf)
643 return 0;
644
645 /* Iterate over BUF and scan for DYNTAG. If found, set PTR and return. */
646 step = (arch_size == 32) ? sizeof (Elf32_External_Dyn)
647 : sizeof (Elf64_External_Dyn);
648 for (bufend = buf + sect_size;
649 buf < bufend;
650 buf += step)
651 {
652 if (arch_size == 32)
653 {
654 Elf32_External_Dyn *dynp = (Elf32_External_Dyn *) buf;
655
656 dyn_tag = extract_unsigned_integer ((gdb_byte *) dynp->d_tag,
657 4, byte_order);
658 dyn_ptr = extract_unsigned_integer ((gdb_byte *) dynp->d_un.d_ptr,
659 4, byte_order);
660 }
661 else
662 {
663 Elf64_External_Dyn *dynp = (Elf64_External_Dyn *) buf;
664
665 dyn_tag = extract_unsigned_integer ((gdb_byte *) dynp->d_tag,
666 8, byte_order);
667 dyn_ptr = extract_unsigned_integer ((gdb_byte *) dynp->d_un.d_ptr,
668 8, byte_order);
669 }
670 if (dyn_tag == DT_NULL)
671 break;
672
673 if (dyn_tag == dyntag)
674 {
675 if (ptr)
676 *ptr = dyn_ptr;
677
678 xfree (bufstart);
679 return 1;
680 }
681 }
682
683 xfree (bufstart);
684 return 0;
685 }
686
687 /* Locate the base address of dynamic linker structs for SVR4 elf
688 targets.
689
690 For SVR4 elf targets the address of the dynamic linker's runtime
691 structure is contained within the dynamic info section in the
692 executable file. The dynamic section is also mapped into the
693 inferior address space. Because the runtime loader fills in the
694 real address before starting the inferior, we have to read in the
695 dynamic info section from the inferior address space.
696 If there are any errors while trying to find the address, we
697 silently return 0, otherwise the found address is returned. */
698
699 static CORE_ADDR
700 elf_locate_base (void)
701 {
702 struct minimal_symbol *msymbol;
703 CORE_ADDR dyn_ptr;
704
705 /* Look for DT_MIPS_RLD_MAP first. MIPS executables use this
706 instead of DT_DEBUG, although they sometimes contain an unused
707 DT_DEBUG. */
708 if (scan_dyntag (DT_MIPS_RLD_MAP, exec_bfd, &dyn_ptr)
709 || scan_dyntag_auxv (DT_MIPS_RLD_MAP, &dyn_ptr))
710 {
711 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
712 gdb_byte *pbuf;
713 int pbuf_size = TYPE_LENGTH (ptr_type);
714
715 pbuf = alloca (pbuf_size);
716 /* DT_MIPS_RLD_MAP contains a pointer to the address
717 of the dynamic link structure. */
718 if (target_read_memory (dyn_ptr, pbuf, pbuf_size))
719 return 0;
720 return extract_typed_address (pbuf, ptr_type);
721 }
722
723 /* Find DT_DEBUG. */
724 if (scan_dyntag (DT_DEBUG, exec_bfd, &dyn_ptr)
725 || scan_dyntag_auxv (DT_DEBUG, &dyn_ptr))
726 return dyn_ptr;
727
728 /* This may be a static executable. Look for the symbol
729 conventionally named _r_debug, as a last resort. */
730 msymbol = lookup_minimal_symbol ("_r_debug", NULL, symfile_objfile);
731 if (msymbol != NULL)
732 return SYMBOL_VALUE_ADDRESS (msymbol);
733
734 /* DT_DEBUG entry not found. */
735 return 0;
736 }
737
738 /* Locate the base address of dynamic linker structs.
739
740 For both the SunOS and SVR4 shared library implementations, if the
741 inferior executable has been linked dynamically, there is a single
742 address somewhere in the inferior's data space which is the key to
743 locating all of the dynamic linker's runtime structures. This
744 address is the value of the debug base symbol. The job of this
745 function is to find and return that address, or to return 0 if there
746 is no such address (the executable is statically linked for example).
747
748 For SunOS, the job is almost trivial, since the dynamic linker and
749 all of its structures are statically linked to the executable at
750 link time. Thus the symbol for the address we are looking for has
751 already been added to the minimal symbol table for the executable's
752 objfile at the time the symbol file's symbols were read, and all we
753 have to do is look it up there. Note that we explicitly do NOT want
754 to find the copies in the shared library.
755
756 The SVR4 version is a bit more complicated because the address
757 is contained somewhere in the dynamic info section. We have to go
758 to a lot more work to discover the address of the debug base symbol.
759 Because of this complexity, we cache the value we find and return that
760 value on subsequent invocations. Note there is no copy in the
761 executable symbol tables. */
762
763 static CORE_ADDR
764 locate_base (struct svr4_info *info)
765 {
766 /* Check to see if we have a currently valid address, and if so, avoid
767 doing all this work again and just return the cached address. If
768 we have no cached address, try to locate it in the dynamic info
769 section for ELF executables. There's no point in doing any of this
770 though if we don't have some link map offsets to work with. */
771
772 if (info->debug_base == 0 && svr4_have_link_map_offsets ())
773 info->debug_base = elf_locate_base ();
774 return info->debug_base;
775 }
776
777 /* Find the first element in the inferior's dynamic link map, and
778 return its address in the inferior. Return zero if the address
779 could not be determined.
780
781 FIXME: Perhaps we should validate the info somehow, perhaps by
782 checking r_version for a known version number, or r_state for
783 RT_CONSISTENT. */
784
785 static CORE_ADDR
786 solib_svr4_r_map (struct svr4_info *info)
787 {
788 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
789 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
790 CORE_ADDR addr = 0;
791 volatile struct gdb_exception ex;
792
793 TRY_CATCH (ex, RETURN_MASK_ERROR)
794 {
795 addr = read_memory_typed_address (info->debug_base + lmo->r_map_offset,
796 ptr_type);
797 }
798 exception_print (gdb_stderr, ex);
799 return addr;
800 }
801
802 /* Find r_brk from the inferior's debug base. */
803
804 static CORE_ADDR
805 solib_svr4_r_brk (struct svr4_info *info)
806 {
807 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
808 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
809
810 return read_memory_typed_address (info->debug_base + lmo->r_brk_offset,
811 ptr_type);
812 }
813
814 /* Find the link map for the dynamic linker (if it is not in the
815 normal list of loaded shared objects). */
816
817 static CORE_ADDR
818 solib_svr4_r_ldsomap (struct svr4_info *info)
819 {
820 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
821 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
822 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
823 ULONGEST version;
824
825 /* Check version, and return zero if `struct r_debug' doesn't have
826 the r_ldsomap member. */
827 version
828 = read_memory_unsigned_integer (info->debug_base + lmo->r_version_offset,
829 lmo->r_version_size, byte_order);
830 if (version < 2 || lmo->r_ldsomap_offset == -1)
831 return 0;
832
833 return read_memory_typed_address (info->debug_base + lmo->r_ldsomap_offset,
834 ptr_type);
835 }
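
/* For reference only: the accessors above index into the dynamic linker's
   `struct r_debug'. On glibc-based systems <link.h> declares it roughly
   as sketched below; Solaris appends further members, among them
   r_ldsomap, which is why r_ldsomap_offset may be -1 elsewhere.

       struct r_debug
       {
         int r_version;              // Protocol version, checked above.
         struct link_map *r_map;     // Head of the chain of loaded objects.
         ElfW(Addr) r_brk;           // Address of the shared-library event function.
         enum { RT_CONSISTENT, RT_ADD, RT_DELETE } r_state;
         ElfW(Addr) r_ldbase;        // Base address of the dynamic linker.
       };
*/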
836
837 /* On Solaris systems with some versions of the dynamic linker,
838 ld.so's l_name pointer points to the SONAME in the string table
839 rather than into writable memory. So that GDB can find shared
840 libraries when loading a core file generated by gcore, ensure that
841 memory areas containing the l_name string are saved in the core
842 file. */
843
844 static int
845 svr4_keep_data_in_core (CORE_ADDR vaddr, unsigned long size)
846 {
847 struct svr4_info *info;
848 CORE_ADDR ldsomap;
849 struct so_list *new;
850 struct cleanup *old_chain;
851 CORE_ADDR name_lm;
852
853 info = get_svr4_info ();
854
855 info->debug_base = 0;
856 locate_base (info);
857 if (!info->debug_base)
858 return 0;
859
860 ldsomap = solib_svr4_r_ldsomap (info);
861 if (!ldsomap)
862 return 0;
863
864 new = XZALLOC (struct so_list);
865 old_chain = make_cleanup (xfree, new);
866 new->lm_info = lm_info_read (ldsomap);
867 make_cleanup (xfree, new->lm_info);
868 name_lm = new->lm_info ? new->lm_info->l_name : 0;
869 do_cleanups (old_chain);
870
871 return (name_lm >= vaddr && name_lm < vaddr + size);
872 }
873
874 /* Implement the "open_symbol_file_object" target_so_ops method.
875
876 If no open symbol file, attempt to locate and open the main symbol
877 file. On SVR4 systems, this is the first link map entry. If its
878 name is here, we can open it. Useful when attaching to a process
879 without first loading its symbol file. */
880
881 static int
882 open_symbol_file_object (void *from_ttyp)
883 {
884 CORE_ADDR lm, l_name;
885 char *filename;
886 int errcode;
887 int from_tty = *(int *)from_ttyp;
888 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
889 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
890 int l_name_size = TYPE_LENGTH (ptr_type);
891 gdb_byte *l_name_buf = xmalloc (l_name_size);
892 struct cleanup *cleanups = make_cleanup (xfree, l_name_buf);
893 struct svr4_info *info = get_svr4_info ();
894
895 if (symfile_objfile)
896 if (!query (_("Attempt to reload symbols from process? ")))
897 {
898 do_cleanups (cleanups);
899 return 0;
900 }
901
902 /* Always locate the debug struct, in case it has moved. */
903 info->debug_base = 0;
904 if (locate_base (info) == 0)
905 {
906 do_cleanups (cleanups);
907 return 0; /* failed somehow... */
908 }
909
910 /* First link map member should be the executable. */
911 lm = solib_svr4_r_map (info);
912 if (lm == 0)
913 {
914 do_cleanups (cleanups);
915 return 0; /* failed somehow... */
916 }
917
918 /* Read address of name from target memory to GDB. */
919 read_memory (lm + lmo->l_name_offset, l_name_buf, l_name_size);
920
921 /* Convert the address to host format. */
922 l_name = extract_typed_address (l_name_buf, ptr_type);
923
924 if (l_name == 0)
925 {
926 do_cleanups (cleanups);
927 return 0; /* No filename. */
928 }
929
930 /* Now fetch the filename from target memory. */
931 target_read_string (l_name, &filename, SO_NAME_MAX_PATH_SIZE - 1, &errcode);
932 make_cleanup (xfree, filename);
933
934 if (errcode)
935 {
936 warning (_("failed to read exec filename from attached file: %s"),
937 safe_strerror (errcode));
938 do_cleanups (cleanups);
939 return 0;
940 }
941
942 /* Have a pathname: read the symbol file. */
943 symbol_file_add_main (filename, from_tty);
944
945 do_cleanups (cleanups);
946 return 1;
947 }
948
949 /* Data exchange structure for the XML parser as returned by
950 svr4_current_sos_via_xfer_libraries. */
951
952 struct svr4_library_list
953 {
954 struct so_list *head, **tailp;
955
956 /* Inferior address of struct link_map used for the main executable. It is
957 NULL if not known. */
958 CORE_ADDR main_lm;
959 };
960
961 /* Implementation for target_so_ops.free_so. */
962
963 static void
964 svr4_free_so (struct so_list *so)
965 {
966 xfree (so->lm_info);
967 }
968
969 /* Implement target_so_ops.clear_so. */
970
971 static void
972 svr4_clear_so (struct so_list *so)
973 {
974 if (so->lm_info != NULL)
975 so->lm_info->l_addr_p = 0;
976 }
977
978 /* Free so_list built so far (called via cleanup). */
979
980 static void
981 svr4_free_library_list (void *p_list)
982 {
983 struct so_list *list = *(struct so_list **) p_list;
984
985 while (list != NULL)
986 {
987 struct so_list *next = list->next;
988
989 free_so (list);
990 list = next;
991 }
992 }
993
994 #ifdef HAVE_LIBEXPAT
995
996 #include "xml-support.h"
997
998 /* Handle the start of a <library> element. Note: new elements are added
999 at the tail of the list, keeping the list in order. */
1000
1001 static void
1002 library_list_start_library (struct gdb_xml_parser *parser,
1003 const struct gdb_xml_element *element,
1004 void *user_data, VEC(gdb_xml_value_s) *attributes)
1005 {
1006 struct svr4_library_list *list = user_data;
1007 const char *name = xml_find_attribute (attributes, "name")->value;
1008 ULONGEST *lmp = xml_find_attribute (attributes, "lm")->value;
1009 ULONGEST *l_addrp = xml_find_attribute (attributes, "l_addr")->value;
1010 ULONGEST *l_ldp = xml_find_attribute (attributes, "l_ld")->value;
1011 struct so_list *new_elem;
1012
1013 new_elem = XZALLOC (struct so_list);
1014 new_elem->lm_info = XZALLOC (struct lm_info);
1015 new_elem->lm_info->lm_addr = *lmp;
1016 new_elem->lm_info->l_addr_inferior = *l_addrp;
1017 new_elem->lm_info->l_ld = *l_ldp;
1018
1019 strncpy (new_elem->so_name, name, sizeof (new_elem->so_name) - 1);
1020 new_elem->so_name[sizeof (new_elem->so_name) - 1] = 0;
1021 strcpy (new_elem->so_original_name, new_elem->so_name);
1022
1023 *list->tailp = new_elem;
1024 list->tailp = &new_elem->next;
1025 }
1026
1027 /* Handle the start of a <library-list-svr4> element. */
1028
1029 static void
1030 svr4_library_list_start_list (struct gdb_xml_parser *parser,
1031 const struct gdb_xml_element *element,
1032 void *user_data, VEC(gdb_xml_value_s) *attributes)
1033 {
1034 struct svr4_library_list *list = user_data;
1035 const char *version = xml_find_attribute (attributes, "version")->value;
1036 struct gdb_xml_value *main_lm = xml_find_attribute (attributes, "main-lm");
1037
1038 if (strcmp (version, "1.0") != 0)
1039 gdb_xml_error (parser,
1040 _("SVR4 Library list has unsupported version \"%s\""),
1041 version);
1042
1043 if (main_lm)
1044 list->main_lm = *(ULONGEST *) main_lm->value;
1045 }
1046
1047 /* The allowed elements and attributes for an XML library list.
1048 The root element is a <library-list>. */
1049
1050 static const struct gdb_xml_attribute svr4_library_attributes[] =
1051 {
1052 { "name", GDB_XML_AF_NONE, NULL, NULL },
1053 { "lm", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1054 { "l_addr", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1055 { "l_ld", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1056 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1057 };
1058
1059 static const struct gdb_xml_element svr4_library_list_children[] =
1060 {
1061 {
1062 "library", svr4_library_attributes, NULL,
1063 GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL,
1064 library_list_start_library, NULL
1065 },
1066 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1067 };
1068
1069 static const struct gdb_xml_attribute svr4_library_list_attributes[] =
1070 {
1071 { "version", GDB_XML_AF_NONE, NULL, NULL },
1072 { "main-lm", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
1073 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1074 };
1075
1076 static const struct gdb_xml_element svr4_library_list_elements[] =
1077 {
1078 { "library-list-svr4", svr4_library_list_attributes, svr4_library_list_children,
1079 GDB_XML_EF_NONE, svr4_library_list_start_list, NULL },
1080 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1081 };
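
/* A document matching the elements above (and accepted by
   svr4_parse_libraries below) looks like this; the addresses are
   illustrative only:

       <library-list-svr4 version="1.0" main-lm="0x7ffff7ffd9c8">
         <library name="/lib64/libc.so.6" lm="0x7ffff7fc4000"
                  l_addr="0x7ffff7a10000" l_ld="0x7ffff7db5b80"/>
       </library-list-svr4>
*/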
1082
1083 /* Parse qXfer:libraries:read packet DOCUMENT into *LIST.
1084 
1085 Return 0 if the document could not be parsed; any partially built list
1086 is freed in that case. Return 1 if *LIST contains the library list; it
1087 may be empty, and the caller is responsible for freeing all its entries. */
1088
1089 static int
1090 svr4_parse_libraries (const char *document, struct svr4_library_list *list)
1091 {
1092 struct cleanup *back_to = make_cleanup (svr4_free_library_list,
1093 &list->head);
1094
1095 memset (list, 0, sizeof (*list));
1096 list->tailp = &list->head;
1097 if (gdb_xml_parse_quick (_("target library list"), "library-list.dtd",
1098 svr4_library_list_elements, document, list) == 0)
1099 {
1100 /* Parsed successfully, keep the result. */
1101 discard_cleanups (back_to);
1102 return 1;
1103 }
1104
1105 do_cleanups (back_to);
1106 return 0;
1107 }
1108
1109 /* Attempt to get so_list from target via qXfer:libraries:read packet.
1110
1111 Return 0 if packet not supported, *SO_LIST_RETURN is not modified in such
1112 case. Return 1 if *SO_LIST_RETURN contains the library list, it may be
1113 empty, caller is responsible for freeing all its entries. */
1114
1115 static int
1116 svr4_current_sos_via_xfer_libraries (struct svr4_library_list *list)
1117 {
1118 char *svr4_library_document;
1119 int result;
1120 struct cleanup *back_to;
1121
1122 /* Fetch the list of shared libraries. */
1123 svr4_library_document = target_read_stralloc (&current_target,
1124 TARGET_OBJECT_LIBRARIES_SVR4,
1125 NULL);
1126 if (svr4_library_document == NULL)
1127 return 0;
1128
1129 back_to = make_cleanup (xfree, svr4_library_document);
1130 result = svr4_parse_libraries (svr4_library_document, list);
1131 do_cleanups (back_to);
1132
1133 return result;
1134 }
1135
1136 #else
1137
1138 static int
1139 svr4_current_sos_via_xfer_libraries (struct svr4_library_list *list)
1140 {
1141 return 0;
1142 }
1143
1144 #endif
1145
1146 /* If no shared library information is available from the dynamic
1147 linker, build a fallback list from other sources. */
1148
1149 static struct so_list *
1150 svr4_default_sos (void)
1151 {
1152 struct svr4_info *info = get_svr4_info ();
1153 struct so_list *new;
1154
1155 if (!info->debug_loader_offset_p)
1156 return NULL;
1157
1158 new = XZALLOC (struct so_list);
1159
1160 new->lm_info = xzalloc (sizeof (struct lm_info));
1161
1162 /* Nothing will ever check the other fields if we set l_addr_p. */
1163 new->lm_info->l_addr = info->debug_loader_offset;
1164 new->lm_info->l_addr_p = 1;
1165
1166 strncpy (new->so_name, info->debug_loader_name, SO_NAME_MAX_PATH_SIZE - 1);
1167 new->so_name[SO_NAME_MAX_PATH_SIZE - 1] = '\0';
1168 strcpy (new->so_original_name, new->so_name);
1169
1170 return new;
1171 }
1172
1173 /* Read the whole inferior libraries chain starting at address LM. Add the
1174 entries to the tail referenced by LINK_PTR_PTR. Ignore the first entry if
1175 IGNORE_FIRST and set global MAIN_LM_ADDR according to it. */
1176
1177 static void
1178 svr4_read_so_list (CORE_ADDR lm, struct so_list ***link_ptr_ptr,
1179 int ignore_first)
1180 {
1181 CORE_ADDR prev_lm = 0, next_lm;
1182
1183 for (; lm != 0; prev_lm = lm, lm = next_lm)
1184 {
1185 struct so_list *new;
1186 struct cleanup *old_chain;
1187 int errcode;
1188 char *buffer;
1189
1190 new = XZALLOC (struct so_list);
1191 old_chain = make_cleanup_free_so (new);
1192
1193 new->lm_info = lm_info_read (lm);
1194 if (new->lm_info == NULL)
1195 {
1196 do_cleanups (old_chain);
1197 break;
1198 }
1199
1200 next_lm = new->lm_info->l_next;
1201
1202 if (new->lm_info->l_prev != prev_lm)
1203 {
1204 warning (_("Corrupted shared library list: %s != %s"),
1205 paddress (target_gdbarch (), prev_lm),
1206 paddress (target_gdbarch (), new->lm_info->l_prev));
1207 do_cleanups (old_chain);
1208 break;
1209 }
1210
1211 /* For SVR4 versions, the first entry in the link map is for the
1212 inferior executable, so we must ignore it. For some versions of
1213 SVR4, it has no name. For others (Solaris 2.3 for example), it
1214 does have a name, so we can no longer use a missing name to
1215 decide when to ignore it. */
1216 if (ignore_first && new->lm_info->l_prev == 0)
1217 {
1218 struct svr4_info *info = get_svr4_info ();
1219
1220 info->main_lm_addr = new->lm_info->lm_addr;
1221 do_cleanups (old_chain);
1222 continue;
1223 }
1224
1225 /* Extract this shared object's name. */
1226 target_read_string (new->lm_info->l_name, &buffer,
1227 SO_NAME_MAX_PATH_SIZE - 1, &errcode);
1228 if (errcode != 0)
1229 {
1230 warning (_("Can't read pathname for load map: %s."),
1231 safe_strerror (errcode));
1232 do_cleanups (old_chain);
1233 continue;
1234 }
1235
1236 strncpy (new->so_name, buffer, SO_NAME_MAX_PATH_SIZE - 1);
1237 new->so_name[SO_NAME_MAX_PATH_SIZE - 1] = '\0';
1238 strcpy (new->so_original_name, new->so_name);
1239 xfree (buffer);
1240
1241 /* If this entry has no name, or its name matches the name
1242 for the main executable, don't include it in the list. */
1243 if (! new->so_name[0] || match_main (new->so_name))
1244 {
1245 do_cleanups (old_chain);
1246 continue;
1247 }
1248
1249 discard_cleanups (old_chain);
1250 new->next = 0;
1251 **link_ptr_ptr = new;
1252 *link_ptr_ptr = &new->next;
1253 }
1254 }
1255
1256 /* Implement the "current_sos" target_so_ops method. */
1257
1258 static struct so_list *
1259 svr4_current_sos (void)
1260 {
1261 CORE_ADDR lm;
1262 struct so_list *head = NULL;
1263 struct so_list **link_ptr = &head;
1264 struct svr4_info *info;
1265 struct cleanup *back_to;
1266 int ignore_first;
1267 struct svr4_library_list library_list;
1268
1269 /* Fall back to manual examination of the target if the packet is not
1270 supported or gdbserver failed to find DT_DEBUG. gdb.server/solib-list.exp
1271 tests a case where gdbserver cannot find the shared libraries list while
1272 GDB itself is able to find it via SYMFILE_OBJFILE.
1273
1274 Unfortunately statically linked inferiors will also fall back through this
1275 suboptimal code path. */
1276
1277 if (svr4_current_sos_via_xfer_libraries (&library_list))
1278 {
1279 if (library_list.main_lm)
1280 {
1281 info = get_svr4_info ();
1282 info->main_lm_addr = library_list.main_lm;
1283 }
1284
1285 return library_list.head ? library_list.head : svr4_default_sos ();
1286 }
1287
1288 info = get_svr4_info ();
1289
1290 /* Always locate the debug struct, in case it has moved. */
1291 info->debug_base = 0;
1292 locate_base (info);
1293
1294 /* If we can't find the dynamic linker's base structure, this
1295 must not be a dynamically linked executable. Hmm. */
1296 if (! info->debug_base)
1297 return svr4_default_sos ();
1298
1299 /* Assume that everything is a library if the dynamic loader was loaded
1300 late by a static executable. */
1301 if (exec_bfd && bfd_get_section_by_name (exec_bfd, ".dynamic") == NULL)
1302 ignore_first = 0;
1303 else
1304 ignore_first = 1;
1305
1306 back_to = make_cleanup (svr4_free_library_list, &head);
1307
1308 /* Walk the inferior's link map list, and build our list of
1309 `struct so_list' nodes. */
1310 lm = solib_svr4_r_map (info);
1311 if (lm)
1312 svr4_read_so_list (lm, &link_ptr, ignore_first);
1313
1314 /* On Solaris, the dynamic linker is not in the normal list of
1315 shared objects, so make sure we pick it up too. Having
1316 symbol information for the dynamic linker is quite crucial
1317 for skipping dynamic linker resolver code. */
1318 lm = solib_svr4_r_ldsomap (info);
1319 if (lm)
1320 svr4_read_so_list (lm, &link_ptr, 0);
1321
1322 discard_cleanups (back_to);
1323
1324 if (head == NULL)
1325 return svr4_default_sos ();
1326
1327 return head;
1328 }
1329
1330 /* Get the address of the link_map for a given OBJFILE. */
1331
1332 CORE_ADDR
1333 svr4_fetch_objfile_link_map (struct objfile *objfile)
1334 {
1335 struct so_list *so;
1336 struct svr4_info *info = get_svr4_info ();
1337
1338 /* Cause svr4_current_sos() to be run if it hasn't been already. */
1339 if (info->main_lm_addr == 0)
1340 solib_add (NULL, 0, &current_target, auto_solib_add);
1341
1342 /* svr4_current_sos() will set main_lm_addr for the main executable. */
1343 if (objfile == symfile_objfile)
1344 return info->main_lm_addr;
1345
1346 /* The other link map addresses may be found by examining the list
1347 of shared libraries. */
1348 for (so = master_so_list (); so; so = so->next)
1349 if (so->objfile == objfile)
1350 return so->lm_info->lm_addr;
1351
1352 /* Not found! */
1353 return 0;
1354 }
1355
1356 /* On some systems, the only way to recognize the link map entry for
1357 the main executable file is by looking at its name. Return
1358 non-zero iff SONAME matches one of the known main executable names. */
1359
1360 static int
1361 match_main (const char *soname)
1362 {
1363 const char * const *mainp;
1364
1365 for (mainp = main_name_list; *mainp != NULL; mainp++)
1366 {
1367 if (strcmp (soname, *mainp) == 0)
1368 return (1);
1369 }
1370
1371 return (0);
1372 }
1373
1374 /* Return 1 if PC lies in the dynamic symbol resolution code of the
1375 SVR4 run time loader. */
1376
1377 int
1378 svr4_in_dynsym_resolve_code (CORE_ADDR pc)
1379 {
1380 struct svr4_info *info = get_svr4_info ();
1381
1382 return ((pc >= info->interp_text_sect_low
1383 && pc < info->interp_text_sect_high)
1384 || (pc >= info->interp_plt_sect_low
1385 && pc < info->interp_plt_sect_high)
1386 || in_plt_section (pc, NULL)
1387 || in_gnu_ifunc_stub (pc));
1388 }
1389
1390 /* Given an executable's ABFD and target, compute the entry-point
1391 address. */
1392
1393 static CORE_ADDR
1394 exec_entry_point (struct bfd *abfd, struct target_ops *targ)
1395 {
1396 CORE_ADDR addr;
1397
1398 /* KevinB wrote ... for most targets, the address returned by
1399 bfd_get_start_address() is the entry point for the start
1400 function. But, for some targets, bfd_get_start_address() returns
1401 the address of a function descriptor from which the entry point
1402 address may be extracted. This address is extracted by
1403 gdbarch_convert_from_func_ptr_addr(). The method
1404 gdbarch_convert_from_func_ptr_addr() is merely the identity
1405 function for targets which don't use function descriptors. */
1406 addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
1407 bfd_get_start_address (abfd),
1408 targ);
1409 return gdbarch_addr_bits_remove (target_gdbarch (), addr);
1410 }
1411
1412 /* Helper function for gdb_bfd_lookup_symbol. */
1413
1414 static int
1415 cmp_name_and_sec_flags (asymbol *sym, void *data)
1416 {
1417 return (strcmp (sym->name, (const char *) data) == 0
1418 && (sym->section->flags & (SEC_CODE | SEC_DATA)) != 0);
1419 }
1420 /* Arrange for dynamic linker to hit breakpoint.
1421
1422 Both the SunOS and the SVR4 dynamic linkers have, as part of their
1423 debugger interface, support for arranging for the inferior to hit
1424 a breakpoint after mapping in the shared libraries. This function
1425 enables that breakpoint.
1426
1427 For SunOS, there is a special flag location (in_debugger) which we
1428 set to 1. When the dynamic linker sees this flag set, it will set
1429 a breakpoint at a location known only to itself, after saving the
1430 original contents of that place and the breakpoint address itself,
1431 in its own internal structures. When we resume the inferior, it
1432 will eventually take a SIGTRAP when it runs into the breakpoint.
1433 We handle this (in a different place) by restoring the contents of
1434 the breakpointed location (which is only known after it stops),
1435 chasing around to locate the shared libraries that have been
1436 loaded, then resuming.
1437
1438 For SVR4, the debugger interface structure contains a member (r_brk)
1439 which is statically initialized at the time the shared library is
1440 built, to the offset of a function (_r_debug_state) which is guaran-
1441 teed to be called once before mapping in a library, and again when
1442 the mapping is complete. At the time we are examining this member,
1443 it contains only the unrelocated offset of the function, so we have
1444 to do our own relocation. Later, when the dynamic linker actually
1445 runs, it relocates r_brk to be the actual address of _r_debug_state().
1446
1447 The debugger interface structure also contains an enumeration which
1448 is set to either RT_ADD or RT_DELETE prior to changing the mapping,
1449 depending upon whether or not the library is being mapped or unmapped,
1450 and then set to RT_CONSISTENT after the library is mapped/unmapped. */
1451
1452 static int
1453 enable_break (struct svr4_info *info, int from_tty)
1454 {
1455 struct minimal_symbol *msymbol;
1456 const char * const *bkpt_namep;
1457 asection *interp_sect;
1458 char *interp_name;
1459 CORE_ADDR sym_addr;
1460
1461 info->interp_text_sect_low = info->interp_text_sect_high = 0;
1462 info->interp_plt_sect_low = info->interp_plt_sect_high = 0;
1463
1464 /* If we already have a shared library list in the target, and
1465 r_debug contains r_brk, set the breakpoint there - this should
1466 mean r_brk has already been relocated. Assume the dynamic linker
1467 is the object containing r_brk. */
1468
1469 solib_add (NULL, from_tty, &current_target, auto_solib_add);
1470 sym_addr = 0;
1471 if (info->debug_base && solib_svr4_r_map (info) != 0)
1472 sym_addr = solib_svr4_r_brk (info);
1473
1474 if (sym_addr != 0)
1475 {
1476 struct obj_section *os;
1477
1478 sym_addr = gdbarch_addr_bits_remove
1479 (target_gdbarch (), gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
1480 sym_addr,
1481 &current_target));
1482
1483 /* On at least some versions of Solaris there's a dynamic relocation
1484 on _r_debug.r_brk and SYM_ADDR may not be relocated yet, e.g., if
1485 we get control before the dynamic linker has self-relocated.
1486 Check if SYM_ADDR is in a known section; if it is, assume we can
1487 trust its value. This is just a heuristic though, it could go away
1488 or be replaced if it's getting in the way.
1489
1490 On ARM we need to know whether the ISA of rtld_db_dlactivity (or
1491 however it's spelled in your particular system) is ARM or Thumb.
1492 That knowledge is encoded in the address, if it's Thumb the low bit
1493 is 1. However, we've stripped that info above and it's not clear
1494 what all the consequences are of passing a non-addr_bits_remove'd
1495 address to create_solib_event_breakpoint. The call to
1496 find_pc_section verifies we know about the address and have some
1497 hope of computing the right kind of breakpoint to use (via
1498 symbol info). It does mean that GDB needs to be pointed at a
1499 non-stripped version of the dynamic linker in order to obtain
1500 information it already knows about. Sigh. */
1501
1502 os = find_pc_section (sym_addr);
1503 if (os != NULL)
1504 {
1505 /* Record the relocated start and end address of the dynamic linker
1506 text and plt section for svr4_in_dynsym_resolve_code. */
1507 bfd *tmp_bfd;
1508 CORE_ADDR load_addr;
1509
1510 tmp_bfd = os->objfile->obfd;
1511 load_addr = ANOFFSET (os->objfile->section_offsets,
1512 SECT_OFF_TEXT (os->objfile));
1513
1514 interp_sect = bfd_get_section_by_name (tmp_bfd, ".text");
1515 if (interp_sect)
1516 {
1517 info->interp_text_sect_low =
1518 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
1519 info->interp_text_sect_high =
1520 info->interp_text_sect_low
1521 + bfd_section_size (tmp_bfd, interp_sect);
1522 }
1523 interp_sect = bfd_get_section_by_name (tmp_bfd, ".plt");
1524 if (interp_sect)
1525 {
1526 info->interp_plt_sect_low =
1527 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
1528 info->interp_plt_sect_high =
1529 info->interp_plt_sect_low
1530 + bfd_section_size (tmp_bfd, interp_sect);
1531 }
1532
1533 create_solib_event_breakpoint (target_gdbarch (), sym_addr);
1534 return 1;
1535 }
1536 }
1537
1538 /* Find the program interpreter; if not found, warn the user and drop
1539 into the old breakpoint at symbol code. */
1540 interp_name = find_program_interpreter ();
1541 if (interp_name)
1542 {
1543 CORE_ADDR load_addr = 0;
1544 int load_addr_found = 0;
1545 int loader_found_in_list = 0;
1546 struct so_list *so;
1547 bfd *tmp_bfd = NULL;
1548 struct target_ops *tmp_bfd_target;
1549 volatile struct gdb_exception ex;
1550
1551 sym_addr = 0;
1552
1553 /* Now we need to figure out where the dynamic linker was
1554 loaded so that we can load its symbols and place a breakpoint
1555 in the dynamic linker itself.
1556
1557 This address is stored on the stack. However, I've been unable
1558 to find any magic formula to find it for Solaris (appears to
1559 be trivial on GNU/Linux). Therefore, we have to try an alternate
1560 mechanism to find the dynamic linker's base address. */
1561
1562 TRY_CATCH (ex, RETURN_MASK_ALL)
1563 {
1564 tmp_bfd = solib_bfd_open (interp_name);
1565 }
1566 if (tmp_bfd == NULL)
1567 goto bkpt_at_symbol;
1568
1569 /* Now convert the TMP_BFD into a target. That way both target and
1570 BFD operations can be used. */
1571 tmp_bfd_target = target_bfd_reopen (tmp_bfd);
1572 /* target_bfd_reopen acquired its own reference, so we can
1573 release ours now. */
1574 gdb_bfd_unref (tmp_bfd);
1575
1576 /* On a running target, we can get the dynamic linker's base
1577 address from the shared library table. */
1578 so = master_so_list ();
1579 while (so)
1580 {
1581 if (svr4_same_1 (interp_name, so->so_original_name))
1582 {
1583 load_addr_found = 1;
1584 loader_found_in_list = 1;
1585 load_addr = lm_addr_check (so, tmp_bfd);
1586 break;
1587 }
1588 so = so->next;
1589 }
1590
1591 /* If we were not able to find the base address of the loader
1592 from our so_list, then try using the AT_BASE auxiliary entry.
1593 if (!load_addr_found)
1594 if (target_auxv_search (&current_target, AT_BASE, &load_addr) > 0)
1595 {
1596 int addr_bit = gdbarch_addr_bit (target_gdbarch ());
1597
1598 /* Ensure LOAD_ADDR has proper sign in its possible upper bits so
1599 that `+ load_addr' will overflow CORE_ADDR width not creating
1600 invalid addresses like 0x101234567 for 32bit inferiors on 64bit
1601 GDB. */
1602
1603 if (addr_bit < (sizeof (CORE_ADDR) * HOST_CHAR_BIT))
1604 {
1605 CORE_ADDR space_size = (CORE_ADDR) 1 << addr_bit;
1606 CORE_ADDR tmp_entry_point = exec_entry_point (tmp_bfd,
1607 tmp_bfd_target);
1608
1609 gdb_assert (load_addr < space_size);
1610
1611 /* TMP_ENTRY_POINT exceeding SPACE_SIZE would be for prelinked
1612 64bit ld.so with 32bit executable, it should not happen. */
1613
1614 if (tmp_entry_point < space_size
1615 && tmp_entry_point + load_addr >= space_size)
1616 load_addr -= space_size;
1617 }
1618
1619 load_addr_found = 1;
1620 }
1621
1622 /* Otherwise we find the dynamic linker's base address by examining
1623 the current pc (which should point at the entry point for the
1624 dynamic linker) and subtracting the offset of the entry point.
1625
1626 This is more fragile than the previous approaches, but is a good
1627 fallback method because it has actually been working well in
1628 most cases. */
1629 if (!load_addr_found)
1630 {
1631 struct regcache *regcache
1632 = get_thread_arch_regcache (inferior_ptid, target_gdbarch ());
1633
1634 load_addr = (regcache_read_pc (regcache)
1635 - exec_entry_point (tmp_bfd, tmp_bfd_target));
1636 }
1637
1638 if (!loader_found_in_list)
1639 {
1640 info->debug_loader_name = xstrdup (interp_name);
1641 info->debug_loader_offset_p = 1;
1642 info->debug_loader_offset = load_addr;
1643 solib_add (NULL, from_tty, &current_target, auto_solib_add);
1644 }
1645
1646 /* Record the relocated start and end address of the dynamic linker
1647 text and plt section for svr4_in_dynsym_resolve_code. */
1648 interp_sect = bfd_get_section_by_name (tmp_bfd, ".text");
1649 if (interp_sect)
1650 {
1651 info->interp_text_sect_low =
1652 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
1653 info->interp_text_sect_high =
1654 info->interp_text_sect_low
1655 + bfd_section_size (tmp_bfd, interp_sect);
1656 }
1657 interp_sect = bfd_get_section_by_name (tmp_bfd, ".plt");
1658 if (interp_sect)
1659 {
1660 info->interp_plt_sect_low =
1661 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
1662 info->interp_plt_sect_high =
1663 info->interp_plt_sect_low
1664 + bfd_section_size (tmp_bfd, interp_sect);
1665 }
1666
1667 /* Now try to set a breakpoint in the dynamic linker. */
1668 for (bkpt_namep = solib_break_names; *bkpt_namep != NULL; bkpt_namep++)
1669 {
1670 sym_addr = gdb_bfd_lookup_symbol (tmp_bfd, cmp_name_and_sec_flags,
1671 (void *) *bkpt_namep);
1672 if (sym_addr != 0)
1673 break;
1674 }
1675
1676 if (sym_addr != 0)
1677 /* Convert 'sym_addr' from a function pointer to an address.
1678 Because we pass tmp_bfd_target instead of the current
1679 target, this will always produce an unrelocated value. */
1680 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
1681 sym_addr,
1682 tmp_bfd_target);
1683
1684 /* We're done with both the temporary bfd and target. Closing
1685 the target closes the underlying bfd, because it holds the
1686 only remaining reference. */
1687 target_close (tmp_bfd_target);
1688
1689 if (sym_addr != 0)
1690 {
1691 create_solib_event_breakpoint (target_gdbarch (), load_addr + sym_addr);
1692 xfree (interp_name);
1693 return 1;
1694 }
1695
1696 /* For whatever reason we couldn't set a breakpoint in the dynamic
1697 linker. Warn and drop into the old code. */
1698 bkpt_at_symbol:
1699 xfree (interp_name);
1700 warning (_("Unable to find dynamic linker breakpoint function.\n"
1701 "GDB will be unable to debug shared library initializers\n"
1702 "and track explicitly loaded dynamic code."));
1703 }
1704
1705 /* Scan through the lists of symbols, trying to look up the symbol and
1706 set a breakpoint there. Terminate loop when we/if we succeed. */
1707
1708 for (bkpt_namep = solib_break_names; *bkpt_namep != NULL; bkpt_namep++)
1709 {
1710 msymbol = lookup_minimal_symbol (*bkpt_namep, NULL, symfile_objfile);
1711 if ((msymbol != NULL) && (SYMBOL_VALUE_ADDRESS (msymbol) != 0))
1712 {
1713 sym_addr = SYMBOL_VALUE_ADDRESS (msymbol);
1714 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
1715 sym_addr,
1716 &current_target);
1717 create_solib_event_breakpoint (target_gdbarch (), sym_addr);
1718 return 1;
1719 }
1720 }
1721
1722 if (interp_name != NULL && !current_inferior ()->attach_flag)
1723 {
1724 for (bkpt_namep = bkpt_names; *bkpt_namep != NULL; bkpt_namep++)
1725 {
1726 msymbol = lookup_minimal_symbol (*bkpt_namep, NULL, symfile_objfile);
1727 if ((msymbol != NULL) && (SYMBOL_VALUE_ADDRESS (msymbol) != 0))
1728 {
1729 sym_addr = SYMBOL_VALUE_ADDRESS (msymbol);
1730 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
1731 sym_addr,
1732 &current_target);
1733 create_solib_event_breakpoint (target_gdbarch (), sym_addr);
1734 return 1;
1735 }
1736 }
1737 }
1738 return 0;
1739 }
1740
1741 /* Implement the "special_symbol_handling" target_so_ops method. */
1742
1743 static void
1744 svr4_special_symbol_handling (void)
1745 {
1746 /* Nothing to do. */
1747 }
1748
1749 /* Read the ELF program headers from ABFD. Return the contents and
1750 set *PHDRS_SIZE to the size of the program headers. */
1751
1752 static gdb_byte *
1753 read_program_headers_from_bfd (bfd *abfd, int *phdrs_size)
1754 {
1755 Elf_Internal_Ehdr *ehdr;
1756 gdb_byte *buf;
1757
1758 ehdr = elf_elfheader (abfd);
1759
1760 *phdrs_size = ehdr->e_phnum * ehdr->e_phentsize;
1761 if (*phdrs_size == 0)
1762 return NULL;
1763
1764 buf = xmalloc (*phdrs_size);
1765 if (bfd_seek (abfd, ehdr->e_phoff, SEEK_SET) != 0
1766 || bfd_bread (buf, *phdrs_size, abfd) != *phdrs_size)
1767 {
1768 xfree (buf);
1769 return NULL;
1770 }
1771
1772 return buf;
1773 }
1774
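/* Illustrative caller of the helper above (a sketch, not part of GDB; the
   function name is hypothetical).  It only demonstrates the contract: a NULL
   return means there were no program headers or the read failed, and on
   success the caller owns the xmalloc'd buffer of *PHDRS_SIZE raw,
   target-format bytes.  */
#if 0
static void
example_show_phdrs_size (bfd *abfd)
{
  int phdrs_size;
  gdb_byte *phdrs = read_program_headers_from_bfd (abfd, &phdrs_size);

  if (phdrs == NULL)
    return;

  printf_unfiltered ("%s: %d bytes of program headers\n",
                     bfd_get_filename (abfd), phdrs_size);
  xfree (phdrs);
}
#endif
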
1775 /* Return 1 and fill *DISPLACEMENTP with the detected PIE displacement of the
1776 inferior's exec_bfd.  Otherwise return 0.
1777
1778 We relocate all of the sections by the same amount. This
1779 behavior is mandated by recent editions of the System V ABI.
1780 According to the System V Application Binary Interface,
1781 Edition 4.1, page 5-5:
1782
1783 ... Though the system chooses virtual addresses for
1784 individual processes, it maintains the segments' relative
1785 positions. Because position-independent code uses relative
1786 addressing between segments, the difference between
1787 virtual addresses in memory must match the difference
1788 between virtual addresses in the file. The difference
1789 between the virtual address of any segment in memory and
1790 the corresponding virtual address in the file is thus a
1791 single constant value for any one executable or shared
1792 object in a given process. This difference is the base
1793 address. One use of the base address is to relocate the
1794 memory image of the program during dynamic linking.
1795
1796 The same language also appears in Edition 4.0 of the System V
1797 ABI and is left unspecified in some of the earlier editions.
1798
1799 Decide if the objfile needs to be relocated.  As indicated above, we will
1800 only be here when execution is stopped.  But during attachment the PC can be at
1801 an arbitrary address, so regcache_read_pc can be misleading (contrary to
1802 the auxv AT_ENTRY value).  Moreover, for an executable with an interpreter section,
1803 regcache_read_pc would point into the interpreter rather than the main executable.
1804
1805 So, to summarize, relocations are necessary when the start address obtained
1806 from the executable differs from the address in the auxv AT_ENTRY entry.
1807
1808 [ The astute reader will note that we also test to make sure that
1809 the executable in question has the DYNAMIC flag set. It is my
1810 opinion that this test is unnecessary (undesirable even). It
1811 was added to avoid inadvertent relocation of an executable
1812 whose e_type member in the ELF header is not ET_DYN. There may
1813 be a time in the future when it is desirable to do relocations
1814 on other types of files as well in which case this condition
1815 should either be removed or modified to accommodate the new file
1816 type. - Kevin, Nov 2000. ] */
1817
1818 static int
1819 svr4_exec_displacement (CORE_ADDR *displacementp)
1820 {
1821 /* ENTRY_POINT is a possible function descriptor - before
1822 a call to gdbarch_convert_from_func_ptr_addr. */
1823 CORE_ADDR entry_point, displacement;
1824
1825 if (exec_bfd == NULL)
1826 return 0;
1827
1828 /* If exec_bfd does not have the DYNAMIC flag set, then for ELF it is ET_EXEC
1829 rather than ET_DYN and no displacement applies.  Both shared libraries being
1830 executed directly and PIE (Position Independent Executable) executables are ET_DYN. */
1831
1832 if ((bfd_get_file_flags (exec_bfd) & DYNAMIC) == 0)
1833 return 0;
1834
1835 if (target_auxv_search (&current_target, AT_ENTRY, &entry_point) <= 0)
1836 return 0;
1837
1838 displacement = entry_point - bfd_get_start_address (exec_bfd);
1839
1840 /* Verify the DISPLACEMENT candidate complies with the required page
1841 alignment. It is cheaper than the program headers comparison below. */
1842
1843 if (bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
1844 {
1845 const struct elf_backend_data *elf = get_elf_backend_data (exec_bfd);
1846
1847 /* p_align of PT_LOAD segments does not guarantee any absolute alignment,
1848 only congruency of addresses:
1849 p_offset % p_align == p_vaddr % p_align
1850 The kernel is free to load the executable with a lower alignment. */
1851
1852 if ((displacement & (elf->minpagesize - 1)) != 0)
1853 return 0;
1854 }
1855
1856 /* Verify that the auxiliary vector describes the same file as exec_bfd, by
1857 comparing their program headers.  If the program headers in the auxiliary
1858 vector do not match the program headers in the executable, then we are
1859 looking at a different file than the one used by the kernel - for
1860 instance, "gdb program" connected to "gdbserver :PORT ld.so program". */
1861
1862 if (bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
1863 {
1864 /* Be optimistic and clear OK only if GDB was able to verify the headers
1865 really do not match. */
1866 int phdrs_size, phdrs2_size, ok = 1;
1867 gdb_byte *buf, *buf2;
1868 int arch_size;
1869
1870 buf = read_program_header (-1, &phdrs_size, &arch_size);
1871 buf2 = read_program_headers_from_bfd (exec_bfd, &phdrs2_size);
1872 if (buf != NULL && buf2 != NULL)
1873 {
1874 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
1875
1876 /* We are dealing with three different addresses.  EXEC_BFD
1877 represents the addresses in the on-disk file.  The target memory
1878 content may differ from EXEC_BFD because the file may have been
1879 prelinked to a different address after the executable was loaded.
1880 Moreover, the address at which the image is placed in target memory
1881 can differ from what the program headers in target memory say -
1882 this is the purpose of PIE.
1883
1884 The detected DISPLACEMENT covers both the offset of the PIE placement
1885 and any new prelink performed after the program was started.  Here we
1886 relocate BUF and BUF2 only by the EXEC_BFD vs. target memory content
1887 offset, for verification purposes. */
1888
1889 if (phdrs_size != phdrs2_size
1890 || bfd_get_arch_size (exec_bfd) != arch_size)
1891 ok = 0;
1892 else if (arch_size == 32
1893 && phdrs_size >= sizeof (Elf32_External_Phdr)
1894 && phdrs_size % sizeof (Elf32_External_Phdr) == 0)
1895 {
1896 Elf_Internal_Ehdr *ehdr2 = elf_tdata (exec_bfd)->elf_header;
1897 Elf_Internal_Phdr *phdr2 = elf_tdata (exec_bfd)->phdr;
1898 CORE_ADDR displacement = 0;
1899 int i;
1900
1901 /* DISPLACEMENT could be found more easily by the difference of
1902 ehdr2->e_entry. But we haven't read the ehdr yet, and we
1903 already have enough information to compute that displacement
1904 with what we've read. */
1905
1906 for (i = 0; i < ehdr2->e_phnum; i++)
1907 if (phdr2[i].p_type == PT_LOAD)
1908 {
1909 Elf32_External_Phdr *phdrp;
1910 gdb_byte *buf_vaddr_p, *buf_paddr_p;
1911 CORE_ADDR vaddr, paddr;
1912 CORE_ADDR displacement_vaddr = 0;
1913 CORE_ADDR displacement_paddr = 0;
1914
1915 phdrp = &((Elf32_External_Phdr *) buf)[i];
1916 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
1917 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
1918
1919 vaddr = extract_unsigned_integer (buf_vaddr_p, 4,
1920 byte_order);
1921 displacement_vaddr = vaddr - phdr2[i].p_vaddr;
1922
1923 paddr = extract_unsigned_integer (buf_paddr_p, 4,
1924 byte_order);
1925 displacement_paddr = paddr - phdr2[i].p_paddr;
1926
1927 if (displacement_vaddr == displacement_paddr)
1928 displacement = displacement_vaddr;
1929
1930 break;
1931 }
1932
1933 /* Now compare BUF and BUF2 with optional DISPLACEMENT. */
1934
1935 for (i = 0; i < phdrs_size / sizeof (Elf32_External_Phdr); i++)
1936 {
1937 Elf32_External_Phdr *phdrp;
1938 Elf32_External_Phdr *phdr2p;
1939 gdb_byte *buf_vaddr_p, *buf_paddr_p;
1940 CORE_ADDR vaddr, paddr;
1941 asection *plt2_asect;
1942
1943 phdrp = &((Elf32_External_Phdr *) buf)[i];
1944 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
1945 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
1946 phdr2p = &((Elf32_External_Phdr *) buf2)[i];
1947
1948 /* PT_GNU_STACK is an exception: it is never relocated by
1949 prelink, as its addresses are always zero. */
1950
1951 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
1952 continue;
1953
1954 /* Check also other adjustment combinations - PR 11786. */
1955
1956 vaddr = extract_unsigned_integer (buf_vaddr_p, 4,
1957 byte_order);
1958 vaddr -= displacement;
1959 store_unsigned_integer (buf_vaddr_p, 4, byte_order, vaddr);
1960
1961 paddr = extract_unsigned_integer (buf_paddr_p, 4,
1962 byte_order);
1963 paddr -= displacement;
1964 store_unsigned_integer (buf_paddr_p, 4, byte_order, paddr);
1965
1966 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
1967 continue;
1968
1969 /* prelink can convert .plt SHT_NOBITS to SHT_PROGBITS. */
1970 plt2_asect = bfd_get_section_by_name (exec_bfd, ".plt");
1971 if (plt2_asect)
1972 {
1973 int content2;
1974 gdb_byte *buf_filesz_p = (gdb_byte *) &phdrp->p_filesz;
1975 CORE_ADDR filesz;
1976
1977 content2 = (bfd_get_section_flags (exec_bfd, plt2_asect)
1978 & SEC_HAS_CONTENTS) != 0;
1979
1980 filesz = extract_unsigned_integer (buf_filesz_p, 4,
1981 byte_order);
1982
1983 /* PLT2_ASECT is from on-disk file (exec_bfd) while
1984 FILESZ is from the in-memory image. */
1985 if (content2)
1986 filesz += bfd_get_section_size (plt2_asect);
1987 else
1988 filesz -= bfd_get_section_size (plt2_asect);
1989
1990 store_unsigned_integer (buf_filesz_p, 4, byte_order,
1991 filesz);
1992
1993 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
1994 continue;
1995 }
1996
1997 ok = 0;
1998 break;
1999 }
2000 }
2001 else if (arch_size == 64
2002 && phdrs_size >= sizeof (Elf64_External_Phdr)
2003 && phdrs_size % sizeof (Elf64_External_Phdr) == 0)
2004 {
2005 Elf_Internal_Ehdr *ehdr2 = elf_tdata (exec_bfd)->elf_header;
2006 Elf_Internal_Phdr *phdr2 = elf_tdata (exec_bfd)->phdr;
2007 CORE_ADDR displacement = 0;
2008 int i;
2009
2010 /* DISPLACEMENT could be found more easily by the difference of
2011 ehdr2->e_entry. But we haven't read the ehdr yet, and we
2012 already have enough information to compute that displacement
2013 with what we've read. */
2014
2015 for (i = 0; i < ehdr2->e_phnum; i++)
2016 if (phdr2[i].p_type == PT_LOAD)
2017 {
2018 Elf64_External_Phdr *phdrp;
2019 gdb_byte *buf_vaddr_p, *buf_paddr_p;
2020 CORE_ADDR vaddr, paddr;
2021 CORE_ADDR displacement_vaddr = 0;
2022 CORE_ADDR displacement_paddr = 0;
2023
2024 phdrp = &((Elf64_External_Phdr *) buf)[i];
2025 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
2026 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
2027
2028 vaddr = extract_unsigned_integer (buf_vaddr_p, 8,
2029 byte_order);
2030 displacement_vaddr = vaddr - phdr2[i].p_vaddr;
2031
2032 paddr = extract_unsigned_integer (buf_paddr_p, 8,
2033 byte_order);
2034 displacement_paddr = paddr - phdr2[i].p_paddr;
2035
2036 if (displacement_vaddr == displacement_paddr)
2037 displacement = displacement_vaddr;
2038
2039 break;
2040 }
2041
2042 /* Now compare BUF and BUF2 with optional DISPLACEMENT. */
2043
2044 for (i = 0; i < phdrs_size / sizeof (Elf64_External_Phdr); i++)
2045 {
2046 Elf64_External_Phdr *phdrp;
2047 Elf64_External_Phdr *phdr2p;
2048 gdb_byte *buf_vaddr_p, *buf_paddr_p;
2049 CORE_ADDR vaddr, paddr;
2050 asection *plt2_asect;
2051
2052 phdrp = &((Elf64_External_Phdr *) buf)[i];
2053 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
2054 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
2055 phdr2p = &((Elf64_External_Phdr *) buf2)[i];
2056
2057 /* PT_GNU_STACK is an exception: it is never relocated by
2058 prelink, as its addresses are always zero. */
2059
2060 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2061 continue;
2062
2063 /* Check also other adjustment combinations - PR 11786. */
2064
2065 vaddr = extract_unsigned_integer (buf_vaddr_p, 8,
2066 byte_order);
2067 vaddr -= displacement;
2068 store_unsigned_integer (buf_vaddr_p, 8, byte_order, vaddr);
2069
2070 paddr = extract_unsigned_integer (buf_paddr_p, 8,
2071 byte_order);
2072 paddr -= displacement;
2073 store_unsigned_integer (buf_paddr_p, 8, byte_order, paddr);
2074
2075 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2076 continue;
2077
2078 /* prelink can convert .plt SHT_NOBITS to SHT_PROGBITS. */
2079 plt2_asect = bfd_get_section_by_name (exec_bfd, ".plt");
2080 if (plt2_asect)
2081 {
2082 int content2;
2083 gdb_byte *buf_filesz_p = (gdb_byte *) &phdrp->p_filesz;
2084 CORE_ADDR filesz;
2085
2086 content2 = (bfd_get_section_flags (exec_bfd, plt2_asect)
2087 & SEC_HAS_CONTENTS) != 0;
2088
2089 filesz = extract_unsigned_integer (buf_filesz_p, 8,
2090 byte_order);
2091
2092 /* PLT2_ASECT is from on-disk file (exec_bfd) while
2093 FILESZ is from the in-memory image. */
2094 if (content2)
2095 filesz += bfd_get_section_size (plt2_asect);
2096 else
2097 filesz -= bfd_get_section_size (plt2_asect);
2098
2099 store_unsigned_integer (buf_filesz_p, 8, byte_order,
2100 filesz);
2101
2102 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2103 continue;
2104 }
2105
2106 ok = 0;
2107 break;
2108 }
2109 }
2110 else
2111 ok = 0;
2112 }
2113
2114 xfree (buf);
2115 xfree (buf2);
2116
2117 if (!ok)
2118 return 0;
2119 }
2120
2121 if (info_verbose)
2122 {
2123 /* This may be printed repeatedly, as there is no easy way to check
2124 whether the executable symbols/file have already been relocated by
2125 the displacement. */
2126
2127 printf_unfiltered (_("Using PIE (Position Independent Executable) "
2128 "displacement %s for \"%s\".\n"),
2129 paddress (target_gdbarch (), displacement),
2130 bfd_get_filename (exec_bfd));
2131 }
2132
2133 *displacementp = displacement;
2134 return 1;
2135 }
2136
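/* A minimal sketch of the core test performed by svr4_exec_displacement
   above, with the prelink/program-header cross-checks stripped out.  It is
   not part of GDB; MINPAGESIZE stands in for the ELF backend's minimum page
   size.  For example, an on-disk start address of 0x400 together with an
   auxv AT_ENTRY of 0x7f0000000400 yields a candidate displacement of
   0x7f0000000000, which is page aligned and would be accepted.  */
#if 0
static int
example_pie_displacement (CORE_ADDR at_entry, CORE_ADDR bfd_start_addr,
                          CORE_ADDR minpagesize, CORE_ADDR *displacementp)
{
  CORE_ADDR displacement = at_entry - bfd_start_addr;

  /* p_align only guarantees congruency (p_offset % p_align
     == p_vaddr % p_align), so the hard requirement on the candidate
     displacement is that it be a multiple of the minimum page size.  */
  if ((displacement & (minpagesize - 1)) != 0)
    return 0;

  *displacementp = displacement;
  return 1;
}
#endif
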
2137 /* Relocate the main executable. This function should be called upon
2138 stopping the inferior process at the entry point to the program.
2139 The entry point from BFD is compared to the AT_ENTRY of AUXV and if they are
2140 different, the main executable is relocated by the proper amount. */
2141
2142 static void
2143 svr4_relocate_main_executable (void)
2144 {
2145 CORE_ADDR displacement;
2146
2147 /* If we are re-running this executable, SYMFILE_OBJFILE->SECTION_OFFSETS
2148 probably contains the offsets computed using the PIE displacement
2149 from the previous run, which of course are irrelevant for this run.
2150 So we need to determine the new PIE displacement and recompute the
2151 section offsets accordingly, even if SYMFILE_OBJFILE->SECTION_OFFSETS
2152 already contains pre-computed offsets.
2153
2154 If we cannot compute the PIE displacement, either:
2155
2156 - The executable is not PIE.
2157
2158 - SYMFILE_OBJFILE does not match the executable started in the target.
2159 This can happen for main executable symbols loaded at the host while
2160 `ld.so --ld-args main-executable' is loaded in the target.
2161
2162 Then we leave the section offsets untouched and use them as is for
2163 this run. Either:
2164
2165 - These section offsets were properly reset earlier, and thus
2166 already contain the correct values. This can happen for instance
2167 when reconnecting via the remote protocol to a target that supports
2168 the `qOffsets' packet.
2169
2170 - The section offsets were not reset earlier, and the best we can
2171 hope is that the old offsets are still applicable to the new run. */
2172
2173 if (! svr4_exec_displacement (&displacement))
2174 return;
2175
2176 /* Even DISPLACEMENT 0 is a valid new difference of in-memory vs. in-file
2177 addresses. */
2178
2179 if (symfile_objfile)
2180 {
2181 struct section_offsets *new_offsets;
2182 int i;
2183
2184 new_offsets = alloca (symfile_objfile->num_sections
2185 * sizeof (*new_offsets));
2186
2187 for (i = 0; i < symfile_objfile->num_sections; i++)
2188 new_offsets->offsets[i] = displacement;
2189
2190 objfile_relocate (symfile_objfile, new_offsets);
2191 }
2192 else if (exec_bfd)
2193 {
2194 asection *asect;
2195
2196 for (asect = exec_bfd->sections; asect != NULL; asect = asect->next)
2197 exec_set_section_address (bfd_get_filename (exec_bfd), asect->index,
2198 (bfd_section_vma (exec_bfd, asect)
2199 + displacement));
2200 }
2201 }
2202
2203 /* Implement the "create_inferior_hook" target_so_ops method.
2204
2205 For SVR4 executables, the first instruction executed is either the first
2206 instruction in the dynamic linker (for dynamically linked
2207 executables) or the instruction at "start" for statically linked
2208 executables. For dynamically linked executables, the system
2209 first exec's /lib/libc.so.N, which contains the dynamic linker,
2210 and starts it running. The dynamic linker maps in any needed
2211 shared libraries, maps in the actual user executable, and then
2212 jumps to "start" in the user executable.
2213
2214 We can arrange to cooperate with the dynamic linker to discover the
2215 names of shared libraries that are dynamically linked, and the base
2216 addresses to which they are linked.
2217
2218 This function is responsible for discovering those names and
2219 addresses, and saving sufficient information about them to allow
2220 their symbols to be read at a later time. */
2221
2222 static void
2223 svr4_solib_create_inferior_hook (int from_tty)
2224 {
2225 struct svr4_info *info;
2226
2227 info = get_svr4_info ();
2228
2229 /* Relocate the main executable if necessary. */
2230 svr4_relocate_main_executable ();
2231
2232 /* No point setting a breakpoint in the dynamic linker if we can't
2233 hit it (e.g., a core file, or a trace file). */
2234 if (!target_has_execution)
2235 return;
2236
2237 if (!svr4_have_link_map_offsets ())
2238 return;
2239
2240 if (!enable_break (info, from_tty))
2241 return;
2242 }
2243
2244 static void
2245 svr4_clear_solib (void)
2246 {
2247 struct svr4_info *info;
2248
2249 info = get_svr4_info ();
2250 info->debug_base = 0;
2251 info->debug_loader_offset_p = 0;
2252 info->debug_loader_offset = 0;
2253 xfree (info->debug_loader_name);
2254 info->debug_loader_name = NULL;
2255 }
2256
2257 /* Clear any bits of ADDR that wouldn't fit in a target-format
2258 data pointer. "Data pointer" here refers to whatever sort of
2259 address the dynamic linker uses to manage its sections. At the
2260 moment, we don't support shared libraries on any processors where
2261 code and data pointers are different sizes.
2262
2263 This isn't really the right solution. What we really need here is
2264 a way to do arithmetic on CORE_ADDR values that respects the
2265 natural pointer/address correspondence. (For example, on the MIPS,
2266 converting a 32-bit pointer to a 64-bit CORE_ADDR requires you to
2267 sign-extend the value. There, simply truncating the bits above
2268 gdbarch_ptr_bit, as we do below, is no good.) This should probably
2269 be a new gdbarch method or something. */
2270 static CORE_ADDR
2271 svr4_truncate_ptr (CORE_ADDR addr)
2272 {
2273 if (gdbarch_ptr_bit (target_gdbarch ()) == sizeof (CORE_ADDR) * 8)
2274 /* We don't need to truncate anything, and the bit twiddling below
2275 will fail due to overflow problems. */
2276 return addr;
2277 else
2278 return addr & (((CORE_ADDR) 1 << gdbarch_ptr_bit (target_gdbarch ())) - 1);
2279 }
2280
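/* Worked example for the truncation above (an illustration only): on a
   target where gdbarch_ptr_bit reports 32 while CORE_ADDR is 64 bits wide,
   a sign-extended value such as 0xffffffff80001234 is masked down to
   0x80001234.  When the pointer width equals the CORE_ADDR width the
   address is returned unchanged, which also avoids the undefined
   full-width shift.  */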
2281
2282 static void
2283 svr4_relocate_section_addresses (struct so_list *so,
2284 struct target_section *sec)
2285 {
2286 sec->addr = svr4_truncate_ptr (sec->addr + lm_addr_check (so,
2287 sec->bfd));
2288 sec->endaddr = svr4_truncate_ptr (sec->endaddr + lm_addr_check (so,
2289 sec->bfd));
2290 }
2291 \f
2292
2293 /* Architecture-specific operations. */
2294
2295 /* Per-architecture data key. */
2296 static struct gdbarch_data *solib_svr4_data;
2297
2298 struct solib_svr4_ops
2299 {
2300 /* Return a description of the layout of `struct link_map'. */
2301 struct link_map_offsets *(*fetch_link_map_offsets)(void);
2302 };
2303
2304 /* Return a default for the architecture-specific operations. */
2305
2306 static void *
2307 solib_svr4_init (struct obstack *obstack)
2308 {
2309 struct solib_svr4_ops *ops;
2310
2311 ops = OBSTACK_ZALLOC (obstack, struct solib_svr4_ops);
2312 ops->fetch_link_map_offsets = NULL;
2313 return ops;
2314 }
2315
2316 /* Set the architecture-specific `struct link_map_offsets' fetcher for
2317 GDBARCH to FLMO. Also, install SVR4 solib_ops into GDBARCH. */
2318
2319 void
2320 set_solib_svr4_fetch_link_map_offsets (struct gdbarch *gdbarch,
2321 struct link_map_offsets *(*flmo) (void))
2322 {
2323 struct solib_svr4_ops *ops = gdbarch_data (gdbarch, solib_svr4_data);
2324
2325 ops->fetch_link_map_offsets = flmo;
2326
2327 set_solib_ops (gdbarch, &svr4_so_ops);
2328 }
2329
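/* Typical use (a sketch modelled on how Linux/SVR4 tdep files install the
   fetcher; the function name below is hypothetical): an architecture's
   *_init_abi routine picks the layout matching its pointer size, which as a
   side effect also installs svr4_so_ops for that gdbarch.  */
#if 0
static void
example_svr4_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
{
  /* A 32-bit ABI uses the ILP32 link_map layout; a 64-bit ABI would pass
     svr4_lp64_fetch_link_map_offsets instead.  */
  set_solib_svr4_fetch_link_map_offsets (gdbarch,
                                         svr4_ilp32_fetch_link_map_offsets);
}
#endif
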
2330 /* Fetch a link_map_offsets structure using the architecture-specific
2331 `struct link_map_offsets' fetcher. */
2332
2333 static struct link_map_offsets *
2334 svr4_fetch_link_map_offsets (void)
2335 {
2336 struct solib_svr4_ops *ops = gdbarch_data (target_gdbarch (), solib_svr4_data);
2337
2338 gdb_assert (ops->fetch_link_map_offsets);
2339 return ops->fetch_link_map_offsets ();
2340 }
2341
2342 /* Return 1 if a link map offset fetcher has been defined, 0 otherwise. */
2343
2344 static int
2345 svr4_have_link_map_offsets (void)
2346 {
2347 struct solib_svr4_ops *ops = gdbarch_data (target_gdbarch (), solib_svr4_data);
2348
2349 return (ops->fetch_link_map_offsets != NULL);
2350 }
2351 \f
2352
2353 /* Most OSes that have SVR4-style ELF dynamic libraries define a
2354 `struct r_debug' and a `struct link_map' that are binary compatible
2355 with the original SVR4 implementation. */
2356
2357 /* Fetch (and possibly build) an appropriate `struct link_map_offsets'
2358 for an ILP32 SVR4 system. */
2359
2360 struct link_map_offsets *
2361 svr4_ilp32_fetch_link_map_offsets (void)
2362 {
2363 static struct link_map_offsets lmo;
2364 static struct link_map_offsets *lmp = NULL;
2365
2366 if (lmp == NULL)
2367 {
2368 lmp = &lmo;
2369
2370 lmo.r_version_offset = 0;
2371 lmo.r_version_size = 4;
2372 lmo.r_map_offset = 4;
2373 lmo.r_brk_offset = 8;
2374 lmo.r_ldsomap_offset = 20;
2375
2376 /* Everything we need is in the first 20 bytes. */
2377 lmo.link_map_size = 20;
2378 lmo.l_addr_offset = 0;
2379 lmo.l_name_offset = 4;
2380 lmo.l_ld_offset = 8;
2381 lmo.l_next_offset = 12;
2382 lmo.l_prev_offset = 16;
2383 }
2384
2385 return lmp;
2386 }
2387
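/* The ILP32 numbers above correspond to layouts along these lines (a sketch
   assuming 4-byte pointers and no padding; the struct and field names are
   illustrative, and r_ldsomap is a Solaris extension that follows the base
   r_debug fields).  The LP64 variant below uses 8-byte pointers, doubling
   every offset.  */
#if 0
struct example_ilp32_r_debug
{
  int r_version;                /* offset 0, 4 bytes */
  unsigned int r_map;           /* offset 4: struct link_map * */
  unsigned int r_brk;           /* offset 8 */
  int r_state;                  /* offset 12 */
  unsigned int r_ldbase;        /* offset 16 */
  unsigned int r_ldsomap;       /* offset 20 (Solaris only) */
};

struct example_ilp32_link_map
{
  unsigned int l_addr;          /* offset 0 */
  unsigned int l_name;          /* offset 4: char * */
  unsigned int l_ld;            /* offset 8: dynamic section */
  unsigned int l_next;          /* offset 12 */
  unsigned int l_prev;          /* offset 16; 20 bytes used in total */
};
#endif
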
2388 /* Fetch (and possibly build) an appropriate `struct link_map_offsets'
2389 for an LP64 SVR4 system. */
2390
2391 struct link_map_offsets *
2392 svr4_lp64_fetch_link_map_offsets (void)
2393 {
2394 static struct link_map_offsets lmo;
2395 static struct link_map_offsets *lmp = NULL;
2396
2397 if (lmp == NULL)
2398 {
2399 lmp = &lmo;
2400
2401 lmo.r_version_offset = 0;
2402 lmo.r_version_size = 4;
2403 lmo.r_map_offset = 8;
2404 lmo.r_brk_offset = 16;
2405 lmo.r_ldsomap_offset = 40;
2406
2407 /* Everything we need is in the first 40 bytes. */
2408 lmo.link_map_size = 40;
2409 lmo.l_addr_offset = 0;
2410 lmo.l_name_offset = 8;
2411 lmo.l_ld_offset = 16;
2412 lmo.l_next_offset = 24;
2413 lmo.l_prev_offset = 32;
2414 }
2415
2416 return lmp;
2417 }
2418 \f
2419
2420 struct target_so_ops svr4_so_ops;
2421
2422 /* Look up a global symbol for ELF DSOs linked with -Bsymbolic. Those DSOs have a
2423 different rule for symbol lookup. The lookup begins here in the DSO, not in
2424 the main executable. */
2425
2426 static struct symbol *
2427 elf_lookup_lib_symbol (const struct objfile *objfile,
2428 const char *name,
2429 const domain_enum domain)
2430 {
2431 bfd *abfd;
2432
2433 if (objfile == symfile_objfile)
2434 abfd = exec_bfd;
2435 else
2436 {
2437 /* OBJFILE should have been passed as the non-debug one. */
2438 gdb_assert (objfile->separate_debug_objfile_backlink == NULL);
2439
2440 abfd = objfile->obfd;
2441 }
2442
2443 if (abfd == NULL || scan_dyntag (DT_SYMBOLIC, abfd, NULL) != 1)
2444 return NULL;
2445
2446 return lookup_global_symbol_from_objfile (objfile, name, domain);
2447 }
2448
2449 extern initialize_file_ftype _initialize_svr4_solib; /* -Wmissing-prototypes */
2450
2451 void
2452 _initialize_svr4_solib (void)
2453 {
2454 solib_svr4_data = gdbarch_data_register_pre_init (solib_svr4_init);
2455 solib_svr4_pspace_data
2456 = register_program_space_data_with_cleanup (NULL, svr4_pspace_data_cleanup);
2457
2458 svr4_so_ops.relocate_section_addresses = svr4_relocate_section_addresses;
2459 svr4_so_ops.free_so = svr4_free_so;
2460 svr4_so_ops.clear_so = svr4_clear_so;
2461 svr4_so_ops.clear_solib = svr4_clear_solib;
2462 svr4_so_ops.solib_create_inferior_hook = svr4_solib_create_inferior_hook;
2463 svr4_so_ops.special_symbol_handling = svr4_special_symbol_handling;
2464 svr4_so_ops.current_sos = svr4_current_sos;
2465 svr4_so_ops.open_symbol_file_object = open_symbol_file_object;
2466 svr4_so_ops.in_dynsym_resolve_code = svr4_in_dynsym_resolve_code;
2467 svr4_so_ops.bfd_open = solib_bfd_open;
2468 svr4_so_ops.lookup_lib_global_symbol = elf_lookup_lib_symbol;
2469 svr4_so_ops.same = svr4_same;
2470 svr4_so_ops.keep_data_in_core = svr4_keep_data_in_core;
2471 }