remove trivially unused variables
[deliverable/binutils-gdb.git] / gdb / solib-svr4.c
1 /* Handle SVR4 shared libraries for GDB, the GNU Debugger.
2
3 Copyright (C) 1990-2016 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21
22 #include "elf/external.h"
23 #include "elf/common.h"
24 #include "elf/mips.h"
25
26 #include "symtab.h"
27 #include "bfd.h"
28 #include "symfile.h"
29 #include "objfiles.h"
30 #include "gdbcore.h"
31 #include "target.h"
32 #include "inferior.h"
33 #include "infrun.h"
34 #include "regcache.h"
35 #include "gdbthread.h"
36 #include "observer.h"
37
38 #include "solist.h"
39 #include "solib.h"
40 #include "solib-svr4.h"
41
42 #include "bfd-target.h"
43 #include "elf-bfd.h"
44 #include "exec.h"
45 #include "auxv.h"
46 #include "gdb_bfd.h"
47 #include "probe.h"
48
49 static struct link_map_offsets *svr4_fetch_link_map_offsets (void);
50 static int svr4_have_link_map_offsets (void);
51 static void svr4_relocate_main_executable (void);
52 static void svr4_free_library_list (void *p_list);
53
54 /* Link map info to include in an allocated so_list entry. */
55
56 struct lm_info
57 {
58 /* Amount by which addresses in the binary should be relocated to
59 match the inferior. The direct inferior value is L_ADDR_INFERIOR.
60 When prelinking is involved and the prelink base address changes,
61 we may need a different offset - the recomputed offset is in L_ADDR.
62 It is commonly the same value. It is cached as we want to warn about
63 the difference and compute it only once. L_ADDR is valid
64 iff L_ADDR_P. */
65 CORE_ADDR l_addr, l_addr_inferior;
66 unsigned int l_addr_p : 1;
67
68 /* The target location of lm. */
69 CORE_ADDR lm_addr;
70
71 /* Values read in from inferior's fields of the same name. */
72 CORE_ADDR l_ld, l_next, l_prev, l_name;
73 };
74
75 /* On SVR4 systems, a list of symbols in the dynamic linker where
76 GDB can try to place a breakpoint to monitor shared library
77 events.
78
79 If none of these symbols are found, or other errors occur, then
80 SVR4 systems will fall back to using a symbol as the "startup
81 mapping complete" breakpoint address. */
82
83 static const char * const solib_break_names[] =
84 {
85 "r_debug_state",
86 "_r_debug_state",
87 "_dl_debug_state",
88 "rtld_db_dlactivity",
89 "__dl_rtld_db_dlactivity",
90 "_rtld_debug_state",
91
92 NULL
93 };
94
95 static const char * const bkpt_names[] =
96 {
97 "_start",
98 "__start",
99 "main",
100 NULL
101 };
102
103 static const char * const main_name_list[] =
104 {
105 "main_$main",
106 NULL
107 };
108
109 /* What to do when a probe stop occurs. */
110
111 enum probe_action
112 {
113 /* Something went seriously wrong. Stop using probes and
114 revert to using the older interface. */
115 PROBES_INTERFACE_FAILED,
116
117 /* No action is required. The shared object list is still
118 valid. */
119 DO_NOTHING,
120
121 /* The shared object list should be reloaded entirely. */
122 FULL_RELOAD,
123
124 /* Attempt to incrementally update the shared object list. If
125 the update fails or is not possible, fall back to reloading
126 the list in full. */
127 UPDATE_OR_RELOAD,
128 };
129
130 /* A probe's name and its associated action. */
131
132 struct probe_info
133 {
134 /* The name of the probe. */
135 const char *name;
136
137 /* What to do when a probe stop occurs. */
138 enum probe_action action;
139 };
140
141 /* A list of named probes and their associated actions. If all
142 probes are present in the dynamic linker then the probes-based
143 interface will be used. */
144
145 static const struct probe_info probe_info[] =
146 {
147 { "init_start", DO_NOTHING },
148 { "init_complete", FULL_RELOAD },
149 { "map_start", DO_NOTHING },
150 { "map_failed", DO_NOTHING },
151 { "reloc_complete", UPDATE_OR_RELOAD },
152 { "unmap_start", DO_NOTHING },
153 { "unmap_complete", FULL_RELOAD },
154 };
155
156 #define NUM_PROBES ARRAY_SIZE (probe_info)
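
/* Illustrative sketch only; the actual lookup code lives further down
   in this file.  Each name in PROBE_INFO above is searched for in the
   dynamic linker's objfile (in glibc these are the dynamic linker's
   SystemTap probes), and every probe found is recorded together with
   its action, roughly:

     for (i = 0; i < NUM_PROBES; i++)
       for each probe found under the name probe_info[i].name:
         register_solib_event_probe (probe, probe's address,
                                     probe_info[i].action);

   When a solib event breakpoint later stops at one of the recorded
   addresses, the stored action drives svr4_handle_solib_event below.  */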
157
158 /* Return non-zero if GDB_SO_NAME and INFERIOR_SO_NAME represent
159 the same shared library. */
160
161 static int
162 svr4_same_1 (const char *gdb_so_name, const char *inferior_so_name)
163 {
164 if (strcmp (gdb_so_name, inferior_so_name) == 0)
165 return 1;
166
167 /* On Solaris, when starting the inferior we think that the dynamic linker
168 is /usr/lib/ld.so.1, but later on, the table of loaded shared libraries
169 contains /lib/ld.so.1. Sometimes one file is a link to the other, and
170 sometimes they have identical content but are not linked to each
171 other. We don't restrict this check to Solaris, but the chances
172 of running into this situation elsewhere are very low. */
173 if (strcmp (gdb_so_name, "/usr/lib/ld.so.1") == 0
174 && strcmp (inferior_so_name, "/lib/ld.so.1") == 0)
175 return 1;
176
177 /* Similarly, we observed the same issue with sparc64, but with
178 different locations. */
179 if (strcmp (gdb_so_name, "/usr/lib/sparcv9/ld.so.1") == 0
180 && strcmp (inferior_so_name, "/lib/sparcv9/ld.so.1") == 0)
181 return 1;
182
183 return 0;
184 }
185
186 static int
187 svr4_same (struct so_list *gdb, struct so_list *inferior)
188 {
189 return (svr4_same_1 (gdb->so_original_name, inferior->so_original_name));
190 }
191
192 static struct lm_info *
193 lm_info_read (CORE_ADDR lm_addr)
194 {
195 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
196 gdb_byte *lm;
197 struct lm_info *lm_info;
198 struct cleanup *back_to;
199
200 lm = (gdb_byte *) xmalloc (lmo->link_map_size);
201 back_to = make_cleanup (xfree, lm);
202
203 if (target_read_memory (lm_addr, lm, lmo->link_map_size) != 0)
204 {
205 warning (_("Error reading shared library list entry at %s"),
206 paddress (target_gdbarch (), lm_addr));
207 lm_info = NULL;
208 }
209 else
210 {
211 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
212
213 lm_info = XCNEW (struct lm_info);
214 lm_info->lm_addr = lm_addr;
215
216 lm_info->l_addr_inferior = extract_typed_address (&lm[lmo->l_addr_offset],
217 ptr_type);
218 lm_info->l_ld = extract_typed_address (&lm[lmo->l_ld_offset], ptr_type);
219 lm_info->l_next = extract_typed_address (&lm[lmo->l_next_offset],
220 ptr_type);
221 lm_info->l_prev = extract_typed_address (&lm[lmo->l_prev_offset],
222 ptr_type);
223 lm_info->l_name = extract_typed_address (&lm[lmo->l_name_offset],
224 ptr_type);
225 }
226
227 do_cleanups (back_to);
228
229 return lm_info;
230 }
231
232 static int
233 has_lm_dynamic_from_link_map (void)
234 {
235 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
236
237 return lmo->l_ld_offset >= 0;
238 }
239
240 static CORE_ADDR
241 lm_addr_check (const struct so_list *so, bfd *abfd)
242 {
243 if (!so->lm_info->l_addr_p)
244 {
245 struct bfd_section *dyninfo_sect;
246 CORE_ADDR l_addr, l_dynaddr, dynaddr;
247
248 l_addr = so->lm_info->l_addr_inferior;
249
250 if (! abfd || ! has_lm_dynamic_from_link_map ())
251 goto set_addr;
252
253 l_dynaddr = so->lm_info->l_ld;
254
255 dyninfo_sect = bfd_get_section_by_name (abfd, ".dynamic");
256 if (dyninfo_sect == NULL)
257 goto set_addr;
258
259 dynaddr = bfd_section_vma (abfd, dyninfo_sect);
260
261 if (dynaddr + l_addr != l_dynaddr)
262 {
263 CORE_ADDR align = 0x1000;
264 CORE_ADDR minpagesize = align;
265
266 if (bfd_get_flavour (abfd) == bfd_target_elf_flavour)
267 {
268 Elf_Internal_Ehdr *ehdr = elf_tdata (abfd)->elf_header;
269 Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
270 int i;
271
272 align = 1;
273
274 for (i = 0; i < ehdr->e_phnum; i++)
275 if (phdr[i].p_type == PT_LOAD && phdr[i].p_align > align)
276 align = phdr[i].p_align;
277
278 minpagesize = get_elf_backend_data (abfd)->minpagesize;
279 }
280
281 /* Turn it into a mask. */
282 align--;
283
284 /* If the changes match the alignment requirements, we
285 assume we're using a core file that was generated by the
286 same binary, just prelinked with a different base offset.
287 If it doesn't match, we may have a different binary, the
288 same binary with the dynamic table loaded at an unrelated
289 location, or anything, really. To avoid regressions,
290 don't adjust the base offset in the latter case, although
291 odds are that, if things really changed, debugging won't
292 quite work.
293
294 One might rather expect the condition
295 ((l_addr & align) == 0 && ((l_dynaddr - dynaddr) & align) == 0)
296 but the one below is relaxed for PPC. The PPC kernel supports
297 either 4k or 64k page sizes. To be prepared for 64k pages,
298 PPC ELF files are built using an alignment requirement of 64k.
299 However, when running on a kernel supporting 4k pages, the memory
300 mapping of the library may not actually happen on a 64k boundary!
301
302 (In the usual case where (l_addr & align) == 0, this check is
303 equivalent to the possibly expected check above.)
304
305 Even on PPC it must be zero-aligned at least for MINPAGESIZE. */
306
307 l_addr = l_dynaddr - dynaddr;
308
309 if ((l_addr & (minpagesize - 1)) == 0
310 && (l_addr & align) == ((l_dynaddr - dynaddr) & align))
311 {
312 if (info_verbose)
313 printf_unfiltered (_("Using PIC (Position Independent Code) "
314 "prelink displacement %s for \"%s\".\n"),
315 paddress (target_gdbarch (), l_addr),
316 so->so_name);
317 }
318 else
319 {
320 /* There is no way to verify that the library file matches. During
321 prelinking of an unprelinked file (or unprelinking of a prelinked
322 file), prelink can shift the DYNAMIC segment by an arbitrary
323 offset without any page size alignment. There is no way to
324 find the ELF header and/or Program Headers for a limited
325 verification that they match. One could do a verification
326 of the DYNAMIC segment. Still, the found address is the best
327 one GDB could find. */
328
329 warning (_(".dynamic section for \"%s\" "
330 "is not at the expected address "
331 "(wrong library or version mismatch?)"), so->so_name);
332 }
333 }
334
335 set_addr:
336 so->lm_info->l_addr = l_addr;
337 so->lm_info->l_addr_p = 1;
338 }
339
340 return so->lm_info->l_addr;
341 }
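
/* Worked example with hypothetical numbers for the check above: if the
   library's .dynamic section is linked at vma 0x200000 (DYNADDR) while
   the inferior's link map reports l_ld == 0x555555754000 (L_DYNADDR),
   the displacement computed is l_addr = 0x555555754000 - 0x200000
   = 0x555555554000.  If that value satisfies the MINPAGESIZE and
   alignment checks, it is accepted as the prelink/PIE displacement.  */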
342
343 /* Per pspace SVR4 specific data. */
344
345 struct svr4_info
346 {
347 CORE_ADDR debug_base; /* Base of dynamic linker structures. */
348
349 /* Validity flag for debug_loader_offset. */
350 int debug_loader_offset_p;
351
352 /* Load address for the dynamic linker, inferred. */
353 CORE_ADDR debug_loader_offset;
354
355 /* Name of the dynamic linker, valid if debug_loader_offset_p. */
356 char *debug_loader_name;
357
358 /* Load map address for the main executable. */
359 CORE_ADDR main_lm_addr;
360
361 CORE_ADDR interp_text_sect_low;
362 CORE_ADDR interp_text_sect_high;
363 CORE_ADDR interp_plt_sect_low;
364 CORE_ADDR interp_plt_sect_high;
365
366 /* Nonzero if the list of objects was last obtained from the target
367 via qXfer:libraries-svr4:read. */
368 int using_xfer;
369
370 /* Table of struct probe_and_action instances, used by the
371 probes-based interface to map breakpoint addresses to probes
372 and their associated actions. Lookup is performed using
373 probe_and_action->probe->address. */
374 htab_t probes_table;
375
376 /* List of objects loaded into the inferior, used by the probes-
377 based interface. */
378 struct so_list *solib_list;
379 };
380
381 /* Per-program-space data key. */
382 static const struct program_space_data *solib_svr4_pspace_data;
383
384 /* Free the probes table. */
385
386 static void
387 free_probes_table (struct svr4_info *info)
388 {
389 if (info->probes_table == NULL)
390 return;
391
392 htab_delete (info->probes_table);
393 info->probes_table = NULL;
394 }
395
396 /* Free the solib list. */
397
398 static void
399 free_solib_list (struct svr4_info *info)
400 {
401 svr4_free_library_list (&info->solib_list);
402 info->solib_list = NULL;
403 }
404
405 static void
406 svr4_pspace_data_cleanup (struct program_space *pspace, void *arg)
407 {
408 struct svr4_info *info = (struct svr4_info *) arg;
409
410 free_probes_table (info);
411 free_solib_list (info);
412
413 xfree (info);
414 }
415
416 /* Get the current svr4 data. If none is found yet, add it now. This
417 function always returns a valid object. */
418
419 static struct svr4_info *
420 get_svr4_info (void)
421 {
422 struct svr4_info *info;
423
424 info = (struct svr4_info *) program_space_data (current_program_space,
425 solib_svr4_pspace_data);
426 if (info != NULL)
427 return info;
428
429 info = XCNEW (struct svr4_info);
430 set_program_space_data (current_program_space, solib_svr4_pspace_data, info);
431 return info;
432 }
433
434 /* Local function prototypes */
435
436 static int match_main (const char *);
437
438 /* Read program header TYPE from inferior memory. The header is found
439 by scanning the OS auxiliary vector.
440
441 If TYPE == -1, return the program headers instead of the contents of
442 one program header.
443
444 Return a pointer to allocated memory holding the program header contents,
445 or NULL on failure. If successful, and unless P_SECT_SIZE is NULL, the
446 size of those contents is returned in P_SECT_SIZE. Likewise, the target
447 architecture size (32-bit or 64-bit) is returned in P_ARCH_SIZE and
448 the base address of the section is returned in BASE_ADDR. */
449
450 static gdb_byte *
451 read_program_header (int type, int *p_sect_size, int *p_arch_size,
452 CORE_ADDR *base_addr)
453 {
454 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
455 CORE_ADDR at_phdr, at_phent, at_phnum, pt_phdr = 0;
456 int arch_size, sect_size;
457 CORE_ADDR sect_addr;
458 gdb_byte *buf;
459 int pt_phdr_p = 0;
460
461 /* Get required auxv elements from target. */
462 if (target_auxv_search (&current_target, AT_PHDR, &at_phdr) <= 0)
463 return 0;
464 if (target_auxv_search (&current_target, AT_PHENT, &at_phent) <= 0)
465 return 0;
466 if (target_auxv_search (&current_target, AT_PHNUM, &at_phnum) <= 0)
467 return 0;
468 if (!at_phdr || !at_phnum)
469 return 0;
470
471 /* Determine ELF architecture type. */
472 if (at_phent == sizeof (Elf32_External_Phdr))
473 arch_size = 32;
474 else if (at_phent == sizeof (Elf64_External_Phdr))
475 arch_size = 64;
476 else
477 return 0;
478
479 /* Find the requested segment. */
480 if (type == -1)
481 {
482 sect_addr = at_phdr;
483 sect_size = at_phent * at_phnum;
484 }
485 else if (arch_size == 32)
486 {
487 Elf32_External_Phdr phdr;
488 int i;
489
490 /* Search for requested PHDR. */
491 for (i = 0; i < at_phnum; i++)
492 {
493 int p_type;
494
495 if (target_read_memory (at_phdr + i * sizeof (phdr),
496 (gdb_byte *)&phdr, sizeof (phdr)))
497 return 0;
498
499 p_type = extract_unsigned_integer ((gdb_byte *) phdr.p_type,
500 4, byte_order);
501
502 if (p_type == PT_PHDR)
503 {
504 pt_phdr_p = 1;
505 pt_phdr = extract_unsigned_integer ((gdb_byte *) phdr.p_vaddr,
506 4, byte_order);
507 }
508
509 if (p_type == type)
510 break;
511 }
512
513 if (i == at_phnum)
514 return 0;
515
516 /* Retrieve address and size. */
517 sect_addr = extract_unsigned_integer ((gdb_byte *)phdr.p_vaddr,
518 4, byte_order);
519 sect_size = extract_unsigned_integer ((gdb_byte *)phdr.p_memsz,
520 4, byte_order);
521 }
522 else
523 {
524 Elf64_External_Phdr phdr;
525 int i;
526
527 /* Search for requested PHDR. */
528 for (i = 0; i < at_phnum; i++)
529 {
530 int p_type;
531
532 if (target_read_memory (at_phdr + i * sizeof (phdr),
533 (gdb_byte *)&phdr, sizeof (phdr)))
534 return 0;
535
536 p_type = extract_unsigned_integer ((gdb_byte *) phdr.p_type,
537 4, byte_order);
538
539 if (p_type == PT_PHDR)
540 {
541 pt_phdr_p = 1;
542 pt_phdr = extract_unsigned_integer ((gdb_byte *) phdr.p_vaddr,
543 8, byte_order);
544 }
545
546 if (p_type == type)
547 break;
548 }
549
550 if (i == at_phnum)
551 return 0;
552
553 /* Retrieve address and size. */
554 sect_addr = extract_unsigned_integer ((gdb_byte *)phdr.p_vaddr,
555 8, byte_order);
556 sect_size = extract_unsigned_integer ((gdb_byte *)phdr.p_memsz,
557 8, byte_order);
558 }
559
560 /* PT_PHDR is optional, but we really need it
561 for PIE to make this work in general. */
562
563 if (pt_phdr_p)
564 {
565 /* at_phdr is the real address in memory. pt_phdr is what the program
566 header says it is. The relocation offset is the difference between the two. */
567 sect_addr = sect_addr + (at_phdr - pt_phdr);
568 }
569
570 /* Read in requested program header. */
571 buf = (gdb_byte *) xmalloc (sect_size);
572 if (target_read_memory (sect_addr, buf, sect_size))
573 {
574 xfree (buf);
575 return NULL;
576 }
577
578 if (p_arch_size)
579 *p_arch_size = arch_size;
580 if (p_sect_size)
581 *p_sect_size = sect_size;
582 if (base_addr)
583 *base_addr = sect_addr;
584
585 return buf;
586 }
587
588
589 /* Return program interpreter string. */
590 static char *
591 find_program_interpreter (void)
592 {
593 gdb_byte *buf = NULL;
594
595 /* If we have an exec_bfd, use its section table. */
596 if (exec_bfd
597 && bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
598 {
599 struct bfd_section *interp_sect;
600
601 interp_sect = bfd_get_section_by_name (exec_bfd, ".interp");
602 if (interp_sect != NULL)
603 {
604 int sect_size = bfd_section_size (exec_bfd, interp_sect);
605
606 buf = (gdb_byte *) xmalloc (sect_size);
607 bfd_get_section_contents (exec_bfd, interp_sect, buf, 0, sect_size);
608 }
609 }
610
611 /* If we didn't find it, use the target auxiliary vector. */
612 if (!buf)
613 buf = read_program_header (PT_INTERP, NULL, NULL, NULL);
614
615 return (char *) buf;
616 }
617
618
619 /* Scan for DESIRED_DYNTAG in .dynamic section of ABFD. If DESIRED_DYNTAG is
620 found, 1 is returned and the corresponding PTR is set. */
621
622 static int
623 scan_dyntag (const int desired_dyntag, bfd *abfd, CORE_ADDR *ptr,
624 CORE_ADDR *ptr_addr)
625 {
626 int arch_size, step, sect_size;
627 long current_dyntag;
628 CORE_ADDR dyn_ptr, dyn_addr;
629 gdb_byte *bufend, *bufstart, *buf;
630 Elf32_External_Dyn *x_dynp_32;
631 Elf64_External_Dyn *x_dynp_64;
632 struct bfd_section *sect;
633 struct target_section *target_section;
634
635 if (abfd == NULL)
636 return 0;
637
638 if (bfd_get_flavour (abfd) != bfd_target_elf_flavour)
639 return 0;
640
641 arch_size = bfd_get_arch_size (abfd);
642 if (arch_size == -1)
643 return 0;
644
645 /* Find the start address of the .dynamic section. */
646 sect = bfd_get_section_by_name (abfd, ".dynamic");
647 if (sect == NULL)
648 return 0;
649
650 for (target_section = current_target_sections->sections;
651 target_section < current_target_sections->sections_end;
652 target_section++)
653 if (sect == target_section->the_bfd_section)
654 break;
655 if (target_section < current_target_sections->sections_end)
656 dyn_addr = target_section->addr;
657 else
658 {
659 /* ABFD may come from OBJFILE acting only as a symbol file without being
660 loaded into the target (see add_symbol_file_command). In this case,
661 fall back to the file VMA address, without the possibility of
662 having the section relocated to its actual in-memory address. */
663
664 dyn_addr = bfd_section_vma (abfd, sect);
665 }
666
667 /* Read in .dynamic from the BFD. We will get the actual value
668 from memory later. */
669 sect_size = bfd_section_size (abfd, sect);
670 buf = bufstart = (gdb_byte *) alloca (sect_size);
671 if (!bfd_get_section_contents (abfd, sect,
672 buf, 0, sect_size))
673 return 0;
674
675 /* Iterate over BUF and scan for DYNTAG. If found, set PTR and return. */
676 step = (arch_size == 32) ? sizeof (Elf32_External_Dyn)
677 : sizeof (Elf64_External_Dyn);
678 for (bufend = buf + sect_size;
679 buf < bufend;
680 buf += step)
681 {
682 if (arch_size == 32)
683 {
684 x_dynp_32 = (Elf32_External_Dyn *) buf;
685 current_dyntag = bfd_h_get_32 (abfd, (bfd_byte *) x_dynp_32->d_tag);
686 dyn_ptr = bfd_h_get_32 (abfd, (bfd_byte *) x_dynp_32->d_un.d_ptr);
687 }
688 else
689 {
690 x_dynp_64 = (Elf64_External_Dyn *) buf;
691 current_dyntag = bfd_h_get_64 (abfd, (bfd_byte *) x_dynp_64->d_tag);
692 dyn_ptr = bfd_h_get_64 (abfd, (bfd_byte *) x_dynp_64->d_un.d_ptr);
693 }
694 if (current_dyntag == DT_NULL)
695 return 0;
696 if (current_dyntag == desired_dyntag)
697 {
698 /* If requested, try to read the runtime value of this .dynamic
699 entry. */
700 if (ptr)
701 {
702 struct type *ptr_type;
703 gdb_byte ptr_buf[8];
704 CORE_ADDR ptr_addr_1;
705
706 ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
707 ptr_addr_1 = dyn_addr + (buf - bufstart) + arch_size / 8;
708 if (target_read_memory (ptr_addr_1, ptr_buf, arch_size / 8) == 0)
709 dyn_ptr = extract_typed_address (ptr_buf, ptr_type);
710 *ptr = dyn_ptr;
711 if (ptr_addr)
712 *ptr_addr = dyn_addr + (buf - bufstart);
713 }
714 return 1;
715 }
716 }
717
718 return 0;
719 }
720
721 /* Scan for DESIRED_DYNTAG in .dynamic section of the target's main executable,
722 found by consulting the OS auxiliary vector. If DESIRED_DYNTAG is found, 1
723 is returned and the corresponding PTR is set. */
724
725 static int
726 scan_dyntag_auxv (const int desired_dyntag, CORE_ADDR *ptr,
727 CORE_ADDR *ptr_addr)
728 {
729 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
730 int sect_size, arch_size, step;
731 long current_dyntag;
732 CORE_ADDR dyn_ptr;
733 CORE_ADDR base_addr;
734 gdb_byte *bufend, *bufstart, *buf;
735
736 /* Read in .dynamic section. */
737 buf = bufstart = read_program_header (PT_DYNAMIC, &sect_size, &arch_size,
738 &base_addr);
739 if (!buf)
740 return 0;
741
742 /* Iterate over BUF and scan for DYNTAG. If found, set PTR and return. */
743 step = (arch_size == 32) ? sizeof (Elf32_External_Dyn)
744 : sizeof (Elf64_External_Dyn);
745 for (bufend = buf + sect_size;
746 buf < bufend;
747 buf += step)
748 {
749 if (arch_size == 32)
750 {
751 Elf32_External_Dyn *dynp = (Elf32_External_Dyn *) buf;
752
753 current_dyntag = extract_unsigned_integer ((gdb_byte *) dynp->d_tag,
754 4, byte_order);
755 dyn_ptr = extract_unsigned_integer ((gdb_byte *) dynp->d_un.d_ptr,
756 4, byte_order);
757 }
758 else
759 {
760 Elf64_External_Dyn *dynp = (Elf64_External_Dyn *) buf;
761
762 current_dyntag = extract_unsigned_integer ((gdb_byte *) dynp->d_tag,
763 8, byte_order);
764 dyn_ptr = extract_unsigned_integer ((gdb_byte *) dynp->d_un.d_ptr,
765 8, byte_order);
766 }
767 if (current_dyntag == DT_NULL)
768 break;
769
770 if (current_dyntag == desired_dyntag)
771 {
772 if (ptr)
773 *ptr = dyn_ptr;
774
775 if (ptr_addr)
776 *ptr_addr = base_addr + buf - bufstart;
777
778 xfree (bufstart);
779 return 1;
780 }
781 }
782
783 xfree (bufstart);
784 return 0;
785 }
786
787 /* Locate the base address of dynamic linker structs for SVR4 elf
788 targets.
789
790 For SVR4 elf targets the address of the dynamic linker's runtime
791 structure is contained within the dynamic info section in the
792 executable file. The dynamic section is also mapped into the
793 inferior address space. Because the runtime loader fills in the
794 real address before starting the inferior, we have to read in the
795 dynamic info section from the inferior address space.
796 If there are any errors while trying to find the address, we
797 silently return 0, otherwise the found address is returned. */
798
799 static CORE_ADDR
800 elf_locate_base (void)
801 {
802 struct bound_minimal_symbol msymbol;
803 CORE_ADDR dyn_ptr, dyn_ptr_addr;
804
805 /* Look for DT_MIPS_RLD_MAP first. MIPS executables use this
806 instead of DT_DEBUG, although they sometimes contain an unused
807 DT_DEBUG. */
808 if (scan_dyntag (DT_MIPS_RLD_MAP, exec_bfd, &dyn_ptr, NULL)
809 || scan_dyntag_auxv (DT_MIPS_RLD_MAP, &dyn_ptr, NULL))
810 {
811 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
812 gdb_byte *pbuf;
813 int pbuf_size = TYPE_LENGTH (ptr_type);
814
815 pbuf = (gdb_byte *) alloca (pbuf_size);
816 /* DT_MIPS_RLD_MAP contains a pointer to the address
817 of the dynamic link structure. */
818 if (target_read_memory (dyn_ptr, pbuf, pbuf_size))
819 return 0;
820 return extract_typed_address (pbuf, ptr_type);
821 }
822
823 /* Then check DT_MIPS_RLD_MAP_REL. MIPS executables now use this form
824 because of needing to support PIE. DT_MIPS_RLD_MAP will also exist
825 in non-PIE. */
826 if (scan_dyntag (DT_MIPS_RLD_MAP_REL, exec_bfd, &dyn_ptr, &dyn_ptr_addr)
827 || scan_dyntag_auxv (DT_MIPS_RLD_MAP_REL, &dyn_ptr, &dyn_ptr_addr))
828 {
829 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
830 gdb_byte *pbuf;
831 int pbuf_size = TYPE_LENGTH (ptr_type);
832
833 pbuf = (gdb_byte *) alloca (pbuf_size);
834 /* DT_MIPS_RLD_MAP_REL contains an offset from the address of the
835 DT slot to the address of the dynamic link structure. */
836 if (target_read_memory (dyn_ptr + dyn_ptr_addr, pbuf, pbuf_size))
837 return 0;
838 return extract_typed_address (pbuf, ptr_type);
839 }
840
841 /* Find DT_DEBUG. */
842 if (scan_dyntag (DT_DEBUG, exec_bfd, &dyn_ptr, NULL)
843 || scan_dyntag_auxv (DT_DEBUG, &dyn_ptr, NULL))
844 return dyn_ptr;
845
846 /* This may be a static executable. Look for the symbol
847 conventionally named _r_debug, as a last resort. */
848 msymbol = lookup_minimal_symbol ("_r_debug", NULL, symfile_objfile);
849 if (msymbol.minsym != NULL)
850 return BMSYMBOL_VALUE_ADDRESS (msymbol);
851
852 /* DT_DEBUG entry not found. */
853 return 0;
854 }
855
856 /* Locate the base address of dynamic linker structs.
857
858 For both the SunOS and SVR4 shared library implementations, if the
859 inferior executable has been linked dynamically, there is a single
860 address somewhere in the inferior's data space which is the key to
861 locating all of the dynamic linker's runtime structures. This
862 address is the value of the debug base symbol. The job of this
863 function is to find and return that address, or to return 0 if there
864 is no such address (the executable is statically linked for example).
865
866 For SunOS, the job is almost trivial, since the dynamic linker and
867 all of its structures are statically linked to the executable at
868 link time. Thus the symbol for the address we are looking for has
869 already been added to the minimal symbol table for the executable's
870 objfile at the time the symbol file's symbols were read, and all we
871 have to do is look it up there. Note that we explicitly do NOT want
872 to find the copies in the shared library.
873
874 The SVR4 version is a bit more complicated because the address
875 is contained somewhere in the dynamic info section. We have to go
876 to a lot more work to discover the address of the debug base symbol.
877 Because of this complexity, we cache the value we find and return that
878 value on subsequent invocations. Note there is no copy in the
879 executable symbol tables. */
880
881 static CORE_ADDR
882 locate_base (struct svr4_info *info)
883 {
884 /* Check to see if we have a currently valid address, and if so, avoid
885 doing all this work again and just return the cached address. If
886 we have no cached address, try to locate it in the dynamic info
887 section for ELF executables. There's no point in doing any of this
888 though if we don't have some link map offsets to work with. */
889
890 if (info->debug_base == 0 && svr4_have_link_map_offsets ())
891 info->debug_base = elf_locate_base ();
892 return info->debug_base;
893 }
894
895 /* Find the first element in the inferior's dynamic link map, and
896 return its address in the inferior. Return zero if the address
897 could not be determined.
898
899 FIXME: Perhaps we should validate the info somehow, perhaps by
900 checking r_version for a known version number, or r_state for
901 RT_CONSISTENT. */
902
903 static CORE_ADDR
904 solib_svr4_r_map (struct svr4_info *info)
905 {
906 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
907 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
908 CORE_ADDR addr = 0;
909
910 TRY
911 {
912 addr = read_memory_typed_address (info->debug_base + lmo->r_map_offset,
913 ptr_type);
914 }
915 CATCH (ex, RETURN_MASK_ERROR)
916 {
917 exception_print (gdb_stderr, ex);
918 }
919 END_CATCH
920
921 return addr;
922 }
923
924 /* Find r_brk from the inferior's debug base. */
925
926 static CORE_ADDR
927 solib_svr4_r_brk (struct svr4_info *info)
928 {
929 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
930 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
931
932 return read_memory_typed_address (info->debug_base + lmo->r_brk_offset,
933 ptr_type);
934 }
935
936 /* Find the link map for the dynamic linker (if it is not in the
937 normal list of loaded shared objects). */
938
939 static CORE_ADDR
940 solib_svr4_r_ldsomap (struct svr4_info *info)
941 {
942 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
943 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
944 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
945 ULONGEST version = 0;
946
947 TRY
948 {
949 /* Check version, and return zero if `struct r_debug' doesn't have
950 the r_ldsomap member. */
951 version
952 = read_memory_unsigned_integer (info->debug_base + lmo->r_version_offset,
953 lmo->r_version_size, byte_order);
954 }
955 CATCH (ex, RETURN_MASK_ERROR)
956 {
957 exception_print (gdb_stderr, ex);
958 }
959 END_CATCH
960
961 if (version < 2 || lmo->r_ldsomap_offset == -1)
962 return 0;
963
964 return read_memory_typed_address (info->debug_base + lmo->r_ldsomap_offset,
965 ptr_type);
966 }
967
968 /* On Solaris systems with some versions of the dynamic linker,
969 ld.so's l_name pointer points to the SONAME in the string table
970 rather than into writable memory. So that GDB can find shared
971 libraries when loading a core file generated by gcore, ensure that
972 memory areas containing the l_name string are saved in the core
973 file. */
974
975 static int
976 svr4_keep_data_in_core (CORE_ADDR vaddr, unsigned long size)
977 {
978 struct svr4_info *info;
979 CORE_ADDR ldsomap;
980 struct so_list *newobj;
981 struct cleanup *old_chain;
982 CORE_ADDR name_lm;
983
984 info = get_svr4_info ();
985
986 info->debug_base = 0;
987 locate_base (info);
988 if (!info->debug_base)
989 return 0;
990
991 ldsomap = solib_svr4_r_ldsomap (info);
992 if (!ldsomap)
993 return 0;
994
995 newobj = XCNEW (struct so_list);
996 old_chain = make_cleanup (xfree, newobj);
997 newobj->lm_info = lm_info_read (ldsomap);
998 make_cleanup (xfree, newobj->lm_info);
999 name_lm = newobj->lm_info ? newobj->lm_info->l_name : 0;
1000 do_cleanups (old_chain);
1001
1002 return (name_lm >= vaddr && name_lm < vaddr + size);
1003 }
1004
1005 /* Implement the "open_symbol_file_object" target_so_ops method.
1006
1007 If no open symbol file, attempt to locate and open the main symbol
1008 file. On SVR4 systems, this is the first link map entry. If its
1009 name is here, we can open it. Useful when attaching to a process
1010 without first loading its symbol file. */
1011
1012 static int
1013 open_symbol_file_object (void *from_ttyp)
1014 {
1015 CORE_ADDR lm, l_name;
1016 char *filename;
1017 int errcode;
1018 int from_tty = *(int *)from_ttyp;
1019 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
1020 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
1021 int l_name_size = TYPE_LENGTH (ptr_type);
1022 gdb_byte *l_name_buf = (gdb_byte *) xmalloc (l_name_size);
1023 struct cleanup *cleanups = make_cleanup (xfree, l_name_buf);
1024 struct svr4_info *info = get_svr4_info ();
1025
1026 if (symfile_objfile)
1027 if (!query (_("Attempt to reload symbols from process? ")))
1028 {
1029 do_cleanups (cleanups);
1030 return 0;
1031 }
1032
1033 /* Always locate the debug struct, in case it has moved. */
1034 info->debug_base = 0;
1035 if (locate_base (info) == 0)
1036 {
1037 do_cleanups (cleanups);
1038 return 0; /* failed somehow... */
1039 }
1040
1041 /* First link map member should be the executable. */
1042 lm = solib_svr4_r_map (info);
1043 if (lm == 0)
1044 {
1045 do_cleanups (cleanups);
1046 return 0; /* failed somehow... */
1047 }
1048
1049 /* Read address of name from target memory to GDB. */
1050 read_memory (lm + lmo->l_name_offset, l_name_buf, l_name_size);
1051
1052 /* Convert the address to host format. */
1053 l_name = extract_typed_address (l_name_buf, ptr_type);
1054
1055 if (l_name == 0)
1056 {
1057 do_cleanups (cleanups);
1058 return 0; /* No filename. */
1059 }
1060
1061 /* Now fetch the filename from target memory. */
1062 target_read_string (l_name, &filename, SO_NAME_MAX_PATH_SIZE - 1, &errcode);
1063 make_cleanup (xfree, filename);
1064
1065 if (errcode)
1066 {
1067 warning (_("failed to read exec filename from attached file: %s"),
1068 safe_strerror (errcode));
1069 do_cleanups (cleanups);
1070 return 0;
1071 }
1072
1073 /* Have a pathname: read the symbol file. */
1074 symbol_file_add_main (filename, from_tty);
1075
1076 do_cleanups (cleanups);
1077 return 1;
1078 }
1079
1080 /* Data exchange structure for the XML parser as returned by
1081 svr4_current_sos_via_xfer_libraries. */
1082
1083 struct svr4_library_list
1084 {
1085 struct so_list *head, **tailp;
1086
1087 /* Inferior address of struct link_map used for the main executable. It is
1088 NULL if not known. */
1089 CORE_ADDR main_lm;
1090 };
1091
1092 /* Implementation for target_so_ops.free_so. */
1093
1094 static void
1095 svr4_free_so (struct so_list *so)
1096 {
1097 xfree (so->lm_info);
1098 }
1099
1100 /* Implement target_so_ops.clear_so. */
1101
1102 static void
1103 svr4_clear_so (struct so_list *so)
1104 {
1105 if (so->lm_info != NULL)
1106 so->lm_info->l_addr_p = 0;
1107 }
1108
1109 /* Free so_list built so far (called via cleanup). */
1110
1111 static void
1112 svr4_free_library_list (void *p_list)
1113 {
1114 struct so_list *list = *(struct so_list **) p_list;
1115
1116 while (list != NULL)
1117 {
1118 struct so_list *next = list->next;
1119
1120 free_so (list);
1121 list = next;
1122 }
1123 }
1124
1125 /* Copy library list. */
1126
1127 static struct so_list *
1128 svr4_copy_library_list (struct so_list *src)
1129 {
1130 struct so_list *dst = NULL;
1131 struct so_list **link = &dst;
1132
1133 while (src != NULL)
1134 {
1135 struct so_list *newobj;
1136
1137 newobj = XNEW (struct so_list);
1138 memcpy (newobj, src, sizeof (struct so_list));
1139
1140 newobj->lm_info = XNEW (struct lm_info);
1141 memcpy (newobj->lm_info, src->lm_info, sizeof (struct lm_info));
1142
1143 newobj->next = NULL;
1144 *link = newobj;
1145 link = &newobj->next;
1146
1147 src = src->next;
1148 }
1149
1150 return dst;
1151 }
1152
1153 #ifdef HAVE_LIBEXPAT
1154
1155 #include "xml-support.h"
1156
1157 /* Handle the start of a <library> element. Note: new elements are added
1158 at the tail of the list, keeping the list in order. */
1159
1160 static void
1161 library_list_start_library (struct gdb_xml_parser *parser,
1162 const struct gdb_xml_element *element,
1163 void *user_data, VEC(gdb_xml_value_s) *attributes)
1164 {
1165 struct svr4_library_list *list = (struct svr4_library_list *) user_data;
1166 const char *name
1167 = (const char *) xml_find_attribute (attributes, "name")->value;
1168 ULONGEST *lmp
1169 = (ULONGEST *) xml_find_attribute (attributes, "lm")->value;
1170 ULONGEST *l_addrp
1171 = (ULONGEST *) xml_find_attribute (attributes, "l_addr")->value;
1172 ULONGEST *l_ldp
1173 = (ULONGEST *) xml_find_attribute (attributes, "l_ld")->value;
1174 struct so_list *new_elem;
1175
1176 new_elem = XCNEW (struct so_list);
1177 new_elem->lm_info = XCNEW (struct lm_info);
1178 new_elem->lm_info->lm_addr = *lmp;
1179 new_elem->lm_info->l_addr_inferior = *l_addrp;
1180 new_elem->lm_info->l_ld = *l_ldp;
1181
1182 strncpy (new_elem->so_name, name, sizeof (new_elem->so_name) - 1);
1183 new_elem->so_name[sizeof (new_elem->so_name) - 1] = 0;
1184 strcpy (new_elem->so_original_name, new_elem->so_name);
1185
1186 *list->tailp = new_elem;
1187 list->tailp = &new_elem->next;
1188 }
1189
1190 /* Handle the start of a <library-list-svr4> element. */
1191
1192 static void
1193 svr4_library_list_start_list (struct gdb_xml_parser *parser,
1194 const struct gdb_xml_element *element,
1195 void *user_data, VEC(gdb_xml_value_s) *attributes)
1196 {
1197 struct svr4_library_list *list = (struct svr4_library_list *) user_data;
1198 const char *version
1199 = (const char *) xml_find_attribute (attributes, "version")->value;
1200 struct gdb_xml_value *main_lm = xml_find_attribute (attributes, "main-lm");
1201
1202 if (strcmp (version, "1.0") != 0)
1203 gdb_xml_error (parser,
1204 _("SVR4 Library list has unsupported version \"%s\""),
1205 version);
1206
1207 if (main_lm)
1208 list->main_lm = *(ULONGEST *) main_lm->value;
1209 }
1210
1211 /* The allowed elements and attributes for an XML library list.
1212 The root element is a <library-list-svr4>. */
1213
1214 static const struct gdb_xml_attribute svr4_library_attributes[] =
1215 {
1216 { "name", GDB_XML_AF_NONE, NULL, NULL },
1217 { "lm", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1218 { "l_addr", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1219 { "l_ld", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1220 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1221 };
1222
1223 static const struct gdb_xml_element svr4_library_list_children[] =
1224 {
1225 {
1226 "library", svr4_library_attributes, NULL,
1227 GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL,
1228 library_list_start_library, NULL
1229 },
1230 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1231 };
1232
1233 static const struct gdb_xml_attribute svr4_library_list_attributes[] =
1234 {
1235 { "version", GDB_XML_AF_NONE, NULL, NULL },
1236 { "main-lm", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
1237 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1238 };
1239
1240 static const struct gdb_xml_element svr4_library_list_elements[] =
1241 {
1242 { "library-list-svr4", svr4_library_list_attributes, svr4_library_list_children,
1243 GDB_XML_EF_NONE, svr4_library_list_start_list, NULL },
1244 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1245 };
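
/* For illustration only: a document of the kind accepted here (as
   returned for qXfer:libraries-svr4:read) might look like the
   following, with hypothetical addresses:

     <library-list-svr4 version="1.0" main-lm="0x7ffff7ffe150">
       <library name="/lib64/libc.so.6" lm="0x7ffff7ffe700"
                l_addr="0x7ffff7a00000" l_ld="0x7ffff7dc0e80"/>
     </library-list-svr4>

   Each <library> element becomes one so_list entry via
   library_list_start_library above.  */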
1246
1247 /* Parse qXfer:libraries:read packet into *LIST.
1248
1249 Return 0 if the packet is not supported; *LIST is not modified in that
1250 case. Return 1 if *LIST contains the library list; it may be
1251 empty, and the caller is responsible for freeing all its entries. */
1252
1253 static int
1254 svr4_parse_libraries (const char *document, struct svr4_library_list *list)
1255 {
1256 struct cleanup *back_to = make_cleanup (svr4_free_library_list,
1257 &list->head);
1258
1259 memset (list, 0, sizeof (*list));
1260 list->tailp = &list->head;
1261 if (gdb_xml_parse_quick (_("target library list"), "library-list-svr4.dtd",
1262 svr4_library_list_elements, document, list) == 0)
1263 {
1264 /* Parsed successfully, keep the result. */
1265 discard_cleanups (back_to);
1266 return 1;
1267 }
1268
1269 do_cleanups (back_to);
1270 return 0;
1271 }
1272
1273 /* Attempt to get so_list from target via qXfer:libraries-svr4:read packet.
1274
1275 Return 0 if the packet is not supported; *LIST is not modified in that
1276 case. Return 1 if *LIST contains the library list; it may be
1277 empty, and the caller is responsible for freeing all its entries.
1278
1279 Note that ANNEX must be NULL if the remote does not explicitly allow
1280 qXfer:libraries-svr4:read packets with non-empty annexes. Support for
1281 this can be checked using target_augmented_libraries_svr4_read (). */
1282
1283 static int
1284 svr4_current_sos_via_xfer_libraries (struct svr4_library_list *list,
1285 const char *annex)
1286 {
1287 char *svr4_library_document;
1288 int result;
1289 struct cleanup *back_to;
1290
1291 gdb_assert (annex == NULL || target_augmented_libraries_svr4_read ());
1292
1293 /* Fetch the list of shared libraries. */
1294 svr4_library_document = target_read_stralloc (&current_target,
1295 TARGET_OBJECT_LIBRARIES_SVR4,
1296 annex);
1297 if (svr4_library_document == NULL)
1298 return 0;
1299
1300 back_to = make_cleanup (xfree, svr4_library_document);
1301 result = svr4_parse_libraries (svr4_library_document, list);
1302 do_cleanups (back_to);
1303
1304 return result;
1305 }
1306
1307 #else
1308
1309 static int
1310 svr4_current_sos_via_xfer_libraries (struct svr4_library_list *list,
1311 const char *annex)
1312 {
1313 return 0;
1314 }
1315
1316 #endif
1317
1318 /* If no shared library information is available from the dynamic
1319 linker, build a fallback list from other sources. */
1320
1321 static struct so_list *
1322 svr4_default_sos (void)
1323 {
1324 struct svr4_info *info = get_svr4_info ();
1325 struct so_list *newobj;
1326
1327 if (!info->debug_loader_offset_p)
1328 return NULL;
1329
1330 newobj = XCNEW (struct so_list);
1331
1332 newobj->lm_info = XCNEW (struct lm_info);
1333
1334 /* Nothing will ever check the other fields if we set l_addr_p. */
1335 newobj->lm_info->l_addr = info->debug_loader_offset;
1336 newobj->lm_info->l_addr_p = 1;
1337
1338 strncpy (newobj->so_name, info->debug_loader_name, SO_NAME_MAX_PATH_SIZE - 1);
1339 newobj->so_name[SO_NAME_MAX_PATH_SIZE - 1] = '\0';
1340 strcpy (newobj->so_original_name, newobj->so_name);
1341
1342 return newobj;
1343 }
1344
1345 /* Read the whole inferior libraries chain starting at address LM.
1346 Expect the previous entry of the first entry in the chain to be PREV_LM.
1347 Add the entries to the tail referenced by LINK_PTR_PTR. Ignore the
1348 first entry if IGNORE_FIRST and set global MAIN_LM_ADDR according
1349 to it. Returns nonzero upon success. If zero is returned the
1350 entries stored to LINK_PTR_PTR are still valid although they may
1351 represent only part of the inferior library list. */
1352
1353 static int
1354 svr4_read_so_list (CORE_ADDR lm, CORE_ADDR prev_lm,
1355 struct so_list ***link_ptr_ptr, int ignore_first)
1356 {
1357 CORE_ADDR first_l_name = 0;
1358 CORE_ADDR next_lm;
1359
1360 for (; lm != 0; prev_lm = lm, lm = next_lm)
1361 {
1362 struct so_list *newobj;
1363 struct cleanup *old_chain;
1364 int errcode;
1365 char *buffer;
1366
1367 newobj = XCNEW (struct so_list);
1368 old_chain = make_cleanup_free_so (newobj);
1369
1370 newobj->lm_info = lm_info_read (lm);
1371 if (newobj->lm_info == NULL)
1372 {
1373 do_cleanups (old_chain);
1374 return 0;
1375 }
1376
1377 next_lm = newobj->lm_info->l_next;
1378
1379 if (newobj->lm_info->l_prev != prev_lm)
1380 {
1381 warning (_("Corrupted shared library list: %s != %s"),
1382 paddress (target_gdbarch (), prev_lm),
1383 paddress (target_gdbarch (), newobj->lm_info->l_prev));
1384 do_cleanups (old_chain);
1385 return 0;
1386 }
1387
1388 /* For SVR4 versions, the first entry in the link map is for the
1389 inferior executable, so we must ignore it. For some versions of
1390 SVR4, it has no name. For others (Solaris 2.3 for example), it
1391 does have a name, so we can no longer use a missing name to
1392 decide when to ignore it. */
1393 if (ignore_first && newobj->lm_info->l_prev == 0)
1394 {
1395 struct svr4_info *info = get_svr4_info ();
1396
1397 first_l_name = newobj->lm_info->l_name;
1398 info->main_lm_addr = newobj->lm_info->lm_addr;
1399 do_cleanups (old_chain);
1400 continue;
1401 }
1402
1403 /* Extract this shared object's name. */
1404 target_read_string (newobj->lm_info->l_name, &buffer,
1405 SO_NAME_MAX_PATH_SIZE - 1, &errcode);
1406 if (errcode != 0)
1407 {
1408 /* If this entry's l_name address matches that of the
1409 inferior executable, then this is not a normal shared
1410 object, but (most likely) a vDSO. In this case, silently
1411 skip it; otherwise emit a warning. */
1412 if (first_l_name == 0 || newobj->lm_info->l_name != first_l_name)
1413 warning (_("Can't read pathname for load map: %s."),
1414 safe_strerror (errcode));
1415 do_cleanups (old_chain);
1416 continue;
1417 }
1418
1419 strncpy (newobj->so_name, buffer, SO_NAME_MAX_PATH_SIZE - 1);
1420 newobj->so_name[SO_NAME_MAX_PATH_SIZE - 1] = '\0';
1421 strcpy (newobj->so_original_name, newobj->so_name);
1422 xfree (buffer);
1423
1424 /* If this entry has no name, or its name matches the name
1425 for the main executable, don't include it in the list. */
1426 if (! newobj->so_name[0] || match_main (newobj->so_name))
1427 {
1428 do_cleanups (old_chain);
1429 continue;
1430 }
1431
1432 discard_cleanups (old_chain);
1433 newobj->next = 0;
1434 **link_ptr_ptr = newobj;
1435 *link_ptr_ptr = &newobj->next;
1436 }
1437
1438 return 1;
1439 }
1440
1441 /* Read the full list of currently loaded shared objects directly
1442 from the inferior, without referring to any libraries read and
1443 stored by the probes interface. Handle special cases relating
1444 to the first elements of the list. */
1445
1446 static struct so_list *
1447 svr4_current_sos_direct (struct svr4_info *info)
1448 {
1449 CORE_ADDR lm;
1450 struct so_list *head = NULL;
1451 struct so_list **link_ptr = &head;
1452 struct cleanup *back_to;
1453 int ignore_first;
1454 struct svr4_library_list library_list;
1455
1456 /* Fall back to manual examination of the target if the packet is not
1457 supported or gdbserver failed to find DT_DEBUG. gdb.server/solib-list.exp
1458 tests a case where gdbserver cannot find the shared libraries list while
1459 GDB itself is able to find it via SYMFILE_OBJFILE.
1460
1461 Unfortunately statically linked inferiors will also fall back through this
1462 suboptimal code path. */
1463
1464 info->using_xfer = svr4_current_sos_via_xfer_libraries (&library_list,
1465 NULL);
1466 if (info->using_xfer)
1467 {
1468 if (library_list.main_lm)
1469 info->main_lm_addr = library_list.main_lm;
1470
1471 return library_list.head ? library_list.head : svr4_default_sos ();
1472 }
1473
1474 /* Always locate the debug struct, in case it has moved. */
1475 info->debug_base = 0;
1476 locate_base (info);
1477
1478 /* If we can't find the dynamic linker's base structure, this
1479 must not be a dynamically linked executable. Hmm. */
1480 if (! info->debug_base)
1481 return svr4_default_sos ();
1482
1483 /* Assume that everything is a library if the dynamic loader was loaded
1484 late by a static executable. */
1485 if (exec_bfd && bfd_get_section_by_name (exec_bfd, ".dynamic") == NULL)
1486 ignore_first = 0;
1487 else
1488 ignore_first = 1;
1489
1490 back_to = make_cleanup (svr4_free_library_list, &head);
1491
1492 /* Walk the inferior's link map list, and build our list of
1493 `struct so_list' nodes. */
1494 lm = solib_svr4_r_map (info);
1495 if (lm)
1496 svr4_read_so_list (lm, 0, &link_ptr, ignore_first);
1497
1498 /* On Solaris, the dynamic linker is not in the normal list of
1499 shared objects, so make sure we pick it up too. Having
1500 symbol information for the dynamic linker is quite crucial
1501 for skipping dynamic linker resolver code. */
1502 lm = solib_svr4_r_ldsomap (info);
1503 if (lm)
1504 svr4_read_so_list (lm, 0, &link_ptr, 0);
1505
1506 discard_cleanups (back_to);
1507
1508 if (head == NULL)
1509 return svr4_default_sos ();
1510
1511 return head;
1512 }
1513
1514 /* Implement the main part of the "current_sos" target_so_ops
1515 method. */
1516
1517 static struct so_list *
1518 svr4_current_sos_1 (void)
1519 {
1520 struct svr4_info *info = get_svr4_info ();
1521
1522 /* If the solib list has been read and stored by the probes
1523 interface then we return a copy of the stored list. */
1524 if (info->solib_list != NULL)
1525 return svr4_copy_library_list (info->solib_list);
1526
1527 /* Otherwise obtain the solib list directly from the inferior. */
1528 return svr4_current_sos_direct (info);
1529 }
1530
1531 /* Implement the "current_sos" target_so_ops method. */
1532
1533 static struct so_list *
1534 svr4_current_sos (void)
1535 {
1536 struct so_list *so_head = svr4_current_sos_1 ();
1537 struct mem_range vsyscall_range;
1538
1539 /* Filter out the vDSO module, if present. Its symbol file would
1540 not be found on disk. The vDSO/vsyscall's OBJFILE is instead
1541 managed by symfile-mem.c:add_vsyscall_page. */
1542 if (gdbarch_vsyscall_range (target_gdbarch (), &vsyscall_range)
1543 && vsyscall_range.length != 0)
1544 {
1545 struct so_list **sop;
1546
1547 sop = &so_head;
1548 while (*sop != NULL)
1549 {
1550 struct so_list *so = *sop;
1551
1552 /* We can't simply match the vDSO by starting address alone,
1553 because lm_info->l_addr_inferior (and also l_addr) do not
1554 necessarily represent the real starting address of the
1555 ELF if the vDSO's ELF itself is "prelinked". The l_ld
1556 field (the ".dynamic" section of the shared object)
1557 always points at the absolute/resolved address though.
1558 So check whether that address is inside the vDSO's
1559 mapping instead.
1560
1561 E.g., on Linux 3.16 (x86_64) the vDSO is a regular
1562 0-based ELF, and we see:
1563
1564 (gdb) info auxv
1565 33 AT_SYSINFO_EHDR System-supplied DSO's ELF header 0x7ffff7ffb000
1566 (gdb) p/x *_r_debug.r_map.l_next
1567 $1 = {l_addr = 0x7ffff7ffb000, ..., l_ld = 0x7ffff7ffb318, ...}
1568
1569 And on Linux 2.6.32 (x86_64) we see:
1570
1571 (gdb) info auxv
1572 33 AT_SYSINFO_EHDR System-supplied DSO's ELF header 0x7ffff7ffe000
1573 (gdb) p/x *_r_debug.r_map.l_next
1574 $5 = {l_addr = 0x7ffff88fe000, ..., l_ld = 0x7ffff7ffe580, ... }
1575
1576 Dumping that vDSO shows:
1577
1578 (gdb) info proc mappings
1579 0x7ffff7ffe000 0x7ffff7fff000 0x1000 0 [vdso]
1580 (gdb) dump memory vdso.bin 0x7ffff7ffe000 0x7ffff7fff000
1581 # readelf -Wa vdso.bin
1582 [...]
1583 Entry point address: 0xffffffffff700700
1584 [...]
1585 Section Headers:
1586 [Nr] Name Type Address Off Size
1587 [ 0] NULL 0000000000000000 000000 000000
1588 [ 1] .hash HASH ffffffffff700120 000120 000038
1589 [ 2] .dynsym DYNSYM ffffffffff700158 000158 0000d8
1590 [...]
1591 [ 9] .dynamic DYNAMIC ffffffffff700580 000580 0000f0
1592 */
1593 if (address_in_mem_range (so->lm_info->l_ld, &vsyscall_range))
1594 {
1595 *sop = so->next;
1596 free_so (so);
1597 break;
1598 }
1599
1600 sop = &so->next;
1601 }
1602 }
1603
1604 return so_head;
1605 }
1606
1607 /* Get the address of the link_map for a given OBJFILE. */
1608
1609 CORE_ADDR
1610 svr4_fetch_objfile_link_map (struct objfile *objfile)
1611 {
1612 struct so_list *so;
1613 struct svr4_info *info = get_svr4_info ();
1614
1615 /* Cause svr4_current_sos() to be run if it hasn't been already. */
1616 if (info->main_lm_addr == 0)
1617 solib_add (NULL, 0, &current_target, auto_solib_add);
1618
1619 /* svr4_current_sos() will set main_lm_addr for the main executable. */
1620 if (objfile == symfile_objfile)
1621 return info->main_lm_addr;
1622
1623 /* The other link map addresses may be found by examining the list
1624 of shared libraries. */
1625 for (so = master_so_list (); so; so = so->next)
1626 if (so->objfile == objfile)
1627 return so->lm_info->lm_addr;
1628
1629 /* Not found! */
1630 return 0;
1631 }
1632
1633 /* On some systems, the only way to recognize the link map entry for
1634 the main executable file is by looking at its name. Return
1635 non-zero iff SONAME matches one of the known main executable names. */
1636
1637 static int
1638 match_main (const char *soname)
1639 {
1640 const char * const *mainp;
1641
1642 for (mainp = main_name_list; *mainp != NULL; mainp++)
1643 {
1644 if (strcmp (soname, *mainp) == 0)
1645 return (1);
1646 }
1647
1648 return (0);
1649 }
1650
1651 /* Return 1 if PC lies in the dynamic symbol resolution code of the
1652 SVR4 run time loader. */
1653
1654 int
1655 svr4_in_dynsym_resolve_code (CORE_ADDR pc)
1656 {
1657 struct svr4_info *info = get_svr4_info ();
1658
1659 return ((pc >= info->interp_text_sect_low
1660 && pc < info->interp_text_sect_high)
1661 || (pc >= info->interp_plt_sect_low
1662 && pc < info->interp_plt_sect_high)
1663 || in_plt_section (pc)
1664 || in_gnu_ifunc_stub (pc));
1665 }
1666
1667 /* Given an executable's ABFD and target, compute the entry-point
1668 address. */
1669
1670 static CORE_ADDR
1671 exec_entry_point (struct bfd *abfd, struct target_ops *targ)
1672 {
1673 CORE_ADDR addr;
1674
1675 /* KevinB wrote ... for most targets, the address returned by
1676 bfd_get_start_address() is the entry point for the start
1677 function. But, for some targets, bfd_get_start_address() returns
1678 the address of a function descriptor from which the entry point
1679 address may be extracted. This address is extracted by
1680 gdbarch_convert_from_func_ptr_addr(). The method
1681 gdbarch_convert_from_func_ptr_addr() is the merely the identify
1682 function for targets which don't use function descriptors. */
1683 addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
1684 bfd_get_start_address (abfd),
1685 targ);
1686 return gdbarch_addr_bits_remove (target_gdbarch (), addr);
1687 }
1688
1689 /* A probe and its associated action. */
1690
1691 struct probe_and_action
1692 {
1693 /* The probe. */
1694 struct probe *probe;
1695
1696 /* The relocated address of the probe. */
1697 CORE_ADDR address;
1698
1699 /* The action. */
1700 enum probe_action action;
1701 };
1702
1703 /* Returns a hash code for the probe_and_action referenced by p. */
1704
1705 static hashval_t
1706 hash_probe_and_action (const void *p)
1707 {
1708 const struct probe_and_action *pa = (const struct probe_and_action *) p;
1709
1710 return (hashval_t) pa->address;
1711 }
1712
1713 /* Returns non-zero if the probe_and_actions referenced by p1 and p2
1714 are equal. */
1715
1716 static int
1717 equal_probe_and_action (const void *p1, const void *p2)
1718 {
1719 const struct probe_and_action *pa1 = (const struct probe_and_action *) p1;
1720 const struct probe_and_action *pa2 = (const struct probe_and_action *) p2;
1721
1722 return pa1->address == pa2->address;
1723 }
1724
1725 /* Register a solib event probe and its associated action in the
1726 probes table. */
1727
1728 static void
1729 register_solib_event_probe (struct probe *probe, CORE_ADDR address,
1730 enum probe_action action)
1731 {
1732 struct svr4_info *info = get_svr4_info ();
1733 struct probe_and_action lookup, *pa;
1734 void **slot;
1735
1736 /* Create the probes table, if necessary. */
1737 if (info->probes_table == NULL)
1738 info->probes_table = htab_create_alloc (1, hash_probe_and_action,
1739 equal_probe_and_action,
1740 xfree, xcalloc, xfree);
1741
1742 lookup.probe = probe;
1743 lookup.address = address;
1744 slot = htab_find_slot (info->probes_table, &lookup, INSERT);
1745 gdb_assert (*slot == HTAB_EMPTY_ENTRY);
1746
1747 pa = XCNEW (struct probe_and_action);
1748 pa->probe = probe;
1749 pa->address = address;
1750 pa->action = action;
1751
1752 *slot = pa;
1753 }
1754
1755 /* Get the solib event probe at the specified location, and the
1756 action associated with it. Returns NULL if no solib event probe
1757 was found. */
1758
1759 static struct probe_and_action *
1760 solib_event_probe_at (struct svr4_info *info, CORE_ADDR address)
1761 {
1762 struct probe_and_action lookup;
1763 void **slot;
1764
1765 lookup.address = address;
1766 slot = htab_find_slot (info->probes_table, &lookup, NO_INSERT);
1767
1768 if (slot == NULL)
1769 return NULL;
1770
1771 return (struct probe_and_action *) *slot;
1772 }
1773
1774 /* Decide what action to take when the specified solib event probe is
1775 hit. */
1776
1777 static enum probe_action
1778 solib_event_probe_action (struct probe_and_action *pa)
1779 {
1780 enum probe_action action;
1781 unsigned probe_argc = 0;
1782 struct frame_info *frame = get_current_frame ();
1783
1784 action = pa->action;
1785 if (action == DO_NOTHING || action == PROBES_INTERFACE_FAILED)
1786 return action;
1787
1788 gdb_assert (action == FULL_RELOAD || action == UPDATE_OR_RELOAD);
1789
1790 /* Check that an appropriate number of arguments has been supplied.
1791 We expect:
1792 arg0: Lmid_t lmid (mandatory)
1793 arg1: struct r_debug *debug_base (mandatory)
1794 arg2: struct link_map *new (optional, for incremental updates) */
1795 TRY
1796 {
1797 probe_argc = get_probe_argument_count (pa->probe, frame);
1798 }
1799 CATCH (ex, RETURN_MASK_ERROR)
1800 {
1801 exception_print (gdb_stderr, ex);
1802 probe_argc = 0;
1803 }
1804 END_CATCH
1805
1806 /* If get_probe_argument_count throws an exception, probe_argc will
1807 be set to zero. However, if pa->probe does not have arguments,
1808 then get_probe_argument_count will succeed but probe_argc will
1809 also be zero. Both cases happen because of different things, but
1810 they are treated equally here: action will be set to
1811 PROBES_INTERFACE_FAILED. */
1812 if (probe_argc == 2)
1813 action = FULL_RELOAD;
1814 else if (probe_argc < 2)
1815 action = PROBES_INTERFACE_FAILED;
1816
1817 return action;
1818 }
1819
1820 /* Populate the shared object list by reading the entire list of
1821 shared objects from the inferior. Handle special cases relating
1822 to the first elements of the list. Returns nonzero on success. */
1823
1824 static int
1825 solist_update_full (struct svr4_info *info)
1826 {
1827 free_solib_list (info);
1828 info->solib_list = svr4_current_sos_direct (info);
1829
1830 return 1;
1831 }
1832
1833 /* Update the shared object list starting from the link-map entry
1834 passed by the linker in the probe's third argument. Returns
1835 nonzero if the list was successfully updated, or zero to indicate
1836 failure. */
1837
1838 static int
1839 solist_update_incremental (struct svr4_info *info, CORE_ADDR lm)
1840 {
1841 struct so_list *tail;
1842 CORE_ADDR prev_lm;
1843
1844 /* svr4_current_sos_direct contains logic to handle a number of
1845 special cases relating to the first elements of the list. To
1846 avoid duplicating this logic we defer to solist_update_full
1847 if the list is empty. */
1848 if (info->solib_list == NULL)
1849 return 0;
1850
1851 /* Fall back to a full update if we are using a remote target
1852 that does not support incremental transfers. */
1853 if (info->using_xfer && !target_augmented_libraries_svr4_read ())
1854 return 0;
1855
1856 /* Walk to the end of the list. */
1857 for (tail = info->solib_list; tail->next != NULL; tail = tail->next)
1858 /* Nothing. */;
1859 prev_lm = tail->lm_info->lm_addr;
1860
1861 /* Read the new objects. */
1862 if (info->using_xfer)
1863 {
1864 struct svr4_library_list library_list;
1865 char annex[64];
1866
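 /* Build the annex for the libraries-svr4 xfer request
    (qXfer:libraries-svr4:read on remote targets).  It ends up looking
    something like "start=7ffff7ffe190;prev=7ffff7ffd9c0" (illustrative
    values), asking the target for just the link-map entries from LM
    onward.  */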
1867 xsnprintf (annex, sizeof (annex), "start=%s;prev=%s",
1868 phex_nz (lm, sizeof (lm)),
1869 phex_nz (prev_lm, sizeof (prev_lm)));
1870 if (!svr4_current_sos_via_xfer_libraries (&library_list, annex))
1871 return 0;
1872
1873 tail->next = library_list.head;
1874 }
1875 else
1876 {
1877 struct so_list **link = &tail->next;
1878
1879 /* IGNORE_FIRST may safely be set to zero here because the
1880 above check and deferral to solist_update_full ensures
1881 that this call to svr4_read_so_list will never see the
1882 first element. */
1883 if (!svr4_read_so_list (lm, prev_lm, &link, 0))
1884 return 0;
1885 }
1886
1887 return 1;
1888 }
1889
1890 /* Disable the probes-based linker interface and revert to the
1891 original interface. We don't reset the breakpoints as the
1892 ones set up for the probes-based interface are adequate. */
1893
1894 static void
1895 disable_probes_interface_cleanup (void *arg)
1896 {
1897 struct svr4_info *info = get_svr4_info ();
1898
1899 warning (_("Probes-based dynamic linker interface failed.\n"
1900 "Reverting to original interface.\n"));
1901
1902 free_probes_table (info);
1903 free_solib_list (info);
1904 }
1905
1906 /* Update the solib list as appropriate when using the
1907 probes-based linker interface. Do nothing if using the
1908 standard interface. */
1909
1910 static void
1911 svr4_handle_solib_event (void)
1912 {
1913 struct svr4_info *info = get_svr4_info ();
1914 struct probe_and_action *pa;
1915 enum probe_action action;
1916 struct cleanup *old_chain, *usm_chain;
1917 struct value *val = NULL;
1918 CORE_ADDR pc, debug_base, lm = 0;
1919 struct frame_info *frame = get_current_frame ();
1920
1921 /* Do nothing if not using the probes interface. */
1922 if (info->probes_table == NULL)
1923 return;
1924
1925 /* If anything goes wrong we revert to the original linker
1926 interface. */
1927 old_chain = make_cleanup (disable_probes_interface_cleanup, NULL);
1928
1929 pc = regcache_read_pc (get_current_regcache ());
1930 pa = solib_event_probe_at (info, pc);
1931 if (pa == NULL)
1932 {
1933 do_cleanups (old_chain);
1934 return;
1935 }
1936
1937 action = solib_event_probe_action (pa);
1938 if (action == PROBES_INTERFACE_FAILED)
1939 {
1940 do_cleanups (old_chain);
1941 return;
1942 }
1943
1944 if (action == DO_NOTHING)
1945 {
1946 discard_cleanups (old_chain);
1947 return;
1948 }
1949
1950 /* evaluate_probe_argument looks up symbols in the dynamic linker
1951 using find_pc_section. find_pc_section is accelerated by a cache
1952 called the section map. The section map is invalidated every
1953 time a shared library is loaded or unloaded, and if the inferior
1954 is generating a lot of shared library events then the section map
1955 will be updated every time svr4_handle_solib_event is called.
1956 We called find_pc_section in svr4_create_solib_event_breakpoints,
1957 so we can guarantee that the dynamic linker's sections are in the
1958 section map. We can therefore inhibit section map updates across
1959 these calls to evaluate_probe_argument and save a lot of time. */
1960 inhibit_section_map_updates (current_program_space);
1961 usm_chain = make_cleanup (resume_section_map_updates_cleanup,
1962 current_program_space);
1963
1964 TRY
1965 {
1966 val = evaluate_probe_argument (pa->probe, 1, frame);
1967 }
1968 CATCH (ex, RETURN_MASK_ERROR)
1969 {
1970 exception_print (gdb_stderr, ex);
1971 val = NULL;
1972 }
1973 END_CATCH
1974
1975 if (val == NULL)
1976 {
1977 do_cleanups (old_chain);
1978 return;
1979 }
1980
1981 debug_base = value_as_address (val);
1982 if (debug_base == 0)
1983 {
1984 do_cleanups (old_chain);
1985 return;
1986 }
1987
1988 /* Always locate the debug struct, in case it moved. */
1989 info->debug_base = 0;
1990 if (locate_base (info) == 0)
1991 {
1992 do_cleanups (old_chain);
1993 return;
1994 }
1995
1996 /* GDB does not currently support libraries loaded via dlmopen
1997 into namespaces other than the initial one. We must ignore
1998 any namespace other than the initial namespace here until
1999 support for this is added to GDB. */
2000 if (debug_base != info->debug_base)
2001 action = DO_NOTHING;
2002
2003 if (action == UPDATE_OR_RELOAD)
2004 {
2005 TRY
2006 {
2007 val = evaluate_probe_argument (pa->probe, 2, frame);
2008 }
2009 CATCH (ex, RETURN_MASK_ERROR)
2010 {
2011 exception_print (gdb_stderr, ex);
2012 do_cleanups (old_chain);
2013 return;
2014 }
2015 END_CATCH
2016
2017 if (val != NULL)
2018 lm = value_as_address (val);
2019
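 /* A null or unreadable link-map argument means there is nothing to
    update incrementally, so fall back to reloading the whole list.  */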
2020 if (lm == 0)
2021 action = FULL_RELOAD;
2022 }
2023
2024 /* Resume section map updates. */
2025 do_cleanups (usm_chain);
2026
2027 if (action == UPDATE_OR_RELOAD)
2028 {
2029 if (!solist_update_incremental (info, lm))
2030 action = FULL_RELOAD;
2031 }
2032
2033 if (action == FULL_RELOAD)
2034 {
2035 if (!solist_update_full (info))
2036 {
2037 do_cleanups (old_chain);
2038 return;
2039 }
2040 }
2041
2042 discard_cleanups (old_chain);
2043 }
2044
2045 /* Helper function for svr4_update_solib_event_breakpoints. */
2046
2047 static int
2048 svr4_update_solib_event_breakpoint (struct breakpoint *b, void *arg)
2049 {
2050 struct bp_location *loc;
2051
2052 if (b->type != bp_shlib_event)
2053 {
2054 /* Continue iterating. */
2055 return 0;
2056 }
2057
2058 for (loc = b->loc; loc != NULL; loc = loc->next)
2059 {
2060 struct svr4_info *info;
2061 struct probe_and_action *pa;
2062
2063 info = ((struct svr4_info *)
2064 program_space_data (loc->pspace, solib_svr4_pspace_data));
2065 if (info == NULL || info->probes_table == NULL)
2066 continue;
2067
2068 pa = solib_event_probe_at (info, loc->address);
2069 if (pa == NULL)
2070 continue;
2071
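 /* Only the DO_NOTHING probes are optional: they require no list
    update and are only useful when the user asked to stop on solib
    events, so keep their breakpoints in sync with
    stop_on_solib_events.  The reload/update probes must stay enabled
    regardless, or GDB's library list would go stale.  */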
2072 if (pa->action == DO_NOTHING)
2073 {
2074 if (b->enable_state == bp_disabled && stop_on_solib_events)
2075 enable_breakpoint (b);
2076 else if (b->enable_state == bp_enabled && !stop_on_solib_events)
2077 disable_breakpoint (b);
2078 }
2079
2080 break;
2081 }
2082
2083 /* Continue iterating. */
2084 return 0;
2085 }
2086
2087 /* Enable or disable optional solib event breakpoints as appropriate.
2088 Called whenever stop_on_solib_events is changed. */
2089
2090 static void
2091 svr4_update_solib_event_breakpoints (void)
2092 {
2093 iterate_over_breakpoints (svr4_update_solib_event_breakpoint, NULL);
2094 }
2095
2096 /* Create and register solib event breakpoints. PROBES is an array
2097 of NUM_PROBES elements, each of which is a vector of probes. A
2098 solib event breakpoint will be created and registered for each
2099 probe. */
2100
2101 static void
2102 svr4_create_probe_breakpoints (struct gdbarch *gdbarch,
2103 VEC (probe_p) **probes,
2104 struct objfile *objfile)
2105 {
2106 int i;
2107
2108 for (i = 0; i < NUM_PROBES; i++)
2109 {
2110 enum probe_action action = probe_info[i].action;
2111 struct probe *probe;
2112 int ix;
2113
2114 for (ix = 0;
2115 VEC_iterate (probe_p, probes[i], ix, probe);
2116 ++ix)
2117 {
2118 CORE_ADDR address = get_probe_address (probe, objfile);
2119
2120 create_solib_event_breakpoint (gdbarch, address);
2121 register_solib_event_probe (probe, address, action);
2122 }
2123 }
2124
2125 svr4_update_solib_event_breakpoints ();
2126 }
2127
2128 /* Both the SunOS and the SVR4 dynamic linkers call a marker function
2129 before and after mapping and unmapping shared libraries. The sole
2130 purpose of this method is to allow debuggers to set a breakpoint so
2131 they can track these changes.
2132
2133 Some versions of the glibc dynamic linker contain named probes
2134 to allow more fine-grained stopping. Given the address of the
2135 original marker function, this function attempts to find these
2136 probes, and if found, sets breakpoints on those instead. If the
2137 probes aren't found, a single breakpoint is set on the original
2138 marker function. */
2139
2140 static void
2141 svr4_create_solib_event_breakpoints (struct gdbarch *gdbarch,
2142 CORE_ADDR address)
2143 {
2144 struct obj_section *os;
2145
2146 os = find_pc_section (address);
2147 if (os != NULL)
2148 {
2149 int with_prefix;
2150
2151 for (with_prefix = 0; with_prefix <= 1; with_prefix++)
2152 {
2153 VEC (probe_p) *probes[NUM_PROBES];
2154 int all_probes_found = 1;
2155 int checked_can_use_probe_arguments = 0;
2156 int i;
2157
2158 memset (probes, 0, sizeof (probes));
2159 for (i = 0; i < NUM_PROBES; i++)
2160 {
2161 const char *name = probe_info[i].name;
2162 struct probe *p;
2163 char buf[32];
2164
2165 /* Fedora 17 and Red Hat Enterprise Linux 6.2-6.4
2166 shipped with an early version of the probes code in
2167 which the probes' names were prefixed with "rtld_"
2168 and the "map_failed" probe did not exist. The
2169 locations of the probes are otherwise the same, so
2170 we check for probes with prefixed names if probes
2171 with unprefixed names are not present. */
2172 if (with_prefix)
2173 {
2174 xsnprintf (buf, sizeof (buf), "rtld_%s", name);
2175 name = buf;
2176 }
2177
2178 probes[i] = find_probes_in_objfile (os->objfile, "rtld", name);
2179
2180 /* The "map_failed" probe did not exist in early
2181 versions of the probes code in which the probes'
2182 names were prefixed with "rtld_". */
2183 if (strcmp (name, "rtld_map_failed") == 0)
2184 continue;
2185
2186 if (VEC_empty (probe_p, probes[i]))
2187 {
2188 all_probes_found = 0;
2189 break;
2190 }
2191
2192 /* Ensure probe arguments can be evaluated. */
2193 if (!checked_can_use_probe_arguments)
2194 {
2195 p = VEC_index (probe_p, probes[i], 0);
2196 if (!can_evaluate_probe_arguments (p))
2197 {
2198 all_probes_found = 0;
2199 break;
2200 }
2201 checked_can_use_probe_arguments = 1;
2202 }
2203 }
2204
2205 if (all_probes_found)
2206 svr4_create_probe_breakpoints (gdbarch, probes, os->objfile);
2207
2208 for (i = 0; i < NUM_PROBES; i++)
2209 VEC_free (probe_p, probes[i]);
2210
2211 if (all_probes_found)
2212 return;
2213 }
2214 }
2215
2216 create_solib_event_breakpoint (gdbarch, address);
2217 }
2218
2219 /* Helper for gdb_bfd_lookup_symbol: return nonzero if SYM matches the name in DATA and lives in a code or data section. */
2220
2221 static int
2222 cmp_name_and_sec_flags (const asymbol *sym, const void *data)
2223 {
2224 return (strcmp (sym->name, (const char *) data) == 0
2225 && (sym->section->flags & (SEC_CODE | SEC_DATA)) != 0);
2226 }

2227 /* Arrange for dynamic linker to hit breakpoint.
2228
2229 Both the SunOS and the SVR4 dynamic linkers have, as part of their
2230 debugger interface, support for arranging for the inferior to hit
2231 a breakpoint after mapping in the shared libraries. This function
2232 enables that breakpoint.
2233
2234 For SunOS, there is a special flag location (in_debugger) which we
2235 set to 1. When the dynamic linker sees this flag set, it will set
2236 a breakpoint at a location known only to itself, after saving the
2237 original contents of that place and the breakpoint address itself,
2238 in its own internal structures. When we resume the inferior, it
2239 will eventually take a SIGTRAP when it runs into the breakpoint.
2240 We handle this (in a different place) by restoring the contents of
2241 the breakpointed location (which is only known after it stops),
2242 chasing around to locate the shared libraries that have been
2243 loaded, then resuming.
2244
2245 For SVR4, the debugger interface structure contains a member (r_brk)
2246 which is statically initialized at the time the shared library is
2247 built, to the offset of a function (_r_debug_state) which is
2248 guaranteed to be called once before mapping in a library, and again when
2249 the mapping is complete. At the time we are examining this member,
2250 it contains only the unrelocated offset of the function, so we have
2251 to do our own relocation. Later, when the dynamic linker actually
2252 runs, it relocates r_brk to be the actual address of _r_debug_state().
2253
2254 The debugger interface structure also contains an enumeration which
2255 is set to either RT_ADD or RT_DELETE prior to changing the mapping,
2256 depending upon whether or not the library is being mapped or unmapped,
2257 and then set to RT_CONSISTENT after the library is mapped/unmapped. */
2258
2259 static int
2260 enable_break (struct svr4_info *info, int from_tty)
2261 {
2262 struct bound_minimal_symbol msymbol;
2263 const char * const *bkpt_namep;
2264 asection *interp_sect;
2265 char *interp_name;
2266 CORE_ADDR sym_addr;
2267
2268 info->interp_text_sect_low = info->interp_text_sect_high = 0;
2269 info->interp_plt_sect_low = info->interp_plt_sect_high = 0;
2270
2271 /* If we already have a shared library list in the target, and
2272 r_debug contains r_brk, set the breakpoint there - this should
2273 mean r_brk has already been relocated. Assume the dynamic linker
2274 is the object containing r_brk. */
2275
2276 solib_add (NULL, from_tty, &current_target, auto_solib_add);
2277 sym_addr = 0;
2278 if (info->debug_base && solib_svr4_r_map (info) != 0)
2279 sym_addr = solib_svr4_r_brk (info);
2280
2281 if (sym_addr != 0)
2282 {
2283 struct obj_section *os;
2284
2285 sym_addr = gdbarch_addr_bits_remove
2286 (target_gdbarch (), gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
2287 sym_addr,
2288 &current_target));
2289
2290 /* On at least some versions of Solaris there's a dynamic relocation
2291 on _r_debug.r_brk and SYM_ADDR may not be relocated yet, e.g., if
2292 we get control before the dynamic linker has self-relocated.
2293 Check if SYM_ADDR is in a known section; if it is, assume we can
2294 trust its value. This is just a heuristic though; it could go away
2295 or be replaced if it's getting in the way.
2296
2297 On ARM we need to know whether the ISA of rtld_db_dlactivity (or
2298 however it's spelled in your particular system) is ARM or Thumb.
2299 That knowledge is encoded in the address, if it's Thumb the low bit
2300 is 1. However, we've stripped that info above and it's not clear
2301 what all the consequences are of passing a non-addr_bits_remove'd
2302 address to svr4_create_solib_event_breakpoints. The call to
2303 find_pc_section verifies we know about the address and have some
2304 hope of computing the right kind of breakpoint to use (via
2305 symbol info). It does mean that GDB needs to be pointed at a
2306 non-stripped version of the dynamic linker in order to obtain
2307 information it already knows about. Sigh. */
2308
2309 os = find_pc_section (sym_addr);
2310 if (os != NULL)
2311 {
2312 /* Record the relocated start and end address of the dynamic linker
2313 text and plt section for svr4_in_dynsym_resolve_code. */
2314 bfd *tmp_bfd;
2315 CORE_ADDR load_addr;
2316
2317 tmp_bfd = os->objfile->obfd;
2318 load_addr = ANOFFSET (os->objfile->section_offsets,
2319 SECT_OFF_TEXT (os->objfile));
2320
2321 interp_sect = bfd_get_section_by_name (tmp_bfd, ".text");
2322 if (interp_sect)
2323 {
2324 info->interp_text_sect_low =
2325 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
2326 info->interp_text_sect_high =
2327 info->interp_text_sect_low
2328 + bfd_section_size (tmp_bfd, interp_sect);
2329 }
2330 interp_sect = bfd_get_section_by_name (tmp_bfd, ".plt");
2331 if (interp_sect)
2332 {
2333 info->interp_plt_sect_low =
2334 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
2335 info->interp_plt_sect_high =
2336 info->interp_plt_sect_low
2337 + bfd_section_size (tmp_bfd, interp_sect);
2338 }
2339
2340 svr4_create_solib_event_breakpoints (target_gdbarch (), sym_addr);
2341 return 1;
2342 }
2343 }
2344
2345 /* Find the program interpreter; if not found, warn the user and drop
2346 into the old breakpoint-at-symbol code. */
2347 interp_name = find_program_interpreter ();
2348 if (interp_name)
2349 {
2350 CORE_ADDR load_addr = 0;
2351 int load_addr_found = 0;
2352 int loader_found_in_list = 0;
2353 struct so_list *so;
2354 bfd *tmp_bfd = NULL;
2355 struct target_ops *tmp_bfd_target;
2356
2357 sym_addr = 0;
2358
2359 /* Now we need to figure out where the dynamic linker was
2360 loaded so that we can load its symbols and place a breakpoint
2361 in the dynamic linker itself.
2362
2363 This address is stored on the stack. However, I've been unable
2364 to find any magic formula to find it for Solaris (appears to
2365 be trivial on GNU/Linux). Therefore, we have to try an alternate
2366 mechanism to find the dynamic linker's base address. */
2367
2368 TRY
2369 {
2370 tmp_bfd = solib_bfd_open (interp_name);
2371 }
2372 CATCH (ex, RETURN_MASK_ALL)
2373 {
2374 }
2375 END_CATCH
2376
2377 if (tmp_bfd == NULL)
2378 goto bkpt_at_symbol;
2379
2380 /* Now convert the TMP_BFD into a target. That way both target
2381 and BFD operations can be used. */
2382 tmp_bfd_target = target_bfd_reopen (tmp_bfd);
2383 /* target_bfd_reopen acquired its own reference, so we can
2384 release ours now. */
2385 gdb_bfd_unref (tmp_bfd);
2386
2387 /* On a running target, we can get the dynamic linker's base
2388 address from the shared library table. */
2389 so = master_so_list ();
2390 while (so)
2391 {
2392 if (svr4_same_1 (interp_name, so->so_original_name))
2393 {
2394 load_addr_found = 1;
2395 loader_found_in_list = 1;
2396 load_addr = lm_addr_check (so, tmp_bfd);
2397 break;
2398 }
2399 so = so->next;
2400 }
2401
2402 /* If we were not able to find the base address of the loader
2403 from our so_list, then try using the AT_BASE auxiliary entry. */
2404 if (!load_addr_found)
2405 if (target_auxv_search (&current_target, AT_BASE, &load_addr) > 0)
2406 {
2407 int addr_bit = gdbarch_addr_bit (target_gdbarch ());
2408
2409 /* Ensure LOAD_ADDR has proper sign in its possible upper bits so
2410 that `+ load_addr' will wrap within the CORE_ADDR width instead of
2411 creating invalid addresses like 0x101234567 for 32bit inferiors on
2412 64bit GDB. */
2413
2414 if (addr_bit < (sizeof (CORE_ADDR) * HOST_CHAR_BIT))
2415 {
2416 CORE_ADDR space_size = (CORE_ADDR) 1 << addr_bit;
2417 CORE_ADDR tmp_entry_point = exec_entry_point (tmp_bfd,
2418 tmp_bfd_target);
2419
2420 gdb_assert (load_addr < space_size);
2421
2422 /* TMP_ENTRY_POINT exceeding SPACE_SIZE would be for prelinked
2423 64bit ld.so with a 32bit executable; it should not happen. */
2424
2425 if (tmp_entry_point < space_size
2426 && tmp_entry_point + load_addr >= space_size)
2427 load_addr -= space_size;
2428 }
2429
2430 load_addr_found = 1;
2431 }
2432
2433 /* Otherwise we find the dynamic linker's base address by examining
2434 the current pc (which should point at the entry point for the
2435 dynamic linker) and subtracting the offset of the entry point.
2436
2437 This is more fragile than the previous approaches, but is a good
2438 fallback method because it has actually been working well in
2439 most cases. */
2440 if (!load_addr_found)
2441 {
2442 struct regcache *regcache
2443 = get_thread_arch_regcache (inferior_ptid, target_gdbarch ());
2444
2445 load_addr = (regcache_read_pc (regcache)
2446 - exec_entry_point (tmp_bfd, tmp_bfd_target));
2447 }
2448
2449 if (!loader_found_in_list)
2450 {
2451 info->debug_loader_name = xstrdup (interp_name);
2452 info->debug_loader_offset_p = 1;
2453 info->debug_loader_offset = load_addr;
2454 solib_add (NULL, from_tty, &current_target, auto_solib_add);
2455 }
2456
2457 /* Record the relocated start and end address of the dynamic linker
2458 text and plt section for svr4_in_dynsym_resolve_code. */
2459 interp_sect = bfd_get_section_by_name (tmp_bfd, ".text");
2460 if (interp_sect)
2461 {
2462 info->interp_text_sect_low =
2463 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
2464 info->interp_text_sect_high =
2465 info->interp_text_sect_low
2466 + bfd_section_size (tmp_bfd, interp_sect);
2467 }
2468 interp_sect = bfd_get_section_by_name (tmp_bfd, ".plt");
2469 if (interp_sect)
2470 {
2471 info->interp_plt_sect_low =
2472 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
2473 info->interp_plt_sect_high =
2474 info->interp_plt_sect_low
2475 + bfd_section_size (tmp_bfd, interp_sect);
2476 }
2477
2478 /* Now try to set a breakpoint in the dynamic linker. */
2479 for (bkpt_namep = solib_break_names; *bkpt_namep != NULL; bkpt_namep++)
2480 {
2481 sym_addr = gdb_bfd_lookup_symbol (tmp_bfd, cmp_name_and_sec_flags,
2482 *bkpt_namep);
2483 if (sym_addr != 0)
2484 break;
2485 }
2486
2487 if (sym_addr != 0)
2488 /* Convert 'sym_addr' from a function pointer to an address.
2489 Because we pass tmp_bfd_target instead of the current
2490 target, this will always produce an unrelocated value. */
2491 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
2492 sym_addr,
2493 tmp_bfd_target);
2494
2495 /* We're done with both the temporary bfd and target. Closing
2496 the target closes the underlying bfd, because it holds the
2497 only remaining reference. */
2498 target_close (tmp_bfd_target);
2499
2500 if (sym_addr != 0)
2501 {
2502 svr4_create_solib_event_breakpoints (target_gdbarch (),
2503 load_addr + sym_addr);
2504 xfree (interp_name);
2505 return 1;
2506 }
2507
2508 /* For whatever reason we couldn't set a breakpoint in the dynamic
2509 linker. Warn and drop into the old code. */
2510 bkpt_at_symbol:
2511 xfree (interp_name);
2512 warning (_("Unable to find dynamic linker breakpoint function.\n"
2513 "GDB will be unable to debug shared library initializers\n"
2514 "and track explicitly loaded dynamic code."));
2515 }
2516
2517 /* Scan through the lists of symbols, trying to look up the symbol and
2518 set a breakpoint there. Terminate the loop when/if we succeed. */
2519
2520 for (bkpt_namep = solib_break_names; *bkpt_namep != NULL; bkpt_namep++)
2521 {
2522 msymbol = lookup_minimal_symbol (*bkpt_namep, NULL, symfile_objfile);
2523 if ((msymbol.minsym != NULL)
2524 && (BMSYMBOL_VALUE_ADDRESS (msymbol) != 0))
2525 {
2526 sym_addr = BMSYMBOL_VALUE_ADDRESS (msymbol);
2527 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
2528 sym_addr,
2529 &current_target);
2530 svr4_create_solib_event_breakpoints (target_gdbarch (), sym_addr);
2531 return 1;
2532 }
2533 }
2534
2535 if (interp_name != NULL && !current_inferior ()->attach_flag)
2536 {
2537 for (bkpt_namep = bkpt_names; *bkpt_namep != NULL; bkpt_namep++)
2538 {
2539 msymbol = lookup_minimal_symbol (*bkpt_namep, NULL, symfile_objfile);
2540 if ((msymbol.minsym != NULL)
2541 && (BMSYMBOL_VALUE_ADDRESS (msymbol) != 0))
2542 {
2543 sym_addr = BMSYMBOL_VALUE_ADDRESS (msymbol);
2544 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
2545 sym_addr,
2546 &current_target);
2547 svr4_create_solib_event_breakpoints (target_gdbarch (), sym_addr);
2548 return 1;
2549 }
2550 }
2551 }
2552 return 0;
2553 }
2554
2555 /* Implement the "special_symbol_handling" target_so_ops method. */
2556
2557 static void
2558 svr4_special_symbol_handling (void)
2559 {
2560 /* Nothing to do. */
2561 }
2562
2563 /* Read the ELF program headers from ABFD. Return the contents and
2564 set *PHDRS_SIZE to the size of the program headers. */
2565
2566 static gdb_byte *
2567 read_program_headers_from_bfd (bfd *abfd, int *phdrs_size)
2568 {
2569 Elf_Internal_Ehdr *ehdr;
2570 gdb_byte *buf;
2571
2572 ehdr = elf_elfheader (abfd);
2573
2574 *phdrs_size = ehdr->e_phnum * ehdr->e_phentsize;
2575 if (*phdrs_size == 0)
2576 return NULL;
2577
2578 buf = (gdb_byte *) xmalloc (*phdrs_size);
2579 if (bfd_seek (abfd, ehdr->e_phoff, SEEK_SET) != 0
2580 || bfd_bread (buf, *phdrs_size, abfd) != *phdrs_size)
2581 {
2582 xfree (buf);
2583 return NULL;
2584 }
2585
2586 return buf;
2587 }
2588
2589 /* Return 1 and fill *DISPLACEMENTP with detected PIE offset of inferior
2590 exec_bfd. Otherwise return 0.
2591
2592 We relocate all of the sections by the same amount. This
2593 behavior is mandated by recent editions of the System V ABI.
2594 According to the System V Application Binary Interface,
2595 Edition 4.1, page 5-5:
2596
2597 ... Though the system chooses virtual addresses for
2598 individual processes, it maintains the segments' relative
2599 positions. Because position-independent code uses relative
2600 addressing between segments, the difference between
2601 virtual addresses in memory must match the difference
2602 between virtual addresses in the file. The difference
2603 between the virtual address of any segment in memory and
2604 the corresponding virtual address in the file is thus a
2605 single constant value for any one executable or shared
2606 object in a given process. This difference is the base
2607 address. One use of the base address is to relocate the
2608 memory image of the program during dynamic linking.
2609
2610 The same language also appears in Edition 4.0 of the System V
2611 ABI and is left unspecified in some of the earlier editions.
2612
2613 Decide if the objfile needs to be relocated. As indicated above, we will
2614 only be here when execution is stopped. But during attachment the PC can be at
2615 an arbitrary address, so regcache_read_pc can be misleading (contrary to
2616 the auxv AT_ENTRY value). Moreover, for an executable with an interpreter section
2617 regcache_read_pc would point to the interpreter and not the main executable.
2618
2619 So, to summarize, relocations are necessary when the start address obtained
2620 from the executable is different from the address in auxv AT_ENTRY entry.
2621
2622 [ The astute reader will note that we also test to make sure that
2623 the executable in question has the DYNAMIC flag set. It is my
2624 opinion that this test is unnecessary (undesirable even). It
2625 was added to avoid inadvertent relocation of an executable
2626 whose e_type member in the ELF header is not ET_DYN. There may
2627 be a time in the future when it is desirable to do relocations
2628 on other types of files as well in which case this condition
2629 should either be removed or modified to accommodate the new file
2630 type. - Kevin, Nov 2000. ] */
2631
2632 static int
2633 svr4_exec_displacement (CORE_ADDR *displacementp)
2634 {
2635 /* ENTRY_POINT is a possible function descriptor - before
2636 a call to gdbarch_convert_from_func_ptr_addr. */
2637 CORE_ADDR entry_point, exec_displacement;
2638
2639 if (exec_bfd == NULL)
2640 return 0;
2641
2642 /* Therefore for ELF it is ET_EXEC and not ET_DYN. Both shared libraries
2643 being executed themselves and PIE (Position Independent Executable)
2644 executables are ET_DYN. */
2645
2646 if ((bfd_get_file_flags (exec_bfd) & DYNAMIC) == 0)
2647 return 0;
2648
2649 if (target_auxv_search (&current_target, AT_ENTRY, &entry_point) <= 0)
2650 return 0;
2651
2652 exec_displacement = entry_point - bfd_get_start_address (exec_bfd);
2653
2654 /* Verify the EXEC_DISPLACEMENT candidate complies with the required page
2655 alignment. It is cheaper than the program headers comparison below. */
2656
2657 if (bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
2658 {
2659 const struct elf_backend_data *elf = get_elf_backend_data (exec_bfd);
2660
2661 /* p_align of PT_LOAD segments does not specify any alignment but
2662 only congruency of addresses:
2663 p_offset % p_align == p_vaddr % p_align
2664 The kernel is free to load the executable with lower alignment. */
2665
2666 if ((exec_displacement & (elf->minpagesize - 1)) != 0)
2667 return 0;
2668 }
2669
2670 /* Verify that the auxiliary vector describes the same file as exec_bfd, by
2671 comparing their program headers. If the program headers in the auxiliary
2672 vector do not match the program headers in the executable, then we are
2673 looking at a different file than the one used by the kernel - for
2674 instance, "gdb program" connected to "gdbserver :PORT ld.so program". */
2675
2676 if (bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
2677 {
2678 /* Be optimistic and clear OK only if GDB was able to verify the headers
2679 really do not match. */
2680 int phdrs_size, phdrs2_size, ok = 1;
2681 gdb_byte *buf, *buf2;
2682 int arch_size;
2683
2684 buf = read_program_header (-1, &phdrs_size, &arch_size, NULL);
2685 buf2 = read_program_headers_from_bfd (exec_bfd, &phdrs2_size);
2686 if (buf != NULL && buf2 != NULL)
2687 {
2688 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
2689
2690 /* We are dealing with three different addresses. EXEC_BFD
2691 represents the current address in the on-disk file. Target memory content
2692 may differ from EXEC_BFD as the file may have been prelinked
2693 to a different address after the executable has been loaded.
2694 Moreover the address of placement in target memory can be
2695 different from what the program headers in target memory say -
2696 this is the goal of PIE.
2697
2698 Detected DISPLACEMENT covers both the offsets of PIE placement and
2699 possible new prelink performed after start of the program. Here
2700 relocate BUF and BUF2 just by the EXEC_BFD vs. target memory
2701 content offset for the verification purpose. */
2702
2703 if (phdrs_size != phdrs2_size
2704 || bfd_get_arch_size (exec_bfd) != arch_size)
2705 ok = 0;
2706 else if (arch_size == 32
2707 && phdrs_size >= sizeof (Elf32_External_Phdr)
2708 && phdrs_size % sizeof (Elf32_External_Phdr) == 0)
2709 {
2710 Elf_Internal_Ehdr *ehdr2 = elf_tdata (exec_bfd)->elf_header;
2711 Elf_Internal_Phdr *phdr2 = elf_tdata (exec_bfd)->phdr;
2712 CORE_ADDR displacement = 0;
2713 int i;
2714
2715 /* DISPLACEMENT could be found more easily by the difference of
2716 ehdr2->e_entry. But we haven't read the ehdr yet, and we
2717 already have enough information to compute that displacement
2718 with what we've read. */
2719
2720 for (i = 0; i < ehdr2->e_phnum; i++)
2721 if (phdr2[i].p_type == PT_LOAD)
2722 {
2723 Elf32_External_Phdr *phdrp;
2724 gdb_byte *buf_vaddr_p, *buf_paddr_p;
2725 CORE_ADDR vaddr, paddr;
2726 CORE_ADDR displacement_vaddr = 0;
2727 CORE_ADDR displacement_paddr = 0;
2728
2729 phdrp = &((Elf32_External_Phdr *) buf)[i];
2730 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
2731 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
2732
2733 vaddr = extract_unsigned_integer (buf_vaddr_p, 4,
2734 byte_order);
2735 displacement_vaddr = vaddr - phdr2[i].p_vaddr;
2736
2737 paddr = extract_unsigned_integer (buf_paddr_p, 4,
2738 byte_order);
2739 displacement_paddr = paddr - phdr2[i].p_paddr;
2740
2741 if (displacement_vaddr == displacement_paddr)
2742 displacement = displacement_vaddr;
2743
2744 break;
2745 }
2746
2747 /* Now compare BUF and BUF2 with optional DISPLACEMENT. */
2748
2749 for (i = 0; i < phdrs_size / sizeof (Elf32_External_Phdr); i++)
2750 {
2751 Elf32_External_Phdr *phdrp;
2752 Elf32_External_Phdr *phdr2p;
2753 gdb_byte *buf_vaddr_p, *buf_paddr_p;
2754 CORE_ADDR vaddr, paddr;
2755 asection *plt2_asect;
2756
2757 phdrp = &((Elf32_External_Phdr *) buf)[i];
2758 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
2759 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
2760 phdr2p = &((Elf32_External_Phdr *) buf2)[i];
2761
2762 /* PT_GNU_STACK is an exception: it is never relocated by
2763 prelink as its addresses are always zero. */
2764
2765 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2766 continue;
2767
2768 /* Check also other adjustment combinations - PR 11786. */
2769
2770 vaddr = extract_unsigned_integer (buf_vaddr_p, 4,
2771 byte_order);
2772 vaddr -= displacement;
2773 store_unsigned_integer (buf_vaddr_p, 4, byte_order, vaddr);
2774
2775 paddr = extract_unsigned_integer (buf_paddr_p, 4,
2776 byte_order);
2777 paddr -= displacement;
2778 store_unsigned_integer (buf_paddr_p, 4, byte_order, paddr);
2779
2780 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2781 continue;
2782
2783 /* Strip modifies the flags and alignment of PT_GNU_RELRO.
2784 CentOS-5 has problems with filesz, memsz as well.
2785 See PR 11786. */
2786 if (phdr2[i].p_type == PT_GNU_RELRO)
2787 {
2788 Elf32_External_Phdr tmp_phdr = *phdrp;
2789 Elf32_External_Phdr tmp_phdr2 = *phdr2p;
2790
2791 memset (tmp_phdr.p_filesz, 0, 4);
2792 memset (tmp_phdr.p_memsz, 0, 4);
2793 memset (tmp_phdr.p_flags, 0, 4);
2794 memset (tmp_phdr.p_align, 0, 4);
2795 memset (tmp_phdr2.p_filesz, 0, 4);
2796 memset (tmp_phdr2.p_memsz, 0, 4);
2797 memset (tmp_phdr2.p_flags, 0, 4);
2798 memset (tmp_phdr2.p_align, 0, 4);
2799
2800 if (memcmp (&tmp_phdr, &tmp_phdr2, sizeof (tmp_phdr))
2801 == 0)
2802 continue;
2803 }
2804
2805 /* prelink can convert .plt SHT_NOBITS to SHT_PROGBITS. */
2806 plt2_asect = bfd_get_section_by_name (exec_bfd, ".plt");
2807 if (plt2_asect)
2808 {
2809 int content2;
2810 gdb_byte *buf_filesz_p = (gdb_byte *) &phdrp->p_filesz;
2811 CORE_ADDR filesz;
2812
2813 content2 = (bfd_get_section_flags (exec_bfd, plt2_asect)
2814 & SEC_HAS_CONTENTS) != 0;
2815
2816 filesz = extract_unsigned_integer (buf_filesz_p, 4,
2817 byte_order);
2818
2819 /* PLT2_ASECT is from on-disk file (exec_bfd) while
2820 FILESZ is from the in-memory image. */
2821 if (content2)
2822 filesz += bfd_get_section_size (plt2_asect);
2823 else
2824 filesz -= bfd_get_section_size (plt2_asect);
2825
2826 store_unsigned_integer (buf_filesz_p, 4, byte_order,
2827 filesz);
2828
2829 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2830 continue;
2831 }
2832
2833 ok = 0;
2834 break;
2835 }
2836 }
2837 else if (arch_size == 64
2838 && phdrs_size >= sizeof (Elf64_External_Phdr)
2839 && phdrs_size % sizeof (Elf64_External_Phdr) == 0)
2840 {
2841 Elf_Internal_Ehdr *ehdr2 = elf_tdata (exec_bfd)->elf_header;
2842 Elf_Internal_Phdr *phdr2 = elf_tdata (exec_bfd)->phdr;
2843 CORE_ADDR displacement = 0;
2844 int i;
2845
2846 /* DISPLACEMENT could be found more easily by the difference of
2847 ehdr2->e_entry. But we haven't read the ehdr yet, and we
2848 already have enough information to compute that displacement
2849 with what we've read. */
2850
2851 for (i = 0; i < ehdr2->e_phnum; i++)
2852 if (phdr2[i].p_type == PT_LOAD)
2853 {
2854 Elf64_External_Phdr *phdrp;
2855 gdb_byte *buf_vaddr_p, *buf_paddr_p;
2856 CORE_ADDR vaddr, paddr;
2857 CORE_ADDR displacement_vaddr = 0;
2858 CORE_ADDR displacement_paddr = 0;
2859
2860 phdrp = &((Elf64_External_Phdr *) buf)[i];
2861 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
2862 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
2863
2864 vaddr = extract_unsigned_integer (buf_vaddr_p, 8,
2865 byte_order);
2866 displacement_vaddr = vaddr - phdr2[i].p_vaddr;
2867
2868 paddr = extract_unsigned_integer (buf_paddr_p, 8,
2869 byte_order);
2870 displacement_paddr = paddr - phdr2[i].p_paddr;
2871
2872 if (displacement_vaddr == displacement_paddr)
2873 displacement = displacement_vaddr;
2874
2875 break;
2876 }
2877
2878 /* Now compare BUF and BUF2 with optional DISPLACEMENT. */
2879
2880 for (i = 0; i < phdrs_size / sizeof (Elf64_External_Phdr); i++)
2881 {
2882 Elf64_External_Phdr *phdrp;
2883 Elf64_External_Phdr *phdr2p;
2884 gdb_byte *buf_vaddr_p, *buf_paddr_p;
2885 CORE_ADDR vaddr, paddr;
2886 asection *plt2_asect;
2887
2888 phdrp = &((Elf64_External_Phdr *) buf)[i];
2889 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
2890 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
2891 phdr2p = &((Elf64_External_Phdr *) buf2)[i];
2892
2893 /* PT_GNU_STACK is an exception: it is never relocated by
2894 prelink as its addresses are always zero. */
2895
2896 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2897 continue;
2898
2899 /* Check also other adjustment combinations - PR 11786. */
2900
2901 vaddr = extract_unsigned_integer (buf_vaddr_p, 8,
2902 byte_order);
2903 vaddr -= displacement;
2904 store_unsigned_integer (buf_vaddr_p, 8, byte_order, vaddr);
2905
2906 paddr = extract_unsigned_integer (buf_paddr_p, 8,
2907 byte_order);
2908 paddr -= displacement;
2909 store_unsigned_integer (buf_paddr_p, 8, byte_order, paddr);
2910
2911 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2912 continue;
2913
2914 /* Strip modifies the flags and alignment of PT_GNU_RELRO.
2915 CentOS-5 has problems with filesz, memsz as well.
2916 See PR 11786. */
2917 if (phdr2[i].p_type == PT_GNU_RELRO)
2918 {
2919 Elf64_External_Phdr tmp_phdr = *phdrp;
2920 Elf64_External_Phdr tmp_phdr2 = *phdr2p;
2921
2922 memset (tmp_phdr.p_filesz, 0, 8);
2923 memset (tmp_phdr.p_memsz, 0, 8);
2924 memset (tmp_phdr.p_flags, 0, 4);
2925 memset (tmp_phdr.p_align, 0, 8);
2926 memset (tmp_phdr2.p_filesz, 0, 8);
2927 memset (tmp_phdr2.p_memsz, 0, 8);
2928 memset (tmp_phdr2.p_flags, 0, 4);
2929 memset (tmp_phdr2.p_align, 0, 8);
2930
2931 if (memcmp (&tmp_phdr, &tmp_phdr2, sizeof (tmp_phdr))
2932 == 0)
2933 continue;
2934 }
2935
2936 /* prelink can convert .plt SHT_NOBITS to SHT_PROGBITS. */
2937 plt2_asect = bfd_get_section_by_name (exec_bfd, ".plt");
2938 if (plt2_asect)
2939 {
2940 int content2;
2941 gdb_byte *buf_filesz_p = (gdb_byte *) &phdrp->p_filesz;
2942 CORE_ADDR filesz;
2943
2944 content2 = (bfd_get_section_flags (exec_bfd, plt2_asect)
2945 & SEC_HAS_CONTENTS) != 0;
2946
2947 filesz = extract_unsigned_integer (buf_filesz_p, 8,
2948 byte_order);
2949
2950 /* PLT2_ASECT is from on-disk file (exec_bfd) while
2951 FILESZ is from the in-memory image. */
2952 if (content2)
2953 filesz += bfd_get_section_size (plt2_asect);
2954 else
2955 filesz -= bfd_get_section_size (plt2_asect);
2956
2957 store_unsigned_integer (buf_filesz_p, 8, byte_order,
2958 filesz);
2959
2960 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2961 continue;
2962 }
2963
2964 ok = 0;
2965 break;
2966 }
2967 }
2968 else
2969 ok = 0;
2970 }
2971
2972 xfree (buf);
2973 xfree (buf2);
2974
2975 if (!ok)
2976 return 0;
2977 }
2978
2979 if (info_verbose)
2980 {
2981 /* It can be printed repeatedly as there is no easy way to check
2982 whether the executable symbols/file have already been relocated by
2983 the displacement. */
2984
2985 printf_unfiltered (_("Using PIE (Position Independent Executable) "
2986 "displacement %s for \"%s\".\n"),
2987 paddress (target_gdbarch (), exec_displacement),
2988 bfd_get_filename (exec_bfd));
2989 }
2990
2991 *displacementp = exec_displacement;
2992 return 1;
2993 }
2994
2995 /* Relocate the main executable. This function should be called upon
2996 stopping the inferior process at the entry point to the program.
2997 The entry point from BFD is compared to the AT_ENTRY of AUXV and if they are
2998 different, the main executable is relocated by the proper amount. */
2999
3000 static void
3001 svr4_relocate_main_executable (void)
3002 {
3003 CORE_ADDR displacement;
3004
3005 /* If we are re-running this executable, SYMFILE_OBJFILE->SECTION_OFFSETS
3006 probably contains the offsets computed using the PIE displacement
3007 from the previous run, which of course are irrelevant for this run.
3008 So we need to determine the new PIE displacement and recompute the
3009 section offsets accordingly, even if SYMFILE_OBJFILE->SECTION_OFFSETS
3010 already contains pre-computed offsets.
3011
3012 If we cannot compute the PIE displacement, either:
3013
3014 - The executable is not PIE.
3015
3016 - SYMFILE_OBJFILE does not match the executable started in the target.
3017 This can happen for main executable symbols loaded at the host while
3018 `ld.so --ld-args main-executable' is loaded in the target.
3019
3020 Then we leave the section offsets untouched and use them as is for
3021 this run. Either:
3022
3023 - These section offsets were properly reset earlier, and thus
3024 already contain the correct values. This can happen for instance
3025 when reconnecting via the remote protocol to a target that supports
3026 the `qOffsets' packet.
3027
3028 - The section offsets were not reset earlier, and the best we can
3029 hope is that the old offsets are still applicable to the new run. */
3030
3031 if (! svr4_exec_displacement (&displacement))
3032 return;
3033
3034 /* Even DISPLACEMENT 0 is a valid new difference of in-memory vs. in-file
3035 addresses. */
3036
3037 if (symfile_objfile)
3038 {
3039 struct section_offsets *new_offsets;
3040 int i;
3041
3042 new_offsets = XALLOCAVEC (struct section_offsets,
3043 symfile_objfile->num_sections);
3044
3045 for (i = 0; i < symfile_objfile->num_sections; i++)
3046 new_offsets->offsets[i] = displacement;
3047
3048 objfile_relocate (symfile_objfile, new_offsets);
3049 }
3050 else if (exec_bfd)
3051 {
3052 asection *asect;
3053
3054 for (asect = exec_bfd->sections; asect != NULL; asect = asect->next)
3055 exec_set_section_address (bfd_get_filename (exec_bfd), asect->index,
3056 (bfd_section_vma (exec_bfd, asect)
3057 + displacement));
3058 }
3059 }
3060
3061 /* Implement the "create_inferior_hook" target_so_ops method.
3062
3063 For SVR4 executables, the first instruction executed is either the first
3064 instruction in the dynamic linker (for dynamically linked
3065 executables) or the instruction at "start" for statically linked
3066 executables. For dynamically linked executables, the system
3067 first exec's /lib/libc.so.N, which contains the dynamic linker,
3068 and starts it running. The dynamic linker maps in any needed
3069 shared libraries, maps in the actual user executable, and then
3070 jumps to "start" in the user executable.
3071
3072 We can arrange to cooperate with the dynamic linker to discover the
3073 names of shared libraries that are dynamically linked, and the base
3074 addresses to which they are linked.
3075
3076 This function is responsible for discovering those names and
3077 addresses, and saving sufficient information about them to allow
3078 their symbols to be read at a later time. */
3079
3080 static void
3081 svr4_solib_create_inferior_hook (int from_tty)
3082 {
3083 struct svr4_info *info;
3084
3085 info = get_svr4_info ();
3086
3087 /* Clear the probes-based interface's state. */
3088 free_probes_table (info);
3089 free_solib_list (info);
3090
3091 /* Relocate the main executable if necessary. */
3092 svr4_relocate_main_executable ();
3093
3094 /* No point setting a breakpoint in the dynamic linker if we can't
3095 hit it (e.g., a core file, or a trace file). */
3096 if (!target_has_execution)
3097 return;
3098
3099 if (!svr4_have_link_map_offsets ())
3100 return;
3101
3102 if (!enable_break (info, from_tty))
3103 return;
3104 }
3105
3106 static void
3107 svr4_clear_solib (void)
3108 {
3109 struct svr4_info *info;
3110
3111 info = get_svr4_info ();
3112 info->debug_base = 0;
3113 info->debug_loader_offset_p = 0;
3114 info->debug_loader_offset = 0;
3115 xfree (info->debug_loader_name);
3116 info->debug_loader_name = NULL;
3117 }
3118
3119 /* Clear any bits of ADDR that wouldn't fit in a target-format
3120 data pointer. "Data pointer" here refers to whatever sort of
3121 address the dynamic linker uses to manage its sections. At the
3122 moment, we don't support shared libraries on any processors where
3123 code and data pointers are different sizes.
3124
3125 This isn't really the right solution. What we really need here is
3126 a way to do arithmetic on CORE_ADDR values that respects the
3127 natural pointer/address correspondence. (For example, on the MIPS,
3128 converting a 32-bit pointer to a 64-bit CORE_ADDR requires you to
3129 sign-extend the value. There, simply truncating the bits above
3130 gdbarch_ptr_bit, as we do below, is no good.) This should probably
3131 be a new gdbarch method or something. */
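 /* For example, with 32-bit pointers a CORE_ADDR of 0x189abcdef would
    be truncated to 0x89abcdef by the mask below.  */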
3132 static CORE_ADDR
3133 svr4_truncate_ptr (CORE_ADDR addr)
3134 {
3135 if (gdbarch_ptr_bit (target_gdbarch ()) == sizeof (CORE_ADDR) * 8)
3136 /* We don't need to truncate anything, and the bit twiddling below
3137 will fail due to overflow problems. */
3138 return addr;
3139 else
3140 return addr & (((CORE_ADDR) 1 << gdbarch_ptr_bit (target_gdbarch ())) - 1);
3141 }
3142
3143
3144 static void
3145 svr4_relocate_section_addresses (struct so_list *so,
3146 struct target_section *sec)
3147 {
3148 bfd *abfd = sec->the_bfd_section->owner;
3149
3150 sec->addr = svr4_truncate_ptr (sec->addr + lm_addr_check (so, abfd));
3151 sec->endaddr = svr4_truncate_ptr (sec->endaddr + lm_addr_check (so, abfd));
3152 }
3153 \f
3154
3155 /* Architecture-specific operations. */
3156
3157 /* Per-architecture data key. */
3158 static struct gdbarch_data *solib_svr4_data;
3159
3160 struct solib_svr4_ops
3161 {
3162 /* Return a description of the layout of `struct link_map'. */
3163 struct link_map_offsets *(*fetch_link_map_offsets)(void);
3164 };
3165
3166 /* Return a default for the architecture-specific operations. */
3167
3168 static void *
3169 solib_svr4_init (struct obstack *obstack)
3170 {
3171 struct solib_svr4_ops *ops;
3172
3173 ops = OBSTACK_ZALLOC (obstack, struct solib_svr4_ops);
3174 ops->fetch_link_map_offsets = NULL;
3175 return ops;
3176 }
3177
3178 /* Set the architecture-specific `struct link_map_offsets' fetcher for
3179 GDBARCH to FLMO. Also, install SVR4 solib_ops into GDBARCH. */
3180
3181 void
3182 set_solib_svr4_fetch_link_map_offsets (struct gdbarch *gdbarch,
3183 struct link_map_offsets *(*flmo) (void))
3184 {
3185 struct solib_svr4_ops *ops
3186 = (struct solib_svr4_ops *) gdbarch_data (gdbarch, solib_svr4_data);
3187
3188 ops->fetch_link_map_offsets = flmo;
3189
3190 set_solib_ops (gdbarch, &svr4_so_ops);
3191 }
3192
3193 /* Fetch a link_map_offsets structure using the architecture-specific
3194 `struct link_map_offsets' fetcher. */
3195
3196 static struct link_map_offsets *
3197 svr4_fetch_link_map_offsets (void)
3198 {
3199 struct solib_svr4_ops *ops
3200 = (struct solib_svr4_ops *) gdbarch_data (target_gdbarch (),
3201 solib_svr4_data);
3202
3203 gdb_assert (ops->fetch_link_map_offsets);
3204 return ops->fetch_link_map_offsets ();
3205 }
3206
3207 /* Return 1 if a link map offset fetcher has been defined, 0 otherwise. */
3208
3209 static int
3210 svr4_have_link_map_offsets (void)
3211 {
3212 struct solib_svr4_ops *ops
3213 = (struct solib_svr4_ops *) gdbarch_data (target_gdbarch (),
3214 solib_svr4_data);
3215
3216 return (ops->fetch_link_map_offsets != NULL);
3217 }
3218 \f
3219
3220 /* Most OS'es that have SVR4-style ELF dynamic libraries define a
3221 `struct r_debug' and a `struct link_map' that are binary compatible
3222 with the original SVR4 implementation. */
3223
3224 /* Fetch (and possibly build) an appropriate `struct link_map_offsets'
3225 for an ILP32 SVR4 system. */
3226
3227 struct link_map_offsets *
3228 svr4_ilp32_fetch_link_map_offsets (void)
3229 {
3230 static struct link_map_offsets lmo;
3231 static struct link_map_offsets *lmp = NULL;
3232
3233 if (lmp == NULL)
3234 {
3235 lmp = &lmo;
3236
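 /* These offsets assume the traditional SVR4 ILP32 layout, roughly:
      struct r_debug  { int r_version; struct link_map *r_map;
                        ElfW(Addr) r_brk; ... };
      struct link_map { ElfW(Addr) l_addr; char *l_name;
                        ElfW(Dyn) *l_ld;
                        struct link_map *l_next, *l_prev; };
    with 4-byte pointers and no padding.  */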
3237 lmo.r_version_offset = 0;
3238 lmo.r_version_size = 4;
3239 lmo.r_map_offset = 4;
3240 lmo.r_brk_offset = 8;
3241 lmo.r_ldsomap_offset = 20;
3242
3243 /* Everything we need is in the first 20 bytes. */
3244 lmo.link_map_size = 20;
3245 lmo.l_addr_offset = 0;
3246 lmo.l_name_offset = 4;
3247 lmo.l_ld_offset = 8;
3248 lmo.l_next_offset = 12;
3249 lmo.l_prev_offset = 16;
3250 }
3251
3252 return lmp;
3253 }
3254
3255 /* Fetch (and possibly build) an appropriate `struct link_map_offsets'
3256 for an LP64 SVR4 system. */
3257
3258 struct link_map_offsets *
3259 svr4_lp64_fetch_link_map_offsets (void)
3260 {
3261 static struct link_map_offsets lmo;
3262 static struct link_map_offsets *lmp = NULL;
3263
3264 if (lmp == NULL)
3265 {
3266 lmp = &lmo;
3267
3268 lmo.r_version_offset = 0;
3269 lmo.r_version_size = 4;
3270 lmo.r_map_offset = 8;
3271 lmo.r_brk_offset = 16;
3272 lmo.r_ldsomap_offset = 40;
3273
3274 /* Everything we need is in the first 40 bytes. */
3275 lmo.link_map_size = 40;
3276 lmo.l_addr_offset = 0;
3277 lmo.l_name_offset = 8;
3278 lmo.l_ld_offset = 16;
3279 lmo.l_next_offset = 24;
3280 lmo.l_prev_offset = 32;
3281 }
3282
3283 return lmp;
3284 }
3285 \f
3286
3287 struct target_so_ops svr4_so_ops;
3288
3289 /* Look up a global symbol for ELF DSOs linked with -Bsymbolic. Those DSOs have a
3290 different rule for symbol lookup. The lookup begins here in the DSO, not in
3291 the main executable. */
3292
3293 static struct block_symbol
3294 elf_lookup_lib_symbol (struct objfile *objfile,
3295 const char *name,
3296 const domain_enum domain)
3297 {
3298 bfd *abfd;
3299
3300 if (objfile == symfile_objfile)
3301 abfd = exec_bfd;
3302 else
3303 {
3304 /* OBJFILE should have been passed as the non-debug one. */
3305 gdb_assert (objfile->separate_debug_objfile_backlink == NULL);
3306
3307 abfd = objfile->obfd;
3308 }
3309
3310 if (abfd == NULL || scan_dyntag (DT_SYMBOLIC, abfd, NULL, NULL) != 1)
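 /* -Bsymbolic is traditionally recorded as the DT_SYMBOLIC dynamic tag;
    if the DSO was not linked that way there is nothing special to do
    and the normal lookup order applies.  */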
3311 return (struct block_symbol) {NULL, NULL};
3312
3313 return lookup_global_symbol_from_objfile (objfile, name, domain);
3314 }
3315
3316 extern initialize_file_ftype _initialize_svr4_solib; /* -Wmissing-prototypes */
3317
3318 void
3319 _initialize_svr4_solib (void)
3320 {
3321 solib_svr4_data = gdbarch_data_register_pre_init (solib_svr4_init);
3322 solib_svr4_pspace_data
3323 = register_program_space_data_with_cleanup (NULL, svr4_pspace_data_cleanup);
3324
3325 svr4_so_ops.relocate_section_addresses = svr4_relocate_section_addresses;
3326 svr4_so_ops.free_so = svr4_free_so;
3327 svr4_so_ops.clear_so = svr4_clear_so;
3328 svr4_so_ops.clear_solib = svr4_clear_solib;
3329 svr4_so_ops.solib_create_inferior_hook = svr4_solib_create_inferior_hook;
3330 svr4_so_ops.special_symbol_handling = svr4_special_symbol_handling;
3331 svr4_so_ops.current_sos = svr4_current_sos;
3332 svr4_so_ops.open_symbol_file_object = open_symbol_file_object;
3333 svr4_so_ops.in_dynsym_resolve_code = svr4_in_dynsym_resolve_code;
3334 svr4_so_ops.bfd_open = solib_bfd_open;
3335 svr4_so_ops.lookup_lib_global_symbol = elf_lookup_lib_symbol;
3336 svr4_so_ops.same = svr4_same;
3337 svr4_so_ops.keep_data_in_core = svr4_keep_data_in_core;
3338 svr4_so_ops.update_breakpoints = svr4_update_solib_event_breakpoints;
3339 svr4_so_ops.handle_event = svr4_handle_solib_event;
3340 }