1 /* Handle SVR4 shared libraries for GDB, the GNU Debugger.
2
3 Copyright (C) 1990-2017 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21
22 #include "elf/external.h"
23 #include "elf/common.h"
24 #include "elf/mips.h"
25
26 #include "symtab.h"
27 #include "bfd.h"
28 #include "symfile.h"
29 #include "objfiles.h"
30 #include "gdbcore.h"
31 #include "target.h"
32 #include "inferior.h"
33 #include "infrun.h"
34 #include "regcache.h"
35 #include "gdbthread.h"
36 #include "observer.h"
37
38 #include "solist.h"
39 #include "solib.h"
40 #include "solib-svr4.h"
41
42 #include "bfd-target.h"
43 #include "elf-bfd.h"
44 #include "exec.h"
45 #include "auxv.h"
46 #include "gdb_bfd.h"
47 #include "probe.h"
48
49 static struct link_map_offsets *svr4_fetch_link_map_offsets (void);
50 static int svr4_have_link_map_offsets (void);
51 static void svr4_relocate_main_executable (void);
52 static void svr4_free_library_list (void *p_list);
53
54 /* Link map info to include in an allocated so_list entry. */
55
56 struct lm_info
57 {
58 /* Amount by which addresses in the binary should be relocated to
59 match the inferior. The direct inferior value is L_ADDR_INFERIOR.
60 When prelinking is involved and the prelink base address changes,
61 we may need a different offset - the recomputed offset is in L_ADDR.
62 It is commonly the same value. It is cached as we want to warn about
63 the difference and compute it only once. L_ADDR is valid
64 iff L_ADDR_P. */
65 CORE_ADDR l_addr, l_addr_inferior;
66 unsigned int l_addr_p : 1;
67
68 /* The target location of lm. */
69 CORE_ADDR lm_addr;
70
71 /* Values read in from inferior's fields of the same name. */
72 CORE_ADDR l_ld, l_next, l_prev, l_name;
73 };
74
75 /* On SVR4 systems, a list of symbols in the dynamic linker where
76 GDB can try to place a breakpoint to monitor shared library
77 events.
78
79 If none of these symbols are found, or other errors occur, then
80 SVR4 systems will fall back to using a symbol as the "startup
81 mapping complete" breakpoint address. */
82
83 static const char * const solib_break_names[] =
84 {
85 "r_debug_state",
86 "_r_debug_state",
87 "_dl_debug_state",
88 "rtld_db_dlactivity",
89 "__dl_rtld_db_dlactivity",
90 "_rtld_debug_state",
91
92 NULL
93 };
94
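/* Fallback symbols tried for the "startup mapping complete" breakpoint
described above when none of the dynamic linker symbols can be used.  */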
95 static const char * const bkpt_names[] =
96 {
97 "_start",
98 "__start",
99 "main",
100 NULL
101 };
102
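/* Names under which the main executable may appear in the inferior's
link map; used by match_main below.  */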
103 static const char * const main_name_list[] =
104 {
105 "main_$main",
106 NULL
107 };
108
109 /* What to do when a probe stop occurs. */
110
111 enum probe_action
112 {
113 /* Something went seriously wrong. Stop using probes and
114 revert to using the older interface. */
115 PROBES_INTERFACE_FAILED,
116
117 /* No action is required. The shared object list is still
118 valid. */
119 DO_NOTHING,
120
121 /* The shared object list should be reloaded entirely. */
122 FULL_RELOAD,
123
124 /* Attempt to incrementally update the shared object list. If
125 the update fails or is not possible, fall back to reloading
126 the list in full. */
127 UPDATE_OR_RELOAD,
128 };
129
130 /* A probe's name and its associated action. */
131
132 struct probe_info
133 {
134 /* The name of the probe. */
135 const char *name;
136
137 /* What to do when a probe stop occurs. */
138 enum probe_action action;
139 };
140
141 /* A list of named probes and their associated actions. If all
142 probes are present in the dynamic linker then the probes-based
143 interface will be used. */
144
145 static const struct probe_info probe_info[] =
146 {
147 { "init_start", DO_NOTHING },
148 { "init_complete", FULL_RELOAD },
149 { "map_start", DO_NOTHING },
150 { "map_failed", DO_NOTHING },
151 { "reloc_complete", UPDATE_OR_RELOAD },
152 { "unmap_start", DO_NOTHING },
153 { "unmap_complete", FULL_RELOAD },
154 };
155
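/* The number of entries in the probe_info table above.  */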
156 #define NUM_PROBES ARRAY_SIZE (probe_info)
157
158 /* Return non-zero if GDB_SO_NAME and INFERIOR_SO_NAME represent
159 the same shared library. */
160
161 static int
162 svr4_same_1 (const char *gdb_so_name, const char *inferior_so_name)
163 {
164 if (strcmp (gdb_so_name, inferior_so_name) == 0)
165 return 1;
166
167 /* On Solaris, when starting the inferior we think that the dynamic linker
168 is /usr/lib/ld.so.1, but later on the table of loaded shared libraries
169 contains /lib/ld.so.1. Sometimes one file is a link to the other, and
170 sometimes the two simply have identical content without being linked to
171 each other. We don't restrict this check to Solaris, but the chances
172 of running into this situation elsewhere are very low. */
173 if (strcmp (gdb_so_name, "/usr/lib/ld.so.1") == 0
174 && strcmp (inferior_so_name, "/lib/ld.so.1") == 0)
175 return 1;
176
177 /* Similarly, we observed the same issue with sparc64, but with
178 different locations. */
179 if (strcmp (gdb_so_name, "/usr/lib/sparcv9/ld.so.1") == 0
180 && strcmp (inferior_so_name, "/lib/sparcv9/ld.so.1") == 0)
181 return 1;
182
183 return 0;
184 }
185
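/* Return non-zero if the so_list entries GDB and INFERIOR refer to the
same shared library, judged by their original names.  */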
186 static int
187 svr4_same (struct so_list *gdb, struct so_list *inferior)
188 {
189 return (svr4_same_1 (gdb->so_original_name, inferior->so_original_name));
190 }
191
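/* Read the inferior's link map entry at LM_ADDR and return a newly
allocated lm_info describing it, or NULL if the read fails.  */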
192 static struct lm_info *
193 lm_info_read (CORE_ADDR lm_addr)
194 {
195 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
196 gdb_byte *lm;
197 struct lm_info *lm_info;
198 struct cleanup *back_to;
199
200 lm = (gdb_byte *) xmalloc (lmo->link_map_size);
201 back_to = make_cleanup (xfree, lm);
202
203 if (target_read_memory (lm_addr, lm, lmo->link_map_size) != 0)
204 {
205 warning (_("Error reading shared library list entry at %s"),
206 paddress (target_gdbarch (), lm_addr));
207 lm_info = NULL;
208 }
209 else
210 {
211 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
212
213 lm_info = XCNEW (struct lm_info);
214 lm_info->lm_addr = lm_addr;
215
216 lm_info->l_addr_inferior = extract_typed_address (&lm[lmo->l_addr_offset],
217 ptr_type);
218 lm_info->l_ld = extract_typed_address (&lm[lmo->l_ld_offset], ptr_type);
219 lm_info->l_next = extract_typed_address (&lm[lmo->l_next_offset],
220 ptr_type);
221 lm_info->l_prev = extract_typed_address (&lm[lmo->l_prev_offset],
222 ptr_type);
223 lm_info->l_name = extract_typed_address (&lm[lmo->l_name_offset],
224 ptr_type);
225 }
226
227 do_cleanups (back_to);
228
229 return lm_info;
230 }
231
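/* Return non-zero if the target's link map format includes the l_ld
field (the address of the shared object's dynamic section).  */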
232 static int
233 has_lm_dynamic_from_link_map (void)
234 {
235 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
236
237 return lmo->l_ld_offset >= 0;
238 }
239
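/* Return the relocation offset (l_addr) for SO, computing and caching
it on first use.  If ABFD is non-NULL, its .dynamic section is used to
cross-check the link map's l_ld value and to detect prelink
displacement.  */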
240 static CORE_ADDR
241 lm_addr_check (const struct so_list *so, bfd *abfd)
242 {
243 if (!so->lm_info->l_addr_p)
244 {
245 struct bfd_section *dyninfo_sect;
246 CORE_ADDR l_addr, l_dynaddr, dynaddr;
247
248 l_addr = so->lm_info->l_addr_inferior;
249
250 if (! abfd || ! has_lm_dynamic_from_link_map ())
251 goto set_addr;
252
253 l_dynaddr = so->lm_info->l_ld;
254
255 dyninfo_sect = bfd_get_section_by_name (abfd, ".dynamic");
256 if (dyninfo_sect == NULL)
257 goto set_addr;
258
259 dynaddr = bfd_section_vma (abfd, dyninfo_sect);
260
261 if (dynaddr + l_addr != l_dynaddr)
262 {
263 CORE_ADDR align = 0x1000;
264 CORE_ADDR minpagesize = align;
265
266 if (bfd_get_flavour (abfd) == bfd_target_elf_flavour)
267 {
268 Elf_Internal_Ehdr *ehdr = elf_tdata (abfd)->elf_header;
269 Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
270 int i;
271
272 align = 1;
273
274 for (i = 0; i < ehdr->e_phnum; i++)
275 if (phdr[i].p_type == PT_LOAD && phdr[i].p_align > align)
276 align = phdr[i].p_align;
277
278 minpagesize = get_elf_backend_data (abfd)->minpagesize;
279 }
280
281 /* Turn it into a mask. */
282 align--;
283
284 /* If the changes match the alignment requirements, we
285 assume we're using a core file that was generated by the
286 same binary, just prelinked with a different base offset.
287 If it doesn't match, we may have a different binary, the
288 same binary with the dynamic table loaded at an unrelated
289 location, or anything, really. To avoid regressions,
290 don't adjust the base offset in the latter case, although
291 odds are that, if things really changed, debugging won't
292 quite work.
293
294 One might instead expect the condition
295 ((l_addr & align) == 0 && ((l_dynaddr - dynaddr) & align) == 0)
296 but the one below is relaxed for PPC. The PPC kernel supports
297 either 4k or 64k page sizes. To be prepared for 64k pages,
298 PPC ELF files are built using an alignment requirement of 64k.
299 However, when running on a kernel supporting 4k pages, the memory
300 mapping of the library may not actually happen on a 64k boundary!
301
302 (In the usual case where (l_addr & align) == 0, this check is
303 equivalent to the possibly expected check above.)
304
305 Even on PPC, the displacement must be aligned to at least MINPAGESIZE. */
306
307 l_addr = l_dynaddr - dynaddr;
308
309 if ((l_addr & (minpagesize - 1)) == 0
310 && (l_addr & align) == ((l_dynaddr - dynaddr) & align))
311 {
312 if (info_verbose)
313 printf_unfiltered (_("Using PIC (Position Independent Code) "
314 "prelink displacement %s for \"%s\".\n"),
315 paddress (target_gdbarch (), l_addr),
316 so->so_name);
317 }
318 else
319 {
320 /* There is no way to verify that the library file matches. While
321 prelinking an unprelinked file (or unprelinking a prelinked one),
322 prelink can shift the DYNAMIC segment by an arbitrary offset
323 without any page size alignment. There is no way to read the
324 ELF header and/or Program Headers to do even a limited check
325 that they match. One could verify the DYNAMIC segment itself.
326 Still, the address found here is the best one GDB could
327 find. */
328
329 warning (_(".dynamic section for \"%s\" "
330 "is not at the expected address "
331 "(wrong library or version mismatch?)"), so->so_name);
332 }
333 }
334
335 set_addr:
336 so->lm_info->l_addr = l_addr;
337 so->lm_info->l_addr_p = 1;
338 }
339
340 return so->lm_info->l_addr;
341 }
342
343 /* Per pspace SVR4 specific data. */
344
345 struct svr4_info
346 {
347 CORE_ADDR debug_base; /* Base of dynamic linker structures. */
348
349 /* Validity flag for debug_loader_offset. */
350 int debug_loader_offset_p;
351
352 /* Load address for the dynamic linker, inferred. */
353 CORE_ADDR debug_loader_offset;
354
355 /* Name of the dynamic linker, valid if debug_loader_offset_p. */
356 char *debug_loader_name;
357
358 /* Load map address for the main executable. */
359 CORE_ADDR main_lm_addr;
360
361 CORE_ADDR interp_text_sect_low;
362 CORE_ADDR interp_text_sect_high;
363 CORE_ADDR interp_plt_sect_low;
364 CORE_ADDR interp_plt_sect_high;
365
366 /* Nonzero if the list of objects was last obtained from the target
367 via qXfer:libraries-svr4:read. */
368 int using_xfer;
369
370 /* Table of struct probe_and_action instances, used by the
371 probes-based interface to map breakpoint addresses to probes
372 and their associated actions. Lookup is performed using
373 probe_and_action->address. */
374 htab_t probes_table;
375
376 /* List of objects loaded into the inferior, used by the probes-
377 based interface. */
378 struct so_list *solib_list;
379 };
380
381 /* Per-program-space data key. */
382 static const struct program_space_data *solib_svr4_pspace_data;
383
384 /* Free the probes table. */
385
386 static void
387 free_probes_table (struct svr4_info *info)
388 {
389 if (info->probes_table == NULL)
390 return;
391
392 htab_delete (info->probes_table);
393 info->probes_table = NULL;
394 }
395
396 /* Free the solib list. */
397
398 static void
399 free_solib_list (struct svr4_info *info)
400 {
401 svr4_free_library_list (&info->solib_list);
402 info->solib_list = NULL;
403 }
404
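/* Cleanup routine for the per-program-space data: free the svr4_info
attached to PSPACE.  */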
405 static void
406 svr4_pspace_data_cleanup (struct program_space *pspace, void *arg)
407 {
408 struct svr4_info *info = (struct svr4_info *) arg;
409
410 free_probes_table (info);
411 free_solib_list (info);
412
413 xfree (info);
414 }
415
416 /* Get the current svr4 data. If none is found yet, add it now. This
417 function always returns a valid object. */
418
419 static struct svr4_info *
420 get_svr4_info (void)
421 {
422 struct svr4_info *info;
423
424 info = (struct svr4_info *) program_space_data (current_program_space,
425 solib_svr4_pspace_data);
426 if (info != NULL)
427 return info;
428
429 info = XCNEW (struct svr4_info);
430 set_program_space_data (current_program_space, solib_svr4_pspace_data, info);
431 return info;
432 }
433
434 /* Local function prototypes */
435
436 static int match_main (const char *);
437
438 /* Read program header TYPE from inferior memory. The header is found
439 by scanning the OS auxillary vector.
440
441 If TYPE == -1, return the program headers instead of the contents of
442 one program header.
443
444 Return a pointer to allocated memory holding the program header contents,
445 or NULL on failure. If successful, and unless P_SECT_SIZE is NULL, the
446 size of those contents is returned in P_SECT_SIZE. Likewise, the target
447 architecture size (32-bit or 64-bit) is returned in P_ARCH_SIZE and
448 the base address of the section is returned in BASE_ADDR. */
449
450 static gdb_byte *
451 read_program_header (int type, int *p_sect_size, int *p_arch_size,
452 CORE_ADDR *base_addr)
453 {
454 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
455 CORE_ADDR at_phdr, at_phent, at_phnum, pt_phdr = 0;
456 int arch_size, sect_size;
457 CORE_ADDR sect_addr;
458 gdb_byte *buf;
459 int pt_phdr_p = 0;
460
461 /* Get required auxv elements from target. */
462 if (target_auxv_search (&current_target, AT_PHDR, &at_phdr) <= 0)
463 return 0;
464 if (target_auxv_search (&current_target, AT_PHENT, &at_phent) <= 0)
465 return 0;
466 if (target_auxv_search (&current_target, AT_PHNUM, &at_phnum) <= 0)
467 return 0;
468 if (!at_phdr || !at_phnum)
469 return 0;
470
471 /* Determine ELF architecture type. */
472 if (at_phent == sizeof (Elf32_External_Phdr))
473 arch_size = 32;
474 else if (at_phent == sizeof (Elf64_External_Phdr))
475 arch_size = 64;
476 else
477 return 0;
478
479 /* Find the requested segment. */
480 if (type == -1)
481 {
482 sect_addr = at_phdr;
483 sect_size = at_phent * at_phnum;
484 }
485 else if (arch_size == 32)
486 {
487 Elf32_External_Phdr phdr;
488 int i;
489
490 /* Search for requested PHDR. */
491 for (i = 0; i < at_phnum; i++)
492 {
493 int p_type;
494
495 if (target_read_memory (at_phdr + i * sizeof (phdr),
496 (gdb_byte *)&phdr, sizeof (phdr)))
497 return 0;
498
499 p_type = extract_unsigned_integer ((gdb_byte *) phdr.p_type,
500 4, byte_order);
501
502 if (p_type == PT_PHDR)
503 {
504 pt_phdr_p = 1;
505 pt_phdr = extract_unsigned_integer ((gdb_byte *) phdr.p_vaddr,
506 4, byte_order);
507 }
508
509 if (p_type == type)
510 break;
511 }
512
513 if (i == at_phnum)
514 return 0;
515
516 /* Retrieve address and size. */
517 sect_addr = extract_unsigned_integer ((gdb_byte *)phdr.p_vaddr,
518 4, byte_order);
519 sect_size = extract_unsigned_integer ((gdb_byte *)phdr.p_memsz,
520 4, byte_order);
521 }
522 else
523 {
524 Elf64_External_Phdr phdr;
525 int i;
526
527 /* Search for requested PHDR. */
528 for (i = 0; i < at_phnum; i++)
529 {
530 int p_type;
531
532 if (target_read_memory (at_phdr + i * sizeof (phdr),
533 (gdb_byte *)&phdr, sizeof (phdr)))
534 return 0;
535
536 p_type = extract_unsigned_integer ((gdb_byte *) phdr.p_type,
537 4, byte_order);
538
539 if (p_type == PT_PHDR)
540 {
541 pt_phdr_p = 1;
542 pt_phdr = extract_unsigned_integer ((gdb_byte *) phdr.p_vaddr,
543 8, byte_order);
544 }
545
546 if (p_type == type)
547 break;
548 }
549
550 if (i == at_phnum)
551 return 0;
552
553 /* Retrieve address and size. */
554 sect_addr = extract_unsigned_integer ((gdb_byte *)phdr.p_vaddr,
555 8, byte_order);
556 sect_size = extract_unsigned_integer ((gdb_byte *)phdr.p_memsz,
557 8, byte_order);
558 }
559
560 /* PT_PHDR is optional, but we really need it
561 for PIE to make this work in general. */
562
563 if (pt_phdr_p)
564 {
565 /* AT_PHDR is the real address in memory; PT_PHDR is what the program
566 header says it is. The relocation offset is the difference between the two. */
567 sect_addr = sect_addr + (at_phdr - pt_phdr);
568 }
569
570 /* Read in requested program header. */
571 buf = (gdb_byte *) xmalloc (sect_size);
572 if (target_read_memory (sect_addr, buf, sect_size))
573 {
574 xfree (buf);
575 return NULL;
576 }
577
578 if (p_arch_size)
579 *p_arch_size = arch_size;
580 if (p_sect_size)
581 *p_sect_size = sect_size;
582 if (base_addr)
583 *base_addr = sect_addr;
584
585 return buf;
586 }
587
588
589 /* Return program interpreter string. */
590 static char *
591 find_program_interpreter (void)
592 {
593 gdb_byte *buf = NULL;
594
595 /* If we have an exec_bfd, use its section table. */
596 if (exec_bfd
597 && bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
598 {
599 struct bfd_section *interp_sect;
600
601 interp_sect = bfd_get_section_by_name (exec_bfd, ".interp");
602 if (interp_sect != NULL)
603 {
604 int sect_size = bfd_section_size (exec_bfd, interp_sect);
605
606 buf = (gdb_byte *) xmalloc (sect_size);
607 bfd_get_section_contents (exec_bfd, interp_sect, buf, 0, sect_size);
608 }
609 }
610
611 /* If we didn't find it, use the target auxiliary vector. */
612 if (!buf)
613 buf = read_program_header (PT_INTERP, NULL, NULL, NULL);
614
615 return (char *) buf;
616 }
617
618
619 /* Scan for DESIRED_DYNTAG in .dynamic section of ABFD. If DESIRED_DYNTAG is
620 found, 1 is returned and the corresponding PTR is set. */
621
622 static int
623 scan_dyntag (const int desired_dyntag, bfd *abfd, CORE_ADDR *ptr,
624 CORE_ADDR *ptr_addr)
625 {
626 int arch_size, step, sect_size;
627 long current_dyntag;
628 CORE_ADDR dyn_ptr, dyn_addr;
629 gdb_byte *bufend, *bufstart, *buf;
630 Elf32_External_Dyn *x_dynp_32;
631 Elf64_External_Dyn *x_dynp_64;
632 struct bfd_section *sect;
633 struct target_section *target_section;
634
635 if (abfd == NULL)
636 return 0;
637
638 if (bfd_get_flavour (abfd) != bfd_target_elf_flavour)
639 return 0;
640
641 arch_size = bfd_get_arch_size (abfd);
642 if (arch_size == -1)
643 return 0;
644
645 /* Find the start address of the .dynamic section. */
646 sect = bfd_get_section_by_name (abfd, ".dynamic");
647 if (sect == NULL)
648 return 0;
649
650 for (target_section = current_target_sections->sections;
651 target_section < current_target_sections->sections_end;
652 target_section++)
653 if (sect == target_section->the_bfd_section)
654 break;
655 if (target_section < current_target_sections->sections_end)
656 dyn_addr = target_section->addr;
657 else
658 {
659 /* ABFD may come from OBJFILE acting only as a symbol file without being
660 loaded into the target (see add_symbol_file_command). In that case
661 fall back to the file VMA address, without the possibility of having
662 the section relocated to its actual in-memory address. */
663
664 dyn_addr = bfd_section_vma (abfd, sect);
665 }
666
667 /* Read in .dynamic from the BFD. We will get the actual value
668 from memory later. */
669 sect_size = bfd_section_size (abfd, sect);
670 buf = bufstart = (gdb_byte *) alloca (sect_size);
671 if (!bfd_get_section_contents (abfd, sect,
672 buf, 0, sect_size))
673 return 0;
674
675 /* Iterate over BUF and scan for DYNTAG. If found, set PTR and return. */
676 step = (arch_size == 32) ? sizeof (Elf32_External_Dyn)
677 : sizeof (Elf64_External_Dyn);
678 for (bufend = buf + sect_size;
679 buf < bufend;
680 buf += step)
681 {
682 if (arch_size == 32)
683 {
684 x_dynp_32 = (Elf32_External_Dyn *) buf;
685 current_dyntag = bfd_h_get_32 (abfd, (bfd_byte *) x_dynp_32->d_tag);
686 dyn_ptr = bfd_h_get_32 (abfd, (bfd_byte *) x_dynp_32->d_un.d_ptr);
687 }
688 else
689 {
690 x_dynp_64 = (Elf64_External_Dyn *) buf;
691 current_dyntag = bfd_h_get_64 (abfd, (bfd_byte *) x_dynp_64->d_tag);
692 dyn_ptr = bfd_h_get_64 (abfd, (bfd_byte *) x_dynp_64->d_un.d_ptr);
693 }
694 if (current_dyntag == DT_NULL)
695 return 0;
696 if (current_dyntag == desired_dyntag)
697 {
698 /* If requested, try to read the runtime value of this .dynamic
699 entry. */
700 if (ptr)
701 {
702 struct type *ptr_type;
703 gdb_byte ptr_buf[8];
704 CORE_ADDR ptr_addr_1;
705
706 ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
707 ptr_addr_1 = dyn_addr + (buf - bufstart) + arch_size / 8;
708 if (target_read_memory (ptr_addr_1, ptr_buf, arch_size / 8) == 0)
709 dyn_ptr = extract_typed_address (ptr_buf, ptr_type);
710 *ptr = dyn_ptr;
711 if (ptr_addr)
712 *ptr_addr = dyn_addr + (buf - bufstart);
713 }
714 return 1;
715 }
716 }
717
718 return 0;
719 }
720
721 /* Scan for DESIRED_DYNTAG in .dynamic section of the target's main executable,
722 found by consulting the OS auxiliary vector. If DESIRED_DYNTAG is found, 1
723 is returned and the corresponding PTR is set. */
724
725 static int
726 scan_dyntag_auxv (const int desired_dyntag, CORE_ADDR *ptr,
727 CORE_ADDR *ptr_addr)
728 {
729 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
730 int sect_size, arch_size, step;
731 long current_dyntag;
732 CORE_ADDR dyn_ptr;
733 CORE_ADDR base_addr;
734 gdb_byte *bufend, *bufstart, *buf;
735
736 /* Read in .dynamic section. */
737 buf = bufstart = read_program_header (PT_DYNAMIC, &sect_size, &arch_size,
738 &base_addr);
739 if (!buf)
740 return 0;
741
742 /* Iterate over BUF and scan for DYNTAG. If found, set PTR and return. */
743 step = (arch_size == 32) ? sizeof (Elf32_External_Dyn)
744 : sizeof (Elf64_External_Dyn);
745 for (bufend = buf + sect_size;
746 buf < bufend;
747 buf += step)
748 {
749 if (arch_size == 32)
750 {
751 Elf32_External_Dyn *dynp = (Elf32_External_Dyn *) buf;
752
753 current_dyntag = extract_unsigned_integer ((gdb_byte *) dynp->d_tag,
754 4, byte_order);
755 dyn_ptr = extract_unsigned_integer ((gdb_byte *) dynp->d_un.d_ptr,
756 4, byte_order);
757 }
758 else
759 {
760 Elf64_External_Dyn *dynp = (Elf64_External_Dyn *) buf;
761
762 current_dyntag = extract_unsigned_integer ((gdb_byte *) dynp->d_tag,
763 8, byte_order);
764 dyn_ptr = extract_unsigned_integer ((gdb_byte *) dynp->d_un.d_ptr,
765 8, byte_order);
766 }
767 if (current_dyntag == DT_NULL)
768 break;
769
770 if (current_dyntag == desired_dyntag)
771 {
772 if (ptr)
773 *ptr = dyn_ptr;
774
775 if (ptr_addr)
776 *ptr_addr = base_addr + buf - bufstart;
777
778 xfree (bufstart);
779 return 1;
780 }
781 }
782
783 xfree (bufstart);
784 return 0;
785 }
786
787 /* Locate the base address of dynamic linker structs for SVR4 elf
788 targets.
789
790 For SVR4 elf targets the address of the dynamic linker's runtime
791 structure is contained within the dynamic info section in the
792 executable file. The dynamic section is also mapped into the
793 inferior address space. Because the runtime loader fills in the
794 real address before starting the inferior, we have to read in the
795 dynamic info section from the inferior address space.
796 If there are any errors while trying to find the address, we
797 silently return 0, otherwise the found address is returned. */
798
799 static CORE_ADDR
800 elf_locate_base (void)
801 {
802 struct bound_minimal_symbol msymbol;
803 CORE_ADDR dyn_ptr, dyn_ptr_addr;
804
805 /* Look for DT_MIPS_RLD_MAP first. MIPS executables use this
806 instead of DT_DEBUG, although they sometimes contain an unused
807 DT_DEBUG. */
808 if (scan_dyntag (DT_MIPS_RLD_MAP, exec_bfd, &dyn_ptr, NULL)
809 || scan_dyntag_auxv (DT_MIPS_RLD_MAP, &dyn_ptr, NULL))
810 {
811 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
812 gdb_byte *pbuf;
813 int pbuf_size = TYPE_LENGTH (ptr_type);
814
815 pbuf = (gdb_byte *) alloca (pbuf_size);
816 /* DT_MIPS_RLD_MAP contains a pointer to the address
817 of the dynamic link structure. */
818 if (target_read_memory (dyn_ptr, pbuf, pbuf_size))
819 return 0;
820 return extract_typed_address (pbuf, ptr_type);
821 }
822
823 /* Then check DT_MIPS_RLD_MAP_REL. MIPS executables now use this form
824 because of needing to support PIE. DT_MIPS_RLD_MAP will also exist
825 in non-PIE. */
826 if (scan_dyntag (DT_MIPS_RLD_MAP_REL, exec_bfd, &dyn_ptr, &dyn_ptr_addr)
827 || scan_dyntag_auxv (DT_MIPS_RLD_MAP_REL, &dyn_ptr, &dyn_ptr_addr))
828 {
829 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
830 gdb_byte *pbuf;
831 int pbuf_size = TYPE_LENGTH (ptr_type);
832
833 pbuf = (gdb_byte *) alloca (pbuf_size);
834 /* DT_MIPS_RLD_MAP_REL contains an offset from the address of the
835 DT slot to the address of the dynamic link structure. */
836 if (target_read_memory (dyn_ptr + dyn_ptr_addr, pbuf, pbuf_size))
837 return 0;
838 return extract_typed_address (pbuf, ptr_type);
839 }
840
841 /* Find DT_DEBUG. */
842 if (scan_dyntag (DT_DEBUG, exec_bfd, &dyn_ptr, NULL)
843 || scan_dyntag_auxv (DT_DEBUG, &dyn_ptr, NULL))
844 return dyn_ptr;
845
846 /* This may be a static executable. Look for the symbol
847 conventionally named _r_debug, as a last resort. */
848 msymbol = lookup_minimal_symbol ("_r_debug", NULL, symfile_objfile);
849 if (msymbol.minsym != NULL)
850 return BMSYMBOL_VALUE_ADDRESS (msymbol);
851
852 /* DT_DEBUG entry not found. */
853 return 0;
854 }
855
856 /* Locate the base address of dynamic linker structs.
857
858 For both the SunOS and SVR4 shared library implementations, if the
859 inferior executable has been linked dynamically, there is a single
860 address somewhere in the inferior's data space which is the key to
861 locating all of the dynamic linker's runtime structures. This
862 address is the value of the debug base symbol. The job of this
863 function is to find and return that address, or to return 0 if there
864 is no such address (the executable is statically linked for example).
865
866 For SunOS, the job is almost trivial, since the dynamic linker and
867 all of its structures are statically linked to the executable at
868 link time. Thus the symbol for the address we are looking for has
869 already been added to the minimal symbol table for the executable's
870 objfile at the time the symbol file's symbols were read, and all we
871 have to do is look it up there. Note that we explicitly do NOT want
872 to find the copies in the shared library.
873
874 The SVR4 version is a bit more complicated because the address
875 is contained somewhere in the dynamic info section. We have to go
876 to a lot more work to discover the address of the debug base symbol.
877 Because of this complexity, we cache the value we find and return that
878 value on subsequent invocations. Note there is no copy in the
879 executable symbol tables. */
880
881 static CORE_ADDR
882 locate_base (struct svr4_info *info)
883 {
884 /* Check to see if we have a currently valid address, and if so, avoid
885 doing all this work again and just return the cached address. If
886 we have no cached address, try to locate it in the dynamic info
887 section for ELF executables. There's no point in doing any of this
888 though if we don't have some link map offsets to work with. */
889
890 if (info->debug_base == 0 && svr4_have_link_map_offsets ())
891 info->debug_base = elf_locate_base ();
892 return info->debug_base;
893 }
894
895 /* Find the first element in the inferior's dynamic link map, and
896 return its address in the inferior. Return zero if the address
897 could not be determined.
898
899 FIXME: Perhaps we should validate the info somehow, perhaps by
900 checking r_version for a known version number, or r_state for
901 RT_CONSISTENT. */
902
903 static CORE_ADDR
904 solib_svr4_r_map (struct svr4_info *info)
905 {
906 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
907 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
908 CORE_ADDR addr = 0;
909
910 TRY
911 {
912 addr = read_memory_typed_address (info->debug_base + lmo->r_map_offset,
913 ptr_type);
914 }
915 CATCH (ex, RETURN_MASK_ERROR)
916 {
917 exception_print (gdb_stderr, ex);
918 }
919 END_CATCH
920
921 return addr;
922 }
923
924 /* Find r_brk from the inferior's debug base. */
925
926 static CORE_ADDR
927 solib_svr4_r_brk (struct svr4_info *info)
928 {
929 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
930 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
931
932 return read_memory_typed_address (info->debug_base + lmo->r_brk_offset,
933 ptr_type);
934 }
935
936 /* Find the link map for the dynamic linker (if it is not in the
937 normal list of loaded shared objects). */
938
939 static CORE_ADDR
940 solib_svr4_r_ldsomap (struct svr4_info *info)
941 {
942 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
943 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
944 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
945 ULONGEST version = 0;
946
947 TRY
948 {
949 /* Check version, and return zero if `struct r_debug' doesn't have
950 the r_ldsomap member. */
951 version
952 = read_memory_unsigned_integer (info->debug_base + lmo->r_version_offset,
953 lmo->r_version_size, byte_order);
954 }
955 CATCH (ex, RETURN_MASK_ERROR)
956 {
957 exception_print (gdb_stderr, ex);
958 }
959 END_CATCH
960
961 if (version < 2 || lmo->r_ldsomap_offset == -1)
962 return 0;
963
964 return read_memory_typed_address (info->debug_base + lmo->r_ldsomap_offset,
965 ptr_type);
966 }
967
968 /* On Solaris systems with some versions of the dynamic linker,
969 ld.so's l_name pointer points to the SONAME in the string table
970 rather than into writable memory. So that GDB can find shared
971 libraries when loading a core file generated by gcore, ensure that
972 memory areas containing the l_name string are saved in the core
973 file. */
974
975 static int
976 svr4_keep_data_in_core (CORE_ADDR vaddr, unsigned long size)
977 {
978 struct svr4_info *info;
979 CORE_ADDR ldsomap;
980 struct so_list *newobj;
981 struct cleanup *old_chain;
982 CORE_ADDR name_lm;
983
984 info = get_svr4_info ();
985
986 info->debug_base = 0;
987 locate_base (info);
988 if (!info->debug_base)
989 return 0;
990
991 ldsomap = solib_svr4_r_ldsomap (info);
992 if (!ldsomap)
993 return 0;
994
995 newobj = XCNEW (struct so_list);
996 old_chain = make_cleanup (xfree, newobj);
997 newobj->lm_info = lm_info_read (ldsomap);
998 make_cleanup (xfree, newobj->lm_info);
999 name_lm = newobj->lm_info ? newobj->lm_info->l_name : 0;
1000 do_cleanups (old_chain);
1001
1002 return (name_lm >= vaddr && name_lm < vaddr + size);
1003 }
1004
1005 /* Implement the "open_symbol_file_object" target_so_ops method.
1006
1007 If no open symbol file, attempt to locate and open the main symbol
1008 file. On SVR4 systems, this is the first link map entry. If its
1009 name is here, we can open it. Useful when attaching to a process
1010 without first loading its symbol file. */
1011
1012 static int
1013 open_symbol_file_object (void *from_ttyp)
1014 {
1015 CORE_ADDR lm, l_name;
1016 char *filename;
1017 int errcode;
1018 int from_tty = *(int *)from_ttyp;
1019 struct link_map_offsets *lmo = svr4_fetch_link_map_offsets ();
1020 struct type *ptr_type = builtin_type (target_gdbarch ())->builtin_data_ptr;
1021 int l_name_size = TYPE_LENGTH (ptr_type);
1022 gdb_byte *l_name_buf = (gdb_byte *) xmalloc (l_name_size);
1023 struct cleanup *cleanups = make_cleanup (xfree, l_name_buf);
1024 struct svr4_info *info = get_svr4_info ();
1025 symfile_add_flags add_flags = 0;
1026
1027 if (from_tty)
1028 add_flags |= SYMFILE_VERBOSE;
1029
1030 if (symfile_objfile)
1031 if (!query (_("Attempt to reload symbols from process? ")))
1032 {
1033 do_cleanups (cleanups);
1034 return 0;
1035 }
1036
1037 /* Always locate the debug struct, in case it has moved. */
1038 info->debug_base = 0;
1039 if (locate_base (info) == 0)
1040 {
1041 do_cleanups (cleanups);
1042 return 0; /* failed somehow... */
1043 }
1044
1045 /* First link map member should be the executable. */
1046 lm = solib_svr4_r_map (info);
1047 if (lm == 0)
1048 {
1049 do_cleanups (cleanups);
1050 return 0; /* failed somehow... */
1051 }
1052
1053 /* Read address of name from target memory to GDB. */
1054 read_memory (lm + lmo->l_name_offset, l_name_buf, l_name_size);
1055
1056 /* Convert the address to host format. */
1057 l_name = extract_typed_address (l_name_buf, ptr_type);
1058
1059 if (l_name == 0)
1060 {
1061 do_cleanups (cleanups);
1062 return 0; /* No filename. */
1063 }
1064
1065 /* Now fetch the filename from target memory. */
1066 target_read_string (l_name, &filename, SO_NAME_MAX_PATH_SIZE - 1, &errcode);
1067 make_cleanup (xfree, filename);
1068
1069 if (errcode)
1070 {
1071 warning (_("failed to read exec filename from attached file: %s"),
1072 safe_strerror (errcode));
1073 do_cleanups (cleanups);
1074 return 0;
1075 }
1076
1077 /* Have a pathname: read the symbol file. */
1078 symbol_file_add_main (filename, add_flags);
1079
1080 do_cleanups (cleanups);
1081 return 1;
1082 }
1083
1084 /* Data exchange structure for the XML parser as returned by
1085 svr4_current_sos_via_xfer_libraries. */
1086
1087 struct svr4_library_list
1088 {
1089 struct so_list *head, **tailp;
1090
1091 /* Inferior address of struct link_map used for the main executable. It is
1092 zero if not known. */
1093 CORE_ADDR main_lm;
1094 };
1095
1096 /* Implementation for target_so_ops.free_so. */
1097
1098 static void
1099 svr4_free_so (struct so_list *so)
1100 {
1101 xfree (so->lm_info);
1102 }
1103
1104 /* Implement target_so_ops.clear_so. */
1105
1106 static void
1107 svr4_clear_so (struct so_list *so)
1108 {
1109 if (so->lm_info != NULL)
1110 so->lm_info->l_addr_p = 0;
1111 }
1112
1113 /* Free so_list built so far (called via cleanup). */
1114
1115 static void
1116 svr4_free_library_list (void *p_list)
1117 {
1118 struct so_list *list = *(struct so_list **) p_list;
1119
1120 while (list != NULL)
1121 {
1122 struct so_list *next = list->next;
1123
1124 free_so (list);
1125 list = next;
1126 }
1127 }
1128
1129 /* Copy library list. */
1130
1131 static struct so_list *
1132 svr4_copy_library_list (struct so_list *src)
1133 {
1134 struct so_list *dst = NULL;
1135 struct so_list **link = &dst;
1136
1137 while (src != NULL)
1138 {
1139 struct so_list *newobj;
1140
1141 newobj = XNEW (struct so_list);
1142 memcpy (newobj, src, sizeof (struct so_list));
1143
1144 newobj->lm_info = XNEW (struct lm_info);
1145 memcpy (newobj->lm_info, src->lm_info, sizeof (struct lm_info));
1146
1147 newobj->next = NULL;
1148 *link = newobj;
1149 link = &newobj->next;
1150
1151 src = src->next;
1152 }
1153
1154 return dst;
1155 }
1156
1157 #ifdef HAVE_LIBEXPAT
1158
1159 #include "xml-support.h"
1160
1161 /* Handle the start of a <library> element. Note: new elements are added
1162 at the tail of the list, keeping the list in order. */
1163
1164 static void
1165 library_list_start_library (struct gdb_xml_parser *parser,
1166 const struct gdb_xml_element *element,
1167 void *user_data, VEC(gdb_xml_value_s) *attributes)
1168 {
1169 struct svr4_library_list *list = (struct svr4_library_list *) user_data;
1170 const char *name
1171 = (const char *) xml_find_attribute (attributes, "name")->value;
1172 ULONGEST *lmp
1173 = (ULONGEST *) xml_find_attribute (attributes, "lm")->value;
1174 ULONGEST *l_addrp
1175 = (ULONGEST *) xml_find_attribute (attributes, "l_addr")->value;
1176 ULONGEST *l_ldp
1177 = (ULONGEST *) xml_find_attribute (attributes, "l_ld")->value;
1178 struct so_list *new_elem;
1179
1180 new_elem = XCNEW (struct so_list);
1181 new_elem->lm_info = XCNEW (struct lm_info);
1182 new_elem->lm_info->lm_addr = *lmp;
1183 new_elem->lm_info->l_addr_inferior = *l_addrp;
1184 new_elem->lm_info->l_ld = *l_ldp;
1185
1186 strncpy (new_elem->so_name, name, sizeof (new_elem->so_name) - 1);
1187 new_elem->so_name[sizeof (new_elem->so_name) - 1] = 0;
1188 strcpy (new_elem->so_original_name, new_elem->so_name);
1189
1190 *list->tailp = new_elem;
1191 list->tailp = &new_elem->next;
1192 }
1193
1194 /* Handle the start of a <library-list-svr4> element. */
1195
1196 static void
1197 svr4_library_list_start_list (struct gdb_xml_parser *parser,
1198 const struct gdb_xml_element *element,
1199 void *user_data, VEC(gdb_xml_value_s) *attributes)
1200 {
1201 struct svr4_library_list *list = (struct svr4_library_list *) user_data;
1202 const char *version
1203 = (const char *) xml_find_attribute (attributes, "version")->value;
1204 struct gdb_xml_value *main_lm = xml_find_attribute (attributes, "main-lm");
1205
1206 if (strcmp (version, "1.0") != 0)
1207 gdb_xml_error (parser,
1208 _("SVR4 Library list has unsupported version \"%s\""),
1209 version);
1210
1211 if (main_lm)
1212 list->main_lm = *(ULONGEST *) main_lm->value;
1213 }
1214
1215 /* The allowed elements and attributes for an XML library list.
1216 The root element is a <library-list-svr4>. */
1217
1218 static const struct gdb_xml_attribute svr4_library_attributes[] =
1219 {
1220 { "name", GDB_XML_AF_NONE, NULL, NULL },
1221 { "lm", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1222 { "l_addr", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1223 { "l_ld", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1224 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1225 };
1226
1227 static const struct gdb_xml_element svr4_library_list_children[] =
1228 {
1229 {
1230 "library", svr4_library_attributes, NULL,
1231 GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL,
1232 library_list_start_library, NULL
1233 },
1234 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1235 };
1236
1237 static const struct gdb_xml_attribute svr4_library_list_attributes[] =
1238 {
1239 { "version", GDB_XML_AF_NONE, NULL, NULL },
1240 { "main-lm", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
1241 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1242 };
1243
1244 static const struct gdb_xml_element svr4_library_list_elements[] =
1245 {
1246 { "library-list-svr4", svr4_library_list_attributes, svr4_library_list_children,
1247 GDB_XML_EF_NONE, svr4_library_list_start_list, NULL },
1248 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1249 };
1250
1251 /* Parse the qXfer:libraries-svr4:read packet DOCUMENT into *LIST.
1252
1253 Return 0 if the packet is not supported; *LIST is not modified in that
1254 case. Return 1 if *LIST contains the library list; it may be empty, and
1255 the caller is responsible for freeing all its entries. */
1256
1257 static int
1258 svr4_parse_libraries (const char *document, struct svr4_library_list *list)
1259 {
1260 struct cleanup *back_to = make_cleanup (svr4_free_library_list,
1261 &list->head);
1262
1263 memset (list, 0, sizeof (*list));
1264 list->tailp = &list->head;
1265 if (gdb_xml_parse_quick (_("target library list"), "library-list-svr4.dtd",
1266 svr4_library_list_elements, document, list) == 0)
1267 {
1268 /* Parsed successfully, keep the result. */
1269 discard_cleanups (back_to);
1270 return 1;
1271 }
1272
1273 do_cleanups (back_to);
1274 return 0;
1275 }
1276
1277 /* Attempt to get so_list from target via qXfer:libraries-svr4:read packet.
1278
1279 Return 0 if the packet is not supported; *LIST is not modified in that
1280 case. Return 1 if *LIST contains the library list; it may be empty, and
1281 the caller is responsible for freeing all its entries.
1282
1283 Note that ANNEX must be NULL if the remote does not explicitly allow
1284 qXfer:libraries-svr4:read packets with non-empty annexes. Support for
1285 this can be checked using target_augmented_libraries_svr4_read (). */
1286
1287 static int
1288 svr4_current_sos_via_xfer_libraries (struct svr4_library_list *list,
1289 const char *annex)
1290 {
1291 char *svr4_library_document;
1292 int result;
1293 struct cleanup *back_to;
1294
1295 gdb_assert (annex == NULL || target_augmented_libraries_svr4_read ());
1296
1297 /* Fetch the list of shared libraries. */
1298 svr4_library_document = target_read_stralloc (&current_target,
1299 TARGET_OBJECT_LIBRARIES_SVR4,
1300 annex);
1301 if (svr4_library_document == NULL)
1302 return 0;
1303
1304 back_to = make_cleanup (xfree, svr4_library_document);
1305 result = svr4_parse_libraries (svr4_library_document, list);
1306 do_cleanups (back_to);
1307
1308 return result;
1309 }
1310
1311 #else
1312
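/* Stub used when GDB is built without libexpat support: report the
library list packet as unsupported.  */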
1313 static int
1314 svr4_current_sos_via_xfer_libraries (struct svr4_library_list *list,
1315 const char *annex)
1316 {
1317 return 0;
1318 }
1319
1320 #endif
1321
1322 /* If no shared library information is available from the dynamic
1323 linker, build a fallback list from other sources. */
1324
1325 static struct so_list *
1326 svr4_default_sos (void)
1327 {
1328 struct svr4_info *info = get_svr4_info ();
1329 struct so_list *newobj;
1330
1331 if (!info->debug_loader_offset_p)
1332 return NULL;
1333
1334 newobj = XCNEW (struct so_list);
1335
1336 newobj->lm_info = XCNEW (struct lm_info);
1337
1338 /* Nothing will ever check the other fields if we set l_addr_p. */
1339 newobj->lm_info->l_addr = info->debug_loader_offset;
1340 newobj->lm_info->l_addr_p = 1;
1341
1342 strncpy (newobj->so_name, info->debug_loader_name, SO_NAME_MAX_PATH_SIZE - 1);
1343 newobj->so_name[SO_NAME_MAX_PATH_SIZE - 1] = '\0';
1344 strcpy (newobj->so_original_name, newobj->so_name);
1345
1346 return newobj;
1347 }
1348
1349 /* Read the whole inferior libraries chain starting at address LM.
1350 Expect the first entry in the chain's previous entry to be PREV_LM.
1351 Add the entries to the tail referenced by LINK_PTR_PTR. Ignore the
1352 first entry if IGNORE_FIRST and set global MAIN_LM_ADDR according
1353 to it. Returns nonzero upon success. If zero is returned the
1354 entries stored to LINK_PTR_PTR are still valid although they may
1355 represent only part of the inferior library list. */
1356
1357 static int
1358 svr4_read_so_list (CORE_ADDR lm, CORE_ADDR prev_lm,
1359 struct so_list ***link_ptr_ptr, int ignore_first)
1360 {
1361 CORE_ADDR first_l_name = 0;
1362 CORE_ADDR next_lm;
1363
1364 for (; lm != 0; prev_lm = lm, lm = next_lm)
1365 {
1366 struct so_list *newobj;
1367 struct cleanup *old_chain;
1368 int errcode;
1369 char *buffer;
1370
1371 newobj = XCNEW (struct so_list);
1372 old_chain = make_cleanup_free_so (newobj);
1373
1374 newobj->lm_info = lm_info_read (lm);
1375 if (newobj->lm_info == NULL)
1376 {
1377 do_cleanups (old_chain);
1378 return 0;
1379 }
1380
1381 next_lm = newobj->lm_info->l_next;
1382
1383 if (newobj->lm_info->l_prev != prev_lm)
1384 {
1385 warning (_("Corrupted shared library list: %s != %s"),
1386 paddress (target_gdbarch (), prev_lm),
1387 paddress (target_gdbarch (), newobj->lm_info->l_prev));
1388 do_cleanups (old_chain);
1389 return 0;
1390 }
1391
1392 /* For SVR4 versions, the first entry in the link map is for the
1393 inferior executable, so we must ignore it. For some versions of
1394 SVR4, it has no name. For others (Solaris 2.3 for example), it
1395 does have a name, so we can no longer use a missing name to
1396 decide when to ignore it. */
1397 if (ignore_first && newobj->lm_info->l_prev == 0)
1398 {
1399 struct svr4_info *info = get_svr4_info ();
1400
1401 first_l_name = newobj->lm_info->l_name;
1402 info->main_lm_addr = newobj->lm_info->lm_addr;
1403 do_cleanups (old_chain);
1404 continue;
1405 }
1406
1407 /* Extract this shared object's name. */
1408 target_read_string (newobj->lm_info->l_name, &buffer,
1409 SO_NAME_MAX_PATH_SIZE - 1, &errcode);
1410 if (errcode != 0)
1411 {
1412 /* If this entry's l_name address matches that of the
1413 inferior executable, then this is not a normal shared
1414 object, but (most likely) a vDSO. In this case, silently
1415 skip it; otherwise emit a warning. */
1416 if (first_l_name == 0 || newobj->lm_info->l_name != first_l_name)
1417 warning (_("Can't read pathname for load map: %s."),
1418 safe_strerror (errcode));
1419 do_cleanups (old_chain);
1420 continue;
1421 }
1422
1423 strncpy (newobj->so_name, buffer, SO_NAME_MAX_PATH_SIZE - 1);
1424 newobj->so_name[SO_NAME_MAX_PATH_SIZE - 1] = '\0';
1425 strcpy (newobj->so_original_name, newobj->so_name);
1426 xfree (buffer);
1427
1428 /* If this entry has no name, or its name matches the name
1429 for the main executable, don't include it in the list. */
1430 if (! newobj->so_name[0] || match_main (newobj->so_name))
1431 {
1432 do_cleanups (old_chain);
1433 continue;
1434 }
1435
1436 discard_cleanups (old_chain);
1437 newobj->next = 0;
1438 **link_ptr_ptr = newobj;
1439 *link_ptr_ptr = &newobj->next;
1440 }
1441
1442 return 1;
1443 }
1444
1445 /* Read the full list of currently loaded shared objects directly
1446 from the inferior, without referring to any libraries read and
1447 stored by the probes interface. Handle special cases relating
1448 to the first elements of the list. */
1449
1450 static struct so_list *
1451 svr4_current_sos_direct (struct svr4_info *info)
1452 {
1453 CORE_ADDR lm;
1454 struct so_list *head = NULL;
1455 struct so_list **link_ptr = &head;
1456 struct cleanup *back_to;
1457 int ignore_first;
1458 struct svr4_library_list library_list;
1459
1460 /* Fall back to manual examination of the target if the packet is not
1461 supported or gdbserver failed to find DT_DEBUG. gdb.server/solib-list.exp
1462 tests a case where gdbserver cannot find the shared libraries list while
1463 GDB itself is able to find it via SYMFILE_OBJFILE.
1464
1465 Unfortunately statically linked inferiors will also fall back through this
1466 suboptimal code path. */
1467
1468 info->using_xfer = svr4_current_sos_via_xfer_libraries (&library_list,
1469 NULL);
1470 if (info->using_xfer)
1471 {
1472 if (library_list.main_lm)
1473 info->main_lm_addr = library_list.main_lm;
1474
1475 return library_list.head ? library_list.head : svr4_default_sos ();
1476 }
1477
1478 /* Always locate the debug struct, in case it has moved. */
1479 info->debug_base = 0;
1480 locate_base (info);
1481
1482 /* If we can't find the dynamic linker's base structure, this
1483 must not be a dynamically linked executable. Hmm. */
1484 if (! info->debug_base)
1485 return svr4_default_sos ();
1486
1487 /* Assume that everything is a library if the dynamic loader was loaded
1488 late by a static executable. */
1489 if (exec_bfd && bfd_get_section_by_name (exec_bfd, ".dynamic") == NULL)
1490 ignore_first = 0;
1491 else
1492 ignore_first = 1;
1493
1494 back_to = make_cleanup (svr4_free_library_list, &head);
1495
1496 /* Walk the inferior's link map list, and build our list of
1497 `struct so_list' nodes. */
1498 lm = solib_svr4_r_map (info);
1499 if (lm)
1500 svr4_read_so_list (lm, 0, &link_ptr, ignore_first);
1501
1502 /* On Solaris, the dynamic linker is not in the normal list of
1503 shared objects, so make sure we pick it up too. Having
1504 symbol information for the dynamic linker is quite crucial
1505 for skipping dynamic linker resolver code. */
1506 lm = solib_svr4_r_ldsomap (info);
1507 if (lm)
1508 svr4_read_so_list (lm, 0, &link_ptr, 0);
1509
1510 discard_cleanups (back_to);
1511
1512 if (head == NULL)
1513 return svr4_default_sos ();
1514
1515 return head;
1516 }
1517
1518 /* Implement the main part of the "current_sos" target_so_ops
1519 method. */
1520
1521 static struct so_list *
1522 svr4_current_sos_1 (void)
1523 {
1524 struct svr4_info *info = get_svr4_info ();
1525
1526 /* If the solib list has been read and stored by the probes
1527 interface then we return a copy of the stored list. */
1528 if (info->solib_list != NULL)
1529 return svr4_copy_library_list (info->solib_list);
1530
1531 /* Otherwise obtain the solib list directly from the inferior. */
1532 return svr4_current_sos_direct (info);
1533 }
1534
1535 /* Implement the "current_sos" target_so_ops method. */
1536
1537 static struct so_list *
1538 svr4_current_sos (void)
1539 {
1540 struct so_list *so_head = svr4_current_sos_1 ();
1541 struct mem_range vsyscall_range;
1542
1543 /* Filter out the vDSO module, if present. Its symbol file would
1544 not be found on disk. The vDSO/vsyscall's OBJFILE is instead
1545 managed by symfile-mem.c:add_vsyscall_page. */
1546 if (gdbarch_vsyscall_range (target_gdbarch (), &vsyscall_range)
1547 && vsyscall_range.length != 0)
1548 {
1549 struct so_list **sop;
1550
1551 sop = &so_head;
1552 while (*sop != NULL)
1553 {
1554 struct so_list *so = *sop;
1555
1556 /* We can't simply match the vDSO by starting address alone,
1557 because lm_info->l_addr_inferior (and also l_addr) do not
1558 necessarily represent the real starting address of the
1559 ELF if the vDSO's ELF itself is "prelinked". The l_ld
1560 field (the ".dynamic" section of the shared object)
1561 always points at the absolute/resolved address though.
1562 So check whether that address is inside the vDSO's
1563 mapping instead.
1564
1565 E.g., on Linux 3.16 (x86_64) the vDSO is a regular
1566 0-based ELF, and we see:
1567
1568 (gdb) info auxv
1569 33 AT_SYSINFO_EHDR System-supplied DSO's ELF header 0x7ffff7ffb000
1570 (gdb) p/x *_r_debug.r_map.l_next
1571 $1 = {l_addr = 0x7ffff7ffb000, ..., l_ld = 0x7ffff7ffb318, ...}
1572
1573 And on Linux 2.6.32 (x86_64) we see:
1574
1575 (gdb) info auxv
1576 33 AT_SYSINFO_EHDR System-supplied DSO's ELF header 0x7ffff7ffe000
1577 (gdb) p/x *_r_debug.r_map.l_next
1578 $5 = {l_addr = 0x7ffff88fe000, ..., l_ld = 0x7ffff7ffe580, ... }
1579
1580 Dumping that vDSO shows:
1581
1582 (gdb) info proc mappings
1583 0x7ffff7ffe000 0x7ffff7fff000 0x1000 0 [vdso]
1584 (gdb) dump memory vdso.bin 0x7ffff7ffe000 0x7ffff7fff000
1585 # readelf -Wa vdso.bin
1586 [...]
1587 Entry point address: 0xffffffffff700700
1588 [...]
1589 Section Headers:
1590 [Nr] Name Type Address Off Size
1591 [ 0] NULL 0000000000000000 000000 000000
1592 [ 1] .hash HASH ffffffffff700120 000120 000038
1593 [ 2] .dynsym DYNSYM ffffffffff700158 000158 0000d8
1594 [...]
1595 [ 9] .dynamic DYNAMIC ffffffffff700580 000580 0000f0
1596 */
1597 if (address_in_mem_range (so->lm_info->l_ld, &vsyscall_range))
1598 {
1599 *sop = so->next;
1600 free_so (so);
1601 break;
1602 }
1603
1604 sop = &so->next;
1605 }
1606 }
1607
1608 return so_head;
1609 }
1610
1611 /* Get the address of the link_map for a given OBJFILE. */
1612
1613 CORE_ADDR
1614 svr4_fetch_objfile_link_map (struct objfile *objfile)
1615 {
1616 struct so_list *so;
1617 struct svr4_info *info = get_svr4_info ();
1618
1619 /* Cause svr4_current_sos() to be run if it hasn't been already. */
1620 if (info->main_lm_addr == 0)
1621 solib_add (NULL, 0, auto_solib_add);
1622
1623 /* svr4_current_sos() will set main_lm_addr for the main executable. */
1624 if (objfile == symfile_objfile)
1625 return info->main_lm_addr;
1626
1627 /* The other link map addresses may be found by examining the list
1628 of shared libraries. */
1629 for (so = master_so_list (); so; so = so->next)
1630 if (so->objfile == objfile)
1631 return so->lm_info->lm_addr;
1632
1633 /* Not found! */
1634 return 0;
1635 }
1636
1637 /* On some systems, the only way to recognize the link map entry for
1638 the main executable file is by looking at its name. Return
1639 non-zero iff SONAME matches one of the known main executable names. */
1640
1641 static int
1642 match_main (const char *soname)
1643 {
1644 const char * const *mainp;
1645
1646 for (mainp = main_name_list; *mainp != NULL; mainp++)
1647 {
1648 if (strcmp (soname, *mainp) == 0)
1649 return (1);
1650 }
1651
1652 return (0);
1653 }
1654
1655 /* Return 1 if PC lies in the dynamic symbol resolution code of the
1656 SVR4 run time loader. */
1657
1658 int
1659 svr4_in_dynsym_resolve_code (CORE_ADDR pc)
1660 {
1661 struct svr4_info *info = get_svr4_info ();
1662
1663 return ((pc >= info->interp_text_sect_low
1664 && pc < info->interp_text_sect_high)
1665 || (pc >= info->interp_plt_sect_low
1666 && pc < info->interp_plt_sect_high)
1667 || in_plt_section (pc)
1668 || in_gnu_ifunc_stub (pc));
1669 }
1670
1671 /* Given an executable's ABFD and target, compute the entry-point
1672 address. */
1673
1674 static CORE_ADDR
1675 exec_entry_point (struct bfd *abfd, struct target_ops *targ)
1676 {
1677 CORE_ADDR addr;
1678
1679 /* KevinB wrote ... for most targets, the address returned by
1680 bfd_get_start_address() is the entry point for the start
1681 function. But, for some targets, bfd_get_start_address() returns
1682 the address of a function descriptor from which the entry point
1683 address may be extracted. This address is extracted by
1684 gdbarch_convert_from_func_ptr_addr(). The method
1685 gdbarch_convert_from_func_ptr_addr() is the merely the identify
1686 function for targets which don't use function descriptors. */
1687 addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
1688 bfd_get_start_address (abfd),
1689 targ);
1690 return gdbarch_addr_bits_remove (target_gdbarch (), addr);
1691 }
1692
1693 /* A probe and its associated action. */
1694
1695 struct probe_and_action
1696 {
1697 /* The probe. */
1698 struct probe *probe;
1699
1700 /* The relocated address of the probe. */
1701 CORE_ADDR address;
1702
1703 /* The action. */
1704 enum probe_action action;
1705 };
1706
1707 /* Returns a hash code for the probe_and_action referenced by p. */
1708
1709 static hashval_t
1710 hash_probe_and_action (const void *p)
1711 {
1712 const struct probe_and_action *pa = (const struct probe_and_action *) p;
1713
1714 return (hashval_t) pa->address;
1715 }
1716
1717 /* Returns non-zero if the probe_and_actions referenced by p1 and p2
1718 are equal. */
1719
1720 static int
1721 equal_probe_and_action (const void *p1, const void *p2)
1722 {
1723 const struct probe_and_action *pa1 = (const struct probe_and_action *) p1;
1724 const struct probe_and_action *pa2 = (const struct probe_and_action *) p2;
1725
1726 return pa1->address == pa2->address;
1727 }
1728
1729 /* Register a solib event probe and its associated action in the
1730 probes table. */
1731
1732 static void
1733 register_solib_event_probe (struct probe *probe, CORE_ADDR address,
1734 enum probe_action action)
1735 {
1736 struct svr4_info *info = get_svr4_info ();
1737 struct probe_and_action lookup, *pa;
1738 void **slot;
1739
1740 /* Create the probes table, if necessary. */
1741 if (info->probes_table == NULL)
1742 info->probes_table = htab_create_alloc (1, hash_probe_and_action,
1743 equal_probe_and_action,
1744 xfree, xcalloc, xfree);
1745
1746 lookup.probe = probe;
1747 lookup.address = address;
1748 slot = htab_find_slot (info->probes_table, &lookup, INSERT);
1749 gdb_assert (*slot == HTAB_EMPTY_ENTRY);
1750
1751 pa = XCNEW (struct probe_and_action);
1752 pa->probe = probe;
1753 pa->address = address;
1754 pa->action = action;
1755
1756 *slot = pa;
1757 }
1758
1759 /* Get the solib event probe at the specified location, and the
1760 action associated with it. Returns NULL if no solib event probe
1761 was found. */
1762
1763 static struct probe_and_action *
1764 solib_event_probe_at (struct svr4_info *info, CORE_ADDR address)
1765 {
1766 struct probe_and_action lookup;
1767 void **slot;
1768
1769 lookup.address = address;
1770 slot = htab_find_slot (info->probes_table, &lookup, NO_INSERT);
1771
1772 if (slot == NULL)
1773 return NULL;
1774
1775 return (struct probe_and_action *) *slot;
1776 }
1777
1778 /* Decide what action to take when the specified solib event probe is
1779 hit. */
1780
1781 static enum probe_action
1782 solib_event_probe_action (struct probe_and_action *pa)
1783 {
1784 enum probe_action action;
1785 unsigned probe_argc = 0;
1786 struct frame_info *frame = get_current_frame ();
1787
1788 action = pa->action;
1789 if (action == DO_NOTHING || action == PROBES_INTERFACE_FAILED)
1790 return action;
1791
1792 gdb_assert (action == FULL_RELOAD || action == UPDATE_OR_RELOAD);
1793
1794 /* Check that an appropriate number of arguments has been supplied.
1795 We expect:
1796 arg0: Lmid_t lmid (mandatory)
1797 arg1: struct r_debug *debug_base (mandatory)
1798 arg2: struct link_map *new (optional, for incremental updates) */
1799 TRY
1800 {
1801 probe_argc = get_probe_argument_count (pa->probe, frame);
1802 }
1803 CATCH (ex, RETURN_MASK_ERROR)
1804 {
1805 exception_print (gdb_stderr, ex);
1806 probe_argc = 0;
1807 }
1808 END_CATCH
1809
1810 /* If get_probe_argument_count throws an exception, probe_argc will
1811 be set to zero. However, if pa->probe does not have arguments,
1812 then get_probe_argument_count will succeed but probe_argc will
1813 also be zero. Both cases happen because of different things, but
1814 they are treated equally here: action will be set to
1815 PROBES_INTERFACE_FAILED. */
1816 if (probe_argc == 2)
1817 action = FULL_RELOAD;
1818 else if (probe_argc < 2)
1819 action = PROBES_INTERFACE_FAILED;
1820
1821 return action;
1822 }
1823
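/* Background, for illustration only (not defined in this file): a glibc
   dynamic linker built with systemtap support emits probes in the "rtld"
   provider, with names such as "map_complete" and "reloc_complete".
   Conceptually each probe site looks roughly like

     LIBC_PROBE (reloc_complete, 3, lmid, r_debug_ptr, new_map);

   passing the namespace id, a pointer to the relevant r_debug, and - where
   an incremental update is possible - the first new link_map entry,
   matching the argument layout described above.  Exact probe names and
   arities vary between glibc versions, so treat this as a sketch rather
   than a specification. */
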
1824 /* Populate the shared object list by reading the entire list of
1825 shared objects from the inferior. Handle special cases relating
1826 to the first elements of the list. Returns nonzero on success. */
1827
1828 static int
1829 solist_update_full (struct svr4_info *info)
1830 {
1831 free_solib_list (info);
1832 info->solib_list = svr4_current_sos_direct (info);
1833
1834 return 1;
1835 }
1836
1837 /* Update the shared object list starting from the link-map entry
1838 passed by the linker in the probe's third argument. Returns
1839 nonzero if the list was successfully updated, or zero to indicate
1840 failure. */
1841
1842 static int
1843 solist_update_incremental (struct svr4_info *info, CORE_ADDR lm)
1844 {
1845 struct so_list *tail;
1846 CORE_ADDR prev_lm;
1847
1848 /* svr4_current_sos_direct contains logic to handle a number of
1849 special cases relating to the first elements of the list. To
1850 avoid duplicating this logic we defer to solist_update_full
1851 if the list is empty. */
1852 if (info->solib_list == NULL)
1853 return 0;
1854
1855 /* Fall back to a full update if we are using a remote target
1856 that does not support incremental transfers. */
1857 if (info->using_xfer && !target_augmented_libraries_svr4_read ())
1858 return 0;
1859
1860 /* Walk to the end of the list. */
1861 for (tail = info->solib_list; tail->next != NULL; tail = tail->next)
1862 /* Nothing. */;
1863 prev_lm = tail->lm_info->lm_addr;
1864
1865 /* Read the new objects. */
1866 if (info->using_xfer)
1867 {
1868 struct svr4_library_list library_list;
1869 char annex[64];
1870
1871 xsnprintf (annex, sizeof (annex), "start=%s;prev=%s",
1872 phex_nz (lm, sizeof (lm)),
1873 phex_nz (prev_lm, sizeof (prev_lm)));
1874 if (!svr4_current_sos_via_xfer_libraries (&library_list, annex))
1875 return 0;
1876
1877 tail->next = library_list.head;
1878 }
1879 else
1880 {
1881 struct so_list **link = &tail->next;
1882
1883 /* IGNORE_FIRST may safely be set to zero here because the
1884 above check and deferral to solist_update_full ensures
1885 that this call to svr4_read_so_list will never see the
1886 first element. */
1887 if (!svr4_read_so_list (lm, prev_lm, &link, 0))
1888 return 0;
1889 }
1890
1891 return 1;
1892 }
1893
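/* Illustrative example (the addresses are made up): with LM = 0x7ffff7ffe730
   and PREV_LM = 0x7ffff7ffe190, the annex built above would read

     "start=7ffff7ffe730;prev=7ffff7ffe190"

   (phex_nz emits bare hex digits with no "0x" prefix).  The annex is passed
   to the target's qXfer:libraries-svr4:read handler so that only the newly
   added link-map entries are transferred. */
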
1894 /* Disable the probes-based linker interface and revert to the
1895 original interface. We don't reset the breakpoints as the
1896 ones set up for the probes-based interface are adequate. */
1897
1898 static void
1899 disable_probes_interface_cleanup (void *arg)
1900 {
1901 struct svr4_info *info = get_svr4_info ();
1902
1903 warning (_("Probes-based dynamic linker interface failed.\n"
1904 "Reverting to original interface.\n"));
1905
1906 free_probes_table (info);
1907 free_solib_list (info);
1908 }
1909
1910 /* Update the solib list as appropriate when using the
1911 probes-based linker interface. Do nothing if using the
1912 standard interface. */
1913
1914 static void
1915 svr4_handle_solib_event (void)
1916 {
1917 struct svr4_info *info = get_svr4_info ();
1918 struct probe_and_action *pa;
1919 enum probe_action action;
1920 struct cleanup *old_chain, *usm_chain;
1921 struct value *val = NULL;
1922 CORE_ADDR pc, debug_base, lm = 0;
1923 struct frame_info *frame = get_current_frame ();
1924
1925 /* Do nothing if not using the probes interface. */
1926 if (info->probes_table == NULL)
1927 return;
1928
1929 /* If anything goes wrong we revert to the original linker
1930 interface. */
1931 old_chain = make_cleanup (disable_probes_interface_cleanup, NULL);
1932
1933 pc = regcache_read_pc (get_current_regcache ());
1934 pa = solib_event_probe_at (info, pc);
1935 if (pa == NULL)
1936 {
1937 do_cleanups (old_chain);
1938 return;
1939 }
1940
1941 action = solib_event_probe_action (pa);
1942 if (action == PROBES_INTERFACE_FAILED)
1943 {
1944 do_cleanups (old_chain);
1945 return;
1946 }
1947
1948 if (action == DO_NOTHING)
1949 {
1950 discard_cleanups (old_chain);
1951 return;
1952 }
1953
1954 /* evaluate_probe_argument looks up symbols in the dynamic linker
1955 using find_pc_section. find_pc_section is accelerated by a cache
1956 called the section map. The section map is invalidated every
1957 time a shared library is loaded or unloaded, and if the inferior
1958 is generating a lot of shared library events then the section map
1959 will be updated every time svr4_handle_solib_event is called.
1960 We called find_pc_section in svr4_create_solib_event_breakpoints,
1961 so we can guarantee that the dynamic linker's sections are in the
1962 section map. We can therefore inhibit section map updates across
1963 these calls to evaluate_probe_argument and save a lot of time. */
1964 inhibit_section_map_updates (current_program_space);
1965 usm_chain = make_cleanup (resume_section_map_updates_cleanup,
1966 current_program_space);
1967
1968 TRY
1969 {
1970 val = evaluate_probe_argument (pa->probe, 1, frame);
1971 }
1972 CATCH (ex, RETURN_MASK_ERROR)
1973 {
1974 exception_print (gdb_stderr, ex);
1975 val = NULL;
1976 }
1977 END_CATCH
1978
1979 if (val == NULL)
1980 {
1981 do_cleanups (old_chain);
1982 return;
1983 }
1984
1985 debug_base = value_as_address (val);
1986 if (debug_base == 0)
1987 {
1988 do_cleanups (old_chain);
1989 return;
1990 }
1991
1992 /* Always locate the debug struct, in case it moved. */
1993 info->debug_base = 0;
1994 if (locate_base (info) == 0)
1995 {
1996 do_cleanups (old_chain);
1997 return;
1998 }
1999
2000 /* GDB does not currently support libraries loaded via dlmopen
2001 into namespaces other than the initial one. We must ignore
2002 any namespace other than the initial namespace here until
2003 support for this is added to GDB. */
2004 if (debug_base != info->debug_base)
2005 action = DO_NOTHING;
2006
2007 if (action == UPDATE_OR_RELOAD)
2008 {
2009 TRY
2010 {
2011 val = evaluate_probe_argument (pa->probe, 2, frame);
2012 }
2013 CATCH (ex, RETURN_MASK_ERROR)
2014 {
2015 exception_print (gdb_stderr, ex);
2016 do_cleanups (old_chain);
2017 return;
2018 }
2019 END_CATCH
2020
2021 if (val != NULL)
2022 lm = value_as_address (val);
2023
2024 if (lm == 0)
2025 action = FULL_RELOAD;
2026 }
2027
2028 /* Resume section map updates. */
2029 do_cleanups (usm_chain);
2030
2031 if (action == UPDATE_OR_RELOAD)
2032 {
2033 if (!solist_update_incremental (info, lm))
2034 action = FULL_RELOAD;
2035 }
2036
2037 if (action == FULL_RELOAD)
2038 {
2039 if (!solist_update_full (info))
2040 {
2041 do_cleanups (old_chain);
2042 return;
2043 }
2044 }
2045
2046 discard_cleanups (old_chain);
2047 }
2048
2049 /* Helper function for svr4_update_solib_event_breakpoints. */
2050
2051 static int
2052 svr4_update_solib_event_breakpoint (struct breakpoint *b, void *arg)
2053 {
2054 struct bp_location *loc;
2055
2056 if (b->type != bp_shlib_event)
2057 {
2058 /* Continue iterating. */
2059 return 0;
2060 }
2061
2062 for (loc = b->loc; loc != NULL; loc = loc->next)
2063 {
2064 struct svr4_info *info;
2065 struct probe_and_action *pa;
2066
2067 info = ((struct svr4_info *)
2068 program_space_data (loc->pspace, solib_svr4_pspace_data));
2069 if (info == NULL || info->probes_table == NULL)
2070 continue;
2071
2072 pa = solib_event_probe_at (info, loc->address);
2073 if (pa == NULL)
2074 continue;
2075
2076 if (pa->action == DO_NOTHING)
2077 {
2078 if (b->enable_state == bp_disabled && stop_on_solib_events)
2079 enable_breakpoint (b);
2080 else if (b->enable_state == bp_enabled && !stop_on_solib_events)
2081 disable_breakpoint (b);
2082 }
2083
2084 break;
2085 }
2086
2087 /* Continue iterating. */
2088 return 0;
2089 }
2090
2091 /* Enable or disable optional solib event breakpoints as appropriate.
2092 Called whenever stop_on_solib_events is changed. */
2093
2094 static void
2095 svr4_update_solib_event_breakpoints (void)
2096 {
2097 iterate_over_breakpoints (svr4_update_solib_event_breakpoint, NULL);
2098 }
2099
2100 /* Create and register solib event breakpoints. PROBES is an array
2101 of NUM_PROBES elements, each of which is a vector of probes. A
2102 solib event breakpoint will be created and registered for each
2103 probe. */
2104
2105 static void
2106 svr4_create_probe_breakpoints (struct gdbarch *gdbarch,
2107 VEC (probe_p) **probes,
2108 struct objfile *objfile)
2109 {
2110 int i;
2111
2112 for (i = 0; i < NUM_PROBES; i++)
2113 {
2114 enum probe_action action = probe_info[i].action;
2115 struct probe *probe;
2116 int ix;
2117
2118 for (ix = 0;
2119 VEC_iterate (probe_p, probes[i], ix, probe);
2120 ++ix)
2121 {
2122 CORE_ADDR address = get_probe_address (probe, objfile);
2123
2124 create_solib_event_breakpoint (gdbarch, address);
2125 register_solib_event_probe (probe, address, action);
2126 }
2127 }
2128
2129 svr4_update_solib_event_breakpoints ();
2130 }
2131
2132 /* Both the SunOS and the SVR4 dynamic linkers call a marker function
2133 before and after mapping and unmapping shared libraries. The sole
2134 purpose of this method is to allow debuggers to set a breakpoint so
2135 they can track these changes.
2136
2137 Some versions of the glibc dynamic linker contain named probes
2138 to allow more fine grained stopping. Given the address of the
2139 original marker function, this function attempts to find these
2140 probes, and if found, sets breakpoints on those instead. If the
2141 probes aren't found, a single breakpoint is set on the original
2142 marker function. */
2143
2144 static void
2145 svr4_create_solib_event_breakpoints (struct gdbarch *gdbarch,
2146 CORE_ADDR address)
2147 {
2148 struct obj_section *os;
2149
2150 os = find_pc_section (address);
2151 if (os != NULL)
2152 {
2153 int with_prefix;
2154
2155 for (with_prefix = 0; with_prefix <= 1; with_prefix++)
2156 {
2157 VEC (probe_p) *probes[NUM_PROBES];
2158 int all_probes_found = 1;
2159 int checked_can_use_probe_arguments = 0;
2160 int i;
2161
2162 memset (probes, 0, sizeof (probes));
2163 for (i = 0; i < NUM_PROBES; i++)
2164 {
2165 const char *name = probe_info[i].name;
2166 struct probe *p;
2167 char buf[32];
2168
2169 /* Fedora 17 and Red Hat Enterprise Linux 6.2-6.4
2170 shipped with an early version of the probes code in
2171 which the probes' names were prefixed with "rtld_"
2172 and the "map_failed" probe did not exist. The
2173 locations of the probes are otherwise the same, so
2174 we check for probes with prefixed names if probes
2175 with unprefixed names are not present. */
2176 if (with_prefix)
2177 {
2178 xsnprintf (buf, sizeof (buf), "rtld_%s", name);
2179 name = buf;
2180 }
2181
2182 probes[i] = find_probes_in_objfile (os->objfile, "rtld", name);
2183
2184 /* The "map_failed" probe did not exist in early
2185 versions of the probes code in which the probes'
2186 names were prefixed with "rtld_". */
2187 if (strcmp (name, "rtld_map_failed") == 0)
2188 continue;
2189
2190 if (VEC_empty (probe_p, probes[i]))
2191 {
2192 all_probes_found = 0;
2193 break;
2194 }
2195
2196 /* Ensure probe arguments can be evaluated. */
2197 if (!checked_can_use_probe_arguments)
2198 {
2199 p = VEC_index (probe_p, probes[i], 0);
2200 if (!can_evaluate_probe_arguments (p))
2201 {
2202 all_probes_found = 0;
2203 break;
2204 }
2205 checked_can_use_probe_arguments = 1;
2206 }
2207 }
2208
2209 if (all_probes_found)
2210 svr4_create_probe_breakpoints (gdbarch, probes, os->objfile);
2211
2212 for (i = 0; i < NUM_PROBES; i++)
2213 VEC_free (probe_p, probes[i]);
2214
2215 if (all_probes_found)
2216 return;
2217 }
2218 }
2219
2220 create_solib_event_breakpoint (gdbarch, address);
2221 }
2222
2223 /* Helper function for gdb_bfd_lookup_symbol. */
2224
2225 static int
2226 cmp_name_and_sec_flags (const asymbol *sym, const void *data)
2227 {
2228 return (strcmp (sym->name, (const char *) data) == 0
2229 && (sym->section->flags & (SEC_CODE | SEC_DATA)) != 0);
2230 }
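
/* This predicate is used with gdb_bfd_lookup_symbol below to pick out names
   such as "_dl_debug_state" from the dynamic linker's symbol table; the
   SEC_CODE | SEC_DATA test skips symbols of the same name that do not live
   in a code or data section (for example, undefined symbols). */
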
2231 /* Arrange for dynamic linker to hit breakpoint.
2232
2233 Both the SunOS and the SVR4 dynamic linkers have, as part of their
2234 debugger interface, support for arranging for the inferior to hit
2235 a breakpoint after mapping in the shared libraries. This function
2236 enables that breakpoint.
2237
2238 For SunOS, there is a special flag location (in_debugger) which we
2239 set to 1. When the dynamic linker sees this flag set, it will set
2240 a breakpoint at a location known only to itself, after saving the
2241 original contents of that place and the breakpoint address itself,
2242 in its own internal structures. When we resume the inferior, it
2243 will eventually take a SIGTRAP when it runs into the breakpoint.
2244 We handle this (in a different place) by restoring the contents of
2245 the breakpointed location (which is only known after it stops),
2246 chasing around to locate the shared libraries that have been
2247 loaded, then resuming.
2248
2249 For SVR4, the debugger interface structure contains a member (r_brk)
2250 which is statically initialized at the time the shared library is
2251 built, to the offset of a function (_r_debug_state) which is
2252 guaranteed to be called once before mapping in a library, and again when
2253 the mapping is complete. At the time we are examining this member,
2254 it contains only the unrelocated offset of the function, so we have
2255 to do our own relocation. Later, when the dynamic linker actually
2256 runs, it relocates r_brk to be the actual address of _r_debug_state().
2257
2258 The debugger interface structure also contains an enumeration which
2259 is set to either RT_ADD or RT_DELETE prior to changing the mapping,
2260 depending upon whether or not the library is being mapped or unmapped,
2261 and then set to RT_CONSISTENT after the library is mapped/unmapped. */
2262
2263 static int
2264 enable_break (struct svr4_info *info, int from_tty)
2265 {
2266 struct bound_minimal_symbol msymbol;
2267 const char * const *bkpt_namep;
2268 asection *interp_sect;
2269 char *interp_name;
2270 CORE_ADDR sym_addr;
2271
2272 info->interp_text_sect_low = info->interp_text_sect_high = 0;
2273 info->interp_plt_sect_low = info->interp_plt_sect_high = 0;
2274
2275 /* If we already have a shared library list in the target, and
2276 r_debug contains r_brk, set the breakpoint there - this should
2277 mean r_brk has already been relocated. Assume the dynamic linker
2278 is the object containing r_brk. */
2279
2280 solib_add (NULL, from_tty, auto_solib_add);
2281 sym_addr = 0;
2282 if (info->debug_base && solib_svr4_r_map (info) != 0)
2283 sym_addr = solib_svr4_r_brk (info);
2284
2285 if (sym_addr != 0)
2286 {
2287 struct obj_section *os;
2288
2289 sym_addr = gdbarch_addr_bits_remove
2290 (target_gdbarch (), gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
2291 sym_addr,
2292 &current_target));
2293
2294 /* On at least some versions of Solaris there's a dynamic relocation
2295 on _r_debug.r_brk and SYM_ADDR may not be relocated yet, e.g., if
2296 we get control before the dynamic linker has self-relocated.
2297 Check if SYM_ADDR is in a known section; if it is, assume we can
2298 trust its value. This is just a heuristic though; it could go away
2299 or be replaced if it's getting in the way.
2300
2301 On ARM we need to know whether the ISA of rtld_db_dlactivity (or
2302 however it's spelled in your particular system) is ARM or Thumb.
2303 That knowledge is encoded in the address, if it's Thumb the low bit
2304 is 1. However, we've stripped that info above and it's not clear
2305 what all the consequences are of passing a non-addr_bits_remove'd
2306 address to svr4_create_solib_event_breakpoints. The call to
2307 find_pc_section verifies we know about the address and have some
2308 hope of computing the right kind of breakpoint to use (via
2309 symbol info). It does mean that GDB needs to be pointed at a
2310 non-stripped version of the dynamic linker in order to obtain
2311 information it already knows about. Sigh. */
2312
2313 os = find_pc_section (sym_addr);
2314 if (os != NULL)
2315 {
2316 /* Record the relocated start and end address of the dynamic linker
2317 text and plt section for svr4_in_dynsym_resolve_code. */
2318 bfd *tmp_bfd;
2319 CORE_ADDR load_addr;
2320
2321 tmp_bfd = os->objfile->obfd;
2322 load_addr = ANOFFSET (os->objfile->section_offsets,
2323 SECT_OFF_TEXT (os->objfile));
2324
2325 interp_sect = bfd_get_section_by_name (tmp_bfd, ".text");
2326 if (interp_sect)
2327 {
2328 info->interp_text_sect_low =
2329 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
2330 info->interp_text_sect_high =
2331 info->interp_text_sect_low
2332 + bfd_section_size (tmp_bfd, interp_sect);
2333 }
2334 interp_sect = bfd_get_section_by_name (tmp_bfd, ".plt");
2335 if (interp_sect)
2336 {
2337 info->interp_plt_sect_low =
2338 bfd_section_vma (tmp_bfd, interp_sect) + load_addr;
2339 info->interp_plt_sect_high =
2340 info->interp_plt_sect_low
2341 + bfd_section_size (tmp_bfd, interp_sect);
2342 }
2343
2344 svr4_create_solib_event_breakpoints (target_gdbarch (), sym_addr);
2345 return 1;
2346 }
2347 }
2348
2349 /* Find the program interpreter; if not found, warn the user and drop
2350 into the old breakpoint at symbol code. */
2351 interp_name = find_program_interpreter ();
2352 if (interp_name)
2353 {
2354 CORE_ADDR load_addr = 0;
2355 int load_addr_found = 0;
2356 int loader_found_in_list = 0;
2357 struct so_list *so;
2358 struct target_ops *tmp_bfd_target;
2359
2360 sym_addr = 0;
2361
2362 /* Now we need to figure out where the dynamic linker was
2363 loaded so that we can load its symbols and place a breakpoint
2364 in the dynamic linker itself.
2365
2366 This address is stored on the stack. However, I've been unable
2367 to find any magic formula to find it for Solaris (appears to
2368 be trivial on GNU/Linux). Therefore, we have to try an alternate
2369 mechanism to find the dynamic linker's base address. */
2370
2371 gdb_bfd_ref_ptr tmp_bfd;
2372 TRY
2373 {
2374 tmp_bfd = solib_bfd_open (interp_name);
2375 }
2376 CATCH (ex, RETURN_MASK_ALL)
2377 {
2378 }
2379 END_CATCH
2380
2381 if (tmp_bfd == NULL)
2382 goto bkpt_at_symbol;
2383
2384 /* Now convert the TMP_BFD into a target so that both target and
2385 BFD operations can be used. target_bfd_reopen
2386 acquires its own reference. */
2387 tmp_bfd_target = target_bfd_reopen (tmp_bfd.get ());
2388
2389 /* On a running target, we can get the dynamic linker's base
2390 address from the shared library table. */
2391 so = master_so_list ();
2392 while (so)
2393 {
2394 if (svr4_same_1 (interp_name, so->so_original_name))
2395 {
2396 load_addr_found = 1;
2397 loader_found_in_list = 1;
2398 load_addr = lm_addr_check (so, tmp_bfd.get ());
2399 break;
2400 }
2401 so = so->next;
2402 }
2403
2404 /* If we were not able to find the base address of the loader
2405 from our so_list, then try using the AT_BASE auxiliary entry. */
2406 if (!load_addr_found)
2407 if (target_auxv_search (&current_target, AT_BASE, &load_addr) > 0)
2408 {
2409 int addr_bit = gdbarch_addr_bit (target_gdbarch ());
2410
2411 /* Ensure LOAD_ADDR has the proper sign in its possible upper bits so
2412 that `+ load_addr' will overflow the CORE_ADDR width rather than
2413 create invalid addresses like 0x101234567 for 32-bit inferiors on
2414 64-bit GDB. */
2415
2416 if (addr_bit < (sizeof (CORE_ADDR) * HOST_CHAR_BIT))
2417 {
2418 CORE_ADDR space_size = (CORE_ADDR) 1 << addr_bit;
2419 CORE_ADDR tmp_entry_point = exec_entry_point (tmp_bfd.get (),
2420 tmp_bfd_target);
2421
2422 gdb_assert (load_addr < space_size);
2423
2424 /* TMP_ENTRY_POINT exceeding SPACE_SIZE would mean a prelinked
2425 64-bit ld.so with a 32-bit executable; it should not happen. */
2426
2427 if (tmp_entry_point < space_size
2428 && tmp_entry_point + load_addr >= space_size)
2429 load_addr -= space_size;
2430 }
2431
2432 load_addr_found = 1;
2433 }
2434
2435 /* Otherwise we find the dynamic linker's base address by examining
2436 the current pc (which should point at the entry point for the
2437 dynamic linker) and subtracting the offset of the entry point.
2438
2439 This is more fragile than the previous approaches, but is a good
2440 fallback method because it has actually been working well in
2441 most cases. */
2442 if (!load_addr_found)
2443 {
2444 struct regcache *regcache
2445 = get_thread_arch_regcache (inferior_ptid, target_gdbarch ());
2446
2447 load_addr = (regcache_read_pc (regcache)
2448 - exec_entry_point (tmp_bfd.get (), tmp_bfd_target));
2449 }
2450
2451 if (!loader_found_in_list)
2452 {
2453 info->debug_loader_name = xstrdup (interp_name);
2454 info->debug_loader_offset_p = 1;
2455 info->debug_loader_offset = load_addr;
2456 solib_add (NULL, from_tty, auto_solib_add);
2457 }
2458
2459 /* Record the relocated start and end address of the dynamic linker
2460 text and plt section for svr4_in_dynsym_resolve_code. */
2461 interp_sect = bfd_get_section_by_name (tmp_bfd.get (), ".text");
2462 if (interp_sect)
2463 {
2464 info->interp_text_sect_low =
2465 bfd_section_vma (tmp_bfd.get (), interp_sect) + load_addr;
2466 info->interp_text_sect_high =
2467 info->interp_text_sect_low
2468 + bfd_section_size (tmp_bfd.get (), interp_sect);
2469 }
2470 interp_sect = bfd_get_section_by_name (tmp_bfd.get (), ".plt");
2471 if (interp_sect)
2472 {
2473 info->interp_plt_sect_low =
2474 bfd_section_vma (tmp_bfd.get (), interp_sect) + load_addr;
2475 info->interp_plt_sect_high =
2476 info->interp_plt_sect_low
2477 + bfd_section_size (tmp_bfd.get (), interp_sect);
2478 }
2479
2480 /* Now try to set a breakpoint in the dynamic linker. */
2481 for (bkpt_namep = solib_break_names; *bkpt_namep != NULL; bkpt_namep++)
2482 {
2483 sym_addr = gdb_bfd_lookup_symbol (tmp_bfd.get (),
2484 cmp_name_and_sec_flags,
2485 *bkpt_namep);
2486 if (sym_addr != 0)
2487 break;
2488 }
2489
2490 if (sym_addr != 0)
2491 /* Convert 'sym_addr' from a function pointer to an address.
2492 Because we pass tmp_bfd_target instead of the current
2493 target, this will always produce an unrelocated value. */
2494 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
2495 sym_addr,
2496 tmp_bfd_target);
2497
2498 /* We're done with both the temporary bfd and target. Closing
2499 the target closes the underlying bfd, because it holds the
2500 only remaining reference. */
2501 target_close (tmp_bfd_target);
2502
2503 if (sym_addr != 0)
2504 {
2505 svr4_create_solib_event_breakpoints (target_gdbarch (),
2506 load_addr + sym_addr);
2507 xfree (interp_name);
2508 return 1;
2509 }
2510
2511 /* For whatever reason we couldn't set a breakpoint in the dynamic
2512 linker. Warn and drop into the old code. */
2513 bkpt_at_symbol:
2514 xfree (interp_name);
2515 warning (_("Unable to find dynamic linker breakpoint function.\n"
2516 "GDB will be unable to debug shared library initializers\n"
2517 "and track explicitly loaded dynamic code."));
2518 }
2519
2520 /* Scan through the lists of symbols, trying to look up the symbol and
2521 set a breakpoint there. Terminate the loop if/when we succeed. */
2522
2523 for (bkpt_namep = solib_break_names; *bkpt_namep != NULL; bkpt_namep++)
2524 {
2525 msymbol = lookup_minimal_symbol (*bkpt_namep, NULL, symfile_objfile);
2526 if ((msymbol.minsym != NULL)
2527 && (BMSYMBOL_VALUE_ADDRESS (msymbol) != 0))
2528 {
2529 sym_addr = BMSYMBOL_VALUE_ADDRESS (msymbol);
2530 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
2531 sym_addr,
2532 &current_target);
2533 svr4_create_solib_event_breakpoints (target_gdbarch (), sym_addr);
2534 return 1;
2535 }
2536 }
2537
2538 if (interp_name != NULL && !current_inferior ()->attach_flag)
2539 {
2540 for (bkpt_namep = bkpt_names; *bkpt_namep != NULL; bkpt_namep++)
2541 {
2542 msymbol = lookup_minimal_symbol (*bkpt_namep, NULL, symfile_objfile);
2543 if ((msymbol.minsym != NULL)
2544 && (BMSYMBOL_VALUE_ADDRESS (msymbol) != 0))
2545 {
2546 sym_addr = BMSYMBOL_VALUE_ADDRESS (msymbol);
2547 sym_addr = gdbarch_convert_from_func_ptr_addr (target_gdbarch (),
2548 sym_addr,
2549 &current_target);
2550 svr4_create_solib_event_breakpoints (target_gdbarch (), sym_addr);
2551 return 1;
2552 }
2553 }
2554 }
2555 return 0;
2556 }
2557
2558 /* Read the ELF program headers from ABFD. Return the contents and
2559 set *PHDRS_SIZE to the size of the program headers. */
2560
2561 static gdb_byte *
2562 read_program_headers_from_bfd (bfd *abfd, int *phdrs_size)
2563 {
2564 Elf_Internal_Ehdr *ehdr;
2565 gdb_byte *buf;
2566
2567 ehdr = elf_elfheader (abfd);
2568
2569 *phdrs_size = ehdr->e_phnum * ehdr->e_phentsize;
2570 if (*phdrs_size == 0)
2571 return NULL;
2572
2573 buf = (gdb_byte *) xmalloc (*phdrs_size);
2574 if (bfd_seek (abfd, ehdr->e_phoff, SEEK_SET) != 0
2575 || bfd_bread (buf, *phdrs_size, abfd) != *phdrs_size)
2576 {
2577 xfree (buf);
2578 return NULL;
2579 }
2580
2581 return buf;
2582 }
2583
2584 /* Return 1 and fill *DISPLACEMENTP with detected PIE offset of inferior
2585 exec_bfd. Otherwise return 0.
2586
2587 We relocate all of the sections by the same amount. This
2588 behavior is mandated by recent editions of the System V ABI.
2589 According to the System V Application Binary Interface,
2590 Edition 4.1, page 5-5:
2591
2592 ... Though the system chooses virtual addresses for
2593 individual processes, it maintains the segments' relative
2594 positions. Because position-independent code uses relative
2595 addressing between segments, the difference between
2596 virtual addresses in memory must match the difference
2597 between virtual addresses in the file. The difference
2598 between the virtual address of any segment in memory and
2599 the corresponding virtual address in the file is thus a
2600 single constant value for any one executable or shared
2601 object in a given process. This difference is the base
2602 address. One use of the base address is to relocate the
2603 memory image of the program during dynamic linking.
2604
2605 The same language also appears in Edition 4.0 of the System V
2606 ABI and is left unspecified in some of the earlier editions.
2607
2608 Decide if the objfile needs to be relocated. As indicated above, we will
2609 only be here when execution is stopped. But during attachment the PC can be
2610 at an arbitrary address, so regcache_read_pc can be misleading (contrary to
2611 the auxv AT_ENTRY value). Moreover, for an executable with an interpreter
2612 section, regcache_read_pc would point to the interpreter, not the main executable.
2613
2614 So, to summarize, relocations are necessary when the start address obtained
2615 from the executable is different from the address in the auxv AT_ENTRY entry.
2616
2617 [ The astute reader will note that we also test to make sure that
2618 the executable in question has the DYNAMIC flag set. It is my
2619 opinion that this test is unnecessary (undesirable even). It
2620 was added to avoid inadvertent relocation of an executable
2621 whose e_type member in the ELF header is not ET_DYN. There may
2622 be a time in the future when it is desirable to do relocations
2623 on other types of files as well in which case this condition
2624 should either be removed or modified to accommodate the new file
2625 type. - Kevin, Nov 2000. ] */
2626
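/* A concrete, made-up example of the computation below: if the entry point
   recorded in the ELF file is 0x1040 and the auxv AT_ENTRY value for the
   running process is 0x555555555040, then

     exec_displacement = 0x555555555040 - 0x1040 = 0x555555554000

   and svr4_relocate_main_executable later shifts every section of the main
   executable by that amount. */
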
2627 static int
2628 svr4_exec_displacement (CORE_ADDR *displacementp)
2629 {
2630 /* ENTRY_POINT is a possible function descriptor - before
2631 a call to gdbarch_convert_from_func_ptr_addr. */
2632 CORE_ADDR entry_point, exec_displacement;
2633
2634 if (exec_bfd == NULL)
2635 return 0;
2636
2637 /* If EXEC_BFD lacks the DYNAMIC flag then for ELF it is ET_EXEC and not
2638 ET_DYN. Both shared libraries being executed themselves and PIE (Position
2639 Independent Executable) executables are ET_DYN. */
2640
2641 if ((bfd_get_file_flags (exec_bfd) & DYNAMIC) == 0)
2642 return 0;
2643
2644 if (target_auxv_search (&current_target, AT_ENTRY, &entry_point) <= 0)
2645 return 0;
2646
2647 exec_displacement = entry_point - bfd_get_start_address (exec_bfd);
2648
2649 /* Verify the EXEC_DISPLACEMENT candidate complies with the required page
2650 alignment. It is cheaper than the program headers comparison below. */
2651
2652 if (bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
2653 {
2654 const struct elf_backend_data *elf = get_elf_backend_data (exec_bfd);
2655
2656 /* p_align of PT_LOAD segments does not specify any alignment but
2657 only congruency of addresses:
2658 p_offset % p_align == p_vaddr % p_align
2659 Kernel is free to load the executable with lower alignment. */
2660
2661 if ((exec_displacement & (elf->minpagesize - 1)) != 0)
2662 return 0;
2663 }
2664
2665 /* Verify that the auxiliary vector describes the same file as exec_bfd, by
2666 comparing their program headers. If the program headers in the auxiliary
2667 vector do not match the program headers in the executable, then we are
2668 looking at a different file than the one used by the kernel - for
2669 instance, "gdb program" connected to "gdbserver :PORT ld.so program". */
2670
2671 if (bfd_get_flavour (exec_bfd) == bfd_target_elf_flavour)
2672 {
2673 /* Be optimistic and clear OK only if GDB was able to verify the headers
2674 really do not match. */
2675 int phdrs_size, phdrs2_size, ok = 1;
2676 gdb_byte *buf, *buf2;
2677 int arch_size;
2678
2679 buf = read_program_header (-1, &phdrs_size, &arch_size, NULL);
2680 buf2 = read_program_headers_from_bfd (exec_bfd, &phdrs2_size);
2681 if (buf != NULL && buf2 != NULL)
2682 {
2683 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
2684
2685 /* We are dealing with three different addresses. EXEC_BFD
2686 represents the current addresses in the on-disk file. Target memory
2687 content may differ from EXEC_BFD, as the file may have been prelinked
2688 to a different address after the executable has been loaded.
2689 Moreover the address of placement in target memory can be
2690 different from what the program headers in target memory say -
2691 this is the goal of PIE.
2692
2693 Detected DISPLACEMENT covers both the offsets of PIE placement and
2694 possible new prelink performed after start of the program. Here
2695 relocate BUF and BUF2 just by the EXEC_BFD vs. target memory
2696 content offset for the verification purpose. */
2697
2698 if (phdrs_size != phdrs2_size
2699 || bfd_get_arch_size (exec_bfd) != arch_size)
2700 ok = 0;
2701 else if (arch_size == 32
2702 && phdrs_size >= sizeof (Elf32_External_Phdr)
2703 && phdrs_size % sizeof (Elf32_External_Phdr) == 0)
2704 {
2705 Elf_Internal_Ehdr *ehdr2 = elf_tdata (exec_bfd)->elf_header;
2706 Elf_Internal_Phdr *phdr2 = elf_tdata (exec_bfd)->phdr;
2707 CORE_ADDR displacement = 0;
2708 int i;
2709
2710 /* DISPLACEMENT could be found more easily by the difference of
2711 ehdr2->e_entry. But we haven't read the ehdr yet, and we
2712 already have enough information to compute that displacement
2713 with what we've read. */
2714
2715 for (i = 0; i < ehdr2->e_phnum; i++)
2716 if (phdr2[i].p_type == PT_LOAD)
2717 {
2718 Elf32_External_Phdr *phdrp;
2719 gdb_byte *buf_vaddr_p, *buf_paddr_p;
2720 CORE_ADDR vaddr, paddr;
2721 CORE_ADDR displacement_vaddr = 0;
2722 CORE_ADDR displacement_paddr = 0;
2723
2724 phdrp = &((Elf32_External_Phdr *) buf)[i];
2725 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
2726 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
2727
2728 vaddr = extract_unsigned_integer (buf_vaddr_p, 4,
2729 byte_order);
2730 displacement_vaddr = vaddr - phdr2[i].p_vaddr;
2731
2732 paddr = extract_unsigned_integer (buf_paddr_p, 4,
2733 byte_order);
2734 displacement_paddr = paddr - phdr2[i].p_paddr;
2735
2736 if (displacement_vaddr == displacement_paddr)
2737 displacement = displacement_vaddr;
2738
2739 break;
2740 }
2741
2742 /* Now compare BUF and BUF2 with optional DISPLACEMENT. */
2743
2744 for (i = 0; i < phdrs_size / sizeof (Elf32_External_Phdr); i++)
2745 {
2746 Elf32_External_Phdr *phdrp;
2747 Elf32_External_Phdr *phdr2p;
2748 gdb_byte *buf_vaddr_p, *buf_paddr_p;
2749 CORE_ADDR vaddr, paddr;
2750 asection *plt2_asect;
2751
2752 phdrp = &((Elf32_External_Phdr *) buf)[i];
2753 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
2754 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
2755 phdr2p = &((Elf32_External_Phdr *) buf2)[i];
2756
2757 /* PT_GNU_STACK is an exception in that it is never relocated by
2758 prelink, as its addresses are always zero. */
2759
2760 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2761 continue;
2762
2763 /* Check also other adjustment combinations - PR 11786. */
2764
2765 vaddr = extract_unsigned_integer (buf_vaddr_p, 4,
2766 byte_order);
2767 vaddr -= displacement;
2768 store_unsigned_integer (buf_vaddr_p, 4, byte_order, vaddr);
2769
2770 paddr = extract_unsigned_integer (buf_paddr_p, 4,
2771 byte_order);
2772 paddr -= displacement;
2773 store_unsigned_integer (buf_paddr_p, 4, byte_order, paddr);
2774
2775 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2776 continue;
2777
2778 /* Strip modifies the flags and alignment of PT_GNU_RELRO.
2779 CentOS-5 has problems with filesz, memsz as well.
2780 See PR 11786. */
2781 if (phdr2[i].p_type == PT_GNU_RELRO)
2782 {
2783 Elf32_External_Phdr tmp_phdr = *phdrp;
2784 Elf32_External_Phdr tmp_phdr2 = *phdr2p;
2785
2786 memset (tmp_phdr.p_filesz, 0, 4);
2787 memset (tmp_phdr.p_memsz, 0, 4);
2788 memset (tmp_phdr.p_flags, 0, 4);
2789 memset (tmp_phdr.p_align, 0, 4);
2790 memset (tmp_phdr2.p_filesz, 0, 4);
2791 memset (tmp_phdr2.p_memsz, 0, 4);
2792 memset (tmp_phdr2.p_flags, 0, 4);
2793 memset (tmp_phdr2.p_align, 0, 4);
2794
2795 if (memcmp (&tmp_phdr, &tmp_phdr2, sizeof (tmp_phdr))
2796 == 0)
2797 continue;
2798 }
2799
2800 /* prelink can convert .plt SHT_NOBITS to SHT_PROGBITS. */
2801 plt2_asect = bfd_get_section_by_name (exec_bfd, ".plt");
2802 if (plt2_asect)
2803 {
2804 int content2;
2805 gdb_byte *buf_filesz_p = (gdb_byte *) &phdrp->p_filesz;
2806 CORE_ADDR filesz;
2807
2808 content2 = (bfd_get_section_flags (exec_bfd, plt2_asect)
2809 & SEC_HAS_CONTENTS) != 0;
2810
2811 filesz = extract_unsigned_integer (buf_filesz_p, 4,
2812 byte_order);
2813
2814 /* PLT2_ASECT is from on-disk file (exec_bfd) while
2815 FILESZ is from the in-memory image. */
2816 if (content2)
2817 filesz += bfd_get_section_size (plt2_asect);
2818 else
2819 filesz -= bfd_get_section_size (plt2_asect);
2820
2821 store_unsigned_integer (buf_filesz_p, 4, byte_order,
2822 filesz);
2823
2824 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2825 continue;
2826 }
2827
2828 ok = 0;
2829 break;
2830 }
2831 }
2832 else if (arch_size == 64
2833 && phdrs_size >= sizeof (Elf64_External_Phdr)
2834 && phdrs_size % sizeof (Elf64_External_Phdr) == 0)
2835 {
2836 Elf_Internal_Ehdr *ehdr2 = elf_tdata (exec_bfd)->elf_header;
2837 Elf_Internal_Phdr *phdr2 = elf_tdata (exec_bfd)->phdr;
2838 CORE_ADDR displacement = 0;
2839 int i;
2840
2841 /* DISPLACEMENT could be found more easily by the difference of
2842 ehdr2->e_entry. But we haven't read the ehdr yet, and we
2843 already have enough information to compute that displacement
2844 with what we've read. */
2845
2846 for (i = 0; i < ehdr2->e_phnum; i++)
2847 if (phdr2[i].p_type == PT_LOAD)
2848 {
2849 Elf64_External_Phdr *phdrp;
2850 gdb_byte *buf_vaddr_p, *buf_paddr_p;
2851 CORE_ADDR vaddr, paddr;
2852 CORE_ADDR displacement_vaddr = 0;
2853 CORE_ADDR displacement_paddr = 0;
2854
2855 phdrp = &((Elf64_External_Phdr *) buf)[i];
2856 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
2857 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
2858
2859 vaddr = extract_unsigned_integer (buf_vaddr_p, 8,
2860 byte_order);
2861 displacement_vaddr = vaddr - phdr2[i].p_vaddr;
2862
2863 paddr = extract_unsigned_integer (buf_paddr_p, 8,
2864 byte_order);
2865 displacement_paddr = paddr - phdr2[i].p_paddr;
2866
2867 if (displacement_vaddr == displacement_paddr)
2868 displacement = displacement_vaddr;
2869
2870 break;
2871 }
2872
2873 /* Now compare BUF and BUF2 with optional DISPLACEMENT. */
2874
2875 for (i = 0; i < phdrs_size / sizeof (Elf64_External_Phdr); i++)
2876 {
2877 Elf64_External_Phdr *phdrp;
2878 Elf64_External_Phdr *phdr2p;
2879 gdb_byte *buf_vaddr_p, *buf_paddr_p;
2880 CORE_ADDR vaddr, paddr;
2881 asection *plt2_asect;
2882
2883 phdrp = &((Elf64_External_Phdr *) buf)[i];
2884 buf_vaddr_p = (gdb_byte *) &phdrp->p_vaddr;
2885 buf_paddr_p = (gdb_byte *) &phdrp->p_paddr;
2886 phdr2p = &((Elf64_External_Phdr *) buf2)[i];
2887
2888 /* PT_GNU_STACK is an exception in that it is never relocated by
2889 prelink, as its addresses are always zero. */
2890
2891 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2892 continue;
2893
2894 /* Check also other adjustment combinations - PR 11786. */
2895
2896 vaddr = extract_unsigned_integer (buf_vaddr_p, 8,
2897 byte_order);
2898 vaddr -= displacement;
2899 store_unsigned_integer (buf_vaddr_p, 8, byte_order, vaddr);
2900
2901 paddr = extract_unsigned_integer (buf_paddr_p, 8,
2902 byte_order);
2903 paddr -= displacement;
2904 store_unsigned_integer (buf_paddr_p, 8, byte_order, paddr);
2905
2906 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2907 continue;
2908
2909 /* Strip modifies the flags and alignment of PT_GNU_RELRO.
2910 CentOS-5 has problems with filesz, memsz as well.
2911 See PR 11786. */
2912 if (phdr2[i].p_type == PT_GNU_RELRO)
2913 {
2914 Elf64_External_Phdr tmp_phdr = *phdrp;
2915 Elf64_External_Phdr tmp_phdr2 = *phdr2p;
2916
2917 memset (tmp_phdr.p_filesz, 0, 8);
2918 memset (tmp_phdr.p_memsz, 0, 8);
2919 memset (tmp_phdr.p_flags, 0, 4);
2920 memset (tmp_phdr.p_align, 0, 8);
2921 memset (tmp_phdr2.p_filesz, 0, 8);
2922 memset (tmp_phdr2.p_memsz, 0, 8);
2923 memset (tmp_phdr2.p_flags, 0, 4);
2924 memset (tmp_phdr2.p_align, 0, 8);
2925
2926 if (memcmp (&tmp_phdr, &tmp_phdr2, sizeof (tmp_phdr))
2927 == 0)
2928 continue;
2929 }
2930
2931 /* prelink can convert .plt SHT_NOBITS to SHT_PROGBITS. */
2932 plt2_asect = bfd_get_section_by_name (exec_bfd, ".plt");
2933 if (plt2_asect)
2934 {
2935 int content2;
2936 gdb_byte *buf_filesz_p = (gdb_byte *) &phdrp->p_filesz;
2937 CORE_ADDR filesz;
2938
2939 content2 = (bfd_get_section_flags (exec_bfd, plt2_asect)
2940 & SEC_HAS_CONTENTS) != 0;
2941
2942 filesz = extract_unsigned_integer (buf_filesz_p, 8,
2943 byte_order);
2944
2945 /* PLT2_ASECT is from on-disk file (exec_bfd) while
2946 FILESZ is from the in-memory image. */
2947 if (content2)
2948 filesz += bfd_get_section_size (plt2_asect);
2949 else
2950 filesz -= bfd_get_section_size (plt2_asect);
2951
2952 store_unsigned_integer (buf_filesz_p, 8, byte_order,
2953 filesz);
2954
2955 if (memcmp (phdrp, phdr2p, sizeof (*phdrp)) == 0)
2956 continue;
2957 }
2958
2959 ok = 0;
2960 break;
2961 }
2962 }
2963 else
2964 ok = 0;
2965 }
2966
2967 xfree (buf);
2968 xfree (buf2);
2969
2970 if (!ok)
2971 return 0;
2972 }
2973
2974 if (info_verbose)
2975 {
2976 /* This may be printed repeatedly, as there is no easy way to check
2977 whether the executable symbols/file have already been relocated by
2978 this displacement. */
2979
2980 printf_unfiltered (_("Using PIE (Position Independent Executable) "
2981 "displacement %s for \"%s\".\n"),
2982 paddress (target_gdbarch (), exec_displacement),
2983 bfd_get_filename (exec_bfd));
2984 }
2985
2986 *displacementp = exec_displacement;
2987 return 1;
2988 }
2989
2990 /* Relocate the main executable. This function should be called upon
2991 stopping the inferior process at the entry point to the program.
2992 The entry point from BFD is compared to the AT_ENTRY of AUXV and if they are
2993 different, the main executable is relocated by the proper amount. */
2994
2995 static void
2996 svr4_relocate_main_executable (void)
2997 {
2998 CORE_ADDR displacement;
2999
3000 /* If we are re-running this executable, SYMFILE_OBJFILE->SECTION_OFFSETS
3001 probably contains the offsets computed using the PIE displacement
3002 from the previous run, which of course are irrelevant for this run.
3003 So we need to determine the new PIE displacement and recompute the
3004 section offsets accordingly, even if SYMFILE_OBJFILE->SECTION_OFFSETS
3005 already contains pre-computed offsets.
3006
3007 If we cannot compute the PIE displacement, either:
3008
3009 - The executable is not PIE.
3010
3011 - SYMFILE_OBJFILE does not match the executable started in the target.
3012 This can happen for main executable symbols loaded at the host while
3013 `ld.so --ld-args main-executable' is loaded in the target.
3014
3015 Then we leave the section offsets untouched and use them as is for
3016 this run. Either:
3017
3018 - These section offsets were properly reset earlier, and thus
3019 already contain the correct values. This can happen for instance
3020 when reconnecting via the remote protocol to a target that supports
3021 the `qOffsets' packet.
3022
3023 - The section offsets were not reset earlier, and the best we can
3024 hope for is that the old offsets are still applicable to the new run. */
3025
3026 if (! svr4_exec_displacement (&displacement))
3027 return;
3028
3029 /* Even DISPLACEMENT 0 is a valid new difference of in-memory vs. in-file
3030 addresses. */
3031
3032 if (symfile_objfile)
3033 {
3034 struct section_offsets *new_offsets;
3035 int i;
3036
3037 new_offsets = XALLOCAVEC (struct section_offsets,
3038 symfile_objfile->num_sections);
3039
3040 for (i = 0; i < symfile_objfile->num_sections; i++)
3041 new_offsets->offsets[i] = displacement;
3042
3043 objfile_relocate (symfile_objfile, new_offsets);
3044 }
3045 else if (exec_bfd)
3046 {
3047 asection *asect;
3048
3049 for (asect = exec_bfd->sections; asect != NULL; asect = asect->next)
3050 exec_set_section_address (bfd_get_filename (exec_bfd), asect->index,
3051 (bfd_section_vma (exec_bfd, asect)
3052 + displacement));
3053 }
3054 }
3055
3056 /* Implement the "create_inferior_hook" target_solib_ops method.
3057
3058 For SVR4 executables, the inferior's first instruction is either the first
3059 instruction in the dynamic linker (for dynamically linked
3060 executables) or the instruction at "start" for statically linked
3061 executables. For dynamically linked executables, the system
3062 first exec's /lib/libc.so.N, which contains the dynamic linker,
3063 and starts it running. The dynamic linker maps in any needed
3064 shared libraries, maps in the actual user executable, and then
3065 jumps to "start" in the user executable.
3066
3067 We can arrange to cooperate with the dynamic linker to discover the
3068 names of shared libraries that are dynamically linked, and the base
3069 addresses to which they are linked.
3070
3071 This function is responsible for discovering those names and
3072 addresses, and saving sufficient information about them to allow
3073 their symbols to be read at a later time. */
3074
3075 static void
3076 svr4_solib_create_inferior_hook (int from_tty)
3077 {
3078 struct svr4_info *info;
3079
3080 info = get_svr4_info ();
3081
3082 /* Clear the probes-based interface's state. */
3083 free_probes_table (info);
3084 free_solib_list (info);
3085
3086 /* Relocate the main executable if necessary. */
3087 svr4_relocate_main_executable ();
3088
3089 /* No point setting a breakpoint in the dynamic linker if we can't
3090 hit it (e.g., a core file, or a trace file). */
3091 if (!target_has_execution)
3092 return;
3093
3094 if (!svr4_have_link_map_offsets ())
3095 return;
3096
3097 if (!enable_break (info, from_tty))
3098 return;
3099 }
3100
3101 static void
3102 svr4_clear_solib (void)
3103 {
3104 struct svr4_info *info;
3105
3106 info = get_svr4_info ();
3107 info->debug_base = 0;
3108 info->debug_loader_offset_p = 0;
3109 info->debug_loader_offset = 0;
3110 xfree (info->debug_loader_name);
3111 info->debug_loader_name = NULL;
3112 }
3113
3114 /* Clear any bits of ADDR that wouldn't fit in a target-format
3115 data pointer. "Data pointer" here refers to whatever sort of
3116 address the dynamic linker uses to manage its sections. At the
3117 moment, we don't support shared libraries on any processors where
3118 code and data pointers are different sizes.
3119
3120 This isn't really the right solution. What we really need here is
3121 a way to do arithmetic on CORE_ADDR values that respects the
3122 natural pointer/address correspondence. (For example, on the MIPS,
3123 converting a 32-bit pointer to a 64-bit CORE_ADDR requires you to
3124 sign-extend the value. There, simply truncating the bits above
3125 gdbarch_ptr_bit, as we do below, is no good.) This should probably
3126 be a new gdbarch method or something. */
3127 static CORE_ADDR
3128 svr4_truncate_ptr (CORE_ADDR addr)
3129 {
3130 if (gdbarch_ptr_bit (target_gdbarch ()) == sizeof (CORE_ADDR) * 8)
3131 /* We don't need to truncate anything, and the bit twiddling below
3132 will fail due to overflow problems. */
3133 return addr;
3134 else
3135 return addr & (((CORE_ADDR) 1 << gdbarch_ptr_bit (target_gdbarch ())) - 1);
3136 }
3137
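/* Example with hypothetical values: with gdbarch_ptr_bit () == 32 on a host
   where CORE_ADDR is 64 bits wide, the mask computed above is
   ((CORE_ADDR) 1 << 32) - 1 == 0xffffffff, so svr4_truncate_ptr (0x101234567)
   yields 0x01234567. */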
3138
3139 static void
3140 svr4_relocate_section_addresses (struct so_list *so,
3141 struct target_section *sec)
3142 {
3143 bfd *abfd = sec->the_bfd_section->owner;
3144
3145 sec->addr = svr4_truncate_ptr (sec->addr + lm_addr_check (so, abfd));
3146 sec->endaddr = svr4_truncate_ptr (sec->endaddr + lm_addr_check (so, abfd));
3147 }
3148 \f
3149
3150 /* Architecture-specific operations. */
3151
3152 /* Per-architecture data key. */
3153 static struct gdbarch_data *solib_svr4_data;
3154
3155 struct solib_svr4_ops
3156 {
3157 /* Return a description of the layout of `struct link_map'. */
3158 struct link_map_offsets *(*fetch_link_map_offsets)(void);
3159 };
3160
3161 /* Return a default for the architecture-specific operations. */
3162
3163 static void *
3164 solib_svr4_init (struct obstack *obstack)
3165 {
3166 struct solib_svr4_ops *ops;
3167
3168 ops = OBSTACK_ZALLOC (obstack, struct solib_svr4_ops);
3169 ops->fetch_link_map_offsets = NULL;
3170 return ops;
3171 }
3172
3173 /* Set the architecture-specific `struct link_map_offsets' fetcher for
3174 GDBARCH to FLMO. Also, install SVR4 solib_ops into GDBARCH. */
3175
3176 void
3177 set_solib_svr4_fetch_link_map_offsets (struct gdbarch *gdbarch,
3178 struct link_map_offsets *(*flmo) (void))
3179 {
3180 struct solib_svr4_ops *ops
3181 = (struct solib_svr4_ops *) gdbarch_data (gdbarch, solib_svr4_data);
3182
3183 ops->fetch_link_map_offsets = flmo;
3184
3185 set_solib_ops (gdbarch, &svr4_so_ops);
3186 }
3187
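/* Typical usage, shown as an assumption about callers rather than anything
   defined in this file: an architecture's gdbarch initialization routine
   registers its link_map layout with, e.g.,

     set_solib_svr4_fetch_link_map_offsets (gdbarch,
                                            svr4_ilp32_fetch_link_map_offsets);

   which both records the fetcher and installs svr4_so_ops for that
   architecture. */
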
3188 /* Fetch a link_map_offsets structure using the architecture-specific
3189 `struct link_map_offsets' fetcher. */
3190
3191 static struct link_map_offsets *
3192 svr4_fetch_link_map_offsets (void)
3193 {
3194 struct solib_svr4_ops *ops
3195 = (struct solib_svr4_ops *) gdbarch_data (target_gdbarch (),
3196 solib_svr4_data);
3197
3198 gdb_assert (ops->fetch_link_map_offsets);
3199 return ops->fetch_link_map_offsets ();
3200 }
3201
3202 /* Return 1 if a link map offset fetcher has been defined, 0 otherwise. */
3203
3204 static int
3205 svr4_have_link_map_offsets (void)
3206 {
3207 struct solib_svr4_ops *ops
3208 = (struct solib_svr4_ops *) gdbarch_data (target_gdbarch (),
3209 solib_svr4_data);
3210
3211 return (ops->fetch_link_map_offsets != NULL);
3212 }
3213 \f
3214
3215 /* Most OSes that have SVR4-style ELF dynamic libraries define a
3216 `struct r_debug' and a `struct link_map' that are binary compatible
3217 with the original SVR4 implementation. */
3218
3219 /* Fetch (and possibly build) an appropriate `struct link_map_offsets'
3220 for an ILP32 SVR4 system. */
3221
3222 struct link_map_offsets *
3223 svr4_ilp32_fetch_link_map_offsets (void)
3224 {
3225 static struct link_map_offsets lmo;
3226 static struct link_map_offsets *lmp = NULL;
3227
3228 if (lmp == NULL)
3229 {
3230 lmp = &lmo;
3231
3232 lmo.r_version_offset = 0;
3233 lmo.r_version_size = 4;
3234 lmo.r_map_offset = 4;
3235 lmo.r_brk_offset = 8;
3236 lmo.r_ldsomap_offset = 20;
3237
3238 /* Everything we need is in the first 20 bytes. */
3239 lmo.link_map_size = 20;
3240 lmo.l_addr_offset = 0;
3241 lmo.l_name_offset = 4;
3242 lmo.l_ld_offset = 8;
3243 lmo.l_next_offset = 12;
3244 lmo.l_prev_offset = 16;
3245 }
3246
3247 return lmp;
3248 }
3249
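/* For reference, the ILP32 offsets above correspond to a layout roughly like
   the following sketch (the real definitions live in the system's <link.h>;
   r_ldsomap is a Solaris extension; all fields here are 4 bytes wide):

     struct r_debug  { int r_version; addr r_map; addr r_brk; ... };
     struct link_map { addr l_addr; addr l_name; addr l_ld;
                       addr l_next; addr l_prev; };

   The LP64 variant below describes the same layout with 8-byte pointer
   fields (r_version stays 4 bytes). */
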
3250 /* Fetch (and possibly build) an appropriate `struct link_map_offsets'
3251 for an LP64 SVR4 system. */
3252
3253 struct link_map_offsets *
3254 svr4_lp64_fetch_link_map_offsets (void)
3255 {
3256 static struct link_map_offsets lmo;
3257 static struct link_map_offsets *lmp = NULL;
3258
3259 if (lmp == NULL)
3260 {
3261 lmp = &lmo;
3262
3263 lmo.r_version_offset = 0;
3264 lmo.r_version_size = 4;
3265 lmo.r_map_offset = 8;
3266 lmo.r_brk_offset = 16;
3267 lmo.r_ldsomap_offset = 40;
3268
3269 /* Everything we need is in the first 40 bytes. */
3270 lmo.link_map_size = 40;
3271 lmo.l_addr_offset = 0;
3272 lmo.l_name_offset = 8;
3273 lmo.l_ld_offset = 16;
3274 lmo.l_next_offset = 24;
3275 lmo.l_prev_offset = 32;
3276 }
3277
3278 return lmp;
3279 }
3280 \f
3281
3282 struct target_so_ops svr4_so_ops;
3283
3284 /* Look up a global symbol for ELF DSOs linked with -Bsymbolic. Those DSOs have a
3285 different rule for symbol lookup. The lookup begins here in the DSO, not in
3286 the main executable. */
3287
3288 static struct block_symbol
3289 elf_lookup_lib_symbol (struct objfile *objfile,
3290 const char *name,
3291 const domain_enum domain)
3292 {
3293 bfd *abfd;
3294
3295 if (objfile == symfile_objfile)
3296 abfd = exec_bfd;
3297 else
3298 {
3299 /* OBJFILE should have been passed as the non-debug one. */
3300 gdb_assert (objfile->separate_debug_objfile_backlink == NULL);
3301
3302 abfd = objfile->obfd;
3303 }
3304
3305 if (abfd == NULL || scan_dyntag (DT_SYMBOLIC, abfd, NULL, NULL) != 1)
3306 return (struct block_symbol) {NULL, NULL};
3307
3308 return lookup_global_symbol_from_objfile (objfile, name, domain);
3309 }
3310
3311 extern initialize_file_ftype _initialize_svr4_solib; /* -Wmissing-prototypes */
3312
3313 void
3314 _initialize_svr4_solib (void)
3315 {
3316 solib_svr4_data = gdbarch_data_register_pre_init (solib_svr4_init);
3317 solib_svr4_pspace_data
3318 = register_program_space_data_with_cleanup (NULL, svr4_pspace_data_cleanup);
3319
3320 svr4_so_ops.relocate_section_addresses = svr4_relocate_section_addresses;
3321 svr4_so_ops.free_so = svr4_free_so;
3322 svr4_so_ops.clear_so = svr4_clear_so;
3323 svr4_so_ops.clear_solib = svr4_clear_solib;
3324 svr4_so_ops.solib_create_inferior_hook = svr4_solib_create_inferior_hook;
3325 svr4_so_ops.current_sos = svr4_current_sos;
3326 svr4_so_ops.open_symbol_file_object = open_symbol_file_object;
3327 svr4_so_ops.in_dynsym_resolve_code = svr4_in_dynsym_resolve_code;
3328 svr4_so_ops.bfd_open = solib_bfd_open;
3329 svr4_so_ops.lookup_lib_global_symbol = elf_lookup_lib_symbol;
3330 svr4_so_ops.same = svr4_same;
3331 svr4_so_ops.keep_data_in_core = svr4_keep_data_in_core;
3332 svr4_so_ops.update_breakpoints = svr4_update_solib_event_breakpoints;
3333 svr4_so_ops.handle_event = svr4_handle_solib_event;
3334 }