/* Copyright (C) 2010 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "ia64-tdep.h"
#include "ia64-hpux-tdep.h"
#include "solib-ia64-hpux.h"
#include "solist.h"
#include "solib.h"
#include "target.h"
#include "gdbtypes.h"
#include "inferior.h"
#include "gdbcore.h"
#include "regcache.h"
#include "opcode/ia64.h"
#include "symfile.h"
#include "objfiles.h"
#include "elf-bfd.h"
#include "exceptions.h"

/* Need to define the following macro in order to get the complete
   load_module_desc struct definition in dlfcn.h.  Otherwise, it doesn't
   match the size of the struct the loader is providing us during load
   events.  */
#define _LOAD_MODULE_DESC_EXT
40 | ||
41 | #include <sys/ttrace.h> | |
42 | #include <dlfcn.h> | |
43 | #include <elf.h> | |
44 | #include <service_mgr.h> | |
45 | ||
46 | /* The following is to have access to the definition of type load_info_t. */ | |
47 | #include <crt0.h> | |
48 | ||
/* The r32 pseudo-register number.

   Like all stacked registers, r32 is treated as a pseudo-register,
   because it is not always available for read/write via the ttrace
   interface.  */
/* This is a bit of a hack, as we duplicate something hidden inside
   ia64-tdep.c, but oh well...  */
#define IA64_R32_PSEUDO_REGNUM (IA64_NAT127_REGNUM + 2)

/* Our struct so_list private data structure.  */

struct lm_info
{
  /* The shared library module descriptor.  We extract this structure
     from the loader at the time the shared library gets mapped.  */
  struct load_module_desc module_desc;

  /* The text segment address as defined in the shared library object
     (this is not the address where this segment got loaded).  This
     field is initially set to zero, and computed lazily.  */
  CORE_ADDR text_start;

  /* The data segment address as defined in the shared library object
     (this is not the address where this segment got loaded).  This
     field is initially set to zero, and computed lazily.  */
  CORE_ADDR data_start;
};

/* The list of shared libraries currently mapped by the inferior.  */

static struct so_list *so_list_head = NULL;

/* Create a new so_list element.  The result should be deallocated
   when no longer in use.  */

static struct so_list *
new_so_list (char *so_name, struct load_module_desc module_desc)
{
  struct so_list *new_so;

  new_so = (struct so_list *) XZALLOC (struct so_list);
  new_so->lm_info = (struct lm_info *) XZALLOC (struct lm_info);
  new_so->lm_info->module_desc = module_desc;

  strncpy (new_so->so_name, so_name, SO_NAME_MAX_PATH_SIZE - 1);
  new_so->so_name[SO_NAME_MAX_PATH_SIZE - 1] = '\0';
  strcpy (new_so->so_original_name, new_so->so_name);

  return new_so;
}

/* Return non-zero if the instruction at the current PC is a breakpoint
   that is part of the dynamic loading process.

   We identify such instructions by checking that the instruction at
   the current pc is a break insn where no software breakpoint has been
   inserted by us.  We also verify that the operands have specific
   known values, to be extra certain.

   PTID is the ptid of the thread that should be checked, but this
   function also assumes that inferior_ptid is already equal to PTID.
   Ideally, we would like to avoid the requirement on inferior_ptid,
   but many routines still use the inferior_ptid global to access
   the relevant thread's register and memory.  We still have the ptid
   as parameter to be able to pass it to the routines that do take a ptid;
   that way we avoid increasing explicit uses of the inferior_ptid
   global.  */

static int
ia64_hpux_at_dld_breakpoint_1_p (ptid_t ptid)
{
  struct regcache *regcache = get_thread_regcache (ptid);
  CORE_ADDR pc = regcache_read_pc (regcache);
  struct address_space *aspace = get_regcache_aspace (regcache);
  ia64_insn t0, t1, slot[3], template, insn;
  int slotnum;
  bfd_byte bundle[16];

  /* If this is a regular breakpoint, then it cannot be a dld one.  */
  if (breakpoint_inserted_here_p (aspace, pc))
    return 0;

  slotnum = ((long) pc) & 0xf;
  if (slotnum > 2)
    internal_error (__FILE__, __LINE__,
                    "invalid slot (%d) for address %s", slotnum,
                    paddress (get_regcache_arch (regcache), pc));

  pc -= (pc & 0xf);
  read_memory (pc, bundle, sizeof (bundle));

  /* Bundles are always in little-endian byte order.  */
  t0 = bfd_getl64 (bundle);
  t1 = bfd_getl64 (bundle + 8);
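  /* A bundle is 128 bits wide: a 5-bit template in bits 0-4 followed by
     three 41-bit instruction slots in bits 5-45, 46-86 and 87-127
     (slot 1 straddles the two 64-bit halves read above).  */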
  template = (t0 >> 1) & 0xf;
  slot[0] = (t0 >> 5) & 0x1ffffffffffLL;
  slot[1] = ((t0 >> 46) & 0x3ffff) | ((t1 & 0x7fffff) << 18);
  slot[2] = (t1 >> 23) & 0x1ffffffffffLL;

  if (template == 2 && slotnum == 1)
    {
      /* Skip the L slot in an MLI template.  */
      slotnum = 2;
    }

  insn = slot[slotnum];

  return (insn == 0x1c0c9c0      /* break.i 0x070327 */
          || insn == 0x3c0c9c0); /* break.i 0x0f0327 */
}

/* Same as ia64_hpux_at_dld_breakpoint_1_p above, with the following
   differences: It temporarily sets inferior_ptid to PTID, and also
   catches any exception being raised.  */

int
ia64_hpux_at_dld_breakpoint_p (ptid_t ptid)
{
  struct gdb_exception e;
  ptid_t saved_ptid = inferior_ptid;
  int result = 0;

  inferior_ptid = ptid;
  TRY_CATCH (e, RETURN_MASK_ALL)
    {
      result = ia64_hpux_at_dld_breakpoint_1_p (ptid);
    }
  inferior_ptid = saved_ptid;
  if (e.reason < 0)
    warning (_("error while checking for dld breakpoint: %s"), e.message);

  return result;
}

/* Handler for library load event: Read the information provided by
   the loader, and then use it to read the shared library symbols.  */

static void
ia64_hpux_handle_load_event (struct regcache *regcache)
{
  CORE_ADDR module_desc_addr;
  ULONGEST module_desc_size;
  CORE_ADDR so_path_addr;
  char so_path[MAXPATHLEN];
  struct load_module_desc module_desc;
  struct so_list *new_so;

  /* Extract the data provided by the loader as follows:
       - r33: Address of the load_module_desc structure
       - r34: Size of struct load_module_desc
       - r35: Address of the string holding the shared library path.  */
  regcache_cooked_read_unsigned (regcache, IA64_R32_PSEUDO_REGNUM + 1,
                                 &module_desc_addr);
  regcache_cooked_read_unsigned (regcache, IA64_R32_PSEUDO_REGNUM + 2,
                                 &module_desc_size);
  regcache_cooked_read_unsigned (regcache, IA64_R32_PSEUDO_REGNUM + 3,
                                 &so_path_addr);

  if (module_desc_size != sizeof (struct load_module_desc))
    warning (_("load_module_desc size (%ld) != size returned by kernel (%s)"),
             sizeof (struct load_module_desc),
             pulongest (module_desc_size));

  read_memory_string (so_path_addr, so_path, MAXPATHLEN);
  read_memory (module_desc_addr, (gdb_byte *) &module_desc,
               sizeof (module_desc));

  /* Create a new so_list element and insert it at the start of our
     so_list_head (we insert at the start of the list only because
     it is less work compared to inserting it elsewhere).  */
  new_so = new_so_list (so_path, module_desc);
  new_so->next = so_list_head;
  so_list_head = new_so;
}

/* Update the value of the PC to point to the beginning of the next
   instruction bundle.  */

static void
ia64_hpux_move_pc_to_next_bundle (struct regcache *regcache)
{
  CORE_ADDR pc = regcache_read_pc (regcache);

  pc -= pc & 0xf;
  pc += 16;
  ia64_write_pc (regcache, pc);
}

/* Handle loader events.

   PTID is the ptid of the thread corresponding to the event being
   handled.  Similarly to ia64_hpux_at_dld_breakpoint_1_p, this
   function assumes that inferior_ptid is set to PTID.  */

static void
ia64_hpux_handle_dld_breakpoint_1 (ptid_t ptid)
{
  struct regcache *regcache = get_thread_regcache (ptid);
  ULONGEST arg0;

  /* The type of event is provided by the loader via r32.  */
  regcache_cooked_read_unsigned (regcache, IA64_R32_PSEUDO_REGNUM, &arg0);
  switch (arg0)
    {
    case BREAK_DE_SVC_LOADED:
      /* Currently, the only service loads are uld and dld,
         so we shouldn't need to do anything.  Just ignore.  */
      break;
    case BREAK_DE_LIB_LOADED:
      ia64_hpux_handle_load_event (regcache);
      solib_add (NULL, 0, &current_target, auto_solib_add);
      break;
    case BREAK_DE_LIB_UNLOADED:
    case BREAK_DE_LOAD_COMPLETE:
    case BREAK_DE_BOR:
      /* Ignore for now.  */
      break;
    }

  /* Now that we have handled the event, we can move the PC to
     the next instruction bundle, past the break instruction.  */
  ia64_hpux_move_pc_to_next_bundle (regcache);
}

/* Same as ia64_hpux_handle_dld_breakpoint_1 above, with the following
   differences: This function temporarily sets inferior_ptid to PTID,
   and also catches any exception.  */

void
ia64_hpux_handle_dld_breakpoint (ptid_t ptid)
{
  struct gdb_exception e;
  ptid_t saved_ptid = inferior_ptid;

  inferior_ptid = ptid;
  TRY_CATCH (e, RETURN_MASK_ALL)
    {
      ia64_hpux_handle_dld_breakpoint_1 (ptid);
    }
  inferior_ptid = saved_ptid;
  if (e.reason < 0)
    warning (_("error detected while handling dld breakpoint: %s"), e.message);
}

/* Find the address of the code and data segments in ABFD, and update
   TEXT_START and DATA_START accordingly.  */

static void
ia64_hpux_find_start_vma (bfd *abfd, CORE_ADDR *text_start,
                          CORE_ADDR *data_start)
{
  Elf_Internal_Ehdr *i_ehdrp = elf_elfheader (abfd);
  Elf64_Phdr phdr;
  int i;

  *text_start = 0;
  *data_start = 0;

  if (bfd_seek (abfd, i_ehdrp->e_phoff, SEEK_SET) == -1)
    error (_("invalid program header offset in %s"), abfd->filename);

  for (i = 0; i < i_ehdrp->e_phnum; i++)
    {
      if (bfd_bread (&phdr, sizeof (phdr), abfd) != sizeof (phdr))
        error (_("failed to read segment %d in %s"), i, abfd->filename);

      if ((phdr.p_flags & PF_X)
          && (*text_start == 0 || phdr.p_vaddr < *text_start))
        *text_start = phdr.p_vaddr;

      if ((phdr.p_flags & PF_W)
          && (*data_start == 0 || phdr.p_vaddr < *data_start))
        *data_start = phdr.p_vaddr;
    }
}

/* The "relocate_section_addresses" target_so_ops routine for ia64-hpux.  */

static void
ia64_hpux_relocate_section_addresses (struct so_list *so,
                                      struct target_section *sec)
{
  CORE_ADDR offset = 0;

  /* If we haven't computed the text & data segment addresses, do so now.
     We do this here, because we now have direct access to the associated
     bfd, whereas we would have had to open our own if we wanted to do it
     while processing the library-load event.  */
  if (so->lm_info->text_start == 0 && so->lm_info->data_start == 0)
    ia64_hpux_find_start_vma (sec->bfd, &so->lm_info->text_start,
                              &so->lm_info->data_start);

  /* Determine the relocation offset based on which segment
     the section belongs to.  */
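  /* The checks below handle both possible orderings of the text and data
     segments in the file: a section on the text segment's side of the
     boundary between the two segments gets the text relocation offset,
     anything else gets the data relocation offset.  */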
  if ((so->lm_info->text_start < so->lm_info->data_start
       && sec->addr < so->lm_info->data_start)
      || (so->lm_info->text_start > so->lm_info->data_start
          && sec->addr >= so->lm_info->text_start))
    offset = so->lm_info->module_desc.text_base - so->lm_info->text_start;
  else if ((so->lm_info->text_start < so->lm_info->data_start
            && sec->addr >= so->lm_info->data_start)
           || (so->lm_info->text_start > so->lm_info->data_start
               && sec->addr < so->lm_info->text_start))
    offset = so->lm_info->module_desc.data_base - so->lm_info->data_start;

  /* And now apply the relocation.  */
  sec->addr += offset;
  sec->endaddr += offset;

  /* Best effort to set addr_high/addr_low.  This is used only by
     'info sharedlibrary'.  */
  if (so->addr_low == 0 || sec->addr < so->addr_low)
    so->addr_low = sec->addr;

  if (so->addr_high == 0 || sec->endaddr > so->addr_high)
    so->addr_high = sec->endaddr;
}

/* The "free_so" target_so_ops routine for ia64-hpux.  */

static void
ia64_hpux_free_so (struct so_list *so)
{
  xfree (so->lm_info);
}

/* The "clear_solib" target_so_ops routine for ia64-hpux.  */

static void
ia64_hpux_clear_solib (void)
{
  struct so_list *so;

  while (so_list_head != NULL)
    {
      so = so_list_head;
      so_list_head = so_list_head->next;

      ia64_hpux_free_so (so);
      xfree (so);
    }
}

/* Assuming the inferior just stopped on an EXEC event, return
   the address of the load_info_t structure.  */

static CORE_ADDR
ia64_hpux_get_load_info_addr (void)
{
  struct type *data_ptr_type = builtin_type (target_gdbarch)->builtin_data_ptr;
  CORE_ADDR addr;
  int status;

  /* The address of the load_info_t structure is stored in the 4th
     argument passed to the initial thread of the process (in other
     words, in argv[3]).  So get the address of these arguments,
     and extract the 4th one.  */
  status = ttrace (TT_PROC_GET_ARGS, ptid_get_pid (inferior_ptid),
                   0, (uintptr_t) &addr, sizeof (CORE_ADDR), 0);
  if (status == -1 && errno)
    perror_with_name (_("Unable to get argument list"));
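  /* argv[3] is fetched at offset 3 * 8, each argument slot being assumed
     to be an 8-byte pointer.  */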
  return (read_memory_typed_address (addr + 3 * 8, data_ptr_type));
}

/* A structure used to aggregate some information extracted from
   the dynamic section of the main executable.  */

struct dld_info
{
  ULONGEST dld_flags;
  CORE_ADDR load_map;
};

/* Scan the ".dynamic" section referenced by ABFD and DYN_SECT,
   and extract the information needed to fill in INFO.  */

static void
ia64_hpux_read_dynamic_info (struct gdbarch *gdbarch, bfd *abfd,
                             asection *dyn_sect, struct dld_info *info)
{
  int sect_size;
  char *buf;
  char *buf_end;

  /* Make sure that info always has initialized data, even if we fail
     to read the dyn_sect section.  */
  memset (info, 0, sizeof (struct dld_info));

  sect_size = bfd_section_size (abfd, dyn_sect);
  buf = alloca (sect_size);
  buf_end = buf + sect_size;

  if (bfd_seek (abfd, dyn_sect->filepos, SEEK_SET) != 0
      || bfd_bread (buf, sect_size, abfd) != sect_size)
    error (_("failed to read contents of .dynamic section"));

  for (; buf < buf_end; buf += sizeof (Elf64_Dyn))
    {
      Elf64_Dyn *dynp = (Elf64_Dyn *) buf;
      Elf64_Sxword d_tag;

      d_tag = bfd_h_get_64 (abfd, &dynp->d_tag);
      switch (d_tag)
        {
        case DT_HP_DLD_FLAGS:
          info->dld_flags = bfd_h_get_64 (abfd, &dynp->d_un);
          break;

        case DT_HP_LOAD_MAP:
          {
            CORE_ADDR load_map_addr = bfd_h_get_64 (abfd, &dynp->d_un.d_ptr);

            if (target_read_memory (load_map_addr, (char *) &info->load_map,
                                    sizeof (info->load_map)) != 0)
              error (_("failed to read load map at %s"),
                     paddress (gdbarch, load_map_addr));
          }
          break;
        }
    }
}

/* Wrapper around target_read_memory used with libdl.  */

static void *
ia64_hpux_read_tgt_mem (void *buffer, uint64_t ptr, size_t bufsiz, int ident)
{
  if (target_read_memory (ptr, (gdb_byte *) buffer, bufsiz) != 0)
    return 0;
  else
    return buffer;
}

/* Create a new so_list object for a shared library, and store that
   new so_list object in our SO_LIST_HEAD list.

   SO_INDEX is an index specifying the placement of the loaded shared
   library in the dynamic loader's search list.  Normally, this index
   is strictly positive, but an index of -1 refers to the loader itself.

   Return nonzero if the so_list object could be created.  A zero
   return value with a positive SO_INDEX normally means that there are
   no more entries in the dynamic loader's search list at SO_INDEX or
   beyond.  */

static int
ia64_hpux_add_so_from_dld_info (struct dld_info info, int so_index)
{
  struct load_module_desc module_desc;
  uint64_t so_handle;
  char *so_path;
  struct so_list *so;

  so_handle = dlgetmodinfo (so_index, &module_desc, sizeof (module_desc),
                            ia64_hpux_read_tgt_mem, 0, info.load_map);

  if (so_handle == 0)
    /* No such entry.  We probably reached the end of the list.  */
    return 0;

  so_path = dlgetname (&module_desc, sizeof (module_desc),
                       ia64_hpux_read_tgt_mem, 0, info.load_map);
  if (so_path == NULL)
    {
      /* Should never happen, but let's not crash if it does.  */
      warning (_("unable to get shared library name, symbols not loaded"));
      return 0;
    }

  /* Create a new so_list and insert it at the start of our list.
     The order is not extremely important, but it's less work to
     insert it at the start of the list.  */
  so = new_so_list (so_path, module_desc);
  so->next = so_list_head;
  so_list_head = so;

  return 1;
}

/* Assuming we just attached to a process, update our list of shared
   libraries (SO_LIST_HEAD) as well as GDB's list.  */

static void
ia64_hpux_solib_add_after_attach (void)
{
  bfd *abfd;
  asection *dyn_sect;
  struct dld_info info;
  int i;

  if (symfile_objfile == NULL)
    return;

  abfd = symfile_objfile->obfd;
  dyn_sect = bfd_get_section_by_name (abfd, ".dynamic");

  if (dyn_sect == NULL || bfd_section_size (abfd, dyn_sect) == 0)
    return;

  ia64_hpux_read_dynamic_info (get_objfile_arch (symfile_objfile), abfd,
                               dyn_sect, &info);

  if ((info.dld_flags & DT_HP_DEBUG_PRIVATE) == 0)
    {
      warning (_(
"The shared libraries were not privately mapped; setting a breakpoint\n\
in a shared library will not work until you rerun the program.\n\
Use the following command to enable debugging of shared libraries.\n\
chatr +dbg enable a.out"));
    }

  /* Read the symbols of the dynamic loader (dld.so).  */
  ia64_hpux_add_so_from_dld_info (info, -1);

  /* Read the symbols of all the other shared libraries.  */
  for (i = 1; ; i++)
    if (!ia64_hpux_add_so_from_dld_info (info, i))
      break;  /* End of list.  */

  /* Resync the library list at the core level.  */
  solib_add (NULL, 1, &current_target, auto_solib_add);
}

/* The "create_inferior_hook" target_so_ops routine for ia64-hpux.  */

static void
ia64_hpux_solib_create_inferior_hook (int from_tty)
{
  CORE_ADDR load_info_addr;
  load_info_t load_info;

  /* Initially, we were thinking about adding a check that the program
     (accessible through symfile_objfile) was linked against some shared
     libraries, by searching for a ".dynamic" section.  However, could
     this break in the case of a statically linked program that later
     uses dlopen?  Programs that are fully statically linked are very
     rare, and we will worry about them when we encounter one that
     causes trouble.  */

  /* Set the LI_TRACE flag in the load_info_t structure.  This enables
     notifications when shared libraries are being mapped.  */
  load_info_addr = ia64_hpux_get_load_info_addr ();
  read_memory (load_info_addr, (gdb_byte *) &load_info, sizeof (load_info));
  load_info.li_flags |= LI_TRACE;
  write_memory (load_info_addr, (gdb_byte *) &load_info, sizeof (load_info));

  /* If we just attached to our process, some shared libraries have
     already been mapped.  Find which ones they are...  */
  if (current_inferior ()->attach_flag)
    ia64_hpux_solib_add_after_attach ();
}

/* The "special_symbol_handling" target_so_ops routine for ia64-hpux.  */

static void
ia64_hpux_special_symbol_handling (void)
{
  /* Nothing to do.  */
}

/* The "current_sos" target_so_ops routine for ia64-hpux.  */

static struct so_list *
ia64_hpux_current_sos (void)
{
  /* Return a deep copy of our own list.  */
  struct so_list *new_head = NULL, *prev_new_so = NULL;
  struct so_list *our_so;

  for (our_so = so_list_head; our_so != NULL; our_so = our_so->next)
    {
      struct so_list *new_so;

      new_so = new_so_list (our_so->so_name, our_so->lm_info->module_desc);
      if (prev_new_so != NULL)
        prev_new_so->next = new_so;
      prev_new_so = new_so;
      if (new_head == NULL)
        new_head = new_so;
    }

  return new_head;
}

/* The "open_symbol_file_object" target_so_ops routine for ia64-hpux.  */

static int
ia64_hpux_open_symbol_file_object (void *from_ttyp)
{
  return 0;
}

/* The "in_dynsym_resolve_code" target_so_ops routine for ia64-hpux.  */

static int
ia64_hpux_in_dynsym_resolve_code (CORE_ADDR pc)
{
  return 0;
}

/* If FADDR is the address of a function inside one of the shared
   libraries, return the shared library linkage address.  */

CORE_ADDR
ia64_hpux_get_solib_linkage_addr (CORE_ADDR faddr)
{
  struct so_list *so = so_list_head;

  while (so != NULL)
    {
      struct load_module_desc module_desc = so->lm_info->module_desc;

      if (module_desc.text_base <= faddr
          && (module_desc.text_base + module_desc.text_size) > faddr)
        return module_desc.linkage_ptr;

      so = so->next;
    }

  return 0;
}

/* Create a new target_so_ops structure suitable for ia64-hpux, and
   return its address.  */

static struct target_so_ops *
ia64_hpux_target_so_ops (void)
{
  struct target_so_ops *ops = XZALLOC (struct target_so_ops);

  ops->relocate_section_addresses = ia64_hpux_relocate_section_addresses;
  ops->free_so = ia64_hpux_free_so;
  ops->clear_solib = ia64_hpux_clear_solib;
  ops->solib_create_inferior_hook = ia64_hpux_solib_create_inferior_hook;
  ops->special_symbol_handling = ia64_hpux_special_symbol_handling;
  ops->current_sos = ia64_hpux_current_sos;
  ops->open_symbol_file_object = ia64_hpux_open_symbol_file_object;
  ops->in_dynsym_resolve_code = ia64_hpux_in_dynsym_resolve_code;
  ops->bfd_open = solib_bfd_open;

  return ops;
}

/* Prevent warning from -Wmissing-prototypes.  */
void _initialize_solib_ia64_hpux (void);

void
_initialize_solib_ia64_hpux (void)
{
  ia64_hpux_so_ops = ia64_hpux_target_so_ops ();
}