[ppc64le] Use skip_entrypoint for skip_trampoline_code
gdb/ppc64-tdep.c (deliverable/binutils-gdb.git)
/* Common target-dependent code for ppc64 GDB, the GNU debugger.

   Copyright (C) 1986-2015 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "frame.h"
#include "gdbcore.h"
#include "infrun.h"
#include "ppc-tdep.h"
#include "ppc64-tdep.h"
#include "elf-bfd.h"

/* Macros for matching instructions.  Note that, since all the
   operands are masked off before they're or-ed into the instruction,
   you can use -1 to make masks.  */

#define insn_d(opcd, rts, ra, d)                \
  ((((opcd) & 0x3f) << 26)                      \
   | (((rts) & 0x1f) << 21)                     \
   | (((ra) & 0x1f) << 16)                      \
   | ((d) & 0xffff))

#define insn_ds(opcd, rts, ra, d, xo)           \
  ((((opcd) & 0x3f) << 26)                      \
   | (((rts) & 0x1f) << 21)                     \
   | (((ra) & 0x1f) << 16)                      \
   | ((d) & 0xfffc)                             \
   | ((xo) & 0x3))

#define insn_xfx(opcd, rts, spr, xo)            \
  ((((opcd) & 0x3f) << 26)                      \
   | (((rts) & 0x1f) << 21)                     \
   | (((spr) & 0x1f) << 16)                     \
   | (((spr) & 0x3e0) << 6)                     \
   | (((xo) & 0x3ff) << 1))

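/* For example, insn_d (15, 12, 2, 0) encodes "addis r12, r2, 0"
   (0x3d820000) and insn_xfx (31, 11, 9, 467) encodes "mtctr r11"
   (0x7d6903a6); passing -1 for a field sets all of its bits, which is
   how the mask halves of the stub patterns below are built.  */
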
/* If PLT is the address of a 64-bit PowerPC PLT entry,
   return the function's entry point.  */

static CORE_ADDR
ppc64_plt_entry_point (struct gdbarch *gdbarch, CORE_ADDR plt)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  /* The first word of the PLT entry is the function entry point.  */
  return (CORE_ADDR) read_memory_unsigned_integer (plt, 8, byte_order);
}

/* Patterns for the standard linkage functions.  These are built by
   build_plt_stub in bfd/elf64-ppc.c.  */

/* Old ELFv1 PLT call stub.  */

static struct ppc_insn_pattern ppc64_standard_linkage1[] =
  {
    /* addis r12, r2, <any> */
    { insn_d (-1, -1, -1, 0), insn_d (15, 12, 2, 0), 0 },

    /* std r2, 40(r1) */
    { -1, insn_ds (62, 2, 1, 40, 0), 0 },

    /* ld r11, <any>(r12) */
    { insn_ds (-1, -1, -1, 0, -1), insn_ds (58, 11, 12, 0, 0), 0 },

    /* addis r12, r12, 1 <optional> */
    { insn_d (-1, -1, -1, -1), insn_d (15, 12, 12, 1), 1 },

    /* ld r2, <any>(r12) */
    { insn_ds (-1, -1, -1, 0, -1), insn_ds (58, 2, 12, 0, 0), 0 },

    /* addis r12, r12, 1 <optional> */
    { insn_d (-1, -1, -1, -1), insn_d (15, 12, 12, 1), 1 },

    /* mtctr r11 */
    { insn_xfx (-1, -1, -1, -1), insn_xfx (31, 11, 9, 467), 0 },

    /* ld r11, <any>(r12) <optional> */
    { insn_ds (-1, -1, -1, 0, -1), insn_ds (58, 11, 12, 0, 0), 1 },

    /* bctr */
    { -1, 0x4e800420, 0 },

    { 0, 0, 0 }
  };

/* ELFv1 PLT call stub to access PLT entries more than +/- 32k from r2.
   Also supports older stub with different placement of std 2,40(1),
   a stub that omits the std 2,40(1), and both versions of power7
   thread safety read barriers.  Note that there are actually two more
   instructions following "cmpldi r2, 0", "bnectr+" and "b <glink_i>",
   but there isn't any need to match them.  */

static struct ppc_insn_pattern ppc64_standard_linkage2[] =
  {
    /* std r2, 40(r1) <optional> */
    { -1, insn_ds (62, 2, 1, 40, 0), 1 },

    /* addis r12, r2, <any> */
    { insn_d (-1, -1, -1, 0), insn_d (15, 12, 2, 0), 0 },

    /* std r2, 40(r1) <optional> */
    { -1, insn_ds (62, 2, 1, 40, 0), 1 },

    /* ld r11, <any>(r12) */
    { insn_ds (-1, -1, -1, 0, -1), insn_ds (58, 11, 12, 0, 0), 0 },

    /* addi r12, r12, <any> <optional> */
    { insn_d (-1, -1, -1, 0), insn_d (14, 12, 12, 0), 1 },

    /* mtctr r11 */
    { insn_xfx (-1, -1, -1, -1), insn_xfx (31, 11, 9, 467), 0 },

    /* xor r11, r11, r11 <optional> */
    { -1, 0x7d6b5a78, 1 },

    /* add r12, r12, r11 <optional> */
    { -1, 0x7d8c5a14, 1 },

    /* ld r2, <any>(r12) */
    { insn_ds (-1, -1, -1, 0, -1), insn_ds (58, 2, 12, 0, 0), 0 },

    /* ld r11, <any>(r12) <optional> */
    { insn_ds (-1, -1, -1, 0, -1), insn_ds (58, 11, 12, 0, 0), 1 },

    /* bctr <optional> */
    { -1, 0x4e800420, 1 },

    /* cmpldi r2, 0 <optional> */
    { -1, 0x28220000, 1 },

    { 0, 0, 0 }
  };

/* ELFv1 PLT call stub to access PLT entries within +/- 32k of r2.  */

static struct ppc_insn_pattern ppc64_standard_linkage3[] =
  {
    /* std r2, 40(r1) <optional> */
    { -1, insn_ds (62, 2, 1, 40, 0), 1 },

    /* ld r11, <any>(r2) */
    { insn_ds (-1, -1, -1, 0, -1), insn_ds (58, 11, 2, 0, 0), 0 },

    /* addi r2, r2, <any> <optional> */
    { insn_d (-1, -1, -1, 0), insn_d (14, 2, 2, 0), 1 },

    /* mtctr r11 */
    { insn_xfx (-1, -1, -1, -1), insn_xfx (31, 11, 9, 467), 0 },

    /* xor r11, r11, r11 <optional> */
    { -1, 0x7d6b5a78, 1 },

    /* add r2, r2, r11 <optional> */
    { -1, 0x7c425a14, 1 },

    /* ld r11, <any>(r2) <optional> */
    { insn_ds (-1, -1, -1, 0, -1), insn_ds (58, 11, 2, 0, 0), 1 },

    /* ld r2, <any>(r2) */
    { insn_ds (-1, -1, -1, 0, -1), insn_ds (58, 2, 2, 0, 0), 0 },

    /* bctr <optional> */
    { -1, 0x4e800420, 1 },

    /* cmpldi r2, 0 <optional> */
    { -1, 0x28220000, 1 },

    { 0, 0, 0 }
  };

/* ELFv1 PLT call stub to access PLT entries more than +/- 32k from r2.
   A more modern variant of ppc64_standard_linkage2 differing in
   register usage.  */

static struct ppc_insn_pattern ppc64_standard_linkage4[] =
  {
    /* std r2, 40(r1) <optional> */
    { -1, insn_ds (62, 2, 1, 40, 0), 1 },

    /* addis r11, r2, <any> */
    { insn_d (-1, -1, -1, 0), insn_d (15, 11, 2, 0), 0 },

    /* ld r12, <any>(r11) */
    { insn_ds (-1, -1, -1, 0, -1), insn_ds (58, 12, 11, 0, 0), 0 },

    /* addi r11, r11, <any> <optional> */
    { insn_d (-1, -1, -1, 0), insn_d (14, 11, 11, 0), 1 },

    /* mtctr r12 */
    { insn_xfx (-1, -1, -1, -1), insn_xfx (31, 12, 9, 467), 0 },

    /* xor r2, r12, r12 <optional> */
    { -1, 0x7d826278, 1 },

    /* add r11, r11, r2 <optional> */
    { -1, 0x7d6b1214, 1 },

    /* ld r2, <any>(r11) */
    { insn_ds (-1, -1, -1, 0, -1), insn_ds (58, 2, 11, 0, 0), 0 },

    /* ld r11, <any>(r11) <optional> */
    { insn_ds (-1, -1, -1, 0, -1), insn_ds (58, 11, 11, 0, 0), 1 },

    /* bctr <optional> */
    { -1, 0x4e800420, 1 },

    /* cmpldi r2, 0 <optional> */
    { -1, 0x28220000, 1 },

    { 0, 0, 0 }
  };

/* ELFv1 PLT call stub to access PLT entries within +/- 32k of r2.
   A more modern variant of ppc64_standard_linkage3 differing in
   register usage.  */

static struct ppc_insn_pattern ppc64_standard_linkage5[] =
  {
    /* std r2, 40(r1) <optional> */
    { -1, insn_ds (62, 2, 1, 40, 0), 1 },

    /* ld r12, <any>(r2) */
    { insn_ds (-1, -1, -1, 0, -1), insn_ds (58, 12, 2, 0, 0), 0 },

    /* addi r2, r2, <any> <optional> */
    { insn_d (-1, -1, -1, 0), insn_d (14, 2, 2, 0), 1 },

    /* mtctr r12 */
    { insn_xfx (-1, -1, -1, -1), insn_xfx (31, 12, 9, 467), 0 },

    /* xor r11, r12, r12 <optional> */
    { -1, 0x7d8b6278, 1 },

    /* add r2, r2, r11 <optional> */
    { -1, 0x7c425a14, 1 },

    /* ld r11, <any>(r2) <optional> */
    { insn_ds (-1, -1, -1, 0, -1), insn_ds (58, 11, 2, 0, 0), 1 },

    /* ld r2, <any>(r2) */
    { insn_ds (-1, -1, -1, 0, -1), insn_ds (58, 2, 2, 0, 0), 0 },

    /* bctr <optional> */
    { -1, 0x4e800420, 1 },

    /* cmpldi r2, 0 <optional> */
    { -1, 0x28220000, 1 },

    { 0, 0, 0 }
  };

/* ELFv2 PLT call stub to access PLT entries more than +/- 32k from r2.  */

static struct ppc_insn_pattern ppc64_standard_linkage6[] =
  {
    /* std r2, 24(r1) <optional> */
    { -1, insn_ds (62, 2, 1, 24, 0), 1 },

    /* addis r11, r2, <any> */
    { insn_d (-1, -1, -1, 0), insn_d (15, 11, 2, 0), 0 },

    /* ld r12, <any>(r11) */
    { insn_ds (-1, -1, -1, 0, -1), insn_ds (58, 12, 11, 0, 0), 0 },

    /* mtctr r12 */
    { insn_xfx (-1, -1, -1, -1), insn_xfx (31, 12, 9, 467), 0 },

    /* bctr */
    { -1, 0x4e800420, 0 },

    { 0, 0, 0 }
  };

/* ELFv2 PLT call stub to access PLT entries within +/- 32k of r2.  */

static struct ppc_insn_pattern ppc64_standard_linkage7[] =
  {
    /* std r2, 24(r1) <optional> */
    { -1, insn_ds (62, 2, 1, 24, 0), 1 },

    /* ld r12, <any>(r2) */
    { insn_ds (-1, -1, -1, 0, -1), insn_ds (58, 12, 2, 0, 0), 0 },

    /* mtctr r12 */
    { insn_xfx (-1, -1, -1, -1), insn_xfx (31, 12, 9, 467), 0 },

    /* bctr */
    { -1, 0x4e800420, 0 },

    { 0, 0, 0 }
  };

/* ELFv2 PLT call stub to access PLT entries more than +/- 32k from r2,
   supporting fusion.  */

static struct ppc_insn_pattern ppc64_standard_linkage8[] =
  {
    /* std r2, 24(r1) <optional> */
    { -1, insn_ds (62, 2, 1, 24, 0), 1 },

    /* addis r12, r2, <any> */
    { insn_d (-1, -1, -1, 0), insn_d (15, 12, 2, 0), 0 },

    /* ld r12, <any>(r12) */
    { insn_ds (-1, -1, -1, 0, -1), insn_ds (58, 12, 12, 0, 0), 0 },

    /* mtctr r12 */
    { insn_xfx (-1, -1, -1, -1), insn_xfx (31, 12, 9, 467), 0 },

    /* bctr */
    { -1, 0x4e800420, 0 },

    { 0, 0, 0 }
  };

/* When the dynamic linker is doing lazy symbol resolution, the first
   call to a function in another object will go like this:

   - The user's function calls the linkage function:

     100003d4:   4b ff ff ad     bl      10000380 <nnnn.plt_call.printf>
     100003d8:   e8 41 00 28     ld      r2,40(r1)

   - The linkage function loads the entry point and toc pointer from
     the function descriptor in the PLT, and jumps to it:

     <nnnn.plt_call.printf>:
     10000380:   f8 41 00 28     std     r2,40(r1)
     10000384:   e9 62 80 78     ld      r11,-32648(r2)
     10000388:   7d 69 03 a6     mtctr   r11
     1000038c:   e8 42 80 80     ld      r2,-32640(r2)
     10000390:   28 22 00 00     cmpldi  r2,0
     10000394:   4c e2 04 20     bnectr+
     10000398:   48 00 03 a0     b       10000738 <printf@plt>

   - But since this is the first time that PLT entry has been used, it
     sends control to its glink entry.  That loads the number of the
     PLT entry and jumps to the common glink0 code:

     <printf@plt>:
     10000738:   38 00 00 01     li      r0,1
     1000073c:   4b ff ff bc     b       100006f8 <__glink_PLTresolve>

   - The common glink0 code then transfers control to the dynamic
     linker's fixup code:

     100006f0:   0000000000010440  .quad plt0 - (. + 16)
     <__glink_PLTresolve>:
     100006f8:   7d 88 02 a6     mflr    r12
     100006fc:   42 9f 00 05     bcl     20,4*cr7+so,10000700
     10000700:   7d 68 02 a6     mflr    r11
     10000704:   e8 4b ff f0     ld      r2,-16(r11)
     10000708:   7d 88 03 a6     mtlr    r12
     1000070c:   7d 82 5a 14     add     r12,r2,r11
     10000710:   e9 6c 00 00     ld      r11,0(r12)
     10000714:   e8 4c 00 08     ld      r2,8(r12)
     10000718:   7d 69 03 a6     mtctr   r11
     1000071c:   e9 6c 00 10     ld      r11,16(r12)
     10000720:   4e 80 04 20     bctr

   Eventually, this code will figure out how to skip all of this,
   including the dynamic linker.  At the moment, we just get through
   the linkage function.  */

/* If the current thread is about to execute a series of instructions
   at PC matching the ppc64_standard_linkage pattern, and INSN is the result
   from that pattern match, return the code address to which the
   standard linkage function will send them.  (This doesn't deal with
   dynamic linker lazy symbol resolution stubs.)  */

static CORE_ADDR
ppc64_standard_linkage1_target (struct frame_info *frame,
                                CORE_ADDR pc, unsigned int *insn)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  /* The address of the PLT entry this linkage function references.  */
  CORE_ADDR plt
    = ((CORE_ADDR) get_frame_register_unsigned (frame,
                                                tdep->ppc_gp0_regnum + 2)
       + (ppc_insn_d_field (insn[0]) << 16)
       + ppc_insn_ds_field (insn[2]));

  return ppc64_plt_entry_point (gdbarch, plt);
}

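/* Likewise for the ppc64_standard_linkage2 pattern, where the addis
   that forms the high part of the PLT offset is insn[1] and the ld of
   the function entry is insn[3].  */
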
static CORE_ADDR
ppc64_standard_linkage2_target (struct frame_info *frame,
                                CORE_ADDR pc, unsigned int *insn)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  /* The address of the PLT entry this linkage function references.  */
  CORE_ADDR plt
    = ((CORE_ADDR) get_frame_register_unsigned (frame,
                                                tdep->ppc_gp0_regnum + 2)
       + (ppc_insn_d_field (insn[1]) << 16)
       + ppc_insn_ds_field (insn[3]));

  return ppc64_plt_entry_point (gdbarch, plt);
}

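/* Likewise for the ppc64_standard_linkage3, 5 and 7 patterns, which
   reach the PLT entry directly from r2 using the ld displacement in
   insn[1].  */
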
static CORE_ADDR
ppc64_standard_linkage3_target (struct frame_info *frame,
                                CORE_ADDR pc, unsigned int *insn)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  /* The address of the PLT entry this linkage function references.  */
  CORE_ADDR plt
    = ((CORE_ADDR) get_frame_register_unsigned (frame,
                                                tdep->ppc_gp0_regnum + 2)
       + ppc_insn_ds_field (insn[1]));

  return ppc64_plt_entry_point (gdbarch, plt);
}

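/* Likewise for the ppc64_standard_linkage4, 6 and 8 patterns, where
   the addis is insn[1] and the ld of the function entry is insn[2].  */
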
static CORE_ADDR
ppc64_standard_linkage4_target (struct frame_info *frame,
                                CORE_ADDR pc, unsigned int *insn)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  CORE_ADDR plt
    = ((CORE_ADDR) get_frame_register_unsigned (frame, tdep->ppc_gp0_regnum + 2)
       + (ppc_insn_d_field (insn[1]) << 16)
       + ppc_insn_ds_field (insn[2]));

  return ppc64_plt_entry_point (gdbarch, plt);
}

/* Given that we've begun executing a call trampoline at PC, return
   the entry point of the function the trampoline will go to.

   When the execution direction is EXEC_REVERSE, scan backward to
   check whether we are in the middle of a PLT stub.  */

static CORE_ADDR
ppc64_skip_trampoline_code_1 (struct frame_info *frame, CORE_ADDR pc)
{
#define MAX(a,b) ((a) > (b) ? (a) : (b))
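  /* Room for the longest of the stub patterns above, minus the
     terminating { 0, 0, 0 } entry.  */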
  unsigned int insns[MAX (MAX (MAX (ARRAY_SIZE (ppc64_standard_linkage1),
                                    ARRAY_SIZE (ppc64_standard_linkage2)),
                               MAX (ARRAY_SIZE (ppc64_standard_linkage3),
                                    ARRAY_SIZE (ppc64_standard_linkage4))),
                          MAX (MAX (ARRAY_SIZE (ppc64_standard_linkage5),
                                    ARRAY_SIZE (ppc64_standard_linkage6)),
                               MAX (ARRAY_SIZE (ppc64_standard_linkage7),
                                    ARRAY_SIZE (ppc64_standard_linkage8))))
                     - 1];
  CORE_ADDR target;
  int scan_limit, i;

  scan_limit = 1;
  /* When reverse-debugging, scan backward to check whether we are
     in the middle of trampoline code.  */
  if (execution_direction == EXEC_REVERSE)
    scan_limit = ARRAY_SIZE (insns) - 1;

  for (i = 0; i < scan_limit; i++)
    {
      if (i < ARRAY_SIZE (ppc64_standard_linkage8) - 1
          && ppc_insns_match_pattern (frame, pc, ppc64_standard_linkage8,
                                      insns))
        pc = ppc64_standard_linkage4_target (frame, pc, insns);
      else if (i < ARRAY_SIZE (ppc64_standard_linkage7) - 1
               && ppc_insns_match_pattern (frame, pc, ppc64_standard_linkage7,
                                           insns))
        pc = ppc64_standard_linkage3_target (frame, pc, insns);
      else if (i < ARRAY_SIZE (ppc64_standard_linkage6) - 1
               && ppc_insns_match_pattern (frame, pc, ppc64_standard_linkage6,
                                           insns))
        pc = ppc64_standard_linkage4_target (frame, pc, insns);
      else if (i < ARRAY_SIZE (ppc64_standard_linkage5) - 1
               && ppc_insns_match_pattern (frame, pc, ppc64_standard_linkage5,
                                           insns)
               && (insns[8] != 0 || insns[9] != 0))
        pc = ppc64_standard_linkage3_target (frame, pc, insns);
      else if (i < ARRAY_SIZE (ppc64_standard_linkage4) - 1
               && ppc_insns_match_pattern (frame, pc, ppc64_standard_linkage4,
                                           insns)
               && (insns[9] != 0 || insns[10] != 0))
        pc = ppc64_standard_linkage4_target (frame, pc, insns);
      else if (i < ARRAY_SIZE (ppc64_standard_linkage3) - 1
               && ppc_insns_match_pattern (frame, pc, ppc64_standard_linkage3,
                                           insns)
               && (insns[8] != 0 || insns[9] != 0))
        pc = ppc64_standard_linkage3_target (frame, pc, insns);
      else if (i < ARRAY_SIZE (ppc64_standard_linkage2) - 1
               && ppc_insns_match_pattern (frame, pc, ppc64_standard_linkage2,
                                           insns)
               && (insns[10] != 0 || insns[11] != 0))
        pc = ppc64_standard_linkage2_target (frame, pc, insns);
      else if (i < ARRAY_SIZE (ppc64_standard_linkage1) - 1
               && ppc_insns_match_pattern (frame, pc, ppc64_standard_linkage1,
                                           insns))
        pc = ppc64_standard_linkage1_target (frame, pc, insns);
      else
        {
          /* Scan backward one more instruction if the patterns do not
             match.  */
          pc -= 4;
          continue;
        }

      /* The PLT descriptor will either point to the already resolved target
         address, or else to a glink stub.  As the latter carry synthetic @plt
         symbols, find_solib_trampoline_target should be able to resolve
         them.  */
      target = find_solib_trampoline_target (frame, pc);
      return target ? target : pc;
    }

  return 0;
}

/* Wrapper of ppc64_skip_trampoline_code_1 checking also
   ppc_elfv2_skip_entrypoint.  */

CORE_ADDR
ppc64_skip_trampoline_code (struct frame_info *frame, CORE_ADDR pc)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);

  pc = ppc64_skip_trampoline_code_1 (frame, pc);
  if (pc != 0 && gdbarch_skip_entrypoint_p (gdbarch))
    pc = gdbarch_skip_entrypoint (gdbarch, pc);
  return pc;
}

/* Support for convert_from_func_ptr_addr (ARCH, ADDR, TARG) on PPC64
   GNU/Linux.

   Usually a function pointer's representation is simply the address
   of the function.  On GNU/Linux on the PowerPC however, a function
   pointer may be a pointer to a function descriptor.

   For PPC64, a function descriptor is a TOC entry, in a data section,
   which contains three words: the first word is the address of the
   function, the second word is the TOC pointer (r2), and the third word
   is the static chain value.

   Throughout GDB it is currently assumed that a function pointer contains
   the address of the function, which is not easy to fix.  In addition, the
   conversion of a function address to a function pointer would
   require allocation of a TOC entry in the inferior's memory space,
   with all its drawbacks.  To be able to call C++ virtual methods in
   the inferior (which are called via function pointers),
   find_function_addr uses this function to get the function address
   from a function pointer.

   If ADDR points at what is clearly a function descriptor, transform
   it into the address of the corresponding function, if needed.  Be
   conservative, otherwise GDB will do the transformation on any
   random addresses such as occur when there is no symbol table.  */

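/* For example, an .opd entry whose three doublewords are
   { 0x10001234, 0x10080000, 0 } (illustrative values only) describes a
   function whose code starts at 0x10001234; given ADDR pointing at
   that entry, this function returns 0x10001234.  */
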
CORE_ADDR
ppc64_convert_from_func_ptr_addr (struct gdbarch *gdbarch,
                                  CORE_ADDR addr,
                                  struct target_ops *targ)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  struct target_section *s = target_section_by_addr (targ, addr);

  /* Check if ADDR points to a function descriptor.  */
  if (s && strcmp (s->the_bfd_section->name, ".opd") == 0)
    {
      /* There may be relocations that need to be applied to the .opd
         section.  Unfortunately, this function may be called at a time
         where these relocations have not yet been performed -- this can
         happen for example shortly after a library has been loaded with
         dlopen, but ld.so has not yet applied the relocations.

         To cope with both the case where the relocation has been applied,
         and the case where it has not yet been applied, we do *not* read
         the (maybe) relocated value from target memory, but we instead
         read the non-relocated value from the BFD, and apply the relocation
         offset manually.

         This makes the assumption that all .opd entries are always relocated
         by the same offset the section itself was relocated.  This should
         always be the case for GNU/Linux executables and shared libraries.
         Note that other kinds of object files (e.g. those added via
         add-symbol-files) will currently never end up here anyway, as this
         function accesses *target* sections only; only the main exec and
         shared libraries are ever added to the target.  */

      gdb_byte buf[8];
      int res;

      res = bfd_get_section_contents (s->the_bfd_section->owner,
                                      s->the_bfd_section,
                                      &buf, addr - s->addr, 8);
      if (res != 0)
        return extract_unsigned_integer (buf, 8, byte_order)
          - bfd_section_vma (s->bfd, s->the_bfd_section) + s->addr;
    }

  return addr;
}

/* A synthetic 'dot' symbol on ppc64 has the udata.p entry pointing
   back to the original ELF symbol it was derived from.  Get the size
   from that symbol.  */

void
ppc64_elf_make_msymbol_special (asymbol *sym, struct minimal_symbol *msym)
{
  if ((sym->flags & BSF_SYNTHETIC) != 0 && sym->udata.p != NULL)
    {
      elf_symbol_type *elf_sym = (elf_symbol_type *) sym->udata.p;
      SET_MSYMBOL_SIZE (msym, elf_sym->internal_elf_sym.st_size);
    }
}