/* Common target-dependent code for ppc64 GDB, the GNU debugger.

   Copyright (C) 1986-2014 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "frame.h"
#include "gdbcore.h"
#include "ppc-tdep.h"
#include "ppc64-tdep.h"
#include "elf-bfd.h"

/* Macros for matching instructions.  Note that, since all the
   operands are masked off before they're or-ed into the instruction,
   you can use -1 to make masks.  */

#define insn_d(opcd, rts, ra, d)                \
  ((((opcd) & 0x3f) << 26)                      \
   | (((rts) & 0x1f) << 21)                     \
   | (((ra) & 0x1f) << 16)                      \
   | ((d) & 0xffff))

#define insn_ds(opcd, rts, ra, d, xo)           \
  ((((opcd) & 0x3f) << 26)                      \
   | (((rts) & 0x1f) << 21)                     \
   | (((ra) & 0x1f) << 16)                      \
   | ((d) & 0xfffc)                             \
   | ((xo) & 0x3))

#define insn_xfx(opcd, rts, spr, xo)            \
  ((((opcd) & 0x3f) << 26)                      \
   | (((rts) & 0x1f) << 21)                     \
   | (((spr) & 0x1f) << 16)                     \
   | (((spr) & 0x3e0) << 6)                     \
   | (((xo) & 0x3ff) << 1))

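/* For example, insn_d (15, 12, 2, 0) is 0x3d820000, the encoding of
   "addis r12, r2, 0", while insn_d (-1, -1, -1, 0) is 0xffff0000, a mask
   covering the opcode, RT and RA fields but ignoring the 16-bit
   displacement; together they match any "addis r12, r2, <any>", as used
   by the first entry of ppc64_standard_linkage1 below.  */
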
/* If PLT is the address of a 64-bit PowerPC PLT entry,
   return the function's entry point.  */
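
/* Note that with the ELFv1 ABI a PLT entry is a function descriptor
   whose first doubleword is the function's entry point, while with
   ELFv2 the PLT slot holds the entry point address directly; in both
   cases the first eight bytes give the address we want.  */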

static CORE_ADDR
ppc64_plt_entry_point (struct gdbarch *gdbarch, CORE_ADDR plt)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  /* The first word of the PLT entry is the function entry point.  */
  return (CORE_ADDR) read_memory_unsigned_integer (plt, 8, byte_order);
}

/* Patterns for the standard linkage functions.  These are built by
   build_plt_stub in bfd/elf64-ppc.c.  */
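
/* Each entry in the patterns below is a { mask, data, optional } triple:
   an instruction matches the entry when (insn & mask) == data, and an
   entry whose third field is non-zero may be absent from a particular
   stub (see ppc_insns_match_pattern).  */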

/* Old ELFv1 PLT call stub.  */

static struct ppc_insn_pattern ppc64_standard_linkage1[] =
  {
    /* addis r12, r2, <any> */
    { insn_d (-1, -1, -1, 0), insn_d (15, 12, 2, 0), 0 },

    /* std r2, 40(r1) */
    { -1, insn_ds (62, 2, 1, 40, 0), 0 },

    /* ld r11, <any>(r12) */
    { insn_ds (-1, -1, -1, 0, -1), insn_ds (58, 11, 12, 0, 0), 0 },

    /* addis r12, r12, 1 <optional> */
    { insn_d (-1, -1, -1, -1), insn_d (15, 12, 12, 1), 1 },

    /* ld r2, <any>(r12) */
    { insn_ds (-1, -1, -1, 0, -1), insn_ds (58, 2, 12, 0, 0), 0 },

    /* addis r12, r12, 1 <optional> */
    { insn_d (-1, -1, -1, -1), insn_d (15, 12, 12, 1), 1 },

    /* mtctr r11 */
    { insn_xfx (-1, -1, -1, -1), insn_xfx (31, 11, 9, 467), 0 },

    /* ld r11, <any>(r12) <optional> */
    { insn_ds (-1, -1, -1, 0, -1), insn_ds (58, 11, 12, 0, 0), 1 },

    /* bctr */
    { -1, 0x4e800420, 0 },

    { 0, 0, 0 }
  };

/* ELFv1 PLT call stub to access PLT entries more than +/- 32k from r2.
   Also supports an older stub with a different placement of the
   std 2,40(1), a stub that omits the std 2,40(1) entirely, and both
   versions of the power7 thread-safety read barriers.  Note that there
   are actually two more instructions following "cmpldi r2, 0",
   "bnectr+" and "b <glink_i>", but there isn't any need to match
   them.  */

static struct ppc_insn_pattern ppc64_standard_linkage2[] =
  {
    /* std r2, 40(r1) <optional> */
    { -1, insn_ds (62, 2, 1, 40, 0), 1 },

    /* addis r12, r2, <any> */
    { insn_d (-1, -1, -1, 0), insn_d (15, 12, 2, 0), 0 },

    /* std r2, 40(r1) <optional> */
    { -1, insn_ds (62, 2, 1, 40, 0), 1 },

    /* ld r11, <any>(r12) */
    { insn_ds (-1, -1, -1, 0, -1), insn_ds (58, 11, 12, 0, 0), 0 },

    /* addi r12, r12, <any> <optional> */
    { insn_d (-1, -1, -1, 0), insn_d (14, 12, 12, 0), 1 },

    /* mtctr r11 */
    { insn_xfx (-1, -1, -1, -1), insn_xfx (31, 11, 9, 467), 0 },

    /* xor r11, r11, r11 <optional> */
    { -1, 0x7d6b5a78, 1 },

    /* add r12, r12, r11 <optional> */
    { -1, 0x7d8c5a14, 1 },

    /* ld r2, <any>(r12) */
    { insn_ds (-1, -1, -1, 0, -1), insn_ds (58, 2, 12, 0, 0), 0 },

    /* ld r11, <any>(r12) <optional> */
    { insn_ds (-1, -1, -1, 0, -1), insn_ds (58, 11, 12, 0, 0), 1 },

    /* bctr <optional> */
    { -1, 0x4e800420, 1 },

    /* cmpldi r2, 0 <optional> */
    { -1, 0x28220000, 1 },

    { 0, 0, 0 }
  };

/* ELFv1 PLT call stub to access PLT entries within +/- 32k of r2.  */

static struct ppc_insn_pattern ppc64_standard_linkage3[] =
  {
    /* std r2, 40(r1) <optional> */
    { -1, insn_ds (62, 2, 1, 40, 0), 1 },

    /* ld r11, <any>(r2) */
    { insn_ds (-1, -1, -1, 0, -1), insn_ds (58, 11, 2, 0, 0), 0 },

    /* addi r2, r2, <any> <optional> */
    { insn_d (-1, -1, -1, 0), insn_d (14, 2, 2, 0), 1 },

    /* mtctr r11 */
    { insn_xfx (-1, -1, -1, -1), insn_xfx (31, 11, 9, 467), 0 },

    /* xor r11, r11, r11 <optional> */
    { -1, 0x7d6b5a78, 1 },

    /* add r2, r2, r11 <optional> */
    { -1, 0x7c425a14, 1 },

    /* ld r11, <any>(r2) <optional> */
    { insn_ds (-1, -1, -1, 0, -1), insn_ds (58, 11, 2, 0, 0), 1 },

    /* ld r2, <any>(r2) */
    { insn_ds (-1, -1, -1, 0, -1), insn_ds (58, 2, 2, 0, 0), 0 },

    /* bctr <optional> */
    { -1, 0x4e800420, 1 },

    /* cmpldi r2, 0 <optional> */
    { -1, 0x28220000, 1 },

    { 0, 0, 0 }
  };

/* ELFv1 PLT call stub to access PLT entries more than +/- 32k from r2.
   A more modern variant of ppc64_standard_linkage2 differing in
   register usage.  */

static struct ppc_insn_pattern ppc64_standard_linkage4[] =
  {
    /* std r2, 40(r1) <optional> */
    { -1, insn_ds (62, 2, 1, 40, 0), 1 },

    /* addis r11, r2, <any> */
    { insn_d (-1, -1, -1, 0), insn_d (15, 11, 2, 0), 0 },

    /* ld r12, <any>(r11) */
    { insn_ds (-1, -1, -1, 0, -1), insn_ds (58, 12, 11, 0, 0), 0 },

    /* addi r11, r11, <any> <optional> */
    { insn_d (-1, -1, -1, 0), insn_d (14, 11, 11, 0), 1 },

    /* mtctr r12 */
    { insn_xfx (-1, -1, -1, -1), insn_xfx (31, 12, 9, 467), 0 },

    /* xor r2, r12, r12 <optional> */
    { -1, 0x7d826278, 1 },

    /* add r11, r11, r2 <optional> */
    { -1, 0x7d6b1214, 1 },

    /* ld r2, <any>(r11) */
    { insn_ds (-1, -1, -1, 0, -1), insn_ds (58, 2, 11, 0, 0), 0 },

    /* ld r11, <any>(r11) <optional> */
    { insn_ds (-1, -1, -1, 0, -1), insn_ds (58, 11, 11, 0, 0), 1 },

    /* bctr <optional> */
    { -1, 0x4e800420, 1 },

    /* cmpldi r2, 0 <optional> */
    { -1, 0x28220000, 1 },

    { 0, 0, 0 }
  };

/* ELFv1 PLT call stub to access PLT entries within +/- 32k of r2.
   A more modern variant of ppc64_standard_linkage3 differing in
   register usage.  */

static struct ppc_insn_pattern ppc64_standard_linkage5[] =
  {
    /* std r2, 40(r1) <optional> */
    { -1, insn_ds (62, 2, 1, 40, 0), 1 },

    /* ld r12, <any>(r2) */
    { insn_ds (-1, -1, -1, 0, -1), insn_ds (58, 12, 2, 0, 0), 0 },

    /* addi r2, r2, <any> <optional> */
    { insn_d (-1, -1, -1, 0), insn_d (14, 2, 2, 0), 1 },

    /* mtctr r12 */
    { insn_xfx (-1, -1, -1, -1), insn_xfx (31, 12, 9, 467), 0 },

    /* xor r11, r12, r12 <optional> */
    { -1, 0x7d8b6278, 1 },

    /* add r2, r2, r11 <optional> */
    { -1, 0x7c425a14, 1 },

    /* ld r11, <any>(r2) <optional> */
    { insn_ds (-1, -1, -1, 0, -1), insn_ds (58, 11, 2, 0, 0), 1 },

    /* ld r2, <any>(r2) */
    { insn_ds (-1, -1, -1, 0, -1), insn_ds (58, 2, 2, 0, 0), 0 },

    /* bctr <optional> */
    { -1, 0x4e800420, 1 },

    /* cmpldi r2, 0 <optional> */
    { -1, 0x28220000, 1 },

    { 0, 0, 0 }
  };

/* ELFv2 PLT call stub to access PLT entries more than +/- 32k from r2.  */

static struct ppc_insn_pattern ppc64_standard_linkage6[] =
  {
    /* std r2, 24(r1) <optional> */
    { -1, insn_ds (62, 2, 1, 24, 0), 1 },

    /* addis r11, r2, <any> */
    { insn_d (-1, -1, -1, 0), insn_d (15, 11, 2, 0), 0 },

    /* ld r12, <any>(r11) */
    { insn_ds (-1, -1, -1, 0, -1), insn_ds (58, 12, 11, 0, 0), 0 },

    /* mtctr r12 */
    { insn_xfx (-1, -1, -1, -1), insn_xfx (31, 12, 9, 467), 0 },

    /* bctr */
    { -1, 0x4e800420, 0 },

    { 0, 0, 0 }
  };

/* ELFv2 PLT call stub to access PLT entries within +/- 32k of r2.  */

static struct ppc_insn_pattern ppc64_standard_linkage7[] =
  {
    /* std r2, 24(r1) <optional> */
    { -1, insn_ds (62, 2, 1, 24, 0), 1 },

    /* ld r12, <any>(r2) */
    { insn_ds (-1, -1, -1, 0, -1), insn_ds (58, 12, 2, 0, 0), 0 },

    /* mtctr r12 */
    { insn_xfx (-1, -1, -1, -1), insn_xfx (31, 12, 9, 467), 0 },

    /* bctr */
    { -1, 0x4e800420, 0 },

    { 0, 0, 0 }
  };

/* ELFv2 PLT call stub to access PLT entries more than +/- 32k from r2,
   supporting fusion.  */

static struct ppc_insn_pattern ppc64_standard_linkage8[] =
  {
    /* std r2, 24(r1) <optional> */
    { -1, insn_ds (62, 2, 1, 24, 0), 1 },

    /* addis r12, r2, <any> */
    { insn_d (-1, -1, -1, 0), insn_d (15, 12, 2, 0), 0 },

    /* ld r12, <any>(r12) */
    { insn_ds (-1, -1, -1, 0, -1), insn_ds (58, 12, 12, 0, 0), 0 },

    /* mtctr r12 */
    { insn_xfx (-1, -1, -1, -1), insn_xfx (31, 12, 9, 467), 0 },

    /* bctr */
    { -1, 0x4e800420, 0 },

    { 0, 0, 0 }
  };

/* When the dynamic linker is doing lazy symbol resolution, the first
   call to a function in another object will go like this:

   - The user's function calls the linkage function:

     100003d4:  4b ff ff ad     bl      10000380 <nnnn.plt_call.printf>
     100003d8:  e8 41 00 28     ld      r2,40(r1)

   - The linkage function loads the entry point and toc pointer from
     the function descriptor in the PLT, and jumps to it:

     <nnnn.plt_call.printf>:
     10000380:  f8 41 00 28     std     r2,40(r1)
     10000384:  e9 62 80 78     ld      r11,-32648(r2)
     10000388:  7d 69 03 a6     mtctr   r11
     1000038c:  e8 42 80 80     ld      r2,-32640(r2)
     10000390:  28 22 00 00     cmpldi  r2,0
     10000394:  4c e2 04 20     bnectr+
     10000398:  48 00 03 a0     b       10000738 <printf@plt>

   - But since this is the first time that PLT entry has been used, it
     sends control to its glink entry.  That loads the number of the
     PLT entry and jumps to the common glink0 code:

     <printf@plt>:
     10000738:  38 00 00 01     li      r0,1
     1000073c:  4b ff ff bc     b       100006f8 <__glink_PLTresolve>

   - The common glink0 code then transfers control to the dynamic
     linker's fixup code:

     100006f0:  0000000000010440 .quad plt0 - (. + 16)
     <__glink_PLTresolve>:
     100006f8:  7d 88 02 a6     mflr    r12
     100006fc:  42 9f 00 05     bcl     20,4*cr7+so,10000700
     10000700:  7d 68 02 a6     mflr    r11
     10000704:  e8 4b ff f0     ld      r2,-16(r11)
     10000708:  7d 88 03 a6     mtlr    r12
     1000070c:  7d 82 5a 14     add     r12,r2,r11
     10000710:  e9 6c 00 00     ld      r11,0(r12)
     10000714:  e8 4c 00 08     ld      r2,8(r12)
     10000718:  7d 69 03 a6     mtctr   r11
     1000071c:  e9 6c 00 10     ld      r11,16(r12)
     10000720:  4e 80 04 20     bctr

   Eventually, this code will figure out how to skip all of this,
   including the dynamic linker.  At the moment, we just get through
   the linkage function.  */

/* If the current thread is about to execute a series of instructions
   at PC matching the ppc64_standard_linkage pattern, and INSN is the
   result from that pattern match, return the code address to which the
   standard linkage function will send them.  (This doesn't deal with
   dynamic linker lazy symbol resolution stubs.)  */
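
/* Each ppc64_standard_linkageN_target function below recovers the address
   of the PLT entry that the stub references: the displacement from r2 is
   taken from the matched instructions, either split between the addis
   immediate (shifted left 16) and the displacement of the following ld,
   or, for the short stubs, taken from a single ld displacement.  The
   function's entry point is then read from that PLT entry via
   ppc64_plt_entry_point.  */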

static CORE_ADDR
ppc64_standard_linkage1_target (struct frame_info *frame,
                                CORE_ADDR pc, unsigned int *insn)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  /* The address of the PLT entry this linkage function references.  */
  CORE_ADDR plt
    = ((CORE_ADDR) get_frame_register_unsigned (frame,
                                                tdep->ppc_gp0_regnum + 2)
       + (ppc_insn_d_field (insn[0]) << 16)
       + ppc_insn_ds_field (insn[2]));

  return ppc64_plt_entry_point (gdbarch, plt);
}

static CORE_ADDR
ppc64_standard_linkage2_target (struct frame_info *frame,
                                CORE_ADDR pc, unsigned int *insn)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  /* The address of the PLT entry this linkage function references.  */
  CORE_ADDR plt
    = ((CORE_ADDR) get_frame_register_unsigned (frame,
                                                tdep->ppc_gp0_regnum + 2)
       + (ppc_insn_d_field (insn[1]) << 16)
       + ppc_insn_ds_field (insn[3]));

  return ppc64_plt_entry_point (gdbarch, plt);
}

static CORE_ADDR
ppc64_standard_linkage3_target (struct frame_info *frame,
                                CORE_ADDR pc, unsigned int *insn)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  /* The address of the PLT entry this linkage function references.  */
  CORE_ADDR plt
    = ((CORE_ADDR) get_frame_register_unsigned (frame,
                                                tdep->ppc_gp0_regnum + 2)
       + ppc_insn_ds_field (insn[1]));

  return ppc64_plt_entry_point (gdbarch, plt);
}

static CORE_ADDR
ppc64_standard_linkage4_target (struct frame_info *frame,
                                CORE_ADDR pc, unsigned int *insn)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  CORE_ADDR plt
    = ((CORE_ADDR) get_frame_register_unsigned (frame,
                                                tdep->ppc_gp0_regnum + 2)
       + (ppc_insn_d_field (insn[1]) << 16)
       + ppc_insn_ds_field (insn[2]));

  return ppc64_plt_entry_point (gdbarch, plt);
}

/* Given that we've begun executing a call trampoline at PC, return
   the entry point of the function the trampoline will go to.  */

CORE_ADDR
ppc64_skip_trampoline_code (struct frame_info *frame, CORE_ADDR pc)
{
#define MAX(a,b) ((a) > (b) ? (a) : (b))
  unsigned int insns[MAX (MAX (MAX (ARRAY_SIZE (ppc64_standard_linkage1),
                                    ARRAY_SIZE (ppc64_standard_linkage2)),
                               MAX (ARRAY_SIZE (ppc64_standard_linkage3),
                                    ARRAY_SIZE (ppc64_standard_linkage4))),
                          MAX (MAX (ARRAY_SIZE (ppc64_standard_linkage5),
                                    ARRAY_SIZE (ppc64_standard_linkage6)),
                               MAX (ARRAY_SIZE (ppc64_standard_linkage7),
                                    ARRAY_SIZE (ppc64_standard_linkage8))))
                     - 1];
  CORE_ADDR target;

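  /* The insns[N] != 0 checks below require that at least one of the two
     trailing optional instructions of the pattern (the bctr or the
     cmpldi) actually matched, which guards against accepting a partial
     match consisting only of the earlier, mostly optional instructions.
     The insns array itself is sized for the longest pattern, minus its
     terminating zero entry.  */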
  if (ppc_insns_match_pattern (frame, pc, ppc64_standard_linkage8, insns))
    pc = ppc64_standard_linkage4_target (frame, pc, insns);
  else if (ppc_insns_match_pattern (frame, pc, ppc64_standard_linkage7, insns))
    pc = ppc64_standard_linkage3_target (frame, pc, insns);
  else if (ppc_insns_match_pattern (frame, pc, ppc64_standard_linkage6, insns))
    pc = ppc64_standard_linkage4_target (frame, pc, insns);
  else if (ppc_insns_match_pattern (frame, pc, ppc64_standard_linkage5, insns)
           && (insns[8] != 0 || insns[9] != 0))
    pc = ppc64_standard_linkage3_target (frame, pc, insns);
  else if (ppc_insns_match_pattern (frame, pc, ppc64_standard_linkage4, insns)
           && (insns[9] != 0 || insns[10] != 0))
    pc = ppc64_standard_linkage4_target (frame, pc, insns);
  else if (ppc_insns_match_pattern (frame, pc, ppc64_standard_linkage3, insns)
           && (insns[8] != 0 || insns[9] != 0))
    pc = ppc64_standard_linkage3_target (frame, pc, insns);
  else if (ppc_insns_match_pattern (frame, pc, ppc64_standard_linkage2, insns)
           && (insns[10] != 0 || insns[11] != 0))
    pc = ppc64_standard_linkage2_target (frame, pc, insns);
  else if (ppc_insns_match_pattern (frame, pc, ppc64_standard_linkage1, insns))
    pc = ppc64_standard_linkage1_target (frame, pc, insns);
  else
    return 0;

  /* The PLT descriptor will either point to the already resolved target
     address, or else to a glink stub.  As the latter carry synthetic @plt
     symbols, find_solib_trampoline_target should be able to resolve them.  */
  target = find_solib_trampoline_target (frame, pc);
  return target ? target : pc;
}

/* Support for convert_from_func_ptr_addr (ARCH, ADDR, TARG) on PPC64
   GNU/Linux.

   Usually a function pointer's representation is simply the address
   of the function.  On GNU/Linux on the PowerPC however, a function
   pointer may be a pointer to a function descriptor.

   For PPC64, a function descriptor is a TOC entry, in a data section,
   which contains three doublewords: the first is the address of the
   function, the second is the TOC pointer (r2), and the third is the
   static chain value.

   Throughout GDB it is currently assumed that a function pointer contains
   the address of the function, which is not easy to fix.  In addition, the
   conversion of a function address to a function pointer would
   require allocation of a TOC entry in the inferior's memory space,
   with all its drawbacks.  To be able to call C++ virtual methods in
   the inferior (which are called via function pointers),
   find_function_addr uses this function to get the function address
   from a function pointer.

   If ADDR points at what is clearly a function descriptor, transform
   it into the address of the corresponding function, if needed.  Be
   conservative, otherwise GDB will do the transformation on any
   random addresses such as occur when there is no symbol table.  */
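
/* For reference, an ELFv1 function descriptor in .opd is commonly
   emitted as three doublewords along the lines of:

       foo:   .quad  .L.foo          # entry point of the code
              .quad  .TOC.@tocbase   # TOC pointer loaded into r2
              .quad  0               # static chain

   (the labels here are illustrative).  Only the first doubleword is
   examined below.  */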

CORE_ADDR
ppc64_convert_from_func_ptr_addr (struct gdbarch *gdbarch,
                                  CORE_ADDR addr,
                                  struct target_ops *targ)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  struct target_section *s = target_section_by_addr (targ, addr);

  /* Check if ADDR points to a function descriptor.  */
  if (s && strcmp (s->the_bfd_section->name, ".opd") == 0)
    {
      /* There may be relocations that need to be applied to the .opd
         section.  Unfortunately, this function may be called at a time
         where these relocations have not yet been performed -- this can
         happen for example shortly after a library has been loaded with
         dlopen, but ld.so has not yet applied the relocations.

         To cope with both the case where the relocation has been applied,
         and the case where it has not yet been applied, we do *not* read
         the (maybe) relocated value from target memory, but we instead
         read the non-relocated value from the BFD, and apply the
         relocation offset manually.

         This makes the assumption that all .opd entries are always
         relocated by the same offset the section itself was relocated.
         This should always be the case for GNU/Linux executables and
         shared libraries.  Note that other kinds of object files
         (e.g. those added via add-symbol-file) will currently never end
         up here anyway, as this function accesses *target* sections only;
         only the main exec and shared libraries are ever added to the
         target.  */

      gdb_byte buf[8];
      int res;

      res = bfd_get_section_contents (s->the_bfd_section->owner,
                                      s->the_bfd_section,
                                      &buf, addr - s->addr, 8);
      if (res != 0)
        return extract_unsigned_integer (buf, 8, byte_order)
          - bfd_section_vma (s->bfd, s->the_bfd_section) + s->addr;
    }

  return addr;
}

/* A synthetic 'dot' symbol on ppc64 has its udata.p entry pointing
   back to the original ELF symbol it was derived from.  Get the size
   from that symbol.  */

void
ppc64_elf_make_msymbol_special (asymbol *sym, struct minimal_symbol *msym)
{
  if ((sym->flags & BSF_SYNTHETIC) != 0 && sym->udata.p != NULL)
    {
      elf_symbol_type *elf_sym = (elf_symbol_type *) sym->udata.p;

      SET_MSYMBOL_SIZE (msym, elf_sym->internal_elf_sym.st_size);
    }
}