[LD][AARCH64] Add BFD_RELOC_AARCH64_MOVW_GOTOFF_G1 support.
deliverable/binutils-gdb.git: bfd/elfxx-aarch64.c
/* AArch64-specific support for ELF.
   Copyright (C) 2009-2015 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of BFD, the Binary File Descriptor library.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING3.  If not,
   see <http://www.gnu.org/licenses/>.  */

#include "sysdep.h"
#include "elfxx-aarch64.h"
#include <stdarg.h>
#include <string.h>

#define MASK(n) ((1u << (n)) - 1)
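
/* For instance, MASK (12) is 0xfff and MASK (19) is 0x7ffff; the macro is
   used below to extract and insert fixed-width instruction fields.  */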

/* Sign-extend VALUE, which has the indicated number of BITS.  */

bfd_signed_vma
_bfd_aarch64_sign_extend (bfd_vma value, int bits)
{
  if (value & ((bfd_vma) 1 << (bits - 1)))
    /* VALUE is negative.  */
    value |= ((bfd_vma) - 1) << bits;

  return value;
}
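
/* Illustrative example only: with BITS == 19, a VALUE of 0x40000 (bit 18
   set) is treated as negative and the function returns -0x40000, while
   0x3ffff is returned unchanged.  */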

/* Decode the IMM field of ADRP.  */

uint32_t
_bfd_aarch64_decode_adrp_imm (uint32_t insn)
{
  return (((insn >> 5) & MASK (19)) << 2) | ((insn >> 29) & MASK (2));
}
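
/* ADRP splits its 21-bit page immediate into immlo (instruction bits 30:29)
   and immhi (bits 23:5); the helper above simply reassembles immhi:immlo
   into one 21-bit value.  For example, immlo == 1 and immhi == 0x2 decode
   to (0x2 << 2) | 1 == 0x9.  */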

/* Reencode the imm field of add immediate.  */
static inline uint32_t
reencode_add_imm (uint32_t insn, uint32_t imm)
{
  return (insn & ~(MASK (12) << 10)) | ((imm & MASK (12)) << 10);
}

/* Reencode the IMM field of ADR.  */

uint32_t
_bfd_aarch64_reencode_adr_imm (uint32_t insn, uint32_t imm)
{
  return (insn & ~((MASK (2) << 29) | (MASK (19) << 5)))
    | ((imm & MASK (2)) << 29) | ((imm & (MASK (19) << 2)) << 3);
}
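
/* This is the inverse of _bfd_aarch64_decode_adrp_imm: the low two bits of
   IMM go back into instruction bits 30:29 and the remaining 19 bits into
   bits 23:5, so decoding a reencoded instruction recovers IMM for any
   21-bit value.  */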

/* Reencode the imm field of ld/st pos immediate.  */
static inline uint32_t
reencode_ldst_pos_imm (uint32_t insn, uint32_t imm)
{
  return (insn & ~(MASK (12) << 10)) | ((imm & MASK (12)) << 10);
}

/* Encode the 26-bit offset of unconditional branch.  */
static inline uint32_t
reencode_branch_ofs_26 (uint32_t insn, uint32_t ofs)
{
  return (insn & ~MASK (26)) | (ofs & MASK (26));
}

/* Encode the 19-bit offset of conditional branch and compare & branch.  */
static inline uint32_t
reencode_cond_branch_ofs_19 (uint32_t insn, uint32_t ofs)
{
  return (insn & ~(MASK (19) << 5)) | ((ofs & MASK (19)) << 5);
}

/* Reencode the 19-bit offset of load literal.  */
static inline uint32_t
reencode_ld_lit_ofs_19 (uint32_t insn, uint32_t ofs)
{
  return (insn & ~(MASK (19) << 5)) | ((ofs & MASK (19)) << 5);
}

/* Encode the 14-bit offset of test & branch.  */
static inline uint32_t
reencode_tst_branch_ofs_14 (uint32_t insn, uint32_t ofs)
{
  return (insn & ~(MASK (14) << 5)) | ((ofs & MASK (14)) << 5);
}

/* Reencode the imm field of move wide.  */
static inline uint32_t
reencode_movw_imm (uint32_t insn, uint32_t imm)
{
  return (insn & ~(MASK (16) << 5)) | ((imm & MASK (16)) << 5);
}

/* Reencode mov[zn] to movz.  */
static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  return opcode | (1 << 30);
}

/* Reencode mov[zn] to movn.  */
static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  return opcode & ~(1 << 30);
}
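
/* MOVZ and MOVN differ only in bit 30 of the opc field (1 for MOVZ, 0 for
   MOVN), so flipping that single bit is enough to switch between the two
   encodings.  */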

/* Return non-zero if the indicated VALUE has overflowed the maximum
   range expressible by an unsigned number with the indicated number of
   BITS.  */

static bfd_reloc_status_type
aarch64_unsigned_overflow (bfd_vma value, unsigned int bits)
{
  bfd_vma lim;
  if (bits >= sizeof (bfd_vma) * 8)
    return bfd_reloc_ok;
  lim = (bfd_vma) 1 << bits;
  if (value >= lim)
    return bfd_reloc_overflow;
  return bfd_reloc_ok;
}

/* Return non-zero if the indicated VALUE has overflowed the maximum
   range expressible by a signed number with the indicated number of
   BITS.  */

static bfd_reloc_status_type
aarch64_signed_overflow (bfd_vma value, unsigned int bits)
{
  bfd_signed_vma svalue = (bfd_signed_vma) value;
  bfd_signed_vma lim;

  if (bits >= sizeof (bfd_vma) * 8)
    return bfd_reloc_ok;
  lim = (bfd_signed_vma) 1 << (bits - 1);
  if (svalue < -lim || svalue >= lim)
    return bfd_reloc_overflow;
  return bfd_reloc_ok;
}
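
/* Illustrative example only: with BITS == 21 the accepted signed range is
   [-0x100000, 0xfffff], so a VALUE of 0x100000 is reported as
   bfd_reloc_overflow while -0x100000 is not.  */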

/* Insert the addend/value into the instruction or data object being
   relocated.  */
bfd_reloc_status_type
_bfd_aarch64_elf_put_addend (bfd *abfd,
                             bfd_byte *address, bfd_reloc_code_real_type r_type,
                             reloc_howto_type *howto, bfd_signed_vma addend)
{
  bfd_reloc_status_type status = bfd_reloc_ok;
  bfd_signed_vma old_addend = addend;
  bfd_vma contents;
  int size;

  size = bfd_get_reloc_size (howto);
  switch (size)
    {
    case 0:
      return status;
    case 2:
      contents = bfd_get_16 (abfd, address);
      break;
    case 4:
      if (howto->src_mask != 0xffffffff)
        /* Must be 32-bit instruction, always little-endian.  */
        contents = bfd_getl32 (address);
      else
        /* Must be 32-bit data (endianness dependent).  */
        contents = bfd_get_32 (abfd, address);
      break;
    case 8:
      contents = bfd_get_64 (abfd, address);
      break;
    default:
      abort ();
    }

  switch (howto->complain_on_overflow)
    {
    case complain_overflow_dont:
      break;
    case complain_overflow_signed:
      status = aarch64_signed_overflow (addend,
                                        howto->bitsize + howto->rightshift);
      break;
    case complain_overflow_unsigned:
      status = aarch64_unsigned_overflow (addend,
                                          howto->bitsize + howto->rightshift);
      break;
    case complain_overflow_bitfield:
    default:
      abort ();
    }

  addend >>= howto->rightshift;

  switch (r_type)
    {
    case BFD_RELOC_AARCH64_CALL26:
    case BFD_RELOC_AARCH64_JUMP26:
      contents = reencode_branch_ofs_26 (contents, addend);
      break;

    case BFD_RELOC_AARCH64_BRANCH19:
      contents = reencode_cond_branch_ofs_19 (contents, addend);
      break;

    case BFD_RELOC_AARCH64_TSTBR14:
      contents = reencode_tst_branch_ofs_14 (contents, addend);
      break;

    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
    case BFD_RELOC_AARCH64_LD_LO19_PCREL:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
      if (old_addend & ((1 << howto->rightshift) - 1))
        return bfd_reloc_overflow;
      contents = reencode_ld_lit_ofs_19 (contents, addend);
      break;

    case BFD_RELOC_AARCH64_TLSDESC_CALL:
      break;

    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
    case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
      contents = _bfd_aarch64_reencode_adr_imm (contents, addend);
      break;

    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
      /* Corresponds to: add rd, rn, #uimm12 to provide the low order
         12 bits of the page offset following
         BFD_RELOC_AARCH64_ADR_HI21_PCREL which computes the
         (pc-relative) page base.  */
      contents = reencode_add_imm (contents, addend);
      break;

    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
      if (old_addend & ((1 << howto->rightshift) - 1))
        return bfd_reloc_overflow;
      /* Used for ldr*|str* rt, [rn, #uimm12] to provide the low order
         12 bits of the page offset following BFD_RELOC_AARCH64_ADR_HI21_PCREL
         which computes the (pc-relative) page base.  */
      contents = reencode_ldst_pos_imm (contents, addend);
      break;

      /* Group relocations to create high bits of a 16, 32, 48 or 64
         bit signed data or abs address inline.  Will change
         instruction to MOVN or MOVZ depending on sign of calculated
         value.  */

    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      /* NOTE: We can only come here with movz or movn.  */
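      /* Illustrative example only: with a group-0 ADDEND of -0x1234 the
         instruction is rewritten as MOVN and the stored 16-bit immediate
         becomes ~(-0x1234) == 0x1233, since MOVN computes
         ~(imm16 << shift).  */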
      if (addend < 0)
        {
          /* Force use of MOVN.  */
          addend = ~addend;
          contents = reencode_movzn_to_movn (contents);
        }
      else
        {
          /* Force use of MOVZ.  */
          contents = reencode_movzn_to_movz (contents);
        }
      /* fall through */

      /* Group relocations to create a 16, 32, 48 or 64 bit unsigned
         data or abs address inline.  */

    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
      contents = reencode_movw_imm (contents, addend);
      break;

    default:
      /* Repack simple data */
      if (howto->dst_mask & (howto->dst_mask + 1))
        return bfd_reloc_notsupported;

      contents = ((contents & ~howto->dst_mask) | (addend & howto->dst_mask));
      break;
    }

  switch (size)
    {
    case 2:
      bfd_put_16 (abfd, contents, address);
      break;
    case 4:
      if (howto->dst_mask != 0xffffffff)
        /* must be 32-bit instruction, always little-endian */
        bfd_putl32 (contents, address);
      else
        /* must be 32-bit data (endianness dependent) */
        bfd_put_32 (abfd, contents, address);
      break;
    case 8:
      bfd_put_64 (abfd, contents, address);
      break;
    default:
      abort ();
    }

  return status;
}
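
/* A hypothetical call from an AArch64 backend relocation routine might look
   like this (the variable names are illustrative only):

     status = _bfd_aarch64_elf_put_addend (input_bfd,
                                           contents + rel->r_offset,
                                           BFD_RELOC_AARCH64_ADD_LO12,
                                           howto, value);

   i.e. the caller passes the address of the word being patched together
   with the already resolved value, and this routine packs the value into
   the right instruction or data field and reports any overflow.  */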

bfd_vma
_bfd_aarch64_elf_resolve_relocation (bfd_reloc_code_real_type r_type,
                                     bfd_vma place, bfd_vma value,
                                     bfd_vma addend, bfd_boolean weak_undef_p)
{
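  /* Note: PG and PG_OFFSET come from elfxx-aarch64.h; PG (x) masks off the
     low 12 bits to give the 4KiB page base and PG_OFFSET (x) keeps only
     those low 12 bits.  For example, PG (0x11234) - PG (0x20f00) is
     0x11000 - 0x20000 == -0xf000, the page-relative distance an ADRP has
     to cover.  */
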
  switch (r_type)
    {
    case BFD_RELOC_AARCH64_NONE:
    case BFD_RELOC_AARCH64_TLSDESC_CALL:
      break;

    case BFD_RELOC_AARCH64_16_PCREL:
    case BFD_RELOC_AARCH64_32_PCREL:
    case BFD_RELOC_AARCH64_64_PCREL:
    case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
    case BFD_RELOC_AARCH64_BRANCH19:
    case BFD_RELOC_AARCH64_LD_LO19_PCREL:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
    case BFD_RELOC_AARCH64_TSTBR14:
      if (weak_undef_p)
        value = place;
      value = value + addend - place;
      break;

    case BFD_RELOC_AARCH64_CALL26:
    case BFD_RELOC_AARCH64_JUMP26:
      value = value + addend - place;
      break;

    case BFD_RELOC_AARCH64_16:
    case BFD_RELOC_AARCH64_32:
    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
    case BFD_RELOC_AARCH64_MOVW_G0:
    case BFD_RELOC_AARCH64_MOVW_G0_NC:
    case BFD_RELOC_AARCH64_MOVW_G0_S:
    case BFD_RELOC_AARCH64_MOVW_G1:
    case BFD_RELOC_AARCH64_MOVW_G1_NC:
    case BFD_RELOC_AARCH64_MOVW_G1_S:
    case BFD_RELOC_AARCH64_MOVW_G2:
    case BFD_RELOC_AARCH64_MOVW_G2_NC:
    case BFD_RELOC_AARCH64_MOVW_G2_S:
    case BFD_RELOC_AARCH64_MOVW_G3:
    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
      value = value + addend;
      break;

    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
      if (weak_undef_p)
        value = PG (place);
      value = PG (value + addend) - PG (place);
      break;

    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
      value = value + addend - place;
      break;

    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
      value = PG (value + addend) - PG (place);
      break;

    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
      /* Caller must make sure addend is the base address of .got section.  */
      value = value - PG (addend);
      break;

    case BFD_RELOC_AARCH64_ADD_LO12:
    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
    case BFD_RELOC_AARCH64_LDST128_LO12:
    case BFD_RELOC_AARCH64_LDST16_LO12:
    case BFD_RELOC_AARCH64_LDST32_LO12:
    case BFD_RELOC_AARCH64_LDST64_LO12:
    case BFD_RELOC_AARCH64_LDST8_LO12:
    case BFD_RELOC_AARCH64_TLSDESC_ADD:
    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
    case BFD_RELOC_AARCH64_TLSDESC_LDR:
    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
      value = PG_OFFSET (value + addend);
      break;

    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
      value = value + addend;
      break;

    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
      value = (value + addend) & (bfd_vma) 0xffff0000;
      break;
    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
      /* Mask off the low 12 bits but keep all the other high bits, so
         that the later generic code can check whether there is
         overflow.  */
      value = (value + addend) & ~(bfd_vma) 0xfff;
      break;

    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
      value = (value + addend) & (bfd_vma) 0xffff;
      break;

    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
      value = (value + addend) & ~(bfd_vma) 0xffffffff;
      value -= place & ~(bfd_vma) 0xffffffff;
      break;

    default:
      break;
    }

  return value;
}

/* Hook called by the linker routine which adds symbols from an object
   file.  */

bfd_boolean
_bfd_aarch64_elf_add_symbol_hook (bfd *abfd, struct bfd_link_info *info,
                                  Elf_Internal_Sym *sym,
                                  const char **namep ATTRIBUTE_UNUSED,
                                  flagword *flagsp ATTRIBUTE_UNUSED,
                                  asection **secp ATTRIBUTE_UNUSED,
                                  bfd_vma *valp ATTRIBUTE_UNUSED)
{
  if ((ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC
       || ELF_ST_BIND (sym->st_info) == STB_GNU_UNIQUE)
      && (abfd->flags & DYNAMIC) == 0
      && bfd_get_flavour (info->output_bfd) == bfd_target_elf_flavour)
    elf_tdata (info->output_bfd)->has_gnu_symbols = elf_gnu_symbol_any;

  return TRUE;
}

/* Support for core dump NOTE sections.  */

bfd_boolean
_bfd_aarch64_elf_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
{
  int offset;
  size_t size;

  switch (note->descsz)
    {
    default:
      return FALSE;

    case 392:  /* sizeof(struct elf_prstatus) on Linux/arm64.  */
      /* pr_cursig */
      elf_tdata (abfd)->core->signal
        = bfd_get_16 (abfd, note->descdata + 12);

      /* pr_pid */
      elf_tdata (abfd)->core->lwpid
        = bfd_get_32 (abfd, note->descdata + 32);

      /* pr_reg */
      offset = 112;
      size = 272;

      break;
    }

  /* Make a ".reg/999" section.  */
  return _bfd_elfcore_make_pseudosection (abfd, ".reg",
                                          size, note->descpos + offset);
}

bfd_boolean
_bfd_aarch64_elf_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
{
  switch (note->descsz)
    {
    default:
      return FALSE;

    case 136:  /* This is sizeof(struct elf_prpsinfo) on Linux/aarch64.  */
      elf_tdata (abfd)->core->pid = bfd_get_32 (abfd, note->descdata + 24);
      elf_tdata (abfd)->core->program
        = _bfd_elfcore_strndup (abfd, note->descdata + 40, 16);
      elf_tdata (abfd)->core->command
        = _bfd_elfcore_strndup (abfd, note->descdata + 56, 80);
    }

  /* Note that for some reason, some implementations (at least one,
     anyway) tack a spurious space onto the end of the args, so strip
     it off if it exists.  */

  {
    char *command = elf_tdata (abfd)->core->command;
    int n = strlen (command);

    if (0 < n && command[n - 1] == ' ')
      command[n - 1] = '\0';
  }

  return TRUE;
}

char *
_bfd_aarch64_elf_write_core_note (bfd *abfd, char *buf, int *bufsiz, int note_type,
                                  ...)
{
  switch (note_type)
    {
    default:
      return NULL;

    case NT_PRPSINFO:
      {
        char data[136];
        va_list ap;

        va_start (ap, note_type);
        memset (data, 0, sizeof (data));
        strncpy (data + 40, va_arg (ap, const char *), 16);
        strncpy (data + 56, va_arg (ap, const char *), 80);
        va_end (ap);

        return elfcore_write_note (abfd, buf, bufsiz, "CORE",
                                   note_type, data, sizeof (data));
      }

    case NT_PRSTATUS:
      {
        char data[392];
        va_list ap;
        long pid;
        int cursig;
        const void *greg;

        va_start (ap, note_type);
        memset (data, 0, sizeof (data));
        pid = va_arg (ap, long);
        bfd_put_32 (abfd, pid, data + 32);
        cursig = va_arg (ap, int);
        bfd_put_16 (abfd, cursig, data + 12);
        greg = va_arg (ap, const void *);
        memcpy (data + 112, greg, 272);
        va_end (ap);

        return elfcore_write_note (abfd, buf, bufsiz, "CORE",
                                   note_type, data, sizeof (data));
      }
    }
}