1 /* X86-64 specific support for ELF
2 Copyright 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009,
3 2010, 2011, 2012
4 Free Software Foundation, Inc.
5 Contributed by Jan Hubicka <jh@suse.cz>.
6
7 This file is part of BFD, the Binary File Descriptor library.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program; if not, write to the Free Software
21 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
22 MA 02110-1301, USA. */
23
24 #include "sysdep.h"
25 #include "bfd.h"
26 #include "bfdlink.h"
27 #include "libbfd.h"
28 #include "elf-bfd.h"
29 #include "elf-nacl.h"
30 #include "bfd_stdint.h"
31 #include "objalloc.h"
32 #include "hashtab.h"
33 #include "dwarf2.h"
34 #include "libiberty.h"
35
36 #include "elf/x86-64.h"
37
38 #ifdef CORE_HEADER
39 #include <stdarg.h>
40 #include CORE_HEADER
41 #endif
42
43 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
44 #define MINUS_ONE (~ (bfd_vma) 0)
45
46 /* Since both 32-bit and 64-bit x86-64 encode the relocation type in an
47 identical manner, we use ELF32_R_TYPE instead of ELF64_R_TYPE to get the
48 relocation type.  We also use ELF_ST_TYPE instead of ELF64_ST_TYPE
49 since they are the same.  */
50
51 #define ABI_64_P(abfd) \
52 (get_elf_backend_data (abfd)->s->elfclass == ELFCLASS64)
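
/* A minimal illustration, kept out of the build: because x86-64
   relocation type values all fit in the low eight bits of r_info,
   ELF32_R_TYPE, which masks only those bits, extracts the same type
   from either the 32-bit or the 64-bit relocation format, so one
   accessor serves both ABIs.  example_get_r_type is a hypothetical
   helper, not part of this backend.  */
#if 0
static unsigned int
example_get_r_type (const Elf_Internal_Rela *rel)
{
  /* For x86-64 this returns the same value ELF64_R_TYPE would.  */
  return ELF32_R_TYPE (rel->r_info);
}
#endif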
53
54 /* The relocation "howto" table. Order of fields:
55 type, rightshift, size, bitsize, pc_relative, bitpos, complain_on_overflow,
56 special_function, name, partial_inplace, src_mask, dst_mask, pcrel_offset. */
57 static reloc_howto_type x86_64_elf_howto_table[] =
58 {
59 HOWTO(R_X86_64_NONE, 0, 0, 0, FALSE, 0, complain_overflow_dont,
60 bfd_elf_generic_reloc, "R_X86_64_NONE", FALSE, 0x00000000, 0x00000000,
61 FALSE),
62 HOWTO(R_X86_64_64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
63 bfd_elf_generic_reloc, "R_X86_64_64", FALSE, MINUS_ONE, MINUS_ONE,
64 FALSE),
65 HOWTO(R_X86_64_PC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
66 bfd_elf_generic_reloc, "R_X86_64_PC32", FALSE, 0xffffffff, 0xffffffff,
67 TRUE),
68 HOWTO(R_X86_64_GOT32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
69 bfd_elf_generic_reloc, "R_X86_64_GOT32", FALSE, 0xffffffff, 0xffffffff,
70 FALSE),
71 HOWTO(R_X86_64_PLT32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
72 bfd_elf_generic_reloc, "R_X86_64_PLT32", FALSE, 0xffffffff, 0xffffffff,
73 TRUE),
74 HOWTO(R_X86_64_COPY, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
75 bfd_elf_generic_reloc, "R_X86_64_COPY", FALSE, 0xffffffff, 0xffffffff,
76 FALSE),
77 HOWTO(R_X86_64_GLOB_DAT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
78 bfd_elf_generic_reloc, "R_X86_64_GLOB_DAT", FALSE, MINUS_ONE,
79 MINUS_ONE, FALSE),
80 HOWTO(R_X86_64_JUMP_SLOT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
81 bfd_elf_generic_reloc, "R_X86_64_JUMP_SLOT", FALSE, MINUS_ONE,
82 MINUS_ONE, FALSE),
83 HOWTO(R_X86_64_RELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
84 bfd_elf_generic_reloc, "R_X86_64_RELATIVE", FALSE, MINUS_ONE,
85 MINUS_ONE, FALSE),
86 HOWTO(R_X86_64_GOTPCREL, 0, 2, 32, TRUE, 0, complain_overflow_signed,
87 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL", FALSE, 0xffffffff,
88 0xffffffff, TRUE),
89 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
90 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
91 FALSE),
92 HOWTO(R_X86_64_32S, 0, 2, 32, FALSE, 0, complain_overflow_signed,
93 bfd_elf_generic_reloc, "R_X86_64_32S", FALSE, 0xffffffff, 0xffffffff,
94 FALSE),
95 HOWTO(R_X86_64_16, 0, 1, 16, FALSE, 0, complain_overflow_bitfield,
96 bfd_elf_generic_reloc, "R_X86_64_16", FALSE, 0xffff, 0xffff, FALSE),
97 HOWTO(R_X86_64_PC16, 0, 1, 16, TRUE, 0, complain_overflow_bitfield,
98 bfd_elf_generic_reloc, "R_X86_64_PC16", FALSE, 0xffff, 0xffff, TRUE),
99 HOWTO(R_X86_64_8, 0, 0, 8, FALSE, 0, complain_overflow_bitfield,
100 bfd_elf_generic_reloc, "R_X86_64_8", FALSE, 0xff, 0xff, FALSE),
101 HOWTO(R_X86_64_PC8, 0, 0, 8, TRUE, 0, complain_overflow_signed,
102 bfd_elf_generic_reloc, "R_X86_64_PC8", FALSE, 0xff, 0xff, TRUE),
103 HOWTO(R_X86_64_DTPMOD64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
104 bfd_elf_generic_reloc, "R_X86_64_DTPMOD64", FALSE, MINUS_ONE,
105 MINUS_ONE, FALSE),
106 HOWTO(R_X86_64_DTPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
107 bfd_elf_generic_reloc, "R_X86_64_DTPOFF64", FALSE, MINUS_ONE,
108 MINUS_ONE, FALSE),
109 HOWTO(R_X86_64_TPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
110 bfd_elf_generic_reloc, "R_X86_64_TPOFF64", FALSE, MINUS_ONE,
111 MINUS_ONE, FALSE),
112 HOWTO(R_X86_64_TLSGD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
113 bfd_elf_generic_reloc, "R_X86_64_TLSGD", FALSE, 0xffffffff,
114 0xffffffff, TRUE),
115 HOWTO(R_X86_64_TLSLD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
116 bfd_elf_generic_reloc, "R_X86_64_TLSLD", FALSE, 0xffffffff,
117 0xffffffff, TRUE),
118 HOWTO(R_X86_64_DTPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
119 bfd_elf_generic_reloc, "R_X86_64_DTPOFF32", FALSE, 0xffffffff,
120 0xffffffff, FALSE),
121 HOWTO(R_X86_64_GOTTPOFF, 0, 2, 32, TRUE, 0, complain_overflow_signed,
122 bfd_elf_generic_reloc, "R_X86_64_GOTTPOFF", FALSE, 0xffffffff,
123 0xffffffff, TRUE),
124 HOWTO(R_X86_64_TPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
125 bfd_elf_generic_reloc, "R_X86_64_TPOFF32", FALSE, 0xffffffff,
126 0xffffffff, FALSE),
127 HOWTO(R_X86_64_PC64, 0, 4, 64, TRUE, 0, complain_overflow_bitfield,
128 bfd_elf_generic_reloc, "R_X86_64_PC64", FALSE, MINUS_ONE, MINUS_ONE,
129 TRUE),
130 HOWTO(R_X86_64_GOTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
131 bfd_elf_generic_reloc, "R_X86_64_GOTOFF64",
132 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
133 HOWTO(R_X86_64_GOTPC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
134 bfd_elf_generic_reloc, "R_X86_64_GOTPC32",
135 FALSE, 0xffffffff, 0xffffffff, TRUE),
136 HOWTO(R_X86_64_GOT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
137 bfd_elf_generic_reloc, "R_X86_64_GOT64", FALSE, MINUS_ONE, MINUS_ONE,
138 FALSE),
139 HOWTO(R_X86_64_GOTPCREL64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
140 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL64", FALSE, MINUS_ONE,
141 MINUS_ONE, TRUE),
142 HOWTO(R_X86_64_GOTPC64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
143 bfd_elf_generic_reloc, "R_X86_64_GOTPC64",
144 FALSE, MINUS_ONE, MINUS_ONE, TRUE),
145 HOWTO(R_X86_64_GOTPLT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
146 bfd_elf_generic_reloc, "R_X86_64_GOTPLT64", FALSE, MINUS_ONE,
147 MINUS_ONE, FALSE),
148 HOWTO(R_X86_64_PLTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
149 bfd_elf_generic_reloc, "R_X86_64_PLTOFF64", FALSE, MINUS_ONE,
150 MINUS_ONE, FALSE),
151 EMPTY_HOWTO (32),
152 EMPTY_HOWTO (33),
153 HOWTO(R_X86_64_GOTPC32_TLSDESC, 0, 2, 32, TRUE, 0,
154 complain_overflow_bitfield, bfd_elf_generic_reloc,
155 "R_X86_64_GOTPC32_TLSDESC",
156 FALSE, 0xffffffff, 0xffffffff, TRUE),
157 HOWTO(R_X86_64_TLSDESC_CALL, 0, 0, 0, FALSE, 0,
158 complain_overflow_dont, bfd_elf_generic_reloc,
159 "R_X86_64_TLSDESC_CALL",
160 FALSE, 0, 0, FALSE),
161 HOWTO(R_X86_64_TLSDESC, 0, 4, 64, FALSE, 0,
162 complain_overflow_bitfield, bfd_elf_generic_reloc,
163 "R_X86_64_TLSDESC",
164 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
165 HOWTO(R_X86_64_IRELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
166 bfd_elf_generic_reloc, "R_X86_64_IRELATIVE", FALSE, MINUS_ONE,
167 MINUS_ONE, FALSE),
168 HOWTO(R_X86_64_RELATIVE64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
169 bfd_elf_generic_reloc, "R_X86_64_RELATIVE64", FALSE, MINUS_ONE,
170 MINUS_ONE, FALSE),
171
172 /* We have a gap in the reloc numbers here.
173 R_X86_64_standard counts the number of relocation types up to this
174 point, and R_X86_64_vt_offset is the value to subtract from a reloc
175 type of R_X86_64_GNU_VT* to form an index into this table. */
176 #define R_X86_64_standard (R_X86_64_IRELATIVE + 1)
177 #define R_X86_64_vt_offset (R_X86_64_GNU_VTINHERIT - R_X86_64_standard)
178
179 /* GNU extension to record C++ vtable hierarchy. */
180 HOWTO (R_X86_64_GNU_VTINHERIT, 0, 4, 0, FALSE, 0, complain_overflow_dont,
181 NULL, "R_X86_64_GNU_VTINHERIT", FALSE, 0, 0, FALSE),
182
183 /* GNU extension to record C++ vtable member usage. */
184 HOWTO (R_X86_64_GNU_VTENTRY, 0, 4, 0, FALSE, 0, complain_overflow_dont,
185 _bfd_elf_rel_vtable_reloc_fn, "R_X86_64_GNU_VTENTRY", FALSE, 0, 0,
186 FALSE),
187
188 /* Use complain_overflow_bitfield on R_X86_64_32 for x32. */
189 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
190 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
191 FALSE)
192 };
193
194 #define IS_X86_64_PCREL_TYPE(TYPE) \
195 ( ((TYPE) == R_X86_64_PC8) \
196 || ((TYPE) == R_X86_64_PC16) \
197 || ((TYPE) == R_X86_64_PC32) \
198 || ((TYPE) == R_X86_64_PC64))
199
200 /* Map BFD relocs to the x86_64 elf relocs. */
201 struct elf_reloc_map
202 {
203 bfd_reloc_code_real_type bfd_reloc_val;
204 unsigned char elf_reloc_val;
205 };
206
207 static const struct elf_reloc_map x86_64_reloc_map[] =
208 {
209 { BFD_RELOC_NONE, R_X86_64_NONE, },
210 { BFD_RELOC_64, R_X86_64_64, },
211 { BFD_RELOC_32_PCREL, R_X86_64_PC32, },
212 { BFD_RELOC_X86_64_GOT32, R_X86_64_GOT32,},
213 { BFD_RELOC_X86_64_PLT32, R_X86_64_PLT32,},
214 { BFD_RELOC_X86_64_COPY, R_X86_64_COPY, },
215 { BFD_RELOC_X86_64_GLOB_DAT, R_X86_64_GLOB_DAT, },
216 { BFD_RELOC_X86_64_JUMP_SLOT, R_X86_64_JUMP_SLOT, },
217 { BFD_RELOC_X86_64_RELATIVE, R_X86_64_RELATIVE, },
218 { BFD_RELOC_X86_64_GOTPCREL, R_X86_64_GOTPCREL, },
219 { BFD_RELOC_32, R_X86_64_32, },
220 { BFD_RELOC_X86_64_32S, R_X86_64_32S, },
221 { BFD_RELOC_16, R_X86_64_16, },
222 { BFD_RELOC_16_PCREL, R_X86_64_PC16, },
223 { BFD_RELOC_8, R_X86_64_8, },
224 { BFD_RELOC_8_PCREL, R_X86_64_PC8, },
225 { BFD_RELOC_X86_64_DTPMOD64, R_X86_64_DTPMOD64, },
226 { BFD_RELOC_X86_64_DTPOFF64, R_X86_64_DTPOFF64, },
227 { BFD_RELOC_X86_64_TPOFF64, R_X86_64_TPOFF64, },
228 { BFD_RELOC_X86_64_TLSGD, R_X86_64_TLSGD, },
229 { BFD_RELOC_X86_64_TLSLD, R_X86_64_TLSLD, },
230 { BFD_RELOC_X86_64_DTPOFF32, R_X86_64_DTPOFF32, },
231 { BFD_RELOC_X86_64_GOTTPOFF, R_X86_64_GOTTPOFF, },
232 { BFD_RELOC_X86_64_TPOFF32, R_X86_64_TPOFF32, },
233 { BFD_RELOC_64_PCREL, R_X86_64_PC64, },
234 { BFD_RELOC_X86_64_GOTOFF64, R_X86_64_GOTOFF64, },
235 { BFD_RELOC_X86_64_GOTPC32, R_X86_64_GOTPC32, },
236 { BFD_RELOC_X86_64_GOT64, R_X86_64_GOT64, },
237 { BFD_RELOC_X86_64_GOTPCREL64,R_X86_64_GOTPCREL64, },
238 { BFD_RELOC_X86_64_GOTPC64, R_X86_64_GOTPC64, },
239 { BFD_RELOC_X86_64_GOTPLT64, R_X86_64_GOTPLT64, },
240 { BFD_RELOC_X86_64_PLTOFF64, R_X86_64_PLTOFF64, },
241 { BFD_RELOC_X86_64_GOTPC32_TLSDESC, R_X86_64_GOTPC32_TLSDESC, },
242 { BFD_RELOC_X86_64_TLSDESC_CALL, R_X86_64_TLSDESC_CALL, },
243 { BFD_RELOC_X86_64_TLSDESC, R_X86_64_TLSDESC, },
244 { BFD_RELOC_X86_64_IRELATIVE, R_X86_64_IRELATIVE, },
245 { BFD_RELOC_VTABLE_INHERIT, R_X86_64_GNU_VTINHERIT, },
246 { BFD_RELOC_VTABLE_ENTRY, R_X86_64_GNU_VTENTRY, },
247 };
248
249 static reloc_howto_type *
250 elf_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type)
251 {
252 unsigned i;
253
254 if (r_type == (unsigned int) R_X86_64_32)
255 {
256 if (ABI_64_P (abfd))
257 i = r_type;
258 else
259 i = ARRAY_SIZE (x86_64_elf_howto_table) - 1;
260 }
261 else if (r_type < (unsigned int) R_X86_64_GNU_VTINHERIT
262 || r_type >= (unsigned int) R_X86_64_max)
263 {
264 if (r_type >= (unsigned int) R_X86_64_standard)
265 {
266 (*_bfd_error_handler) (_("%B: invalid relocation type %d"),
267 abfd, (int) r_type);
268 r_type = R_X86_64_NONE;
269 }
270 i = r_type;
271 }
272 else
273 i = r_type - (unsigned int) R_X86_64_vt_offset;
274 BFD_ASSERT (x86_64_elf_howto_table[i].type == r_type);
275 return &x86_64_elf_howto_table[i];
276 }
277
278 /* Given a BFD reloc type, return a HOWTO structure. */
279 static reloc_howto_type *
280 elf_x86_64_reloc_type_lookup (bfd *abfd,
281 bfd_reloc_code_real_type code)
282 {
283 unsigned int i;
284
285 for (i = 0; i < sizeof (x86_64_reloc_map) / sizeof (struct elf_reloc_map);
286 i++)
287 {
288 if (x86_64_reloc_map[i].bfd_reloc_val == code)
289 return elf_x86_64_rtype_to_howto (abfd,
290 x86_64_reloc_map[i].elf_reloc_val);
291 }
292 return 0;
293 }
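
/* Usage sketch, illustrative only and kept out of the build: a caller
   holding a BFD reloc code can resolve it to the backend howto and
   read its fields; per the map above, BFD_RELOC_32_PCREL resolves to
   the entry named "R_X86_64_PC32".  example_reloc_name is a
   hypothetical helper, not part of BFD's API.  */
#if 0
static const char *
example_reloc_name (bfd *abfd, bfd_reloc_code_real_type code)
{
  reloc_howto_type *howto = elf_x86_64_reloc_type_lookup (abfd, code);
  return howto != NULL ? howto->name : NULL;
}
#endif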
294
295 static reloc_howto_type *
296 elf_x86_64_reloc_name_lookup (bfd *abfd,
297 const char *r_name)
298 {
299 unsigned int i;
300
301 if (!ABI_64_P (abfd) && strcasecmp (r_name, "R_X86_64_32") == 0)
302 {
303 /* Get x32 R_X86_64_32. */
304 reloc_howto_type *reloc
305 = &x86_64_elf_howto_table[ARRAY_SIZE (x86_64_elf_howto_table) - 1];
306 BFD_ASSERT (reloc->type == (unsigned int) R_X86_64_32);
307 return reloc;
308 }
309
310 for (i = 0; i < ARRAY_SIZE (x86_64_elf_howto_table); i++)
311 if (x86_64_elf_howto_table[i].name != NULL
312 && strcasecmp (x86_64_elf_howto_table[i].name, r_name) == 0)
313 return &x86_64_elf_howto_table[i];
314
315 return NULL;
316 }
317
318 /* Given an x86_64 ELF reloc type, fill in an arelent structure. */
319
320 static void
321 elf_x86_64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *cache_ptr,
322 Elf_Internal_Rela *dst)
323 {
324 unsigned r_type;
325
326 r_type = ELF32_R_TYPE (dst->r_info);
327 cache_ptr->howto = elf_x86_64_rtype_to_howto (abfd, r_type);
328 BFD_ASSERT (r_type == cache_ptr->howto->type);
329 }
330 \f
331 /* Support for core dump NOTE sections. */
332 static bfd_boolean
333 elf_x86_64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
334 {
335 int offset;
336 size_t size;
337
338 switch (note->descsz)
339 {
340 default:
341 return FALSE;
342
343 case 296: /* sizeof(struct elf_prstatus) on Linux/x32 */
344 /* pr_cursig */
345 elf_tdata (abfd)->core_signal = bfd_get_16 (abfd, note->descdata + 12);
346
347 /* pr_pid */
348 elf_tdata (abfd)->core_lwpid = bfd_get_32 (abfd, note->descdata + 24);
349
350 /* pr_reg */
351 offset = 72;
352 size = 216;
353
354 break;
355
356 case 336: /* sizeof(struct elf_prstatus) on Linux/x86_64 */
357 /* pr_cursig */
358 elf_tdata (abfd)->core_signal
359 = bfd_get_16 (abfd, note->descdata + 12);
360
361 /* pr_pid */
362 elf_tdata (abfd)->core_lwpid
363 = bfd_get_32 (abfd, note->descdata + 32);
364
365 /* pr_reg */
366 offset = 112;
367 size = 216;
368
369 break;
370 }
371
372 /* Make a ".reg/999" section. */
373 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
374 size, note->descpos + offset);
375 }
376
377 static bfd_boolean
378 elf_x86_64_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
379 {
380 switch (note->descsz)
381 {
382 default:
383 return FALSE;
384
385 case 124: /* sizeof(struct elf_prpsinfo) on Linux/x32 */
386 elf_tdata (abfd)->core_pid
387 = bfd_get_32 (abfd, note->descdata + 12);
388 elf_tdata (abfd)->core_program
389 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
390 elf_tdata (abfd)->core_command
391 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
392 break;
393
394 case 136: /* sizeof(struct elf_prpsinfo) on Linux/x86_64 */
395 elf_tdata (abfd)->core_pid
396 = bfd_get_32 (abfd, note->descdata + 24);
397 elf_tdata (abfd)->core_program
398 = _bfd_elfcore_strndup (abfd, note->descdata + 40, 16);
399 elf_tdata (abfd)->core_command
400 = _bfd_elfcore_strndup (abfd, note->descdata + 56, 80);
401 }
402
403 /* Note that, for some reason, a spurious space is tacked onto
404 the end of the args in some implementations (at least one,
405 anyway), so strip it off if it is present. */
406
407 {
408 char *command = elf_tdata (abfd)->core_command;
409 int n = strlen (command);
410
411 if (0 < n && command[n - 1] == ' ')
412 command[n - 1] = '\0';
413 }
414
415 return TRUE;
416 }
417
418 #ifdef CORE_HEADER
419 static char *
420 elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz,
421 int note_type, ...)
422 {
423 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
424 va_list ap;
425 const char *fname, *psargs;
426 long pid;
427 int cursig;
428 const void *gregs;
429
430 switch (note_type)
431 {
432 default:
433 return NULL;
434
435 case NT_PRPSINFO:
436 va_start (ap, note_type);
437 fname = va_arg (ap, const char *);
438 psargs = va_arg (ap, const char *);
439 va_end (ap);
440
441 if (bed->s->elfclass == ELFCLASS32)
442 {
443 prpsinfo32_t data;
444 memset (&data, 0, sizeof (data));
445 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
446 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
447 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
448 &data, sizeof (data));
449 }
450 else
451 {
452 prpsinfo64_t data;
453 memset (&data, 0, sizeof (data));
454 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
455 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
456 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
457 &data, sizeof (data));
458 }
459 /* NOTREACHED */
460
461 case NT_PRSTATUS:
462 va_start (ap, note_type);
463 pid = va_arg (ap, long);
464 cursig = va_arg (ap, int);
465 gregs = va_arg (ap, const void *);
466 va_end (ap);
467
468 if (bed->s->elfclass == ELFCLASS32)
469 {
470 if (bed->elf_machine_code == EM_X86_64)
471 {
472 prstatusx32_t prstat;
473 memset (&prstat, 0, sizeof (prstat));
474 prstat.pr_pid = pid;
475 prstat.pr_cursig = cursig;
476 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
477 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
478 &prstat, sizeof (prstat));
479 }
480 else
481 {
482 prstatus32_t prstat;
483 memset (&prstat, 0, sizeof (prstat));
484 prstat.pr_pid = pid;
485 prstat.pr_cursig = cursig;
486 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
487 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
488 &prstat, sizeof (prstat));
489 }
490 }
491 else
492 {
493 prstatus64_t prstat;
494 memset (&prstat, 0, sizeof (prstat));
495 prstat.pr_pid = pid;
496 prstat.pr_cursig = cursig;
497 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
498 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
499 &prstat, sizeof (prstat));
500 }
501 }
502 /* NOTREACHED */
503 }
504 #endif
505 \f
506 /* Functions for the x86-64 ELF linker. */
507
508 /* The name of the dynamic interpreter. This is put in the .interp
509 section. */
510
511 #define ELF64_DYNAMIC_INTERPRETER "/lib/ld64.so.1"
512 #define ELF32_DYNAMIC_INTERPRETER "/lib/ldx32.so.1"
513
514 /* If ELIMINATE_COPY_RELOCS is non-zero, the linker will try to avoid
515 copying dynamic variables from a shared lib into an app's dynbss
516 section, and instead use a dynamic relocation to point into the
517 shared lib. */
518 #define ELIMINATE_COPY_RELOCS 1
519
520 /* The size in bytes of an entry in the global offset table. */
521
522 #define GOT_ENTRY_SIZE 8
523
524 /* The size in bytes of an entry in the procedure linkage table. */
525
526 #define PLT_ENTRY_SIZE 16
527
528 /* The first entry in a procedure linkage table looks like this. See the
529 SVR4 ABI i386 supplement and the x86-64 ABI to see how this works. */
530
531 static const bfd_byte elf_x86_64_plt0_entry[PLT_ENTRY_SIZE] =
532 {
533 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
534 0xff, 0x25, 16, 0, 0, 0, /* jmpq *GOT+16(%rip) */
535 0x0f, 0x1f, 0x40, 0x00 /* nopl 0(%rax) */
536 };
537
538 /* Subsequent entries in a procedure linkage table look like this. */
539
540 static const bfd_byte elf_x86_64_plt_entry[PLT_ENTRY_SIZE] =
541 {
542 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
543 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
544 0x68, /* pushq immediate */
545 0, 0, 0, 0, /* replaced with index into relocation table. */
546 0xe9, /* jmp relative */
547 0, 0, 0, 0 /* replaced with offset to start of .plt0. */
548 };
549
550 /* .eh_frame covering the .plt section. */
551
552 static const bfd_byte elf_x86_64_eh_frame_plt[] =
553 {
554 #define PLT_CIE_LENGTH 20
555 #define PLT_FDE_LENGTH 36
556 #define PLT_FDE_START_OFFSET 4 + PLT_CIE_LENGTH + 8
557 #define PLT_FDE_LEN_OFFSET 4 + PLT_CIE_LENGTH + 12
558 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
559 0, 0, 0, 0, /* CIE ID */
560 1, /* CIE version */
561 'z', 'R', 0, /* Augmentation string */
562 1, /* Code alignment factor */
563 0x78, /* Data alignment factor */
564 16, /* Return address column */
565 1, /* Augmentation size */
566 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
567 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
568 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
569 DW_CFA_nop, DW_CFA_nop,
570
571 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
572 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
573 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
574 0, 0, 0, 0, /* .plt size goes here */
575 0, /* Augmentation size */
576 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
577 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
578 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
579 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
580 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
581 11, /* Block length */
582 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
583 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
584 DW_OP_lit15, DW_OP_and, DW_OP_lit11, DW_OP_ge,
585 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
586 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
587 };
588
589 /* Architecture-specific backend data for x86-64. */
590
591 struct elf_x86_64_backend_data
592 {
593 /* Templates for the initial PLT entry and for subsequent entries. */
594 const bfd_byte *plt0_entry;
595 const bfd_byte *plt_entry;
596 unsigned int plt_entry_size; /* Size of each PLT entry. */
597
598 /* Offsets into plt0_entry that are to be replaced with GOT[1] and GOT[2]. */
599 unsigned int plt0_got1_offset;
600 unsigned int plt0_got2_offset;
601
602 /* Offset of the end of the PC-relative instruction containing
603 plt0_got2_offset. */
604 unsigned int plt0_got2_insn_end;
605
606 /* Offsets into plt_entry that are to be replaced with... */
607 unsigned int plt_got_offset; /* ... address of this symbol in .got. */
608 unsigned int plt_reloc_offset; /* ... offset into relocation table. */
609 unsigned int plt_plt_offset; /* ... offset to start of .plt. */
610
611 /* Length of the PC-relative instruction containing plt_got_offset. */
612 unsigned int plt_got_insn_size;
613
614 /* Offset of the end of the PC-relative jump to plt0_entry. */
615 unsigned int plt_plt_insn_end;
616
617 /* Offset into plt_entry where the initial value of the GOT entry points. */
618 unsigned int plt_lazy_offset;
619
620 /* .eh_frame covering the .plt section. */
621 const bfd_byte *eh_frame_plt;
622 unsigned int eh_frame_plt_size;
623 };
624
625 #define get_elf_x86_64_backend_data(abfd) \
626 ((const struct elf_x86_64_backend_data *) \
627 get_elf_backend_data (abfd)->arch_data)
628
629 #define GET_PLT_ENTRY_SIZE(abfd) \
630 get_elf_x86_64_backend_data (abfd)->plt_entry_size
631
632 /* These are the standard parameters. */
633 static const struct elf_x86_64_backend_data elf_x86_64_arch_bed =
634 {
635 elf_x86_64_plt0_entry, /* plt0_entry */
636 elf_x86_64_plt_entry, /* plt_entry */
637 sizeof (elf_x86_64_plt_entry), /* plt_entry_size */
638 2, /* plt0_got1_offset */
639 8, /* plt0_got2_offset */
640 12, /* plt0_got2_insn_end */
641 2, /* plt_got_offset */
642 7, /* plt_reloc_offset */
643 12, /* plt_plt_offset */
644 6, /* plt_got_insn_size */
645 PLT_ENTRY_SIZE, /* plt_plt_insn_end */
646 6, /* plt_lazy_offset */
647 elf_x86_64_eh_frame_plt, /* eh_frame_plt */
648 sizeof (elf_x86_64_eh_frame_plt), /* eh_frame_plt_size */
649 };
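
/* Illustrative sketch, kept out of the build: how the offsets above
   line up with the elf_x86_64_plt_entry template.  The three zeroed
   slots in the template sit at plt_got_offset, plt_reloc_offset and
   plt_plt_offset; a hypothetical helper patching one entry (not the
   routine this backend actually uses; got_pcrel_offset is assumed to
   be the precomputed PC-relative distance to the symbol's GOT slot)
   could look like this:  */
#if 0
static void
example_fill_plt_entry (bfd *output_bfd, bfd_byte *plt_contents,
			bfd_vma plt_offset, bfd_vma got_pcrel_offset,
			bfd_vma reloc_index)
{
  const struct elf_x86_64_backend_data *abed
    = get_elf_x86_64_backend_data (output_bfd);
  bfd_byte *entry = plt_contents + plt_offset;

  memcpy (entry, abed->plt_entry, abed->plt_entry_size);
  /* Displacement of "jmpq *name@GOTPC(%rip)".  */
  bfd_put_32 (output_bfd, got_pcrel_offset, entry + abed->plt_got_offset);
  /* Index of this symbol's relocation in .rela.plt.  */
  bfd_put_32 (output_bfd, reloc_index, entry + abed->plt_reloc_offset);
  /* Back-branch to PLT0, relative to the end of the jmp instruction.  */
  bfd_put_32 (output_bfd, - (plt_offset + abed->plt_plt_insn_end),
	      entry + abed->plt_plt_offset);
}
#endif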
650
651 #define elf_backend_arch_data &elf_x86_64_arch_bed
652
653 /* x86-64 ELF linker hash entry. */
654
655 struct elf_x86_64_link_hash_entry
656 {
657 struct elf_link_hash_entry elf;
658
659 /* Track dynamic relocs copied for this symbol. */
660 struct elf_dyn_relocs *dyn_relocs;
661
662 #define GOT_UNKNOWN 0
663 #define GOT_NORMAL 1
664 #define GOT_TLS_GD 2
665 #define GOT_TLS_IE 3
666 #define GOT_TLS_GDESC 4
667 #define GOT_TLS_GD_BOTH_P(type) \
668 ((type) == (GOT_TLS_GD | GOT_TLS_GDESC))
669 #define GOT_TLS_GD_P(type) \
670 ((type) == GOT_TLS_GD || GOT_TLS_GD_BOTH_P (type))
671 #define GOT_TLS_GDESC_P(type) \
672 ((type) == GOT_TLS_GDESC || GOT_TLS_GD_BOTH_P (type))
673 #define GOT_TLS_GD_ANY_P(type) \
674 (GOT_TLS_GD_P (type) || GOT_TLS_GDESC_P (type))
675 unsigned char tls_type;
676
677 /* Offset of the GOTPLT entry reserved for the TLS descriptor,
678 starting at the end of the jump table. */
679 bfd_vma tlsdesc_got;
680 };
681
682 #define elf_x86_64_hash_entry(ent) \
683 ((struct elf_x86_64_link_hash_entry *)(ent))
684
685 struct elf_x86_64_obj_tdata
686 {
687 struct elf_obj_tdata root;
688
689 /* tls_type for each local got entry. */
690 char *local_got_tls_type;
691
692 /* GOTPLT entries for TLS descriptors. */
693 bfd_vma *local_tlsdesc_gotent;
694 };
695
696 #define elf_x86_64_tdata(abfd) \
697 ((struct elf_x86_64_obj_tdata *) (abfd)->tdata.any)
698
699 #define elf_x86_64_local_got_tls_type(abfd) \
700 (elf_x86_64_tdata (abfd)->local_got_tls_type)
701
702 #define elf_x86_64_local_tlsdesc_gotent(abfd) \
703 (elf_x86_64_tdata (abfd)->local_tlsdesc_gotent)
704
705 #define is_x86_64_elf(bfd) \
706 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
707 && elf_tdata (bfd) != NULL \
708 && elf_object_id (bfd) == X86_64_ELF_DATA)
709
710 static bfd_boolean
711 elf_x86_64_mkobject (bfd *abfd)
712 {
713 return bfd_elf_allocate_object (abfd, sizeof (struct elf_x86_64_obj_tdata),
714 X86_64_ELF_DATA);
715 }
716
717 /* x86-64 ELF linker hash table. */
718
719 struct elf_x86_64_link_hash_table
720 {
721 struct elf_link_hash_table elf;
722
723 /* Short-cuts to get to dynamic linker sections. */
724 asection *sdynbss;
725 asection *srelbss;
726 asection *plt_eh_frame;
727
728 union
729 {
730 bfd_signed_vma refcount;
731 bfd_vma offset;
732 } tls_ld_got;
733
734 /* The amount of space used by the jump slots in the GOT. */
735 bfd_vma sgotplt_jump_table_size;
736
737 /* Small local sym cache. */
738 struct sym_cache sym_cache;
739
740 bfd_vma (*r_info) (bfd_vma, bfd_vma);
741 bfd_vma (*r_sym) (bfd_vma);
742 unsigned int pointer_r_type;
743 const char *dynamic_interpreter;
744 int dynamic_interpreter_size;
745
746 /* _TLS_MODULE_BASE_ symbol. */
747 struct bfd_link_hash_entry *tls_module_base;
748
749 /* Used by local STT_GNU_IFUNC symbols. */
750 htab_t loc_hash_table;
751 void * loc_hash_memory;
752
753 /* The offset into splt of the PLT entry for the TLS descriptor
754 resolver. Special values are 0, if not necessary (or not found
755 to be necessary yet), and -1 if needed but not determined
756 yet. */
757 bfd_vma tlsdesc_plt;
758 /* The offset into sgot of the GOT entry used by the PLT entry
759 above. */
760 bfd_vma tlsdesc_got;
761
762 /* The index of the next R_X86_64_JUMP_SLOT entry in .rela.plt. */
763 bfd_vma next_jump_slot_index;
764 /* The index of the next R_X86_64_IRELATIVE entry in .rela.plt. */
765 bfd_vma next_irelative_index;
766 };
767
768 /* Get the x86-64 ELF linker hash table from a link_info structure. */
769
770 #define elf_x86_64_hash_table(p) \
771 (elf_hash_table_id ((struct elf_link_hash_table *) ((p)->hash)) \
772 == X86_64_ELF_DATA ? ((struct elf_x86_64_link_hash_table *) ((p)->hash)) : NULL)
773
774 #define elf_x86_64_compute_jump_table_size(htab) \
775 ((htab)->elf.srelplt->reloc_count * GOT_ENTRY_SIZE)
776
777 /* Create an entry in an x86-64 ELF linker hash table. */
778
779 static struct bfd_hash_entry *
780 elf_x86_64_link_hash_newfunc (struct bfd_hash_entry *entry,
781 struct bfd_hash_table *table,
782 const char *string)
783 {
784 /* Allocate the structure if it has not already been allocated by a
785 subclass. */
786 if (entry == NULL)
787 {
788 entry = (struct bfd_hash_entry *)
789 bfd_hash_allocate (table,
790 sizeof (struct elf_x86_64_link_hash_entry));
791 if (entry == NULL)
792 return entry;
793 }
794
795 /* Call the allocation method of the superclass. */
796 entry = _bfd_elf_link_hash_newfunc (entry, table, string);
797 if (entry != NULL)
798 {
799 struct elf_x86_64_link_hash_entry *eh;
800
801 eh = (struct elf_x86_64_link_hash_entry *) entry;
802 eh->dyn_relocs = NULL;
803 eh->tls_type = GOT_UNKNOWN;
804 eh->tlsdesc_got = (bfd_vma) -1;
805 }
806
807 return entry;
808 }
809
810 /* Compute a hash of a local hash entry.  We use elf_link_hash_entry
811 for local symbols so that we can handle local STT_GNU_IFUNC symbols
812 as global symbols.  We reuse indx and dynstr_index for the local
813 symbol hash since they aren't used by global symbols in this backend. */
814
815 static hashval_t
816 elf_x86_64_local_htab_hash (const void *ptr)
817 {
818 struct elf_link_hash_entry *h
819 = (struct elf_link_hash_entry *) ptr;
820 return ELF_LOCAL_SYMBOL_HASH (h->indx, h->dynstr_index);
821 }
822
823 /* Compare local hash entries. */
824
825 static int
826 elf_x86_64_local_htab_eq (const void *ptr1, const void *ptr2)
827 {
828 struct elf_link_hash_entry *h1
829 = (struct elf_link_hash_entry *) ptr1;
830 struct elf_link_hash_entry *h2
831 = (struct elf_link_hash_entry *) ptr2;
832
833 return h1->indx == h2->indx && h1->dynstr_index == h2->dynstr_index;
834 }
835
836 /* Find and/or create a hash entry for a local symbol. */
837
838 static struct elf_link_hash_entry *
839 elf_x86_64_get_local_sym_hash (struct elf_x86_64_link_hash_table *htab,
840 bfd *abfd, const Elf_Internal_Rela *rel,
841 bfd_boolean create)
842 {
843 struct elf_x86_64_link_hash_entry e, *ret;
844 asection *sec = abfd->sections;
845 hashval_t h = ELF_LOCAL_SYMBOL_HASH (sec->id,
846 htab->r_sym (rel->r_info));
847 void **slot;
848
849 e.elf.indx = sec->id;
850 e.elf.dynstr_index = htab->r_sym (rel->r_info);
851 slot = htab_find_slot_with_hash (htab->loc_hash_table, &e, h,
852 create ? INSERT : NO_INSERT);
853
854 if (!slot)
855 return NULL;
856
857 if (*slot)
858 {
859 ret = (struct elf_x86_64_link_hash_entry *) *slot;
860 return &ret->elf;
861 }
862
863 ret = (struct elf_x86_64_link_hash_entry *)
864 objalloc_alloc ((struct objalloc *) htab->loc_hash_memory,
865 sizeof (struct elf_x86_64_link_hash_entry));
866 if (ret)
867 {
868 memset (ret, 0, sizeof (*ret));
869 ret->elf.indx = sec->id;
870 ret->elf.dynstr_index = htab->r_sym (rel->r_info);
871 ret->elf.dynindx = -1;
872 *slot = ret;
873 }
874 return &ret->elf;
875 }
876
877 /* Create an X86-64 ELF linker hash table. */
878
879 static struct bfd_link_hash_table *
880 elf_x86_64_link_hash_table_create (bfd *abfd)
881 {
882 struct elf_x86_64_link_hash_table *ret;
883 bfd_size_type amt = sizeof (struct elf_x86_64_link_hash_table);
884
885 ret = (struct elf_x86_64_link_hash_table *) bfd_malloc (amt);
886 if (ret == NULL)
887 return NULL;
888
889 if (!_bfd_elf_link_hash_table_init (&ret->elf, abfd,
890 elf_x86_64_link_hash_newfunc,
891 sizeof (struct elf_x86_64_link_hash_entry),
892 X86_64_ELF_DATA))
893 {
894 free (ret);
895 return NULL;
896 }
897
898 ret->sdynbss = NULL;
899 ret->srelbss = NULL;
900 ret->plt_eh_frame = NULL;
901 ret->sym_cache.abfd = NULL;
902 ret->tlsdesc_plt = 0;
903 ret->tlsdesc_got = 0;
904 ret->tls_ld_got.refcount = 0;
905 ret->sgotplt_jump_table_size = 0;
906 ret->tls_module_base = NULL;
907 ret->next_jump_slot_index = 0;
908 ret->next_irelative_index = 0;
909
910 if (ABI_64_P (abfd))
911 {
912 ret->r_info = elf64_r_info;
913 ret->r_sym = elf64_r_sym;
914 ret->pointer_r_type = R_X86_64_64;
915 ret->dynamic_interpreter = ELF64_DYNAMIC_INTERPRETER;
916 ret->dynamic_interpreter_size = sizeof ELF64_DYNAMIC_INTERPRETER;
917 }
918 else
919 {
920 ret->r_info = elf32_r_info;
921 ret->r_sym = elf32_r_sym;
922 ret->pointer_r_type = R_X86_64_32;
923 ret->dynamic_interpreter = ELF32_DYNAMIC_INTERPRETER;
924 ret->dynamic_interpreter_size = sizeof ELF32_DYNAMIC_INTERPRETER;
925 }
926
927 ret->loc_hash_table = htab_try_create (1024,
928 elf_x86_64_local_htab_hash,
929 elf_x86_64_local_htab_eq,
930 NULL);
931 ret->loc_hash_memory = objalloc_create ();
932 if (!ret->loc_hash_table || !ret->loc_hash_memory)
933 {
934 free (ret);
935 return NULL;
936 }
937
938 return &ret->elf.root;
939 }
940
941 /* Destroy an X86-64 ELF linker hash table. */
942
943 static void
944 elf_x86_64_link_hash_table_free (struct bfd_link_hash_table *hash)
945 {
946 struct elf_x86_64_link_hash_table *htab
947 = (struct elf_x86_64_link_hash_table *) hash;
948
949 if (htab->loc_hash_table)
950 htab_delete (htab->loc_hash_table);
951 if (htab->loc_hash_memory)
952 objalloc_free ((struct objalloc *) htab->loc_hash_memory);
953 _bfd_generic_link_hash_table_free (hash);
954 }
955
956 /* Create .plt, .rela.plt, .got, .got.plt, .rela.got, .dynbss, and
957 .rela.bss sections in DYNOBJ, and set up shortcuts to them in our
958 hash table. */
959
960 static bfd_boolean
961 elf_x86_64_create_dynamic_sections (bfd *dynobj,
962 struct bfd_link_info *info)
963 {
964 struct elf_x86_64_link_hash_table *htab;
965
966 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
967 return FALSE;
968
969 htab = elf_x86_64_hash_table (info);
970 if (htab == NULL)
971 return FALSE;
972
973 htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
974 if (!info->shared)
975 htab->srelbss = bfd_get_linker_section (dynobj, ".rela.bss");
976
977 if (!htab->sdynbss
978 || (!info->shared && !htab->srelbss))
979 abort ();
980
981 if (!info->no_ld_generated_unwind_info
982 && htab->plt_eh_frame == NULL
983 && htab->elf.splt != NULL)
984 {
985 flagword flags = (SEC_ALLOC | SEC_LOAD | SEC_READONLY
986 | SEC_HAS_CONTENTS | SEC_IN_MEMORY
987 | SEC_LINKER_CREATED);
988 htab->plt_eh_frame
989 = bfd_make_section_anyway_with_flags (dynobj, ".eh_frame", flags);
990 if (htab->plt_eh_frame == NULL
991 || !bfd_set_section_alignment (dynobj, htab->plt_eh_frame, 3))
992 return FALSE;
993 }
994 return TRUE;
995 }
996
997 /* Copy the extra info we tack onto an elf_link_hash_entry. */
998
999 static void
1000 elf_x86_64_copy_indirect_symbol (struct bfd_link_info *info,
1001 struct elf_link_hash_entry *dir,
1002 struct elf_link_hash_entry *ind)
1003 {
1004 struct elf_x86_64_link_hash_entry *edir, *eind;
1005
1006 edir = (struct elf_x86_64_link_hash_entry *) dir;
1007 eind = (struct elf_x86_64_link_hash_entry *) ind;
1008
1009 if (eind->dyn_relocs != NULL)
1010 {
1011 if (edir->dyn_relocs != NULL)
1012 {
1013 struct elf_dyn_relocs **pp;
1014 struct elf_dyn_relocs *p;
1015
1016 /* Add reloc counts against the indirect sym to the direct sym
1017 list. Merge any entries against the same section. */
1018 for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
1019 {
1020 struct elf_dyn_relocs *q;
1021
1022 for (q = edir->dyn_relocs; q != NULL; q = q->next)
1023 if (q->sec == p->sec)
1024 {
1025 q->pc_count += p->pc_count;
1026 q->count += p->count;
1027 *pp = p->next;
1028 break;
1029 }
1030 if (q == NULL)
1031 pp = &p->next;
1032 }
1033 *pp = edir->dyn_relocs;
1034 }
1035
1036 edir->dyn_relocs = eind->dyn_relocs;
1037 eind->dyn_relocs = NULL;
1038 }
1039
1040 if (ind->root.type == bfd_link_hash_indirect
1041 && dir->got.refcount <= 0)
1042 {
1043 edir->tls_type = eind->tls_type;
1044 eind->tls_type = GOT_UNKNOWN;
1045 }
1046
1047 if (ELIMINATE_COPY_RELOCS
1048 && ind->root.type != bfd_link_hash_indirect
1049 && dir->dynamic_adjusted)
1050 {
1051 /* If called to transfer flags for a weakdef during processing
1052 of elf_adjust_dynamic_symbol, don't copy non_got_ref.
1053 We clear it ourselves for ELIMINATE_COPY_RELOCS. */
1054 dir->ref_dynamic |= ind->ref_dynamic;
1055 dir->ref_regular |= ind->ref_regular;
1056 dir->ref_regular_nonweak |= ind->ref_regular_nonweak;
1057 dir->needs_plt |= ind->needs_plt;
1058 dir->pointer_equality_needed |= ind->pointer_equality_needed;
1059 }
1060 else
1061 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
1062 }
1063
1064 static bfd_boolean
1065 elf64_x86_64_elf_object_p (bfd *abfd)
1066 {
1067 /* Set the right machine number for an x86-64 elf64 file. */
1068 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64);
1069 return TRUE;
1070 }
1071
1072 static bfd_boolean
1073 elf32_x86_64_elf_object_p (bfd *abfd)
1074 {
1075 /* Set the right machine number for an x86-64 elf32 file. */
1076 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32);
1077 return TRUE;
1078 }
1079
1080 /* Return TRUE if the TLS access code sequence supports a transition
1081 from R_TYPE. */
1082
1083 static bfd_boolean
1084 elf_x86_64_check_tls_transition (bfd *abfd,
1085 struct bfd_link_info *info,
1086 asection *sec,
1087 bfd_byte *contents,
1088 Elf_Internal_Shdr *symtab_hdr,
1089 struct elf_link_hash_entry **sym_hashes,
1090 unsigned int r_type,
1091 const Elf_Internal_Rela *rel,
1092 const Elf_Internal_Rela *relend)
1093 {
1094 unsigned int val;
1095 unsigned long r_symndx;
1096 struct elf_link_hash_entry *h;
1097 bfd_vma offset;
1098 struct elf_x86_64_link_hash_table *htab;
1099
1100 /* Get the section contents. */
1101 if (contents == NULL)
1102 {
1103 if (elf_section_data (sec)->this_hdr.contents != NULL)
1104 contents = elf_section_data (sec)->this_hdr.contents;
1105 else
1106 {
1107 /* FIXME: How to better handle error condition? */
1108 if (!bfd_malloc_and_get_section (abfd, sec, &contents))
1109 return FALSE;
1110
1111 /* Cache the section contents for elf_link_input_bfd. */
1112 elf_section_data (sec)->this_hdr.contents = contents;
1113 }
1114 }
1115
1116 htab = elf_x86_64_hash_table (info);
1117 offset = rel->r_offset;
1118 switch (r_type)
1119 {
1120 case R_X86_64_TLSGD:
1121 case R_X86_64_TLSLD:
1122 if ((rel + 1) >= relend)
1123 return FALSE;
1124
1125 if (r_type == R_X86_64_TLSGD)
1126 {
1127 /* Check transition from the GD access model.  For 64-bit, only
1128 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1129 .word 0x6666; rex64; call __tls_get_addr
1130 can transition to a different access model.  For 32-bit, only
1131 leaq foo@tlsgd(%rip), %rdi
1132 .word 0x6666; rex64; call __tls_get_addr
1133 can transition to a different access model. */
1134
1135 static const unsigned char call[] = { 0x66, 0x66, 0x48, 0xe8 };
1136 static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d };
1137
1138 if ((offset + 12) > sec->size
1139 || memcmp (contents + offset + 4, call, 4) != 0)
1140 return FALSE;
1141
1142 if (ABI_64_P (abfd))
1143 {
1144 if (offset < 4
1145 || memcmp (contents + offset - 4, leaq, 4) != 0)
1146 return FALSE;
1147 }
1148 else
1149 {
1150 if (offset < 3
1151 || memcmp (contents + offset - 3, leaq + 1, 3) != 0)
1152 return FALSE;
1153 }
1154 }
1155 else
1156 {
1157 /* Check transition from the LD access model.  Only
1158 leaq foo@tlsld(%rip), %rdi;
1159 call __tls_get_addr
1160 can transition to a different access model. */
1161
1162 static const unsigned char lea[] = { 0x48, 0x8d, 0x3d };
1163
1164 if (offset < 3 || (offset + 9) > sec->size)
1165 return FALSE;
1166
1167 if (memcmp (contents + offset - 3, lea, 3) != 0
1168 || 0xe8 != *(contents + offset + 4))
1169 return FALSE;
1170 }
1171
1172 r_symndx = htab->r_sym (rel[1].r_info);
1173 if (r_symndx < symtab_hdr->sh_info)
1174 return FALSE;
1175
1176 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1177 /* Use strncmp to check __tls_get_addr since __tls_get_addr
1178 may be versioned. */
1179 return (h != NULL
1180 && h->root.root.string != NULL
1181 && (ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PC32
1182 || ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PLT32)
1183 && (strncmp (h->root.root.string,
1184 "__tls_get_addr", 14) == 0));
1185
1186 case R_X86_64_GOTTPOFF:
1187 /* Check transition from IE access model:
1188 mov foo@gottpoff(%rip), %reg
1189 add foo@gottpoff(%rip), %reg
1190 */
1191
1192 /* Check REX prefix first. */
1193 if (offset >= 3 && (offset + 4) <= sec->size)
1194 {
1195 val = bfd_get_8 (abfd, contents + offset - 3);
1196 if (val != 0x48 && val != 0x4c)
1197 {
1198 /* X32 may have 0x44 REX prefix or no REX prefix. */
1199 if (ABI_64_P (abfd))
1200 return FALSE;
1201 }
1202 }
1203 else
1204 {
1205 /* X32 may not have any REX prefix. */
1206 if (ABI_64_P (abfd))
1207 return FALSE;
1208 if (offset < 2 || (offset + 3) > sec->size)
1209 return FALSE;
1210 }
1211
1212 val = bfd_get_8 (abfd, contents + offset - 2);
1213 if (val != 0x8b && val != 0x03)
1214 return FALSE;
1215
1216 val = bfd_get_8 (abfd, contents + offset - 1);
1217 return (val & 0xc7) == 5;
1218
1219 case R_X86_64_GOTPC32_TLSDESC:
1220 /* Check transition from GDesc access model:
1221 leaq x@tlsdesc(%rip), %rax
1222
1223 Make sure it's a leaq adding rip to a 32-bit offset
1224 into any register, although it's probably almost always
1225 going to be rax. */
1226
1227 if (offset < 3 || (offset + 4) > sec->size)
1228 return FALSE;
1229
1230 val = bfd_get_8 (abfd, contents + offset - 3);
1231 if ((val & 0xfb) != 0x48)
1232 return FALSE;
1233
1234 if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d)
1235 return FALSE;
1236
1237 val = bfd_get_8 (abfd, contents + offset - 1);
1238 return (val & 0xc7) == 0x05;
1239
1240 case R_X86_64_TLSDESC_CALL:
1241 /* Check transition from GDesc access model:
1242 call *x@tlsdesc(%rax)
1243 */
1244 if (offset + 2 <= sec->size)
1245 {
1246 /* Make sure that it's a call *x@tlsdesc(%rax). */
1247 static const unsigned char call[] = { 0xff, 0x10 };
1248 return memcmp (contents + offset, call, 2) == 0;
1249 }
1250
1251 return FALSE;
1252
1253 default:
1254 abort ();
1255 }
1256 }
1257
1258 /* Return TRUE if the TLS access transition is OK or no transition
1259 will be performed. Update R_TYPE if there is a transition. */
1260
1261 static bfd_boolean
1262 elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd,
1263 asection *sec, bfd_byte *contents,
1264 Elf_Internal_Shdr *symtab_hdr,
1265 struct elf_link_hash_entry **sym_hashes,
1266 unsigned int *r_type, int tls_type,
1267 const Elf_Internal_Rela *rel,
1268 const Elf_Internal_Rela *relend,
1269 struct elf_link_hash_entry *h,
1270 unsigned long r_symndx)
1271 {
1272 unsigned int from_type = *r_type;
1273 unsigned int to_type = from_type;
1274 bfd_boolean check = TRUE;
1275
1276 /* Skip TLS transition for functions. */
1277 if (h != NULL
1278 && (h->type == STT_FUNC
1279 || h->type == STT_GNU_IFUNC))
1280 return TRUE;
1281
1282 switch (from_type)
1283 {
1284 case R_X86_64_TLSGD:
1285 case R_X86_64_GOTPC32_TLSDESC:
1286 case R_X86_64_TLSDESC_CALL:
1287 case R_X86_64_GOTTPOFF:
1288 if (info->executable)
1289 {
1290 if (h == NULL)
1291 to_type = R_X86_64_TPOFF32;
1292 else
1293 to_type = R_X86_64_GOTTPOFF;
1294 }
1295
1296 /* When we are called from elf_x86_64_relocate_section,
1297 CONTENTS isn't NULL and there may be additional transitions
1298 based on TLS_TYPE. */
1299 if (contents != NULL)
1300 {
1301 unsigned int new_to_type = to_type;
1302
1303 if (info->executable
1304 && h != NULL
1305 && h->dynindx == -1
1306 && tls_type == GOT_TLS_IE)
1307 new_to_type = R_X86_64_TPOFF32;
1308
1309 if (to_type == R_X86_64_TLSGD
1310 || to_type == R_X86_64_GOTPC32_TLSDESC
1311 || to_type == R_X86_64_TLSDESC_CALL)
1312 {
1313 if (tls_type == GOT_TLS_IE)
1314 new_to_type = R_X86_64_GOTTPOFF;
1315 }
1316
1317 /* We checked the transition before when we were called from
1318 elf_x86_64_check_relocs. We only want to check the new
1319 transition which hasn't been checked before. */
1320 check = new_to_type != to_type && from_type == to_type;
1321 to_type = new_to_type;
1322 }
1323
1324 break;
1325
1326 case R_X86_64_TLSLD:
1327 if (info->executable)
1328 to_type = R_X86_64_TPOFF32;
1329 break;
1330
1331 default:
1332 return TRUE;
1333 }
1334
1335 /* Return TRUE if there is no transition. */
1336 if (from_type == to_type)
1337 return TRUE;
1338
1339 /* Check if the transition can be performed. */
1340 if (check
1341 && ! elf_x86_64_check_tls_transition (abfd, info, sec, contents,
1342 symtab_hdr, sym_hashes,
1343 from_type, rel, relend))
1344 {
1345 reloc_howto_type *from, *to;
1346 const char *name;
1347
1348 from = elf_x86_64_rtype_to_howto (abfd, from_type);
1349 to = elf_x86_64_rtype_to_howto (abfd, to_type);
1350
1351 if (h)
1352 name = h->root.root.string;
1353 else
1354 {
1355 struct elf_x86_64_link_hash_table *htab;
1356
1357 htab = elf_x86_64_hash_table (info);
1358 if (htab == NULL)
1359 name = "*unknown*";
1360 else
1361 {
1362 Elf_Internal_Sym *isym;
1363
1364 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1365 abfd, r_symndx);
1366 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1367 }
1368 }
1369
1370 (*_bfd_error_handler)
1371 (_("%B: TLS transition from %s to %s against `%s' at 0x%lx "
1372 "in section `%A' failed"),
1373 abfd, sec, from->name, to->name, name,
1374 (unsigned long) rel->r_offset);
1375 bfd_set_error (bfd_error_bad_value);
1376 return FALSE;
1377 }
1378
1379 *r_type = to_type;
1380 return TRUE;
1381 }
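
/* Illustrative only, kept out of the build: when the transition above
   selects R_X86_64_TPOFF32 for a 64-bit GD sequence, the relocation
   phase can rewrite the 16 bytes checked in
   elf_x86_64_check_tls_transition into an LE access, i.e.
	movq %fs:0, %rax; leaq foo@tpoff(%rax), %rax
   One possible encoding of that replacement (the trailing four bytes
   receive the TPOFF value) is sketched below; this array is an
   example, not the template the backend actually installs.  */
#if 0
static const bfd_byte example_tlsgd_to_le[16] =
{
  0x64, 0x48, 0x8b, 0x04, 0x25,		/* movq %fs:0, %rax */
  0, 0, 0, 0,
  0x48, 0x8d, 0x80,			/* leaq foo@tpoff(%rax), %rax */
  0, 0, 0, 0
};
#endif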
1382
1383 /* Look through the relocs for a section during the first phase, and
1384 calculate needed space in the global offset table, procedure
1385 linkage table, and dynamic reloc sections. */
1386
1387 static bfd_boolean
1388 elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info,
1389 asection *sec,
1390 const Elf_Internal_Rela *relocs)
1391 {
1392 struct elf_x86_64_link_hash_table *htab;
1393 Elf_Internal_Shdr *symtab_hdr;
1394 struct elf_link_hash_entry **sym_hashes;
1395 const Elf_Internal_Rela *rel;
1396 const Elf_Internal_Rela *rel_end;
1397 asection *sreloc;
1398
1399 if (info->relocatable)
1400 return TRUE;
1401
1402 BFD_ASSERT (is_x86_64_elf (abfd));
1403
1404 htab = elf_x86_64_hash_table (info);
1405 if (htab == NULL)
1406 return FALSE;
1407
1408 symtab_hdr = &elf_symtab_hdr (abfd);
1409 sym_hashes = elf_sym_hashes (abfd);
1410
1411 sreloc = NULL;
1412
1413 rel_end = relocs + sec->reloc_count;
1414 for (rel = relocs; rel < rel_end; rel++)
1415 {
1416 unsigned int r_type;
1417 unsigned long r_symndx;
1418 struct elf_link_hash_entry *h;
1419 Elf_Internal_Sym *isym;
1420 const char *name;
1421
1422 r_symndx = htab->r_sym (rel->r_info);
1423 r_type = ELF32_R_TYPE (rel->r_info);
1424
1425 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
1426 {
1427 (*_bfd_error_handler) (_("%B: bad symbol index: %d"),
1428 abfd, r_symndx);
1429 return FALSE;
1430 }
1431
1432 if (r_symndx < symtab_hdr->sh_info)
1433 {
1434 /* A local symbol. */
1435 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1436 abfd, r_symndx);
1437 if (isym == NULL)
1438 return FALSE;
1439
1440 /* Check relocation against local STT_GNU_IFUNC symbol. */
1441 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
1442 {
1443 h = elf_x86_64_get_local_sym_hash (htab, abfd, rel,
1444 TRUE);
1445 if (h == NULL)
1446 return FALSE;
1447
1448 /* Fake a STT_GNU_IFUNC symbol. */
1449 h->type = STT_GNU_IFUNC;
1450 h->def_regular = 1;
1451 h->ref_regular = 1;
1452 h->forced_local = 1;
1453 h->root.type = bfd_link_hash_defined;
1454 }
1455 else
1456 h = NULL;
1457 }
1458 else
1459 {
1460 isym = NULL;
1461 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1462 while (h->root.type == bfd_link_hash_indirect
1463 || h->root.type == bfd_link_hash_warning)
1464 h = (struct elf_link_hash_entry *) h->root.u.i.link;
1465 }
1466
1467 /* Check invalid x32 relocations. */
1468 if (!ABI_64_P (abfd))
1469 switch (r_type)
1470 {
1471 default:
1472 break;
1473
1474 case R_X86_64_DTPOFF64:
1475 case R_X86_64_TPOFF64:
1476 case R_X86_64_PC64:
1477 case R_X86_64_GOTOFF64:
1478 case R_X86_64_GOT64:
1479 case R_X86_64_GOTPCREL64:
1480 case R_X86_64_GOTPC64:
1481 case R_X86_64_GOTPLT64:
1482 case R_X86_64_PLTOFF64:
1483 {
1484 if (h)
1485 name = h->root.root.string;
1486 else
1487 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1488 NULL);
1489 (*_bfd_error_handler)
1490 (_("%B: relocation %s against symbol `%s' isn't "
1491 "supported in x32 mode"), abfd,
1492 x86_64_elf_howto_table[r_type].name, name);
1493 bfd_set_error (bfd_error_bad_value);
1494 return FALSE;
1495 }
1496 break;
1497 }
1498
1499 if (h != NULL)
1500 {
1501 /* Create the ifunc sections for static executables. If we
1502 never see an indirect function symbol and are not building
1503 a static executable, those sections will be empty and
1504 won't appear in the output. */
1505 switch (r_type)
1506 {
1507 default:
1508 break;
1509
1510 case R_X86_64_32S:
1511 case R_X86_64_32:
1512 case R_X86_64_64:
1513 case R_X86_64_PC32:
1514 case R_X86_64_PC64:
1515 case R_X86_64_PLT32:
1516 case R_X86_64_GOTPCREL:
1517 case R_X86_64_GOTPCREL64:
1518 if (htab->elf.dynobj == NULL)
1519 htab->elf.dynobj = abfd;
1520 if (!_bfd_elf_create_ifunc_sections (htab->elf.dynobj, info))
1521 return FALSE;
1522 break;
1523 }
1524
1525 /* It is referenced by a non-shared object. */
1526 h->ref_regular = 1;
1527 }
1528
1529 if (! elf_x86_64_tls_transition (info, abfd, sec, NULL,
1530 symtab_hdr, sym_hashes,
1531 &r_type, GOT_UNKNOWN,
1532 rel, rel_end, h, r_symndx))
1533 return FALSE;
1534
1535 switch (r_type)
1536 {
1537 case R_X86_64_TLSLD:
1538 htab->tls_ld_got.refcount += 1;
1539 goto create_got;
1540
1541 case R_X86_64_TPOFF32:
1542 if (!info->executable && ABI_64_P (abfd))
1543 {
1544 if (h)
1545 name = h->root.root.string;
1546 else
1547 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1548 NULL);
1549 (*_bfd_error_handler)
1550 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
1551 abfd,
1552 x86_64_elf_howto_table[r_type].name, name);
1553 bfd_set_error (bfd_error_bad_value);
1554 return FALSE;
1555 }
1556 break;
1557
1558 case R_X86_64_GOTTPOFF:
1559 if (!info->executable)
1560 info->flags |= DF_STATIC_TLS;
1561 /* Fall through */
1562
1563 case R_X86_64_GOT32:
1564 case R_X86_64_GOTPCREL:
1565 case R_X86_64_TLSGD:
1566 case R_X86_64_GOT64:
1567 case R_X86_64_GOTPCREL64:
1568 case R_X86_64_GOTPLT64:
1569 case R_X86_64_GOTPC32_TLSDESC:
1570 case R_X86_64_TLSDESC_CALL:
1571 /* This symbol requires a global offset table entry. */
1572 {
1573 int tls_type, old_tls_type;
1574
1575 switch (r_type)
1576 {
1577 default: tls_type = GOT_NORMAL; break;
1578 case R_X86_64_TLSGD: tls_type = GOT_TLS_GD; break;
1579 case R_X86_64_GOTTPOFF: tls_type = GOT_TLS_IE; break;
1580 case R_X86_64_GOTPC32_TLSDESC:
1581 case R_X86_64_TLSDESC_CALL:
1582 tls_type = GOT_TLS_GDESC; break;
1583 }
1584
1585 if (h != NULL)
1586 {
1587 if (r_type == R_X86_64_GOTPLT64)
1588 {
1589 /* This relocation indicates that we also need
1590 a PLT entry, as this is a function. We don't need
1591 a PLT entry for local symbols. */
1592 h->needs_plt = 1;
1593 h->plt.refcount += 1;
1594 }
1595 h->got.refcount += 1;
1596 old_tls_type = elf_x86_64_hash_entry (h)->tls_type;
1597 }
1598 else
1599 {
1600 bfd_signed_vma *local_got_refcounts;
1601
1602 /* This is a global offset table entry for a local symbol. */
1603 local_got_refcounts = elf_local_got_refcounts (abfd);
1604 if (local_got_refcounts == NULL)
1605 {
1606 bfd_size_type size;
1607
1608 size = symtab_hdr->sh_info;
1609 size *= sizeof (bfd_signed_vma)
1610 + sizeof (bfd_vma) + sizeof (char);
1611 local_got_refcounts = ((bfd_signed_vma *)
1612 bfd_zalloc (abfd, size));
1613 if (local_got_refcounts == NULL)
1614 return FALSE;
1615 elf_local_got_refcounts (abfd) = local_got_refcounts;
1616 elf_x86_64_local_tlsdesc_gotent (abfd)
1617 = (bfd_vma *) (local_got_refcounts + symtab_hdr->sh_info);
1618 elf_x86_64_local_got_tls_type (abfd)
1619 = (char *) (local_got_refcounts + 2 * symtab_hdr->sh_info);
1620 }
1621 local_got_refcounts[r_symndx] += 1;
1622 old_tls_type
1623 = elf_x86_64_local_got_tls_type (abfd) [r_symndx];
1624 }
1625
1626 /* If a TLS symbol is accessed using IE at least once,
1627 there is no point in using a dynamic model for it. */
1628 if (old_tls_type != tls_type && old_tls_type != GOT_UNKNOWN
1629 && (! GOT_TLS_GD_ANY_P (old_tls_type)
1630 || tls_type != GOT_TLS_IE))
1631 {
1632 if (old_tls_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (tls_type))
1633 tls_type = old_tls_type;
1634 else if (GOT_TLS_GD_ANY_P (old_tls_type)
1635 && GOT_TLS_GD_ANY_P (tls_type))
1636 tls_type |= old_tls_type;
1637 else
1638 {
1639 if (h)
1640 name = h->root.root.string;
1641 else
1642 name = bfd_elf_sym_name (abfd, symtab_hdr,
1643 isym, NULL);
1644 (*_bfd_error_handler)
1645 (_("%B: '%s' accessed both as normal and thread local symbol"),
1646 abfd, name);
1647 return FALSE;
1648 }
1649 }
1650
1651 if (old_tls_type != tls_type)
1652 {
1653 if (h != NULL)
1654 elf_x86_64_hash_entry (h)->tls_type = tls_type;
1655 else
1656 elf_x86_64_local_got_tls_type (abfd) [r_symndx] = tls_type;
1657 }
1658 }
1659 /* Fall through */
1660
1661 case R_X86_64_GOTOFF64:
1662 case R_X86_64_GOTPC32:
1663 case R_X86_64_GOTPC64:
1664 create_got:
1665 if (htab->elf.sgot == NULL)
1666 {
1667 if (htab->elf.dynobj == NULL)
1668 htab->elf.dynobj = abfd;
1669 if (!_bfd_elf_create_got_section (htab->elf.dynobj,
1670 info))
1671 return FALSE;
1672 }
1673 break;
1674
1675 case R_X86_64_PLT32:
1676 /* This symbol requires a procedure linkage table entry. We
1677 actually build the entry in adjust_dynamic_symbol,
1678 because this might be a case of linking PIC code which is
1679 never referenced by a dynamic object, in which case we
1680 don't need to generate a procedure linkage table entry
1681 after all. */
1682
1683 /* If this is a local symbol, we resolve it directly without
1684 creating a procedure linkage table entry. */
1685 if (h == NULL)
1686 continue;
1687
1688 h->needs_plt = 1;
1689 h->plt.refcount += 1;
1690 break;
1691
1692 case R_X86_64_PLTOFF64:
1693 /* This tries to form the 'address' of a function relative
1694 to the GOT. For global symbols we need a PLT entry. */
1695 if (h != NULL)
1696 {
1697 h->needs_plt = 1;
1698 h->plt.refcount += 1;
1699 }
1700 goto create_got;
1701
1702 case R_X86_64_32:
1703 if (!ABI_64_P (abfd))
1704 goto pointer;
1705 case R_X86_64_8:
1706 case R_X86_64_16:
1707 case R_X86_64_32S:
1708 /* Let's help debug shared library creation. These relocs
1709 cannot be used in shared libs. Don't error out for
1710 sections we don't care about, such as debug sections or
1711 non-constant sections. */
1712 if (info->shared
1713 && (sec->flags & SEC_ALLOC) != 0
1714 && (sec->flags & SEC_READONLY) != 0)
1715 {
1716 if (h)
1717 name = h->root.root.string;
1718 else
1719 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1720 (*_bfd_error_handler)
1721 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
1722 abfd, x86_64_elf_howto_table[r_type].name, name);
1723 bfd_set_error (bfd_error_bad_value);
1724 return FALSE;
1725 }
1726 /* Fall through. */
1727
1728 case R_X86_64_PC8:
1729 case R_X86_64_PC16:
1730 case R_X86_64_PC32:
1731 case R_X86_64_PC64:
1732 case R_X86_64_64:
1733 pointer:
1734 if (h != NULL && info->executable)
1735 {
1736 /* If this reloc is in a read-only section, we might
1737 need a copy reloc. We can't check reliably at this
1738 stage whether the section is read-only, as input
1739 sections have not yet been mapped to output sections.
1740 Tentatively set the flag for now, and correct in
1741 adjust_dynamic_symbol. */
1742 h->non_got_ref = 1;
1743
1744 /* We may need a .plt entry if the function this reloc
1745 refers to is in a shared lib. */
1746 h->plt.refcount += 1;
1747 if (r_type != R_X86_64_PC32 && r_type != R_X86_64_PC64)
1748 h->pointer_equality_needed = 1;
1749 }
1750
1751 /* If we are creating a shared library, and this is a reloc
1752 against a global symbol, or a non PC relative reloc
1753 against a local symbol, then we need to copy the reloc
1754 into the shared library. However, if we are linking with
1755 -Bsymbolic, we do not need to copy a reloc against a
1756 global symbol which is defined in an object we are
1757 including in the link (i.e., DEF_REGULAR is set). At
1758 this point we have not seen all the input files, so it is
1759 possible that DEF_REGULAR is not set now but will be set
1760 later (it is never cleared). In case of a weak definition,
1761 DEF_REGULAR may be cleared later by a strong definition in
1762 a shared library. We account for that possibility below by
1763 storing information in the dyn_relocs field of the hash
1764 table entry. A similar situation occurs when creating
1765 shared libraries and symbol visibility changes render the
1766 symbol local.
1767
1768 If on the other hand, we are creating an executable, we
1769 may need to keep relocations for symbols satisfied by a
1770 dynamic library if we manage to avoid copy relocs for the
1771 symbol. */
1772 if ((info->shared
1773 && (sec->flags & SEC_ALLOC) != 0
1774 && (! IS_X86_64_PCREL_TYPE (r_type)
1775 || (h != NULL
1776 && (! SYMBOLIC_BIND (info, h)
1777 || h->root.type == bfd_link_hash_defweak
1778 || !h->def_regular))))
1779 || (ELIMINATE_COPY_RELOCS
1780 && !info->shared
1781 && (sec->flags & SEC_ALLOC) != 0
1782 && h != NULL
1783 && (h->root.type == bfd_link_hash_defweak
1784 || !h->def_regular)))
1785 {
1786 struct elf_dyn_relocs *p;
1787 struct elf_dyn_relocs **head;
1788
1789 /* We must copy these reloc types into the output file.
1790 Create a reloc section in dynobj and make room for
1791 this reloc. */
1792 if (sreloc == NULL)
1793 {
1794 if (htab->elf.dynobj == NULL)
1795 htab->elf.dynobj = abfd;
1796
1797 sreloc = _bfd_elf_make_dynamic_reloc_section
1798 (sec, htab->elf.dynobj, ABI_64_P (abfd) ? 3 : 2,
1799 abfd, /*rela?*/ TRUE);
1800
1801 if (sreloc == NULL)
1802 return FALSE;
1803 }
1804
1805 /* If this is a global symbol, we count the number of
1806 relocations we need for this symbol. */
1807 if (h != NULL)
1808 {
1809 head = &((struct elf_x86_64_link_hash_entry *) h)->dyn_relocs;
1810 }
1811 else
1812 {
1813 /* Track dynamic relocs needed for local syms too.
1814 We really need local syms available to do this
1815 easily. Oh well. */
1816 asection *s;
1817 void **vpp;
1818
1819 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1820 abfd, r_symndx);
1821 if (isym == NULL)
1822 return FALSE;
1823
1824 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
1825 if (s == NULL)
1826 s = sec;
1827
1828 /* Beware of type punned pointers vs strict aliasing
1829 rules. */
1830 vpp = &(elf_section_data (s)->local_dynrel);
1831 head = (struct elf_dyn_relocs **)vpp;
1832 }
1833
1834 p = *head;
1835 if (p == NULL || p->sec != sec)
1836 {
1837 bfd_size_type amt = sizeof *p;
1838
1839 p = ((struct elf_dyn_relocs *)
1840 bfd_alloc (htab->elf.dynobj, amt));
1841 if (p == NULL)
1842 return FALSE;
1843 p->next = *head;
1844 *head = p;
1845 p->sec = sec;
1846 p->count = 0;
1847 p->pc_count = 0;
1848 }
1849
1850 p->count += 1;
1851 if (IS_X86_64_PCREL_TYPE (r_type))
1852 p->pc_count += 1;
1853 }
1854 break;
1855
1856 /* This relocation describes the C++ object vtable hierarchy.
1857 Reconstruct it for later use during GC. */
1858 case R_X86_64_GNU_VTINHERIT:
1859 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
1860 return FALSE;
1861 break;
1862
1863 /* This relocation describes which C++ vtable entries are actually
1864 used. Record for later use during GC. */
1865 case R_X86_64_GNU_VTENTRY:
1866 BFD_ASSERT (h != NULL);
1867 if (h != NULL
1868 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
1869 return FALSE;
1870 break;
1871
1872 default:
1873 break;
1874 }
1875 }
1876
1877 return TRUE;
1878 }
1879
1880 /* Return the section that should be marked against GC for a given
1881 relocation. */
1882
1883 static asection *
1884 elf_x86_64_gc_mark_hook (asection *sec,
1885 struct bfd_link_info *info,
1886 Elf_Internal_Rela *rel,
1887 struct elf_link_hash_entry *h,
1888 Elf_Internal_Sym *sym)
1889 {
1890 if (h != NULL)
1891 switch (ELF32_R_TYPE (rel->r_info))
1892 {
1893 case R_X86_64_GNU_VTINHERIT:
1894 case R_X86_64_GNU_VTENTRY:
1895 return NULL;
1896 }
1897
1898 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
1899 }
1900
1901 /* Update the got entry reference counts for the section being removed. */
1902
1903 static bfd_boolean
1904 elf_x86_64_gc_sweep_hook (bfd *abfd, struct bfd_link_info *info,
1905 asection *sec,
1906 const Elf_Internal_Rela *relocs)
1907 {
1908 struct elf_x86_64_link_hash_table *htab;
1909 Elf_Internal_Shdr *symtab_hdr;
1910 struct elf_link_hash_entry **sym_hashes;
1911 bfd_signed_vma *local_got_refcounts;
1912 const Elf_Internal_Rela *rel, *relend;
1913
1914 if (info->relocatable)
1915 return TRUE;
1916
1917 htab = elf_x86_64_hash_table (info);
1918 if (htab == NULL)
1919 return FALSE;
1920
1921 elf_section_data (sec)->local_dynrel = NULL;
1922
1923 symtab_hdr = &elf_symtab_hdr (abfd);
1924 sym_hashes = elf_sym_hashes (abfd);
1925 local_got_refcounts = elf_local_got_refcounts (abfd);
1926
1927 htab = elf_x86_64_hash_table (info);
1928 relend = relocs + sec->reloc_count;
1929 for (rel = relocs; rel < relend; rel++)
1930 {
1931 unsigned long r_symndx;
1932 unsigned int r_type;
1933 struct elf_link_hash_entry *h = NULL;
1934
1935 r_symndx = htab->r_sym (rel->r_info);
1936 if (r_symndx >= symtab_hdr->sh_info)
1937 {
1938 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1939 while (h->root.type == bfd_link_hash_indirect
1940 || h->root.type == bfd_link_hash_warning)
1941 h = (struct elf_link_hash_entry *) h->root.u.i.link;
1942 }
1943 else
1944 {
1945 /* A local symbol. */
1946 Elf_Internal_Sym *isym;
1947
1948 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1949 abfd, r_symndx);
1950
1951 /* Check relocation against local STT_GNU_IFUNC symbol. */
1952 if (isym != NULL
1953 && ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
1954 {
1955 h = elf_x86_64_get_local_sym_hash (htab, abfd, rel, FALSE);
1956 if (h == NULL)
1957 abort ();
1958 }
1959 }
1960
1961 if (h)
1962 {
1963 struct elf_x86_64_link_hash_entry *eh;
1964 struct elf_dyn_relocs **pp;
1965 struct elf_dyn_relocs *p;
1966
1967 eh = (struct elf_x86_64_link_hash_entry *) h;
1968
1969 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; pp = &p->next)
1970 if (p->sec == sec)
1971 {
1972 /* Everything must go for SEC. */
1973 *pp = p->next;
1974 break;
1975 }
1976 }
1977
1978 r_type = ELF32_R_TYPE (rel->r_info);
1979 if (! elf_x86_64_tls_transition (info, abfd, sec, NULL,
1980 symtab_hdr, sym_hashes,
1981 &r_type, GOT_UNKNOWN,
1982 rel, relend, h, r_symndx))
1983 return FALSE;
1984
1985 switch (r_type)
1986 {
1987 case R_X86_64_TLSLD:
1988 if (htab->tls_ld_got.refcount > 0)
1989 htab->tls_ld_got.refcount -= 1;
1990 break;
1991
1992 case R_X86_64_TLSGD:
1993 case R_X86_64_GOTPC32_TLSDESC:
1994 case R_X86_64_TLSDESC_CALL:
1995 case R_X86_64_GOTTPOFF:
1996 case R_X86_64_GOT32:
1997 case R_X86_64_GOTPCREL:
1998 case R_X86_64_GOT64:
1999 case R_X86_64_GOTPCREL64:
2000 case R_X86_64_GOTPLT64:
2001 if (h != NULL)
2002 {
2003 if (r_type == R_X86_64_GOTPLT64 && h->plt.refcount > 0)
2004 h->plt.refcount -= 1;
2005 if (h->got.refcount > 0)
2006 h->got.refcount -= 1;
2007 if (h->type == STT_GNU_IFUNC)
2008 {
2009 if (h->plt.refcount > 0)
2010 h->plt.refcount -= 1;
2011 }
2012 }
2013 else if (local_got_refcounts != NULL)
2014 {
2015 if (local_got_refcounts[r_symndx] > 0)
2016 local_got_refcounts[r_symndx] -= 1;
2017 }
2018 break;
2019
2020 case R_X86_64_8:
2021 case R_X86_64_16:
2022 case R_X86_64_32:
2023 case R_X86_64_64:
2024 case R_X86_64_32S:
2025 case R_X86_64_PC8:
2026 case R_X86_64_PC16:
2027 case R_X86_64_PC32:
2028 case R_X86_64_PC64:
2029 if (info->shared
2030 && (h == NULL || h->type != STT_GNU_IFUNC))
2031 break;
2032 /* Fall thru */
2033
2034 case R_X86_64_PLT32:
2035 case R_X86_64_PLTOFF64:
2036 if (h != NULL)
2037 {
2038 if (h->plt.refcount > 0)
2039 h->plt.refcount -= 1;
2040 }
2041 break;
2042
2043 default:
2044 break;
2045 }
2046 }
2047
2048 return TRUE;
2049 }
2050
2051 /* Adjust a symbol defined by a dynamic object and referenced by a
2052 regular object. The current definition is in some section of the
2053 dynamic object, but we're not including those sections. We have to
2054 change the definition to something the rest of the link can
2055 understand. */
2056
2057 static bfd_boolean
2058 elf_x86_64_adjust_dynamic_symbol (struct bfd_link_info *info,
2059 struct elf_link_hash_entry *h)
2060 {
2061 struct elf_x86_64_link_hash_table *htab;
2062 asection *s;
2063 struct elf_x86_64_link_hash_entry *eh;
2064 struct elf_dyn_relocs *p;
2065
2066 /* STT_GNU_IFUNC symbol must go through PLT. */
2067 if (h->type == STT_GNU_IFUNC)
2068 {
2069 /* All local STT_GNU_IFUNC references must be treated as local
2070 calls via local PLT. */
2071 if (h->ref_regular
2072 && SYMBOL_CALLS_LOCAL (info, h))
2073 {
2074 bfd_size_type pc_count = 0, count = 0;
2075 struct elf_dyn_relocs **pp;
2076
2077 eh = (struct elf_x86_64_link_hash_entry *) h;
2078 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
2079 {
2080 pc_count += p->pc_count;
2081 p->count -= p->pc_count;
2082 p->pc_count = 0;
2083 count += p->count;
2084 if (p->count == 0)
2085 *pp = p->next;
2086 else
2087 pp = &p->next;
2088 }
2089
2090 if (pc_count || count)
2091 {
2092 h->needs_plt = 1;
2093 h->plt.refcount += 1;
2094 h->non_got_ref = 1;
2095 }
2096 }
2097
2098 if (h->plt.refcount <= 0)
2099 {
2100 h->plt.offset = (bfd_vma) -1;
2101 h->needs_plt = 0;
2102 }
2103 return TRUE;
2104 }
2105
2106 /* If this is a function, put it in the procedure linkage table. We
2107 will fill in the contents of the procedure linkage table later,
2108 when we know the address of the .got section. */
2109 if (h->type == STT_FUNC
2110 || h->needs_plt)
2111 {
2112 if (h->plt.refcount <= 0
2113 || SYMBOL_CALLS_LOCAL (info, h)
2114 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
2115 && h->root.type == bfd_link_hash_undefweak))
2116 {
2117 /* This case can occur if we saw a PLT32 reloc in an input
2118 file, but the symbol was never referred to by a dynamic
2119 object, or if all references were garbage collected. In
2120 such a case, we don't actually need to build a procedure
2121 linkage table, and we can just do a PC32 reloc instead. */
2122 h->plt.offset = (bfd_vma) -1;
2123 h->needs_plt = 0;
2124 }
2125
2126 return TRUE;
2127 }
2128 else
2129 /* It's possible that we incorrectly decided a .plt reloc was
2130 needed for an R_X86_64_PC32 reloc to a non-function sym in
2131 check_relocs. We can't decide accurately between function and
2132 non-function syms in check_relocs; objects loaded later in
2133 the link may change h->type. So fix it now. */
2134 h->plt.offset = (bfd_vma) -1;
2135
2136 /* If this is a weak symbol, and there is a real definition, the
2137 processor independent code will have arranged for us to see the
2138 real definition first, and we can just use the same value. */
2139 if (h->u.weakdef != NULL)
2140 {
2141 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
2142 || h->u.weakdef->root.type == bfd_link_hash_defweak);
2143 h->root.u.def.section = h->u.weakdef->root.u.def.section;
2144 h->root.u.def.value = h->u.weakdef->root.u.def.value;
2145 if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
2146 h->non_got_ref = h->u.weakdef->non_got_ref;
2147 return TRUE;
2148 }
2149
2150 /* This is a reference to a symbol defined by a dynamic object which
2151 is not a function. */
2152
2153 /* If we are creating a shared library, we must presume that the
2154 only references to the symbol are via the global offset table.
2155 For such cases we need not do anything here; the relocations will
2156 be handled correctly by relocate_section. */
2157 if (info->shared)
2158 return TRUE;
2159
2160 /* If there are no references to this symbol that do not use the
2161 GOT, we don't need to generate a copy reloc. */
2162 if (!h->non_got_ref)
2163 return TRUE;
2164
2165 /* If -z nocopyreloc was given, we won't generate them either. */
2166 if (info->nocopyreloc)
2167 {
2168 h->non_got_ref = 0;
2169 return TRUE;
2170 }
2171
2172 if (ELIMINATE_COPY_RELOCS)
2173 {
2174 eh = (struct elf_x86_64_link_hash_entry *) h;
2175 for (p = eh->dyn_relocs; p != NULL; p = p->next)
2176 {
2177 s = p->sec->output_section;
2178 if (s != NULL && (s->flags & SEC_READONLY) != 0)
2179 break;
2180 }
2181
2182 /* If we didn't find any dynamic relocs in read-only sections, then
2183 we'll be keeping the dynamic relocs and avoiding the copy reloc. */
2184 if (p == NULL)
2185 {
2186 h->non_got_ref = 0;
2187 return TRUE;
2188 }
2189 }
2190
2191 /* We must allocate the symbol in our .dynbss section, which will
2192 become part of the .bss section of the executable. There will be
2193 an entry for this symbol in the .dynsym section. The dynamic
2194 object will contain position independent code, so all references
2195 from the dynamic object to this symbol will go through the global
2196 offset table. The dynamic linker will use the .dynsym entry to
2197 determine the address it must put in the global offset table, so
2198 both the dynamic object and the regular object will refer to the
2199 same memory location for the variable. */
2200
2201 htab = elf_x86_64_hash_table (info);
2202 if (htab == NULL)
2203 return FALSE;
2204
2205 /* We must generate a R_X86_64_COPY reloc to tell the dynamic linker
2206 to copy the initial value out of the dynamic object and into the
2207 runtime process image. */
2208 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
2209 {
2210 const struct elf_backend_data *bed;
2211 bed = get_elf_backend_data (info->output_bfd);
2212 htab->srelbss->size += bed->s->sizeof_rela;
2213 h->needs_copy = 1;
2214 }
2215
2216 s = htab->sdynbss;
2217
2218 return _bfd_elf_adjust_dynamic_copy (h, s);
2219 }
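
/* Editor's note: an illustrative, stand-alone sketch (not part of BFD) of
   the copy-relocation decision made by elf_x86_64_adjust_dynamic_symbol
   above for a data symbol defined in a shared object.  The struct and
   function names are hypothetical; only the logic mirrors the code (the
   real function additionally checks that the symbol has a non-zero size
   in an allocated section before reserving space).  */
#include <stdbool.h>

struct example_copyreloc_state
{
  bool shared_link;          /* info->shared */
  bool non_got_ref;          /* h->non_got_ref */
  bool nocopyreloc;          /* -z nocopyreloc (info->nocopyreloc) */
  bool dynreloc_in_rodata;   /* some dyn reloc lands in a read-only
				output section */
};

static bool
example_needs_copy_reloc (const struct example_copyreloc_state *s,
			  bool eliminate_copy_relocs)
{
  /* Shared links resolve such references through the GOT; GOT-only
     references and -z nocopyreloc never need a copy.  */
  if (s->shared_link || !s->non_got_ref || s->nocopyreloc)
    return false;
  /* With ELIMINATE_COPY_RELOCS the dynamic relocs are kept instead,
     unless one of them would land in read-only memory.  */
  if (eliminate_copy_relocs && !s->dynreloc_in_rodata)
    return false;
  /* Otherwise reserve .dynbss space and one R_X86_64_COPY reloc.  */
  return true;
}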
2220
2221 /* Allocate space in .plt, .got and associated reloc sections for
2222 dynamic relocs. */
2223
2224 static bfd_boolean
2225 elf_x86_64_allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf)
2226 {
2227 struct bfd_link_info *info;
2228 struct elf_x86_64_link_hash_table *htab;
2229 struct elf_x86_64_link_hash_entry *eh;
2230 struct elf_dyn_relocs *p;
2231 const struct elf_backend_data *bed;
2232 unsigned int plt_entry_size;
2233
2234 if (h->root.type == bfd_link_hash_indirect)
2235 return TRUE;
2236
2237 eh = (struct elf_x86_64_link_hash_entry *) h;
2238
2239 info = (struct bfd_link_info *) inf;
2240 htab = elf_x86_64_hash_table (info);
2241 if (htab == NULL)
2242 return FALSE;
2243 bed = get_elf_backend_data (info->output_bfd);
2244 plt_entry_size = GET_PLT_ENTRY_SIZE (info->output_bfd);
2245
2246 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle it
2247 here if it is defined and referenced in a non-shared object. */
2248 if (h->type == STT_GNU_IFUNC
2249 && h->def_regular)
2250 return _bfd_elf_allocate_ifunc_dyn_relocs (info, h,
2251 &eh->dyn_relocs,
2252 plt_entry_size,
2253 GOT_ENTRY_SIZE);
2254 else if (htab->elf.dynamic_sections_created
2255 && h->plt.refcount > 0)
2256 {
2257 /* Make sure this symbol is output as a dynamic symbol.
2258 Undefined weak syms won't yet be marked as dynamic. */
2259 if (h->dynindx == -1
2260 && !h->forced_local)
2261 {
2262 if (! bfd_elf_link_record_dynamic_symbol (info, h))
2263 return FALSE;
2264 }
2265
2266 if (info->shared
2267 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
2268 {
2269 asection *s = htab->elf.splt;
2270
2271 /* If this is the first .plt entry, make room for the special
2272 first entry. */
2273 if (s->size == 0)
2274 s->size += plt_entry_size;
2275
2276 h->plt.offset = s->size;
2277
2278 /* If this symbol is not defined in a regular file, and we are
2279 not generating a shared library, then set the symbol to this
2280 location in the .plt. This is required to make function
2281 pointers compare as equal between the normal executable and
2282 the shared library. */
2283 if (! info->shared
2284 && !h->def_regular)
2285 {
2286 h->root.u.def.section = s;
2287 h->root.u.def.value = h->plt.offset;
2288 }
2289
2290 /* Make room for this entry. */
2291 s->size += plt_entry_size;
2292
2293 /* We also need to make an entry in the .got.plt section, which
2294 will be placed in the .got section by the linker script. */
2295 htab->elf.sgotplt->size += GOT_ENTRY_SIZE;
2296
2297 /* We also need to make an entry in the .rela.plt section. */
2298 htab->elf.srelplt->size += bed->s->sizeof_rela;
2299 htab->elf.srelplt->reloc_count++;
2300 }
2301 else
2302 {
2303 h->plt.offset = (bfd_vma) -1;
2304 h->needs_plt = 0;
2305 }
2306 }
2307 else
2308 {
2309 h->plt.offset = (bfd_vma) -1;
2310 h->needs_plt = 0;
2311 }
2312
2313 eh->tlsdesc_got = (bfd_vma) -1;
2314
2315 /* If the R_X86_64_GOTTPOFF symbol is now local to the binary,
2316 make it an R_X86_64_TPOFF32 requiring no GOT entry. */
2317 if (h->got.refcount > 0
2318 && info->executable
2319 && h->dynindx == -1
2320 && elf_x86_64_hash_entry (h)->tls_type == GOT_TLS_IE)
2321 {
2322 h->got.offset = (bfd_vma) -1;
2323 }
2324 else if (h->got.refcount > 0)
2325 {
2326 asection *s;
2327 bfd_boolean dyn;
2328 int tls_type = elf_x86_64_hash_entry (h)->tls_type;
2329
2330 /* Make sure this symbol is output as a dynamic symbol.
2331 Undefined weak syms won't yet be marked as dynamic. */
2332 if (h->dynindx == -1
2333 && !h->forced_local)
2334 {
2335 if (! bfd_elf_link_record_dynamic_symbol (info, h))
2336 return FALSE;
2337 }
2338
2339 if (GOT_TLS_GDESC_P (tls_type))
2340 {
2341 eh->tlsdesc_got = htab->elf.sgotplt->size
2342 - elf_x86_64_compute_jump_table_size (htab);
2343 htab->elf.sgotplt->size += 2 * GOT_ENTRY_SIZE;
2344 h->got.offset = (bfd_vma) -2;
2345 }
2346 if (! GOT_TLS_GDESC_P (tls_type)
2347 || GOT_TLS_GD_P (tls_type))
2348 {
2349 s = htab->elf.sgot;
2350 h->got.offset = s->size;
2351 s->size += GOT_ENTRY_SIZE;
2352 if (GOT_TLS_GD_P (tls_type))
2353 s->size += GOT_ENTRY_SIZE;
2354 }
2355 dyn = htab->elf.dynamic_sections_created;
2356 /* R_X86_64_TLSGD needs one dynamic relocation if local symbol
2357 and two if global.
2358 R_X86_64_GOTTPOFF needs one dynamic relocation. */
2359 if ((GOT_TLS_GD_P (tls_type) && h->dynindx == -1)
2360 || tls_type == GOT_TLS_IE)
2361 htab->elf.srelgot->size += bed->s->sizeof_rela;
2362 else if (GOT_TLS_GD_P (tls_type))
2363 htab->elf.srelgot->size += 2 * bed->s->sizeof_rela;
2364 else if (! GOT_TLS_GDESC_P (tls_type)
2365 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
2366 || h->root.type != bfd_link_hash_undefweak)
2367 && (info->shared
2368 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
2369 htab->elf.srelgot->size += bed->s->sizeof_rela;
2370 if (GOT_TLS_GDESC_P (tls_type))
2371 {
2372 htab->elf.srelplt->size += bed->s->sizeof_rela;
2373 htab->tlsdesc_plt = (bfd_vma) -1;
2374 }
2375 }
2376 else
2377 h->got.offset = (bfd_vma) -1;
2378
2379 if (eh->dyn_relocs == NULL)
2380 return TRUE;
2381
2382 /* In the shared -Bsymbolic case, discard space allocated for
2383 dynamic pc-relative relocs against symbols which turn out to be
2384 defined in regular objects. For the normal shared case, discard
2385 space for pc-relative relocs that have become local due to symbol
2386 visibility changes. */
2387
2388 if (info->shared)
2389 {
2390 /* Relocs that use pc_count are those that appear on a call
2391 insn, or certain REL relocs that can be generated via assembly.
2392 We want calls to protected symbols to resolve directly to the
2393 function rather than going via the plt. If people want
2394 function pointer comparisons to work as expected then they
2395 should avoid writing weird assembly. */
2396 if (SYMBOL_CALLS_LOCAL (info, h))
2397 {
2398 struct elf_dyn_relocs **pp;
2399
2400 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
2401 {
2402 p->count -= p->pc_count;
2403 p->pc_count = 0;
2404 if (p->count == 0)
2405 *pp = p->next;
2406 else
2407 pp = &p->next;
2408 }
2409 }
2410
2411 /* Also discard relocs on undefined weak syms with non-default
2412 visibility. */
2413 if (eh->dyn_relocs != NULL
2414 && h->root.type == bfd_link_hash_undefweak)
2415 {
2416 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
2417 eh->dyn_relocs = NULL;
2418
2419 /* Make sure undefined weak symbols are output as a dynamic
2420 symbol in PIEs. */
2421 else if (h->dynindx == -1
2422 && ! h->forced_local
2423 && ! bfd_elf_link_record_dynamic_symbol (info, h))
2424 return FALSE;
2425 }
2426
2427 }
2428 else if (ELIMINATE_COPY_RELOCS)
2429 {
2430 /* For the non-shared case, discard space for relocs against
2431 symbols which turn out to need copy relocs or are not
2432 dynamic. */
2433
2434 if (!h->non_got_ref
2435 && ((h->def_dynamic
2436 && !h->def_regular)
2437 || (htab->elf.dynamic_sections_created
2438 && (h->root.type == bfd_link_hash_undefweak
2439 || h->root.type == bfd_link_hash_undefined))))
2440 {
2441 /* Make sure this symbol is output as a dynamic symbol.
2442 Undefined weak syms won't yet be marked as dynamic. */
2443 if (h->dynindx == -1
2444 && ! h->forced_local
2445 && ! bfd_elf_link_record_dynamic_symbol (info, h))
2446 return FALSE;
2447
2448 /* If that succeeded, we know we'll be keeping all the
2449 relocs. */
2450 if (h->dynindx != -1)
2451 goto keep;
2452 }
2453
2454 eh->dyn_relocs = NULL;
2455
2456 keep: ;
2457 }
2458
2459 /* Finally, allocate space. */
2460 for (p = eh->dyn_relocs; p != NULL; p = p->next)
2461 {
2462 asection * sreloc;
2463
2464 sreloc = elf_section_data (p->sec)->sreloc;
2465
2466 BFD_ASSERT (sreloc != NULL);
2467
2468 sreloc->size += p->count * bed->s->sizeof_rela;
2469 }
2470
2471 return TRUE;
2472 }
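
/* Editor's note: an illustrative, stand-alone sketch (not part of BFD) of
   the PLT space accounting done in elf_x86_64_allocate_dynrelocs above.
   The first PLT slot is reserved, and each symbol that keeps a PLT entry
   costs one PLT slot, one .got.plt slot and one .rela.plt entry.  All
   names below are hypothetical.  */
#include <stddef.h>

struct example_plt_sizes
{
  size_t plt;       /* mirrors htab->elf.splt->size */
  size_t gotplt;    /* mirrors htab->elf.sgotplt->size */
  size_t relaplt;   /* mirrors htab->elf.srelplt->size */
};

static size_t
example_reserve_plt_entry (struct example_plt_sizes *s,
			   size_t plt_entry_size,
			   size_t got_entry_size,
			   size_t sizeof_rela)
{
  size_t plt_offset;

  if (s->plt == 0)
    s->plt += plt_entry_size;	/* room for the special first entry */

  plt_offset = s->plt;		/* becomes h->plt.offset */
  s->plt += plt_entry_size;
  s->gotplt += got_entry_size;
  s->relaplt += sizeof_rela;
  return plt_offset;
}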
2473
2474 /* Allocate space in .plt, .got and associated reloc sections for
2475 local dynamic relocs. */
2476
2477 static bfd_boolean
2478 elf_x86_64_allocate_local_dynrelocs (void **slot, void *inf)
2479 {
2480 struct elf_link_hash_entry *h
2481 = (struct elf_link_hash_entry *) *slot;
2482
2483 if (h->type != STT_GNU_IFUNC
2484 || !h->def_regular
2485 || !h->ref_regular
2486 || !h->forced_local
2487 || h->root.type != bfd_link_hash_defined)
2488 abort ();
2489
2490 return elf_x86_64_allocate_dynrelocs (h, inf);
2491 }
2492
2493 /* Find any dynamic relocs that apply to read-only sections. */
2494
2495 static bfd_boolean
2496 elf_x86_64_readonly_dynrelocs (struct elf_link_hash_entry *h,
2497 void * inf)
2498 {
2499 struct elf_x86_64_link_hash_entry *eh;
2500 struct elf_dyn_relocs *p;
2501
2502 /* Skip local IFUNC symbols. */
2503 if (h->forced_local && h->type == STT_GNU_IFUNC)
2504 return TRUE;
2505
2506 eh = (struct elf_x86_64_link_hash_entry *) h;
2507 for (p = eh->dyn_relocs; p != NULL; p = p->next)
2508 {
2509 asection *s = p->sec->output_section;
2510
2511 if (s != NULL && (s->flags & SEC_READONLY) != 0)
2512 {
2513 struct bfd_link_info *info = (struct bfd_link_info *) inf;
2514
2515 info->flags |= DF_TEXTREL;
2516
2517 if (info->warn_shared_textrel && info->shared)
2518 info->callbacks->einfo (_("%P: %B: warning: relocation against `%s' in readonly section `%A'.\n"),
2519 p->sec->owner, h->root.root.string,
2520 p->sec);
2521
2522 /* Not an error, just cut short the traversal. */
2523 return FALSE;
2524 }
2525 }
2526 return TRUE;
2527 }
2528
2529 /* Convert
2530 mov foo@GOTPCREL(%rip), %reg
2531 to
2532 lea foo(%rip), %reg
2533 with the local symbol, foo. */
2534
2535 static bfd_boolean
2536 elf_x86_64_convert_mov_to_lea (bfd *abfd, asection *sec,
2537 struct bfd_link_info *link_info)
2538 {
2539 Elf_Internal_Shdr *symtab_hdr;
2540 Elf_Internal_Rela *internal_relocs;
2541 Elf_Internal_Rela *irel, *irelend;
2542 bfd_byte *contents;
2543 struct elf_x86_64_link_hash_table *htab;
2544 bfd_boolean changed_contents;
2545 bfd_boolean changed_relocs;
2546 bfd_signed_vma *local_got_refcounts;
2547
2548 /* Don't even try to convert non-ELF outputs. */
2549 if (!is_elf_hash_table (link_info->hash))
2550 return FALSE;
2551
2552 /* Nothing to do if there is no code, no relocations, or no output. */
2553 if ((sec->flags & (SEC_CODE | SEC_RELOC)) != (SEC_CODE | SEC_RELOC)
2554 || sec->reloc_count == 0
2555 || discarded_section (sec))
2556 return TRUE;
2557
2558 symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
2559
2560 /* Load the relocations for this section. */
2561 internal_relocs = (_bfd_elf_link_read_relocs
2562 (abfd, sec, NULL, (Elf_Internal_Rela *) NULL,
2563 link_info->keep_memory));
2564 if (internal_relocs == NULL)
2565 return FALSE;
2566
2567 htab = elf_x86_64_hash_table (link_info);
2568 changed_contents = FALSE;
2569 changed_relocs = FALSE;
2570 local_got_refcounts = elf_local_got_refcounts (abfd);
2571
2572 /* Get the section contents. */
2573 if (elf_section_data (sec)->this_hdr.contents != NULL)
2574 contents = elf_section_data (sec)->this_hdr.contents;
2575 else
2576 {
2577 if (!bfd_malloc_and_get_section (abfd, sec, &contents))
2578 goto error_return;
2579 }
2580
2581 irelend = internal_relocs + sec->reloc_count;
2582 for (irel = internal_relocs; irel < irelend; irel++)
2583 {
2584 unsigned int r_type = ELF32_R_TYPE (irel->r_info);
2585 unsigned int r_symndx = htab->r_sym (irel->r_info);
2586 unsigned int indx;
2587 struct elf_link_hash_entry *h;
2588
2589 if (r_type != R_X86_64_GOTPCREL)
2590 continue;
2591
2592 /* Get the symbol referred to by the reloc. */
2593 if (r_symndx < symtab_hdr->sh_info)
2594 {
2595 Elf_Internal_Sym *isym;
2596
2597 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2598 abfd, r_symndx);
2599
2600 /* STT_GNU_IFUNC must keep R_X86_64_GOTPCREL relocation. */
2601 if (ELF_ST_TYPE (isym->st_info) != STT_GNU_IFUNC
2602 && bfd_get_8 (abfd,
2603 contents + irel->r_offset - 2) == 0x8b)
2604 {
2605 bfd_put_8 (abfd, 0x8d,
2606 contents + irel->r_offset - 2);
2607 irel->r_info = htab->r_info (r_symndx, R_X86_64_PC32);
2608 if (local_got_refcounts != NULL
2609 && local_got_refcounts[r_symndx] > 0)
2610 local_got_refcounts[r_symndx] -= 1;
2611 changed_contents = TRUE;
2612 changed_relocs = TRUE;
2613 }
2614 continue;
2615 }
2616
2617 indx = r_symndx - symtab_hdr->sh_info;
2618 h = elf_sym_hashes (abfd)[indx];
2619 BFD_ASSERT (h != NULL);
2620
2621 while (h->root.type == bfd_link_hash_indirect
2622 || h->root.type == bfd_link_hash_warning)
2623 h = (struct elf_link_hash_entry *) h->root.u.i.link;
2624
2625 /* STT_GNU_IFUNC must keep R_X86_64_GOTPCREL relocation. We also
2626 avoid optimizing _DYNAMIC since ld.so may use its link-time
2627 address. */
2628 if (h->def_regular
2629 && h->type != STT_GNU_IFUNC
2630 && h != htab->elf.hdynamic
2631 && SYMBOL_REFERENCES_LOCAL (link_info, h)
2632 && bfd_get_8 (abfd,
2633 contents + irel->r_offset - 2) == 0x8b)
2634 {
2635 bfd_put_8 (abfd, 0x8d,
2636 contents + irel->r_offset - 2);
2637 irel->r_info = htab->r_info (r_symndx, R_X86_64_PC32);
2638 if (h->got.refcount > 0)
2639 h->got.refcount -= 1;
2640 changed_contents = TRUE;
2641 changed_relocs = TRUE;
2642 }
2643 }
2644
2645 if (contents != NULL
2646 && elf_section_data (sec)->this_hdr.contents != contents)
2647 {
2648 if (!changed_contents && !link_info->keep_memory)
2649 free (contents);
2650 else
2651 {
2652 /* Cache the section contents for elf_link_input_bfd. */
2653 elf_section_data (sec)->this_hdr.contents = contents;
2654 }
2655 }
2656
2657 if (elf_section_data (sec)->relocs != internal_relocs)
2658 {
2659 if (!changed_relocs)
2660 free (internal_relocs);
2661 else
2662 elf_section_data (sec)->relocs = internal_relocs;
2663 }
2664
2665 return TRUE;
2666
2667 error_return:
2668 if (contents != NULL
2669 && elf_section_data (sec)->this_hdr.contents != contents)
2670 free (contents);
2671 if (internal_relocs != NULL
2672 && elf_section_data (sec)->relocs != internal_relocs)
2673 free (internal_relocs);
2674 return FALSE;
2675 }
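
/* Editor's note: an illustrative, stand-alone sketch (not part of BFD) of
   the byte-level rewrite performed by elf_x86_64_convert_mov_to_lea above.
   "mov foo@GOTPCREL(%rip), %reg" is encoded as 8b /r rel32; rewriting the
   opcode byte to 8d turns it into "lea foo(%rip), %reg" with the same
   ModRM byte, after which the caller retargets the rel32 relocation from
   the GOT slot (R_X86_64_GOTPCREL) to the symbol itself (R_X86_64_PC32).
   The function name is hypothetical.  */
static int
example_mov_to_lea (unsigned char *opcode_byte)
{
  if (*opcode_byte != 0x8b)	/* not a register load via mov */
    return 0;
  *opcode_byte = 0x8d;		/* lea keeps the same ModRM/rel32 */
  return 1;
}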
2676
2677 /* Set the sizes of the dynamic sections. */
2678
2679 static bfd_boolean
2680 elf_x86_64_size_dynamic_sections (bfd *output_bfd,
2681 struct bfd_link_info *info)
2682 {
2683 struct elf_x86_64_link_hash_table *htab;
2684 bfd *dynobj;
2685 asection *s;
2686 bfd_boolean relocs;
2687 bfd *ibfd;
2688 const struct elf_backend_data *bed;
2689
2690 htab = elf_x86_64_hash_table (info);
2691 if (htab == NULL)
2692 return FALSE;
2693 bed = get_elf_backend_data (output_bfd);
2694
2695 dynobj = htab->elf.dynobj;
2696 if (dynobj == NULL)
2697 abort ();
2698
2699 if (htab->elf.dynamic_sections_created)
2700 {
2701 /* Set the contents of the .interp section to the interpreter. */
2702 if (info->executable)
2703 {
2704 s = bfd_get_linker_section (dynobj, ".interp");
2705 if (s == NULL)
2706 abort ();
2707 s->size = htab->dynamic_interpreter_size;
2708 s->contents = (unsigned char *) htab->dynamic_interpreter;
2709 }
2710 }
2711
2712 /* Set up .got offsets for local syms, and space for local dynamic
2713 relocs. */
2714 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2715 {
2716 bfd_signed_vma *local_got;
2717 bfd_signed_vma *end_local_got;
2718 char *local_tls_type;
2719 bfd_vma *local_tlsdesc_gotent;
2720 bfd_size_type locsymcount;
2721 Elf_Internal_Shdr *symtab_hdr;
2722 asection *srel;
2723
2724 if (! is_x86_64_elf (ibfd))
2725 continue;
2726
2727 for (s = ibfd->sections; s != NULL; s = s->next)
2728 {
2729 struct elf_dyn_relocs *p;
2730
2731 if (!elf_x86_64_convert_mov_to_lea (ibfd, s, info))
2732 return FALSE;
2733
2734 for (p = (struct elf_dyn_relocs *)
2735 (elf_section_data (s)->local_dynrel);
2736 p != NULL;
2737 p = p->next)
2738 {
2739 if (!bfd_is_abs_section (p->sec)
2740 && bfd_is_abs_section (p->sec->output_section))
2741 {
2742 /* Input section has been discarded, either because
2743 it is a copy of a linkonce section or due to
2744 linker script /DISCARD/, so we'll be discarding
2745 the relocs too. */
2746 }
2747 else if (p->count != 0)
2748 {
2749 srel = elf_section_data (p->sec)->sreloc;
2750 srel->size += p->count * bed->s->sizeof_rela;
2751 if ((p->sec->output_section->flags & SEC_READONLY) != 0
2752 && (info->flags & DF_TEXTREL) == 0)
2753 {
2754 info->flags |= DF_TEXTREL;
2755 if (info->warn_shared_textrel && info->shared)
2756 info->callbacks->einfo (_("%P: %B: warning: relocation in readonly section `%A'.\n"),
2757 p->sec->owner, p->sec);
2758 }
2759 }
2760 }
2761 }
2762
2763 local_got = elf_local_got_refcounts (ibfd);
2764 if (!local_got)
2765 continue;
2766
2767 symtab_hdr = &elf_symtab_hdr (ibfd);
2768 locsymcount = symtab_hdr->sh_info;
2769 end_local_got = local_got + locsymcount;
2770 local_tls_type = elf_x86_64_local_got_tls_type (ibfd);
2771 local_tlsdesc_gotent = elf_x86_64_local_tlsdesc_gotent (ibfd);
2772 s = htab->elf.sgot;
2773 srel = htab->elf.srelgot;
2774 for (; local_got < end_local_got;
2775 ++local_got, ++local_tls_type, ++local_tlsdesc_gotent)
2776 {
2777 *local_tlsdesc_gotent = (bfd_vma) -1;
2778 if (*local_got > 0)
2779 {
2780 if (GOT_TLS_GDESC_P (*local_tls_type))
2781 {
2782 *local_tlsdesc_gotent = htab->elf.sgotplt->size
2783 - elf_x86_64_compute_jump_table_size (htab);
2784 htab->elf.sgotplt->size += 2 * GOT_ENTRY_SIZE;
2785 *local_got = (bfd_vma) -2;
2786 }
2787 if (! GOT_TLS_GDESC_P (*local_tls_type)
2788 || GOT_TLS_GD_P (*local_tls_type))
2789 {
2790 *local_got = s->size;
2791 s->size += GOT_ENTRY_SIZE;
2792 if (GOT_TLS_GD_P (*local_tls_type))
2793 s->size += GOT_ENTRY_SIZE;
2794 }
2795 if (info->shared
2796 || GOT_TLS_GD_ANY_P (*local_tls_type)
2797 || *local_tls_type == GOT_TLS_IE)
2798 {
2799 if (GOT_TLS_GDESC_P (*local_tls_type))
2800 {
2801 htab->elf.srelplt->size
2802 += bed->s->sizeof_rela;
2803 htab->tlsdesc_plt = (bfd_vma) -1;
2804 }
2805 if (! GOT_TLS_GDESC_P (*local_tls_type)
2806 || GOT_TLS_GD_P (*local_tls_type))
2807 srel->size += bed->s->sizeof_rela;
2808 }
2809 }
2810 else
2811 *local_got = (bfd_vma) -1;
2812 }
2813 }
2814
2815 if (htab->tls_ld_got.refcount > 0)
2816 {
2817 /* Allocate 2 got entries and 1 dynamic reloc for R_X86_64_TLSLD
2818 relocs. */
2819 htab->tls_ld_got.offset = htab->elf.sgot->size;
2820 htab->elf.sgot->size += 2 * GOT_ENTRY_SIZE;
2821 htab->elf.srelgot->size += bed->s->sizeof_rela;
2822 }
2823 else
2824 htab->tls_ld_got.offset = -1;
2825
2826 /* Allocate global sym .plt and .got entries, and space for global
2827 sym dynamic relocs. */
2828 elf_link_hash_traverse (&htab->elf, elf_x86_64_allocate_dynrelocs,
2829 info);
2830
2831 /* Allocate .plt and .got entries, and space for local symbols. */
2832 htab_traverse (htab->loc_hash_table,
2833 elf_x86_64_allocate_local_dynrelocs,
2834 info);
2835
2836 /* For every jump slot reserved in the sgotplt, reloc_count is
2837 incremented. However, when we reserve space for TLS descriptors,
2838 it's not incremented, so in order to compute the space reserved
2839 for them, it suffices to multiply the reloc count by the jump
2840 slot size.
2841
2842 PR ld/13302: We start next_irelative_index at the end of .rela.plt
2843 so that R_X86_64_IRELATIVE entries come last. */
2844 if (htab->elf.srelplt)
2845 {
2846 htab->sgotplt_jump_table_size
2847 = elf_x86_64_compute_jump_table_size (htab);
2848 htab->next_irelative_index = htab->elf.srelplt->reloc_count - 1;
2849 }
2850 else if (htab->elf.irelplt)
2851 htab->next_irelative_index = htab->elf.irelplt->reloc_count - 1;
2852
2853 if (htab->tlsdesc_plt)
2854 {
2855 /* If we're not using lazy TLS relocations, don't generate the
2856 PLT and GOT entries they require. */
2857 if ((info->flags & DF_BIND_NOW))
2858 htab->tlsdesc_plt = 0;
2859 else
2860 {
2861 htab->tlsdesc_got = htab->elf.sgot->size;
2862 htab->elf.sgot->size += GOT_ENTRY_SIZE;
2863 /* Reserve room for the initial entry.
2864 FIXME: we could probably do away with it in this case. */
2865 if (htab->elf.splt->size == 0)
2866 htab->elf.splt->size += GET_PLT_ENTRY_SIZE (output_bfd);
2867 htab->tlsdesc_plt = htab->elf.splt->size;
2868 htab->elf.splt->size += GET_PLT_ENTRY_SIZE (output_bfd);
2869 }
2870 }
2871
2872 if (htab->elf.sgotplt)
2873 {
2874 /* Don't allocate the .got.plt section if there are no GOT or PLT
2875 entries and there is no reference to _GLOBAL_OFFSET_TABLE_. */
2876 if ((htab->elf.hgot == NULL
2877 || !htab->elf.hgot->ref_regular_nonweak)
2878 && (htab->elf.sgotplt->size
2879 == get_elf_backend_data (output_bfd)->got_header_size)
2880 && (htab->elf.splt == NULL
2881 || htab->elf.splt->size == 0)
2882 && (htab->elf.sgot == NULL
2883 || htab->elf.sgot->size == 0)
2884 && (htab->elf.iplt == NULL
2885 || htab->elf.iplt->size == 0)
2886 && (htab->elf.igotplt == NULL
2887 || htab->elf.igotplt->size == 0))
2888 htab->elf.sgotplt->size = 0;
2889 }
2890
2891 if (htab->plt_eh_frame != NULL
2892 && htab->elf.splt != NULL
2893 && htab->elf.splt->size != 0
2894 && !bfd_is_abs_section (htab->elf.splt->output_section)
2895 && _bfd_elf_eh_frame_present (info))
2896 {
2897 const struct elf_x86_64_backend_data *arch_data
2898 = (const struct elf_x86_64_backend_data *) bed->arch_data;
2899 htab->plt_eh_frame->size = arch_data->eh_frame_plt_size;
2900 }
2901
2902 /* We now have determined the sizes of the various dynamic sections.
2903 Allocate memory for them. */
2904 relocs = FALSE;
2905 for (s = dynobj->sections; s != NULL; s = s->next)
2906 {
2907 if ((s->flags & SEC_LINKER_CREATED) == 0)
2908 continue;
2909
2910 if (s == htab->elf.splt
2911 || s == htab->elf.sgot
2912 || s == htab->elf.sgotplt
2913 || s == htab->elf.iplt
2914 || s == htab->elf.igotplt
2915 || s == htab->plt_eh_frame
2916 || s == htab->sdynbss)
2917 {
2918 /* Strip this section if we don't need it; see the
2919 comment below. */
2920 }
2921 else if (CONST_STRNEQ (bfd_get_section_name (dynobj, s), ".rela"))
2922 {
2923 if (s->size != 0 && s != htab->elf.srelplt)
2924 relocs = TRUE;
2925
2926 /* We use the reloc_count field as a counter if we need
2927 to copy relocs into the output file. */
2928 if (s != htab->elf.srelplt)
2929 s->reloc_count = 0;
2930 }
2931 else
2932 {
2933 /* It's not one of our sections, so don't allocate space. */
2934 continue;
2935 }
2936
2937 if (s->size == 0)
2938 {
2939 /* If we don't need this section, strip it from the
2940 output file. This is mostly to handle .rela.bss and
2941 .rela.plt. We must create both sections in
2942 create_dynamic_sections, because they must be created
2943 before the linker maps input sections to output
2944 sections. The linker does that before
2945 adjust_dynamic_symbol is called, and it is that
2946 function which decides whether anything needs to go
2947 into these sections. */
2948
2949 s->flags |= SEC_EXCLUDE;
2950 continue;
2951 }
2952
2953 if ((s->flags & SEC_HAS_CONTENTS) == 0)
2954 continue;
2955
2956 /* Allocate memory for the section contents. We use bfd_zalloc
2957 here in case unused entries are not reclaimed before the
2958 section's contents are written out. This should not happen,
2959 but this way if it does, we get a R_X86_64_NONE reloc instead
2960 of garbage. */
2961 s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
2962 if (s->contents == NULL)
2963 return FALSE;
2964 }
2965
2966 if (htab->plt_eh_frame != NULL
2967 && htab->plt_eh_frame->contents != NULL)
2968 {
2969 const struct elf_x86_64_backend_data *arch_data
2970 = (const struct elf_x86_64_backend_data *) bed->arch_data;
2971
2972 memcpy (htab->plt_eh_frame->contents,
2973 arch_data->eh_frame_plt, htab->plt_eh_frame->size);
2974 bfd_put_32 (dynobj, htab->elf.splt->size,
2975 htab->plt_eh_frame->contents + PLT_FDE_LEN_OFFSET);
2976 }
2977
2978 if (htab->elf.dynamic_sections_created)
2979 {
2980 /* Add some entries to the .dynamic section. We fill in the
2981 values later, in elf_x86_64_finish_dynamic_sections, but we
2982 must add the entries now so that we get the correct size for
2983 the .dynamic section. The DT_DEBUG entry is filled in by the
2984 dynamic linker and used by the debugger. */
2985 #define add_dynamic_entry(TAG, VAL) \
2986 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
2987
2988 if (info->executable)
2989 {
2990 if (!add_dynamic_entry (DT_DEBUG, 0))
2991 return FALSE;
2992 }
2993
2994 if (htab->elf.splt->size != 0)
2995 {
2996 if (!add_dynamic_entry (DT_PLTGOT, 0)
2997 || !add_dynamic_entry (DT_PLTRELSZ, 0)
2998 || !add_dynamic_entry (DT_PLTREL, DT_RELA)
2999 || !add_dynamic_entry (DT_JMPREL, 0))
3000 return FALSE;
3001
3002 if (htab->tlsdesc_plt
3003 && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
3004 || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
3005 return FALSE;
3006 }
3007
3008 if (relocs)
3009 {
3010 if (!add_dynamic_entry (DT_RELA, 0)
3011 || !add_dynamic_entry (DT_RELASZ, 0)
3012 || !add_dynamic_entry (DT_RELAENT, bed->s->sizeof_rela))
3013 return FALSE;
3014
3015 /* If any dynamic relocs apply to a read-only section,
3016 then we need a DT_TEXTREL entry. */
3017 if ((info->flags & DF_TEXTREL) == 0)
3018 elf_link_hash_traverse (&htab->elf,
3019 elf_x86_64_readonly_dynrelocs,
3020 info);
3021
3022 if ((info->flags & DF_TEXTREL) != 0)
3023 {
3024 if (!add_dynamic_entry (DT_TEXTREL, 0))
3025 return FALSE;
3026 }
3027 }
3028 }
3029 #undef add_dynamic_entry
3030
3031 return TRUE;
3032 }
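
/* Editor's note: an illustrative, stand-alone sketch (not part of BFD),
   following the comment above on jump-slot bookkeeping: every reserved
   jump slot bumps .rela.plt's reloc_count, so the jump-table portion of
   .got.plt can be recovered as reloc_count times the GOT entry size;
   whatever .got.plt holds beyond its header and that jump table is TLS
   descriptor space.  The names below are hypothetical.  */
#include <stddef.h>

static size_t
example_jump_table_size (size_t relaplt_reloc_count, size_t got_entry_size)
{
  return relaplt_reloc_count * got_entry_size;
}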
3033
3034 static bfd_boolean
3035 elf_x86_64_always_size_sections (bfd *output_bfd,
3036 struct bfd_link_info *info)
3037 {
3038 asection *tls_sec = elf_hash_table (info)->tls_sec;
3039
3040 if (tls_sec)
3041 {
3042 struct elf_link_hash_entry *tlsbase;
3043
3044 tlsbase = elf_link_hash_lookup (elf_hash_table (info),
3045 "_TLS_MODULE_BASE_",
3046 FALSE, FALSE, FALSE);
3047
3048 if (tlsbase && tlsbase->type == STT_TLS)
3049 {
3050 struct elf_x86_64_link_hash_table *htab;
3051 struct bfd_link_hash_entry *bh = NULL;
3052 const struct elf_backend_data *bed
3053 = get_elf_backend_data (output_bfd);
3054
3055 htab = elf_x86_64_hash_table (info);
3056 if (htab == NULL)
3057 return FALSE;
3058
3059 if (!(_bfd_generic_link_add_one_symbol
3060 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
3061 tls_sec, 0, NULL, FALSE,
3062 bed->collect, &bh)))
3063 return FALSE;
3064
3065 htab->tls_module_base = bh;
3066
3067 tlsbase = (struct elf_link_hash_entry *)bh;
3068 tlsbase->def_regular = 1;
3069 tlsbase->other = STV_HIDDEN;
3070 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
3071 }
3072 }
3073
3074 return TRUE;
3075 }
3076
3077 /* _TLS_MODULE_BASE_ needs to be treated specially when linking
3078 executables. Rather than setting it to the beginning of the TLS
3079 section, we have to set it to the end. This function may be called
3080 multiple times; it is idempotent. */
3081
3082 static void
3083 elf_x86_64_set_tls_module_base (struct bfd_link_info *info)
3084 {
3085 struct elf_x86_64_link_hash_table *htab;
3086 struct bfd_link_hash_entry *base;
3087
3088 if (!info->executable)
3089 return;
3090
3091 htab = elf_x86_64_hash_table (info);
3092 if (htab == NULL)
3093 return;
3094
3095 base = htab->tls_module_base;
3096 if (base == NULL)
3097 return;
3098
3099 base->u.def.value = htab->elf.tls_size;
3100 }
3101
3102 /* Return the base VMA address which should be subtracted from real addresses
3103 when resolving a @dtpoff relocation.
3104 This is the PT_TLS segment's p_vaddr. */
3105
3106 static bfd_vma
3107 elf_x86_64_dtpoff_base (struct bfd_link_info *info)
3108 {
3109 /* If tls_sec is NULL, we should have signalled an error already. */
3110 if (elf_hash_table (info)->tls_sec == NULL)
3111 return 0;
3112 return elf_hash_table (info)->tls_sec->vma;
3113 }
3114
3115 /* Return the relocation value for a @tpoff relocation
3116 if the STT_TLS symbol's virtual address is ADDRESS. */
3117
3118 static bfd_vma
3119 elf_x86_64_tpoff (struct bfd_link_info *info, bfd_vma address)
3120 {
3121 struct elf_link_hash_table *htab = elf_hash_table (info);
3122 const struct elf_backend_data *bed = get_elf_backend_data (info->output_bfd);
3123 bfd_vma static_tls_size;
3124
3125 /* If tls_segment is NULL, we should have signalled an error already. */
3126 if (htab->tls_sec == NULL)
3127 return 0;
3128
3129 /* Consider special static TLS alignment requirements. */
3130 static_tls_size = BFD_ALIGN (htab->tls_size, bed->static_tls_alignment);
3131 return address - static_tls_size - htab->tls_sec->vma;
3132 }
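
/* Editor's note: an illustrative, stand-alone sketch (not part of BFD) of
   the @tpoff computation in elf_x86_64_tpoff above: round the TLS segment
   size up to the static TLS alignment, then take the symbol address
   relative to (tls_sec->vma + static_tls_size).  Names are hypothetical
   and the alignment is assumed to be a power of two, as BFD_ALIGN
   requires.  */
#include <stdint.h>

static uint64_t
example_align_up (uint64_t value, uint64_t align)
{
  return (value + align - 1) & ~(align - 1);
}

static int64_t
example_tpoff (uint64_t address, uint64_t tls_vma,
	       uint64_t tls_size, uint64_t static_tls_align)
{
  uint64_t static_tls_size = example_align_up (tls_size, static_tls_align);
  /* Negative for the usual x86-64 layout, where TLS sits below the
     thread pointer.  */
  return (int64_t) (address - static_tls_size - tls_vma);
}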
3133
3134 /* Is the instruction before OFFSET in CONTENTS a 32bit relative
3135 branch? */
3136
3137 static bfd_boolean
3138 is_32bit_relative_branch (bfd_byte *contents, bfd_vma offset)
3139 {
3140 /* Opcode Instruction
3141 0xe8 call
3142 0xe9 jump
3143 0x0f 0x8x conditional jump */
3144 return ((offset > 0
3145 && (contents [offset - 1] == 0xe8
3146 || contents [offset - 1] == 0xe9))
3147 || (offset > 1
3148 && contents [offset - 2] == 0x0f
3149 && (contents [offset - 1] & 0xf0) == 0x80));
3150 }
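
/* Editor's note: an illustrative usage sketch (not part of BFD) for the
   test above.  The rel32 immediate of "call foo" starts one byte after
   the e8 opcode, so a relocation at offset 1 of this buffer is recognized
   as applying to a 32-bit relative branch.  The function name and buffer
   are hypothetical.  */
static void
example_check_branch (void)
{
  /* e8 <rel32> = call; the relocation applies at offset 1.  */
  bfd_byte call_insn[5] = { 0xe8, 0, 0, 0, 0 };
  BFD_ASSERT (is_32bit_relative_branch (call_insn, 1));
}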
3151
3152 /* Relocate an x86_64 ELF section. */
3153
3154 static bfd_boolean
3155 elf_x86_64_relocate_section (bfd *output_bfd,
3156 struct bfd_link_info *info,
3157 bfd *input_bfd,
3158 asection *input_section,
3159 bfd_byte *contents,
3160 Elf_Internal_Rela *relocs,
3161 Elf_Internal_Sym *local_syms,
3162 asection **local_sections)
3163 {
3164 struct elf_x86_64_link_hash_table *htab;
3165 Elf_Internal_Shdr *symtab_hdr;
3166 struct elf_link_hash_entry **sym_hashes;
3167 bfd_vma *local_got_offsets;
3168 bfd_vma *local_tlsdesc_gotents;
3169 Elf_Internal_Rela *rel;
3170 Elf_Internal_Rela *relend;
3171 const unsigned int plt_entry_size = GET_PLT_ENTRY_SIZE (info->output_bfd);
3172
3173 BFD_ASSERT (is_x86_64_elf (input_bfd));
3174
3175 htab = elf_x86_64_hash_table (info);
3176 if (htab == NULL)
3177 return FALSE;
3178 symtab_hdr = &elf_symtab_hdr (input_bfd);
3179 sym_hashes = elf_sym_hashes (input_bfd);
3180 local_got_offsets = elf_local_got_offsets (input_bfd);
3181 local_tlsdesc_gotents = elf_x86_64_local_tlsdesc_gotent (input_bfd);
3182
3183 elf_x86_64_set_tls_module_base (info);
3184
3185 rel = relocs;
3186 relend = relocs + input_section->reloc_count;
3187 for (; rel < relend; rel++)
3188 {
3189 unsigned int r_type;
3190 reloc_howto_type *howto;
3191 unsigned long r_symndx;
3192 struct elf_link_hash_entry *h;
3193 Elf_Internal_Sym *sym;
3194 asection *sec;
3195 bfd_vma off, offplt;
3196 bfd_vma relocation;
3197 bfd_boolean unresolved_reloc;
3198 bfd_reloc_status_type r;
3199 int tls_type;
3200 asection *base_got;
3201
3202 r_type = ELF32_R_TYPE (rel->r_info);
3203 if (r_type == (int) R_X86_64_GNU_VTINHERIT
3204 || r_type == (int) R_X86_64_GNU_VTENTRY)
3205 continue;
3206
3207 if (r_type >= R_X86_64_max)
3208 {
3209 bfd_set_error (bfd_error_bad_value);
3210 return FALSE;
3211 }
3212
3213 if (r_type != (int) R_X86_64_32
3214 || ABI_64_P (output_bfd))
3215 howto = x86_64_elf_howto_table + r_type;
3216 else
3217 howto = (x86_64_elf_howto_table
3218 + ARRAY_SIZE (x86_64_elf_howto_table) - 1);
3219 r_symndx = htab->r_sym (rel->r_info);
3220 h = NULL;
3221 sym = NULL;
3222 sec = NULL;
3223 unresolved_reloc = FALSE;
3224 if (r_symndx < symtab_hdr->sh_info)
3225 {
3226 sym = local_syms + r_symndx;
3227 sec = local_sections[r_symndx];
3228
3229 relocation = _bfd_elf_rela_local_sym (output_bfd, sym,
3230 &sec, rel);
3231
3232 /* Relocate against local STT_GNU_IFUNC symbol. */
3233 if (!info->relocatable
3234 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
3235 {
3236 h = elf_x86_64_get_local_sym_hash (htab, input_bfd,
3237 rel, FALSE);
3238 if (h == NULL)
3239 abort ();
3240
3241 /* Set STT_GNU_IFUNC symbol value. */
3242 h->root.u.def.value = sym->st_value;
3243 h->root.u.def.section = sec;
3244 }
3245 }
3246 else
3247 {
3248 bfd_boolean warned ATTRIBUTE_UNUSED;
3249
3250 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
3251 r_symndx, symtab_hdr, sym_hashes,
3252 h, sec, relocation,
3253 unresolved_reloc, warned);
3254 }
3255
3256 if (sec != NULL && discarded_section (sec))
3257 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
3258 rel, 1, relend, howto, 0, contents);
3259
3260 if (info->relocatable)
3261 continue;
3262
3263 if (rel->r_addend == 0
3264 && r_type == R_X86_64_64
3265 && !ABI_64_P (output_bfd))
3266 {
3267 /* For x32, treat R_X86_64_64 like R_X86_64_32 and zero-extend
3268 it to 64 bits if the addend is zero. */
3269 r_type = R_X86_64_32;
3270 memset (contents + rel->r_offset + 4, 0, 4);
3271 }
3272
3273 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
3274 it here if it is defined in a non-shared object. */
3275 if (h != NULL
3276 && h->type == STT_GNU_IFUNC
3277 && h->def_regular)
3278 {
3279 asection *plt;
3280 bfd_vma plt_index;
3281 const char *name;
3282
3283 if ((input_section->flags & SEC_ALLOC) == 0
3284 || h->plt.offset == (bfd_vma) -1)
3285 abort ();
3286
3287 /* STT_GNU_IFUNC symbol must go through PLT. */
3288 plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt;
3289 relocation = (plt->output_section->vma
3290 + plt->output_offset + h->plt.offset);
3291
3292 switch (r_type)
3293 {
3294 default:
3295 if (h->root.root.string)
3296 name = h->root.root.string;
3297 else
3298 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
3299 NULL);
3300 (*_bfd_error_handler)
3301 (_("%B: relocation %s against STT_GNU_IFUNC "
3302 "symbol `%s' isn't handled by %s"), input_bfd,
3303 x86_64_elf_howto_table[r_type].name,
3304 name, __FUNCTION__);
3305 bfd_set_error (bfd_error_bad_value);
3306 return FALSE;
3307
3308 case R_X86_64_32S:
3309 if (info->shared)
3310 abort ();
3311 goto do_relocation;
3312
3313 case R_X86_64_32:
3314 if (ABI_64_P (output_bfd))
3315 goto do_relocation;
3316 /* FALLTHROUGH */
3317 case R_X86_64_64:
3318 if (rel->r_addend != 0)
3319 {
3320 if (h->root.root.string)
3321 name = h->root.root.string;
3322 else
3323 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
3324 sym, NULL);
3325 (*_bfd_error_handler)
3326 (_("%B: relocation %s against STT_GNU_IFUNC "
3327 "symbol `%s' has non-zero addend: %d"),
3328 input_bfd, x86_64_elf_howto_table[r_type].name,
3329 name, rel->r_addend);
3330 bfd_set_error (bfd_error_bad_value);
3331 return FALSE;
3332 }
3333
3334 /* Generate a dynamic relocation only when there is a
3335 non-GOT reference in a shared object. */
3336 if (info->shared && h->non_got_ref)
3337 {
3338 Elf_Internal_Rela outrel;
3339 asection *sreloc;
3340
3341 /* Need a dynamic relocation to get the real function
3342 address. */
3343 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
3344 info,
3345 input_section,
3346 rel->r_offset);
3347 if (outrel.r_offset == (bfd_vma) -1
3348 || outrel.r_offset == (bfd_vma) -2)
3349 abort ();
3350
3351 outrel.r_offset += (input_section->output_section->vma
3352 + input_section->output_offset);
3353
3354 if (h->dynindx == -1
3355 || h->forced_local
3356 || info->executable)
3357 {
3358 /* This symbol is resolved locally. */
3359 outrel.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
3360 outrel.r_addend = (h->root.u.def.value
3361 + h->root.u.def.section->output_section->vma
3362 + h->root.u.def.section->output_offset);
3363 }
3364 else
3365 {
3366 outrel.r_info = htab->r_info (h->dynindx, r_type);
3367 outrel.r_addend = 0;
3368 }
3369
3370 sreloc = htab->elf.irelifunc;
3371 elf_append_rela (output_bfd, sreloc, &outrel);
3372
3373 /* If this reloc is against an external symbol, we
3374 do not want to fiddle with the addend. Otherwise,
3375 we need to include the symbol value so that it
3376 becomes an addend for the dynamic reloc. For an
3377 internal symbol, we have already updated the addend. */
3378 continue;
3379 }
3380 /* FALLTHROUGH */
3381 case R_X86_64_PC32:
3382 case R_X86_64_PC64:
3383 case R_X86_64_PLT32:
3384 goto do_relocation;
3385
3386 case R_X86_64_GOTPCREL:
3387 case R_X86_64_GOTPCREL64:
3388 base_got = htab->elf.sgot;
3389 off = h->got.offset;
3390
3391 if (base_got == NULL)
3392 abort ();
3393
3394 if (off == (bfd_vma) -1)
3395 {
3396 /* We can't use h->got.offset here to save state, or
3397 even just remember the offset, as finish_dynamic_symbol
3398 would use that as offset into .got. */
3399
3400 if (htab->elf.splt != NULL)
3401 {
3402 plt_index = h->plt.offset / plt_entry_size - 1;
3403 off = (plt_index + 3) * GOT_ENTRY_SIZE;
3404 base_got = htab->elf.sgotplt;
3405 }
3406 else
3407 {
3408 plt_index = h->plt.offset / plt_entry_size;
3409 off = plt_index * GOT_ENTRY_SIZE;
3410 base_got = htab->elf.igotplt;
3411 }
3412
3413 if (h->dynindx == -1
3414 || h->forced_local
3415 || info->symbolic)
3416 {
3417 /* This references the local definition. We must
3418 initialize this entry in the global offset table.
3419 Since the offset must always be a multiple of 8,
3420 we use the least significant bit to record
3421 whether we have initialized it already.
3422
3423 When doing a dynamic link, we create a .rela.got
3424 relocation entry to initialize the value. This
3425 is done in the finish_dynamic_symbol routine. */
3426 if ((off & 1) != 0)
3427 off &= ~1;
3428 else
3429 {
3430 bfd_put_64 (output_bfd, relocation,
3431 base_got->contents + off);
3432 /* Note that this is harmless for the GOTPLT64
3433 case, as -1 | 1 still is -1. */
3434 h->got.offset |= 1;
3435 }
3436 }
3437 }
3438
3439 relocation = (base_got->output_section->vma
3440 + base_got->output_offset + off);
3441
3442 goto do_relocation;
3443 }
3444 }
3445
3446 /* When generating a shared object, the relocations handled here are
3447 copied into the output file to be resolved at run time. */
3448 switch (r_type)
3449 {
3450 case R_X86_64_GOT32:
3451 case R_X86_64_GOT64:
3452 /* Relocation is to the entry for this symbol in the global
3453 offset table. */
3454 case R_X86_64_GOTPCREL:
3455 case R_X86_64_GOTPCREL64:
3456 /* Use global offset table entry as symbol value. */
3457 case R_X86_64_GOTPLT64:
3458 /* This is the same as GOT64 for relocation purposes, but
3459 indicates the existence of a PLT entry. The difficulty is
3460 that we must calculate the GOT slot offset from the PLT
3461 offset if this symbol got a PLT entry (it was global).
3462 Additionally, if it's computed from the PLT entry, then that
3463 GOT offset is relative to .got.plt, not to .got. */
3464 base_got = htab->elf.sgot;
3465
3466 if (htab->elf.sgot == NULL)
3467 abort ();
3468
3469 if (h != NULL)
3470 {
3471 bfd_boolean dyn;
3472
3473 off = h->got.offset;
3474 if (h->needs_plt
3475 && h->plt.offset != (bfd_vma)-1
3476 && off == (bfd_vma)-1)
3477 {
3478 /* We can't use h->got.offset here to save
3479 state, or even just remember the offset, as
3480 finish_dynamic_symbol would use that as offset into
3481 .got. */
3482 bfd_vma plt_index = h->plt.offset / plt_entry_size - 1;
3483 off = (plt_index + 3) * GOT_ENTRY_SIZE;
3484 base_got = htab->elf.sgotplt;
3485 }
3486
3487 dyn = htab->elf.dynamic_sections_created;
3488
3489 if (! WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
3490 || (info->shared
3491 && SYMBOL_REFERENCES_LOCAL (info, h))
3492 || (ELF_ST_VISIBILITY (h->other)
3493 && h->root.type == bfd_link_hash_undefweak))
3494 {
3495 /* This is actually a static link, or it is a -Bsymbolic
3496 link and the symbol is defined locally, or the symbol
3497 was forced to be local because of a version file. We
3498 must initialize this entry in the global offset table.
3499 Since the offset must always be a multiple of 8, we
3500 use the least significant bit to record whether we
3501 have initialized it already.
3502
3503 When doing a dynamic link, we create a .rela.got
3504 relocation entry to initialize the value. This is
3505 done in the finish_dynamic_symbol routine. */
3506 if ((off & 1) != 0)
3507 off &= ~1;
3508 else
3509 {
3510 bfd_put_64 (output_bfd, relocation,
3511 base_got->contents + off);
3512 /* Note that this is harmless for the GOTPLT64 case,
3513 as -1 | 1 still is -1. */
3514 h->got.offset |= 1;
3515 }
3516 }
3517 else
3518 unresolved_reloc = FALSE;
3519 }
3520 else
3521 {
3522 if (local_got_offsets == NULL)
3523 abort ();
3524
3525 off = local_got_offsets[r_symndx];
3526
3527 /* The offset must always be a multiple of 8. We use
3528 the least significant bit to record whether we have
3529 already generated the necessary reloc. */
3530 if ((off & 1) != 0)
3531 off &= ~1;
3532 else
3533 {
3534 bfd_put_64 (output_bfd, relocation,
3535 base_got->contents + off);
3536
3537 if (info->shared)
3538 {
3539 asection *s;
3540 Elf_Internal_Rela outrel;
3541
3542 /* We need to generate a R_X86_64_RELATIVE reloc
3543 for the dynamic linker. */
3544 s = htab->elf.srelgot;
3545 if (s == NULL)
3546 abort ();
3547
3548 outrel.r_offset = (base_got->output_section->vma
3549 + base_got->output_offset
3550 + off);
3551 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
3552 outrel.r_addend = relocation;
3553 elf_append_rela (output_bfd, s, &outrel);
3554 }
3555
3556 local_got_offsets[r_symndx] |= 1;
3557 }
3558 }
3559
3560 if (off >= (bfd_vma) -2)
3561 abort ();
3562
3563 relocation = base_got->output_section->vma
3564 + base_got->output_offset + off;
3565 if (r_type != R_X86_64_GOTPCREL && r_type != R_X86_64_GOTPCREL64)
3566 relocation -= htab->elf.sgotplt->output_section->vma
3567 - htab->elf.sgotplt->output_offset;
3568
3569 break;
3570
3571 case R_X86_64_GOTOFF64:
3572 /* Relocation is relative to the start of the global offset
3573 table. */
3574
3575 /* Check to make sure it isn't a protected function symbol
3576 for a shared library, since it may not be local when used
3577 as a function address. */
3578 if (!info->executable
3579 && h
3580 && !SYMBOLIC_BIND (info, h)
3581 && h->def_regular
3582 && h->type == STT_FUNC
3583 && ELF_ST_VISIBILITY (h->other) == STV_PROTECTED)
3584 {
3585 (*_bfd_error_handler)
3586 (_("%B: relocation R_X86_64_GOTOFF64 against protected function `%s' can not be used when making a shared object"),
3587 input_bfd, h->root.root.string);
3588 bfd_set_error (bfd_error_bad_value);
3589 return FALSE;
3590 }
3591
3592 /* Note that sgot is not involved in this
3593 calculation. We always want the start of .got.plt. If we
3594 defined _GLOBAL_OFFSET_TABLE_ in a different way, as is
3595 permitted by the ABI, we might have to change this
3596 calculation. */
3597 relocation -= htab->elf.sgotplt->output_section->vma
3598 + htab->elf.sgotplt->output_offset;
3599 break;
3600
3601 case R_X86_64_GOTPC32:
3602 case R_X86_64_GOTPC64:
3603 /* Use global offset table as symbol value. */
3604 relocation = htab->elf.sgotplt->output_section->vma
3605 + htab->elf.sgotplt->output_offset;
3606 unresolved_reloc = FALSE;
3607 break;
3608
3609 case R_X86_64_PLTOFF64:
3610 /* Relocation is PLT entry relative to GOT. For local
3611 symbols it's the symbol itself relative to GOT. */
3612 if (h != NULL
3613 /* See PLT32 handling. */
3614 && h->plt.offset != (bfd_vma) -1
3615 && htab->elf.splt != NULL)
3616 {
3617 relocation = (htab->elf.splt->output_section->vma
3618 + htab->elf.splt->output_offset
3619 + h->plt.offset);
3620 unresolved_reloc = FALSE;
3621 }
3622
3623 relocation -= htab->elf.sgotplt->output_section->vma
3624 + htab->elf.sgotplt->output_offset;
3625 break;
3626
3627 case R_X86_64_PLT32:
3628 /* Relocation is to the entry for this symbol in the
3629 procedure linkage table. */
3630
3631 /* Resolve a PLT32 reloc against a local symbol directly,
3632 without using the procedure linkage table. */
3633 if (h == NULL)
3634 break;
3635
3636 if (h->plt.offset == (bfd_vma) -1
3637 || htab->elf.splt == NULL)
3638 {
3639 /* We didn't make a PLT entry for this symbol. This
3640 happens when statically linking PIC code, or when
3641 using -Bsymbolic. */
3642 break;
3643 }
3644
3645 relocation = (htab->elf.splt->output_section->vma
3646 + htab->elf.splt->output_offset
3647 + h->plt.offset);
3648 unresolved_reloc = FALSE;
3649 break;
3650
3651 case R_X86_64_PC8:
3652 case R_X86_64_PC16:
3653 case R_X86_64_PC32:
3654 if (info->shared
3655 && (input_section->flags & SEC_ALLOC) != 0
3656 && (input_section->flags & SEC_READONLY) != 0
3657 && h != NULL)
3658 {
3659 bfd_boolean fail = FALSE;
3660 bfd_boolean branch
3661 = (r_type == R_X86_64_PC32
3662 && is_32bit_relative_branch (contents, rel->r_offset));
3663
3664 if (SYMBOL_REFERENCES_LOCAL (info, h))
3665 {
3666 /* Symbol is referenced locally. Make sure it is
3667 defined locally, or that this is a branch. */
3668 fail = !h->def_regular && !branch;
3669 }
3670 else
3671 {
3672 /* Symbol isn't referenced locally. We only allow
3673 a branch to a symbol with non-default visibility. */
3674 fail = (!branch
3675 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT);
3676 }
3677
3678 if (fail)
3679 {
3680 const char *fmt;
3681 const char *v;
3682 const char *pic = "";
3683
3684 switch (ELF_ST_VISIBILITY (h->other))
3685 {
3686 case STV_HIDDEN:
3687 v = _("hidden symbol");
3688 break;
3689 case STV_INTERNAL:
3690 v = _("internal symbol");
3691 break;
3692 case STV_PROTECTED:
3693 v = _("protected symbol");
3694 break;
3695 default:
3696 v = _("symbol");
3697 pic = _("; recompile with -fPIC");
3698 break;
3699 }
3700
3701 if (h->def_regular)
3702 fmt = _("%B: relocation %s against %s `%s' can not be used when making a shared object%s");
3703 else
3704 fmt = _("%B: relocation %s against undefined %s `%s' can not be used when making a shared object%s");
3705
3706 (*_bfd_error_handler) (fmt, input_bfd,
3707 x86_64_elf_howto_table[r_type].name,
3708 v, h->root.root.string, pic);
3709 bfd_set_error (bfd_error_bad_value);
3710 return FALSE;
3711 }
3712 }
3713 /* Fall through. */
3714
3715 case R_X86_64_8:
3716 case R_X86_64_16:
3717 case R_X86_64_32:
3718 case R_X86_64_PC64:
3719 case R_X86_64_64:
3720 /* FIXME: The ABI says the linker should make sure the value is
3721 the same when it is zero-extended to 64 bits. */
3722
3723 if ((input_section->flags & SEC_ALLOC) == 0)
3724 break;
3725
3726 if ((info->shared
3727 && (h == NULL
3728 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
3729 || h->root.type != bfd_link_hash_undefweak)
3730 && (! IS_X86_64_PCREL_TYPE (r_type)
3731 || ! SYMBOL_CALLS_LOCAL (info, h)))
3732 || (ELIMINATE_COPY_RELOCS
3733 && !info->shared
3734 && h != NULL
3735 && h->dynindx != -1
3736 && !h->non_got_ref
3737 && ((h->def_dynamic
3738 && !h->def_regular)
3739 || h->root.type == bfd_link_hash_undefweak
3740 || h->root.type == bfd_link_hash_undefined)))
3741 {
3742 Elf_Internal_Rela outrel;
3743 bfd_boolean skip, relocate;
3744 asection *sreloc;
3745
3746 /* When generating a shared object, these relocations
3747 are copied into the output file to be resolved at run
3748 time. */
3749 skip = FALSE;
3750 relocate = FALSE;
3751
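/* _bfd_elf_section_offset returns (bfd_vma) -1 when the reloc is
   against removed contents and must be dropped entirely, and -2
   when no dynamic reloc should be emitted but the section contents
   must still be relocated (e.g. for edited .eh_frame entries).  */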
3752 outrel.r_offset =
3753 _bfd_elf_section_offset (output_bfd, info, input_section,
3754 rel->r_offset);
3755 if (outrel.r_offset == (bfd_vma) -1)
3756 skip = TRUE;
3757 else if (outrel.r_offset == (bfd_vma) -2)
3758 skip = TRUE, relocate = TRUE;
3759
3760 outrel.r_offset += (input_section->output_section->vma
3761 + input_section->output_offset);
3762
3763 if (skip)
3764 memset (&outrel, 0, sizeof outrel);
3765
3766 /* h->dynindx may be -1 if this symbol was marked to
3767 become local. */
3768 else if (h != NULL
3769 && h->dynindx != -1
3770 && (IS_X86_64_PCREL_TYPE (r_type)
3771 || ! info->shared
3772 || ! SYMBOLIC_BIND (info, h)
3773 || ! h->def_regular))
3774 {
3775 outrel.r_info = htab->r_info (h->dynindx, r_type);
3776 outrel.r_addend = rel->r_addend;
3777 }
3778 else
3779 {
3780 /* This symbol is local, or marked to become local. */
3781 if (r_type == htab->pointer_r_type)
3782 {
3783 relocate = TRUE;
3784 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
3785 outrel.r_addend = relocation + rel->r_addend;
3786 }
3787 else if (r_type == R_X86_64_64
3788 && !ABI_64_P (output_bfd))
3789 {
3790 relocate = TRUE;
3791 outrel.r_info = htab->r_info (0,
3792 R_X86_64_RELATIVE64);
3793 outrel.r_addend = relocation + rel->r_addend;
3794 /* Check addend overflow. */
3795 if ((outrel.r_addend & 0x80000000)
3796 != (rel->r_addend & 0x80000000))
3797 {
3798 const char *name;
3799 int addend = rel->r_addend;
3800 if (h && h->root.root.string)
3801 name = h->root.root.string;
3802 else
3803 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
3804 sym, NULL);
3805 if (addend < 0)
3806 (*_bfd_error_handler)
3807 (_("%B: addend -0x%x in relocation %s against "
3808 "symbol `%s' at 0x%lx in section `%A' is "
3809 "out of range"),
3810 input_bfd, input_section, addend,
3811 x86_64_elf_howto_table[r_type].name,
3812 name, (unsigned long) rel->r_offset);
3813 else
3814 (*_bfd_error_handler)
3815 (_("%B: addend 0x%x in relocation %s against "
3816 "symbol `%s' at 0x%lx in section `%A' is "
3817 "out of range"),
3818 input_bfd, input_section, addend,
3819 x86_64_elf_howto_table[r_type].name,
3820 name, (unsigned long) rel->r_offset);
3821 bfd_set_error (bfd_error_bad_value);
3822 return FALSE;
3823 }
3824 }
3825 else
3826 {
3827 long sindx;
3828
3829 if (bfd_is_abs_section (sec))
3830 sindx = 0;
3831 else if (sec == NULL || sec->owner == NULL)
3832 {
3833 bfd_set_error (bfd_error_bad_value);
3834 return FALSE;
3835 }
3836 else
3837 {
3838 asection *osec;
3839
3840 /* We are turning this relocation into one
3841 against a section symbol. It would be
3842 proper to subtract the symbol's value,
3843 osec->vma, from the emitted reloc addend,
3844 but ld.so expects buggy relocs. */
3845 osec = sec->output_section;
3846 sindx = elf_section_data (osec)->dynindx;
3847 if (sindx == 0)
3848 {
3849 asection *oi = htab->elf.text_index_section;
3850 sindx = elf_section_data (oi)->dynindx;
3851 }
3852 BFD_ASSERT (sindx != 0);
3853 }
3854
3855 outrel.r_info = htab->r_info (sindx, r_type);
3856 outrel.r_addend = relocation + rel->r_addend;
3857 }
3858 }
3859
3860 sreloc = elf_section_data (input_section)->sreloc;
3861
3862 if (sreloc == NULL || sreloc->contents == NULL)
3863 {
3864 r = bfd_reloc_notsupported;
3865 goto check_relocation_error;
3866 }
3867
3868 elf_append_rela (output_bfd, sreloc, &outrel);
3869
3870 /* If this reloc is against an external symbol, we do
3871 not want to fiddle with the addend. Otherwise, we
3872 need to include the symbol value so that it becomes
3873 an addend for the dynamic reloc. */
3874 if (! relocate)
3875 continue;
3876 }
3877
3878 break;
3879
3880 case R_X86_64_TLSGD:
3881 case R_X86_64_GOTPC32_TLSDESC:
3882 case R_X86_64_TLSDESC_CALL:
3883 case R_X86_64_GOTTPOFF:
3884 tls_type = GOT_UNKNOWN;
3885 if (h == NULL && local_got_offsets)
3886 tls_type = elf_x86_64_local_got_tls_type (input_bfd) [r_symndx];
3887 else if (h != NULL)
3888 tls_type = elf_x86_64_hash_entry (h)->tls_type;
3889
3890 if (! elf_x86_64_tls_transition (info, input_bfd,
3891 input_section, contents,
3892 symtab_hdr, sym_hashes,
3893 &r_type, tls_type, rel,
3894 relend, h, r_symndx))
3895 return FALSE;
3896
3897 if (r_type == R_X86_64_TPOFF32)
3898 {
3899 bfd_vma roff = rel->r_offset;
3900
3901 BFD_ASSERT (! unresolved_reloc);
3902
3903 if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSGD)
3904 {
3905 /* GD->LE transition. For 64bit, change
3906 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3907 .word 0x6666; rex64; call __tls_get_addr
3908 into:
3909 movq %fs:0, %rax
3910 leaq foo@tpoff(%rax), %rax
3911 For 32bit, change
3912 leaq foo@tlsgd(%rip), %rdi
3913 .word 0x6666; rex64; call __tls_get_addr
3914 into:
3915 movl %fs:0, %eax
3916 leaq foo@tpoff(%rax), %rax */
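/* roff points at the 32-bit @tlsgd operand, so the replacement
   sequence starts 4 bytes (3 for the 32-bit ABI) before it and
   leaves room for the @tpoff value at roff + 8.  */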
3917 if (ABI_64_P (output_bfd))
3918 memcpy (contents + roff - 4,
3919 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3920 16);
3921 else
3922 memcpy (contents + roff - 3,
3923 "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3924 15);
3925 bfd_put_32 (output_bfd,
3926 elf_x86_64_tpoff (info, relocation),
3927 contents + roff + 8);
3928 /* Skip R_X86_64_PC32/R_X86_64_PLT32. */
3929 rel++;
3930 continue;
3931 }
3932 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTPC32_TLSDESC)
3933 {
3934 /* GDesc -> LE transition.
3935 It's originally something like:
3936 leaq x@tlsdesc(%rip), %rax
3937
3938 Change it to:
3939 movl $x@tpoff, %rax. */
3940
3941 unsigned int val, type;
3942
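/* The reloc covers the 32-bit @tlsdesc displacement: roff - 3 is
   the REX prefix of the leaq and roff - 1 its ModRM byte.  Copy
   the old REX.R bit into REX.B and move the destination register
   from the reg field to the rm field of the new mov $imm32.  */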
3943 type = bfd_get_8 (input_bfd, contents + roff - 3);
3944 val = bfd_get_8 (input_bfd, contents + roff - 1);
3945 bfd_put_8 (output_bfd, 0x48 | ((type >> 2) & 1),
3946 contents + roff - 3);
3947 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
3948 bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7),
3949 contents + roff - 1);
3950 bfd_put_32 (output_bfd,
3951 elf_x86_64_tpoff (info, relocation),
3952 contents + roff);
3953 continue;
3954 }
3955 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSDESC_CALL)
3956 {
3957 /* GDesc -> LE transition.
3958 It's originally:
3959 call *(%rax)
3960 Turn it into:
3961 xchg %ax,%ax. */
3962 bfd_put_8 (output_bfd, 0x66, contents + roff);
3963 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
3964 continue;
3965 }
3966 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTTPOFF)
3967 {
3968 /* IE->LE transition:
3969 Originally it can be one of:
3970 movq foo@gottpoff(%rip), %reg
3971 addq foo@gottpoff(%rip), %reg
3972 We change it into:
3973 movq $foo, %reg
3974 leaq foo(%reg), %reg
3975 addq $foo, %reg. */
3976
3977 unsigned int val, type, reg;
3978
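/* The reloc covers the 32-bit @gottpoff displacement, so roff - 1
   is the ModRM byte, roff - 2 the opcode (0x8b for mov, 0x03 for
   add) and roff - 3 the REX prefix when one is present; the
   destination register number is in bits 3..5 of ModRM.  */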
3979 val = bfd_get_8 (input_bfd, contents + roff - 3);
3980 type = bfd_get_8 (input_bfd, contents + roff - 2);
3981 reg = bfd_get_8 (input_bfd, contents + roff - 1);
3982 reg >>= 3;
3983 if (type == 0x8b)
3984 {
3985 /* movq */
3986 if (val == 0x4c)
3987 bfd_put_8 (output_bfd, 0x49,
3988 contents + roff - 3);
3989 else if (!ABI_64_P (output_bfd) && val == 0x44)
3990 bfd_put_8 (output_bfd, 0x41,
3991 contents + roff - 3);
3992 bfd_put_8 (output_bfd, 0xc7,
3993 contents + roff - 2);
3994 bfd_put_8 (output_bfd, 0xc0 | reg,
3995 contents + roff - 1);
3996 }
3997 else if (reg == 4)
3998 {
3999 /* addq -> addq - addressing with %rsp/%r12 is special:
4000 leaq would need a SIB byte to encode them as a base. */
4001 if (val == 0x4c)
4002 bfd_put_8 (output_bfd, 0x49,
4003 contents + roff - 3);
4004 else if (!ABI_64_P (output_bfd) && val == 0x44)
4005 bfd_put_8 (output_bfd, 0x41,
4006 contents + roff - 3);
4007 bfd_put_8 (output_bfd, 0x81,
4008 contents + roff - 2);
4009 bfd_put_8 (output_bfd, 0xc0 | reg,
4010 contents + roff - 1);
4011 }
4012 else
4013 {
4014 /* addq -> leaq */
4015 if (val == 0x4c)
4016 bfd_put_8 (output_bfd, 0x4d,
4017 contents + roff - 3);
4018 else if (!ABI_64_P (output_bfd) && val == 0x44)
4019 bfd_put_8 (output_bfd, 0x45,
4020 contents + roff - 3);
4021 bfd_put_8 (output_bfd, 0x8d,
4022 contents + roff - 2);
4023 bfd_put_8 (output_bfd, 0x80 | reg | (reg << 3),
4024 contents + roff - 1);
4025 }
4026 bfd_put_32 (output_bfd,
4027 elf_x86_64_tpoff (info, relocation),
4028 contents + roff);
4029 continue;
4030 }
4031 else
4032 BFD_ASSERT (FALSE);
4033 }
4034
4035 if (htab->elf.sgot == NULL)
4036 abort ();
4037
4038 if (h != NULL)
4039 {
4040 off = h->got.offset;
4041 offplt = elf_x86_64_hash_entry (h)->tlsdesc_got;
4042 }
4043 else
4044 {
4045 if (local_got_offsets == NULL)
4046 abort ();
4047
4048 off = local_got_offsets[r_symndx];
4049 offplt = local_tlsdesc_gotents[r_symndx];
4050 }
4051
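/* The low bit of the GOT offset records whether the GOT entry and
   its dynamic relocations have already been created.  */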
4052 if ((off & 1) != 0)
4053 off &= ~1;
4054 else
4055 {
4056 Elf_Internal_Rela outrel;
4057 int dr_type, indx;
4058 asection *sreloc;
4059
4060 if (htab->elf.srelgot == NULL)
4061 abort ();
4062
4063 indx = h && h->dynindx != -1 ? h->dynindx : 0;
4064
4065 if (GOT_TLS_GDESC_P (tls_type))
4066 {
4067 outrel.r_info = htab->r_info (indx, R_X86_64_TLSDESC);
4068 BFD_ASSERT (htab->sgotplt_jump_table_size + offplt
4069 + 2 * GOT_ENTRY_SIZE <= htab->elf.sgotplt->size);
4070 outrel.r_offset = (htab->elf.sgotplt->output_section->vma
4071 + htab->elf.sgotplt->output_offset
4072 + offplt
4073 + htab->sgotplt_jump_table_size);
4074 sreloc = htab->elf.srelplt;
4075 if (indx == 0)
4076 outrel.r_addend = relocation - elf_x86_64_dtpoff_base (info);
4077 else
4078 outrel.r_addend = 0;
4079 elf_append_rela (output_bfd, sreloc, &outrel);
4080 }
4081
4082 sreloc = htab->elf.srelgot;
4083
4084 outrel.r_offset = (htab->elf.sgot->output_section->vma
4085 + htab->elf.sgot->output_offset + off);
4086
4087 if (GOT_TLS_GD_P (tls_type))
4088 dr_type = R_X86_64_DTPMOD64;
4089 else if (GOT_TLS_GDESC_P (tls_type))
4090 goto dr_done;
4091 else
4092 dr_type = R_X86_64_TPOFF64;
4093
4094 bfd_put_64 (output_bfd, 0, htab->elf.sgot->contents + off);
4095 outrel.r_addend = 0;
4096 if ((dr_type == R_X86_64_TPOFF64
4097 || dr_type == R_X86_64_TLSDESC) && indx == 0)
4098 outrel.r_addend = relocation - elf_x86_64_dtpoff_base (info);
4099 outrel.r_info = htab->r_info (indx, dr_type);
4100
4101 elf_append_rela (output_bfd, sreloc, &outrel);
4102
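/* A GD GOT entry is a pair: DTPMOD64 fills the first slot above,
   and the DTP-relative offset goes in the second slot, either
   directly or via an R_X86_64_DTPOFF64 reloc below.  */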
4103 if (GOT_TLS_GD_P (tls_type))
4104 {
4105 if (indx == 0)
4106 {
4107 BFD_ASSERT (! unresolved_reloc);
4108 bfd_put_64 (output_bfd,
4109 relocation - elf_x86_64_dtpoff_base (info),
4110 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
4111 }
4112 else
4113 {
4114 bfd_put_64 (output_bfd, 0,
4115 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
4116 outrel.r_info = htab->r_info (indx,
4117 R_X86_64_DTPOFF64);
4118 outrel.r_offset += GOT_ENTRY_SIZE;
4119 elf_append_rela (output_bfd, sreloc,
4120 &outrel);
4121 }
4122 }
4123
4124 dr_done:
4125 if (h != NULL)
4126 h->got.offset |= 1;
4127 else
4128 local_got_offsets[r_symndx] |= 1;
4129 }
4130
4131 if (off >= (bfd_vma) -2
4132 && ! GOT_TLS_GDESC_P (tls_type))
4133 abort ();
4134 if (r_type == ELF32_R_TYPE (rel->r_info))
4135 {
4136 if (r_type == R_X86_64_GOTPC32_TLSDESC
4137 || r_type == R_X86_64_TLSDESC_CALL)
4138 relocation = htab->elf.sgotplt->output_section->vma
4139 + htab->elf.sgotplt->output_offset
4140 + offplt + htab->sgotplt_jump_table_size;
4141 else
4142 relocation = htab->elf.sgot->output_section->vma
4143 + htab->elf.sgot->output_offset + off;
4144 unresolved_reloc = FALSE;
4145 }
4146 else
4147 {
4148 bfd_vma roff = rel->r_offset;
4149
4150 if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSGD)
4151 {
4152 /* GD->IE transition. For 64bit, change
4153 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
4154 .word 0x6666; rex64; call __tls_get_addr@plt
4155 into:
4156 movq %fs:0, %rax
4157 addq foo@gottpoff(%rip), %rax
4158 For 32bit, change
4159 leaq foo@tlsgd(%rip), %rdi
4160 .word 0x6666; rex64; call __tls_get_addr@plt
4161 into:
4162 movl %fs:0, %eax
4163 addq foo@gottpoff(%rip), %rax */
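/* The @gottpoff displacement is stored at roff + 8 and is
   PC-relative to roff + 12, the address just past that field.  */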
4164 if (ABI_64_P (output_bfd))
4165 memcpy (contents + roff - 4,
4166 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
4167 16);
4168 else
4169 memcpy (contents + roff - 3,
4170 "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
4171 15);
4172
4173 relocation = (htab->elf.sgot->output_section->vma
4174 + htab->elf.sgot->output_offset + off
4175 - roff
4176 - input_section->output_section->vma
4177 - input_section->output_offset
4178 - 12);
4179 bfd_put_32 (output_bfd, relocation,
4180 contents + roff + 8);
4181 /* Skip R_X86_64_PLT32. */
4182 rel++;
4183 continue;
4184 }
4185 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTPC32_TLSDESC)
4186 {
4187 /* GDesc -> IE transition.
4188 It's originally something like:
4189 leaq x@tlsdesc(%rip), %rax
4190
4191 Change it to:
4192 movq x@gottpoff(%rip), %rax # before xchg %ax,%ax. */
4193
4194 /* Now modify the instruction as appropriate. To
4195 turn a leaq into a movq in the form we use it, it
4196 suffices to change the second byte from 0x8d to
4197 0x8b. */
4198 bfd_put_8 (output_bfd, 0x8b, contents + roff - 2);
4199
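/* The stored value is PC-relative to roff + 4, the end of the
   rewritten movq instruction.  */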
4200 bfd_put_32 (output_bfd,
4201 htab->elf.sgot->output_section->vma
4202 + htab->elf.sgot->output_offset + off
4203 - rel->r_offset
4204 - input_section->output_section->vma
4205 - input_section->output_offset
4206 - 4,
4207 contents + roff);
4208 continue;
4209 }
4210 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSDESC_CALL)
4211 {
4212 /* GDesc -> IE transition.
4213 It's originally:
4214 call *(%rax)
4215
4216 Change it to:
4217 xchg %ax, %ax. */
4218
4219 bfd_put_8 (output_bfd, 0x66, contents + roff);
4220 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
4221 continue;
4222 }
4223 else
4224 BFD_ASSERT (FALSE);
4225 }
4226 break;
4227
4228 case R_X86_64_TLSLD:
4229 if (! elf_x86_64_tls_transition (info, input_bfd,
4230 input_section, contents,
4231 symtab_hdr, sym_hashes,
4232 &r_type, GOT_UNKNOWN,
4233 rel, relend, h, r_symndx))
4234 return FALSE;
4235
4236 if (r_type != R_X86_64_TLSLD)
4237 {
4238 /* LD->LE transition:
4239 leaq foo@tlsld(%rip), %rdi; call __tls_get_addr.
4240 For 64bit, we change it into:
4241 .word 0x6666; .byte 0x66; movq %fs:0, %rax.
4242 For 32bit, we change it into:
4243 nopl 0x0(%rax); movl %fs:0, %eax. */
4244
4245 BFD_ASSERT (r_type == R_X86_64_TPOFF32);
4246 if (ABI_64_P (output_bfd))
4247 memcpy (contents + rel->r_offset - 3,
4248 "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12);
4249 else
4250 memcpy (contents + rel->r_offset - 3,
4251 "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12);
4252 /* Skip R_X86_64_PC32/R_X86_64_PLT32. */
4253 rel++;
4254 continue;
4255 }
4256
4257 if (htab->elf.sgot == NULL)
4258 abort ();
4259
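/* All R_X86_64_TLSLD references share a single GOT entry that
   holds just the module ID: an R_X86_64_DTPMOD64 reloc with a
   zero offset in the second slot.  */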
4260 off = htab->tls_ld_got.offset;
4261 if (off & 1)
4262 off &= ~1;
4263 else
4264 {
4265 Elf_Internal_Rela outrel;
4266
4267 if (htab->elf.srelgot == NULL)
4268 abort ();
4269
4270 outrel.r_offset = (htab->elf.sgot->output_section->vma
4271 + htab->elf.sgot->output_offset + off);
4272
4273 bfd_put_64 (output_bfd, 0,
4274 htab->elf.sgot->contents + off);
4275 bfd_put_64 (output_bfd, 0,
4276 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
4277 outrel.r_info = htab->r_info (0, R_X86_64_DTPMOD64);
4278 outrel.r_addend = 0;
4279 elf_append_rela (output_bfd, htab->elf.srelgot,
4280 &outrel);
4281 htab->tls_ld_got.offset |= 1;
4282 }
4283 relocation = htab->elf.sgot->output_section->vma
4284 + htab->elf.sgot->output_offset + off;
4285 unresolved_reloc = FALSE;
4286 break;
4287
4288 case R_X86_64_DTPOFF32:
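/* When linking an executable, the LD->LE transition has rewritten
   the code sequence, so an in-code @dtpoff operand must become a
   TP-relative offset; elsewhere (e.g. debug info) the value stays
   DTP-relative.  */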
4289 if (!info->executable || (input_section->flags & SEC_CODE) == 0)
4290 relocation -= elf_x86_64_dtpoff_base (info);
4291 else
4292 relocation = elf_x86_64_tpoff (info, relocation);
4293 break;
4294
4295 case R_X86_64_TPOFF32:
4296 case R_X86_64_TPOFF64:
4297 BFD_ASSERT (info->executable);
4298 relocation = elf_x86_64_tpoff (info, relocation);
4299 break;
4300
4301 default:
4302 break;
4303 }
4304
4305 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
4306 because such sections are not SEC_ALLOC and thus ld.so will
4307 not process them. */
4308 if (unresolved_reloc
4309 && !((input_section->flags & SEC_DEBUGGING) != 0
4310 && h->def_dynamic)
4311 && _bfd_elf_section_offset (output_bfd, info, input_section,
4312 rel->r_offset) != (bfd_vma) -1)
4313 {
4314 (*_bfd_error_handler)
4315 (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
4316 input_bfd,
4317 input_section,
4318 (long) rel->r_offset,
4319 howto->name,
4320 h->root.root.string);
4321 return FALSE;
4322 }
4323
4324 do_relocation:
4325 r = _bfd_final_link_relocate (howto, input_bfd, input_section,
4326 contents, rel->r_offset,
4327 relocation, rel->r_addend);
4328
4329 check_relocation_error:
4330 if (r != bfd_reloc_ok)
4331 {
4332 const char *name;
4333
4334 if (h != NULL)
4335 name = h->root.root.string;
4336 else
4337 {
4338 name = bfd_elf_string_from_elf_section (input_bfd,
4339 symtab_hdr->sh_link,
4340 sym->st_name);
4341 if (name == NULL)
4342 return FALSE;
4343 if (*name == '\0')
4344 name = bfd_section_name (input_bfd, sec);
4345 }
4346
4347 if (r == bfd_reloc_overflow)
4348 {
4349 if (! ((*info->callbacks->reloc_overflow)
4350 (info, (h ? &h->root : NULL), name, howto->name,
4351 (bfd_vma) 0, input_bfd, input_section,
4352 rel->r_offset)))
4353 return FALSE;
4354 }
4355 else
4356 {
4357 (*_bfd_error_handler)
4358 (_("%B(%A+0x%lx): reloc against `%s': error %d"),
4359 input_bfd, input_section,
4360 (long) rel->r_offset, name, (int) r);
4361 return FALSE;
4362 }
4363 }
4364 }
4365
4366 return TRUE;
4367 }
4368
4369 /* Finish up dynamic symbol handling. We set the contents of various
4370 dynamic sections here. */
4371
4372 static bfd_boolean
4373 elf_x86_64_finish_dynamic_symbol (bfd *output_bfd,
4374 struct bfd_link_info *info,
4375 struct elf_link_hash_entry *h,
4376 Elf_Internal_Sym *sym ATTRIBUTE_UNUSED)
4377 {
4378 struct elf_x86_64_link_hash_table *htab;
4379 const struct elf_x86_64_backend_data *const abed
4380 = get_elf_x86_64_backend_data (output_bfd);
4381
4382 htab = elf_x86_64_hash_table (info);
4383 if (htab == NULL)
4384 return FALSE;
4385
4386 if (h->plt.offset != (bfd_vma) -1)
4387 {
4388 bfd_vma plt_index;
4389 bfd_vma got_offset;
4390 Elf_Internal_Rela rela;
4391 bfd_byte *loc;
4392 asection *plt, *gotplt, *relplt;
4393 const struct elf_backend_data *bed;
4394
4395 /* When building a static executable, use .iplt, .igot.plt and
4396 .rela.iplt sections for STT_GNU_IFUNC symbols. */
4397 if (htab->elf.splt != NULL)
4398 {
4399 plt = htab->elf.splt;
4400 gotplt = htab->elf.sgotplt;
4401 relplt = htab->elf.srelplt;
4402 }
4403 else
4404 {
4405 plt = htab->elf.iplt;
4406 gotplt = htab->elf.igotplt;
4407 relplt = htab->elf.irelplt;
4408 }
4409
4410 /* This symbol has an entry in the procedure linkage table. Set
4411 it up. */
4412 if ((h->dynindx == -1
4413 && !((h->forced_local || info->executable)
4414 && h->def_regular
4415 && h->type == STT_GNU_IFUNC))
4416 || plt == NULL
4417 || gotplt == NULL
4418 || relplt == NULL)
4419 abort ();
4420
4421 /* Get the index in the procedure linkage table which
4422 corresponds to this symbol. This is the index of this symbol
4423 in all the symbols for which we are making plt entries. The
4424 first entry in the procedure linkage table is reserved.
4425
4426 Get the offset into the .got table of the entry that
4427 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
4428 bytes. The first three are reserved for the dynamic linker.
4429
4430 For static executables, we don't reserve anything. */
4431
4432 if (plt == htab->elf.splt)
4433 {
4434 got_offset = h->plt.offset / abed->plt_entry_size - 1;
4435 got_offset = (got_offset + 3) * GOT_ENTRY_SIZE;
4436 }
4437 else
4438 {
4439 got_offset = h->plt.offset / abed->plt_entry_size;
4440 got_offset = got_offset * GOT_ENTRY_SIZE;
4441 }
4442
4443 /* Fill in the entry in the procedure linkage table. */
4444 memcpy (plt->contents + h->plt.offset, abed->plt_entry,
4445 abed->plt_entry_size);
4446
4447 /* Insert the relocation values into the PLT entry. */
4448
4449 /* Put in the offset of the GOT entry, relative to the end of the
4450 PC-relative instruction that refers to it. */
4451 bfd_put_32 (output_bfd,
4452 (gotplt->output_section->vma
4453 + gotplt->output_offset
4454 + got_offset
4455 - plt->output_section->vma
4456 - plt->output_offset
4457 - h->plt.offset
4458 - abed->plt_got_insn_size),
4459 plt->contents + h->plt.offset + abed->plt_got_offset);
4460
4461 /* Fill in the entry in the global offset table; initially this
4462 points to the second part of the PLT entry. */
4463 bfd_put_64 (output_bfd, (plt->output_section->vma
4464 + plt->output_offset
4465 + h->plt.offset + abed->plt_lazy_offset),
4466 gotplt->contents + got_offset);
4467
4468 /* Fill in the entry in the .rela.plt section. */
4469 rela.r_offset = (gotplt->output_section->vma
4470 + gotplt->output_offset
4471 + got_offset);
4472 if (h->dynindx == -1
4473 || ((info->executable
4474 || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
4475 && h->def_regular
4476 && h->type == STT_GNU_IFUNC))
4477 {
4478 /* If an STT_GNU_IFUNC symbol is locally defined, generate
4479 R_X86_64_IRELATIVE instead of R_X86_64_JUMP_SLOT. */
4480 rela.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
4481 rela.r_addend = (h->root.u.def.value
4482 + h->root.u.def.section->output_section->vma
4483 + h->root.u.def.section->output_offset);
4484 /* R_X86_64_IRELATIVE comes last. */
4485 plt_index = htab->next_irelative_index--;
4486 }
4487 else
4488 {
4489 rela.r_info = htab->r_info (h->dynindx, R_X86_64_JUMP_SLOT);
4490 rela.r_addend = 0;
4491 plt_index = htab->next_jump_slot_index++;
4492 }
4493
4494 /* Don't fill PLT entry for static executables. */
4495 if (plt == htab->elf.splt)
4496 {
4497 /* Put relocation index. */
4498 bfd_put_32 (output_bfd, plt_index,
4499 plt->contents + h->plt.offset + abed->plt_reloc_offset);
4500 /* Put offset for jmp .PLT0. */
4501 bfd_put_32 (output_bfd, - (h->plt.offset + abed->plt_plt_insn_end),
4502 plt->contents + h->plt.offset + abed->plt_plt_offset);
4503 }
4504
4505 bed = get_elf_backend_data (output_bfd);
4506 loc = relplt->contents + plt_index * bed->s->sizeof_rela;
4507 bed->s->swap_reloca_out (output_bfd, &rela, loc);
4508
4509 if (!h->def_regular)
4510 {
4511 /* Mark the symbol as undefined, rather than as defined in
4512 the .plt section. Leave the value if there were any
4513 relocations where pointer equality matters (this is a clue
4514 for the dynamic linker, to make function pointer
4515 comparisons work between an application and shared
4516 library), otherwise set it to zero. If a function is only
4517 called from a binary, there is no need to slow down
4518 shared libraries because of that. */
4519 sym->st_shndx = SHN_UNDEF;
4520 if (!h->pointer_equality_needed)
4521 sym->st_value = 0;
4522 }
4523 }
4524
4525 if (h->got.offset != (bfd_vma) -1
4526 && ! GOT_TLS_GD_ANY_P (elf_x86_64_hash_entry (h)->tls_type)
4527 && elf_x86_64_hash_entry (h)->tls_type != GOT_TLS_IE)
4528 {
4529 Elf_Internal_Rela rela;
4530
4531 /* This symbol has an entry in the global offset table. Set it
4532 up. */
4533 if (htab->elf.sgot == NULL || htab->elf.srelgot == NULL)
4534 abort ();
4535
4536 rela.r_offset = (htab->elf.sgot->output_section->vma
4537 + htab->elf.sgot->output_offset
4538 + (h->got.offset &~ (bfd_vma) 1));
4539
4540 /* If this is a static link, or it is a -Bsymbolic link and the
4541 symbol is defined locally or was forced to be local because
4542 of a version file, we just want to emit a RELATIVE reloc.
4543 The entry in the global offset table will already have been
4544 initialized in the relocate_section function. */
4545 if (h->def_regular
4546 && h->type == STT_GNU_IFUNC)
4547 {
4548 if (info->shared)
4549 {
4550 /* Generate R_X86_64_GLOB_DAT. */
4551 goto do_glob_dat;
4552 }
4553 else
4554 {
4555 asection *plt;
4556
4557 if (!h->pointer_equality_needed)
4558 abort ();
4559
4560 /* For a non-shared object, we can't use .got.plt, which
4561 contains the real function address, if we need pointer
4562 equality. We load the GOT entry with the PLT entry. */
4563 plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt;
4564 bfd_put_64 (output_bfd, (plt->output_section->vma
4565 + plt->output_offset
4566 + h->plt.offset),
4567 htab->elf.sgot->contents + h->got.offset);
4568 return TRUE;
4569 }
4570 }
4571 else if (info->shared
4572 && SYMBOL_REFERENCES_LOCAL (info, h))
4573 {
4574 if (!h->def_regular)
4575 return FALSE;
4576 BFD_ASSERT((h->got.offset & 1) != 0);
4577 rela.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4578 rela.r_addend = (h->root.u.def.value
4579 + h->root.u.def.section->output_section->vma
4580 + h->root.u.def.section->output_offset);
4581 }
4582 else
4583 {
4584 BFD_ASSERT((h->got.offset & 1) == 0);
4585 do_glob_dat:
4586 bfd_put_64 (output_bfd, (bfd_vma) 0,
4587 htab->elf.sgot->contents + h->got.offset);
4588 rela.r_info = htab->r_info (h->dynindx, R_X86_64_GLOB_DAT);
4589 rela.r_addend = 0;
4590 }
4591
4592 elf_append_rela (output_bfd, htab->elf.srelgot, &rela);
4593 }
4594
4595 if (h->needs_copy)
4596 {
4597 Elf_Internal_Rela rela;
4598
4599 /* This symbol needs a copy reloc. Set it up. */
4600
4601 if (h->dynindx == -1
4602 || (h->root.type != bfd_link_hash_defined
4603 && h->root.type != bfd_link_hash_defweak)
4604 || htab->srelbss == NULL)
4605 abort ();
4606
4607 rela.r_offset = (h->root.u.def.value
4608 + h->root.u.def.section->output_section->vma
4609 + h->root.u.def.section->output_offset);
4610 rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY);
4611 rela.r_addend = 0;
4612 elf_append_rela (output_bfd, htab->srelbss, &rela);
4613 }
4614
4615 return TRUE;
4616 }
4617
4618 /* Finish up local dynamic symbol handling. We set the contents of
4619 various dynamic sections here. */
4620
4621 static bfd_boolean
4622 elf_x86_64_finish_local_dynamic_symbol (void **slot, void *inf)
4623 {
4624 struct elf_link_hash_entry *h
4625 = (struct elf_link_hash_entry *) *slot;
4626 struct bfd_link_info *info
4627 = (struct bfd_link_info *) inf;
4628
4629 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4630 info, h, NULL);
4631 }
4632
4633 /* Used to decide how to sort relocs in an optimal manner for the
4634 dynamic linker, before writing them out. */
4635
4636 static enum elf_reloc_type_class
4637 elf_x86_64_reloc_type_class (const Elf_Internal_Rela *rela)
4638 {
4639 switch ((int) ELF32_R_TYPE (rela->r_info))
4640 {
4641 case R_X86_64_RELATIVE:
4642 case R_X86_64_RELATIVE64:
4643 return reloc_class_relative;
4644 case R_X86_64_JUMP_SLOT:
4645 return reloc_class_plt;
4646 case R_X86_64_COPY:
4647 return reloc_class_copy;
4648 default:
4649 return reloc_class_normal;
4650 }
4651 }
4652
4653 /* Finish up the dynamic sections. */
4654
4655 static bfd_boolean
4656 elf_x86_64_finish_dynamic_sections (bfd *output_bfd,
4657 struct bfd_link_info *info)
4658 {
4659 struct elf_x86_64_link_hash_table *htab;
4660 bfd *dynobj;
4661 asection *sdyn;
4662 const struct elf_x86_64_backend_data *const abed
4663 = get_elf_x86_64_backend_data (output_bfd);
4664
4665 htab = elf_x86_64_hash_table (info);
4666 if (htab == NULL)
4667 return FALSE;
4668
4669 dynobj = htab->elf.dynobj;
4670 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
4671
4672 if (htab->elf.dynamic_sections_created)
4673 {
4674 bfd_byte *dyncon, *dynconend;
4675 const struct elf_backend_data *bed;
4676 bfd_size_type sizeof_dyn;
4677
4678 if (sdyn == NULL || htab->elf.sgot == NULL)
4679 abort ();
4680
4681 bed = get_elf_backend_data (dynobj);
4682 sizeof_dyn = bed->s->sizeof_dyn;
4683 dyncon = sdyn->contents;
4684 dynconend = sdyn->contents + sdyn->size;
4685 for (; dyncon < dynconend; dyncon += sizeof_dyn)
4686 {
4687 Elf_Internal_Dyn dyn;
4688 asection *s;
4689
4690 (*bed->s->swap_dyn_in) (dynobj, dyncon, &dyn);
4691
4692 switch (dyn.d_tag)
4693 {
4694 default:
4695 continue;
4696
4697 case DT_PLTGOT:
4698 s = htab->elf.sgotplt;
4699 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
4700 break;
4701
4702 case DT_JMPREL:
4703 dyn.d_un.d_ptr = htab->elf.srelplt->output_section->vma;
4704 break;
4705
4706 case DT_PLTRELSZ:
4707 s = htab->elf.srelplt->output_section;
4708 dyn.d_un.d_val = s->size;
4709 break;
4710
4711 case DT_RELASZ:
4712 /* The procedure linkage table relocs (DT_JMPREL) should
4713 not be included in the overall relocs (DT_RELA).
4714 Therefore, we override the DT_RELASZ entry here to
4715 make it not include the JMPREL relocs. Since the
4716 linker script arranges for .rela.plt to follow all
4717 other relocation sections, we don't have to worry
4718 about changing the DT_RELA entry. */
4719 if (htab->elf.srelplt != NULL)
4720 {
4721 s = htab->elf.srelplt->output_section;
4722 dyn.d_un.d_val -= s->size;
4723 }
4724 break;
4725
4726 case DT_TLSDESC_PLT:
4727 s = htab->elf.splt;
4728 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
4729 + htab->tlsdesc_plt;
4730 break;
4731
4732 case DT_TLSDESC_GOT:
4733 s = htab->elf.sgot;
4734 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
4735 + htab->tlsdesc_got;
4736 break;
4737 }
4738
4739 (*bed->s->swap_dyn_out) (output_bfd, &dyn, dyncon);
4740 }
4741
4742 /* Fill in the special first entry in the procedure linkage table. */
4743 if (htab->elf.splt && htab->elf.splt->size > 0)
4744 {
4745 /* Fill in the first entry in the procedure linkage table. */
4746 memcpy (htab->elf.splt->contents,
4747 abed->plt0_entry, abed->plt_entry_size);
4748 /* Add the offset for pushq GOT+8(%rip); since the instruction
4749 is 6 bytes long, subtract 6 from this value. */
4750 bfd_put_32 (output_bfd,
4751 (htab->elf.sgotplt->output_section->vma
4752 + htab->elf.sgotplt->output_offset
4753 + 8
4754 - htab->elf.splt->output_section->vma
4755 - htab->elf.splt->output_offset
4756 - 6),
4757 htab->elf.splt->contents + abed->plt0_got1_offset);
4758 /* Add offset for the PC-relative instruction accessing GOT+16,
4759 subtracting the offset to the end of that instruction. */
4760 bfd_put_32 (output_bfd,
4761 (htab->elf.sgotplt->output_section->vma
4762 + htab->elf.sgotplt->output_offset
4763 + 16
4764 - htab->elf.splt->output_section->vma
4765 - htab->elf.splt->output_offset
4766 - abed->plt0_got2_insn_end),
4767 htab->elf.splt->contents + abed->plt0_got2_offset);
4768
4769 elf_section_data (htab->elf.splt->output_section)
4770 ->this_hdr.sh_entsize = abed->plt_entry_size;
4771
4772 if (htab->tlsdesc_plt)
4773 {
4774 bfd_put_64 (output_bfd, (bfd_vma) 0,
4775 htab->elf.sgot->contents + htab->tlsdesc_got);
4776
4777 memcpy (htab->elf.splt->contents + htab->tlsdesc_plt,
4778 abed->plt0_entry, abed->plt_entry_size);
4779
4780 /* Add the offset for pushq GOT+8(%rip); since the
4781 instruction is 6 bytes long, subtract 6 from this value. */
4782 bfd_put_32 (output_bfd,
4783 (htab->elf.sgotplt->output_section->vma
4784 + htab->elf.sgotplt->output_offset
4785 + 8
4786 - htab->elf.splt->output_section->vma
4787 - htab->elf.splt->output_offset
4788 - htab->tlsdesc_plt
4789 - 6),
4790 htab->elf.splt->contents
4791 + htab->tlsdesc_plt + abed->plt0_got1_offset);
4792 /* Add the offset for the PC-relative instruction accessing GOT+TDG,
4793 where TDG stands for htab->tlsdesc_got, subtracting the offset
4794 to the end of that instruction. */
4795 bfd_put_32 (output_bfd,
4796 (htab->elf.sgot->output_section->vma
4797 + htab->elf.sgot->output_offset
4798 + htab->tlsdesc_got
4799 - htab->elf.splt->output_section->vma
4800 - htab->elf.splt->output_offset
4801 - htab->tlsdesc_plt
4802 - abed->plt0_got2_insn_end),
4803 htab->elf.splt->contents
4804 + htab->tlsdesc_plt + abed->plt0_got2_offset);
4805 }
4806 }
4807 }
4808
4809 if (htab->elf.sgotplt)
4810 {
4811 if (bfd_is_abs_section (htab->elf.sgotplt->output_section))
4812 {
4813 (*_bfd_error_handler)
4814 (_("discarded output section: `%A'"), htab->elf.sgotplt);
4815 return FALSE;
4816 }
4817
4818 /* Fill in the first three entries in the global offset table. */
4819 if (htab->elf.sgotplt->size > 0)
4820 {
4821 /* Set the first entry in the global offset table to the address of
4822 the dynamic section. */
4823 if (sdyn == NULL)
4824 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents);
4825 else
4826 bfd_put_64 (output_bfd,
4827 sdyn->output_section->vma + sdyn->output_offset,
4828 htab->elf.sgotplt->contents);
4829 /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
4830 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents + GOT_ENTRY_SIZE);
4831 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents + GOT_ENTRY_SIZE*2);
4832 }
4833
4834 elf_section_data (htab->elf.sgotplt->output_section)->this_hdr.sh_entsize =
4835 GOT_ENTRY_SIZE;
4836 }
4837
4838 /* Adjust .eh_frame for .plt section. */
4839 if (htab->plt_eh_frame != NULL
4840 && htab->plt_eh_frame->contents != NULL)
4841 {
4842 if (htab->elf.splt != NULL
4843 && htab->elf.splt->size != 0
4844 && (htab->elf.splt->flags & SEC_EXCLUDE) == 0
4845 && htab->elf.splt->output_section != NULL
4846 && htab->plt_eh_frame->output_section != NULL)
4847 {
4848 bfd_vma plt_start = htab->elf.splt->output_section->vma;
4849 bfd_vma eh_frame_start = htab->plt_eh_frame->output_section->vma
4850 + htab->plt_eh_frame->output_offset
4851 + PLT_FDE_START_OFFSET;
4852 bfd_put_signed_32 (dynobj, plt_start - eh_frame_start,
4853 htab->plt_eh_frame->contents
4854 + PLT_FDE_START_OFFSET);
4855 }
4856 if (htab->plt_eh_frame->sec_info_type == SEC_INFO_TYPE_EH_FRAME)
4857 {
4858 if (! _bfd_elf_write_section_eh_frame (output_bfd, info,
4859 htab->plt_eh_frame,
4860 htab->plt_eh_frame->contents))
4861 return FALSE;
4862 }
4863 }
4864
4865 if (htab->elf.sgot && htab->elf.sgot->size > 0)
4866 elf_section_data (htab->elf.sgot->output_section)->this_hdr.sh_entsize
4867 = GOT_ENTRY_SIZE;
4868
4869 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
4870 htab_traverse (htab->loc_hash_table,
4871 elf_x86_64_finish_local_dynamic_symbol,
4872 info);
4873
4874 return TRUE;
4875 }
4876
4877 /* Return address for Ith PLT stub in section PLT, for relocation REL
4878 or (bfd_vma) -1 if it should not be included. */
4879
4880 static bfd_vma
4881 elf_x86_64_plt_sym_val (bfd_vma i, const asection *plt,
4882 const arelent *rel ATTRIBUTE_UNUSED)
4883 {
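/* Entry 0 is the reserved PLT0 header, so stub I starts at
   (I + 1) * GET_PLT_ENTRY_SIZE from the start of .plt.  */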
4884 return plt->vma + (i + 1) * GET_PLT_ENTRY_SIZE (plt->owner);
4885 }
4886
4887 /* Handle an x86-64 specific section when reading an object file. This
4888 is called when elfcode.h finds a section with an unknown type. */
4889
4890 static bfd_boolean
4891 elf_x86_64_section_from_shdr (bfd *abfd,
4892 Elf_Internal_Shdr *hdr,
4893 const char *name,
4894 int shindex)
4895 {
4896 if (hdr->sh_type != SHT_X86_64_UNWIND)
4897 return FALSE;
4898
4899 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
4900 return FALSE;
4901
4902 return TRUE;
4903 }
4904
4905 /* Hook called by the linker routine which adds symbols from an object
4906 file. We use it to put SHN_X86_64_LCOMMON items in .lbss, instead
4907 of .bss. */
4908
4909 static bfd_boolean
4910 elf_x86_64_add_symbol_hook (bfd *abfd,
4911 struct bfd_link_info *info,
4912 Elf_Internal_Sym *sym,
4913 const char **namep ATTRIBUTE_UNUSED,
4914 flagword *flagsp ATTRIBUTE_UNUSED,
4915 asection **secp,
4916 bfd_vma *valp)
4917 {
4918 asection *lcomm;
4919
4920 switch (sym->st_shndx)
4921 {
4922 case SHN_X86_64_LCOMMON:
4923 lcomm = bfd_get_section_by_name (abfd, "LARGE_COMMON");
4924 if (lcomm == NULL)
4925 {
4926 lcomm = bfd_make_section_with_flags (abfd,
4927 "LARGE_COMMON",
4928 (SEC_ALLOC
4929 | SEC_IS_COMMON
4930 | SEC_LINKER_CREATED));
4931 if (lcomm == NULL)
4932 return FALSE;
4933 elf_section_flags (lcomm) |= SHF_X86_64_LARGE;
4934 }
4935 *secp = lcomm;
4936 *valp = sym->st_size;
4937 return TRUE;
4938 }
4939
4940 if ((abfd->flags & DYNAMIC) == 0
4941 && (ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC
4942 || ELF_ST_BIND (sym->st_info) == STB_GNU_UNIQUE))
4943 elf_tdata (info->output_bfd)->has_gnu_symbols = TRUE;
4944
4945 return TRUE;
4946 }
4947
4948
4949 /* Given a BFD section, try to locate the corresponding ELF section
4950 index. */
4951
4952 static bfd_boolean
4953 elf_x86_64_elf_section_from_bfd_section (bfd *abfd ATTRIBUTE_UNUSED,
4954 asection *sec, int *index_return)
4955 {
4956 if (sec == &_bfd_elf_large_com_section)
4957 {
4958 *index_return = SHN_X86_64_LCOMMON;
4959 return TRUE;
4960 }
4961 return FALSE;
4962 }
4963
4964 /* Process a symbol. */
4965
4966 static void
4967 elf_x86_64_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
4968 asymbol *asym)
4969 {
4970 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
4971
4972 switch (elfsym->internal_elf_sym.st_shndx)
4973 {
4974 case SHN_X86_64_LCOMMON:
4975 asym->section = &_bfd_elf_large_com_section;
4976 asym->value = elfsym->internal_elf_sym.st_size;
4977 /* Common symbol doesn't set BSF_GLOBAL. */
4978 asym->flags &= ~BSF_GLOBAL;
4979 break;
4980 }
4981 }
4982
4983 static bfd_boolean
4984 elf_x86_64_common_definition (Elf_Internal_Sym *sym)
4985 {
4986 return (sym->st_shndx == SHN_COMMON
4987 || sym->st_shndx == SHN_X86_64_LCOMMON);
4988 }
4989
4990 static unsigned int
4991 elf_x86_64_common_section_index (asection *sec)
4992 {
4993 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
4994 return SHN_COMMON;
4995 else
4996 return SHN_X86_64_LCOMMON;
4997 }
4998
4999 static asection *
5000 elf_x86_64_common_section (asection *sec)
5001 {
5002 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
5003 return bfd_com_section_ptr;
5004 else
5005 return &_bfd_elf_large_com_section;
5006 }
5007
5008 static bfd_boolean
5009 elf_x86_64_merge_symbol (struct bfd_link_info *info ATTRIBUTE_UNUSED,
5010 struct elf_link_hash_entry **sym_hash ATTRIBUTE_UNUSED,
5011 struct elf_link_hash_entry *h,
5012 Elf_Internal_Sym *sym,
5013 asection **psec,
5014 bfd_vma *pvalue ATTRIBUTE_UNUSED,
5015 unsigned int *pold_alignment ATTRIBUTE_UNUSED,
5016 bfd_boolean *skip ATTRIBUTE_UNUSED,
5017 bfd_boolean *override ATTRIBUTE_UNUSED,
5018 bfd_boolean *type_change_ok ATTRIBUTE_UNUSED,
5019 bfd_boolean *size_change_ok ATTRIBUTE_UNUSED,
5020 bfd_boolean *newdyn ATTRIBUTE_UNUSED,
5021 bfd_boolean *newdef,
5022 bfd_boolean *newdyncommon ATTRIBUTE_UNUSED,
5023 bfd_boolean *newweak ATTRIBUTE_UNUSED,
5024 bfd *abfd ATTRIBUTE_UNUSED,
5025 asection **sec,
5026 bfd_boolean *olddyn ATTRIBUTE_UNUSED,
5027 bfd_boolean *olddef,
5028 bfd_boolean *olddyncommon ATTRIBUTE_UNUSED,
5029 bfd_boolean *oldweak ATTRIBUTE_UNUSED,
5030 bfd *oldbfd,
5031 asection **oldsec)
5032 {
5033 /* A normal common symbol and a large common symbol result in a
5034 normal common symbol. We turn the large common symbol into a
5035 normal one. */
5036 if (!*olddef
5037 && h->root.type == bfd_link_hash_common
5038 && !*newdef
5039 && bfd_is_com_section (*sec)
5040 && *oldsec != *sec)
5041 {
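/* If the new symbol is a normal common while the old one lives in
   a large common section, move the old symbol into an ordinary
   COMMON section; in the opposite case steer the new symbol into
   the normal common section.  */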
5042 if (sym->st_shndx == SHN_COMMON
5043 && (elf_section_flags (*oldsec) & SHF_X86_64_LARGE) != 0)
5044 {
5045 h->root.u.c.p->section
5046 = bfd_make_section_old_way (oldbfd, "COMMON");
5047 h->root.u.c.p->section->flags = SEC_ALLOC;
5048 }
5049 else if (sym->st_shndx == SHN_X86_64_LCOMMON
5050 && (elf_section_flags (*oldsec) & SHF_X86_64_LARGE) == 0)
5051 *psec = *sec = bfd_com_section_ptr;
5052 }
5053
5054 return TRUE;
5055 }
5056
5057 static int
5058 elf_x86_64_additional_program_headers (bfd *abfd,
5059 struct bfd_link_info *info ATTRIBUTE_UNUSED)
5060 {
5061 asection *s;
5062 int count = 0;
5063
5064 /* Check to see if we need a large readonly segment. */
5065 s = bfd_get_section_by_name (abfd, ".lrodata");
5066 if (s && (s->flags & SEC_LOAD))
5067 count++;
5068
5069 /* Check to see if we need a large data segment. Since the .lbss
5070 section is placed right after the .bss section, there should be no
5071 need for a large data segment just because of .lbss. */
5072 s = bfd_get_section_by_name (abfd, ".ldata");
5073 if (s && (s->flags & SEC_LOAD))
5074 count++;
5075
5076 return count;
5077 }
5078
5079 /* Return TRUE if symbol should be hashed in the `.gnu.hash' section. */
5080
5081 static bfd_boolean
5082 elf_x86_64_hash_symbol (struct elf_link_hash_entry *h)
5083 {
5084 if (h->plt.offset != (bfd_vma) -1
5085 && !h->def_regular
5086 && !h->pointer_equality_needed)
5087 return FALSE;
5088
5089 return _bfd_elf_hash_symbol (h);
5090 }
5091
5092 /* Return TRUE iff relocations for INPUT are compatible with OUTPUT. */
5093
5094 static bfd_boolean
5095 elf_x86_64_relocs_compatible (const bfd_target *input,
5096 const bfd_target *output)
5097 {
5098 return ((xvec_get_elf_backend_data (input)->s->elfclass
5099 == xvec_get_elf_backend_data (output)->s->elfclass)
5100 && _bfd_elf_relocs_compatible (input, output));
5101 }
5102
5103 static const struct bfd_elf_special_section
5104 elf_x86_64_special_sections[]=
5105 {
5106 { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5107 { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
5108 { STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE},
5109 { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5110 { STRING_COMMA_LEN (".ldata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5111 { STRING_COMMA_LEN (".lrodata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
5112 { NULL, 0, 0, 0, 0 }
5113 };
5114
5115 #define TARGET_LITTLE_SYM bfd_elf64_x86_64_vec
5116 #define TARGET_LITTLE_NAME "elf64-x86-64"
5117 #define ELF_ARCH bfd_arch_i386
5118 #define ELF_TARGET_ID X86_64_ELF_DATA
5119 #define ELF_MACHINE_CODE EM_X86_64
5120 #define ELF_MAXPAGESIZE 0x200000
5121 #define ELF_MINPAGESIZE 0x1000
5122 #define ELF_COMMONPAGESIZE 0x1000
5123
5124 #define elf_backend_can_gc_sections 1
5125 #define elf_backend_can_refcount 1
5126 #define elf_backend_want_got_plt 1
5127 #define elf_backend_plt_readonly 1
5128 #define elf_backend_want_plt_sym 0
5129 #define elf_backend_got_header_size (GOT_ENTRY_SIZE*3)
5130 #define elf_backend_rela_normal 1
5131 #define elf_backend_plt_alignment 4
5132
5133 #define elf_info_to_howto elf_x86_64_info_to_howto
5134
5135 #define bfd_elf64_bfd_link_hash_table_create \
5136 elf_x86_64_link_hash_table_create
5137 #define bfd_elf64_bfd_link_hash_table_free \
5138 elf_x86_64_link_hash_table_free
5139 #define bfd_elf64_bfd_reloc_type_lookup elf_x86_64_reloc_type_lookup
5140 #define bfd_elf64_bfd_reloc_name_lookup \
5141 elf_x86_64_reloc_name_lookup
5142
5143 #define elf_backend_adjust_dynamic_symbol elf_x86_64_adjust_dynamic_symbol
5144 #define elf_backend_relocs_compatible elf_x86_64_relocs_compatible
5145 #define elf_backend_check_relocs elf_x86_64_check_relocs
5146 #define elf_backend_copy_indirect_symbol elf_x86_64_copy_indirect_symbol
5147 #define elf_backend_create_dynamic_sections elf_x86_64_create_dynamic_sections
5148 #define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections
5149 #define elf_backend_finish_dynamic_symbol elf_x86_64_finish_dynamic_symbol
5150 #define elf_backend_gc_mark_hook elf_x86_64_gc_mark_hook
5151 #define elf_backend_gc_sweep_hook elf_x86_64_gc_sweep_hook
5152 #define elf_backend_grok_prstatus elf_x86_64_grok_prstatus
5153 #define elf_backend_grok_psinfo elf_x86_64_grok_psinfo
5154 #ifdef CORE_HEADER
5155 #define elf_backend_write_core_note elf_x86_64_write_core_note
5156 #endif
5157 #define elf_backend_reloc_type_class elf_x86_64_reloc_type_class
5158 #define elf_backend_relocate_section elf_x86_64_relocate_section
5159 #define elf_backend_size_dynamic_sections elf_x86_64_size_dynamic_sections
5160 #define elf_backend_always_size_sections elf_x86_64_always_size_sections
5161 #define elf_backend_init_index_section _bfd_elf_init_1_index_section
5162 #define elf_backend_plt_sym_val elf_x86_64_plt_sym_val
5163 #define elf_backend_object_p elf64_x86_64_elf_object_p
5164 #define bfd_elf64_mkobject elf_x86_64_mkobject
5165
5166 #define elf_backend_section_from_shdr \
5167 elf_x86_64_section_from_shdr
5168
5169 #define elf_backend_section_from_bfd_section \
5170 elf_x86_64_elf_section_from_bfd_section
5171 #define elf_backend_add_symbol_hook \
5172 elf_x86_64_add_symbol_hook
5173 #define elf_backend_symbol_processing \
5174 elf_x86_64_symbol_processing
5175 #define elf_backend_common_section_index \
5176 elf_x86_64_common_section_index
5177 #define elf_backend_common_section \
5178 elf_x86_64_common_section
5179 #define elf_backend_common_definition \
5180 elf_x86_64_common_definition
5181 #define elf_backend_merge_symbol \
5182 elf_x86_64_merge_symbol
5183 #define elf_backend_special_sections \
5184 elf_x86_64_special_sections
5185 #define elf_backend_additional_program_headers \
5186 elf_x86_64_additional_program_headers
5187 #define elf_backend_hash_symbol \
5188 elf_x86_64_hash_symbol
5189
5190 #define elf_backend_post_process_headers _bfd_elf_set_osabi
5191
5192 #include "elf64-target.h"
5193
5194 /* FreeBSD support. */
5195
5196 #undef TARGET_LITTLE_SYM
5197 #define TARGET_LITTLE_SYM bfd_elf64_x86_64_freebsd_vec
5198 #undef TARGET_LITTLE_NAME
5199 #define TARGET_LITTLE_NAME "elf64-x86-64-freebsd"
5200
5201 #undef ELF_OSABI
5202 #define ELF_OSABI ELFOSABI_FREEBSD
5203
5204 #undef elf64_bed
5205 #define elf64_bed elf64_x86_64_fbsd_bed
5206
5207 #include "elf64-target.h"
5208
5209 /* Solaris 2 support. */
5210
5211 #undef TARGET_LITTLE_SYM
5212 #define TARGET_LITTLE_SYM bfd_elf64_x86_64_sol2_vec
5213 #undef TARGET_LITTLE_NAME
5214 #define TARGET_LITTLE_NAME "elf64-x86-64-sol2"
5215
5216 /* Restore default: we cannot use ELFOSABI_SOLARIS, otherwise ELFOSABI_NONE
5217 objects won't be recognized. */
5218 #undef ELF_OSABI
5219
5220 #undef elf64_bed
5221 #define elf64_bed elf64_x86_64_sol2_bed
5222
5223 /* The 64-bit static TLS arena size is rounded to the nearest 16-byte
5224 boundary. */
5225 #undef elf_backend_static_tls_alignment
5226 #define elf_backend_static_tls_alignment 16
5227
5228 /* The Solaris 2 ABI requires a plt symbol on all platforms.
5229
5230 Cf. Linker and Libraries Guide, Ch. 2, Link-Editor, Generating the Output
5231 File, p.63. */
5232 #undef elf_backend_want_plt_sym
5233 #define elf_backend_want_plt_sym 1
5234
5235 #include "elf64-target.h"
5236
5237 /* Native Client support. */
5238
5239 #undef TARGET_LITTLE_SYM
5240 #define TARGET_LITTLE_SYM bfd_elf64_x86_64_nacl_vec
5241 #undef TARGET_LITTLE_NAME
5242 #define TARGET_LITTLE_NAME "elf64-x86-64-nacl"
5243 #undef elf64_bed
5244 #define elf64_bed elf64_x86_64_nacl_bed
5245
5246 #undef ELF_MAXPAGESIZE
5247 #undef ELF_MINPAGESIZE
5248 #undef ELF_COMMONPAGESIZE
5249 #define ELF_MAXPAGESIZE 0x10000
5250 #define ELF_MINPAGESIZE 0x10000
5251 #define ELF_COMMONPAGESIZE 0x10000
5252
5253 /* Restore defaults. */
5254 #undef ELF_OSABI
5255 #undef elf_backend_static_tls_alignment
5256 #undef elf_backend_want_plt_sym
5257 #define elf_backend_want_plt_sym 0
5258
5259 /* NaCl uses substantially different PLT entries for the same effects. */
5260
5261 #undef elf_backend_plt_alignment
5262 #define elf_backend_plt_alignment 5
5263 #define NACL_PLT_ENTRY_SIZE 64
5264 #define NACLMASK 0xe0 /* 32-byte alignment mask. */
5265
5266 static const bfd_byte elf_x86_64_nacl_plt0_entry[NACL_PLT_ENTRY_SIZE] =
5267 {
5268 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
5269 0x4c, 0x8b, 0x1d, 16, 0, 0, 0, /* mov GOT+16(%rip), %r11 */
5270 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
5271 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
5272 0x41, 0xff, 0xe3, /* jmpq *%r11 */
5273
5274 /* 9-byte nop sequence to pad out to the next 32-byte boundary. */
5275 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopl %cs:0x0(%rax,%rax,1) */
5276
5277 /* 32 bytes of nop to pad out to the standard size. */
5278 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
5279 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5280 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
5281 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5282 0x66, /* excess data32 prefix */
5283 0x90 /* nop */
5284 };
5285
5286 static const bfd_byte elf_x86_64_nacl_plt_entry[NACL_PLT_ENTRY_SIZE] =
5287 {
5288 0x4c, 0x8b, 0x1d, 0, 0, 0, 0, /* mov name@GOTPCREL(%rip),%r11 */
5289 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
5290 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
5291 0x41, 0xff, 0xe3, /* jmpq *%r11 */
5292
5293 /* 15-byte nop sequence to pad out to the next 32-byte boundary. */
5294 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
5295 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5296
5297 /* Lazy GOT entries point here (32-byte aligned). */
5298 0x68, /* pushq immediate */
5299 0, 0, 0, 0, /* replaced with index into relocation table. */
5300 0xe9, /* jmp relative */
5301 0, 0, 0, 0, /* replaced with offset to start of .plt0. */
5302
5303 /* 22 bytes of nop to pad out to the standard size. */
5304 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
5305 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5306 0x0f, 0x1f, 0x80, 0, 0, 0, 0, /* nopl 0x0(%rax) */
5307 };
5308
5309 /* .eh_frame covering the .plt section. */
5310
5311 static const bfd_byte elf_x86_64_nacl_eh_frame_plt[] =
5312 {
5313 #if (PLT_CIE_LENGTH != 20 \
5314 || PLT_FDE_LENGTH != 36 \
5315 || PLT_FDE_START_OFFSET != 4 + PLT_CIE_LENGTH + 8 \
5316 || PLT_FDE_LEN_OFFSET != 4 + PLT_CIE_LENGTH + 12)
5317 # error "Need elf_x86_64_backend_data parameters for eh_frame_plt offsets!"
5318 #endif
5319 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
5320 0, 0, 0, 0, /* CIE ID */
5321 1, /* CIE version */
5322 'z', 'R', 0, /* Augmentation string */
5323 1, /* Code alignment factor */
5324 0x78, /* Data alignment factor */
5325 16, /* Return address column */
5326 1, /* Augmentation size */
5327 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
5328 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
5329 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
5330 DW_CFA_nop, DW_CFA_nop,
5331
5332 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
5333 PLT_CIE_LENGTH + 8, 0, 0, 0,/* CIE pointer */
5334 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
5335 0, 0, 0, 0, /* .plt size goes here */
5336 0, /* Augmentation size */
5337 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
5338 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
5339 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
5340 DW_CFA_advance_loc + 58, /* DW_CFA_advance_loc: 58 to __PLT__+64 */
5341 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
5342 13, /* Block length */
5343 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
5344 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
5345 DW_OP_const1u, 63, DW_OP_and, DW_OP_const1u, 37, DW_OP_ge,
5346 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
5347 DW_CFA_nop, DW_CFA_nop
5348 };
5349
5350 static const struct elf_x86_64_backend_data elf_x86_64_nacl_arch_bed =
5351 {
5352 elf_x86_64_nacl_plt0_entry, /* plt0_entry */
5353 elf_x86_64_nacl_plt_entry, /* plt_entry */
5354 NACL_PLT_ENTRY_SIZE, /* plt_entry_size */
5355 2, /* plt0_got1_offset */
5356 9, /* plt0_got2_offset */
5357 13, /* plt0_got2_insn_end */
5358 3, /* plt_got_offset */
5359 33, /* plt_reloc_offset */
5360 38, /* plt_plt_offset */
5361 7, /* plt_got_insn_size */
5362 42, /* plt_plt_insn_end */
5363 32, /* plt_lazy_offset */
5364 elf_x86_64_nacl_eh_frame_plt, /* eh_frame_plt */
5365 sizeof (elf_x86_64_nacl_eh_frame_plt), /* eh_frame_plt_size */
5366 };
5367
5368 #undef elf_backend_arch_data
5369 #define elf_backend_arch_data &elf_x86_64_nacl_arch_bed
5370
5371 #undef elf_backend_modify_segment_map
5372 #define elf_backend_modify_segment_map nacl_modify_segment_map
5373 #undef elf_backend_modify_program_headers
5374 #define elf_backend_modify_program_headers nacl_modify_program_headers
5375
5376 #include "elf64-target.h"
5377
5378 /* Native Client x32 support. */
5379
5380 #undef TARGET_LITTLE_SYM
5381 #define TARGET_LITTLE_SYM bfd_elf32_x86_64_nacl_vec
5382 #undef TARGET_LITTLE_NAME
5383 #define TARGET_LITTLE_NAME "elf32-x86-64-nacl"
5384 #undef elf32_bed
5385 #define elf32_bed elf32_x86_64_nacl_bed
5386
5387 #define bfd_elf32_bfd_link_hash_table_create \
5388 elf_x86_64_link_hash_table_create
5389 #define bfd_elf32_bfd_link_hash_table_free \
5390 elf_x86_64_link_hash_table_free
5391 #define bfd_elf32_bfd_reloc_type_lookup \
5392 elf_x86_64_reloc_type_lookup
5393 #define bfd_elf32_bfd_reloc_name_lookup \
5394 elf_x86_64_reloc_name_lookup
5395 #define bfd_elf32_mkobject \
5396 elf_x86_64_mkobject
5397
5398 #undef elf_backend_object_p
5399 #define elf_backend_object_p \
5400 elf32_x86_64_elf_object_p
5401
5402 #undef elf_backend_bfd_from_remote_memory
5403 #define elf_backend_bfd_from_remote_memory \
5404 _bfd_elf32_bfd_from_remote_memory
5405
5406 #undef elf_backend_size_info
5407 #define elf_backend_size_info \
5408 _bfd_elf32_size_info
5409
5410 #include "elf32-target.h"
5411
5412 /* Restore defaults. */
5413 #undef elf_backend_object_p
5414 #define elf_backend_object_p elf64_x86_64_elf_object_p
5415 #undef elf_backend_bfd_from_remote_memory
5416 #undef elf_backend_size_info
5417 #undef elf_backend_modify_segment_map
5418 #undef elf_backend_modify_program_headers
5419
5420 /* Intel L1OM support. */
5421
5422 static bfd_boolean
5423 elf64_l1om_elf_object_p (bfd *abfd)
5424 {
5425 /* Set the right machine number for an L1OM elf64 file. */
5426 bfd_default_set_arch_mach (abfd, bfd_arch_l1om, bfd_mach_l1om);
5427 return TRUE;
5428 }
5429
5430 #undef TARGET_LITTLE_SYM
5431 #define TARGET_LITTLE_SYM bfd_elf64_l1om_vec
5432 #undef TARGET_LITTLE_NAME
5433 #define TARGET_LITTLE_NAME "elf64-l1om"
5434 #undef ELF_ARCH
5435 #define ELF_ARCH bfd_arch_l1om
5436
5437 #undef ELF_MACHINE_CODE
5438 #define ELF_MACHINE_CODE EM_L1OM
5439
5440 #undef ELF_OSABI
5441
5442 #undef elf64_bed
5443 #define elf64_bed elf64_l1om_bed
5444
5445 #undef elf_backend_object_p
5446 #define elf_backend_object_p elf64_l1om_elf_object_p
5447
5448 /* Restore defaults. */
5449 #undef ELF_MAXPAGESIZE
5450 #undef ELF_MINPAGESIZE
5451 #undef ELF_COMMONPAGESIZE
5452 #define ELF_MAXPAGESIZE 0x200000
5453 #define ELF_MINPAGESIZE 0x1000
5454 #define ELF_COMMONPAGESIZE 0x1000
5455 #undef elf_backend_plt_alignment
5456 #define elf_backend_plt_alignment 4
5457 #undef elf_backend_arch_data
5458 #define elf_backend_arch_data &elf_x86_64_arch_bed
5459
5460 #include "elf64-target.h"
5461
5462 /* FreeBSD L1OM support. */
5463
5464 #undef TARGET_LITTLE_SYM
5465 #define TARGET_LITTLE_SYM bfd_elf64_l1om_freebsd_vec
5466 #undef TARGET_LITTLE_NAME
5467 #define TARGET_LITTLE_NAME "elf64-l1om-freebsd"
5468
5469 #undef ELF_OSABI
5470 #define ELF_OSABI ELFOSABI_FREEBSD
5471
5472 #undef elf64_bed
5473 #define elf64_bed elf64_l1om_fbsd_bed
5474
5475 #include "elf64-target.h"
5476
5477 /* Intel K1OM support. */
5478
5479 static bfd_boolean
5480 elf64_k1om_elf_object_p (bfd *abfd)
5481 {
5482 /* Set the right machine number for an K1OM elf64 file. */
5483 bfd_default_set_arch_mach (abfd, bfd_arch_k1om, bfd_mach_k1om);
5484 return TRUE;
5485 }
5486
5487 #undef TARGET_LITTLE_SYM
5488 #define TARGET_LITTLE_SYM bfd_elf64_k1om_vec
5489 #undef TARGET_LITTLE_NAME
5490 #define TARGET_LITTLE_NAME "elf64-k1om"
5491 #undef ELF_ARCH
5492 #define ELF_ARCH bfd_arch_k1om
5493
5494 #undef ELF_MACHINE_CODE
5495 #define ELF_MACHINE_CODE EM_K1OM
5496
5497 #undef ELF_OSABI
5498
5499 #undef elf64_bed
5500 #define elf64_bed elf64_k1om_bed
5501
5502 #undef elf_backend_object_p
5503 #define elf_backend_object_p elf64_k1om_elf_object_p
5504
5505 #undef elf_backend_static_tls_alignment
5506
5507 #undef elf_backend_want_plt_sym
5508 #define elf_backend_want_plt_sym 0
5509
5510 #include "elf64-target.h"
5511
5512 /* FreeBSD K1OM support. */
5513
5514 #undef TARGET_LITTLE_SYM
5515 #define TARGET_LITTLE_SYM bfd_elf64_k1om_freebsd_vec
5516 #undef TARGET_LITTLE_NAME
5517 #define TARGET_LITTLE_NAME "elf64-k1om-freebsd"
5518
5519 #undef ELF_OSABI
5520 #define ELF_OSABI ELFOSABI_FREEBSD
5521
5522 #undef elf64_bed
5523 #define elf64_bed elf64_k1om_fbsd_bed
5524
5525 #include "elf64-target.h"
5526
5527 /* 32bit x86-64 support. */
5528
5529 #undef TARGET_LITTLE_SYM
5530 #define TARGET_LITTLE_SYM bfd_elf32_x86_64_vec
5531 #undef TARGET_LITTLE_NAME
5532 #define TARGET_LITTLE_NAME "elf32-x86-64"
5533 #undef elf32_bed
5534
5535 #undef ELF_ARCH
5536 #define ELF_ARCH bfd_arch_i386
5537
5538 #undef ELF_MACHINE_CODE
5539 #define ELF_MACHINE_CODE EM_X86_64
5540
5541 #undef ELF_OSABI
5542
5543 #undef elf_backend_object_p
5544 #define elf_backend_object_p \
5545 elf32_x86_64_elf_object_p
5546
5547 #undef elf_backend_bfd_from_remote_memory
5548 #define elf_backend_bfd_from_remote_memory \
5549 _bfd_elf32_bfd_from_remote_memory
5550
5551 #undef elf_backend_size_info
5552 #define elf_backend_size_info \
5553 _bfd_elf32_size_info
5554
5555 #include "elf32-target.h"