1 /* X86-64 specific support for ELF
2 Copyright (C) 2000-2015 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka <jh@suse.cz>.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "sysdep.h"
23 #include "bfd.h"
24 #include "bfdlink.h"
25 #include "libbfd.h"
26 #include "elf-bfd.h"
27 #include "elf-nacl.h"
28 #include "bfd_stdint.h"
29 #include "objalloc.h"
30 #include "hashtab.h"
31 #include "dwarf2.h"
32 #include "libiberty.h"
33
34 #include "elf/x86-64.h"
35
36 #ifdef CORE_HEADER
37 #include <stdarg.h>
38 #include CORE_HEADER
39 #endif
40
41 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
42 #define MINUS_ONE (~ (bfd_vma) 0)
43
44 /* Since 32-bit and 64-bit x86-64 encode the relocation type in an
45 identical manner, we use ELF32_R_TYPE instead of ELF64_R_TYPE to get
46 the relocation type. We also use ELF_ST_TYPE instead of ELF64_ST_TYPE
47 since they are the same. */
48
49 #define ABI_64_P(abfd) \
50 (get_elf_backend_data (abfd)->s->elfclass == ELFCLASS64)
51
52 /* The relocation "howto" table. Order of fields:
53 type, rightshift, size, bitsize, pc_relative, bitpos, complain_on_overflow,
54 special_function, name, partial_inplace, src_mask, dst_mask, pcrel_offset. */
55 static reloc_howto_type x86_64_elf_howto_table[] =
56 {
57 HOWTO(R_X86_64_NONE, 0, 3, 0, FALSE, 0, complain_overflow_dont,
58 bfd_elf_generic_reloc, "R_X86_64_NONE", FALSE, 0x00000000, 0x00000000,
59 FALSE),
60 HOWTO(R_X86_64_64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
61 bfd_elf_generic_reloc, "R_X86_64_64", FALSE, MINUS_ONE, MINUS_ONE,
62 FALSE),
63 HOWTO(R_X86_64_PC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
64 bfd_elf_generic_reloc, "R_X86_64_PC32", FALSE, 0xffffffff, 0xffffffff,
65 TRUE),
66 HOWTO(R_X86_64_GOT32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
67 bfd_elf_generic_reloc, "R_X86_64_GOT32", FALSE, 0xffffffff, 0xffffffff,
68 FALSE),
69 HOWTO(R_X86_64_PLT32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
70 bfd_elf_generic_reloc, "R_X86_64_PLT32", FALSE, 0xffffffff, 0xffffffff,
71 TRUE),
72 HOWTO(R_X86_64_COPY, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
73 bfd_elf_generic_reloc, "R_X86_64_COPY", FALSE, 0xffffffff, 0xffffffff,
74 FALSE),
75 HOWTO(R_X86_64_GLOB_DAT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
76 bfd_elf_generic_reloc, "R_X86_64_GLOB_DAT", FALSE, MINUS_ONE,
77 MINUS_ONE, FALSE),
78 HOWTO(R_X86_64_JUMP_SLOT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
79 bfd_elf_generic_reloc, "R_X86_64_JUMP_SLOT", FALSE, MINUS_ONE,
80 MINUS_ONE, FALSE),
81 HOWTO(R_X86_64_RELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
82 bfd_elf_generic_reloc, "R_X86_64_RELATIVE", FALSE, MINUS_ONE,
83 MINUS_ONE, FALSE),
84 HOWTO(R_X86_64_GOTPCREL, 0, 2, 32, TRUE, 0, complain_overflow_signed,
85 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL", FALSE, 0xffffffff,
86 0xffffffff, TRUE),
87 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
88 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
89 FALSE),
90 HOWTO(R_X86_64_32S, 0, 2, 32, FALSE, 0, complain_overflow_signed,
91 bfd_elf_generic_reloc, "R_X86_64_32S", FALSE, 0xffffffff, 0xffffffff,
92 FALSE),
93 HOWTO(R_X86_64_16, 0, 1, 16, FALSE, 0, complain_overflow_bitfield,
94 bfd_elf_generic_reloc, "R_X86_64_16", FALSE, 0xffff, 0xffff, FALSE),
95 HOWTO(R_X86_64_PC16,0, 1, 16, TRUE, 0, complain_overflow_bitfield,
96 bfd_elf_generic_reloc, "R_X86_64_PC16", FALSE, 0xffff, 0xffff, TRUE),
97 HOWTO(R_X86_64_8, 0, 0, 8, FALSE, 0, complain_overflow_bitfield,
98 bfd_elf_generic_reloc, "R_X86_64_8", FALSE, 0xff, 0xff, FALSE),
99 HOWTO(R_X86_64_PC8, 0, 0, 8, TRUE, 0, complain_overflow_signed,
100 bfd_elf_generic_reloc, "R_X86_64_PC8", FALSE, 0xff, 0xff, TRUE),
101 HOWTO(R_X86_64_DTPMOD64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
102 bfd_elf_generic_reloc, "R_X86_64_DTPMOD64", FALSE, MINUS_ONE,
103 MINUS_ONE, FALSE),
104 HOWTO(R_X86_64_DTPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
105 bfd_elf_generic_reloc, "R_X86_64_DTPOFF64", FALSE, MINUS_ONE,
106 MINUS_ONE, FALSE),
107 HOWTO(R_X86_64_TPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
108 bfd_elf_generic_reloc, "R_X86_64_TPOFF64", FALSE, MINUS_ONE,
109 MINUS_ONE, FALSE),
110 HOWTO(R_X86_64_TLSGD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
111 bfd_elf_generic_reloc, "R_X86_64_TLSGD", FALSE, 0xffffffff,
112 0xffffffff, TRUE),
113 HOWTO(R_X86_64_TLSLD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
114 bfd_elf_generic_reloc, "R_X86_64_TLSLD", FALSE, 0xffffffff,
115 0xffffffff, TRUE),
116 HOWTO(R_X86_64_DTPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
117 bfd_elf_generic_reloc, "R_X86_64_DTPOFF32", FALSE, 0xffffffff,
118 0xffffffff, FALSE),
119 HOWTO(R_X86_64_GOTTPOFF, 0, 2, 32, TRUE, 0, complain_overflow_signed,
120 bfd_elf_generic_reloc, "R_X86_64_GOTTPOFF", FALSE, 0xffffffff,
121 0xffffffff, TRUE),
122 HOWTO(R_X86_64_TPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
123 bfd_elf_generic_reloc, "R_X86_64_TPOFF32", FALSE, 0xffffffff,
124 0xffffffff, FALSE),
125 HOWTO(R_X86_64_PC64, 0, 4, 64, TRUE, 0, complain_overflow_bitfield,
126 bfd_elf_generic_reloc, "R_X86_64_PC64", FALSE, MINUS_ONE, MINUS_ONE,
127 TRUE),
128 HOWTO(R_X86_64_GOTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
129 bfd_elf_generic_reloc, "R_X86_64_GOTOFF64",
130 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
131 HOWTO(R_X86_64_GOTPC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
132 bfd_elf_generic_reloc, "R_X86_64_GOTPC32",
133 FALSE, 0xffffffff, 0xffffffff, TRUE),
134 HOWTO(R_X86_64_GOT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
135 bfd_elf_generic_reloc, "R_X86_64_GOT64", FALSE, MINUS_ONE, MINUS_ONE,
136 FALSE),
137 HOWTO(R_X86_64_GOTPCREL64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
138 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL64", FALSE, MINUS_ONE,
139 MINUS_ONE, TRUE),
140 HOWTO(R_X86_64_GOTPC64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
141 bfd_elf_generic_reloc, "R_X86_64_GOTPC64",
142 FALSE, MINUS_ONE, MINUS_ONE, TRUE),
143 HOWTO(R_X86_64_GOTPLT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
144 bfd_elf_generic_reloc, "R_X86_64_GOTPLT64", FALSE, MINUS_ONE,
145 MINUS_ONE, FALSE),
146 HOWTO(R_X86_64_PLTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
147 bfd_elf_generic_reloc, "R_X86_64_PLTOFF64", FALSE, MINUS_ONE,
148 MINUS_ONE, FALSE),
149 HOWTO(R_X86_64_SIZE32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
150 bfd_elf_generic_reloc, "R_X86_64_SIZE32", FALSE, 0xffffffff, 0xffffffff,
151 FALSE),
152 HOWTO(R_X86_64_SIZE64, 0, 4, 64, FALSE, 0, complain_overflow_unsigned,
153 bfd_elf_generic_reloc, "R_X86_64_SIZE64", FALSE, MINUS_ONE, MINUS_ONE,
154 FALSE),
155 HOWTO(R_X86_64_GOTPC32_TLSDESC, 0, 2, 32, TRUE, 0,
156 complain_overflow_bitfield, bfd_elf_generic_reloc,
157 "R_X86_64_GOTPC32_TLSDESC",
158 FALSE, 0xffffffff, 0xffffffff, TRUE),
159 HOWTO(R_X86_64_TLSDESC_CALL, 0, 0, 0, FALSE, 0,
160 complain_overflow_dont, bfd_elf_generic_reloc,
161 "R_X86_64_TLSDESC_CALL",
162 FALSE, 0, 0, FALSE),
163 HOWTO(R_X86_64_TLSDESC, 0, 4, 64, FALSE, 0,
164 complain_overflow_bitfield, bfd_elf_generic_reloc,
165 "R_X86_64_TLSDESC",
166 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
167 HOWTO(R_X86_64_IRELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
168 bfd_elf_generic_reloc, "R_X86_64_IRELATIVE", FALSE, MINUS_ONE,
169 MINUS_ONE, FALSE),
170 HOWTO(R_X86_64_RELATIVE64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
171 bfd_elf_generic_reloc, "R_X86_64_RELATIVE64", FALSE, MINUS_ONE,
172 MINUS_ONE, FALSE),
173 HOWTO(R_X86_64_PC32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
174 bfd_elf_generic_reloc, "R_X86_64_PC32_BND", FALSE, 0xffffffff, 0xffffffff,
175 TRUE),
176 HOWTO(R_X86_64_PLT32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
177 bfd_elf_generic_reloc, "R_X86_64_PLT32_BND", FALSE, 0xffffffff, 0xffffffff,
178 TRUE),
179
180 /* We have a gap in the reloc numbers here.
181 R_X86_64_standard counts the number up to this point, and
182 R_X86_64_vt_offset is the value to subtract from a reloc type of
183 R_X86_64_GNU_VT* to form an index into this table. */
184 #define R_X86_64_standard (R_X86_64_PLT32_BND + 1)
185 #define R_X86_64_vt_offset (R_X86_64_GNU_VTINHERIT - R_X86_64_standard)
186
187 /* GNU extension to record C++ vtable hierarchy. */
188 HOWTO (R_X86_64_GNU_VTINHERIT, 0, 4, 0, FALSE, 0, complain_overflow_dont,
189 NULL, "R_X86_64_GNU_VTINHERIT", FALSE, 0, 0, FALSE),
190
191 /* GNU extension to record C++ vtable member usage. */
192 HOWTO (R_X86_64_GNU_VTENTRY, 0, 4, 0, FALSE, 0, complain_overflow_dont,
193 _bfd_elf_rel_vtable_reloc_fn, "R_X86_64_GNU_VTENTRY", FALSE, 0, 0,
194 FALSE),
195
196 /* Use complain_overflow_bitfield on R_X86_64_32 for x32. */
197 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
198 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
199 FALSE)
200 };
201
202 #define IS_X86_64_PCREL_TYPE(TYPE) \
203 ( ((TYPE) == R_X86_64_PC8) \
204 || ((TYPE) == R_X86_64_PC16) \
205 || ((TYPE) == R_X86_64_PC32) \
206 || ((TYPE) == R_X86_64_PC32_BND) \
207 || ((TYPE) == R_X86_64_PC64))
208
209 /* Map BFD relocs to the x86_64 elf relocs. */
210 struct elf_reloc_map
211 {
212 bfd_reloc_code_real_type bfd_reloc_val;
213 unsigned char elf_reloc_val;
214 };
215
216 static const struct elf_reloc_map x86_64_reloc_map[] =
217 {
218 { BFD_RELOC_NONE, R_X86_64_NONE, },
219 { BFD_RELOC_64, R_X86_64_64, },
220 { BFD_RELOC_32_PCREL, R_X86_64_PC32, },
221 { BFD_RELOC_X86_64_GOT32, R_X86_64_GOT32,},
222 { BFD_RELOC_X86_64_PLT32, R_X86_64_PLT32,},
223 { BFD_RELOC_X86_64_COPY, R_X86_64_COPY, },
224 { BFD_RELOC_X86_64_GLOB_DAT, R_X86_64_GLOB_DAT, },
225 { BFD_RELOC_X86_64_JUMP_SLOT, R_X86_64_JUMP_SLOT, },
226 { BFD_RELOC_X86_64_RELATIVE, R_X86_64_RELATIVE, },
227 { BFD_RELOC_X86_64_GOTPCREL, R_X86_64_GOTPCREL, },
228 { BFD_RELOC_32, R_X86_64_32, },
229 { BFD_RELOC_X86_64_32S, R_X86_64_32S, },
230 { BFD_RELOC_16, R_X86_64_16, },
231 { BFD_RELOC_16_PCREL, R_X86_64_PC16, },
232 { BFD_RELOC_8, R_X86_64_8, },
233 { BFD_RELOC_8_PCREL, R_X86_64_PC8, },
234 { BFD_RELOC_X86_64_DTPMOD64, R_X86_64_DTPMOD64, },
235 { BFD_RELOC_X86_64_DTPOFF64, R_X86_64_DTPOFF64, },
236 { BFD_RELOC_X86_64_TPOFF64, R_X86_64_TPOFF64, },
237 { BFD_RELOC_X86_64_TLSGD, R_X86_64_TLSGD, },
238 { BFD_RELOC_X86_64_TLSLD, R_X86_64_TLSLD, },
239 { BFD_RELOC_X86_64_DTPOFF32, R_X86_64_DTPOFF32, },
240 { BFD_RELOC_X86_64_GOTTPOFF, R_X86_64_GOTTPOFF, },
241 { BFD_RELOC_X86_64_TPOFF32, R_X86_64_TPOFF32, },
242 { BFD_RELOC_64_PCREL, R_X86_64_PC64, },
243 { BFD_RELOC_X86_64_GOTOFF64, R_X86_64_GOTOFF64, },
244 { BFD_RELOC_X86_64_GOTPC32, R_X86_64_GOTPC32, },
245 { BFD_RELOC_X86_64_GOT64, R_X86_64_GOT64, },
246 { BFD_RELOC_X86_64_GOTPCREL64,R_X86_64_GOTPCREL64, },
247 { BFD_RELOC_X86_64_GOTPC64, R_X86_64_GOTPC64, },
248 { BFD_RELOC_X86_64_GOTPLT64, R_X86_64_GOTPLT64, },
249 { BFD_RELOC_X86_64_PLTOFF64, R_X86_64_PLTOFF64, },
250 { BFD_RELOC_SIZE32, R_X86_64_SIZE32, },
251 { BFD_RELOC_SIZE64, R_X86_64_SIZE64, },
252 { BFD_RELOC_X86_64_GOTPC32_TLSDESC, R_X86_64_GOTPC32_TLSDESC, },
253 { BFD_RELOC_X86_64_TLSDESC_CALL, R_X86_64_TLSDESC_CALL, },
254 { BFD_RELOC_X86_64_TLSDESC, R_X86_64_TLSDESC, },
255 { BFD_RELOC_X86_64_IRELATIVE, R_X86_64_IRELATIVE, },
256 { BFD_RELOC_X86_64_PC32_BND, R_X86_64_PC32_BND,},
257 { BFD_RELOC_X86_64_PLT32_BND, R_X86_64_PLT32_BND,},
258 { BFD_RELOC_VTABLE_INHERIT, R_X86_64_GNU_VTINHERIT, },
259 { BFD_RELOC_VTABLE_ENTRY, R_X86_64_GNU_VTENTRY, },
260 };
261
262 static reloc_howto_type *
263 elf_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type)
264 {
265 unsigned i;
266
267 if (r_type == (unsigned int) R_X86_64_32)
268 {
269 if (ABI_64_P (abfd))
270 i = r_type;
271 else
272 i = ARRAY_SIZE (x86_64_elf_howto_table) - 1;
273 }
274 else if (r_type < (unsigned int) R_X86_64_GNU_VTINHERIT
275 || r_type >= (unsigned int) R_X86_64_max)
276 {
277 if (r_type >= (unsigned int) R_X86_64_standard)
278 {
279 (*_bfd_error_handler) (_("%B: invalid relocation type %d"),
280 abfd, (int) r_type);
281 r_type = R_X86_64_NONE;
282 }
283 i = r_type;
284 }
285 else
286 i = r_type - (unsigned int) R_X86_64_vt_offset;
287 BFD_ASSERT (x86_64_elf_howto_table[i].type == r_type);
288 return &x86_64_elf_howto_table[i];
289 }
290
291 /* Given a BFD reloc type, return a HOWTO structure. */
292 static reloc_howto_type *
293 elf_x86_64_reloc_type_lookup (bfd *abfd,
294 bfd_reloc_code_real_type code)
295 {
296 unsigned int i;
297
298 for (i = 0; i < sizeof (x86_64_reloc_map) / sizeof (struct elf_reloc_map);
299 i++)
300 {
301 if (x86_64_reloc_map[i].bfd_reloc_val == code)
302 return elf_x86_64_rtype_to_howto (abfd,
303 x86_64_reloc_map[i].elf_reloc_val);
304 }
305 return NULL;
306 }
307
308 static reloc_howto_type *
309 elf_x86_64_reloc_name_lookup (bfd *abfd,
310 const char *r_name)
311 {
312 unsigned int i;
313
314 if (!ABI_64_P (abfd) && strcasecmp (r_name, "R_X86_64_32") == 0)
315 {
316 /* Get x32 R_X86_64_32. */
317 reloc_howto_type *reloc
318 = &x86_64_elf_howto_table[ARRAY_SIZE (x86_64_elf_howto_table) - 1];
319 BFD_ASSERT (reloc->type == (unsigned int) R_X86_64_32);
320 return reloc;
321 }
322
323 for (i = 0; i < ARRAY_SIZE (x86_64_elf_howto_table); i++)
324 if (x86_64_elf_howto_table[i].name != NULL
325 && strcasecmp (x86_64_elf_howto_table[i].name, r_name) == 0)
326 return &x86_64_elf_howto_table[i];
327
328 return NULL;
329 }
330
331 /* Given an x86_64 ELF reloc type, fill in an arelent structure. */
332
333 static void
334 elf_x86_64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *cache_ptr,
335 Elf_Internal_Rela *dst)
336 {
337 unsigned r_type;
338
339 r_type = ELF32_R_TYPE (dst->r_info);
340 cache_ptr->howto = elf_x86_64_rtype_to_howto (abfd, r_type);
341 BFD_ASSERT (r_type == cache_ptr->howto->type);
342 }
343 \f
344 /* Support for core dump NOTE sections. */
345 static bfd_boolean
346 elf_x86_64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
347 {
348 int offset;
349 size_t size;
350
351 switch (note->descsz)
352 {
353 default:
354 return FALSE;
355
356 case 296: /* sizeof(struct elf_prstatus) on Linux/x32 */
357 /* pr_cursig */
358 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
359
360 /* pr_pid */
361 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
362
363 /* pr_reg */
364 offset = 72;
365 size = 216;
366
367 break;
368
369 case 336: /* sizeof(struct elf_prstatus) on Linux/x86_64 */
370 /* pr_cursig */
371 elf_tdata (abfd)->core->signal
372 = bfd_get_16 (abfd, note->descdata + 12);
373
374 /* pr_pid */
375 elf_tdata (abfd)->core->lwpid
376 = bfd_get_32 (abfd, note->descdata + 32);
377
378 /* pr_reg */
379 offset = 112;
380 size = 216;
381
382 break;
383 }
384
385 /* Make a ".reg/999" section. */
386 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
387 size, note->descpos + offset);
388 }
389
390 static bfd_boolean
391 elf_x86_64_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
392 {
393 switch (note->descsz)
394 {
395 default:
396 return FALSE;
397
398 case 124: /* sizeof(struct elf_prpsinfo) on Linux/x32 */
399 elf_tdata (abfd)->core->pid
400 = bfd_get_32 (abfd, note->descdata + 12);
401 elf_tdata (abfd)->core->program
402 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
403 elf_tdata (abfd)->core->command
404 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
405 break;
406
407 case 136: /* sizeof(struct elf_prpsinfo) on Linux/x86_64 */
408 elf_tdata (abfd)->core->pid
409 = bfd_get_32 (abfd, note->descdata + 24);
410 elf_tdata (abfd)->core->program
411 = _bfd_elfcore_strndup (abfd, note->descdata + 40, 16);
412 elf_tdata (abfd)->core->command
413 = _bfd_elfcore_strndup (abfd, note->descdata + 56, 80);
414 }
415
416 /* Note that for some reason, a spurious space is tacked
417 onto the end of the args in some implementations (at least
418 one, anyway), so strip it off if it exists. */
419
420 {
421 char *command = elf_tdata (abfd)->core->command;
422 int n = strlen (command);
423
424 if (0 < n && command[n - 1] == ' ')
425 command[n - 1] = '\0';
426 }
427
428 return TRUE;
429 }
430
431 #ifdef CORE_HEADER
432 static char *
433 elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz,
434 int note_type, ...)
435 {
436 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
437 va_list ap;
438 const char *fname, *psargs;
439 long pid;
440 int cursig;
441 const void *gregs;
442
443 switch (note_type)
444 {
445 default:
446 return NULL;
447
448 case NT_PRPSINFO:
449 va_start (ap, note_type);
450 fname = va_arg (ap, const char *);
451 psargs = va_arg (ap, const char *);
452 va_end (ap);
453
454 if (bed->s->elfclass == ELFCLASS32)
455 {
456 prpsinfo32_t data;
457 memset (&data, 0, sizeof (data));
458 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
459 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
460 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
461 &data, sizeof (data));
462 }
463 else
464 {
465 prpsinfo64_t data;
466 memset (&data, 0, sizeof (data));
467 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
468 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
469 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
470 &data, sizeof (data));
471 }
472 /* NOTREACHED */
473
474 case NT_PRSTATUS:
475 va_start (ap, note_type);
476 pid = va_arg (ap, long);
477 cursig = va_arg (ap, int);
478 gregs = va_arg (ap, const void *);
479 va_end (ap);
480
481 if (bed->s->elfclass == ELFCLASS32)
482 {
483 if (bed->elf_machine_code == EM_X86_64)
484 {
485 prstatusx32_t prstat;
486 memset (&prstat, 0, sizeof (prstat));
487 prstat.pr_pid = pid;
488 prstat.pr_cursig = cursig;
489 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
490 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
491 &prstat, sizeof (prstat));
492 }
493 else
494 {
495 prstatus32_t prstat;
496 memset (&prstat, 0, sizeof (prstat));
497 prstat.pr_pid = pid;
498 prstat.pr_cursig = cursig;
499 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
500 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
501 &prstat, sizeof (prstat));
502 }
503 }
504 else
505 {
506 prstatus64_t prstat;
507 memset (&prstat, 0, sizeof (prstat));
508 prstat.pr_pid = pid;
509 prstat.pr_cursig = cursig;
510 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
511 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
512 &prstat, sizeof (prstat));
513 }
514 }
515 /* NOTREACHED */
516 }
517 #endif
518 \f
519 /* Functions for the x86-64 ELF linker. */
520
521 /* The name of the dynamic interpreter. This is put in the .interp
522 section. */
523
524 #define ELF64_DYNAMIC_INTERPRETER "/lib/ld64.so.1"
525 #define ELF32_DYNAMIC_INTERPRETER "/lib/ldx32.so.1"
526
527 /* If ELIMINATE_COPY_RELOCS is non-zero, the linker will try to avoid
528 copying dynamic variables from a shared lib into an app's dynbss
529 section, and instead use a dynamic relocation to point into the
530 shared lib. */
531 #define ELIMINATE_COPY_RELOCS 1
532
533 /* The size in bytes of an entry in the global offset table. */
534
535 #define GOT_ENTRY_SIZE 8
536
537 /* The size in bytes of an entry in the procedure linkage table. */
538
539 #define PLT_ENTRY_SIZE 16
540
541 /* The first entry in a procedure linkage table looks like this. See the
542 SVR4 ABI i386 supplement and the x86-64 ABI for details. */
543
544 static const bfd_byte elf_x86_64_plt0_entry[PLT_ENTRY_SIZE] =
545 {
546 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
547 0xff, 0x25, 16, 0, 0, 0, /* jmpq *GOT+16(%rip) */
548 0x0f, 0x1f, 0x40, 0x00 /* nopl 0(%rax) */
549 };
550
551 /* Subsequent entries in a procedure linkage table look like this. */
552
553 static const bfd_byte elf_x86_64_plt_entry[PLT_ENTRY_SIZE] =
554 {
555 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
556 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
557 0x68, /* pushq immediate */
558 0, 0, 0, 0, /* replaced with index into relocation table. */
559 0xe9, /* jmp relative */
560 0, 0, 0, 0 /* replaced with offset to start of .plt0. */
561 };
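/* Editorial sketch (not upstream commentary): once the placeholders
   above are filled in, PLT entry N for symbol "foo" behaves roughly as

	jmpq *foo@GOTPCREL(%rip)   # ff 25 <4-byte GOT displacement>
	pushq $N                   # 68 <4-byte .rela.plt index>
	jmpq PLT0                  # e9 <4-byte offset back to entry 0>

   Before the symbol is resolved, foo's .got.plt slot points at the
   pushq above (see plt_lazy_offset below), so the first call falls
   through to PLT0, which pushes GOT[1] and jumps via GOT[2] into the
   dynamic linker's lazy resolver.  */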
562
563 /* The first entry in a procedure linkage table with BND relocations
564 looks like this. */
565
566 static const bfd_byte elf_x86_64_bnd_plt0_entry[PLT_ENTRY_SIZE] =
567 {
568 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
569 0xf2, 0xff, 0x25, 16, 0, 0, 0, /* bnd jmpq *GOT+16(%rip) */
570 0x0f, 0x1f, 0 /* nopl (%rax) */
571 };
572
573 /* Subsequent entries for legacy branches in a procedure linkage table
574 with BND relocations look like this. */
575
576 static const bfd_byte elf_x86_64_legacy_plt_entry[PLT_ENTRY_SIZE] =
577 {
578 0x68, 0, 0, 0, 0, /* pushq immediate */
579 0xe9, 0, 0, 0, 0, /* jmpq relative */
580 0x66, 0x0f, 0x1f, 0x44, 0, 0 /* nopw (%rax,%rax,1) */
581 };
582
583 /* Subsequent entries for branches with BND prefix in a procedure linkage
584 table with BND relocations look like this. */
585
586 static const bfd_byte elf_x86_64_bnd_plt_entry[PLT_ENTRY_SIZE] =
587 {
588 0x68, 0, 0, 0, 0, /* pushq immediate */
589 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
590 0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */
591 };
592
593 /* Entries for legacy branches in the second procedure linkage table
594 look like this. */
595
596 static const bfd_byte elf_x86_64_legacy_plt2_entry[8] =
597 {
598 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
599 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
600 0x66, 0x90 /* xchg %ax,%ax */
601 };
602
603 /* Entries for branches with BND prefix in the second procedure linkage
604 table look like this. */
605
606 static const bfd_byte elf_x86_64_bnd_plt2_entry[8] =
607 {
608 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
609 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
610 0x90 /* nop */
611 };
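/* Editorial note (an interpretation of the templates above, not
   upstream commentary): with MPX support the PLT is split.  The 16-byte
   entries in .plt only push the .rela.plt index and "bnd jmp" to PLT0
   for lazy resolution, while the 8-byte entries above go into a second
   PLT (".plt.bnd", created in check_relocs below) and perform the
   actual "bnd jmpq *GOT" transfer.  The GOT slot initially points at
   the start of the corresponding .plt entry (plt_lazy_offset of 0 in
   elf_x86_64_bnd_arch_bed), so the first call still reaches the
   resolver.  */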
612
613 /* .eh_frame covering the .plt section. */
614
615 static const bfd_byte elf_x86_64_eh_frame_plt[] =
616 {
617 #define PLT_CIE_LENGTH 20
618 #define PLT_FDE_LENGTH 36
619 #define PLT_FDE_START_OFFSET 4 + PLT_CIE_LENGTH + 8
620 #define PLT_FDE_LEN_OFFSET 4 + PLT_CIE_LENGTH + 12
621 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
622 0, 0, 0, 0, /* CIE ID */
623 1, /* CIE version */
624 'z', 'R', 0, /* Augmentation string */
625 1, /* Code alignment factor */
626 0x78, /* Data alignment factor */
627 16, /* Return address column */
628 1, /* Augmentation size */
629 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
630 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
631 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
632 DW_CFA_nop, DW_CFA_nop,
633
634 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
635 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
636 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
637 0, 0, 0, 0, /* .plt size goes here */
638 0, /* Augmentation size */
639 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
640 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
641 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
642 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
643 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
644 11, /* Block length */
645 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
646 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
647 DW_OP_lit15, DW_OP_and, DW_OP_lit11, DW_OP_ge,
648 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
649 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
650 };
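/* Editorial note (a worked reading of the CFA expression above, not
   upstream commentary): DW_CFA_def_cfa_expression computes

	CFA = (%rsp + 8) + ((((%rip & 15) >= 11) ? 1 : 0) << 3)

   Within a 16-byte PLT entry the pushq occupies bytes 6-10, so once it
   has executed the next instruction starts at offset 11 and the CFA is
   %rsp + 16; before the pushq it is %rsp + 8.  */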
651
652 /* Architecture-specific backend data for x86-64. */
653
654 struct elf_x86_64_backend_data
655 {
656 /* Templates for the initial PLT entry and for subsequent entries. */
657 const bfd_byte *plt0_entry;
658 const bfd_byte *plt_entry;
659 unsigned int plt_entry_size; /* Size of each PLT entry. */
660
661 /* Offsets into plt0_entry that are to be replaced with GOT[1] and GOT[2]. */
662 unsigned int plt0_got1_offset;
663 unsigned int plt0_got2_offset;
664
665 /* Offset of the end of the PC-relative instruction containing
666 plt0_got2_offset. */
667 unsigned int plt0_got2_insn_end;
668
669 /* Offsets into plt_entry that are to be replaced with... */
670 unsigned int plt_got_offset; /* ... address of this symbol in .got. */
671 unsigned int plt_reloc_offset; /* ... offset into relocation table. */
672 unsigned int plt_plt_offset; /* ... offset to start of .plt. */
673
674 /* Length of the PC-relative instruction containing plt_got_offset. */
675 unsigned int plt_got_insn_size;
676
677 /* Offset of the end of the PC-relative jump to plt0_entry. */
678 unsigned int plt_plt_insn_end;
679
680 /* Offset into plt_entry where the initial value of the GOT entry points. */
681 unsigned int plt_lazy_offset;
682
683 /* .eh_frame covering the .plt section. */
684 const bfd_byte *eh_frame_plt;
685 unsigned int eh_frame_plt_size;
686 };
687
688 #define get_elf_x86_64_arch_data(bed) \
689 ((const struct elf_x86_64_backend_data *) (bed)->arch_data)
690
691 #define get_elf_x86_64_backend_data(abfd) \
692 get_elf_x86_64_arch_data (get_elf_backend_data (abfd))
693
694 #define GET_PLT_ENTRY_SIZE(abfd) \
695 get_elf_x86_64_backend_data (abfd)->plt_entry_size
696
697 /* These are the standard parameters. */
698 static const struct elf_x86_64_backend_data elf_x86_64_arch_bed =
699 {
700 elf_x86_64_plt0_entry, /* plt0_entry */
701 elf_x86_64_plt_entry, /* plt_entry */
702 sizeof (elf_x86_64_plt_entry), /* plt_entry_size */
703 2, /* plt0_got1_offset */
704 8, /* plt0_got2_offset */
705 12, /* plt0_got2_insn_end */
706 2, /* plt_got_offset */
707 7, /* plt_reloc_offset */
708 12, /* plt_plt_offset */
709 6, /* plt_got_insn_size */
710 PLT_ENTRY_SIZE, /* plt_plt_insn_end */
711 6, /* plt_lazy_offset */
712 elf_x86_64_eh_frame_plt, /* eh_frame_plt */
713 sizeof (elf_x86_64_eh_frame_plt), /* eh_frame_plt_size */
714 };
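/* Editorial note (mapping derived from the templates above): in
   elf_x86_64_plt_entry the "ff 25" opcode occupies bytes 0-1, so
   plt_got_offset 2 is where the 4-byte GOT displacement is patched;
   the .rela.plt index follows the 0x68 opcode at byte 7
   (plt_reloc_offset) and the branch back to PLT0 follows the 0xe9 at
   byte 12 (plt_plt_offset).  plt_got_insn_size 6 and plt_plt_insn_end
   16 mark the ends of those instructions, against which the
   PC-relative displacements are computed.  */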
715
716 static const struct elf_x86_64_backend_data elf_x86_64_bnd_arch_bed =
717 {
718 elf_x86_64_bnd_plt0_entry, /* plt0_entry */
719 elf_x86_64_bnd_plt_entry, /* plt_entry */
720 sizeof (elf_x86_64_bnd_plt_entry), /* plt_entry_size */
721 2, /* plt0_got1_offset */
722 1+8, /* plt0_got2_offset */
723 1+12, /* plt0_got2_insn_end */
724 1+2, /* plt_got_offset */
725 1, /* plt_reloc_offset */
726 7, /* plt_plt_offset */
727 1+6, /* plt_got_insn_size */
728 11, /* plt_plt_insn_end */
729 0, /* plt_lazy_offset */
730 elf_x86_64_eh_frame_plt, /* eh_frame_plt */
731 sizeof (elf_x86_64_eh_frame_plt), /* eh_frame_plt_size */
732 };
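/* Editorial note: the 1+N values above account for the one-byte 0xf2
   BND prefix in the BND templates, which pushes the displacement and
   immediate fields one byte further into each entry than in the
   standard templates.  */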
733
734 #define elf_backend_arch_data &elf_x86_64_arch_bed
735
736 /* x86-64 ELF linker hash entry. */
737
738 struct elf_x86_64_link_hash_entry
739 {
740 struct elf_link_hash_entry elf;
741
742 /* Track dynamic relocs copied for this symbol. */
743 struct elf_dyn_relocs *dyn_relocs;
744
745 #define GOT_UNKNOWN 0
746 #define GOT_NORMAL 1
747 #define GOT_TLS_GD 2
748 #define GOT_TLS_IE 3
749 #define GOT_TLS_GDESC 4
750 #define GOT_TLS_GD_BOTH_P(type) \
751 ((type) == (GOT_TLS_GD | GOT_TLS_GDESC))
752 #define GOT_TLS_GD_P(type) \
753 ((type) == GOT_TLS_GD || GOT_TLS_GD_BOTH_P (type))
754 #define GOT_TLS_GDESC_P(type) \
755 ((type) == GOT_TLS_GDESC || GOT_TLS_GD_BOTH_P (type))
756 #define GOT_TLS_GD_ANY_P(type) \
757 (GOT_TLS_GD_P (type) || GOT_TLS_GDESC_P (type))
758 unsigned char tls_type;
759
760 /* TRUE if a weak symbol with a real definition needs a copy reloc.
761 When there is a weak symbol with a real definition, the processor
762 independent code will have arranged for us to see the real
763 definition first. We need to copy the needs_copy bit from the
764 real definition and check it when allowing copy reloc in PIE. */
765 unsigned int needs_copy : 1;
766
767 /* TRUE if symbol has at least one BND relocation. */
768 unsigned int has_bnd_reloc : 1;
769
770 /* Information about the GOT PLT entry. Filled when there are both
771 GOT and PLT relocations against the same function. */
772 union gotplt_union plt_got;
773
774 /* Information about the second PLT entry. Filled when has_bnd_reloc is
775 set. */
776 union gotplt_union plt_bnd;
777
778 /* Offset of the GOTPLT entry reserved for the TLS descriptor,
779 starting at the end of the jump table. */
780 bfd_vma tlsdesc_got;
781 };
782
783 #define elf_x86_64_hash_entry(ent) \
784 ((struct elf_x86_64_link_hash_entry *)(ent))
785
786 struct elf_x86_64_obj_tdata
787 {
788 struct elf_obj_tdata root;
789
790 /* tls_type for each local got entry. */
791 char *local_got_tls_type;
792
793 /* GOTPLT entries for TLS descriptors. */
794 bfd_vma *local_tlsdesc_gotent;
795 };
796
797 #define elf_x86_64_tdata(abfd) \
798 ((struct elf_x86_64_obj_tdata *) (abfd)->tdata.any)
799
800 #define elf_x86_64_local_got_tls_type(abfd) \
801 (elf_x86_64_tdata (abfd)->local_got_tls_type)
802
803 #define elf_x86_64_local_tlsdesc_gotent(abfd) \
804 (elf_x86_64_tdata (abfd)->local_tlsdesc_gotent)
805
806 #define is_x86_64_elf(bfd) \
807 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
808 && elf_tdata (bfd) != NULL \
809 && elf_object_id (bfd) == X86_64_ELF_DATA)
810
811 static bfd_boolean
812 elf_x86_64_mkobject (bfd *abfd)
813 {
814 return bfd_elf_allocate_object (abfd, sizeof (struct elf_x86_64_obj_tdata),
815 X86_64_ELF_DATA);
816 }
817
818 /* x86-64 ELF linker hash table. */
819
820 struct elf_x86_64_link_hash_table
821 {
822 struct elf_link_hash_table elf;
823
824 /* Short-cuts to get to dynamic linker sections. */
825 asection *sdynbss;
826 asection *srelbss;
827 asection *plt_eh_frame;
828 asection *plt_bnd;
829 asection *plt_got;
830
831 union
832 {
833 bfd_signed_vma refcount;
834 bfd_vma offset;
835 } tls_ld_got;
836
837 /* The amount of space used by the jump slots in the GOT. */
838 bfd_vma sgotplt_jump_table_size;
839
840 /* Small local sym cache. */
841 struct sym_cache sym_cache;
842
843 bfd_vma (*r_info) (bfd_vma, bfd_vma);
844 bfd_vma (*r_sym) (bfd_vma);
845 unsigned int pointer_r_type;
846 const char *dynamic_interpreter;
847 int dynamic_interpreter_size;
848
849 /* _TLS_MODULE_BASE_ symbol. */
850 struct bfd_link_hash_entry *tls_module_base;
851
852 /* Used by local STT_GNU_IFUNC symbols. */
853 htab_t loc_hash_table;
854 void * loc_hash_memory;
855
856 /* The offset into splt of the PLT entry for the TLS descriptor
857 resolver. Special values are 0, if not necessary (or not found
858 to be necessary yet), and -1 if needed but not determined
859 yet. */
860 bfd_vma tlsdesc_plt;
861 /* The offset into sgot of the GOT entry used by the PLT entry
862 above. */
863 bfd_vma tlsdesc_got;
864
865 /* The index of the next R_X86_64_JUMP_SLOT entry in .rela.plt. */
866 bfd_vma next_jump_slot_index;
867 /* The index of the next R_X86_64_IRELATIVE entry in .rela.plt. */
868 bfd_vma next_irelative_index;
869 };
870
871 /* Get the x86-64 ELF linker hash table from a link_info structure. */
872
873 #define elf_x86_64_hash_table(p) \
874 (elf_hash_table_id ((struct elf_link_hash_table *) ((p)->hash)) \
875 == X86_64_ELF_DATA ? ((struct elf_x86_64_link_hash_table *) ((p)->hash)) : NULL)
876
877 #define elf_x86_64_compute_jump_table_size(htab) \
878 ((htab)->elf.srelplt->reloc_count * GOT_ENTRY_SIZE)
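/* Editorial note: every R_X86_64_JUMP_SLOT or R_X86_64_IRELATIVE
   relocation in .rela.plt owns one GOT_ENTRY_SIZE-byte slot in
   .got.plt, so reloc_count * GOT_ENTRY_SIZE is the space taken by
   those jump table slots.  */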
879
880 /* Create an entry in an x86-64 ELF linker hash table. */
881
882 static struct bfd_hash_entry *
883 elf_x86_64_link_hash_newfunc (struct bfd_hash_entry *entry,
884 struct bfd_hash_table *table,
885 const char *string)
886 {
887 /* Allocate the structure if it has not already been allocated by a
888 subclass. */
889 if (entry == NULL)
890 {
891 entry = (struct bfd_hash_entry *)
892 bfd_hash_allocate (table,
893 sizeof (struct elf_x86_64_link_hash_entry));
894 if (entry == NULL)
895 return entry;
896 }
897
898 /* Call the allocation method of the superclass. */
899 entry = _bfd_elf_link_hash_newfunc (entry, table, string);
900 if (entry != NULL)
901 {
902 struct elf_x86_64_link_hash_entry *eh;
903
904 eh = (struct elf_x86_64_link_hash_entry *) entry;
905 eh->dyn_relocs = NULL;
906 eh->tls_type = GOT_UNKNOWN;
907 eh->needs_copy = 0;
908 eh->has_bnd_reloc = 0;
909 eh->plt_bnd.offset = (bfd_vma) -1;
910 eh->plt_got.offset = (bfd_vma) -1;
911 eh->tlsdesc_got = (bfd_vma) -1;
912 }
913
914 return entry;
915 }
916
917 /* Compute a hash of a local hash entry. We use elf_link_hash_entry
918 for local symbols so that we can handle local STT_GNU_IFUNC symbols
919 as global symbols. We reuse indx and dynstr_index for the local symbol
920 hash since they aren't used by global symbols in this backend. */
921
922 static hashval_t
923 elf_x86_64_local_htab_hash (const void *ptr)
924 {
925 struct elf_link_hash_entry *h
926 = (struct elf_link_hash_entry *) ptr;
927 return ELF_LOCAL_SYMBOL_HASH (h->indx, h->dynstr_index);
928 }
929
930 /* Compare local hash entries. */
931
932 static int
933 elf_x86_64_local_htab_eq (const void *ptr1, const void *ptr2)
934 {
935 struct elf_link_hash_entry *h1
936 = (struct elf_link_hash_entry *) ptr1;
937 struct elf_link_hash_entry *h2
938 = (struct elf_link_hash_entry *) ptr2;
939
940 return h1->indx == h2->indx && h1->dynstr_index == h2->dynstr_index;
941 }
942
943 /* Find and/or create a hash entry for a local symbol. */
944
945 static struct elf_link_hash_entry *
946 elf_x86_64_get_local_sym_hash (struct elf_x86_64_link_hash_table *htab,
947 bfd *abfd, const Elf_Internal_Rela *rel,
948 bfd_boolean create)
949 {
950 struct elf_x86_64_link_hash_entry e, *ret;
951 asection *sec = abfd->sections;
952 hashval_t h = ELF_LOCAL_SYMBOL_HASH (sec->id,
953 htab->r_sym (rel->r_info));
954 void **slot;
955
956 e.elf.indx = sec->id;
957 e.elf.dynstr_index = htab->r_sym (rel->r_info);
958 slot = htab_find_slot_with_hash (htab->loc_hash_table, &e, h,
959 create ? INSERT : NO_INSERT);
960
961 if (!slot)
962 return NULL;
963
964 if (*slot)
965 {
966 ret = (struct elf_x86_64_link_hash_entry *) *slot;
967 return &ret->elf;
968 }
969
970 ret = (struct elf_x86_64_link_hash_entry *)
971 objalloc_alloc ((struct objalloc *) htab->loc_hash_memory,
972 sizeof (struct elf_x86_64_link_hash_entry));
973 if (ret)
974 {
975 memset (ret, 0, sizeof (*ret));
976 ret->elf.indx = sec->id;
977 ret->elf.dynstr_index = htab->r_sym (rel->r_info);
978 ret->elf.dynindx = -1;
979 ret->plt_got.offset = (bfd_vma) -1;
980 *slot = ret;
981 }
982 return &ret->elf;
983 }
984
985 /* Destroy an X86-64 ELF linker hash table. */
986
987 static void
988 elf_x86_64_link_hash_table_free (bfd *obfd)
989 {
990 struct elf_x86_64_link_hash_table *htab
991 = (struct elf_x86_64_link_hash_table *) obfd->link.hash;
992
993 if (htab->loc_hash_table)
994 htab_delete (htab->loc_hash_table);
995 if (htab->loc_hash_memory)
996 objalloc_free ((struct objalloc *) htab->loc_hash_memory);
997 _bfd_elf_link_hash_table_free (obfd);
998 }
999
1000 /* Create an X86-64 ELF linker hash table. */
1001
1002 static struct bfd_link_hash_table *
1003 elf_x86_64_link_hash_table_create (bfd *abfd)
1004 {
1005 struct elf_x86_64_link_hash_table *ret;
1006 bfd_size_type amt = sizeof (struct elf_x86_64_link_hash_table);
1007
1008 ret = (struct elf_x86_64_link_hash_table *) bfd_zmalloc (amt);
1009 if (ret == NULL)
1010 return NULL;
1011
1012 if (!_bfd_elf_link_hash_table_init (&ret->elf, abfd,
1013 elf_x86_64_link_hash_newfunc,
1014 sizeof (struct elf_x86_64_link_hash_entry),
1015 X86_64_ELF_DATA))
1016 {
1017 free (ret);
1018 return NULL;
1019 }
1020
1021 if (ABI_64_P (abfd))
1022 {
1023 ret->r_info = elf64_r_info;
1024 ret->r_sym = elf64_r_sym;
1025 ret->pointer_r_type = R_X86_64_64;
1026 ret->dynamic_interpreter = ELF64_DYNAMIC_INTERPRETER;
1027 ret->dynamic_interpreter_size = sizeof ELF64_DYNAMIC_INTERPRETER;
1028 }
1029 else
1030 {
1031 ret->r_info = elf32_r_info;
1032 ret->r_sym = elf32_r_sym;
1033 ret->pointer_r_type = R_X86_64_32;
1034 ret->dynamic_interpreter = ELF32_DYNAMIC_INTERPRETER;
1035 ret->dynamic_interpreter_size = sizeof ELF32_DYNAMIC_INTERPRETER;
1036 }
1037
1038 ret->loc_hash_table = htab_try_create (1024,
1039 elf_x86_64_local_htab_hash,
1040 elf_x86_64_local_htab_eq,
1041 NULL);
1042 ret->loc_hash_memory = objalloc_create ();
1043 if (!ret->loc_hash_table || !ret->loc_hash_memory)
1044 {
1045 elf_x86_64_link_hash_table_free (abfd);
1046 return NULL;
1047 }
1048 ret->elf.root.hash_table_free = elf_x86_64_link_hash_table_free;
1049
1050 return &ret->elf.root;
1051 }
1052
1053 /* Create .plt, .rela.plt, .got, .got.plt, .rela.got, .dynbss, and
1054 .rela.bss sections in DYNOBJ, and set up shortcuts to them in our
1055 hash table. */
1056
1057 static bfd_boolean
1058 elf_x86_64_create_dynamic_sections (bfd *dynobj,
1059 struct bfd_link_info *info)
1060 {
1061 struct elf_x86_64_link_hash_table *htab;
1062
1063 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
1064 return FALSE;
1065
1066 htab = elf_x86_64_hash_table (info);
1067 if (htab == NULL)
1068 return FALSE;
1069
1070 htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
1071 if (!htab->sdynbss)
1072 abort ();
1073
1074 if (info->executable)
1075 {
1076 /* Always allow copy relocs for building executables. */
1077 asection *s = bfd_get_linker_section (dynobj, ".rela.bss");
1078 if (s == NULL)
1079 {
1080 const struct elf_backend_data *bed = get_elf_backend_data (dynobj);
1081 s = bfd_make_section_anyway_with_flags (dynobj,
1082 ".rela.bss",
1083 (bed->dynamic_sec_flags
1084 | SEC_READONLY));
1085 if (s == NULL
1086 || ! bfd_set_section_alignment (dynobj, s,
1087 bed->s->log_file_align))
1088 return FALSE;
1089 }
1090 htab->srelbss = s;
1091 }
1092
1093 if (!info->no_ld_generated_unwind_info
1094 && htab->plt_eh_frame == NULL
1095 && htab->elf.splt != NULL)
1096 {
1097 flagword flags = (SEC_ALLOC | SEC_LOAD | SEC_READONLY
1098 | SEC_HAS_CONTENTS | SEC_IN_MEMORY
1099 | SEC_LINKER_CREATED);
1100 htab->plt_eh_frame
1101 = bfd_make_section_anyway_with_flags (dynobj, ".eh_frame", flags);
1102 if (htab->plt_eh_frame == NULL
1103 || !bfd_set_section_alignment (dynobj, htab->plt_eh_frame, 3))
1104 return FALSE;
1105 }
1106 return TRUE;
1107 }
1108
1109 /* Copy the extra info we tack onto an elf_link_hash_entry. */
1110
1111 static void
1112 elf_x86_64_copy_indirect_symbol (struct bfd_link_info *info,
1113 struct elf_link_hash_entry *dir,
1114 struct elf_link_hash_entry *ind)
1115 {
1116 struct elf_x86_64_link_hash_entry *edir, *eind;
1117
1118 edir = (struct elf_x86_64_link_hash_entry *) dir;
1119 eind = (struct elf_x86_64_link_hash_entry *) ind;
1120
1121 if (!edir->has_bnd_reloc)
1122 edir->has_bnd_reloc = eind->has_bnd_reloc;
1123
1124 if (eind->dyn_relocs != NULL)
1125 {
1126 if (edir->dyn_relocs != NULL)
1127 {
1128 struct elf_dyn_relocs **pp;
1129 struct elf_dyn_relocs *p;
1130
1131 /* Add reloc counts against the indirect sym to the direct sym
1132 list. Merge any entries against the same section. */
1133 for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
1134 {
1135 struct elf_dyn_relocs *q;
1136
1137 for (q = edir->dyn_relocs; q != NULL; q = q->next)
1138 if (q->sec == p->sec)
1139 {
1140 q->pc_count += p->pc_count;
1141 q->count += p->count;
1142 *pp = p->next;
1143 break;
1144 }
1145 if (q == NULL)
1146 pp = &p->next;
1147 }
1148 *pp = edir->dyn_relocs;
1149 }
1150
1151 edir->dyn_relocs = eind->dyn_relocs;
1152 eind->dyn_relocs = NULL;
1153 }
1154
1155 if (ind->root.type == bfd_link_hash_indirect
1156 && dir->got.refcount <= 0)
1157 {
1158 edir->tls_type = eind->tls_type;
1159 eind->tls_type = GOT_UNKNOWN;
1160 }
1161
1162 if (ELIMINATE_COPY_RELOCS
1163 && ind->root.type != bfd_link_hash_indirect
1164 && dir->dynamic_adjusted)
1165 {
1166 /* If called to transfer flags for a weakdef during processing
1167 of elf_adjust_dynamic_symbol, don't copy non_got_ref.
1168 We clear it ourselves for ELIMINATE_COPY_RELOCS. */
1169 dir->ref_dynamic |= ind->ref_dynamic;
1170 dir->ref_regular |= ind->ref_regular;
1171 dir->ref_regular_nonweak |= ind->ref_regular_nonweak;
1172 dir->needs_plt |= ind->needs_plt;
1173 dir->pointer_equality_needed |= ind->pointer_equality_needed;
1174 }
1175 else
1176 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
1177 }
1178
1179 static bfd_boolean
1180 elf64_x86_64_elf_object_p (bfd *abfd)
1181 {
1182 /* Set the right machine number for an x86-64 elf64 file. */
1183 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64);
1184 return TRUE;
1185 }
1186
1187 static bfd_boolean
1188 elf32_x86_64_elf_object_p (bfd *abfd)
1189 {
1190 /* Set the right machine number for an x86-64 elf32 file. */
1191 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32);
1192 return TRUE;
1193 }
1194
1195 /* Return TRUE if the TLS access code sequence supports a transition
1196 from R_TYPE. */
1197
1198 static bfd_boolean
1199 elf_x86_64_check_tls_transition (bfd *abfd,
1200 struct bfd_link_info *info,
1201 asection *sec,
1202 bfd_byte *contents,
1203 Elf_Internal_Shdr *symtab_hdr,
1204 struct elf_link_hash_entry **sym_hashes,
1205 unsigned int r_type,
1206 const Elf_Internal_Rela *rel,
1207 const Elf_Internal_Rela *relend)
1208 {
1209 unsigned int val;
1210 unsigned long r_symndx;
1211 bfd_boolean largepic = FALSE;
1212 struct elf_link_hash_entry *h;
1213 bfd_vma offset;
1214 struct elf_x86_64_link_hash_table *htab;
1215
1216 /* Get the section contents. */
1217 if (contents == NULL)
1218 {
1219 if (elf_section_data (sec)->this_hdr.contents != NULL)
1220 contents = elf_section_data (sec)->this_hdr.contents;
1221 else
1222 {
1223 /* FIXME: How to better handle error condition? */
1224 if (!bfd_malloc_and_get_section (abfd, sec, &contents))
1225 return FALSE;
1226
1227 /* Cache the section contents for elf_link_input_bfd. */
1228 elf_section_data (sec)->this_hdr.contents = contents;
1229 }
1230 }
1231
1232 htab = elf_x86_64_hash_table (info);
1233 offset = rel->r_offset;
1234 switch (r_type)
1235 {
1236 case R_X86_64_TLSGD:
1237 case R_X86_64_TLSLD:
1238 if ((rel + 1) >= relend)
1239 return FALSE;
1240
1241 if (r_type == R_X86_64_TLSGD)
1242 {
1243 /* Check transition from GD access model. For 64bit, only
1244 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1245 .word 0x6666; rex64; call __tls_get_addr
1246 can transition to a different access model. For 32bit, only
1247 leaq foo@tlsgd(%rip), %rdi
1248 .word 0x6666; rex64; call __tls_get_addr
1249 can transition to a different access model. For largepic
1250 we also support:
1251 leaq foo@tlsgd(%rip), %rdi
1252 movabsq $__tls_get_addr@pltoff, %rax
1253 addq %rbx, %rax
1254 call *%rax. */
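/* Editorial note (byte patterns used below, not upstream commentary):
   leaq[] is the data16-prefixed "leaq foo@tlsgd(%rip), %rdi"
   (66 48 8d 3d) and call[] is the "66 66 48" padding plus the 0xe8
   call opcode; the largepic form is recognized by "48 b8"
   (movabsq imm64, %rax) followed by "48 01 d8 ff d0"
   (addq %rbx, %rax; call *%rax).  */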
1255
1256 static const unsigned char call[] = { 0x66, 0x66, 0x48, 0xe8 };
1257 static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d };
1258
1259 if ((offset + 12) > sec->size)
1260 return FALSE;
1261
1262 if (memcmp (contents + offset + 4, call, 4) != 0)
1263 {
1264 if (!ABI_64_P (abfd)
1265 || (offset + 19) > sec->size
1266 || offset < 3
1267 || memcmp (contents + offset - 3, leaq + 1, 3) != 0
1268 || memcmp (contents + offset + 4, "\x48\xb8", 2) != 0
1269 || memcmp (contents + offset + 14, "\x48\x01\xd8\xff\xd0", 5)
1270 != 0)
1271 return FALSE;
1272 largepic = TRUE;
1273 }
1274 else if (ABI_64_P (abfd))
1275 {
1276 if (offset < 4
1277 || memcmp (contents + offset - 4, leaq, 4) != 0)
1278 return FALSE;
1279 }
1280 else
1281 {
1282 if (offset < 3
1283 || memcmp (contents + offset - 3, leaq + 1, 3) != 0)
1284 return FALSE;
1285 }
1286 }
1287 else
1288 {
1289 /* Check transition from LD access model. Only
1290 leaq foo@tlsld(%rip), %rdi;
1291 call __tls_get_addr
1292 can transition to a different access model. For largepic
1293 we also support:
1294 leaq foo@tlsld(%rip), %rdi
1295 movabsq $__tls_get_addr@pltoff, %rax
1296 addq %rbx, %rax
1297 call *%rax. */
1298
1299 static const unsigned char lea[] = { 0x48, 0x8d, 0x3d };
1300
1301 if (offset < 3 || (offset + 9) > sec->size)
1302 return FALSE;
1303
1304 if (memcmp (contents + offset - 3, lea, 3) != 0)
1305 return FALSE;
1306
1307 if (0xe8 != *(contents + offset + 4))
1308 {
1309 if (!ABI_64_P (abfd)
1310 || (offset + 19) > sec->size
1311 || memcmp (contents + offset + 4, "\x48\xb8", 2) != 0
1312 || memcmp (contents + offset + 14, "\x48\x01\xd8\xff\xd0", 5)
1313 != 0)
1314 return FALSE;
1315 largepic = TRUE;
1316 }
1317 }
1318
1319 r_symndx = htab->r_sym (rel[1].r_info);
1320 if (r_symndx < symtab_hdr->sh_info)
1321 return FALSE;
1322
1323 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1324 /* Use strncmp to check __tls_get_addr since __tls_get_addr
1325 may be versioned. */
1326 return (h != NULL
1327 && h->root.root.string != NULL
1328 && (largepic
1329 ? ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PLTOFF64
1330 : (ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PC32
1331 || ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PLT32))
1332 && (strncmp (h->root.root.string,
1333 "__tls_get_addr", 14) == 0));
1334
1335 case R_X86_64_GOTTPOFF:
1336 /* Check transition from IE access model:
1337 mov foo@gottpoff(%rip), %reg
1338 add foo@gottpoff(%rip), %reg
1339 */
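/* Editorial note: for the 64-bit ABI the checks below require a REX
   prefix of 0x48 (REX.W) or 0x4c (REX.WR), while x32 may also use 0x44
   or none; the opcode must be 0x8b (mov) or 0x03 (add) and the ModRM
   byte must have mod == 00 and r/m == 101 ((val & 0xc7) == 5), i.e. a
   RIP-relative operand.  A hypothetical "movq foo@gottpoff(%rip), %rax"
   encodes as 48 8b 05 <disp32>.  */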
1340
1341 /* Check REX prefix first. */
1342 if (offset >= 3 && (offset + 4) <= sec->size)
1343 {
1344 val = bfd_get_8 (abfd, contents + offset - 3);
1345 if (val != 0x48 && val != 0x4c)
1346 {
1347 /* X32 may have 0x44 REX prefix or no REX prefix. */
1348 if (ABI_64_P (abfd))
1349 return FALSE;
1350 }
1351 }
1352 else
1353 {
1354 /* X32 may not have any REX prefix. */
1355 if (ABI_64_P (abfd))
1356 return FALSE;
1357 if (offset < 2 || (offset + 3) > sec->size)
1358 return FALSE;
1359 }
1360
1361 val = bfd_get_8 (abfd, contents + offset - 2);
1362 if (val != 0x8b && val != 0x03)
1363 return FALSE;
1364
1365 val = bfd_get_8 (abfd, contents + offset - 1);
1366 return (val & 0xc7) == 5;
1367
1368 case R_X86_64_GOTPC32_TLSDESC:
1369 /* Check transition from GDesc access model:
1370 leaq x@tlsdesc(%rip), %rax
1371
1372 Make sure it's a leaq adding rip to a 32-bit offset
1373 into any register, although it's probably almost always
1374 going to be rax. */
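/* Editorial note: (val & 0xfb) == 0x48 below accepts REX.W with or
   without REX.R (0x48 or 0x4c), 0x8d is the lea opcode, and
   (ModRM & 0xc7) == 0x05 again requires a RIP-relative operand, so a
   "leaq x@tlsdesc(%rip), %reg" with any destination register
   matches.  */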
1375
1376 if (offset < 3 || (offset + 4) > sec->size)
1377 return FALSE;
1378
1379 val = bfd_get_8 (abfd, contents + offset - 3);
1380 if ((val & 0xfb) != 0x48)
1381 return FALSE;
1382
1383 if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d)
1384 return FALSE;
1385
1386 val = bfd_get_8 (abfd, contents + offset - 1);
1387 return (val & 0xc7) == 0x05;
1388
1389 case R_X86_64_TLSDESC_CALL:
1390 /* Check transition from GDesc access model:
1391 call *x@tlsdesc(%rax)
1392 */
1393 if (offset + 2 <= sec->size)
1394 {
1395 /* Make sure that it's a call *x@tlsdesc(%rax). */
1396 static const unsigned char call[] = { 0xff, 0x10 };
1397 return memcmp (contents + offset, call, 2) == 0;
1398 }
1399
1400 return FALSE;
1401
1402 default:
1403 abort ();
1404 }
1405 }
1406
1407 /* Return TRUE if the TLS access transition is OK or no transition
1408 will be performed. Update R_TYPE if there is a transition. */
1409
1410 static bfd_boolean
1411 elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd,
1412 asection *sec, bfd_byte *contents,
1413 Elf_Internal_Shdr *symtab_hdr,
1414 struct elf_link_hash_entry **sym_hashes,
1415 unsigned int *r_type, int tls_type,
1416 const Elf_Internal_Rela *rel,
1417 const Elf_Internal_Rela *relend,
1418 struct elf_link_hash_entry *h,
1419 unsigned long r_symndx)
1420 {
1421 unsigned int from_type = *r_type;
1422 unsigned int to_type = from_type;
1423 bfd_boolean check = TRUE;
1424
1425 /* Skip TLS transition for functions. */
1426 if (h != NULL
1427 && (h->type == STT_FUNC
1428 || h->type == STT_GNU_IFUNC))
1429 return TRUE;
1430
1431 switch (from_type)
1432 {
1433 case R_X86_64_TLSGD:
1434 case R_X86_64_GOTPC32_TLSDESC:
1435 case R_X86_64_TLSDESC_CALL:
1436 case R_X86_64_GOTTPOFF:
1437 if (info->executable)
1438 {
1439 if (h == NULL)
1440 to_type = R_X86_64_TPOFF32;
1441 else
1442 to_type = R_X86_64_GOTTPOFF;
1443 }
1444
1445 /* When we are called from elf_x86_64_relocate_section,
1446 CONTENTS isn't NULL and there may be additional transitions
1447 based on TLS_TYPE. */
1448 if (contents != NULL)
1449 {
1450 unsigned int new_to_type = to_type;
1451
1452 if (info->executable
1453 && h != NULL
1454 && h->dynindx == -1
1455 && tls_type == GOT_TLS_IE)
1456 new_to_type = R_X86_64_TPOFF32;
1457
1458 if (to_type == R_X86_64_TLSGD
1459 || to_type == R_X86_64_GOTPC32_TLSDESC
1460 || to_type == R_X86_64_TLSDESC_CALL)
1461 {
1462 if (tls_type == GOT_TLS_IE)
1463 new_to_type = R_X86_64_GOTTPOFF;
1464 }
1465
1466 /* We checked the transition before when we were called from
1467 elf_x86_64_check_relocs. We only want to check the new
1468 transition which hasn't been checked before. */
1469 check = new_to_type != to_type && from_type == to_type;
1470 to_type = new_to_type;
1471 }
1472
1473 break;
1474
1475 case R_X86_64_TLSLD:
1476 if (info->executable)
1477 to_type = R_X86_64_TPOFF32;
1478 break;
1479
1480 default:
1481 return TRUE;
1482 }
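/* Editorial summary of the switch above (not upstream commentary):
   when producing an executable, GD/GDesc/IE accesses that resolve
   locally relax to LE (R_X86_64_TPOFF32), GD/GDesc accesses to symbols
   that remain dynamic relax to IE (R_X86_64_GOTTPOFF), and LD relaxes
   to LE; for shared objects no transition is made.  */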
1483
1484 /* Return TRUE if there is no transition. */
1485 if (from_type == to_type)
1486 return TRUE;
1487
1488 /* Check if the transition can be performed. */
1489 if (check
1490 && ! elf_x86_64_check_tls_transition (abfd, info, sec, contents,
1491 symtab_hdr, sym_hashes,
1492 from_type, rel, relend))
1493 {
1494 reloc_howto_type *from, *to;
1495 const char *name;
1496
1497 from = elf_x86_64_rtype_to_howto (abfd, from_type);
1498 to = elf_x86_64_rtype_to_howto (abfd, to_type);
1499
1500 if (h)
1501 name = h->root.root.string;
1502 else
1503 {
1504 struct elf_x86_64_link_hash_table *htab;
1505
1506 htab = elf_x86_64_hash_table (info);
1507 if (htab == NULL)
1508 name = "*unknown*";
1509 else
1510 {
1511 Elf_Internal_Sym *isym;
1512
1513 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1514 abfd, r_symndx);
1515 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1516 }
1517 }
1518
1519 (*_bfd_error_handler)
1520 (_("%B: TLS transition from %s to %s against `%s' at 0x%lx "
1521 "in section `%A' failed"),
1522 abfd, sec, from->name, to->name, name,
1523 (unsigned long) rel->r_offset);
1524 bfd_set_error (bfd_error_bad_value);
1525 return FALSE;
1526 }
1527
1528 *r_type = to_type;
1529 return TRUE;
1530 }
1531
1532 /* Look through the relocs for a section during the first phase, and
1533 calculate needed space in the global offset table, procedure
1534 linkage table, and dynamic reloc sections. */
1535
1536 static bfd_boolean
1537 elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info,
1538 asection *sec,
1539 const Elf_Internal_Rela *relocs)
1540 {
1541 struct elf_x86_64_link_hash_table *htab;
1542 Elf_Internal_Shdr *symtab_hdr;
1543 struct elf_link_hash_entry **sym_hashes;
1544 const Elf_Internal_Rela *rel;
1545 const Elf_Internal_Rela *rel_end;
1546 asection *sreloc;
1547 bfd_boolean use_plt_got;
1548
1549 if (info->relocatable)
1550 return TRUE;
1551
1552 BFD_ASSERT (is_x86_64_elf (abfd));
1553
1554 htab = elf_x86_64_hash_table (info);
1555 if (htab == NULL)
1556 return FALSE;
1557
1558 use_plt_got = get_elf_x86_64_backend_data (abfd) == &elf_x86_64_arch_bed;
1559
1560 symtab_hdr = &elf_symtab_hdr (abfd);
1561 sym_hashes = elf_sym_hashes (abfd);
1562
1563 sreloc = NULL;
1564
1565 rel_end = relocs + sec->reloc_count;
1566 for (rel = relocs; rel < rel_end; rel++)
1567 {
1568 unsigned int r_type;
1569 unsigned long r_symndx;
1570 struct elf_link_hash_entry *h;
1571 Elf_Internal_Sym *isym;
1572 const char *name;
1573 bfd_boolean size_reloc;
1574
1575 r_symndx = htab->r_sym (rel->r_info);
1576 r_type = ELF32_R_TYPE (rel->r_info);
1577
1578 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
1579 {
1580 (*_bfd_error_handler) (_("%B: bad symbol index: %d"),
1581 abfd, r_symndx);
1582 return FALSE;
1583 }
1584
1585 if (r_symndx < symtab_hdr->sh_info)
1586 {
1587 /* A local symbol. */
1588 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1589 abfd, r_symndx);
1590 if (isym == NULL)
1591 return FALSE;
1592
1593 /* Check relocation against local STT_GNU_IFUNC symbol. */
1594 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
1595 {
1596 h = elf_x86_64_get_local_sym_hash (htab, abfd, rel,
1597 TRUE);
1598 if (h == NULL)
1599 return FALSE;
1600
1601 /* Fake a STT_GNU_IFUNC symbol. */
1602 h->type = STT_GNU_IFUNC;
1603 h->def_regular = 1;
1604 h->ref_regular = 1;
1605 h->forced_local = 1;
1606 h->root.type = bfd_link_hash_defined;
1607 }
1608 else
1609 h = NULL;
1610 }
1611 else
1612 {
1613 isym = NULL;
1614 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1615 while (h->root.type == bfd_link_hash_indirect
1616 || h->root.type == bfd_link_hash_warning)
1617 h = (struct elf_link_hash_entry *) h->root.u.i.link;
1618 }
1619
1620 /* Check invalid x32 relocations. */
1621 if (!ABI_64_P (abfd))
1622 switch (r_type)
1623 {
1624 default:
1625 break;
1626
1627 case R_X86_64_DTPOFF64:
1628 case R_X86_64_TPOFF64:
1629 case R_X86_64_PC64:
1630 case R_X86_64_GOTOFF64:
1631 case R_X86_64_GOT64:
1632 case R_X86_64_GOTPCREL64:
1633 case R_X86_64_GOTPC64:
1634 case R_X86_64_GOTPLT64:
1635 case R_X86_64_PLTOFF64:
1636 {
1637 if (h)
1638 name = h->root.root.string;
1639 else
1640 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1641 NULL);
1642 (*_bfd_error_handler)
1643 (_("%B: relocation %s against symbol `%s' isn't "
1644 "supported in x32 mode"), abfd,
1645 x86_64_elf_howto_table[r_type].name, name);
1646 bfd_set_error (bfd_error_bad_value);
1647 return FALSE;
1648 }
1649 break;
1650 }
1651
1652 if (h != NULL)
1653 {
1654 /* Create the ifunc sections for static executables. If we
1655 never see an indirect function symbol and are not building
1656 a static executable, those sections will be empty and
1657 won't appear in the output. */
1658 switch (r_type)
1659 {
1660 default:
1661 break;
1662
1663 case R_X86_64_PC32_BND:
1664 case R_X86_64_PLT32_BND:
1665 case R_X86_64_PC32:
1666 case R_X86_64_PLT32:
1667 case R_X86_64_32:
1668 case R_X86_64_64:
1669 /* MPX PLT is supported only if elf_x86_64_arch_bed
1670 is used in 64-bit mode. */
1671 if (ABI_64_P (abfd)
1672 && info->bndplt
1673 && (get_elf_x86_64_backend_data (abfd)
1674 == &elf_x86_64_arch_bed))
1675 {
1676 elf_x86_64_hash_entry (h)->has_bnd_reloc = 1;
1677
1678 /* Create the second PLT for Intel MPX support. */
1679 if (htab->plt_bnd == NULL)
1680 {
1681 unsigned int plt_bnd_align;
1682 const struct elf_backend_data *bed;
1683
1684 bed = get_elf_backend_data (info->output_bfd);
1685 BFD_ASSERT (sizeof (elf_x86_64_bnd_plt2_entry) == 8
1686 && (sizeof (elf_x86_64_bnd_plt2_entry)
1687 == sizeof (elf_x86_64_legacy_plt2_entry)));
1688 plt_bnd_align = 3;
1689
1690 if (htab->elf.dynobj == NULL)
1691 htab->elf.dynobj = abfd;
1692 htab->plt_bnd
1693 = bfd_make_section_anyway_with_flags (htab->elf.dynobj,
1694 ".plt.bnd",
1695 (bed->dynamic_sec_flags
1696 | SEC_ALLOC
1697 | SEC_CODE
1698 | SEC_LOAD
1699 | SEC_READONLY));
1700 if (htab->plt_bnd == NULL
1701 || !bfd_set_section_alignment (htab->elf.dynobj,
1702 htab->plt_bnd,
1703 plt_bnd_align))
1704 return FALSE;
1705 }
1706 }
1707
1708 case R_X86_64_32S:
1709 case R_X86_64_PC64:
1710 case R_X86_64_GOTPCREL:
1711 case R_X86_64_GOTPCREL64:
1712 if (htab->elf.dynobj == NULL)
1713 htab->elf.dynobj = abfd;
1714 if (!_bfd_elf_create_ifunc_sections (htab->elf.dynobj, info))
1715 return FALSE;
1716 break;
1717 }
1718
1719 /* It is referenced by a non-shared object. */
1720 h->ref_regular = 1;
1721 h->root.non_ir_ref = 1;
1722 }
1723
1724 if (! elf_x86_64_tls_transition (info, abfd, sec, NULL,
1725 symtab_hdr, sym_hashes,
1726 &r_type, GOT_UNKNOWN,
1727 rel, rel_end, h, r_symndx))
1728 return FALSE;
1729
1730 switch (r_type)
1731 {
1732 case R_X86_64_TLSLD:
1733 htab->tls_ld_got.refcount += 1;
1734 goto create_got;
1735
1736 case R_X86_64_TPOFF32:
1737 if (!info->executable && ABI_64_P (abfd))
1738 {
1739 if (h)
1740 name = h->root.root.string;
1741 else
1742 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1743 NULL);
1744 (*_bfd_error_handler)
1745 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
1746 abfd,
1747 x86_64_elf_howto_table[r_type].name, name);
1748 bfd_set_error (bfd_error_bad_value);
1749 return FALSE;
1750 }
1751 break;
1752
1753 case R_X86_64_GOTTPOFF:
1754 if (!info->executable)
1755 info->flags |= DF_STATIC_TLS;
1756 /* Fall through */
1757
1758 case R_X86_64_GOT32:
1759 case R_X86_64_GOTPCREL:
1760 case R_X86_64_TLSGD:
1761 case R_X86_64_GOT64:
1762 case R_X86_64_GOTPCREL64:
1763 case R_X86_64_GOTPLT64:
1764 case R_X86_64_GOTPC32_TLSDESC:
1765 case R_X86_64_TLSDESC_CALL:
1766 /* This symbol requires a global offset table entry. */
1767 {
1768 int tls_type, old_tls_type;
1769
1770 switch (r_type)
1771 {
1772 default: tls_type = GOT_NORMAL; break;
1773 case R_X86_64_TLSGD: tls_type = GOT_TLS_GD; break;
1774 case R_X86_64_GOTTPOFF: tls_type = GOT_TLS_IE; break;
1775 case R_X86_64_GOTPC32_TLSDESC:
1776 case R_X86_64_TLSDESC_CALL:
1777 tls_type = GOT_TLS_GDESC; break;
1778 }
1779
1780 if (h != NULL)
1781 {
1782 h->got.refcount += 1;
1783 old_tls_type = elf_x86_64_hash_entry (h)->tls_type;
1784 }
1785 else
1786 {
1787 bfd_signed_vma *local_got_refcounts;
1788
1789 /* This is a global offset table entry for a local symbol. */
1790 local_got_refcounts = elf_local_got_refcounts (abfd);
1791 if (local_got_refcounts == NULL)
1792 {
1793 bfd_size_type size;
1794
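		      /* Allocate one refcount, one tlsdesc GOT offset and
			 one TLS type byte per local symbol in a single
			 block; the pointer arithmetic below carves it up.  */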
1795 size = symtab_hdr->sh_info;
1796 size *= sizeof (bfd_signed_vma)
1797 + sizeof (bfd_vma) + sizeof (char);
1798 local_got_refcounts = ((bfd_signed_vma *)
1799 bfd_zalloc (abfd, size));
1800 if (local_got_refcounts == NULL)
1801 return FALSE;
1802 elf_local_got_refcounts (abfd) = local_got_refcounts;
1803 elf_x86_64_local_tlsdesc_gotent (abfd)
1804 = (bfd_vma *) (local_got_refcounts + symtab_hdr->sh_info);
1805 elf_x86_64_local_got_tls_type (abfd)
1806 = (char *) (local_got_refcounts + 2 * symtab_hdr->sh_info);
1807 }
1808 local_got_refcounts[r_symndx] += 1;
1809 old_tls_type
1810 = elf_x86_64_local_got_tls_type (abfd) [r_symndx];
1811 }
1812
1813 /* If a TLS symbol is accessed using IE at least once,
1814 there is no point in using a dynamic model for it. */
1815 if (old_tls_type != tls_type && old_tls_type != GOT_UNKNOWN
1816 && (! GOT_TLS_GD_ANY_P (old_tls_type)
1817 || tls_type != GOT_TLS_IE))
1818 {
1819 if (old_tls_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (tls_type))
1820 tls_type = old_tls_type;
1821 else if (GOT_TLS_GD_ANY_P (old_tls_type)
1822 && GOT_TLS_GD_ANY_P (tls_type))
1823 tls_type |= old_tls_type;
1824 else
1825 {
1826 if (h)
1827 name = h->root.root.string;
1828 else
1829 name = bfd_elf_sym_name (abfd, symtab_hdr,
1830 isym, NULL);
1831 (*_bfd_error_handler)
1832 (_("%B: '%s' accessed both as normal and thread local symbol"),
1833 abfd, name);
1834 bfd_set_error (bfd_error_bad_value);
1835 return FALSE;
1836 }
1837 }
1838
1839 if (old_tls_type != tls_type)
1840 {
1841 if (h != NULL)
1842 elf_x86_64_hash_entry (h)->tls_type = tls_type;
1843 else
1844 elf_x86_64_local_got_tls_type (abfd) [r_symndx] = tls_type;
1845 }
1846 }
1847 /* Fall through */
1848
1849 case R_X86_64_GOTOFF64:
1850 case R_X86_64_GOTPC32:
1851 case R_X86_64_GOTPC64:
1852 create_got:
1853 if (htab->elf.sgot == NULL)
1854 {
1855 if (htab->elf.dynobj == NULL)
1856 htab->elf.dynobj = abfd;
1857 if (!_bfd_elf_create_got_section (htab->elf.dynobj,
1858 info))
1859 return FALSE;
1860 }
1861 break;
1862
1863 case R_X86_64_PLT32:
1864 case R_X86_64_PLT32_BND:
1865 /* This symbol requires a procedure linkage table entry. We
1866 actually build the entry in adjust_dynamic_symbol,
1867 because this might be a case of linking PIC code which is
1868 never referenced by a dynamic object, in which case we
1869 don't need to generate a procedure linkage table entry
1870 after all. */
1871
1872 /* If this is a local symbol, we resolve it directly without
1873 creating a procedure linkage table entry. */
1874 if (h == NULL)
1875 continue;
1876
1877 h->needs_plt = 1;
1878 h->plt.refcount += 1;
1879 break;
1880
1881 case R_X86_64_PLTOFF64:
1882 /* This tries to form the 'address' of a function relative
1883 to GOT. For global symbols we need a PLT entry. */
1884 if (h != NULL)
1885 {
1886 h->needs_plt = 1;
1887 h->plt.refcount += 1;
1888 }
1889 goto create_got;
1890
1891 case R_X86_64_SIZE32:
1892 case R_X86_64_SIZE64:
1893 size_reloc = TRUE;
1894 goto do_size;
1895
1896 case R_X86_64_32:
1897 if (!ABI_64_P (abfd))
1898 goto pointer;
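	  /* Fall through for 64-bit objects.  */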
1899 case R_X86_64_8:
1900 case R_X86_64_16:
1901 case R_X86_64_32S:
1902 /* Let's help debug shared library creation. These relocs
1903 cannot be used in shared libs. Don't error out for
1904 sections we don't care about, such as debug sections or
1905 non-constant sections. */
1906 if (info->shared
1907 && (sec->flags & SEC_ALLOC) != 0
1908 && (sec->flags & SEC_READONLY) != 0)
1909 {
1910 if (h)
1911 name = h->root.root.string;
1912 else
1913 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1914 (*_bfd_error_handler)
1915 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
1916 abfd, x86_64_elf_howto_table[r_type].name, name);
1917 bfd_set_error (bfd_error_bad_value);
1918 return FALSE;
1919 }
1920 /* Fall through. */
1921
1922 case R_X86_64_PC8:
1923 case R_X86_64_PC16:
1924 case R_X86_64_PC32:
1925 case R_X86_64_PC32_BND:
1926 case R_X86_64_PC64:
1927 case R_X86_64_64:
1928 pointer:
1929 if (h != NULL && info->executable)
1930 {
1931 /* If this reloc is in a read-only section, we might
1932 need a copy reloc. We can't check reliably at this
1933 stage whether the section is read-only, as input
1934 sections have not yet been mapped to output sections.
1935 Tentatively set the flag for now, and correct in
1936 adjust_dynamic_symbol. */
1937 h->non_got_ref = 1;
1938
1939 /* We may need a .plt entry if the function this reloc
1940 refers to is in a shared lib. */
1941 h->plt.refcount += 1;
1942 if (r_type != R_X86_64_PC32
1943 && r_type != R_X86_64_PC32_BND
1944 && r_type != R_X86_64_PC64)
1945 h->pointer_equality_needed = 1;
1946 }
1947
1948 size_reloc = FALSE;
1949 do_size:
1950 /* If we are creating a shared library, and this is a reloc
1951 against a global symbol, or a non PC relative reloc
1952 against a local symbol, then we need to copy the reloc
1953 into the shared library. However, if we are linking with
1954 -Bsymbolic, we do not need to copy a reloc against a
1955 global symbol which is defined in an object we are
1956 including in the link (i.e., DEF_REGULAR is set). At
1957 this point we have not seen all the input files, so it is
1958 possible that DEF_REGULAR is not set now but will be set
1959 later (it is never cleared). In case of a weak definition,
1960 DEF_REGULAR may be cleared later by a strong definition in
1961 a shared library. We account for that possibility below by
1962 storing information in the relocs_copied field of the hash
1963 table entry. A similar situation occurs when creating
1964 shared libraries and symbol visibility changes render the
1965 symbol local.
1966
1967 If on the other hand, we are creating an executable, we
1968 may need to keep relocations for symbols satisfied by a
1969 dynamic library if we manage to avoid copy relocs for the
1970 symbol. */
1971 if ((info->shared
1972 && (sec->flags & SEC_ALLOC) != 0
1973 && (! IS_X86_64_PCREL_TYPE (r_type)
1974 || (h != NULL
1975 && (! SYMBOLIC_BIND (info, h)
1976 || h->root.type == bfd_link_hash_defweak
1977 || !h->def_regular))))
1978 || (ELIMINATE_COPY_RELOCS
1979 && !info->shared
1980 && (sec->flags & SEC_ALLOC) != 0
1981 && h != NULL
1982 && (h->root.type == bfd_link_hash_defweak
1983 || !h->def_regular)))
1984 {
1985 struct elf_dyn_relocs *p;
1986 struct elf_dyn_relocs **head;
1987
1988 /* We must copy these reloc types into the output file.
1989 Create a reloc section in dynobj and make room for
1990 this reloc. */
1991 if (sreloc == NULL)
1992 {
1993 if (htab->elf.dynobj == NULL)
1994 htab->elf.dynobj = abfd;
1995
1996 sreloc = _bfd_elf_make_dynamic_reloc_section
1997 (sec, htab->elf.dynobj, ABI_64_P (abfd) ? 3 : 2,
1998 abfd, /*rela?*/ TRUE);
1999
2000 if (sreloc == NULL)
2001 return FALSE;
2002 }
2003
2004 /* If this is a global symbol, we count the number of
2005 relocations we need for this symbol. */
2006 if (h != NULL)
2007 {
2008 head = &((struct elf_x86_64_link_hash_entry *) h)->dyn_relocs;
2009 }
2010 else
2011 {
2012 /* Track dynamic relocs needed for local syms too.
2013 We really need local syms available to do this
2014 easily. Oh well. */
2015 asection *s;
2016 void **vpp;
2017
2018 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2019 abfd, r_symndx);
2020 if (isym == NULL)
2021 return FALSE;
2022
2023 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
2024 if (s == NULL)
2025 s = sec;
2026
2027 /* Beware of type punned pointers vs strict aliasing
2028 rules. */
2029 vpp = &(elf_section_data (s)->local_dynrel);
2030 head = (struct elf_dyn_relocs **)vpp;
2031 }
2032
2033 p = *head;
2034 if (p == NULL || p->sec != sec)
2035 {
2036 bfd_size_type amt = sizeof *p;
2037
2038 p = ((struct elf_dyn_relocs *)
2039 bfd_alloc (htab->elf.dynobj, amt));
2040 if (p == NULL)
2041 return FALSE;
2042 p->next = *head;
2043 *head = p;
2044 p->sec = sec;
2045 p->count = 0;
2046 p->pc_count = 0;
2047 }
2048
2049 p->count += 1;
2050 /* Count size relocation as PC-relative relocation. */
2051 if (IS_X86_64_PCREL_TYPE (r_type) || size_reloc)
2052 p->pc_count += 1;
2053 }
2054 break;
2055
2056 /* This relocation describes the C++ object vtable hierarchy.
2057 Reconstruct it for later use during GC. */
2058 case R_X86_64_GNU_VTINHERIT:
2059 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
2060 return FALSE;
2061 break;
2062
2063 /* This relocation describes which C++ vtable entries are actually
2064 used. Record for later use during GC. */
2065 case R_X86_64_GNU_VTENTRY:
2066 BFD_ASSERT (h != NULL);
2067 if (h != NULL
2068 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
2069 return FALSE;
2070 break;
2071
2072 default:
2073 break;
2074 }
2075
2076 if (use_plt_got
2077 && h != NULL
2078 && h->plt.refcount > 0
2079 && h->got.refcount > 0
2080 && htab->plt_got == NULL)
2081 {
2082 /* Create the GOT procedure linkage table. */
2083 unsigned int plt_got_align;
2084 const struct elf_backend_data *bed;
2085
2086 bed = get_elf_backend_data (info->output_bfd);
2087 BFD_ASSERT (sizeof (elf_x86_64_legacy_plt2_entry) == 8
2088 && (sizeof (elf_x86_64_bnd_plt2_entry)
2089 == sizeof (elf_x86_64_legacy_plt2_entry)));
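	  /* As with .plt.bnd, the 8-byte entries call for a 2**3 byte
	     section alignment.  */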
2090 plt_got_align = 3;
2091
2092 if (htab->elf.dynobj == NULL)
2093 htab->elf.dynobj = abfd;
2094 htab->plt_got
2095 = bfd_make_section_anyway_with_flags (htab->elf.dynobj,
2096 ".plt.got",
2097 (bed->dynamic_sec_flags
2098 | SEC_ALLOC
2099 | SEC_CODE
2100 | SEC_LOAD
2101 | SEC_READONLY));
2102 if (htab->plt_got == NULL
2103 || !bfd_set_section_alignment (htab->elf.dynobj,
2104 htab->plt_got,
2105 plt_got_align))
2106 return FALSE;
2107 }
2108 }
2109
2110 return TRUE;
2111 }
2112
2113 /* Return the section that should be marked against GC for a given
2114 relocation. */
2115
2116 static asection *
2117 elf_x86_64_gc_mark_hook (asection *sec,
2118 struct bfd_link_info *info,
2119 Elf_Internal_Rela *rel,
2120 struct elf_link_hash_entry *h,
2121 Elf_Internal_Sym *sym)
2122 {
2123 if (h != NULL)
2124 switch (ELF32_R_TYPE (rel->r_info))
2125 {
2126 case R_X86_64_GNU_VTINHERIT:
2127 case R_X86_64_GNU_VTENTRY:
2128 return NULL;
2129 }
2130
2131 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
2132 }
2133
2134 /* Update the got entry reference counts for the section being removed. */
2135
2136 static bfd_boolean
2137 elf_x86_64_gc_sweep_hook (bfd *abfd, struct bfd_link_info *info,
2138 asection *sec,
2139 const Elf_Internal_Rela *relocs)
2140 {
2141 struct elf_x86_64_link_hash_table *htab;
2142 Elf_Internal_Shdr *symtab_hdr;
2143 struct elf_link_hash_entry **sym_hashes;
2144 bfd_signed_vma *local_got_refcounts;
2145 const Elf_Internal_Rela *rel, *relend;
2146
2147 if (info->relocatable)
2148 return TRUE;
2149
2150 htab = elf_x86_64_hash_table (info);
2151 if (htab == NULL)
2152 return FALSE;
2153
2154 elf_section_data (sec)->local_dynrel = NULL;
2155
2156 symtab_hdr = &elf_symtab_hdr (abfd);
2157 sym_hashes = elf_sym_hashes (abfd);
2158 local_got_refcounts = elf_local_got_refcounts (abfd);
2159
2160 htab = elf_x86_64_hash_table (info);
2161 relend = relocs + sec->reloc_count;
2162 for (rel = relocs; rel < relend; rel++)
2163 {
2164 unsigned long r_symndx;
2165 unsigned int r_type;
2166 struct elf_link_hash_entry *h = NULL;
2167
2168 r_symndx = htab->r_sym (rel->r_info);
2169 if (r_symndx >= symtab_hdr->sh_info)
2170 {
2171 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
2172 while (h->root.type == bfd_link_hash_indirect
2173 || h->root.type == bfd_link_hash_warning)
2174 h = (struct elf_link_hash_entry *) h->root.u.i.link;
2175 }
2176 else
2177 {
2178 /* A local symbol. */
2179 Elf_Internal_Sym *isym;
2180
2181 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2182 abfd, r_symndx);
2183
2184 /* Check relocation against local STT_GNU_IFUNC symbol. */
2185 if (isym != NULL
2186 && ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
2187 {
2188 h = elf_x86_64_get_local_sym_hash (htab, abfd, rel, FALSE);
2189 if (h == NULL)
2190 abort ();
2191 }
2192 }
2193
2194 if (h)
2195 {
2196 struct elf_x86_64_link_hash_entry *eh;
2197 struct elf_dyn_relocs **pp;
2198 struct elf_dyn_relocs *p;
2199
2200 eh = (struct elf_x86_64_link_hash_entry *) h;
2201
2202 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; pp = &p->next)
2203 if (p->sec == sec)
2204 {
2205 /* Everything must go for SEC. */
2206 *pp = p->next;
2207 break;
2208 }
2209 }
2210
2211 r_type = ELF32_R_TYPE (rel->r_info);
2212 if (! elf_x86_64_tls_transition (info, abfd, sec, NULL,
2213 symtab_hdr, sym_hashes,
2214 &r_type, GOT_UNKNOWN,
2215 rel, relend, h, r_symndx))
2216 return FALSE;
2217
2218 switch (r_type)
2219 {
2220 case R_X86_64_TLSLD:
2221 if (htab->tls_ld_got.refcount > 0)
2222 htab->tls_ld_got.refcount -= 1;
2223 break;
2224
2225 case R_X86_64_TLSGD:
2226 case R_X86_64_GOTPC32_TLSDESC:
2227 case R_X86_64_TLSDESC_CALL:
2228 case R_X86_64_GOTTPOFF:
2229 case R_X86_64_GOT32:
2230 case R_X86_64_GOTPCREL:
2231 case R_X86_64_GOT64:
2232 case R_X86_64_GOTPCREL64:
2233 case R_X86_64_GOTPLT64:
2234 if (h != NULL)
2235 {
2236 if (h->got.refcount > 0)
2237 h->got.refcount -= 1;
2238 if (h->type == STT_GNU_IFUNC)
2239 {
2240 if (h->plt.refcount > 0)
2241 h->plt.refcount -= 1;
2242 }
2243 }
2244 else if (local_got_refcounts != NULL)
2245 {
2246 if (local_got_refcounts[r_symndx] > 0)
2247 local_got_refcounts[r_symndx] -= 1;
2248 }
2249 break;
2250
2251 case R_X86_64_8:
2252 case R_X86_64_16:
2253 case R_X86_64_32:
2254 case R_X86_64_64:
2255 case R_X86_64_32S:
2256 case R_X86_64_PC8:
2257 case R_X86_64_PC16:
2258 case R_X86_64_PC32:
2259 case R_X86_64_PC32_BND:
2260 case R_X86_64_PC64:
2261 case R_X86_64_SIZE32:
2262 case R_X86_64_SIZE64:
2263 if (info->shared
2264 && (h == NULL || h->type != STT_GNU_IFUNC))
2265 break;
2266 /* Fall thru */
2267
2268 case R_X86_64_PLT32:
2269 case R_X86_64_PLT32_BND:
2270 case R_X86_64_PLTOFF64:
2271 if (h != NULL)
2272 {
2273 if (h->plt.refcount > 0)
2274 h->plt.refcount -= 1;
2275 }
2276 break;
2277
2278 default:
2279 break;
2280 }
2281 }
2282
2283 return TRUE;
2284 }
2285
2286 /* Adjust a symbol defined by a dynamic object and referenced by a
2287 regular object. The current definition is in some section of the
2288 dynamic object, but we're not including those sections. We have to
2289 change the definition to something the rest of the link can
2290 understand. */
2291
2292 static bfd_boolean
2293 elf_x86_64_adjust_dynamic_symbol (struct bfd_link_info *info,
2294 struct elf_link_hash_entry *h)
2295 {
2296 struct elf_x86_64_link_hash_table *htab;
2297 asection *s;
2298 struct elf_x86_64_link_hash_entry *eh;
2299 struct elf_dyn_relocs *p;
2300
2301 /* STT_GNU_IFUNC symbol must go through PLT. */
2302 if (h->type == STT_GNU_IFUNC)
2303 {
2304 /* All local STT_GNU_IFUNC references must be treated as local
2305 calls via local PLT. */
2306 if (h->ref_regular
2307 && SYMBOL_CALLS_LOCAL (info, h))
2308 {
2309 bfd_size_type pc_count = 0, count = 0;
2310 struct elf_dyn_relocs **pp;
2311
2312 eh = (struct elf_x86_64_link_hash_entry *) h;
2313 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
2314 {
2315 pc_count += p->pc_count;
2316 p->count -= p->pc_count;
2317 p->pc_count = 0;
2318 count += p->count;
2319 if (p->count == 0)
2320 *pp = p->next;
2321 else
2322 pp = &p->next;
2323 }
2324
2325 if (pc_count || count)
2326 {
2327 h->needs_plt = 1;
2328 h->non_got_ref = 1;
2329 if (h->plt.refcount <= 0)
2330 h->plt.refcount = 1;
2331 else
2332 h->plt.refcount += 1;
2333 }
2334 }
2335
2336 if (h->plt.refcount <= 0)
2337 {
2338 h->plt.offset = (bfd_vma) -1;
2339 h->needs_plt = 0;
2340 }
2341 return TRUE;
2342 }
2343
2344 /* If this is a function, put it in the procedure linkage table. We
2345 will fill in the contents of the procedure linkage table later,
2346 when we know the address of the .got section. */
2347 if (h->type == STT_FUNC
2348 || h->needs_plt)
2349 {
2350 if (h->plt.refcount <= 0
2351 || SYMBOL_CALLS_LOCAL (info, h)
2352 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
2353 && h->root.type == bfd_link_hash_undefweak))
2354 {
2355 /* This case can occur if we saw a PLT32 reloc in an input
2356 file, but the symbol was never referred to by a dynamic
2357 object, or if all references were garbage collected. In
2358 such a case, we don't actually need to build a procedure
2359 linkage table, and we can just do a PC32 reloc instead. */
2360 h->plt.offset = (bfd_vma) -1;
2361 h->needs_plt = 0;
2362 }
2363
2364 return TRUE;
2365 }
2366 else
2367 /* It's possible that we incorrectly decided a .plt reloc was
2368 needed for an R_X86_64_PC32 reloc to a non-function sym in
2369 check_relocs. We can't decide accurately between function and
2370 non-function syms in check-relocs; Objects loaded later in
2371 the link may change h->type. So fix it now. */
2372 h->plt.offset = (bfd_vma) -1;
2373
2374 /* If this is a weak symbol, and there is a real definition, the
2375 processor independent code will have arranged for us to see the
2376 real definition first, and we can just use the same value. */
2377 if (h->u.weakdef != NULL)
2378 {
2379 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
2380 || h->u.weakdef->root.type == bfd_link_hash_defweak);
2381 h->root.u.def.section = h->u.weakdef->root.u.def.section;
2382 h->root.u.def.value = h->u.weakdef->root.u.def.value;
2383 if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
2384 {
2385 eh = (struct elf_x86_64_link_hash_entry *) h;
2386 h->non_got_ref = h->u.weakdef->non_got_ref;
2387 eh->needs_copy = h->u.weakdef->needs_copy;
2388 }
2389 return TRUE;
2390 }
2391
2392 /* This is a reference to a symbol defined by a dynamic object which
2393 is not a function. */
2394
2395 /* If we are creating a shared library, we must presume that the
2396 only references to the symbol are via the global offset table.
2397 For such cases we need not do anything here; the relocations will
2398 be handled correctly by relocate_section. */
2399 if (!info->executable)
2400 return TRUE;
2401
2402 /* If there are no references to this symbol that do not use the
2403 GOT, we don't need to generate a copy reloc. */
2404 if (!h->non_got_ref)
2405 return TRUE;
2406
2407 /* If -z nocopyreloc was given, we won't generate them either. */
2408 if (info->nocopyreloc)
2409 {
2410 h->non_got_ref = 0;
2411 return TRUE;
2412 }
2413
2414 if (ELIMINATE_COPY_RELOCS)
2415 {
2416 eh = (struct elf_x86_64_link_hash_entry *) h;
2417 for (p = eh->dyn_relocs; p != NULL; p = p->next)
2418 {
2419 s = p->sec->output_section;
2420 if (s != NULL && (s->flags & SEC_READONLY) != 0)
2421 break;
2422 }
2423
2424 /* If we didn't find any dynamic relocs in read-only sections, then
2425 we'll be keeping the dynamic relocs and avoiding the copy reloc. */
2426 if (p == NULL)
2427 {
2428 h->non_got_ref = 0;
2429 return TRUE;
2430 }
2431 }
2432
2433 /* We must allocate the symbol in our .dynbss section, which will
2434 become part of the .bss section of the executable. There will be
2435 an entry for this symbol in the .dynsym section. The dynamic
2436 object will contain position independent code, so all references
2437 from the dynamic object to this symbol will go through the global
2438 offset table. The dynamic linker will use the .dynsym entry to
2439 determine the address it must put in the global offset table, so
2440 both the dynamic object and the regular object will refer to the
2441 same memory location for the variable. */
2442
2443 htab = elf_x86_64_hash_table (info);
2444 if (htab == NULL)
2445 return FALSE;
2446
2447 /* We must generate a R_X86_64_COPY reloc to tell the dynamic linker
2448 to copy the initial value out of the dynamic object and into the
2449 runtime process image. */
2450 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
2451 {
2452 const struct elf_backend_data *bed;
2453 bed = get_elf_backend_data (info->output_bfd);
2454 htab->srelbss->size += bed->s->sizeof_rela;
2455 h->needs_copy = 1;
2456 }
2457
2458 s = htab->sdynbss;
2459
2460 return _bfd_elf_adjust_dynamic_copy (info, h, s);
2461 }
2462
2463 /* Allocate space in .plt, .got and associated reloc sections for
2464 dynamic relocs. */
2465
2466 static bfd_boolean
2467 elf_x86_64_allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf)
2468 {
2469 struct bfd_link_info *info;
2470 struct elf_x86_64_link_hash_table *htab;
2471 struct elf_x86_64_link_hash_entry *eh;
2472 struct elf_dyn_relocs *p;
2473 const struct elf_backend_data *bed;
2474 unsigned int plt_entry_size;
2475
2476 if (h->root.type == bfd_link_hash_indirect)
2477 return TRUE;
2478
2479 eh = (struct elf_x86_64_link_hash_entry *) h;
2480
2481 info = (struct bfd_link_info *) inf;
2482 htab = elf_x86_64_hash_table (info);
2483 if (htab == NULL)
2484 return FALSE;
2485 bed = get_elf_backend_data (info->output_bfd);
2486 plt_entry_size = GET_PLT_ENTRY_SIZE (info->output_bfd);
2487
2488 /* We can't use the GOT PLT if pointer equality is needed since
2489 finish_dynamic_symbol won't clear symbol value and the dynamic
2490 linker won't update the GOT slot. We will get into an infinite
2491 loop at run-time. */
2492 if (htab->plt_got != NULL
2493 && h->type != STT_GNU_IFUNC
2494 && !h->pointer_equality_needed
2495 && h->plt.refcount > 0
2496 && h->got.refcount > 0)
2497 {
2498 /* Don't use the regular PLT if there are both GOT and GOTPLT
2499 relocations. */
2500 h->plt.offset = (bfd_vma) -1;
2501
2502 /* Use the GOT PLT. */
2503 eh->plt_got.refcount = 1;
2504 }
2505
2506 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle it
2507 here if it is defined and referenced in a non-shared object. */
2508 if (h->type == STT_GNU_IFUNC
2509 && h->def_regular)
2510 {
2511 if (_bfd_elf_allocate_ifunc_dyn_relocs (info, h,
2512 &eh->dyn_relocs,
2513 plt_entry_size,
2514 plt_entry_size,
2515 GOT_ENTRY_SIZE))
2516 {
2517 asection *s = htab->plt_bnd;
2518 if (h->plt.offset != (bfd_vma) -1 && s != NULL)
2519 {
2520 /* Use the .plt.bnd section if it is created. */
2521 eh->plt_bnd.offset = s->size;
2522
2523 /* Make room for this entry in the .plt.bnd section. */
2524 s->size += sizeof (elf_x86_64_legacy_plt2_entry);
2525 }
2526
2527 return TRUE;
2528 }
2529 else
2530 return FALSE;
2531 }
2532 else if (htab->elf.dynamic_sections_created
2533 && (h->plt.refcount > 0 || eh->plt_got.refcount > 0))
2534 {
2535 bfd_boolean use_plt_got = eh->plt_got.refcount > 0;
2536
2537 /* Make sure this symbol is output as a dynamic symbol.
2538 Undefined weak syms won't yet be marked as dynamic. */
2539 if (h->dynindx == -1
2540 && !h->forced_local)
2541 {
2542 if (! bfd_elf_link_record_dynamic_symbol (info, h))
2543 return FALSE;
2544 }
2545
2546 if (info->shared
2547 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
2548 {
2549 asection *s = htab->elf.splt;
2550 asection *bnd_s = htab->plt_bnd;
2551 asection *got_s = htab->plt_got;
2552
2553 /* If this is the first .plt entry, make room for the special
2554 first entry. */
2555 if (s->size == 0)
2556 s->size = plt_entry_size;
2557
2558 if (use_plt_got)
2559 eh->plt_got.offset = got_s->size;
2560 else
2561 {
2562 h->plt.offset = s->size;
2563 if (bnd_s)
2564 eh->plt_bnd.offset = bnd_s->size;
2565 }
2566
2567 /* If this symbol is not defined in a regular file, and we are
2568 not generating a shared library, then set the symbol to this
2569 location in the .plt. This is required to make function
2570 pointers compare as equal between the normal executable and
2571 the shared library. */
2572 if (! info->shared
2573 && !h->def_regular)
2574 {
2575 if (use_plt_got)
2576 {
2577 /* We need to make a call to the entry of the GOT PLT
2578 instead of regular PLT entry. */
2579 h->root.u.def.section = got_s;
2580 h->root.u.def.value = eh->plt_got.offset;
2581 }
2582 else
2583 {
2584 if (bnd_s)
2585 {
2586 /* We need to make a call to the entry of the second
2587 PLT instead of regular PLT entry. */
2588 h->root.u.def.section = bnd_s;
2589 h->root.u.def.value = eh->plt_bnd.offset;
2590 }
2591 else
2592 {
2593 h->root.u.def.section = s;
2594 h->root.u.def.value = h->plt.offset;
2595 }
2596 }
2597 }
2598
2599 /* Make room for this entry. */
2600 if (use_plt_got)
2601 got_s->size += sizeof (elf_x86_64_legacy_plt2_entry);
2602 else
2603 {
2604 s->size += plt_entry_size;
2605 if (bnd_s)
2606 bnd_s->size += sizeof (elf_x86_64_legacy_plt2_entry);
2607
2608 /* We also need to make an entry in the .got.plt section,
2609 which will be placed in the .got section by the linker
2610 script. */
2611 htab->elf.sgotplt->size += GOT_ENTRY_SIZE;
2612
2613 /* We also need to make an entry in the .rela.plt
2614 section. */
2615 htab->elf.srelplt->size += bed->s->sizeof_rela;
2616 htab->elf.srelplt->reloc_count++;
2617 }
2618 }
2619 else
2620 {
2621 h->plt.offset = (bfd_vma) -1;
2622 h->needs_plt = 0;
2623 }
2624 }
2625 else
2626 {
2627 h->plt.offset = (bfd_vma) -1;
2628 h->needs_plt = 0;
2629 }
2630
2631 eh->tlsdesc_got = (bfd_vma) -1;
2632
2633 /* If R_X86_64_GOTTPOFF symbol is now local to the binary,
2634 make it a R_X86_64_TPOFF32 requiring no GOT entry. */
2635 if (h->got.refcount > 0
2636 && info->executable
2637 && h->dynindx == -1
2638 && elf_x86_64_hash_entry (h)->tls_type == GOT_TLS_IE)
2639 {
2640 h->got.offset = (bfd_vma) -1;
2641 }
2642 else if (h->got.refcount > 0)
2643 {
2644 asection *s;
2645 bfd_boolean dyn;
2646 int tls_type = elf_x86_64_hash_entry (h)->tls_type;
2647
2648 /* Make sure this symbol is output as a dynamic symbol.
2649 Undefined weak syms won't yet be marked as dynamic. */
2650 if (h->dynindx == -1
2651 && !h->forced_local)
2652 {
2653 if (! bfd_elf_link_record_dynamic_symbol (info, h))
2654 return FALSE;
2655 }
2656
2657 if (GOT_TLS_GDESC_P (tls_type))
2658 {
2659 eh->tlsdesc_got = htab->elf.sgotplt->size
2660 - elf_x86_64_compute_jump_table_size (htab);
2661 htab->elf.sgotplt->size += 2 * GOT_ENTRY_SIZE;
2662 h->got.offset = (bfd_vma) -2;
2663 }
2664 if (! GOT_TLS_GDESC_P (tls_type)
2665 || GOT_TLS_GD_P (tls_type))
2666 {
2667 s = htab->elf.sgot;
2668 h->got.offset = s->size;
2669 s->size += GOT_ENTRY_SIZE;
2670 if (GOT_TLS_GD_P (tls_type))
2671 s->size += GOT_ENTRY_SIZE;
2672 }
2673 dyn = htab->elf.dynamic_sections_created;
2674 /* R_X86_64_TLSGD needs one dynamic relocation if local symbol
2675 and two if global.
2676 R_X86_64_GOTTPOFF needs one dynamic relocation. */
2677 if ((GOT_TLS_GD_P (tls_type) && h->dynindx == -1)
2678 || tls_type == GOT_TLS_IE)
2679 htab->elf.srelgot->size += bed->s->sizeof_rela;
2680 else if (GOT_TLS_GD_P (tls_type))
2681 htab->elf.srelgot->size += 2 * bed->s->sizeof_rela;
2682 else if (! GOT_TLS_GDESC_P (tls_type)
2683 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
2684 || h->root.type != bfd_link_hash_undefweak)
2685 && (info->shared
2686 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
2687 htab->elf.srelgot->size += bed->s->sizeof_rela;
2688 if (GOT_TLS_GDESC_P (tls_type))
2689 {
2690 htab->elf.srelplt->size += bed->s->sizeof_rela;
2691 htab->tlsdesc_plt = (bfd_vma) -1;
2692 }
2693 }
2694 else
2695 h->got.offset = (bfd_vma) -1;
2696
2697 if (eh->dyn_relocs == NULL)
2698 return TRUE;
2699
2700 /* In the shared -Bsymbolic case, discard space allocated for
2701 dynamic pc-relative relocs against symbols which turn out to be
2702 defined in regular objects. For the normal shared case, discard
2703 space for pc-relative relocs that have become local due to symbol
2704 visibility changes. */
2705
2706 if (info->shared)
2707 {
2708 /* Relocs that use pc_count are those that appear on a call
2709 insn, or certain REL relocs that can be generated via assembly.
2710 We want calls to protected symbols to resolve directly to the
2711 function rather than going via the plt. If people want
2712 function pointer comparisons to work as expected then they
2713 should avoid writing weird assembly. */
2714 if (SYMBOL_CALLS_LOCAL (info, h))
2715 {
2716 struct elf_dyn_relocs **pp;
2717
2718 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
2719 {
2720 p->count -= p->pc_count;
2721 p->pc_count = 0;
2722 if (p->count == 0)
2723 *pp = p->next;
2724 else
2725 pp = &p->next;
2726 }
2727 }
2728
2729 /* Also discard relocs on undefined weak syms with non-default
2730 visibility. */
2731 if (eh->dyn_relocs != NULL)
2732 {
2733 if (h->root.type == bfd_link_hash_undefweak)
2734 {
2735 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
2736 eh->dyn_relocs = NULL;
2737
2738 /* Make sure undefined weak symbols are output as a dynamic
2739 symbol in PIEs. */
2740 else if (h->dynindx == -1
2741 && ! h->forced_local
2742 && ! bfd_elf_link_record_dynamic_symbol (info, h))
2743 return FALSE;
2744 }
2745 /* For PIE, discard space for pc-relative relocs against
2746 symbols which turn out to need copy relocs. */
2747 else if (info->executable
2748 && (h->needs_copy || eh->needs_copy)
2749 && h->def_dynamic
2750 && !h->def_regular)
2751 {
2752 struct elf_dyn_relocs **pp;
2753
2754 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
2755 {
2756 if (p->pc_count != 0)
2757 *pp = p->next;
2758 else
2759 pp = &p->next;
2760 }
2761 }
2762 }
2763 }
2764 else if (ELIMINATE_COPY_RELOCS)
2765 {
2766 /* For the non-shared case, discard space for relocs against
2767 symbols which turn out to need copy relocs or are not
2768 dynamic. */
2769
2770 if (!h->non_got_ref
2771 && ((h->def_dynamic
2772 && !h->def_regular)
2773 || (htab->elf.dynamic_sections_created
2774 && (h->root.type == bfd_link_hash_undefweak
2775 || h->root.type == bfd_link_hash_undefined))))
2776 {
2777 /* Make sure this symbol is output as a dynamic symbol.
2778 Undefined weak syms won't yet be marked as dynamic. */
2779 if (h->dynindx == -1
2780 && ! h->forced_local
2781 && ! bfd_elf_link_record_dynamic_symbol (info, h))
2782 return FALSE;
2783
2784 /* If that succeeded, we know we'll be keeping all the
2785 relocs. */
2786 if (h->dynindx != -1)
2787 goto keep;
2788 }
2789
2790 eh->dyn_relocs = NULL;
2791
2792 keep: ;
2793 }
2794
2795 /* Finally, allocate space. */
2796 for (p = eh->dyn_relocs; p != NULL; p = p->next)
2797 {
2798 asection * sreloc;
2799
2800 sreloc = elf_section_data (p->sec)->sreloc;
2801
2802 BFD_ASSERT (sreloc != NULL);
2803
2804 sreloc->size += p->count * bed->s->sizeof_rela;
2805 }
2806
2807 return TRUE;
2808 }
2809
2810 /* Allocate space in .plt, .got and associated reloc sections for
2811 local dynamic relocs. */
2812
2813 static bfd_boolean
2814 elf_x86_64_allocate_local_dynrelocs (void **slot, void *inf)
2815 {
2816 struct elf_link_hash_entry *h
2817 = (struct elf_link_hash_entry *) *slot;
2818
2819 if (h->type != STT_GNU_IFUNC
2820 || !h->def_regular
2821 || !h->ref_regular
2822 || !h->forced_local
2823 || h->root.type != bfd_link_hash_defined)
2824 abort ();
2825
2826 return elf_x86_64_allocate_dynrelocs (h, inf);
2827 }
2828
2829 /* Find any dynamic relocs that apply to read-only sections. */
2830
2831 static bfd_boolean
2832 elf_x86_64_readonly_dynrelocs (struct elf_link_hash_entry *h,
2833 void * inf)
2834 {
2835 struct elf_x86_64_link_hash_entry *eh;
2836 struct elf_dyn_relocs *p;
2837
2838 /* Skip local IFUNC symbols. */
2839 if (h->forced_local && h->type == STT_GNU_IFUNC)
2840 return TRUE;
2841
2842 eh = (struct elf_x86_64_link_hash_entry *) h;
2843 for (p = eh->dyn_relocs; p != NULL; p = p->next)
2844 {
2845 asection *s = p->sec->output_section;
2846
2847 if (s != NULL && (s->flags & SEC_READONLY) != 0)
2848 {
2849 struct bfd_link_info *info = (struct bfd_link_info *) inf;
2850
2851 info->flags |= DF_TEXTREL;
2852
2853 if ((info->warn_shared_textrel && info->shared)
2854 || info->error_textrel)
2855 info->callbacks->einfo (_("%P: %B: warning: relocation against `%s' in readonly section `%A'\n"),
2856 p->sec->owner, h->root.root.string,
2857 p->sec);
2858
2859 /* Not an error, just cut short the traversal. */
2860 return FALSE;
2861 }
2862 }
2863 return TRUE;
2864 }
2865
2866 /* Convert
2867 mov foo@GOTPCREL(%rip), %reg
2868 to
2869 lea foo(%rip), %reg
2870 with the local symbol, foo. */
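/* For example, "mov foo@GOTPCREL(%rip), %rax" is encoded as
   48 8b 05 <4-byte R_X86_64_GOTPCREL offset>; the conversion below
   rewrites the 0x8b (mov) opcode byte to 0x8d (lea) and turns the
   R_X86_64_GOTPCREL relocation into R_X86_64_PC32, which yields
   "lea foo(%rip), %rax" and avoids the GOT load.  */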
2871
2872 static bfd_boolean
2873 elf_x86_64_convert_mov_to_lea (bfd *abfd, asection *sec,
2874 struct bfd_link_info *link_info)
2875 {
2876 Elf_Internal_Shdr *symtab_hdr;
2877 Elf_Internal_Rela *internal_relocs;
2878 Elf_Internal_Rela *irel, *irelend;
2879 bfd_byte *contents;
2880 struct elf_x86_64_link_hash_table *htab;
2881 bfd_boolean changed_contents;
2882 bfd_boolean changed_relocs;
2883 bfd_signed_vma *local_got_refcounts;
2884
2885 /* Don't even try to convert non-ELF outputs. */
2886 if (!is_elf_hash_table (link_info->hash))
2887 return FALSE;
2888
2889 /* Nothing to do if there is no code, no relocations or no output. */
2890 if ((sec->flags & (SEC_CODE | SEC_RELOC)) != (SEC_CODE | SEC_RELOC)
2891 || sec->reloc_count == 0
2892 || bfd_is_abs_section (sec->output_section))
2893 return TRUE;
2894
2895 symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
2896
2897 /* Load the relocations for this section. */
2898 internal_relocs = (_bfd_elf_link_read_relocs
2899 (abfd, sec, NULL, (Elf_Internal_Rela *) NULL,
2900 link_info->keep_memory));
2901 if (internal_relocs == NULL)
2902 return FALSE;
2903
2904 htab = elf_x86_64_hash_table (link_info);
2905 changed_contents = FALSE;
2906 changed_relocs = FALSE;
2907 local_got_refcounts = elf_local_got_refcounts (abfd);
2908
2909 /* Get the section contents. */
2910 if (elf_section_data (sec)->this_hdr.contents != NULL)
2911 contents = elf_section_data (sec)->this_hdr.contents;
2912 else
2913 {
2914 if (!bfd_malloc_and_get_section (abfd, sec, &contents))
2915 goto error_return;
2916 }
2917
2918 irelend = internal_relocs + sec->reloc_count;
2919 for (irel = internal_relocs; irel < irelend; irel++)
2920 {
2921 unsigned int r_type = ELF32_R_TYPE (irel->r_info);
2922 unsigned int r_symndx = htab->r_sym (irel->r_info);
2923 unsigned int indx;
2924 struct elf_link_hash_entry *h;
2925
2926 if (r_type != R_X86_64_GOTPCREL)
2927 continue;
2928
2929 /* Get the symbol referred to by the reloc. */
2930 if (r_symndx < symtab_hdr->sh_info)
2931 {
2932 Elf_Internal_Sym *isym;
2933
2934 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2935 abfd, r_symndx);
2936
2937 /* STT_GNU_IFUNC must keep R_X86_64_GOTPCREL relocation. */
2938 if (ELF_ST_TYPE (isym->st_info) != STT_GNU_IFUNC
2939 && irel->r_offset >= 2
2940 && bfd_get_8 (abfd,
2941 contents + irel->r_offset - 2) == 0x8b)
2942 {
2943 bfd_put_8 (abfd, 0x8d,
2944 contents + irel->r_offset - 2);
2945 irel->r_info = htab->r_info (r_symndx, R_X86_64_PC32);
2946 if (local_got_refcounts != NULL
2947 && local_got_refcounts[r_symndx] > 0)
2948 local_got_refcounts[r_symndx] -= 1;
2949 changed_contents = TRUE;
2950 changed_relocs = TRUE;
2951 }
2952 continue;
2953 }
2954
2955 indx = r_symndx - symtab_hdr->sh_info;
2956 h = elf_sym_hashes (abfd)[indx];
2957 BFD_ASSERT (h != NULL);
2958
2959 while (h->root.type == bfd_link_hash_indirect
2960 || h->root.type == bfd_link_hash_warning)
2961 h = (struct elf_link_hash_entry *) h->root.u.i.link;
2962
2963 /* STT_GNU_IFUNC must keep R_X86_64_GOTPCREL relocation. We also
2964 avoid optimizing _DYNAMIC since ld.so may use its link-time
2965 address. */
2966 if (h->def_regular
2967 && h->type != STT_GNU_IFUNC
2968 && h != htab->elf.hdynamic
2969 && SYMBOL_REFERENCES_LOCAL (link_info, h)
2970 && irel->r_offset >= 2
2971 && bfd_get_8 (abfd,
2972 contents + irel->r_offset - 2) == 0x8b)
2973 {
2974 bfd_put_8 (abfd, 0x8d,
2975 contents + irel->r_offset - 2);
2976 irel->r_info = htab->r_info (r_symndx, R_X86_64_PC32);
2977 if (h->got.refcount > 0)
2978 h->got.refcount -= 1;
2979 changed_contents = TRUE;
2980 changed_relocs = TRUE;
2981 }
2982 }
2983
2984 if (contents != NULL
2985 && elf_section_data (sec)->this_hdr.contents != contents)
2986 {
2987 if (!changed_contents && !link_info->keep_memory)
2988 free (contents);
2989 else
2990 {
2991 /* Cache the section contents for elf_link_input_bfd. */
2992 elf_section_data (sec)->this_hdr.contents = contents;
2993 }
2994 }
2995
2996 if (elf_section_data (sec)->relocs != internal_relocs)
2997 {
2998 if (!changed_relocs)
2999 free (internal_relocs);
3000 else
3001 elf_section_data (sec)->relocs = internal_relocs;
3002 }
3003
3004 return TRUE;
3005
3006 error_return:
3007 if (contents != NULL
3008 && elf_section_data (sec)->this_hdr.contents != contents)
3009 free (contents);
3010 if (internal_relocs != NULL
3011 && elf_section_data (sec)->relocs != internal_relocs)
3012 free (internal_relocs);
3013 return FALSE;
3014 }
3015
3016 /* Set the sizes of the dynamic sections. */
3017
3018 static bfd_boolean
3019 elf_x86_64_size_dynamic_sections (bfd *output_bfd,
3020 struct bfd_link_info *info)
3021 {
3022 struct elf_x86_64_link_hash_table *htab;
3023 bfd *dynobj;
3024 asection *s;
3025 bfd_boolean relocs;
3026 bfd *ibfd;
3027 const struct elf_backend_data *bed;
3028
3029 htab = elf_x86_64_hash_table (info);
3030 if (htab == NULL)
3031 return FALSE;
3032 bed = get_elf_backend_data (output_bfd);
3033
3034 dynobj = htab->elf.dynobj;
3035 if (dynobj == NULL)
3036 abort ();
3037
3038 if (htab->elf.dynamic_sections_created)
3039 {
3040 /* Set the contents of the .interp section to the interpreter. */
3041 if (info->executable)
3042 {
3043 s = bfd_get_linker_section (dynobj, ".interp");
3044 if (s == NULL)
3045 abort ();
3046 s->size = htab->dynamic_interpreter_size;
3047 s->contents = (unsigned char *) htab->dynamic_interpreter;
3048 }
3049 }
3050
3051 /* Set up .got offsets for local syms, and space for local dynamic
3052 relocs. */
3053 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
3054 {
3055 bfd_signed_vma *local_got;
3056 bfd_signed_vma *end_local_got;
3057 char *local_tls_type;
3058 bfd_vma *local_tlsdesc_gotent;
3059 bfd_size_type locsymcount;
3060 Elf_Internal_Shdr *symtab_hdr;
3061 asection *srel;
3062
3063 if (! is_x86_64_elf (ibfd))
3064 continue;
3065
3066 for (s = ibfd->sections; s != NULL; s = s->next)
3067 {
3068 struct elf_dyn_relocs *p;
3069
3070 if (!elf_x86_64_convert_mov_to_lea (ibfd, s, info))
3071 return FALSE;
3072
3073 for (p = (struct elf_dyn_relocs *)
3074 (elf_section_data (s)->local_dynrel);
3075 p != NULL;
3076 p = p->next)
3077 {
3078 if (!bfd_is_abs_section (p->sec)
3079 && bfd_is_abs_section (p->sec->output_section))
3080 {
3081 /* Input section has been discarded, either because
3082 it is a copy of a linkonce section or due to
3083 linker script /DISCARD/, so we'll be discarding
3084 the relocs too. */
3085 }
3086 else if (p->count != 0)
3087 {
3088 srel = elf_section_data (p->sec)->sreloc;
3089 srel->size += p->count * bed->s->sizeof_rela;
3090 if ((p->sec->output_section->flags & SEC_READONLY) != 0
3091 && (info->flags & DF_TEXTREL) == 0)
3092 {
3093 info->flags |= DF_TEXTREL;
3094 if ((info->warn_shared_textrel && info->shared)
3095 || info->error_textrel)
3096 info->callbacks->einfo (_("%P: %B: warning: relocation in readonly section `%A'\n"),
3097 p->sec->owner, p->sec);
3098 }
3099 }
3100 }
3101 }
3102
3103 local_got = elf_local_got_refcounts (ibfd);
3104 if (!local_got)
3105 continue;
3106
3107 symtab_hdr = &elf_symtab_hdr (ibfd);
3108 locsymcount = symtab_hdr->sh_info;
3109 end_local_got = local_got + locsymcount;
3110 local_tls_type = elf_x86_64_local_got_tls_type (ibfd);
3111 local_tlsdesc_gotent = elf_x86_64_local_tlsdesc_gotent (ibfd);
3112 s = htab->elf.sgot;
3113 srel = htab->elf.srelgot;
3114 for (; local_got < end_local_got;
3115 ++local_got, ++local_tls_type, ++local_tlsdesc_gotent)
3116 {
3117 *local_tlsdesc_gotent = (bfd_vma) -1;
3118 if (*local_got > 0)
3119 {
3120 if (GOT_TLS_GDESC_P (*local_tls_type))
3121 {
3122 *local_tlsdesc_gotent = htab->elf.sgotplt->size
3123 - elf_x86_64_compute_jump_table_size (htab);
3124 htab->elf.sgotplt->size += 2 * GOT_ENTRY_SIZE;
3125 *local_got = (bfd_vma) -2;
3126 }
3127 if (! GOT_TLS_GDESC_P (*local_tls_type)
3128 || GOT_TLS_GD_P (*local_tls_type))
3129 {
3130 *local_got = s->size;
3131 s->size += GOT_ENTRY_SIZE;
3132 if (GOT_TLS_GD_P (*local_tls_type))
3133 s->size += GOT_ENTRY_SIZE;
3134 }
3135 if (info->shared
3136 || GOT_TLS_GD_ANY_P (*local_tls_type)
3137 || *local_tls_type == GOT_TLS_IE)
3138 {
3139 if (GOT_TLS_GDESC_P (*local_tls_type))
3140 {
3141 htab->elf.srelplt->size
3142 += bed->s->sizeof_rela;
3143 htab->tlsdesc_plt = (bfd_vma) -1;
3144 }
3145 if (! GOT_TLS_GDESC_P (*local_tls_type)
3146 || GOT_TLS_GD_P (*local_tls_type))
3147 srel->size += bed->s->sizeof_rela;
3148 }
3149 }
3150 else
3151 *local_got = (bfd_vma) -1;
3152 }
3153 }
3154
3155 if (htab->tls_ld_got.refcount > 0)
3156 {
3157 /* Allocate 2 got entries and 1 dynamic reloc for R_X86_64_TLSLD
3158 relocs. */
3159 htab->tls_ld_got.offset = htab->elf.sgot->size;
3160 htab->elf.sgot->size += 2 * GOT_ENTRY_SIZE;
3161 htab->elf.srelgot->size += bed->s->sizeof_rela;
3162 }
3163 else
3164 htab->tls_ld_got.offset = -1;
3165
3166 /* Allocate global sym .plt and .got entries, and space for global
3167 sym dynamic relocs. */
3168 elf_link_hash_traverse (&htab->elf, elf_x86_64_allocate_dynrelocs,
3169 info);
3170
3171 /* Allocate .plt and .got entries, and space for local symbols. */
3172 htab_traverse (htab->loc_hash_table,
3173 elf_x86_64_allocate_local_dynrelocs,
3174 info);
3175
3176 /* For every jump slot reserved in the sgotplt, reloc_count is
3177 incremented. However, when we reserve space for TLS descriptors,
3178 it's not incremented, so in order to compute the space reserved
3179 for them, it suffices to multiply the reloc count by the jump
3180 slot size.
3181
3182 PR ld/13302: We start next_irelative_index at the end of .rela.plt
3183 so that R_X86_64_IRELATIVE entries come last. */
3184 if (htab->elf.srelplt)
3185 {
3186 htab->sgotplt_jump_table_size
3187 = elf_x86_64_compute_jump_table_size (htab);
3188 htab->next_irelative_index = htab->elf.srelplt->reloc_count - 1;
3189 }
3190 else if (htab->elf.irelplt)
3191 htab->next_irelative_index = htab->elf.irelplt->reloc_count - 1;
3192
3193 if (htab->tlsdesc_plt)
3194 {
3195 /* If we're not using lazy TLS relocations, don't generate the
3196 PLT and GOT entries they require. */
3197 if ((info->flags & DF_BIND_NOW))
3198 htab->tlsdesc_plt = 0;
3199 else
3200 {
3201 htab->tlsdesc_got = htab->elf.sgot->size;
3202 htab->elf.sgot->size += GOT_ENTRY_SIZE;
3203 /* Reserve room for the initial entry.
3204 FIXME: we could probably do away with it in this case. */
3205 if (htab->elf.splt->size == 0)
3206 htab->elf.splt->size += GET_PLT_ENTRY_SIZE (output_bfd);
3207 htab->tlsdesc_plt = htab->elf.splt->size;
3208 htab->elf.splt->size += GET_PLT_ENTRY_SIZE (output_bfd);
3209 }
3210 }
3211
3212 if (htab->elf.sgotplt)
3213 {
3214 /* Don't allocate .got.plt section if there are no GOT nor PLT
3215 entries and there is no reference to _GLOBAL_OFFSET_TABLE_. */
3216 if ((htab->elf.hgot == NULL
3217 || !htab->elf.hgot->ref_regular_nonweak)
3218 && (htab->elf.sgotplt->size
3219 == get_elf_backend_data (output_bfd)->got_header_size)
3220 && (htab->elf.splt == NULL
3221 || htab->elf.splt->size == 0)
3222 && (htab->elf.sgot == NULL
3223 || htab->elf.sgot->size == 0)
3224 && (htab->elf.iplt == NULL
3225 || htab->elf.iplt->size == 0)
3226 && (htab->elf.igotplt == NULL
3227 || htab->elf.igotplt->size == 0))
3228 htab->elf.sgotplt->size = 0;
3229 }
3230
3231 if (htab->plt_eh_frame != NULL
3232 && htab->elf.splt != NULL
3233 && htab->elf.splt->size != 0
3234 && !bfd_is_abs_section (htab->elf.splt->output_section)
3235 && _bfd_elf_eh_frame_present (info))
3236 {
3237 const struct elf_x86_64_backend_data *arch_data
3238 = get_elf_x86_64_arch_data (bed);
3239 htab->plt_eh_frame->size = arch_data->eh_frame_plt_size;
3240 }
3241
3242 /* We now have determined the sizes of the various dynamic sections.
3243 Allocate memory for them. */
3244 relocs = FALSE;
3245 for (s = dynobj->sections; s != NULL; s = s->next)
3246 {
3247 if ((s->flags & SEC_LINKER_CREATED) == 0)
3248 continue;
3249
3250 if (s == htab->elf.splt
3251 || s == htab->elf.sgot
3252 || s == htab->elf.sgotplt
3253 || s == htab->elf.iplt
3254 || s == htab->elf.igotplt
3255 || s == htab->plt_bnd
3256 || s == htab->plt_got
3257 || s == htab->plt_eh_frame
3258 || s == htab->sdynbss)
3259 {
3260 /* Strip this section if we don't need it; see the
3261 comment below. */
3262 }
3263 else if (CONST_STRNEQ (bfd_get_section_name (dynobj, s), ".rela"))
3264 {
3265 if (s->size != 0 && s != htab->elf.srelplt)
3266 relocs = TRUE;
3267
3268 /* We use the reloc_count field as a counter if we need
3269 to copy relocs into the output file. */
3270 if (s != htab->elf.srelplt)
3271 s->reloc_count = 0;
3272 }
3273 else
3274 {
3275 /* It's not one of our sections, so don't allocate space. */
3276 continue;
3277 }
3278
3279 if (s->size == 0)
3280 {
3281 /* If we don't need this section, strip it from the
3282 output file. This is mostly to handle .rela.bss and
3283 .rela.plt. We must create both sections in
3284 create_dynamic_sections, because they must be created
3285 before the linker maps input sections to output
3286 sections. The linker does that before
3287 adjust_dynamic_symbol is called, and it is that
3288 function which decides whether anything needs to go
3289 into these sections. */
3290
3291 s->flags |= SEC_EXCLUDE;
3292 continue;
3293 }
3294
3295 if ((s->flags & SEC_HAS_CONTENTS) == 0)
3296 continue;
3297
3298 /* Allocate memory for the section contents. We use bfd_zalloc
3299 here in case unused entries are not reclaimed before the
3300 section's contents are written out. This should not happen,
3301 but this way if it does, we get a R_X86_64_NONE reloc instead
3302 of garbage. */
3303 s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
3304 if (s->contents == NULL)
3305 return FALSE;
3306 }
3307
3308 if (htab->plt_eh_frame != NULL
3309 && htab->plt_eh_frame->contents != NULL)
3310 {
3311 const struct elf_x86_64_backend_data *arch_data
3312 = get_elf_x86_64_arch_data (bed);
3313
3314 memcpy (htab->plt_eh_frame->contents,
3315 arch_data->eh_frame_plt, htab->plt_eh_frame->size);
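      /* Record the final size of .plt in the FDE's address-range field
	 so the unwinder covers the whole procedure linkage table.  */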
3316 bfd_put_32 (dynobj, htab->elf.splt->size,
3317 htab->plt_eh_frame->contents + PLT_FDE_LEN_OFFSET);
3318 }
3319
3320 if (htab->elf.dynamic_sections_created)
3321 {
3322 /* Add some entries to the .dynamic section. We fill in the
3323 values later, in elf_x86_64_finish_dynamic_sections, but we
3324 must add the entries now so that we get the correct size for
3325 the .dynamic section. The DT_DEBUG entry is filled in by the
3326 dynamic linker and used by the debugger. */
3327 #define add_dynamic_entry(TAG, VAL) \
3328 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
3329
3330 if (info->executable)
3331 {
3332 if (!add_dynamic_entry (DT_DEBUG, 0))
3333 return FALSE;
3334 }
3335
3336 if (htab->elf.splt->size != 0)
3337 {
3338 if (!add_dynamic_entry (DT_PLTGOT, 0)
3339 || !add_dynamic_entry (DT_PLTRELSZ, 0)
3340 || !add_dynamic_entry (DT_PLTREL, DT_RELA)
3341 || !add_dynamic_entry (DT_JMPREL, 0))
3342 return FALSE;
3343
3344 if (htab->tlsdesc_plt
3345 && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
3346 || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
3347 return FALSE;
3348 }
3349
3350 if (relocs)
3351 {
3352 if (!add_dynamic_entry (DT_RELA, 0)
3353 || !add_dynamic_entry (DT_RELASZ, 0)
3354 || !add_dynamic_entry (DT_RELAENT, bed->s->sizeof_rela))
3355 return FALSE;
3356
3357 /* If any dynamic relocs apply to a read-only section,
3358 then we need a DT_TEXTREL entry. */
3359 if ((info->flags & DF_TEXTREL) == 0)
3360 elf_link_hash_traverse (&htab->elf,
3361 elf_x86_64_readonly_dynrelocs,
3362 info);
3363
3364 if ((info->flags & DF_TEXTREL) != 0)
3365 {
3366 if (!add_dynamic_entry (DT_TEXTREL, 0))
3367 return FALSE;
3368 }
3369 }
3370 }
3371 #undef add_dynamic_entry
3372
3373 return TRUE;
3374 }
3375
3376 static bfd_boolean
3377 elf_x86_64_always_size_sections (bfd *output_bfd,
3378 struct bfd_link_info *info)
3379 {
3380 asection *tls_sec = elf_hash_table (info)->tls_sec;
3381
3382 if (tls_sec)
3383 {
3384 struct elf_link_hash_entry *tlsbase;
3385
3386 tlsbase = elf_link_hash_lookup (elf_hash_table (info),
3387 "_TLS_MODULE_BASE_",
3388 FALSE, FALSE, FALSE);
3389
3390 if (tlsbase && tlsbase->type == STT_TLS)
3391 {
3392 struct elf_x86_64_link_hash_table *htab;
3393 struct bfd_link_hash_entry *bh = NULL;
3394 const struct elf_backend_data *bed
3395 = get_elf_backend_data (output_bfd);
3396
3397 htab = elf_x86_64_hash_table (info);
3398 if (htab == NULL)
3399 return FALSE;
3400
3401 if (!(_bfd_generic_link_add_one_symbol
3402 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
3403 tls_sec, 0, NULL, FALSE,
3404 bed->collect, &bh)))
3405 return FALSE;
3406
3407 htab->tls_module_base = bh;
3408
3409 tlsbase = (struct elf_link_hash_entry *)bh;
3410 tlsbase->def_regular = 1;
3411 tlsbase->other = STV_HIDDEN;
3412 tlsbase->root.linker_def = 1;
3413 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
3414 }
3415 }
3416
3417 return TRUE;
3418 }
3419
3420 /* _TLS_MODULE_BASE_ needs to be treated specially when linking
3421 executables. Rather than setting it to the beginning of the TLS
3422 section, we have to set it to the end. This function may be called
3423 multiple times; it is idempotent. */
3424
3425 static void
3426 elf_x86_64_set_tls_module_base (struct bfd_link_info *info)
3427 {
3428 struct elf_x86_64_link_hash_table *htab;
3429 struct bfd_link_hash_entry *base;
3430
3431 if (!info->executable)
3432 return;
3433
3434 htab = elf_x86_64_hash_table (info);
3435 if (htab == NULL)
3436 return;
3437
3438 base = htab->tls_module_base;
3439 if (base == NULL)
3440 return;
3441
3442 base->u.def.value = htab->elf.tls_size;
3443 }
3444
3445 /* Return the base VMA address which should be subtracted from real addresses
3446 when resolving @dtpoff relocation.
3447 This is PT_TLS segment p_vaddr. */
3448
3449 static bfd_vma
3450 elf_x86_64_dtpoff_base (struct bfd_link_info *info)
3451 {
3452 /* If tls_sec is NULL, we should have signalled an error already. */
3453 if (elf_hash_table (info)->tls_sec == NULL)
3454 return 0;
3455 return elf_hash_table (info)->tls_sec->vma;
3456 }
3457
3458 /* Return the relocation value for @tpoff relocation
3459 if STT_TLS virtual address is ADDRESS. */
3460
3461 static bfd_vma
3462 elf_x86_64_tpoff (struct bfd_link_info *info, bfd_vma address)
3463 {
3464 struct elf_link_hash_table *htab = elf_hash_table (info);
3465 const struct elf_backend_data *bed = get_elf_backend_data (info->output_bfd);
3466 bfd_vma static_tls_size;
3467
3468 /* If tls_segment is NULL, we should have signalled an error already. */
3469 if (htab->tls_sec == NULL)
3470 return 0;
3471
3472 /* Consider special static TLS alignment requirements. */
3473 static_tls_size = BFD_ALIGN (htab->tls_size, bed->static_tls_alignment);
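  /* On x86-64 the thread pointer points at the end of the static TLS
     block, so @tpoff values are negative offsets from it.  */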
3474 return address - static_tls_size - htab->tls_sec->vma;
3475 }
3476
3477 /* Is the instruction before OFFSET in CONTENTS a 32bit relative
3478 branch? */
3479
3480 static bfd_boolean
3481 is_32bit_relative_branch (bfd_byte *contents, bfd_vma offset)
3482 {
3483 /* Opcode Instruction
3484 0xe8 call
3485 0xe9 jump
3486 0x0f 0x8x conditional jump */
3487 return ((offset > 0
3488 && (contents [offset - 1] == 0xe8
3489 || contents [offset - 1] == 0xe9))
3490 || (offset > 1
3491 && contents [offset - 2] == 0x0f
3492 && (contents [offset - 1] & 0xf0) == 0x80));
3493 }
3494
3495 /* Relocate an x86_64 ELF section. */
3496
3497 static bfd_boolean
3498 elf_x86_64_relocate_section (bfd *output_bfd,
3499 struct bfd_link_info *info,
3500 bfd *input_bfd,
3501 asection *input_section,
3502 bfd_byte *contents,
3503 Elf_Internal_Rela *relocs,
3504 Elf_Internal_Sym *local_syms,
3505 asection **local_sections)
3506 {
3507 struct elf_x86_64_link_hash_table *htab;
3508 Elf_Internal_Shdr *symtab_hdr;
3509 struct elf_link_hash_entry **sym_hashes;
3510 bfd_vma *local_got_offsets;
3511 bfd_vma *local_tlsdesc_gotents;
3512 Elf_Internal_Rela *rel;
3513 Elf_Internal_Rela *relend;
3514 const unsigned int plt_entry_size = GET_PLT_ENTRY_SIZE (info->output_bfd);
3515
3516 BFD_ASSERT (is_x86_64_elf (input_bfd));
3517
3518 htab = elf_x86_64_hash_table (info);
3519 if (htab == NULL)
3520 return FALSE;
3521 symtab_hdr = &elf_symtab_hdr (input_bfd);
3522 sym_hashes = elf_sym_hashes (input_bfd);
3523 local_got_offsets = elf_local_got_offsets (input_bfd);
3524 local_tlsdesc_gotents = elf_x86_64_local_tlsdesc_gotent (input_bfd);
3525
3526 elf_x86_64_set_tls_module_base (info);
3527
3528 rel = relocs;
3529 relend = relocs + input_section->reloc_count;
3530 for (; rel < relend; rel++)
3531 {
3532 unsigned int r_type;
3533 reloc_howto_type *howto;
3534 unsigned long r_symndx;
3535 struct elf_link_hash_entry *h;
3536 struct elf_x86_64_link_hash_entry *eh;
3537 Elf_Internal_Sym *sym;
3538 asection *sec;
3539 bfd_vma off, offplt, plt_offset;
3540 bfd_vma relocation;
3541 bfd_boolean unresolved_reloc;
3542 bfd_reloc_status_type r;
3543 int tls_type;
3544 asection *base_got, *resolved_plt;
3545 bfd_vma st_size;
3546
3547 r_type = ELF32_R_TYPE (rel->r_info);
3548 if (r_type == (int) R_X86_64_GNU_VTINHERIT
3549 || r_type == (int) R_X86_64_GNU_VTENTRY)
3550 continue;
3551
3552 if (r_type >= (int) R_X86_64_standard)
3553 {
3554 (*_bfd_error_handler)
3555 (_("%B: unrecognized relocation (0x%x) in section `%A'"),
3556 input_bfd, input_section, r_type);
3557 bfd_set_error (bfd_error_bad_value);
3558 return FALSE;
3559 }
3560
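      /* For x32, R_X86_64_32 uses the special howto kept at the end of
	 the table instead of the 64-bit entry.  */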
3561 if (r_type != (int) R_X86_64_32
3562 || ABI_64_P (output_bfd))
3563 howto = x86_64_elf_howto_table + r_type;
3564 else
3565 howto = (x86_64_elf_howto_table
3566 + ARRAY_SIZE (x86_64_elf_howto_table) - 1);
3567 r_symndx = htab->r_sym (rel->r_info);
3568 h = NULL;
3569 sym = NULL;
3570 sec = NULL;
3571 unresolved_reloc = FALSE;
3572 if (r_symndx < symtab_hdr->sh_info)
3573 {
3574 sym = local_syms + r_symndx;
3575 sec = local_sections[r_symndx];
3576
3577 relocation = _bfd_elf_rela_local_sym (output_bfd, sym,
3578 &sec, rel);
3579 st_size = sym->st_size;
3580
3581 /* Relocate against local STT_GNU_IFUNC symbol. */
3582 if (!info->relocatable
3583 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
3584 {
3585 h = elf_x86_64_get_local_sym_hash (htab, input_bfd,
3586 rel, FALSE);
3587 if (h == NULL)
3588 abort ();
3589
3590 /* Set STT_GNU_IFUNC symbol value. */
3591 h->root.u.def.value = sym->st_value;
3592 h->root.u.def.section = sec;
3593 }
3594 }
3595 else
3596 {
3597 bfd_boolean warned ATTRIBUTE_UNUSED;
3598 bfd_boolean ignored ATTRIBUTE_UNUSED;
3599
3600 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
3601 r_symndx, symtab_hdr, sym_hashes,
3602 h, sec, relocation,
3603 unresolved_reloc, warned, ignored);
3604 st_size = h->size;
3605 }
3606
3607 if (sec != NULL && discarded_section (sec))
3608 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
3609 rel, 1, relend, howto, 0, contents);
3610
3611 if (info->relocatable)
3612 continue;
3613
3614 if (rel->r_addend == 0 && !ABI_64_P (output_bfd))
3615 {
3616 if (r_type == R_X86_64_64)
3617 {
3618 /* For x32, treat R_X86_64_64 like R_X86_64_32 and
3619 zero-extend it to 64bit if addend is zero. */
3620 r_type = R_X86_64_32;
3621 memset (contents + rel->r_offset + 4, 0, 4);
3622 }
3623 else if (r_type == R_X86_64_SIZE64)
3624 {
3625 /* For x32, treat R_X86_64_SIZE64 like R_X86_64_SIZE32 and
3626 zero-extend it to 64bit if addend is zero. */
3627 r_type = R_X86_64_SIZE32;
3628 memset (contents + rel->r_offset + 4, 0, 4);
3629 }
3630 }
3631
3632 eh = (struct elf_x86_64_link_hash_entry *) h;
3633
3634 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
3635 it here if it is defined in a non-shared object. */
3636 if (h != NULL
3637 && h->type == STT_GNU_IFUNC
3638 && h->def_regular)
3639 {
3640 bfd_vma plt_index;
3641 const char *name;
3642
3643 if ((input_section->flags & SEC_ALLOC) == 0
3644 || h->plt.offset == (bfd_vma) -1)
3645 abort ();
3646
3647 /* STT_GNU_IFUNC symbol must go through PLT. */
3648 if (htab->elf.splt != NULL)
3649 {
3650 if (htab->plt_bnd != NULL)
3651 {
3652 resolved_plt = htab->plt_bnd;
3653 plt_offset = eh->plt_bnd.offset;
3654 }
3655 else
3656 {
3657 resolved_plt = htab->elf.splt;
3658 plt_offset = h->plt.offset;
3659 }
3660 }
3661 else
3662 {
3663 resolved_plt = htab->elf.iplt;
3664 plt_offset = h->plt.offset;
3665 }
3666
3667 relocation = (resolved_plt->output_section->vma
3668 + resolved_plt->output_offset + plt_offset);
3669
3670 switch (r_type)
3671 {
3672 default:
3673 if (h->root.root.string)
3674 name = h->root.root.string;
3675 else
3676 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
3677 NULL);
3678 (*_bfd_error_handler)
3679 (_("%B: relocation %s against STT_GNU_IFUNC "
3680 "symbol `%s' isn't handled by %s"), input_bfd,
3681 x86_64_elf_howto_table[r_type].name,
3682 name, __FUNCTION__);
3683 bfd_set_error (bfd_error_bad_value);
3684 return FALSE;
3685
3686 case R_X86_64_32S:
3687 if (info->shared)
3688 abort ();
3689 goto do_relocation;
3690
3691 case R_X86_64_32:
3692 if (ABI_64_P (output_bfd))
3693 goto do_relocation;
3694 /* FALLTHROUGH */
3695 case R_X86_64_64:
3696 if (rel->r_addend != 0)
3697 {
3698 if (h->root.root.string)
3699 name = h->root.root.string;
3700 else
3701 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
3702 sym, NULL);
3703 (*_bfd_error_handler)
3704 (_("%B: relocation %s against STT_GNU_IFUNC "
3705 "symbol `%s' has non-zero addend: %d"),
3706 input_bfd, x86_64_elf_howto_table[r_type].name,
3707 name, rel->r_addend);
3708 bfd_set_error (bfd_error_bad_value);
3709 return FALSE;
3710 }
3711
3712 		      /* Generate dynamic relocation only when there is a
3713 non-GOT reference in a shared object. */
3714 if (info->shared && h->non_got_ref)
3715 {
3716 Elf_Internal_Rela outrel;
3717 asection *sreloc;
3718
3719 /* Need a dynamic relocation to get the real function
3720 address. */
3721 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
3722 info,
3723 input_section,
3724 rel->r_offset);
3725 if (outrel.r_offset == (bfd_vma) -1
3726 || outrel.r_offset == (bfd_vma) -2)
3727 abort ();
3728
3729 outrel.r_offset += (input_section->output_section->vma
3730 + input_section->output_offset);
3731
3732 if (h->dynindx == -1
3733 || h->forced_local
3734 || info->executable)
3735 {
3736 /* This symbol is resolved locally. */
3737 outrel.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
3738 outrel.r_addend = (h->root.u.def.value
3739 + h->root.u.def.section->output_section->vma
3740 + h->root.u.def.section->output_offset);
3741 }
3742 else
3743 {
3744 outrel.r_info = htab->r_info (h->dynindx, r_type);
3745 outrel.r_addend = 0;
3746 }
3747
3748 sreloc = htab->elf.irelifunc;
3749 elf_append_rela (output_bfd, sreloc, &outrel);
3750
3751 /* If this reloc is against an external symbol, we
3752 do not want to fiddle with the addend. Otherwise,
3753 we need to include the symbol value so that it
3754 becomes an addend for the dynamic reloc. For an
3755 			 internal symbol, we have updated the addend.  */
3756 continue;
3757 }
3758 /* FALLTHROUGH */
3759 case R_X86_64_PC32:
3760 case R_X86_64_PC32_BND:
3761 case R_X86_64_PC64:
3762 case R_X86_64_PLT32:
3763 case R_X86_64_PLT32_BND:
3764 goto do_relocation;
3765
3766 case R_X86_64_GOTPCREL:
3767 case R_X86_64_GOTPCREL64:
3768 base_got = htab->elf.sgot;
3769 off = h->got.offset;
3770
3771 if (base_got == NULL)
3772 abort ();
3773
3774 if (off == (bfd_vma) -1)
3775 {
3776 /* We can't use h->got.offset here to save state, or
3777 even just remember the offset, as finish_dynamic_symbol
3778 would use that as offset into .got. */
3779
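		      /* Compute the GOT slot belonging to this symbol's PLT
			 entry.  In .plt the first entry is reserved and the
			 first three .got.plt slots belong to the dynamic
			 linker; .iplt and .igot.plt reserve nothing.  */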
3780 if (htab->elf.splt != NULL)
3781 {
3782 plt_index = h->plt.offset / plt_entry_size - 1;
3783 off = (plt_index + 3) * GOT_ENTRY_SIZE;
3784 base_got = htab->elf.sgotplt;
3785 }
3786 else
3787 {
3788 plt_index = h->plt.offset / plt_entry_size;
3789 off = plt_index * GOT_ENTRY_SIZE;
3790 base_got = htab->elf.igotplt;
3791 }
3792
3793 if (h->dynindx == -1
3794 || h->forced_local
3795 || info->symbolic)
3796 {
3797 			      /* This references the local definition.  We must
3798 initialize this entry in the global offset table.
3799 Since the offset must always be a multiple of 8,
3800 we use the least significant bit to record
3801 whether we have initialized it already.
3802
3803 When doing a dynamic link, we create a .rela.got
3804 relocation entry to initialize the value. This
3805 is done in the finish_dynamic_symbol routine. */
3806 if ((off & 1) != 0)
3807 off &= ~1;
3808 else
3809 {
3810 bfd_put_64 (output_bfd, relocation,
3811 base_got->contents + off);
3812 /* Note that this is harmless for the GOTPLT64
3813 case, as -1 | 1 still is -1. */
3814 h->got.offset |= 1;
3815 }
3816 }
3817 }
3818
3819 relocation = (base_got->output_section->vma
3820 + base_got->output_offset + off);
3821
3822 goto do_relocation;
3823 }
3824 }
3825
3826 /* When generating a shared object, the relocations handled here are
3827 copied into the output file to be resolved at run time. */
3828 switch (r_type)
3829 {
3830 case R_X86_64_GOT32:
3831 case R_X86_64_GOT64:
3832 /* Relocation is to the entry for this symbol in the global
3833 offset table. */
3834 case R_X86_64_GOTPCREL:
3835 case R_X86_64_GOTPCREL64:
3836 /* Use global offset table entry as symbol value. */
3837 case R_X86_64_GOTPLT64:
3838 	  /* This is obsolete and treated the same as GOT64.  */
3839 base_got = htab->elf.sgot;
3840
3841 if (htab->elf.sgot == NULL)
3842 abort ();
3843
3844 if (h != NULL)
3845 {
3846 bfd_boolean dyn;
3847
3848 off = h->got.offset;
3849 if (h->needs_plt
3850 && h->plt.offset != (bfd_vma)-1
3851 && off == (bfd_vma)-1)
3852 {
3853 /* We can't use h->got.offset here to save
3854 state, or even just remember the offset, as
3855 finish_dynamic_symbol would use that as offset into
3856 .got. */
3857 bfd_vma plt_index = h->plt.offset / plt_entry_size - 1;
3858 off = (plt_index + 3) * GOT_ENTRY_SIZE;
3859 base_got = htab->elf.sgotplt;
3860 }
3861
3862 dyn = htab->elf.dynamic_sections_created;
3863
3864 if (! WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
3865 || (info->shared
3866 && SYMBOL_REFERENCES_LOCAL (info, h))
3867 || (ELF_ST_VISIBILITY (h->other)
3868 && h->root.type == bfd_link_hash_undefweak))
3869 {
3870 /* This is actually a static link, or it is a -Bsymbolic
3871 link and the symbol is defined locally, or the symbol
3872 was forced to be local because of a version file. We
3873 must initialize this entry in the global offset table.
3874 Since the offset must always be a multiple of 8, we
3875 use the least significant bit to record whether we
3876 have initialized it already.
3877
3878 When doing a dynamic link, we create a .rela.got
3879 relocation entry to initialize the value. This is
3880 done in the finish_dynamic_symbol routine. */
3881 if ((off & 1) != 0)
3882 off &= ~1;
3883 else
3884 {
3885 bfd_put_64 (output_bfd, relocation,
3886 base_got->contents + off);
3887 /* Note that this is harmless for the GOTPLT64 case,
3888 as -1 | 1 still is -1. */
3889 h->got.offset |= 1;
3890 }
3891 }
3892 else
3893 unresolved_reloc = FALSE;
3894 }
3895 else
3896 {
3897 if (local_got_offsets == NULL)
3898 abort ();
3899
3900 off = local_got_offsets[r_symndx];
3901
3902 /* The offset must always be a multiple of 8. We use
3903 the least significant bit to record whether we have
3904 already generated the necessary reloc. */
3905 if ((off & 1) != 0)
3906 off &= ~1;
3907 else
3908 {
3909 bfd_put_64 (output_bfd, relocation,
3910 base_got->contents + off);
3911
3912 if (info->shared)
3913 {
3914 asection *s;
3915 Elf_Internal_Rela outrel;
3916
3917 /* We need to generate a R_X86_64_RELATIVE reloc
3918 for the dynamic linker. */
3919 s = htab->elf.srelgot;
3920 if (s == NULL)
3921 abort ();
3922
3923 outrel.r_offset = (base_got->output_section->vma
3924 + base_got->output_offset
3925 + off);
3926 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
3927 outrel.r_addend = relocation;
3928 elf_append_rela (output_bfd, s, &outrel);
3929 }
3930
3931 local_got_offsets[r_symndx] |= 1;
3932 }
3933 }
3934
3935 if (off >= (bfd_vma) -2)
3936 abort ();
3937
3938 relocation = base_got->output_section->vma
3939 + base_got->output_offset + off;
3940 if (r_type != R_X86_64_GOTPCREL && r_type != R_X86_64_GOTPCREL64)
3941 relocation -= htab->elf.sgotplt->output_section->vma
3942 - htab->elf.sgotplt->output_offset;
3943
3944 break;
3945
3946 case R_X86_64_GOTOFF64:
3947 /* Relocation is relative to the start of the global offset
3948 table. */
3949
3950 /* Check to make sure it isn't a protected function symbol
3951 for shared library since it may not be local when used
3952 as function address. */
3953 if (!info->executable
3954 && h
3955 && !SYMBOLIC_BIND (info, h)
3956 && h->def_regular
3957 && h->type == STT_FUNC
3958 && ELF_ST_VISIBILITY (h->other) == STV_PROTECTED)
3959 {
3960 (*_bfd_error_handler)
3961 (_("%B: relocation R_X86_64_GOTOFF64 against protected function `%s' can not be used when making a shared object"),
3962 input_bfd, h->root.root.string);
3963 bfd_set_error (bfd_error_bad_value);
3964 return FALSE;
3965 }
3966
3967 /* Note that sgot is not involved in this
3968 calculation. We always want the start of .got.plt. If we
3969 defined _GLOBAL_OFFSET_TABLE_ in a different way, as is
3970 permitted by the ABI, we might have to change this
3971 calculation. */
3972 relocation -= htab->elf.sgotplt->output_section->vma
3973 + htab->elf.sgotplt->output_offset;
3974 break;
3975
3976 case R_X86_64_GOTPC32:
3977 case R_X86_64_GOTPC64:
3978 /* Use global offset table as symbol value. */
3979 relocation = htab->elf.sgotplt->output_section->vma
3980 + htab->elf.sgotplt->output_offset;
3981 unresolved_reloc = FALSE;
3982 break;
3983
3984 case R_X86_64_PLTOFF64:
3985 /* Relocation is PLT entry relative to GOT. For local
3986 symbols it's the symbol itself relative to GOT. */
3987 if (h != NULL
3988 /* See PLT32 handling. */
3989 && h->plt.offset != (bfd_vma) -1
3990 && htab->elf.splt != NULL)
3991 {
3992 if (htab->plt_bnd != NULL)
3993 {
3994 resolved_plt = htab->plt_bnd;
3995 plt_offset = eh->plt_bnd.offset;
3996 }
3997 else
3998 {
3999 resolved_plt = htab->elf.splt;
4000 plt_offset = h->plt.offset;
4001 }
4002
4003 relocation = (resolved_plt->output_section->vma
4004 + resolved_plt->output_offset
4005 + plt_offset);
4006 unresolved_reloc = FALSE;
4007 }
4008
4009 relocation -= htab->elf.sgotplt->output_section->vma
4010 + htab->elf.sgotplt->output_offset;
4011 break;
4012
4013 case R_X86_64_PLT32:
4014 case R_X86_64_PLT32_BND:
4015 /* Relocation is to the entry for this symbol in the
4016 procedure linkage table. */
4017
4018 /* Resolve a PLT32 reloc against a local symbol directly,
4019 without using the procedure linkage table. */
4020 if (h == NULL)
4021 break;
4022
4023 if ((h->plt.offset == (bfd_vma) -1
4024 && eh->plt_got.offset == (bfd_vma) -1)
4025 || htab->elf.splt == NULL)
4026 {
4027 /* We didn't make a PLT entry for this symbol. This
4028 happens when statically linking PIC code, or when
4029 using -Bsymbolic. */
4030 break;
4031 }
4032
4033 if (h->plt.offset != (bfd_vma) -1)
4034 {
4035 if (htab->plt_bnd != NULL)
4036 {
4037 resolved_plt = htab->plt_bnd;
4038 plt_offset = eh->plt_bnd.offset;
4039 }
4040 else
4041 {
4042 resolved_plt = htab->elf.splt;
4043 plt_offset = h->plt.offset;
4044 }
4045 }
4046 else
4047 {
4048 /* Use the GOT PLT. */
4049 resolved_plt = htab->plt_got;
4050 plt_offset = eh->plt_got.offset;
4051 }
4052
4053 relocation = (resolved_plt->output_section->vma
4054 + resolved_plt->output_offset
4055 + plt_offset);
4056 unresolved_reloc = FALSE;
4057 break;
4058
4059 case R_X86_64_SIZE32:
4060 case R_X86_64_SIZE64:
4061 /* Set to symbol size. */
4062 relocation = st_size;
4063 goto direct;
4064
4065 case R_X86_64_PC8:
4066 case R_X86_64_PC16:
4067 case R_X86_64_PC32:
4068 case R_X86_64_PC32_BND:
4069 /* Don't complain about -fPIC if the symbol is undefined when
4070 	   building an executable.  */
4071 if (info->shared
4072 && (input_section->flags & SEC_ALLOC) != 0
4073 && (input_section->flags & SEC_READONLY) != 0
4074 && h != NULL
4075 && !(info->executable
4076 && h->root.type == bfd_link_hash_undefined))
4077 {
4078 bfd_boolean fail = FALSE;
4079 bfd_boolean branch
4080 = ((r_type == R_X86_64_PC32
4081 || r_type == R_X86_64_PC32_BND)
4082 && is_32bit_relative_branch (contents, rel->r_offset));
4083
4084 if (SYMBOL_REFERENCES_LOCAL (info, h))
4085 {
4086 /* Symbol is referenced locally. Make sure it is
4087 defined locally or for a branch. */
4088 fail = !h->def_regular && !branch;
4089 }
4090 else if (!(info->executable
4091 && (h->needs_copy || eh->needs_copy)))
4092 {
4093 /* Symbol doesn't need copy reloc and isn't referenced
4094 locally. We only allow branch to symbol with
4095 non-default visibility. */
4096 fail = (!branch
4097 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT);
4098 }
4099
4100 if (fail)
4101 {
4102 const char *fmt;
4103 const char *v;
4104 const char *pic = "";
4105
4106 switch (ELF_ST_VISIBILITY (h->other))
4107 {
4108 case STV_HIDDEN:
4109 v = _("hidden symbol");
4110 break;
4111 case STV_INTERNAL:
4112 v = _("internal symbol");
4113 break;
4114 case STV_PROTECTED:
4115 v = _("protected symbol");
4116 break;
4117 default:
4118 v = _("symbol");
4119 pic = _("; recompile with -fPIC");
4120 break;
4121 }
4122
4123 if (h->def_regular)
4124 fmt = _("%B: relocation %s against %s `%s' can not be used when making a shared object%s");
4125 else
4126 fmt = _("%B: relocation %s against undefined %s `%s' can not be used when making a shared object%s");
4127
4128 (*_bfd_error_handler) (fmt, input_bfd,
4129 x86_64_elf_howto_table[r_type].name,
4130 v, h->root.root.string, pic);
4131 bfd_set_error (bfd_error_bad_value);
4132 return FALSE;
4133 }
4134 }
4135 /* Fall through. */
4136
4137 case R_X86_64_8:
4138 case R_X86_64_16:
4139 case R_X86_64_32:
4140 case R_X86_64_PC64:
4141 case R_X86_64_64:
4142 /* FIXME: The ABI says the linker should make sure the value is
4143 	 the same when it's zero-extended to 64 bits.  */
4144
4145 direct:
4146 if ((input_section->flags & SEC_ALLOC) == 0)
4147 break;
4148
4149 /* Don't copy a pc-relative relocation into the output file
4150 if the symbol needs copy reloc or the symbol is undefined
4151 when building executable. */
4152 if ((info->shared
4153 && !(info->executable
4154 && h != NULL
4155 && (h->needs_copy
4156 || eh->needs_copy
4157 || h->root.type == bfd_link_hash_undefined)
4158 && IS_X86_64_PCREL_TYPE (r_type))
4159 && (h == NULL
4160 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4161 || h->root.type != bfd_link_hash_undefweak)
4162 && ((! IS_X86_64_PCREL_TYPE (r_type)
4163 && r_type != R_X86_64_SIZE32
4164 && r_type != R_X86_64_SIZE64)
4165 || ! SYMBOL_CALLS_LOCAL (info, h)))
4166 || (ELIMINATE_COPY_RELOCS
4167 && !info->shared
4168 && h != NULL
4169 && h->dynindx != -1
4170 && !h->non_got_ref
4171 && ((h->def_dynamic
4172 && !h->def_regular)
4173 || h->root.type == bfd_link_hash_undefweak
4174 || h->root.type == bfd_link_hash_undefined)))
4175 {
4176 Elf_Internal_Rela outrel;
4177 bfd_boolean skip, relocate;
4178 asection *sreloc;
4179
4180 /* When generating a shared object, these relocations
4181 are copied into the output file to be resolved at run
4182 time. */
4183 skip = FALSE;
4184 relocate = FALSE;
4185
4186 outrel.r_offset =
4187 _bfd_elf_section_offset (output_bfd, info, input_section,
4188 rel->r_offset);
4189 if (outrel.r_offset == (bfd_vma) -1)
4190 skip = TRUE;
4191 else if (outrel.r_offset == (bfd_vma) -2)
4192 skip = TRUE, relocate = TRUE;
4193
4194 outrel.r_offset += (input_section->output_section->vma
4195 + input_section->output_offset);
4196
4197 if (skip)
4198 memset (&outrel, 0, sizeof outrel);
4199
4200 /* h->dynindx may be -1 if this symbol was marked to
4201 become local. */
4202 else if (h != NULL
4203 && h->dynindx != -1
4204 && (IS_X86_64_PCREL_TYPE (r_type)
4205 || ! info->shared
4206 || ! SYMBOLIC_BIND (info, h)
4207 || ! h->def_regular))
4208 {
4209 outrel.r_info = htab->r_info (h->dynindx, r_type);
4210 outrel.r_addend = rel->r_addend;
4211 }
4212 else
4213 {
4214 /* This symbol is local, or marked to become local. */
4215 if (r_type == htab->pointer_r_type)
4216 {
4217 relocate = TRUE;
4218 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4219 outrel.r_addend = relocation + rel->r_addend;
4220 }
4221 else if (r_type == R_X86_64_64
4222 && !ABI_64_P (output_bfd))
4223 {
4224 relocate = TRUE;
4225 outrel.r_info = htab->r_info (0,
4226 R_X86_64_RELATIVE64);
4227 outrel.r_addend = relocation + rel->r_addend;
4228 /* Check addend overflow. */
4229 if ((outrel.r_addend & 0x80000000)
4230 != (rel->r_addend & 0x80000000))
4231 {
4232 const char *name;
4233 int addend = rel->r_addend;
4234 if (h && h->root.root.string)
4235 name = h->root.root.string;
4236 else
4237 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
4238 sym, NULL);
4239 if (addend < 0)
4240 (*_bfd_error_handler)
4241 (_("%B: addend -0x%x in relocation %s against "
4242 "symbol `%s' at 0x%lx in section `%A' is "
4243 "out of range"),
4244 input_bfd, input_section, addend,
4245 x86_64_elf_howto_table[r_type].name,
4246 name, (unsigned long) rel->r_offset);
4247 else
4248 (*_bfd_error_handler)
4249 (_("%B: addend 0x%x in relocation %s against "
4250 "symbol `%s' at 0x%lx in section `%A' is "
4251 "out of range"),
4252 input_bfd, input_section, addend,
4253 x86_64_elf_howto_table[r_type].name,
4254 name, (unsigned long) rel->r_offset);
4255 bfd_set_error (bfd_error_bad_value);
4256 return FALSE;
4257 }
4258 }
4259 else
4260 {
4261 long sindx;
4262
4263 if (bfd_is_abs_section (sec))
4264 sindx = 0;
4265 else if (sec == NULL || sec->owner == NULL)
4266 {
4267 bfd_set_error (bfd_error_bad_value);
4268 return FALSE;
4269 }
4270 else
4271 {
4272 asection *osec;
4273
4274 /* We are turning this relocation into one
4275 against a section symbol. It would be
4276 proper to subtract the symbol's value,
4277 osec->vma, from the emitted reloc addend,
4278 but ld.so expects buggy relocs. */
4279 osec = sec->output_section;
4280 sindx = elf_section_data (osec)->dynindx;
4281 if (sindx == 0)
4282 {
4283 asection *oi = htab->elf.text_index_section;
4284 sindx = elf_section_data (oi)->dynindx;
4285 }
4286 BFD_ASSERT (sindx != 0);
4287 }
4288
4289 outrel.r_info = htab->r_info (sindx, r_type);
4290 outrel.r_addend = relocation + rel->r_addend;
4291 }
4292 }
4293
4294 sreloc = elf_section_data (input_section)->sreloc;
4295
4296 if (sreloc == NULL || sreloc->contents == NULL)
4297 {
4298 r = bfd_reloc_notsupported;
4299 goto check_relocation_error;
4300 }
4301
4302 elf_append_rela (output_bfd, sreloc, &outrel);
4303
4304 /* If this reloc is against an external symbol, we do
4305 not want to fiddle with the addend. Otherwise, we
4306 need to include the symbol value so that it becomes
4307 an addend for the dynamic reloc. */
4308 if (! relocate)
4309 continue;
4310 }
4311
4312 break;
4313
4314 case R_X86_64_TLSGD:
4315 case R_X86_64_GOTPC32_TLSDESC:
4316 case R_X86_64_TLSDESC_CALL:
4317 case R_X86_64_GOTTPOFF:
4318 tls_type = GOT_UNKNOWN;
4319 if (h == NULL && local_got_offsets)
4320 tls_type = elf_x86_64_local_got_tls_type (input_bfd) [r_symndx];
4321 else if (h != NULL)
4322 tls_type = elf_x86_64_hash_entry (h)->tls_type;
4323
4324 if (! elf_x86_64_tls_transition (info, input_bfd,
4325 input_section, contents,
4326 symtab_hdr, sym_hashes,
4327 &r_type, tls_type, rel,
4328 relend, h, r_symndx))
4329 return FALSE;
4330
4331 if (r_type == R_X86_64_TPOFF32)
4332 {
4333 bfd_vma roff = rel->r_offset;
4334
4335 BFD_ASSERT (! unresolved_reloc);
4336
4337 if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSGD)
4338 {
4339 /* GD->LE transition. For 64bit, change
4340 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
4341 .word 0x6666; rex64; call __tls_get_addr
4342 into:
4343 movq %fs:0, %rax
4344 leaq foo@tpoff(%rax), %rax
4345 For 32bit, change
4346 leaq foo@tlsgd(%rip), %rdi
4347 .word 0x6666; rex64; call __tls_get_addr
4348 into:
4349 movl %fs:0, %eax
4350 leaq foo@tpoff(%rax), %rax
4351 For largepic, change:
4352 leaq foo@tlsgd(%rip), %rdi
4353 movabsq $__tls_get_addr@pltoff, %rax
4354 addq %rbx, %rax
4355 call *%rax
4356 into:
4357 movq %fs:0, %rax
4358 leaq foo@tpoff(%rax), %rax
4359 nopw 0x0(%rax,%rax,1) */
4360 int largepic = 0;
4361 if (ABI_64_P (output_bfd)
4362 && contents[roff + 5] == (bfd_byte) '\xb8')
4363 {
4364 memcpy (contents + roff - 3,
4365 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80"
4366 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
4367 largepic = 1;
4368 }
4369 else if (ABI_64_P (output_bfd))
4370 memcpy (contents + roff - 4,
4371 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
4372 16);
4373 else
4374 memcpy (contents + roff - 3,
4375 "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
4376 15);
4377 bfd_put_32 (output_bfd,
4378 elf_x86_64_tpoff (info, relocation),
4379 contents + roff + 8 + largepic);
4380 /* Skip R_X86_64_PC32/R_X86_64_PLT32/R_X86_64_PLTOFF64. */
4381 rel++;
4382 continue;
4383 }
4384 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTPC32_TLSDESC)
4385 {
4386 /* GDesc -> LE transition.
4387 It's originally something like:
4388 leaq x@tlsdesc(%rip), %rax
4389
4390 Change it to:
4391 movl $x@tpoff, %rax. */
4392
4393 unsigned int val, type;
4394
4395 type = bfd_get_8 (input_bfd, contents + roff - 3);
4396 val = bfd_get_8 (input_bfd, contents + roff - 1);
4397 bfd_put_8 (output_bfd, 0x48 | ((type >> 2) & 1),
4398 contents + roff - 3);
4399 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
4400 bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7),
4401 contents + roff - 1);
4402 bfd_put_32 (output_bfd,
4403 elf_x86_64_tpoff (info, relocation),
4404 contents + roff);
4405 continue;
4406 }
4407 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSDESC_CALL)
4408 {
4409 /* GDesc -> LE transition.
4410 It's originally:
4411 call *(%rax)
4412 Turn it into:
4413 xchg %ax,%ax. */
4414 bfd_put_8 (output_bfd, 0x66, contents + roff);
4415 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
4416 continue;
4417 }
4418 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTTPOFF)
4419 {
4420 /* IE->LE transition:
4421 For 64bit, originally it can be one of:
4422 movq foo@gottpoff(%rip), %reg
4423 addq foo@gottpoff(%rip), %reg
4424 We change it into:
4425 movq $foo, %reg
4426 leaq foo(%reg), %reg
4427 addq $foo, %reg.
4428 For 32bit, originally it can be one of:
4429 movq foo@gottpoff(%rip), %reg
4430 addl foo@gottpoff(%rip), %reg
4431 We change it into:
4432 movq $foo, %reg
4433 leal foo(%reg), %reg
4434 addl $foo, %reg. */
4435
4436 unsigned int val, type, reg;
4437
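		      /* VAL is the REX prefix byte (0 if there is none),
			 TYPE the opcode and REG the reg field of the ModRM
			 byte of the instruction being rewritten.  */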
4438 if (roff >= 3)
4439 val = bfd_get_8 (input_bfd, contents + roff - 3);
4440 else
4441 val = 0;
4442 type = bfd_get_8 (input_bfd, contents + roff - 2);
4443 reg = bfd_get_8 (input_bfd, contents + roff - 1);
4444 reg >>= 3;
4445 if (type == 0x8b)
4446 {
4447 /* movq */
4448 if (val == 0x4c)
4449 bfd_put_8 (output_bfd, 0x49,
4450 contents + roff - 3);
4451 else if (!ABI_64_P (output_bfd) && val == 0x44)
4452 bfd_put_8 (output_bfd, 0x41,
4453 contents + roff - 3);
4454 bfd_put_8 (output_bfd, 0xc7,
4455 contents + roff - 2);
4456 bfd_put_8 (output_bfd, 0xc0 | reg,
4457 contents + roff - 1);
4458 }
4459 else if (reg == 4)
4460 {
4461 /* addq/addl -> addq/addl - addressing with %rsp/%r12
4462 is special */
4463 if (val == 0x4c)
4464 bfd_put_8 (output_bfd, 0x49,
4465 contents + roff - 3);
4466 else if (!ABI_64_P (output_bfd) && val == 0x44)
4467 bfd_put_8 (output_bfd, 0x41,
4468 contents + roff - 3);
4469 bfd_put_8 (output_bfd, 0x81,
4470 contents + roff - 2);
4471 bfd_put_8 (output_bfd, 0xc0 | reg,
4472 contents + roff - 1);
4473 }
4474 else
4475 {
4476 /* addq/addl -> leaq/leal */
4477 if (val == 0x4c)
4478 bfd_put_8 (output_bfd, 0x4d,
4479 contents + roff - 3);
4480 else if (!ABI_64_P (output_bfd) && val == 0x44)
4481 bfd_put_8 (output_bfd, 0x45,
4482 contents + roff - 3);
4483 bfd_put_8 (output_bfd, 0x8d,
4484 contents + roff - 2);
4485 bfd_put_8 (output_bfd, 0x80 | reg | (reg << 3),
4486 contents + roff - 1);
4487 }
4488 bfd_put_32 (output_bfd,
4489 elf_x86_64_tpoff (info, relocation),
4490 contents + roff);
4491 continue;
4492 }
4493 else
4494 BFD_ASSERT (FALSE);
4495 }
4496
4497 if (htab->elf.sgot == NULL)
4498 abort ();
4499
4500 if (h != NULL)
4501 {
4502 off = h->got.offset;
4503 offplt = elf_x86_64_hash_entry (h)->tlsdesc_got;
4504 }
4505 else
4506 {
4507 if (local_got_offsets == NULL)
4508 abort ();
4509
4510 off = local_got_offsets[r_symndx];
4511 offplt = local_tlsdesc_gotents[r_symndx];
4512 }
4513
4514 if ((off & 1) != 0)
4515 off &= ~1;
4516 else
4517 {
4518 Elf_Internal_Rela outrel;
4519 int dr_type, indx;
4520 asection *sreloc;
4521
4522 if (htab->elf.srelgot == NULL)
4523 abort ();
4524
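		  /* Use the dynamic symbol index if there is one; 0 means
		     the symbol resolves locally.  */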
4525 indx = h && h->dynindx != -1 ? h->dynindx : 0;
4526
4527 if (GOT_TLS_GDESC_P (tls_type))
4528 {
4529 outrel.r_info = htab->r_info (indx, R_X86_64_TLSDESC);
4530 BFD_ASSERT (htab->sgotplt_jump_table_size + offplt
4531 + 2 * GOT_ENTRY_SIZE <= htab->elf.sgotplt->size);
4532 outrel.r_offset = (htab->elf.sgotplt->output_section->vma
4533 + htab->elf.sgotplt->output_offset
4534 + offplt
4535 + htab->sgotplt_jump_table_size);
4536 sreloc = htab->elf.srelplt;
4537 if (indx == 0)
4538 outrel.r_addend = relocation - elf_x86_64_dtpoff_base (info);
4539 else
4540 outrel.r_addend = 0;
4541 elf_append_rela (output_bfd, sreloc, &outrel);
4542 }
4543
4544 sreloc = htab->elf.srelgot;
4545
4546 outrel.r_offset = (htab->elf.sgot->output_section->vma
4547 + htab->elf.sgot->output_offset + off);
4548
4549 if (GOT_TLS_GD_P (tls_type))
4550 dr_type = R_X86_64_DTPMOD64;
4551 else if (GOT_TLS_GDESC_P (tls_type))
4552 goto dr_done;
4553 else
4554 dr_type = R_X86_64_TPOFF64;
4555
4556 bfd_put_64 (output_bfd, 0, htab->elf.sgot->contents + off);
4557 outrel.r_addend = 0;
4558 if ((dr_type == R_X86_64_TPOFF64
4559 || dr_type == R_X86_64_TLSDESC) && indx == 0)
4560 outrel.r_addend = relocation - elf_x86_64_dtpoff_base (info);
4561 outrel.r_info = htab->r_info (indx, dr_type);
4562
4563 elf_append_rela (output_bfd, sreloc, &outrel);
4564
4565 if (GOT_TLS_GD_P (tls_type))
4566 {
4567 if (indx == 0)
4568 {
4569 BFD_ASSERT (! unresolved_reloc);
4570 bfd_put_64 (output_bfd,
4571 relocation - elf_x86_64_dtpoff_base (info),
4572 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
4573 }
4574 else
4575 {
4576 bfd_put_64 (output_bfd, 0,
4577 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
4578 outrel.r_info = htab->r_info (indx,
4579 R_X86_64_DTPOFF64);
4580 outrel.r_offset += GOT_ENTRY_SIZE;
4581 elf_append_rela (output_bfd, sreloc,
4582 &outrel);
4583 }
4584 }
4585
4586 dr_done:
4587 if (h != NULL)
4588 h->got.offset |= 1;
4589 else
4590 local_got_offsets[r_symndx] |= 1;
4591 }
4592
4593 if (off >= (bfd_vma) -2
4594 && ! GOT_TLS_GDESC_P (tls_type))
4595 abort ();
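	      /* If the TLS transition left the relocation type unchanged,
		 resolve it against the GOT entry (or the TLSDESC slot in
		 .got.plt); otherwise rewrite the code sequence below.  */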
4596 if (r_type == ELF32_R_TYPE (rel->r_info))
4597 {
4598 if (r_type == R_X86_64_GOTPC32_TLSDESC
4599 || r_type == R_X86_64_TLSDESC_CALL)
4600 relocation = htab->elf.sgotplt->output_section->vma
4601 + htab->elf.sgotplt->output_offset
4602 + offplt + htab->sgotplt_jump_table_size;
4603 else
4604 relocation = htab->elf.sgot->output_section->vma
4605 + htab->elf.sgot->output_offset + off;
4606 unresolved_reloc = FALSE;
4607 }
4608 else
4609 {
4610 bfd_vma roff = rel->r_offset;
4611
4612 if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSGD)
4613 {
4614 /* GD->IE transition. For 64bit, change
4615 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
4616 .word 0x6666; rex64; call __tls_get_addr@plt
4617 into:
4618 movq %fs:0, %rax
4619 addq foo@gottpoff(%rip), %rax
4620 For 32bit, change
4621 leaq foo@tlsgd(%rip), %rdi
4622 .word 0x6666; rex64; call __tls_get_addr@plt
4623 into:
4624 movl %fs:0, %eax
4625 addq foo@gottpoff(%rip), %rax
4626 For largepic, change:
4627 leaq foo@tlsgd(%rip), %rdi
4628 movabsq $__tls_get_addr@pltoff, %rax
4629 addq %rbx, %rax
4630 call *%rax
4631 into:
4632 movq %fs:0, %rax
4633 			 addq foo@gottpoff(%rip), %rax
4634 nopw 0x0(%rax,%rax,1) */
4635 int largepic = 0;
4636 if (ABI_64_P (output_bfd)
4637 && contents[roff + 5] == (bfd_byte) '\xb8')
4638 {
4639 memcpy (contents + roff - 3,
4640 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05"
4641 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
4642 largepic = 1;
4643 }
4644 else if (ABI_64_P (output_bfd))
4645 memcpy (contents + roff - 4,
4646 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
4647 16);
4648 else
4649 memcpy (contents + roff - 3,
4650 "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
4651 15);
4652
4653 relocation = (htab->elf.sgot->output_section->vma
4654 + htab->elf.sgot->output_offset + off
4655 - roff
4656 - largepic
4657 - input_section->output_section->vma
4658 - input_section->output_offset
4659 - 12);
4660 bfd_put_32 (output_bfd, relocation,
4661 contents + roff + 8 + largepic);
4662 /* Skip R_X86_64_PLT32/R_X86_64_PLTOFF64. */
4663 rel++;
4664 continue;
4665 }
4666 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTPC32_TLSDESC)
4667 {
4668 /* GDesc -> IE transition.
4669 It's originally something like:
4670 leaq x@tlsdesc(%rip), %rax
4671
4672 Change it to:
4673 movq x@gottpoff(%rip), %rax # before xchg %ax,%ax. */
4674
4675 /* Now modify the instruction as appropriate. To
4676 turn a leaq into a movq in the form we use it, it
4677 suffices to change the second byte from 0x8d to
4678 0x8b. */
4679 bfd_put_8 (output_bfd, 0x8b, contents + roff - 2);
4680
4681 bfd_put_32 (output_bfd,
4682 htab->elf.sgot->output_section->vma
4683 + htab->elf.sgot->output_offset + off
4684 - rel->r_offset
4685 - input_section->output_section->vma
4686 - input_section->output_offset
4687 - 4,
4688 contents + roff);
4689 continue;
4690 }
4691 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSDESC_CALL)
4692 {
4693 /* GDesc -> IE transition.
4694 It's originally:
4695 call *(%rax)
4696
4697 Change it to:
4698 xchg %ax, %ax. */
4699
4700 bfd_put_8 (output_bfd, 0x66, contents + roff);
4701 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
4702 continue;
4703 }
4704 else
4705 BFD_ASSERT (FALSE);
4706 }
4707 break;
4708
4709 case R_X86_64_TLSLD:
4710 if (! elf_x86_64_tls_transition (info, input_bfd,
4711 input_section, contents,
4712 symtab_hdr, sym_hashes,
4713 &r_type, GOT_UNKNOWN,
4714 rel, relend, h, r_symndx))
4715 return FALSE;
4716
4717 if (r_type != R_X86_64_TLSLD)
4718 {
4719 /* LD->LE transition:
4720 leaq foo@tlsld(%rip), %rdi; call __tls_get_addr.
4721 For 64bit, we change it into:
4722 .word 0x6666; .byte 0x66; movq %fs:0, %rax.
4723 For 32bit, we change it into:
4724 nopl 0x0(%rax); movl %fs:0, %eax.
4725 For largepic, change:
4726 		 leaq foo@tlsld(%rip), %rdi
4727 movabsq $__tls_get_addr@pltoff, %rax
4728 addq %rbx, %rax
4729 call *%rax
4730 into:
4731 data32 data32 data32 nopw %cs:0x0(%rax,%rax,1)
4732 		 movq %fs:0, %rax */
4733
4734 BFD_ASSERT (r_type == R_X86_64_TPOFF32);
4735 if (ABI_64_P (output_bfd)
4736 && contents[rel->r_offset + 5] == (bfd_byte) '\xb8')
4737 memcpy (contents + rel->r_offset - 3,
4738 "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0"
4739 "\x64\x48\x8b\x04\x25\0\0\0", 22);
4740 else if (ABI_64_P (output_bfd))
4741 memcpy (contents + rel->r_offset - 3,
4742 "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12);
4743 else
4744 memcpy (contents + rel->r_offset - 3,
4745 "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12);
4746 /* Skip R_X86_64_PC32/R_X86_64_PLT32/R_X86_64_PLTOFF64. */
4747 rel++;
4748 continue;
4749 }
4750
4751 if (htab->elf.sgot == NULL)
4752 abort ();
4753
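	      /* A single GOT entry is shared by all R_X86_64_TLSLD
		 relocations; the least significant bit of
		 tls_ld_got.offset records whether it has already been
		 initialized.  */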
4754 off = htab->tls_ld_got.offset;
4755 if (off & 1)
4756 off &= ~1;
4757 else
4758 {
4759 Elf_Internal_Rela outrel;
4760
4761 if (htab->elf.srelgot == NULL)
4762 abort ();
4763
4764 outrel.r_offset = (htab->elf.sgot->output_section->vma
4765 + htab->elf.sgot->output_offset + off);
4766
4767 bfd_put_64 (output_bfd, 0,
4768 htab->elf.sgot->contents + off);
4769 bfd_put_64 (output_bfd, 0,
4770 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
4771 outrel.r_info = htab->r_info (0, R_X86_64_DTPMOD64);
4772 outrel.r_addend = 0;
4773 elf_append_rela (output_bfd, htab->elf.srelgot,
4774 &outrel);
4775 htab->tls_ld_got.offset |= 1;
4776 }
4777 relocation = htab->elf.sgot->output_section->vma
4778 + htab->elf.sgot->output_offset + off;
4779 unresolved_reloc = FALSE;
4780 break;
4781
4782 case R_X86_64_DTPOFF32:
4783 	      if (!info->executable || (input_section->flags & SEC_CODE) == 0)
4784 relocation -= elf_x86_64_dtpoff_base (info);
4785 else
4786 relocation = elf_x86_64_tpoff (info, relocation);
4787 break;
4788
4789 case R_X86_64_TPOFF32:
4790 case R_X86_64_TPOFF64:
4791 BFD_ASSERT (info->executable);
4792 relocation = elf_x86_64_tpoff (info, relocation);
4793 break;
4794
4795 case R_X86_64_DTPOFF64:
4796 BFD_ASSERT ((input_section->flags & SEC_CODE) == 0);
4797 relocation -= elf_x86_64_dtpoff_base (info);
4798 break;
4799
4800 default:
4801 break;
4802 }
4803
4804 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
4805 because such sections are not SEC_ALLOC and thus ld.so will
4806 not process them. */
4807 if (unresolved_reloc
4808 && !((input_section->flags & SEC_DEBUGGING) != 0
4809 && h->def_dynamic)
4810 && _bfd_elf_section_offset (output_bfd, info, input_section,
4811 rel->r_offset) != (bfd_vma) -1)
4812 {
4813 (*_bfd_error_handler)
4814 (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
4815 input_bfd,
4816 input_section,
4817 (long) rel->r_offset,
4818 howto->name,
4819 h->root.root.string);
4820 return FALSE;
4821 }
4822
4823 do_relocation:
4824 r = _bfd_final_link_relocate (howto, input_bfd, input_section,
4825 contents, rel->r_offset,
4826 relocation, rel->r_addend);
4827
4828 check_relocation_error:
4829 if (r != bfd_reloc_ok)
4830 {
4831 const char *name;
4832
4833 if (h != NULL)
4834 name = h->root.root.string;
4835 else
4836 {
4837 name = bfd_elf_string_from_elf_section (input_bfd,
4838 symtab_hdr->sh_link,
4839 sym->st_name);
4840 if (name == NULL)
4841 return FALSE;
4842 if (*name == '\0')
4843 name = bfd_section_name (input_bfd, sec);
4844 }
4845
4846 if (r == bfd_reloc_overflow)
4847 {
4848 if (! ((*info->callbacks->reloc_overflow)
4849 (info, (h ? &h->root : NULL), name, howto->name,
4850 (bfd_vma) 0, input_bfd, input_section,
4851 rel->r_offset)))
4852 return FALSE;
4853 }
4854 else
4855 {
4856 (*_bfd_error_handler)
4857 (_("%B(%A+0x%lx): reloc against `%s': error %d"),
4858 input_bfd, input_section,
4859 (long) rel->r_offset, name, (int) r);
4860 return FALSE;
4861 }
4862 }
4863 }
4864
4865 return TRUE;
4866 }
4867
4868 /* Finish up dynamic symbol handling. We set the contents of various
4869 dynamic sections here. */
4870
4871 static bfd_boolean
4872 elf_x86_64_finish_dynamic_symbol (bfd *output_bfd,
4873 struct bfd_link_info *info,
4874 struct elf_link_hash_entry *h,
4875 Elf_Internal_Sym *sym ATTRIBUTE_UNUSED)
4876 {
4877 struct elf_x86_64_link_hash_table *htab;
4878 const struct elf_x86_64_backend_data *abed;
4879 bfd_boolean use_plt_bnd;
4880 struct elf_x86_64_link_hash_entry *eh;
4881
4882 htab = elf_x86_64_hash_table (info);
4883 if (htab == NULL)
4884 return FALSE;
4885
4886 /* Use MPX backend data in case of BND relocation. Use .plt_bnd
4887 section only if there is .plt section. */
4888 use_plt_bnd = htab->elf.splt != NULL && htab->plt_bnd != NULL;
4889 abed = (use_plt_bnd
4890 ? &elf_x86_64_bnd_arch_bed
4891 : get_elf_x86_64_backend_data (output_bfd));
4892
4893 eh = (struct elf_x86_64_link_hash_entry *) h;
4894
4895 if (h->plt.offset != (bfd_vma) -1)
4896 {
4897 bfd_vma plt_index;
4898 bfd_vma got_offset, plt_offset, plt_plt_offset, plt_got_offset;
4899 bfd_vma plt_plt_insn_end, plt_got_insn_size;
4900 Elf_Internal_Rela rela;
4901 bfd_byte *loc;
4902 asection *plt, *gotplt, *relplt, *resolved_plt;
4903 const struct elf_backend_data *bed;
4904 bfd_vma plt_got_pcrel_offset;
4905
4906 /* When building a static executable, use .iplt, .igot.plt and
4907 .rela.iplt sections for STT_GNU_IFUNC symbols. */
4908 if (htab->elf.splt != NULL)
4909 {
4910 plt = htab->elf.splt;
4911 gotplt = htab->elf.sgotplt;
4912 relplt = htab->elf.srelplt;
4913 }
4914 else
4915 {
4916 plt = htab->elf.iplt;
4917 gotplt = htab->elf.igotplt;
4918 relplt = htab->elf.irelplt;
4919 }
4920
4921 /* This symbol has an entry in the procedure linkage table. Set
4922 it up. */
4923 if ((h->dynindx == -1
4924 && !((h->forced_local || info->executable)
4925 && h->def_regular
4926 && h->type == STT_GNU_IFUNC))
4927 || plt == NULL
4928 || gotplt == NULL
4929 || relplt == NULL)
4930 abort ();
4931
4932 /* Get the index in the procedure linkage table which
4933 corresponds to this symbol. This is the index of this symbol
4934 in all the symbols for which we are making plt entries. The
4935 first entry in the procedure linkage table is reserved.
4936
4937 Get the offset into the .got table of the entry that
4938 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
4939 bytes. The first three are reserved for the dynamic linker.
4940
4941 For static executables, we don't reserve anything. */
4942
4943 if (plt == htab->elf.splt)
4944 {
4945 got_offset = h->plt.offset / abed->plt_entry_size - 1;
4946 got_offset = (got_offset + 3) * GOT_ENTRY_SIZE;
4947 }
4948 else
4949 {
4950 got_offset = h->plt.offset / abed->plt_entry_size;
4951 got_offset = got_offset * GOT_ENTRY_SIZE;
4952 }
4953
4954 plt_plt_insn_end = abed->plt_plt_insn_end;
4955 plt_plt_offset = abed->plt_plt_offset;
4956 plt_got_insn_size = abed->plt_got_insn_size;
4957 plt_got_offset = abed->plt_got_offset;
4958 if (use_plt_bnd)
4959 {
4960 /* Use the second PLT with BND relocations. */
4961 const bfd_byte *plt_entry, *plt2_entry;
4962
4963 if (eh->has_bnd_reloc)
4964 {
4965 plt_entry = elf_x86_64_bnd_plt_entry;
4966 plt2_entry = elf_x86_64_bnd_plt2_entry;
4967 }
4968 else
4969 {
4970 plt_entry = elf_x86_64_legacy_plt_entry;
4971 plt2_entry = elf_x86_64_legacy_plt2_entry;
4972
4973 /* Subtract 1 since there is no BND prefix. */
4974 plt_plt_insn_end -= 1;
4975 plt_plt_offset -= 1;
4976 plt_got_insn_size -= 1;
4977 plt_got_offset -= 1;
4978 }
4979
4980 BFD_ASSERT (sizeof (elf_x86_64_bnd_plt_entry)
4981 == sizeof (elf_x86_64_legacy_plt_entry));
4982
4983 /* Fill in the entry in the procedure linkage table. */
4984 memcpy (plt->contents + h->plt.offset,
4985 plt_entry, sizeof (elf_x86_64_legacy_plt_entry));
4986 /* Fill in the entry in the second PLT. */
4987 memcpy (htab->plt_bnd->contents + eh->plt_bnd.offset,
4988 plt2_entry, sizeof (elf_x86_64_legacy_plt2_entry));
4989
4990 resolved_plt = htab->plt_bnd;
4991 plt_offset = eh->plt_bnd.offset;
4992 }
4993 else
4994 {
4995 /* Fill in the entry in the procedure linkage table. */
4996 memcpy (plt->contents + h->plt.offset, abed->plt_entry,
4997 abed->plt_entry_size);
4998
4999 resolved_plt = plt;
5000 plt_offset = h->plt.offset;
5001 }
5002
5003 /* Insert the relocation positions of the plt section. */
5004
5005     /* Put the offset of the PC-relative instruction referring to the GOT entry,
5006 subtracting the size of that instruction. */
5007 plt_got_pcrel_offset = (gotplt->output_section->vma
5008 + gotplt->output_offset
5009 + got_offset
5010 - resolved_plt->output_section->vma
5011 - resolved_plt->output_offset
5012 - plt_offset
5013 - plt_got_insn_size);
5014
5015 /* Check PC-relative offset overflow in PLT entry. */
5016 if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff)
5017 info->callbacks->einfo (_("%F%B: PC-relative offset overflow in PLT entry for `%s'\n"),
5018 output_bfd, h->root.root.string);
5019
5020 bfd_put_32 (output_bfd, plt_got_pcrel_offset,
5021 resolved_plt->contents + plt_offset + plt_got_offset);
5022
5023       /* Fill in the entry in the global offset table; initially this
5024 points to the second part of the PLT entry. */
5025 bfd_put_64 (output_bfd, (plt->output_section->vma
5026 + plt->output_offset
5027 + h->plt.offset + abed->plt_lazy_offset),
5028 gotplt->contents + got_offset);
5029
5030 /* Fill in the entry in the .rela.plt section. */
5031 rela.r_offset = (gotplt->output_section->vma
5032 + gotplt->output_offset
5033 + got_offset);
5034 if (h->dynindx == -1
5035 || ((info->executable
5036 || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
5037 && h->def_regular
5038 && h->type == STT_GNU_IFUNC))
5039 {
5040 /* If an STT_GNU_IFUNC symbol is locally defined, generate
5041 R_X86_64_IRELATIVE instead of R_X86_64_JUMP_SLOT. */
5042 rela.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
5043 rela.r_addend = (h->root.u.def.value
5044 + h->root.u.def.section->output_section->vma
5045 + h->root.u.def.section->output_offset);
5046 /* R_X86_64_IRELATIVE comes last. */
5047 plt_index = htab->next_irelative_index--;
5048 }
5049 else
5050 {
5051 rela.r_info = htab->r_info (h->dynindx, R_X86_64_JUMP_SLOT);
5052 rela.r_addend = 0;
5053 plt_index = htab->next_jump_slot_index++;
5054 }
5055
5056 /* Don't fill PLT entry for static executables. */
5057 if (plt == htab->elf.splt)
5058 {
5059 bfd_vma plt0_offset = h->plt.offset + plt_plt_insn_end;
5060
5061 /* Put relocation index. */
5062 bfd_put_32 (output_bfd, plt_index,
5063 plt->contents + h->plt.offset + abed->plt_reloc_offset);
5064
5065 /* Put offset for jmp .PLT0 and check for overflow. We don't
5066 check relocation index for overflow since branch displacement
5067 will overflow first. */
5068 if (plt0_offset > 0x80000000)
5069 info->callbacks->einfo (_("%F%B: branch displacement overflow in PLT entry for `%s'\n"),
5070 output_bfd, h->root.root.string);
5071 bfd_put_32 (output_bfd, - plt0_offset,
5072 plt->contents + h->plt.offset + plt_plt_offset);
5073 }
5074
5075 bed = get_elf_backend_data (output_bfd);
5076 loc = relplt->contents + plt_index * bed->s->sizeof_rela;
5077 bed->s->swap_reloca_out (output_bfd, &rela, loc);
5078 }
5079 else if (eh->plt_got.offset != (bfd_vma) -1)
5080 {
5081 bfd_vma got_offset, plt_offset, plt_got_offset, plt_got_insn_size;
5082 asection *plt, *got;
5083 bfd_boolean got_after_plt;
5084 int32_t got_pcrel_offset;
5085 const bfd_byte *got_plt_entry;
5086
5087 /* Set the entry in the GOT procedure linkage table. */
5088 plt = htab->plt_got;
5089 got = htab->elf.sgot;
5090 got_offset = h->got.offset;
5091
5092 if (got_offset == (bfd_vma) -1
5093 || h->type == STT_GNU_IFUNC
5094 || plt == NULL
5095 || got == NULL)
5096 abort ();
5097
5098 /* Use the second PLT entry template for the GOT PLT since they
5099 	 are identical.  */
5100 plt_got_insn_size = elf_x86_64_bnd_arch_bed.plt_got_insn_size;
5101 plt_got_offset = elf_x86_64_bnd_arch_bed.plt_got_offset;
5102 if (eh->has_bnd_reloc)
5103 got_plt_entry = elf_x86_64_bnd_plt2_entry;
5104 else
5105 {
5106 got_plt_entry = elf_x86_64_legacy_plt2_entry;
5107
5108 /* Subtract 1 since there is no BND prefix. */
5109 plt_got_insn_size -= 1;
5110 plt_got_offset -= 1;
5111 }
5112
5113 /* Fill in the entry in the GOT procedure linkage table. */
5114 plt_offset = eh->plt_got.offset;
5115 memcpy (plt->contents + plt_offset,
5116 got_plt_entry, sizeof (elf_x86_64_legacy_plt2_entry));
5117
5118       /* Put the offset of the PC-relative instruction referring to the GOT
5119 entry, subtracting the size of that instruction. */
5120 got_pcrel_offset = (got->output_section->vma
5121 + got->output_offset
5122 + got_offset
5123 - plt->output_section->vma
5124 - plt->output_offset
5125 - plt_offset
5126 - plt_got_insn_size);
5127
5128 /* Check PC-relative offset overflow in GOT PLT entry. */
5129 got_after_plt = got->output_section->vma > plt->output_section->vma;
5130 if ((got_after_plt && got_pcrel_offset < 0)
5131 || (!got_after_plt && got_pcrel_offset > 0))
5132 info->callbacks->einfo (_("%F%B: PC-relative offset overflow in GOT PLT entry for `%s'\n"),
5133 output_bfd, h->root.root.string);
5134
5135 bfd_put_32 (output_bfd, got_pcrel_offset,
5136 plt->contents + plt_offset + plt_got_offset);
5137 }
5138
5139 if (!h->def_regular
5140 && (h->plt.offset != (bfd_vma) -1
5141 || eh->plt_got.offset != (bfd_vma) -1))
5142 {
5143 /* Mark the symbol as undefined, rather than as defined in
5144 the .plt section. Leave the value if there were any
5145 relocations where pointer equality matters (this is a clue
5146 for the dynamic linker, to make function pointer
5147 comparisons work between an application and shared
5148 library), otherwise set it to zero. If a function is only
5149 called from a binary, there is no need to slow down
5150 shared libraries because of that. */
5151 sym->st_shndx = SHN_UNDEF;
5152 if (!h->pointer_equality_needed)
5153 sym->st_value = 0;
5154 }
5155
5156 if (h->got.offset != (bfd_vma) -1
5157 && ! GOT_TLS_GD_ANY_P (elf_x86_64_hash_entry (h)->tls_type)
5158 && elf_x86_64_hash_entry (h)->tls_type != GOT_TLS_IE)
5159 {
5160 Elf_Internal_Rela rela;
5161
5162 /* This symbol has an entry in the global offset table. Set it
5163 up. */
5164 if (htab->elf.sgot == NULL || htab->elf.srelgot == NULL)
5165 abort ();
5166
5167 rela.r_offset = (htab->elf.sgot->output_section->vma
5168 + htab->elf.sgot->output_offset
5169 + (h->got.offset &~ (bfd_vma) 1));
5170
5171 /* If this is a static link, or it is a -Bsymbolic link and the
5172 symbol is defined locally or was forced to be local because
5173 of a version file, we just want to emit a RELATIVE reloc.
5174 The entry in the global offset table will already have been
5175 initialized in the relocate_section function. */
5176 if (h->def_regular
5177 && h->type == STT_GNU_IFUNC)
5178 {
5179 if (info->shared)
5180 {
5181 /* Generate R_X86_64_GLOB_DAT. */
5182 goto do_glob_dat;
5183 }
5184 else
5185 {
5186 asection *plt;
5187
5188 if (!h->pointer_equality_needed)
5189 abort ();
5190
5191 		      /* For a non-shared object, we can't use .got.plt, which
5192 			 contains the real function address if we need pointer
5193 equality. We load the GOT entry with the PLT entry. */
5194 plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt;
5195 bfd_put_64 (output_bfd, (plt->output_section->vma
5196 + plt->output_offset
5197 + h->plt.offset),
5198 htab->elf.sgot->contents + h->got.offset);
5199 return TRUE;
5200 }
5201 }
5202 else if (info->shared
5203 && SYMBOL_REFERENCES_LOCAL (info, h))
5204 {
5205 if (!h->def_regular)
5206 return FALSE;
5207 BFD_ASSERT((h->got.offset & 1) != 0);
5208 rela.r_info = htab->r_info (0, R_X86_64_RELATIVE);
5209 rela.r_addend = (h->root.u.def.value
5210 + h->root.u.def.section->output_section->vma
5211 + h->root.u.def.section->output_offset);
5212 }
5213 else
5214 {
5215 BFD_ASSERT((h->got.offset & 1) == 0);
5216 do_glob_dat:
5217 bfd_put_64 (output_bfd, (bfd_vma) 0,
5218 htab->elf.sgot->contents + h->got.offset);
5219 rela.r_info = htab->r_info (h->dynindx, R_X86_64_GLOB_DAT);
5220 rela.r_addend = 0;
5221 }
5222
5223 elf_append_rela (output_bfd, htab->elf.srelgot, &rela);
5224 }
5225
5226 if (h->needs_copy)
5227 {
5228 Elf_Internal_Rela rela;
5229
5230 /* This symbol needs a copy reloc. Set it up. */
5231
5232 if (h->dynindx == -1
5233 || (h->root.type != bfd_link_hash_defined
5234 && h->root.type != bfd_link_hash_defweak)
5235 || htab->srelbss == NULL)
5236 abort ();
5237
5238 rela.r_offset = (h->root.u.def.value
5239 + h->root.u.def.section->output_section->vma
5240 + h->root.u.def.section->output_offset);
5241 rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY);
5242 rela.r_addend = 0;
5243 elf_append_rela (output_bfd, htab->srelbss, &rela);
5244 }
5245
5246 return TRUE;
5247 }
5248
5249 /* Finish up local dynamic symbol handling. We set the contents of
5250 various dynamic sections here. */
5251
5252 static bfd_boolean
5253 elf_x86_64_finish_local_dynamic_symbol (void **slot, void *inf)
5254 {
5255 struct elf_link_hash_entry *h
5256 = (struct elf_link_hash_entry *) *slot;
5257 struct bfd_link_info *info
5258 = (struct bfd_link_info *) inf;
5259
5260 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
5261 info, h, NULL);
5262 }
5263
5264 /* Used to decide how to sort relocs in an optimal manner for the
5265 dynamic linker, before writing them out. */
5266
5267 static enum elf_reloc_type_class
5268 elf_x86_64_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
5269 const asection *rel_sec ATTRIBUTE_UNUSED,
5270 const Elf_Internal_Rela *rela)
5271 {
5272 switch ((int) ELF32_R_TYPE (rela->r_info))
5273 {
5274 case R_X86_64_RELATIVE:
5275 case R_X86_64_RELATIVE64:
5276 return reloc_class_relative;
5277 case R_X86_64_JUMP_SLOT:
5278 return reloc_class_plt;
5279 case R_X86_64_COPY:
5280 return reloc_class_copy;
5281 default:
5282 return reloc_class_normal;
5283 }
5284 }
5285
5286 /* Finish up the dynamic sections. */
5287
5288 static bfd_boolean
5289 elf_x86_64_finish_dynamic_sections (bfd *output_bfd,
5290 struct bfd_link_info *info)
5291 {
5292 struct elf_x86_64_link_hash_table *htab;
5293 bfd *dynobj;
5294 asection *sdyn;
5295 const struct elf_x86_64_backend_data *abed;
5296
5297 htab = elf_x86_64_hash_table (info);
5298 if (htab == NULL)
5299 return FALSE;
5300
5301 /* Use MPX backend data in case of BND relocation. Use .plt_bnd
5302 section only if there is .plt section. */
5303 abed = (htab->elf.splt != NULL && htab->plt_bnd != NULL
5304 ? &elf_x86_64_bnd_arch_bed
5305 : get_elf_x86_64_backend_data (output_bfd));
5306
5307 dynobj = htab->elf.dynobj;
5308 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
5309
5310 if (htab->elf.dynamic_sections_created)
5311 {
5312 bfd_byte *dyncon, *dynconend;
5313 const struct elf_backend_data *bed;
5314 bfd_size_type sizeof_dyn;
5315
5316 if (sdyn == NULL || htab->elf.sgot == NULL)
5317 abort ();
5318
5319 bed = get_elf_backend_data (dynobj);
5320 sizeof_dyn = bed->s->sizeof_dyn;
5321 dyncon = sdyn->contents;
5322 dynconend = sdyn->contents + sdyn->size;
5323 for (; dyncon < dynconend; dyncon += sizeof_dyn)
5324 {
5325 Elf_Internal_Dyn dyn;
5326 asection *s;
5327
5328 (*bed->s->swap_dyn_in) (dynobj, dyncon, &dyn);
5329
5330 switch (dyn.d_tag)
5331 {
5332 default:
5333 continue;
5334
5335 case DT_PLTGOT:
5336 s = htab->elf.sgotplt;
5337 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
5338 break;
5339
5340 case DT_JMPREL:
5341 dyn.d_un.d_ptr = htab->elf.srelplt->output_section->vma;
5342 break;
5343
5344 case DT_PLTRELSZ:
5345 s = htab->elf.srelplt->output_section;
5346 dyn.d_un.d_val = s->size;
5347 break;
5348
5349 case DT_RELASZ:
5350 /* The procedure linkage table relocs (DT_JMPREL) should
5351 not be included in the overall relocs (DT_RELA).
5352 Therefore, we override the DT_RELASZ entry here to
5353 make it not include the JMPREL relocs. Since the
5354 linker script arranges for .rela.plt to follow all
5355 other relocation sections, we don't have to worry
5356 about changing the DT_RELA entry. */
5357 if (htab->elf.srelplt != NULL)
5358 {
5359 s = htab->elf.srelplt->output_section;
5360 dyn.d_un.d_val -= s->size;
5361 }
5362 break;
5363
5364 case DT_TLSDESC_PLT:
5365 s = htab->elf.splt;
5366 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
5367 + htab->tlsdesc_plt;
5368 break;
5369
5370 case DT_TLSDESC_GOT:
5371 s = htab->elf.sgot;
5372 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
5373 + htab->tlsdesc_got;
5374 break;
5375 }
5376
5377 (*bed->s->swap_dyn_out) (output_bfd, &dyn, dyncon);
5378 }
5379
5380 /* Fill in the special first entry in the procedure linkage table. */
5381 if (htab->elf.splt && htab->elf.splt->size > 0)
5382 {
5383 /* Fill in the first entry in the procedure linkage table. */
5384 memcpy (htab->elf.splt->contents,
5385 abed->plt0_entry, abed->plt_entry_size);
5386 /* Add offset for pushq GOT+8(%rip), since the instruction
5387 	     uses 6 bytes, subtract this value.  */
5388 bfd_put_32 (output_bfd,
5389 (htab->elf.sgotplt->output_section->vma
5390 + htab->elf.sgotplt->output_offset
5391 + 8
5392 - htab->elf.splt->output_section->vma
5393 - htab->elf.splt->output_offset
5394 - 6),
5395 htab->elf.splt->contents + abed->plt0_got1_offset);
5396 /* Add offset for the PC-relative instruction accessing GOT+16,
5397 subtracting the offset to the end of that instruction. */
5398 bfd_put_32 (output_bfd,
5399 (htab->elf.sgotplt->output_section->vma
5400 + htab->elf.sgotplt->output_offset
5401 + 16
5402 - htab->elf.splt->output_section->vma
5403 - htab->elf.splt->output_offset
5404 - abed->plt0_got2_insn_end),
5405 htab->elf.splt->contents + abed->plt0_got2_offset);
5406
5407 elf_section_data (htab->elf.splt->output_section)
5408 ->this_hdr.sh_entsize = abed->plt_entry_size;
5409
5410 if (htab->tlsdesc_plt)
5411 {
5412 bfd_put_64 (output_bfd, (bfd_vma) 0,
5413 htab->elf.sgot->contents + htab->tlsdesc_got);
5414
5415 memcpy (htab->elf.splt->contents + htab->tlsdesc_plt,
5416 abed->plt0_entry, abed->plt_entry_size);
5417
5418 /* Add offset for pushq GOT+8(%rip), since the
5419 		 instruction uses 6 bytes, subtract this value.  */
5420 bfd_put_32 (output_bfd,
5421 (htab->elf.sgotplt->output_section->vma
5422 + htab->elf.sgotplt->output_offset
5423 + 8
5424 - htab->elf.splt->output_section->vma
5425 - htab->elf.splt->output_offset
5426 - htab->tlsdesc_plt
5427 - 6),
5428 htab->elf.splt->contents
5429 + htab->tlsdesc_plt + abed->plt0_got1_offset);
5430 /* Add offset for the PC-relative instruction accessing GOT+TDG,
5431 		 where TDG stands for htab->tlsdesc_got, subtracting the offset
5432 to the end of that instruction. */
5433 bfd_put_32 (output_bfd,
5434 (htab->elf.sgot->output_section->vma
5435 + htab->elf.sgot->output_offset
5436 + htab->tlsdesc_got
5437 - htab->elf.splt->output_section->vma
5438 - htab->elf.splt->output_offset
5439 - htab->tlsdesc_plt
5440 - abed->plt0_got2_insn_end),
5441 htab->elf.splt->contents
5442 + htab->tlsdesc_plt + abed->plt0_got2_offset);
5443 }
5444 }
5445 }
5446
5447 if (htab->plt_bnd != NULL)
5448 elf_section_data (htab->plt_bnd->output_section)
5449 ->this_hdr.sh_entsize = sizeof (elf_x86_64_bnd_plt2_entry);
5450
5451 if (htab->elf.sgotplt)
5452 {
5453 if (bfd_is_abs_section (htab->elf.sgotplt->output_section))
5454 {
5455 (*_bfd_error_handler)
5456 (_("discarded output section: `%A'"), htab->elf.sgotplt);
5457 return FALSE;
5458 }
5459
5460 /* Fill in the first three entries in the global offset table. */
5461 if (htab->elf.sgotplt->size > 0)
5462 {
5463 /* Set the first entry in the global offset table to the address of
5464 the dynamic section. */
5465 if (sdyn == NULL)
5466 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents);
5467 else
5468 bfd_put_64 (output_bfd,
5469 sdyn->output_section->vma + sdyn->output_offset,
5470 htab->elf.sgotplt->contents);
5471 /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
5472 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents + GOT_ENTRY_SIZE);
5473 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents + GOT_ENTRY_SIZE*2);
5474 }
5475
5476 elf_section_data (htab->elf.sgotplt->output_section)->this_hdr.sh_entsize =
5477 GOT_ENTRY_SIZE;
5478 }
5479
5480 /* Adjust .eh_frame for .plt section. */
5481 if (htab->plt_eh_frame != NULL
5482 && htab->plt_eh_frame->contents != NULL)
5483 {
5484 if (htab->elf.splt != NULL
5485 && htab->elf.splt->size != 0
5486 && (htab->elf.splt->flags & SEC_EXCLUDE) == 0
5487 && htab->elf.splt->output_section != NULL
5488 && htab->plt_eh_frame->output_section != NULL)
5489 {
5490 bfd_vma plt_start = htab->elf.splt->output_section->vma;
5491 bfd_vma eh_frame_start = htab->plt_eh_frame->output_section->vma
5492 + htab->plt_eh_frame->output_offset
5493 + PLT_FDE_START_OFFSET;
5494 bfd_put_signed_32 (dynobj, plt_start - eh_frame_start,
5495 htab->plt_eh_frame->contents
5496 + PLT_FDE_START_OFFSET);
5497 }
5498 if (htab->plt_eh_frame->sec_info_type == SEC_INFO_TYPE_EH_FRAME)
5499 {
5500 if (! _bfd_elf_write_section_eh_frame (output_bfd, info,
5501 htab->plt_eh_frame,
5502 htab->plt_eh_frame->contents))
5503 return FALSE;
5504 }
5505 }
5506
5507 if (htab->elf.sgot && htab->elf.sgot->size > 0)
5508 elf_section_data (htab->elf.sgot->output_section)->this_hdr.sh_entsize
5509 = GOT_ENTRY_SIZE;
5510
5511 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
5512 htab_traverse (htab->loc_hash_table,
5513 elf_x86_64_finish_local_dynamic_symbol,
5514 info);
5515
5516 return TRUE;
5517 }
5518
5519 /* Return an array of PLT entry symbol values. */
5520
5521 static bfd_vma *
5522 elf_x86_64_get_plt_sym_val (bfd *abfd, asymbol **dynsyms, asection *plt,
5523 asection *relplt)
5524 {
5525 bfd_boolean (*slurp_relocs) (bfd *, asection *, asymbol **, bfd_boolean);
5526 arelent *p;
5527 long count, i;
5528 bfd_vma *plt_sym_val;
5529 bfd_vma plt_offset;
5530 bfd_byte *plt_contents;
5531 const struct elf_x86_64_backend_data *bed;
5532 Elf_Internal_Shdr *hdr;
5533 asection *plt_bnd;
5534
5535 /* Get the .plt section contents. PLT passed down may point to the
5536 .plt.bnd section. Make sure that PLT always points to the .plt
5537 section. */
5538 plt_bnd = bfd_get_section_by_name (abfd, ".plt.bnd");
5539 if (plt_bnd)
5540 {
5541 if (plt != plt_bnd)
5542 abort ();
5543 plt = bfd_get_section_by_name (abfd, ".plt");
5544 if (plt == NULL)
5545 abort ();
5546 bed = &elf_x86_64_bnd_arch_bed;
5547 }
5548 else
5549 bed = get_elf_x86_64_backend_data (abfd);
5550
5551 plt_contents = (bfd_byte *) bfd_malloc (plt->size);
5552 if (plt_contents == NULL)
5553 return NULL;
5554 if (!bfd_get_section_contents (abfd, (asection *) plt,
5555 plt_contents, 0, plt->size))
5556 {
5557 bad_return:
5558 free (plt_contents);
5559 return NULL;
5560 }
5561
5562 slurp_relocs = get_elf_backend_data (abfd)->s->slurp_reloc_table;
5563 if (! (*slurp_relocs) (abfd, relplt, dynsyms, TRUE))
5564 goto bad_return;
5565
5566 hdr = &elf_section_data (relplt)->this_hdr;
5567 count = relplt->size / hdr->sh_entsize;
5568
5569 plt_sym_val = (bfd_vma *) bfd_malloc (sizeof (bfd_vma) * count);
5570 if (plt_sym_val == NULL)
5571 goto bad_return;
5572
5573 for (i = 0; i < count; i++)
5574 plt_sym_val[i] = -1;
5575
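  /* Start past the reserved first PLT entry.  */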
5576 plt_offset = bed->plt_entry_size;
5577 p = relplt->relocation;
5578 for (i = 0; i < count; i++, p++)
5579 {
5580 long reloc_index;
5581
5582 /* Skip unknown relocation. */
5583 if (p->howto == NULL)
5584 continue;
5585
5586 if (p->howto->type != R_X86_64_JUMP_SLOT
5587 && p->howto->type != R_X86_64_IRELATIVE)
5588 continue;
5589
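/* In a lazy PLT entry the relocation index is encoded as the immediate
   of the `pushq' instruction, plt_reloc_offset bytes into the entry;
   read it to find the matching relocation in RELPLT.  */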
5590 reloc_index = H_GET_32 (abfd, (plt_contents + plt_offset
5591 + bed->plt_reloc_offset));
5592 if (reloc_index >= count)
5593 abort ();
5594 if (plt_bnd)
5595 {
5596 /* This is the index in the .plt section. */
5597 long plt_index = plt_offset / bed->plt_entry_size;
5598 /* Store the VMA plus the offset within the .plt.bnd section. */
5599 plt_sym_val[reloc_index] =
5600 (plt_bnd->vma
5601 + (plt_index - 1) * sizeof (elf_x86_64_legacy_plt2_entry));
5602 }
5603 else
5604 plt_sym_val[reloc_index] = plt->vma + plt_offset;
5605 plt_offset += bed->plt_entry_size;
5606 }
5607
5608 free (plt_contents);
5609
5610 return plt_sym_val;
5611 }
5612
5613 /* Similar to _bfd_elf_get_synthetic_symtab, with .plt.bnd section
5614 support. */
5615
5616 static long
5617 elf_x86_64_get_synthetic_symtab (bfd *abfd,
5618 long symcount,
5619 asymbol **syms,
5620 long dynsymcount,
5621 asymbol **dynsyms,
5622 asymbol **ret)
5623 {
5624 /* Pass the .plt.bnd section to _bfd_elf_ifunc_get_synthetic_symtab
5625 as PLT if it exists. */
5626 asection *plt = bfd_get_section_by_name (abfd, ".plt.bnd");
5627 if (plt == NULL)
5628 plt = bfd_get_section_by_name (abfd, ".plt");
5629 return _bfd_elf_ifunc_get_synthetic_symtab (abfd, symcount, syms,
5630 dynsymcount, dynsyms, ret,
5631 plt,
5632 elf_x86_64_get_plt_sym_val);
5633 }
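/* A minimal sketch, under illustrative assumptions, of how a BFD client
   (objdump, for example) obtains the synthetic sym@plt symbols produced
   above.  Variable names are hypothetical and error handling is omitted:

     long storage = bfd_get_dynamic_symtab_upper_bound (abfd);
     asymbol **dynsyms = malloc (storage);
     long dyncount = bfd_canonicalize_dynamic_symtab (abfd, dynsyms);
     asymbol *synthsyms;
     long synthcount = bfd_get_synthetic_symtab (abfd, 0, NULL,
                                                 dyncount, dynsyms,
                                                 &synthsyms);
     for (long i = 0; i < synthcount; i++)
       printf ("%s at %#lx\n", synthsyms[i].name,
               (unsigned long) (synthsyms[i].value
                                + synthsyms[i].section->vma));  */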
5634
5635 /* Handle an x86-64 specific section when reading an object file. This
5636 is called when elfcode.h finds a section with an unknown type. */
5637
5638 static bfd_boolean
5639 elf_x86_64_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr,
5640 const char *name, int shindex)
5641 {
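/* SHT_X86_64_UNWIND is the psABI section type used for unwind
   information (.eh_frame); any other type is not ours to handle.  */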
5642 if (hdr->sh_type != SHT_X86_64_UNWIND)
5643 return FALSE;
5644
5645 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
5646 return FALSE;
5647
5648 return TRUE;
5649 }
5650
5651 /* Hook called by the linker routine which adds symbols from an object
5652 file. We use it to put SHN_X86_64_LCOMMON items in .lbss, instead
5653 of .bss. */
5654
5655 static bfd_boolean
5656 elf_x86_64_add_symbol_hook (bfd *abfd,
5657 struct bfd_link_info *info,
5658 Elf_Internal_Sym *sym,
5659 const char **namep ATTRIBUTE_UNUSED,
5660 flagword *flagsp ATTRIBUTE_UNUSED,
5661 asection **secp,
5662 bfd_vma *valp)
5663 {
5664 asection *lcomm;
5665
5666 switch (sym->st_shndx)
5667 {
5668 case SHN_X86_64_LCOMMON:
5669 lcomm = bfd_get_section_by_name (abfd, "LARGE_COMMON");
5670 if (lcomm == NULL)
5671 {
5672 lcomm = bfd_make_section_with_flags (abfd,
5673 "LARGE_COMMON",
5674 (SEC_ALLOC
5675 | SEC_IS_COMMON
5676 | SEC_LINKER_CREATED));
5677 if (lcomm == NULL)
5678 return FALSE;
5679 elf_section_flags (lcomm) |= SHF_X86_64_LARGE;
5680 }
5681 *secp = lcomm;
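/* As with ordinary common symbols, the BFD symbol value is the size.  */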
5682 *valp = sym->st_size;
5683 return TRUE;
5684 }
5685
5686 if ((ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC
5687 || ELF_ST_BIND (sym->st_info) == STB_GNU_UNIQUE)
5688 && (abfd->flags & DYNAMIC) == 0
5689 && bfd_get_flavour (info->output_bfd) == bfd_target_elf_flavour)
5690 elf_tdata (info->output_bfd)->has_gnu_symbols = TRUE;
5691
5692 return TRUE;
5693 }
5694
5695
5696 /* Given a BFD section, try to locate the corresponding ELF section
5697 index. */
5698
5699 static bfd_boolean
5700 elf_x86_64_elf_section_from_bfd_section (bfd *abfd ATTRIBUTE_UNUSED,
5701 asection *sec, int *index_return)
5702 {
5703 if (sec == &_bfd_elf_large_com_section)
5704 {
5705 *index_return = SHN_X86_64_LCOMMON;
5706 return TRUE;
5707 }
5708 return FALSE;
5709 }
5710
5711 /* Process a symbol. Map SHN_X86_64_LCOMMON symbols to the large common section. */
5712
5713 static void
5714 elf_x86_64_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
5715 asymbol *asym)
5716 {
5717 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
5718
5719 switch (elfsym->internal_elf_sym.st_shndx)
5720 {
5721 case SHN_X86_64_LCOMMON:
5722 asym->section = &_bfd_elf_large_com_section;
5723 asym->value = elfsym->internal_elf_sym.st_size;
5724 /* Common symbol doesn't set BSF_GLOBAL. */
5725 asym->flags &= ~BSF_GLOBAL;
5726 break;
5727 }
5728 }
5729
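/* Return TRUE if SYM is a common symbol definition, ordinary or large.  */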
5730 static bfd_boolean
5731 elf_x86_64_common_definition (Elf_Internal_Sym *sym)
5732 {
5733 return (sym->st_shndx == SHN_COMMON
5734 || sym->st_shndx == SHN_X86_64_LCOMMON);
5735 }
5736
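/* Return the special section index used when writing out a common
   symbol placed in SEC.  */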
5737 static unsigned int
5738 elf_x86_64_common_section_index (asection *sec)
5739 {
5740 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
5741 return SHN_COMMON;
5742 else
5743 return SHN_X86_64_LCOMMON;
5744 }
5745
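/* Return the BFD common section corresponding to SEC.  */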
5746 static asection *
5747 elf_x86_64_common_section (asection *sec)
5748 {
5749 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
5750 return bfd_com_section_ptr;
5751 else
5752 return &_bfd_elf_large_com_section;
5753 }
5754
5755 static bfd_boolean
5756 elf_x86_64_merge_symbol (struct elf_link_hash_entry *h,
5757 const Elf_Internal_Sym *sym,
5758 asection **psec,
5759 bfd_boolean newdef,
5760 bfd_boolean olddef,
5761 bfd *oldbfd,
5762 const asection *oldsec)
5763 {
5764 /* A normal common symbol and a large common symbol result in a
5765 normal common symbol. We turn the large common symbol into a
5766 normal one. */
5767 if (!olddef
5768 && h->root.type == bfd_link_hash_common
5769 && !newdef
5770 && bfd_is_com_section (*psec)
5771 && oldsec != *psec)
5772 {
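/* The existing symbol is a large common and the new one is an ordinary
   common: demote the old common section to a plain COMMON section.  */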
5773 if (sym->st_shndx == SHN_COMMON
5774 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) != 0)
5775 {
5776 h->root.u.c.p->section
5777 = bfd_make_section_old_way (oldbfd, "COMMON");
5778 h->root.u.c.p->section->flags = SEC_ALLOC;
5779 }
5780 else if (sym->st_shndx == SHN_X86_64_LCOMMON
5781 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) == 0)
5782 *psec = bfd_com_section_ptr;
5783 }
5784
5785 return TRUE;
5786 }
5787
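/* Return the number of additional program headers needed for the large
   sections, so that loadable .lrodata and .ldata can be given their own
   segments.  */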
5788 static int
5789 elf_x86_64_additional_program_headers (bfd *abfd,
5790 struct bfd_link_info *info ATTRIBUTE_UNUSED)
5791 {
5792 asection *s;
5793 int count = 0;
5794
5795 /* Check to see if we need a large readonly segment. */
5796 s = bfd_get_section_by_name (abfd, ".lrodata");
5797 if (s && (s->flags & SEC_LOAD))
5798 count++;
5799
5800 /* Check to see if we need a large data segment. Since the .lbss section
5801 is placed right after the .bss section, there should be no need for
5802 a large data segment just because of .lbss. */
5803 s = bfd_get_section_by_name (abfd, ".ldata");
5804 if (s && (s->flags & SEC_LOAD))
5805 count++;
5806
5807 return count;
5808 }
5809
5810 /* Return TRUE if symbol should be hashed in the `.gnu.hash' section. */
5811
5812 static bfd_boolean
5813 elf_x86_64_hash_symbol (struct elf_link_hash_entry *h)
5814 {
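/* A symbol that has a PLT entry, is not defined in a regular object and
   whose address is never taken is only ever reached through its PLT
   entry, so it does not need to be findable via `.gnu.hash'.  */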
5815 if (h->plt.offset != (bfd_vma) -1
5816 && !h->def_regular
5817 && !h->pointer_equality_needed)
5818 return FALSE;
5819
5820 return _bfd_elf_hash_symbol (h);
5821 }
5822
5823 /* Return TRUE iff relocations for INPUT are compatible with OUTPUT. */
5824
5825 static bfd_boolean
5826 elf_x86_64_relocs_compatible (const bfd_target *input,
5827 const bfd_target *output)
5828 {
5829 return ((xvec_get_elf_backend_data (input)->s->elfclass
5830 == xvec_get_elf_backend_data (output)->s->elfclass)
5831 && _bfd_elf_relocs_compatible (input, output));
5832 }
5833
5834 static const struct bfd_elf_special_section
5835 elf_x86_64_special_sections[]=
5836 {
5837 { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5838 { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
5839 { STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE},
5840 { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5841 { STRING_COMMA_LEN (".ldata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5842 { STRING_COMMA_LEN (".lrodata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
5843 { NULL, 0, 0, 0, 0 }
5844 };
5845
5846 #define TARGET_LITTLE_SYM x86_64_elf64_vec
5847 #define TARGET_LITTLE_NAME "elf64-x86-64"
5848 #define ELF_ARCH bfd_arch_i386
5849 #define ELF_TARGET_ID X86_64_ELF_DATA
5850 #define ELF_MACHINE_CODE EM_X86_64
5851 #define ELF_MAXPAGESIZE 0x200000
5852 #define ELF_MINPAGESIZE 0x1000
5853 #define ELF_COMMONPAGESIZE 0x1000
5854
5855 #define elf_backend_can_gc_sections 1
5856 #define elf_backend_can_refcount 1
5857 #define elf_backend_want_got_plt 1
5858 #define elf_backend_plt_readonly 1
5859 #define elf_backend_want_plt_sym 0
5860 #define elf_backend_got_header_size (GOT_ENTRY_SIZE*3)
5861 #define elf_backend_rela_normal 1
5862 #define elf_backend_plt_alignment 4
5863
5864 #define elf_info_to_howto elf_x86_64_info_to_howto
5865
5866 #define bfd_elf64_bfd_link_hash_table_create \
5867 elf_x86_64_link_hash_table_create
5868 #define bfd_elf64_bfd_reloc_type_lookup elf_x86_64_reloc_type_lookup
5869 #define bfd_elf64_bfd_reloc_name_lookup \
5870 elf_x86_64_reloc_name_lookup
5871
5872 #define elf_backend_adjust_dynamic_symbol elf_x86_64_adjust_dynamic_symbol
5873 #define elf_backend_relocs_compatible elf_x86_64_relocs_compatible
5874 #define elf_backend_check_relocs elf_x86_64_check_relocs
5875 #define elf_backend_copy_indirect_symbol elf_x86_64_copy_indirect_symbol
5876 #define elf_backend_create_dynamic_sections elf_x86_64_create_dynamic_sections
5877 #define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections
5878 #define elf_backend_finish_dynamic_symbol elf_x86_64_finish_dynamic_symbol
5879 #define elf_backend_gc_mark_hook elf_x86_64_gc_mark_hook
5880 #define elf_backend_gc_sweep_hook elf_x86_64_gc_sweep_hook
5881 #define elf_backend_grok_prstatus elf_x86_64_grok_prstatus
5882 #define elf_backend_grok_psinfo elf_x86_64_grok_psinfo
5883 #ifdef CORE_HEADER
5884 #define elf_backend_write_core_note elf_x86_64_write_core_note
5885 #endif
5886 #define elf_backend_reloc_type_class elf_x86_64_reloc_type_class
5887 #define elf_backend_relocate_section elf_x86_64_relocate_section
5888 #define elf_backend_size_dynamic_sections elf_x86_64_size_dynamic_sections
5889 #define elf_backend_always_size_sections elf_x86_64_always_size_sections
5890 #define elf_backend_init_index_section _bfd_elf_init_1_index_section
5891 #define elf_backend_object_p elf64_x86_64_elf_object_p
5892 #define bfd_elf64_mkobject elf_x86_64_mkobject
5893 #define bfd_elf64_get_synthetic_symtab elf_x86_64_get_synthetic_symtab
5894
5895 #define elf_backend_section_from_shdr \
5896 elf_x86_64_section_from_shdr
5897
5898 #define elf_backend_section_from_bfd_section \
5899 elf_x86_64_elf_section_from_bfd_section
5900 #define elf_backend_add_symbol_hook \
5901 elf_x86_64_add_symbol_hook
5902 #define elf_backend_symbol_processing \
5903 elf_x86_64_symbol_processing
5904 #define elf_backend_common_section_index \
5905 elf_x86_64_common_section_index
5906 #define elf_backend_common_section \
5907 elf_x86_64_common_section
5908 #define elf_backend_common_definition \
5909 elf_x86_64_common_definition
5910 #define elf_backend_merge_symbol \
5911 elf_x86_64_merge_symbol
5912 #define elf_backend_special_sections \
5913 elf_x86_64_special_sections
5914 #define elf_backend_additional_program_headers \
5915 elf_x86_64_additional_program_headers
5916 #define elf_backend_hash_symbol \
5917 elf_x86_64_hash_symbol
5918
5919 #include "elf64-target.h"
5920
5921 /* FreeBSD support. */
5922
5923 #undef TARGET_LITTLE_SYM
5924 #define TARGET_LITTLE_SYM x86_64_elf64_fbsd_vec
5925 #undef TARGET_LITTLE_NAME
5926 #define TARGET_LITTLE_NAME "elf64-x86-64-freebsd"
5927
5928 #undef ELF_OSABI
5929 #define ELF_OSABI ELFOSABI_FREEBSD
5930
5931 #undef elf64_bed
5932 #define elf64_bed elf64_x86_64_fbsd_bed
5933
5934 #include "elf64-target.h"
5935
5936 /* Solaris 2 support. */
5937
5938 #undef TARGET_LITTLE_SYM
5939 #define TARGET_LITTLE_SYM x86_64_elf64_sol2_vec
5940 #undef TARGET_LITTLE_NAME
5941 #define TARGET_LITTLE_NAME "elf64-x86-64-sol2"
5942
5943 /* Restore default: we cannot use ELFOSABI_SOLARIS, otherwise ELFOSABI_NONE
5944 objects won't be recognized. */
5945 #undef ELF_OSABI
5946
5947 #undef elf64_bed
5948 #define elf64_bed elf64_x86_64_sol2_bed
5949
5950 /* The 64-bit static TLS arena size is rounded to the nearest 16-byte
5951 boundary. */
5952 #undef elf_backend_static_tls_alignment
5953 #define elf_backend_static_tls_alignment 16
5954
5955 /* The Solaris 2 ABI requires a plt symbol on all platforms.
5956
5957 Cf. Linker and Libraries Guide, Ch. 2, Link-Editor, Generating the Output
5958 File, p.63. */
5959 #undef elf_backend_want_plt_sym
5960 #define elf_backend_want_plt_sym 1
5961
5962 #include "elf64-target.h"
5963
5964 /* Native Client support. */
5965
5966 static bfd_boolean
5967 elf64_x86_64_nacl_elf_object_p (bfd *abfd)
5968 {
5969 /* Set the right machine number for a NaCl x86-64 ELF64 file. */
5970 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64_nacl);
5971 return TRUE;
5972 }
5973
5974 #undef TARGET_LITTLE_SYM
5975 #define TARGET_LITTLE_SYM x86_64_elf64_nacl_vec
5976 #undef TARGET_LITTLE_NAME
5977 #define TARGET_LITTLE_NAME "elf64-x86-64-nacl"
5978 #undef elf64_bed
5979 #define elf64_bed elf64_x86_64_nacl_bed
5980
5981 #undef ELF_MAXPAGESIZE
5982 #undef ELF_MINPAGESIZE
5983 #undef ELF_COMMONPAGESIZE
5984 #define ELF_MAXPAGESIZE 0x10000
5985 #define ELF_MINPAGESIZE 0x10000
5986 #define ELF_COMMONPAGESIZE 0x10000
5987
5988 /* Restore defaults. */
5989 #undef ELF_OSABI
5990 #undef elf_backend_static_tls_alignment
5991 #undef elf_backend_want_plt_sym
5992 #define elf_backend_want_plt_sym 0
5993
5994 /* NaCl uses substantially different PLT entries for the same effects. */
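/* Indirect branch targets must honour NaCl's software fault isolation
   rules: the entries below mask the target in %r11 down to a 32-byte
   bundle boundary and add the sandbox base in %r15 before jumping.  */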
5995
5996 #undef elf_backend_plt_alignment
5997 #define elf_backend_plt_alignment 5
5998 #define NACL_PLT_ENTRY_SIZE 64
5999 #define NACLMASK 0xe0 /* 32-byte alignment mask. */
6000
6001 static const bfd_byte elf_x86_64_nacl_plt0_entry[NACL_PLT_ENTRY_SIZE] =
6002 {
6003 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
6004 0x4c, 0x8b, 0x1d, 16, 0, 0, 0, /* mov GOT+16(%rip), %r11 */
6005 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
6006 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
6007 0x41, 0xff, 0xe3, /* jmpq *%r11 */
6008
6009 /* 9-byte nop sequence to pad out to the next 32-byte boundary. */
6010 0x66, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw 0x0(%rax,%rax,1) */
6011
6012 /* 32 bytes of nop to pad out to the standard size. */
6013 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
6014 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6015 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
6016 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6017 0x66, /* excess data32 prefix */
6018 0x90 /* nop */
6019 };
6020
6021 static const bfd_byte elf_x86_64_nacl_plt_entry[NACL_PLT_ENTRY_SIZE] =
6022 {
6023 0x4c, 0x8b, 0x1d, 0, 0, 0, 0, /* mov name@GOTPCREL(%rip),%r11 */
6024 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
6025 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
6026 0x41, 0xff, 0xe3, /* jmpq *%r11 */
6027
6028 /* 15-byte nop sequence to pad out to the next 32-byte boundary. */
6029 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
6030 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6031
6032 /* Lazy GOT entries point here (32-byte aligned). */
6033 0x68, /* pushq immediate */
6034 0, 0, 0, 0, /* replaced with index into relocation table. */
6035 0xe9, /* jmp relative */
6036 0, 0, 0, 0, /* replaced with offset to start of .plt0. */
6037
6038 /* 22 bytes of nop to pad out to the standard size. */
6039 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
6040 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6041 0x0f, 0x1f, 0x80, 0, 0, 0, 0, /* nopl 0x0(%rax) */
6042 };
6043
6044 /* .eh_frame covering the .plt section. */
6045
6046 static const bfd_byte elf_x86_64_nacl_eh_frame_plt[] =
6047 {
6048 #if (PLT_CIE_LENGTH != 20 \
6049 || PLT_FDE_LENGTH != 36 \
6050 || PLT_FDE_START_OFFSET != 4 + PLT_CIE_LENGTH + 8 \
6051 || PLT_FDE_LEN_OFFSET != 4 + PLT_CIE_LENGTH + 12)
6052 # error "Need elf_x86_64_backend_data parameters for eh_frame_plt offsets!"
6053 #endif
6054 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
6055 0, 0, 0, 0, /* CIE ID */
6056 1, /* CIE version */
6057 'z', 'R', 0, /* Augmentation string */
6058 1, /* Code alignment factor */
6059 0x78, /* Data alignment factor */
6060 16, /* Return address column */
6061 1, /* Augmentation size */
6062 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
6063 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
6064 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
6065 DW_CFA_nop, DW_CFA_nop,
6066
6067 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
6068 PLT_CIE_LENGTH + 8, 0, 0, 0,/* CIE pointer */
6069 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
6070 0, 0, 0, 0, /* .plt size goes here */
6071 0, /* Augmentation size */
6072 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
6073 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
6074 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
6075 DW_CFA_advance_loc + 58, /* DW_CFA_advance_loc: 58 to __PLT__+64 */
6076 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
6077 13, /* Block length */
6078 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
6079 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
6080 DW_OP_const1u, 63, DW_OP_and, DW_OP_const1u, 37, DW_OP_ge,
6081 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
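/* The expression above evaluates to %rsp + 8, or %rsp + 16 once %rip is
   past the 5-byte pushq of the relocation index (offset 37 within the
   64-byte PLT entry).  */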
6082 DW_CFA_nop, DW_CFA_nop
6083 };
6084
6085 static const struct elf_x86_64_backend_data elf_x86_64_nacl_arch_bed =
6086 {
6087 elf_x86_64_nacl_plt0_entry, /* plt0_entry */
6088 elf_x86_64_nacl_plt_entry, /* plt_entry */
6089 NACL_PLT_ENTRY_SIZE, /* plt_entry_size */
6090 2, /* plt0_got1_offset */
6091 9, /* plt0_got2_offset */
6092 13, /* plt0_got2_insn_end */
6093 3, /* plt_got_offset */
6094 33, /* plt_reloc_offset */
6095 38, /* plt_plt_offset */
6096 7, /* plt_got_insn_size */
6097 42, /* plt_plt_insn_end */
6098 32, /* plt_lazy_offset */
6099 elf_x86_64_nacl_eh_frame_plt, /* eh_frame_plt */
6100 sizeof (elf_x86_64_nacl_eh_frame_plt), /* eh_frame_plt_size */
6101 };
6102
6103 #undef elf_backend_arch_data
6104 #define elf_backend_arch_data &elf_x86_64_nacl_arch_bed
6105
6106 #undef elf_backend_object_p
6107 #define elf_backend_object_p elf64_x86_64_nacl_elf_object_p
6108 #undef elf_backend_modify_segment_map
6109 #define elf_backend_modify_segment_map nacl_modify_segment_map
6110 #undef elf_backend_modify_program_headers
6111 #define elf_backend_modify_program_headers nacl_modify_program_headers
6112 #undef elf_backend_final_write_processing
6113 #define elf_backend_final_write_processing nacl_final_write_processing
6114
6115 #include "elf64-target.h"
6116
6117 /* Native Client x32 support. */
6118
6119 static bfd_boolean
6120 elf32_x86_64_nacl_elf_object_p (bfd *abfd)
6121 {
6122 /* Set the right machine number for a NaCl x86-64 ELF32 file. */
6123 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32_nacl);
6124 return TRUE;
6125 }
6126
6127 #undef TARGET_LITTLE_SYM
6128 #define TARGET_LITTLE_SYM x86_64_elf32_nacl_vec
6129 #undef TARGET_LITTLE_NAME
6130 #define TARGET_LITTLE_NAME "elf32-x86-64-nacl"
6131 #undef elf32_bed
6132 #define elf32_bed elf32_x86_64_nacl_bed
6133
6134 #define bfd_elf32_bfd_link_hash_table_create \
6135 elf_x86_64_link_hash_table_create
6136 #define bfd_elf32_bfd_reloc_type_lookup \
6137 elf_x86_64_reloc_type_lookup
6138 #define bfd_elf32_bfd_reloc_name_lookup \
6139 elf_x86_64_reloc_name_lookup
6140 #define bfd_elf32_mkobject \
6141 elf_x86_64_mkobject
6142 #define bfd_elf32_get_synthetic_symtab \
6143 elf_x86_64_get_synthetic_symtab
6144
6145 #undef elf_backend_object_p
6146 #define elf_backend_object_p \
6147 elf32_x86_64_nacl_elf_object_p
6148
6149 #undef elf_backend_bfd_from_remote_memory
6150 #define elf_backend_bfd_from_remote_memory \
6151 _bfd_elf32_bfd_from_remote_memory
6152
6153 #undef elf_backend_size_info
6154 #define elf_backend_size_info \
6155 _bfd_elf32_size_info
6156
6157 #include "elf32-target.h"
6158
6159 /* Restore defaults. */
6160 #undef elf_backend_object_p
6161 #define elf_backend_object_p elf64_x86_64_elf_object_p
6162 #undef elf_backend_bfd_from_remote_memory
6163 #undef elf_backend_size_info
6164 #undef elf_backend_modify_segment_map
6165 #undef elf_backend_modify_program_headers
6166 #undef elf_backend_final_write_processing
6167
6168 /* Intel L1OM support. */
6169
6170 static bfd_boolean
6171 elf64_l1om_elf_object_p (bfd *abfd)
6172 {
6173 /* Set the right machine number for an L1OM elf64 file. */
6174 bfd_default_set_arch_mach (abfd, bfd_arch_l1om, bfd_mach_l1om);
6175 return TRUE;
6176 }
6177
6178 #undef TARGET_LITTLE_SYM
6179 #define TARGET_LITTLE_SYM l1om_elf64_vec
6180 #undef TARGET_LITTLE_NAME
6181 #define TARGET_LITTLE_NAME "elf64-l1om"
6182 #undef ELF_ARCH
6183 #define ELF_ARCH bfd_arch_l1om
6184
6185 #undef ELF_MACHINE_CODE
6186 #define ELF_MACHINE_CODE EM_L1OM
6187
6188 #undef ELF_OSABI
6189
6190 #undef elf64_bed
6191 #define elf64_bed elf64_l1om_bed
6192
6193 #undef elf_backend_object_p
6194 #define elf_backend_object_p elf64_l1om_elf_object_p
6195
6196 /* Restore defaults. */
6197 #undef ELF_MAXPAGESIZE
6198 #undef ELF_MINPAGESIZE
6199 #undef ELF_COMMONPAGESIZE
6200 #define ELF_MAXPAGESIZE 0x200000
6201 #define ELF_MINPAGESIZE 0x1000
6202 #define ELF_COMMONPAGESIZE 0x1000
6203 #undef elf_backend_plt_alignment
6204 #define elf_backend_plt_alignment 4
6205 #undef elf_backend_arch_data
6206 #define elf_backend_arch_data &elf_x86_64_arch_bed
6207
6208 #include "elf64-target.h"
6209
6210 /* FreeBSD L1OM support. */
6211
6212 #undef TARGET_LITTLE_SYM
6213 #define TARGET_LITTLE_SYM l1om_elf64_fbsd_vec
6214 #undef TARGET_LITTLE_NAME
6215 #define TARGET_LITTLE_NAME "elf64-l1om-freebsd"
6216
6217 #undef ELF_OSABI
6218 #define ELF_OSABI ELFOSABI_FREEBSD
6219
6220 #undef elf64_bed
6221 #define elf64_bed elf64_l1om_fbsd_bed
6222
6223 #include "elf64-target.h"
6224
6225 /* Intel K1OM support. */
6226
6227 static bfd_boolean
6228 elf64_k1om_elf_object_p (bfd *abfd)
6229 {
6230 /* Set the right machine number for a K1OM elf64 file. */
6231 bfd_default_set_arch_mach (abfd, bfd_arch_k1om, bfd_mach_k1om);
6232 return TRUE;
6233 }
6234
6235 #undef TARGET_LITTLE_SYM
6236 #define TARGET_LITTLE_SYM k1om_elf64_vec
6237 #undef TARGET_LITTLE_NAME
6238 #define TARGET_LITTLE_NAME "elf64-k1om"
6239 #undef ELF_ARCH
6240 #define ELF_ARCH bfd_arch_k1om
6241
6242 #undef ELF_MACHINE_CODE
6243 #define ELF_MACHINE_CODE EM_K1OM
6244
6245 #undef ELF_OSABI
6246
6247 #undef elf64_bed
6248 #define elf64_bed elf64_k1om_bed
6249
6250 #undef elf_backend_object_p
6251 #define elf_backend_object_p elf64_k1om_elf_object_p
6252
6253 #undef elf_backend_static_tls_alignment
6254
6255 #undef elf_backend_want_plt_sym
6256 #define elf_backend_want_plt_sym 0
6257
6258 #include "elf64-target.h"
6259
6260 /* FreeBSD K1OM support. */
6261
6262 #undef TARGET_LITTLE_SYM
6263 #define TARGET_LITTLE_SYM k1om_elf64_fbsd_vec
6264 #undef TARGET_LITTLE_NAME
6265 #define TARGET_LITTLE_NAME "elf64-k1om-freebsd"
6266
6267 #undef ELF_OSABI
6268 #define ELF_OSABI ELFOSABI_FREEBSD
6269
6270 #undef elf64_bed
6271 #define elf64_bed elf64_k1om_fbsd_bed
6272
6273 #include "elf64-target.h"
6274
6275 /* 32-bit x86-64 support. */
6276
6277 #undef TARGET_LITTLE_SYM
6278 #define TARGET_LITTLE_SYM x86_64_elf32_vec
6279 #undef TARGET_LITTLE_NAME
6280 #define TARGET_LITTLE_NAME "elf32-x86-64"
6281 #undef elf32_bed
6282
6283 #undef ELF_ARCH
6284 #define ELF_ARCH bfd_arch_i386
6285
6286 #undef ELF_MACHINE_CODE
6287 #define ELF_MACHINE_CODE EM_X86_64
6288
6289 #undef ELF_OSABI
6290
6291 #undef elf_backend_object_p
6292 #define elf_backend_object_p \
6293 elf32_x86_64_elf_object_p
6294
6295 #undef elf_backend_bfd_from_remote_memory
6296 #define elf_backend_bfd_from_remote_memory \
6297 _bfd_elf32_bfd_from_remote_memory
6298
6299 #undef elf_backend_size_info
6300 #define elf_backend_size_info \
6301 _bfd_elf32_size_info
6302
6303 #include "elf32-target.h"