1 /* X86-64 specific support for ELF
2 Copyright (C) 2000-2015 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka <jh@suse.cz>.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "sysdep.h"
23 #include "bfd.h"
24 #include "bfdlink.h"
25 #include "libbfd.h"
26 #include "elf-bfd.h"
27 #include "elf-nacl.h"
28 #include "bfd_stdint.h"
29 #include "objalloc.h"
30 #include "hashtab.h"
31 #include "dwarf2.h"
32 #include "libiberty.h"
33
34 #include "elf/x86-64.h"
35
36 #ifdef CORE_HEADER
37 #include <stdarg.h>
38 #include CORE_HEADER
39 #endif
40
41 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
42 #define MINUS_ONE (~ (bfd_vma) 0)
43
44 /* Since both 32-bit and 64-bit x86-64 encode the relocation type in an
45 identical manner, we use ELF32_R_TYPE instead of ELF64_R_TYPE to get the
46 relocation type. We also use ELF_ST_TYPE instead of ELF64_ST_TYPE
47 since they are the same. */
48
49 #define ABI_64_P(abfd) \
50 (get_elf_backend_data (abfd)->s->elfclass == ELFCLASS64)
51
52 /* The relocation "howto" table. Order of fields:
53 type, rightshift, size, bitsize, pc_relative, bitpos, complain_on_overflow,
54 special_function, name, partial_inplace, src_mask, dst_mask, pcrel_offset. */
55 static reloc_howto_type x86_64_elf_howto_table[] =
56 {
57 HOWTO(R_X86_64_NONE, 0, 0, 0, FALSE, 0, complain_overflow_dont,
58 bfd_elf_generic_reloc, "R_X86_64_NONE", FALSE, 0x00000000, 0x00000000,
59 FALSE),
60 HOWTO(R_X86_64_64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
61 bfd_elf_generic_reloc, "R_X86_64_64", FALSE, MINUS_ONE, MINUS_ONE,
62 FALSE),
63 HOWTO(R_X86_64_PC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
64 bfd_elf_generic_reloc, "R_X86_64_PC32", FALSE, 0xffffffff, 0xffffffff,
65 TRUE),
66 HOWTO(R_X86_64_GOT32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
67 bfd_elf_generic_reloc, "R_X86_64_GOT32", FALSE, 0xffffffff, 0xffffffff,
68 FALSE),
69 HOWTO(R_X86_64_PLT32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
70 bfd_elf_generic_reloc, "R_X86_64_PLT32", FALSE, 0xffffffff, 0xffffffff,
71 TRUE),
72 HOWTO(R_X86_64_COPY, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
73 bfd_elf_generic_reloc, "R_X86_64_COPY", FALSE, 0xffffffff, 0xffffffff,
74 FALSE),
75 HOWTO(R_X86_64_GLOB_DAT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
76 bfd_elf_generic_reloc, "R_X86_64_GLOB_DAT", FALSE, MINUS_ONE,
77 MINUS_ONE, FALSE),
78 HOWTO(R_X86_64_JUMP_SLOT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
79 bfd_elf_generic_reloc, "R_X86_64_JUMP_SLOT", FALSE, MINUS_ONE,
80 MINUS_ONE, FALSE),
81 HOWTO(R_X86_64_RELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
82 bfd_elf_generic_reloc, "R_X86_64_RELATIVE", FALSE, MINUS_ONE,
83 MINUS_ONE, FALSE),
84 HOWTO(R_X86_64_GOTPCREL, 0, 2, 32, TRUE, 0, complain_overflow_signed,
85 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL", FALSE, 0xffffffff,
86 0xffffffff, TRUE),
87 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
88 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
89 FALSE),
90 HOWTO(R_X86_64_32S, 0, 2, 32, FALSE, 0, complain_overflow_signed,
91 bfd_elf_generic_reloc, "R_X86_64_32S", FALSE, 0xffffffff, 0xffffffff,
92 FALSE),
93 HOWTO(R_X86_64_16, 0, 1, 16, FALSE, 0, complain_overflow_bitfield,
94 bfd_elf_generic_reloc, "R_X86_64_16", FALSE, 0xffff, 0xffff, FALSE),
95 HOWTO(R_X86_64_PC16, 0, 1, 16, TRUE, 0, complain_overflow_bitfield,
96 bfd_elf_generic_reloc, "R_X86_64_PC16", FALSE, 0xffff, 0xffff, TRUE),
97 HOWTO(R_X86_64_8, 0, 0, 8, FALSE, 0, complain_overflow_bitfield,
98 bfd_elf_generic_reloc, "R_X86_64_8", FALSE, 0xff, 0xff, FALSE),
99 HOWTO(R_X86_64_PC8, 0, 0, 8, TRUE, 0, complain_overflow_signed,
100 bfd_elf_generic_reloc, "R_X86_64_PC8", FALSE, 0xff, 0xff, TRUE),
101 HOWTO(R_X86_64_DTPMOD64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
102 bfd_elf_generic_reloc, "R_X86_64_DTPMOD64", FALSE, MINUS_ONE,
103 MINUS_ONE, FALSE),
104 HOWTO(R_X86_64_DTPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
105 bfd_elf_generic_reloc, "R_X86_64_DTPOFF64", FALSE, MINUS_ONE,
106 MINUS_ONE, FALSE),
107 HOWTO(R_X86_64_TPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
108 bfd_elf_generic_reloc, "R_X86_64_TPOFF64", FALSE, MINUS_ONE,
109 MINUS_ONE, FALSE),
110 HOWTO(R_X86_64_TLSGD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
111 bfd_elf_generic_reloc, "R_X86_64_TLSGD", FALSE, 0xffffffff,
112 0xffffffff, TRUE),
113 HOWTO(R_X86_64_TLSLD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
114 bfd_elf_generic_reloc, "R_X86_64_TLSLD", FALSE, 0xffffffff,
115 0xffffffff, TRUE),
116 HOWTO(R_X86_64_DTPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
117 bfd_elf_generic_reloc, "R_X86_64_DTPOFF32", FALSE, 0xffffffff,
118 0xffffffff, FALSE),
119 HOWTO(R_X86_64_GOTTPOFF, 0, 2, 32, TRUE, 0, complain_overflow_signed,
120 bfd_elf_generic_reloc, "R_X86_64_GOTTPOFF", FALSE, 0xffffffff,
121 0xffffffff, TRUE),
122 HOWTO(R_X86_64_TPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
123 bfd_elf_generic_reloc, "R_X86_64_TPOFF32", FALSE, 0xffffffff,
124 0xffffffff, FALSE),
125 HOWTO(R_X86_64_PC64, 0, 4, 64, TRUE, 0, complain_overflow_bitfield,
126 bfd_elf_generic_reloc, "R_X86_64_PC64", FALSE, MINUS_ONE, MINUS_ONE,
127 TRUE),
128 HOWTO(R_X86_64_GOTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
129 bfd_elf_generic_reloc, "R_X86_64_GOTOFF64",
130 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
131 HOWTO(R_X86_64_GOTPC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
132 bfd_elf_generic_reloc, "R_X86_64_GOTPC32",
133 FALSE, 0xffffffff, 0xffffffff, TRUE),
134 HOWTO(R_X86_64_GOT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
135 bfd_elf_generic_reloc, "R_X86_64_GOT64", FALSE, MINUS_ONE, MINUS_ONE,
136 FALSE),
137 HOWTO(R_X86_64_GOTPCREL64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
138 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL64", FALSE, MINUS_ONE,
139 MINUS_ONE, TRUE),
140 HOWTO(R_X86_64_GOTPC64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
141 bfd_elf_generic_reloc, "R_X86_64_GOTPC64",
142 FALSE, MINUS_ONE, MINUS_ONE, TRUE),
143 HOWTO(R_X86_64_GOTPLT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
144 bfd_elf_generic_reloc, "R_X86_64_GOTPLT64", FALSE, MINUS_ONE,
145 MINUS_ONE, FALSE),
146 HOWTO(R_X86_64_PLTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
147 bfd_elf_generic_reloc, "R_X86_64_PLTOFF64", FALSE, MINUS_ONE,
148 MINUS_ONE, FALSE),
149 HOWTO(R_X86_64_SIZE32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
150 bfd_elf_generic_reloc, "R_X86_64_SIZE32", FALSE, 0xffffffff, 0xffffffff,
151 FALSE),
152 HOWTO(R_X86_64_SIZE64, 0, 4, 64, FALSE, 0, complain_overflow_unsigned,
153 bfd_elf_generic_reloc, "R_X86_64_SIZE64", FALSE, MINUS_ONE, MINUS_ONE,
154 FALSE),
155 HOWTO(R_X86_64_GOTPC32_TLSDESC, 0, 2, 32, TRUE, 0,
156 complain_overflow_bitfield, bfd_elf_generic_reloc,
157 "R_X86_64_GOTPC32_TLSDESC",
158 FALSE, 0xffffffff, 0xffffffff, TRUE),
159 HOWTO(R_X86_64_TLSDESC_CALL, 0, 0, 0, FALSE, 0,
160 complain_overflow_dont, bfd_elf_generic_reloc,
161 "R_X86_64_TLSDESC_CALL",
162 FALSE, 0, 0, FALSE),
163 HOWTO(R_X86_64_TLSDESC, 0, 4, 64, FALSE, 0,
164 complain_overflow_bitfield, bfd_elf_generic_reloc,
165 "R_X86_64_TLSDESC",
166 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
167 HOWTO(R_X86_64_IRELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
168 bfd_elf_generic_reloc, "R_X86_64_IRELATIVE", FALSE, MINUS_ONE,
169 MINUS_ONE, FALSE),
170 HOWTO(R_X86_64_RELATIVE64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
171 bfd_elf_generic_reloc, "R_X86_64_RELATIVE64", FALSE, MINUS_ONE,
172 MINUS_ONE, FALSE),
173 HOWTO(R_X86_64_PC32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
174 bfd_elf_generic_reloc, "R_X86_64_PC32_BND", FALSE, 0xffffffff, 0xffffffff,
175 TRUE),
176 HOWTO(R_X86_64_PLT32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
177 bfd_elf_generic_reloc, "R_X86_64_PLT32_BND", FALSE, 0xffffffff, 0xffffffff,
178 TRUE),
179
180 /* We have a gap in the reloc numbers here.
181 R_X86_64_standard counts the number up to this point, and
182 R_X86_64_vt_offset is the value to subtract from a reloc type of
183 R_X86_64_GNU_VT* to form an index into this table. */
184 #define R_X86_64_standard (R_X86_64_PLT32_BND + 1)
185 #define R_X86_64_vt_offset (R_X86_64_GNU_VTINHERIT - R_X86_64_standard)
186
187 /* GNU extension to record C++ vtable hierarchy. */
188 HOWTO (R_X86_64_GNU_VTINHERIT, 0, 4, 0, FALSE, 0, complain_overflow_dont,
189 NULL, "R_X86_64_GNU_VTINHERIT", FALSE, 0, 0, FALSE),
190
191 /* GNU extension to record C++ vtable member usage. */
192 HOWTO (R_X86_64_GNU_VTENTRY, 0, 4, 0, FALSE, 0, complain_overflow_dont,
193 _bfd_elf_rel_vtable_reloc_fn, "R_X86_64_GNU_VTENTRY", FALSE, 0, 0,
194 FALSE),
195
196 /* Use complain_overflow_bitfield on R_X86_64_32 for x32. */
197 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
198 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
199 FALSE)
200 };
201
202 #define IS_X86_64_PCREL_TYPE(TYPE) \
203 ( ((TYPE) == R_X86_64_PC8) \
204 || ((TYPE) == R_X86_64_PC16) \
205 || ((TYPE) == R_X86_64_PC32) \
206 || ((TYPE) == R_X86_64_PC32_BND) \
207 || ((TYPE) == R_X86_64_PC64))
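/* Dynamic relocation space reserved for one of these PC-relative types
   can be discarded again when the symbol turns out to resolve locally,
   since the value is then fixed at link time; this is why the
   elf_dyn_relocs lists used below track a separate pc_count next to
   count.  */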
208
209 /* Map BFD relocs to the x86_64 elf relocs. */
210 struct elf_reloc_map
211 {
212 bfd_reloc_code_real_type bfd_reloc_val;
213 unsigned char elf_reloc_val;
214 };
215
216 static const struct elf_reloc_map x86_64_reloc_map[] =
217 {
218 { BFD_RELOC_NONE, R_X86_64_NONE, },
219 { BFD_RELOC_64, R_X86_64_64, },
220 { BFD_RELOC_32_PCREL, R_X86_64_PC32, },
221 { BFD_RELOC_X86_64_GOT32, R_X86_64_GOT32,},
222 { BFD_RELOC_X86_64_PLT32, R_X86_64_PLT32,},
223 { BFD_RELOC_X86_64_COPY, R_X86_64_COPY, },
224 { BFD_RELOC_X86_64_GLOB_DAT, R_X86_64_GLOB_DAT, },
225 { BFD_RELOC_X86_64_JUMP_SLOT, R_X86_64_JUMP_SLOT, },
226 { BFD_RELOC_X86_64_RELATIVE, R_X86_64_RELATIVE, },
227 { BFD_RELOC_X86_64_GOTPCREL, R_X86_64_GOTPCREL, },
228 { BFD_RELOC_32, R_X86_64_32, },
229 { BFD_RELOC_X86_64_32S, R_X86_64_32S, },
230 { BFD_RELOC_16, R_X86_64_16, },
231 { BFD_RELOC_16_PCREL, R_X86_64_PC16, },
232 { BFD_RELOC_8, R_X86_64_8, },
233 { BFD_RELOC_8_PCREL, R_X86_64_PC8, },
234 { BFD_RELOC_X86_64_DTPMOD64, R_X86_64_DTPMOD64, },
235 { BFD_RELOC_X86_64_DTPOFF64, R_X86_64_DTPOFF64, },
236 { BFD_RELOC_X86_64_TPOFF64, R_X86_64_TPOFF64, },
237 { BFD_RELOC_X86_64_TLSGD, R_X86_64_TLSGD, },
238 { BFD_RELOC_X86_64_TLSLD, R_X86_64_TLSLD, },
239 { BFD_RELOC_X86_64_DTPOFF32, R_X86_64_DTPOFF32, },
240 { BFD_RELOC_X86_64_GOTTPOFF, R_X86_64_GOTTPOFF, },
241 { BFD_RELOC_X86_64_TPOFF32, R_X86_64_TPOFF32, },
242 { BFD_RELOC_64_PCREL, R_X86_64_PC64, },
243 { BFD_RELOC_X86_64_GOTOFF64, R_X86_64_GOTOFF64, },
244 { BFD_RELOC_X86_64_GOTPC32, R_X86_64_GOTPC32, },
245 { BFD_RELOC_X86_64_GOT64, R_X86_64_GOT64, },
246 { BFD_RELOC_X86_64_GOTPCREL64,R_X86_64_GOTPCREL64, },
247 { BFD_RELOC_X86_64_GOTPC64, R_X86_64_GOTPC64, },
248 { BFD_RELOC_X86_64_GOTPLT64, R_X86_64_GOTPLT64, },
249 { BFD_RELOC_X86_64_PLTOFF64, R_X86_64_PLTOFF64, },
250 { BFD_RELOC_SIZE32, R_X86_64_SIZE32, },
251 { BFD_RELOC_SIZE64, R_X86_64_SIZE64, },
252 { BFD_RELOC_X86_64_GOTPC32_TLSDESC, R_X86_64_GOTPC32_TLSDESC, },
253 { BFD_RELOC_X86_64_TLSDESC_CALL, R_X86_64_TLSDESC_CALL, },
254 { BFD_RELOC_X86_64_TLSDESC, R_X86_64_TLSDESC, },
255 { BFD_RELOC_X86_64_IRELATIVE, R_X86_64_IRELATIVE, },
256 { BFD_RELOC_X86_64_PC32_BND, R_X86_64_PC32_BND,},
257 { BFD_RELOC_X86_64_PLT32_BND, R_X86_64_PLT32_BND,},
258 { BFD_RELOC_VTABLE_INHERIT, R_X86_64_GNU_VTINHERIT, },
259 { BFD_RELOC_VTABLE_ENTRY, R_X86_64_GNU_VTENTRY, },
260 };
261
262 static reloc_howto_type *
263 elf_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type)
264 {
265 unsigned i;
266
267 if (r_type == (unsigned int) R_X86_64_32)
268 {
269 if (ABI_64_P (abfd))
270 i = r_type;
271 else
272 i = ARRAY_SIZE (x86_64_elf_howto_table) - 1;
273 }
274 else if (r_type < (unsigned int) R_X86_64_GNU_VTINHERIT
275 || r_type >= (unsigned int) R_X86_64_max)
276 {
277 if (r_type >= (unsigned int) R_X86_64_standard)
278 {
279 (*_bfd_error_handler) (_("%B: invalid relocation type %d"),
280 abfd, (int) r_type);
281 r_type = R_X86_64_NONE;
282 }
283 i = r_type;
284 }
285 else
286 i = r_type - (unsigned int) R_X86_64_vt_offset;
287 BFD_ASSERT (x86_64_elf_howto_table[i].type == r_type);
288 return &x86_64_elf_howto_table[i];
289 }
290
291 /* Given a BFD reloc type, return a HOWTO structure. */
292 static reloc_howto_type *
293 elf_x86_64_reloc_type_lookup (bfd *abfd,
294 bfd_reloc_code_real_type code)
295 {
296 unsigned int i;
297
298 for (i = 0; i < sizeof (x86_64_reloc_map) / sizeof (struct elf_reloc_map);
299 i++)
300 {
301 if (x86_64_reloc_map[i].bfd_reloc_val == code)
302 return elf_x86_64_rtype_to_howto (abfd,
303 x86_64_reloc_map[i].elf_reloc_val);
304 }
305 return NULL;
306 }
307
308 static reloc_howto_type *
309 elf_x86_64_reloc_name_lookup (bfd *abfd,
310 const char *r_name)
311 {
312 unsigned int i;
313
314 if (!ABI_64_P (abfd) && strcasecmp (r_name, "R_X86_64_32") == 0)
315 {
316 /* Get x32 R_X86_64_32. */
317 reloc_howto_type *reloc
318 = &x86_64_elf_howto_table[ARRAY_SIZE (x86_64_elf_howto_table) - 1];
319 BFD_ASSERT (reloc->type == (unsigned int) R_X86_64_32);
320 return reloc;
321 }
322
323 for (i = 0; i < ARRAY_SIZE (x86_64_elf_howto_table); i++)
324 if (x86_64_elf_howto_table[i].name != NULL
325 && strcasecmp (x86_64_elf_howto_table[i].name, r_name) == 0)
326 return &x86_64_elf_howto_table[i];
327
328 return NULL;
329 }
330
331 /* Given an x86_64 ELF reloc type, fill in an arelent structure. */
332
333 static void
334 elf_x86_64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *cache_ptr,
335 Elf_Internal_Rela *dst)
336 {
337 unsigned r_type;
338
339 r_type = ELF32_R_TYPE (dst->r_info);
340 cache_ptr->howto = elf_x86_64_rtype_to_howto (abfd, r_type);
341 BFD_ASSERT (r_type == cache_ptr->howto->type);
342 }
343 \f
344 /* Support for core dump NOTE sections. */
345 static bfd_boolean
346 elf_x86_64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
347 {
348 int offset;
349 size_t size;
350
351 switch (note->descsz)
352 {
353 default:
354 return FALSE;
355
356 case 296: /* sizeof(struct elf_prstatus) on Linux/x32 */
357 /* pr_cursig */
358 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
359
360 /* pr_pid */
361 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
362
363 /* pr_reg */
364 offset = 72;
365 size = 216;
366
367 break;
368
369 case 336: /* sizeof(struct elf_prstatus) on Linux/x86_64 */
370 /* pr_cursig */
371 elf_tdata (abfd)->core->signal
372 = bfd_get_16 (abfd, note->descdata + 12);
373
374 /* pr_pid */
375 elf_tdata (abfd)->core->lwpid
376 = bfd_get_32 (abfd, note->descdata + 32);
377
378 /* pr_reg */
379 offset = 112;
380 size = 216;
381
382 break;
383 }
384
385 /* Make a ".reg/999" section. */
386 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
387 size, note->descpos + offset);
388 }
389
390 static bfd_boolean
391 elf_x86_64_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
392 {
393 switch (note->descsz)
394 {
395 default:
396 return FALSE;
397
398 case 124: /* sizeof(struct elf_prpsinfo) on Linux/x32 */
399 elf_tdata (abfd)->core->pid
400 = bfd_get_32 (abfd, note->descdata + 12);
401 elf_tdata (abfd)->core->program
402 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
403 elf_tdata (abfd)->core->command
404 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
405 break;
406
407 case 136: /* sizeof(struct elf_prpsinfo) on Linux/x86_64 */
408 elf_tdata (abfd)->core->pid
409 = bfd_get_32 (abfd, note->descdata + 24);
410 elf_tdata (abfd)->core->program
411 = _bfd_elfcore_strndup (abfd, note->descdata + 40, 16);
412 elf_tdata (abfd)->core->command
413 = _bfd_elfcore_strndup (abfd, note->descdata + 56, 80);
414 }
415
416 /* Note that for some reason a spurious space is tacked onto
417 the end of the args in some implementations (at least one,
418 anyway), so strip it off if it exists. */
419
420 {
421 char *command = elf_tdata (abfd)->core->command;
422 int n = strlen (command);
423
424 if (0 < n && command[n - 1] == ' ')
425 command[n - 1] = '\0';
426 }
427
428 return TRUE;
429 }
430
431 #ifdef CORE_HEADER
432 static char *
433 elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz,
434 int note_type, ...)
435 {
436 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
437 va_list ap;
438 const char *fname, *psargs;
439 long pid;
440 int cursig;
441 const void *gregs;
442
443 switch (note_type)
444 {
445 default:
446 return NULL;
447
448 case NT_PRPSINFO:
449 va_start (ap, note_type);
450 fname = va_arg (ap, const char *);
451 psargs = va_arg (ap, const char *);
452 va_end (ap);
453
454 if (bed->s->elfclass == ELFCLASS32)
455 {
456 prpsinfo32_t data;
457 memset (&data, 0, sizeof (data));
458 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
459 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
460 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
461 &data, sizeof (data));
462 }
463 else
464 {
465 prpsinfo64_t data;
466 memset (&data, 0, sizeof (data));
467 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
468 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
469 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
470 &data, sizeof (data));
471 }
472 /* NOTREACHED */
473
474 case NT_PRSTATUS:
475 va_start (ap, note_type);
476 pid = va_arg (ap, long);
477 cursig = va_arg (ap, int);
478 gregs = va_arg (ap, const void *);
479 va_end (ap);
480
481 if (bed->s->elfclass == ELFCLASS32)
482 {
483 if (bed->elf_machine_code == EM_X86_64)
484 {
485 prstatusx32_t prstat;
486 memset (&prstat, 0, sizeof (prstat));
487 prstat.pr_pid = pid;
488 prstat.pr_cursig = cursig;
489 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
490 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
491 &prstat, sizeof (prstat));
492 }
493 else
494 {
495 prstatus32_t prstat;
496 memset (&prstat, 0, sizeof (prstat));
497 prstat.pr_pid = pid;
498 prstat.pr_cursig = cursig;
499 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
500 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
501 &prstat, sizeof (prstat));
502 }
503 }
504 else
505 {
506 prstatus64_t prstat;
507 memset (&prstat, 0, sizeof (prstat));
508 prstat.pr_pid = pid;
509 prstat.pr_cursig = cursig;
510 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
511 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
512 &prstat, sizeof (prstat));
513 }
514 }
515 /* NOTREACHED */
516 }
517 #endif
518 \f
519 /* Functions for the x86-64 ELF linker. */
520
521 /* The name of the dynamic interpreter. This is put in the .interp
522 section. */
523
524 #define ELF64_DYNAMIC_INTERPRETER "/lib/ld64.so.1"
525 #define ELF32_DYNAMIC_INTERPRETER "/lib/ldx32.so.1"
526
527 /* If ELIMINATE_COPY_RELOCS is non-zero, the linker will try to avoid
528 copying dynamic variables from a shared lib into an app's dynbss
529 section, and instead use a dynamic relocation to point into the
530 shared lib. */
531 #define ELIMINATE_COPY_RELOCS 1
532
533 /* The size in bytes of an entry in the global offset table. */
534
535 #define GOT_ENTRY_SIZE 8
536
537 /* The size in bytes of an entry in the procedure linkage table. */
538
539 #define PLT_ENTRY_SIZE 16
540
541 /* The first entry in a procedure linkage table looks like this. See the
542 SVR4 ABI i386 supplement and the x86-64 ABI to see how this works. */
543
544 static const bfd_byte elf_x86_64_plt0_entry[PLT_ENTRY_SIZE] =
545 {
546 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
547 0xff, 0x25, 16, 0, 0, 0, /* jmpq *GOT+16(%rip) */
548 0x0f, 0x1f, 0x40, 0x00 /* nopl 0(%rax) */
549 };
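/* A rough sketch of how this first entry drives lazy binding: the
   dynamic linker stores a pointer to the link map in GOT[1] and the
   address of its resolver in GOT[2], so PLT0 pushes the link-map
   pointer held in GOT[1] and jumps through GOT[2].  Every later PLT
   entry branches back here the first time it is used.  */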
550
551 /* Subsequent entries in a procedure linkage table look like this. */
552
553 static const bfd_byte elf_x86_64_plt_entry[PLT_ENTRY_SIZE] =
554 {
555 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
556 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
557 0x68, /* pushq immediate */
558 0, 0, 0, 0, /* replaced with index into relocation table. */
559 0xe9, /* jmp relative */
560 0, 0, 0, 0 /* replaced with offset to start of .plt0. */
561 };
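/* A rough sketch of the first call through such an entry: the symbol's
   GOT slot initially holds the address of the pushq below the jmpq
   (entry + 6, see plt_lazy_offset), so the indirect jmpq falls through,
   the relocation index is pushed, and control reaches PLT0 above; the
   resolver then writes the real address into the GOT slot so that later
   calls jump straight to the target.  */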
562
563 /* The first entry in a procedure linkage table with BND relocations
564 looks like this. */
565
566 static const bfd_byte elf_x86_64_bnd_plt0_entry[PLT_ENTRY_SIZE] =
567 {
568 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
569 0xf2, 0xff, 0x25, 16, 0, 0, 0, /* bnd jmpq *GOT+16(%rip) */
570 0x0f, 0x1f, 0 /* nopl (%rax) */
571 };
572
573 /* Subsequent entries for legacy branches in a procedure linkage table
574 with BND relocations look like this. */
575
576 static const bfd_byte elf_x86_64_legacy_plt_entry[PLT_ENTRY_SIZE] =
577 {
578 0x68, 0, 0, 0, 0, /* pushq immediate */
579 0xe9, 0, 0, 0, 0, /* jmpq relative */
580 0x66, 0x0f, 0x1f, 0x44, 0, 0 /* nopw (%rax,%rax,1) */
581 };
582
583 /* Subsequent entries for branches with BND prefix in a procedure linkage
584 table with BND relocations look like this. */
585
586 static const bfd_byte elf_x86_64_bnd_plt_entry[PLT_ENTRY_SIZE] =
587 {
588 0x68, 0, 0, 0, 0, /* pushq immediate */
589 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
590 0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */
591 };
592
593 /* Entries for legacy branches in the second procedure linkage table
594 look like this. */
595
596 static const bfd_byte elf_x86_64_legacy_plt2_entry[8] =
597 {
598 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
599 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
600 0x66, 0x90 /* xchg %ax,%ax */
601 };
602
603 /* Entries for branches with BND prefix in the second procedure linkage
604 table look like this. */
605
606 static const bfd_byte elf_x86_64_bnd_plt2_entry[8] =
607 {
608 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
609 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
610 0x90 /* nop */
611 };
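/* Taken together these templates split the MPX PLT in two: the 16-byte
   .plt entries only push the relocation index and do a bnd jmp to PLT0
   for lazy resolution, while the 8-byte entries of the second PLT
   (.plt.bnd, created in check_relocs below) hold the real indirect jump
   through the GOT.  Branches target the second-PLT entry, whose GOT
   slot initially points back at the matching .plt entry; the bnd prefix
   keeps the MPX bound registers live, and symbols without any BND
   relocation get the legacy variant.  */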
612
613 /* .eh_frame covering the .plt section. */
614
615 static const bfd_byte elf_x86_64_eh_frame_plt[] =
616 {
617 #define PLT_CIE_LENGTH 20
618 #define PLT_FDE_LENGTH 36
619 #define PLT_FDE_START_OFFSET 4 + PLT_CIE_LENGTH + 8
620 #define PLT_FDE_LEN_OFFSET 4 + PLT_CIE_LENGTH + 12
621 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
622 0, 0, 0, 0, /* CIE ID */
623 1, /* CIE version */
624 'z', 'R', 0, /* Augmentation string */
625 1, /* Code alignment factor */
626 0x78, /* Data alignment factor */
627 16, /* Return address column */
628 1, /* Augmentation size */
629 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
630 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
631 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
632 DW_CFA_nop, DW_CFA_nop,
633
634 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
635 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
636 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
637 0, 0, 0, 0, /* .plt size goes here */
638 0, /* Augmentation size */
639 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
640 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
641 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
642 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
643 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
644 11, /* Block length */
645 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
646 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
647 DW_OP_lit15, DW_OP_and, DW_OP_lit11, DW_OP_ge,
648 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
649 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
650 };
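/* The DW_CFA_def_cfa_expression above evaluates to

	CFA = %rsp + 8 + (((%rip & 15) >= 11) << 3)

   i.e. within a 16-byte PLT entry the CFA is %rsp + 8 until the pushq
   of the relocation index (bytes 6..10) has executed and %rsp + 16 from
   byte 11 onwards, so the unwinder gets a correct frame at any point
   inside an entry.  */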
651
652 /* Architecture-specific backend data for x86-64. */
653
654 struct elf_x86_64_backend_data
655 {
656 /* Templates for the initial PLT entry and for subsequent entries. */
657 const bfd_byte *plt0_entry;
658 const bfd_byte *plt_entry;
659 unsigned int plt_entry_size; /* Size of each PLT entry. */
660
661 /* Offsets into plt0_entry that are to be replaced with GOT[1] and GOT[2]. */
662 unsigned int plt0_got1_offset;
663 unsigned int plt0_got2_offset;
664
665 /* Offset of the end of the PC-relative instruction containing
666 plt0_got2_offset. */
667 unsigned int plt0_got2_insn_end;
668
669 /* Offsets into plt_entry that are to be replaced with... */
670 unsigned int plt_got_offset; /* ... address of this symbol in .got. */
671 unsigned int plt_reloc_offset; /* ... offset into relocation table. */
672 unsigned int plt_plt_offset; /* ... offset to start of .plt. */
673
674 /* Length of the PC-relative instruction containing plt_got_offset. */
675 unsigned int plt_got_insn_size;
676
677 /* Offset of the end of the PC-relative jump to plt0_entry. */
678 unsigned int plt_plt_insn_end;
679
680 /* Offset into plt_entry where the initial value of the GOT entry points. */
681 unsigned int plt_lazy_offset;
682
683 /* .eh_frame covering the .plt section. */
684 const bfd_byte *eh_frame_plt;
685 unsigned int eh_frame_plt_size;
686 };
687
688 #define get_elf_x86_64_arch_data(bed) \
689 ((const struct elf_x86_64_backend_data *) (bed)->arch_data)
690
691 #define get_elf_x86_64_backend_data(abfd) \
692 get_elf_x86_64_arch_data (get_elf_backend_data (abfd))
693
694 #define GET_PLT_ENTRY_SIZE(abfd) \
695 get_elf_x86_64_backend_data (abfd)->plt_entry_size
696
697 /* These are the standard parameters. */
698 static const struct elf_x86_64_backend_data elf_x86_64_arch_bed =
699 {
700 elf_x86_64_plt0_entry, /* plt0_entry */
701 elf_x86_64_plt_entry, /* plt_entry */
702 sizeof (elf_x86_64_plt_entry), /* plt_entry_size */
703 2, /* plt0_got1_offset */
704 8, /* plt0_got2_offset */
705 12, /* plt0_got2_insn_end */
706 2, /* plt_got_offset */
707 7, /* plt_reloc_offset */
708 12, /* plt_plt_offset */
709 6, /* plt_got_insn_size */
710 PLT_ENTRY_SIZE, /* plt_plt_insn_end */
711 6, /* plt_lazy_offset */
712 elf_x86_64_eh_frame_plt, /* eh_frame_plt */
713 sizeof (elf_x86_64_eh_frame_plt), /* eh_frame_plt_size */
714 };
715
716 static const struct elf_x86_64_backend_data elf_x86_64_bnd_arch_bed =
717 {
718 elf_x86_64_bnd_plt0_entry, /* plt0_entry */
719 elf_x86_64_bnd_plt_entry, /* plt_entry */
720 sizeof (elf_x86_64_bnd_plt_entry), /* plt_entry_size */
721 2, /* plt0_got1_offset */
722 1+8, /* plt0_got2_offset */
723 1+12, /* plt0_got2_insn_end */
724 1+2, /* plt_got_offset */
725 1, /* plt_reloc_offset */
726 7, /* plt_plt_offset */
727 1+6, /* plt_got_insn_size */
728 11, /* plt_plt_insn_end */
729 0, /* plt_lazy_offset */
730 elf_x86_64_eh_frame_plt, /* eh_frame_plt */
731 sizeof (elf_x86_64_eh_frame_plt), /* eh_frame_plt_size */
732 };
733
734 #define elf_backend_arch_data &elf_x86_64_arch_bed
735
736 /* x86-64 ELF linker hash entry. */
737
738 struct elf_x86_64_link_hash_entry
739 {
740 struct elf_link_hash_entry elf;
741
742 /* Track dynamic relocs copied for this symbol. */
743 struct elf_dyn_relocs *dyn_relocs;
744
745 #define GOT_UNKNOWN 0
746 #define GOT_NORMAL 1
747 #define GOT_TLS_GD 2
748 #define GOT_TLS_IE 3
749 #define GOT_TLS_GDESC 4
750 #define GOT_TLS_GD_BOTH_P(type) \
751 ((type) == (GOT_TLS_GD | GOT_TLS_GDESC))
752 #define GOT_TLS_GD_P(type) \
753 ((type) == GOT_TLS_GD || GOT_TLS_GD_BOTH_P (type))
754 #define GOT_TLS_GDESC_P(type) \
755 ((type) == GOT_TLS_GDESC || GOT_TLS_GD_BOTH_P (type))
756 #define GOT_TLS_GD_ANY_P(type) \
757 (GOT_TLS_GD_P (type) || GOT_TLS_GDESC_P (type))
758 unsigned char tls_type;
759
760 /* TRUE if a weak symbol with a real definition needs a copy reloc.
761 When there is a weak symbol with a real definition, the processor
762 independent code will have arranged for us to see the real
763 definition first. We need to copy the needs_copy bit from the
764 real definition and check it when allowing copy reloc in PIE. */
765 unsigned int needs_copy : 1;
766
767 /* TRUE if symbol has at least one BND relocation. */
768 unsigned int has_bnd_reloc : 1;
769
770 /* Information about the GOT PLT entry. Filled when there are both
771 GOT and PLT relocations against the same function. */
772 union gotplt_union plt_got;
773
774 /* Information about the second PLT entry. Filled when has_bnd_reloc is
775 set. */
776 union gotplt_union plt_bnd;
777
778 /* Offset of the GOTPLT entry reserved for the TLS descriptor,
779 starting at the end of the jump table. */
780 bfd_vma tlsdesc_got;
781 };
782
783 #define elf_x86_64_hash_entry(ent) \
784 ((struct elf_x86_64_link_hash_entry *)(ent))
785
786 struct elf_x86_64_obj_tdata
787 {
788 struct elf_obj_tdata root;
789
790 /* tls_type for each local got entry. */
791 char *local_got_tls_type;
792
793 /* GOTPLT entries for TLS descriptors. */
794 bfd_vma *local_tlsdesc_gotent;
795 };
796
797 #define elf_x86_64_tdata(abfd) \
798 ((struct elf_x86_64_obj_tdata *) (abfd)->tdata.any)
799
800 #define elf_x86_64_local_got_tls_type(abfd) \
801 (elf_x86_64_tdata (abfd)->local_got_tls_type)
802
803 #define elf_x86_64_local_tlsdesc_gotent(abfd) \
804 (elf_x86_64_tdata (abfd)->local_tlsdesc_gotent)
805
806 #define is_x86_64_elf(bfd) \
807 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
808 && elf_tdata (bfd) != NULL \
809 && elf_object_id (bfd) == X86_64_ELF_DATA)
810
811 static bfd_boolean
812 elf_x86_64_mkobject (bfd *abfd)
813 {
814 return bfd_elf_allocate_object (abfd, sizeof (struct elf_x86_64_obj_tdata),
815 X86_64_ELF_DATA);
816 }
817
818 /* x86-64 ELF linker hash table. */
819
820 struct elf_x86_64_link_hash_table
821 {
822 struct elf_link_hash_table elf;
823
824 /* Short-cuts to get to dynamic linker sections. */
825 asection *sdynbss;
826 asection *srelbss;
827 asection *plt_eh_frame;
828 asection *plt_bnd;
829 asection *plt_got;
830
831 union
832 {
833 bfd_signed_vma refcount;
834 bfd_vma offset;
835 } tls_ld_got;
836
837 /* The amount of space used by the jump slots in the GOT. */
838 bfd_vma sgotplt_jump_table_size;
839
840 /* Small local sym cache. */
841 struct sym_cache sym_cache;
842
843 bfd_vma (*r_info) (bfd_vma, bfd_vma);
844 bfd_vma (*r_sym) (bfd_vma);
845 unsigned int pointer_r_type;
846 const char *dynamic_interpreter;
847 int dynamic_interpreter_size;
848
849 /* _TLS_MODULE_BASE_ symbol. */
850 struct bfd_link_hash_entry *tls_module_base;
851
852 /* Used by local STT_GNU_IFUNC symbols. */
853 htab_t loc_hash_table;
854 void * loc_hash_memory;
855
856 /* The offset into splt of the PLT entry for the TLS descriptor
857 resolver. Special values are 0, if not necessary (or not found
858 to be necessary yet), and -1 if needed but not determined
859 yet. */
860 bfd_vma tlsdesc_plt;
861 /* The offset into sgot of the GOT entry used by the PLT entry
862 above. */
863 bfd_vma tlsdesc_got;
864
865 /* The index of the next R_X86_64_JUMP_SLOT entry in .rela.plt. */
866 bfd_vma next_jump_slot_index;
867 /* The index of the next R_X86_64_IRELATIVE entry in .rela.plt. */
868 bfd_vma next_irelative_index;
869 };
870
871 /* Get the x86-64 ELF linker hash table from a link_info structure. */
872
873 #define elf_x86_64_hash_table(p) \
874 (elf_hash_table_id ((struct elf_link_hash_table *) ((p)->hash)) \
875 == X86_64_ELF_DATA ? ((struct elf_x86_64_link_hash_table *) ((p)->hash)) : NULL)
876
877 #define elf_x86_64_compute_jump_table_size(htab) \
878 ((htab)->elf.srelplt->reloc_count * GOT_ENTRY_SIZE)
879
880 /* Create an entry in an x86-64 ELF linker hash table. */
881
882 static struct bfd_hash_entry *
883 elf_x86_64_link_hash_newfunc (struct bfd_hash_entry *entry,
884 struct bfd_hash_table *table,
885 const char *string)
886 {
887 /* Allocate the structure if it has not already been allocated by a
888 subclass. */
889 if (entry == NULL)
890 {
891 entry = (struct bfd_hash_entry *)
892 bfd_hash_allocate (table,
893 sizeof (struct elf_x86_64_link_hash_entry));
894 if (entry == NULL)
895 return entry;
896 }
897
898 /* Call the allocation method of the superclass. */
899 entry = _bfd_elf_link_hash_newfunc (entry, table, string);
900 if (entry != NULL)
901 {
902 struct elf_x86_64_link_hash_entry *eh;
903
904 eh = (struct elf_x86_64_link_hash_entry *) entry;
905 eh->dyn_relocs = NULL;
906 eh->tls_type = GOT_UNKNOWN;
907 eh->needs_copy = 0;
908 eh->has_bnd_reloc = 0;
909 eh->plt_bnd.offset = (bfd_vma) -1;
910 eh->plt_got.offset = (bfd_vma) -1;
911 eh->tlsdesc_got = (bfd_vma) -1;
912 }
913
914 return entry;
915 }
916
917 /* Compute a hash of a local hash entry. We use elf_link_hash_entry
918 for local symbols so that we can handle local STT_GNU_IFUNC symbols
919 as global symbols. We reuse indx and dynstr_index for the local
920 symbol hash since they aren't used by global symbols in this backend. */
921
922 static hashval_t
923 elf_x86_64_local_htab_hash (const void *ptr)
924 {
925 struct elf_link_hash_entry *h
926 = (struct elf_link_hash_entry *) ptr;
927 return ELF_LOCAL_SYMBOL_HASH (h->indx, h->dynstr_index);
928 }
929
930 /* Compare local hash entries. */
931
932 static int
933 elf_x86_64_local_htab_eq (const void *ptr1, const void *ptr2)
934 {
935 struct elf_link_hash_entry *h1
936 = (struct elf_link_hash_entry *) ptr1;
937 struct elf_link_hash_entry *h2
938 = (struct elf_link_hash_entry *) ptr2;
939
940 return h1->indx == h2->indx && h1->dynstr_index == h2->dynstr_index;
941 }
942
943 /* Find and/or create a hash entry for a local symbol. */
944
945 static struct elf_link_hash_entry *
946 elf_x86_64_get_local_sym_hash (struct elf_x86_64_link_hash_table *htab,
947 bfd *abfd, const Elf_Internal_Rela *rel,
948 bfd_boolean create)
949 {
950 struct elf_x86_64_link_hash_entry e, *ret;
951 asection *sec = abfd->sections;
952 hashval_t h = ELF_LOCAL_SYMBOL_HASH (sec->id,
953 htab->r_sym (rel->r_info));
954 void **slot;
955
956 e.elf.indx = sec->id;
957 e.elf.dynstr_index = htab->r_sym (rel->r_info);
958 slot = htab_find_slot_with_hash (htab->loc_hash_table, &e, h,
959 create ? INSERT : NO_INSERT);
960
961 if (!slot)
962 return NULL;
963
964 if (*slot)
965 {
966 ret = (struct elf_x86_64_link_hash_entry *) *slot;
967 return &ret->elf;
968 }
969
970 ret = (struct elf_x86_64_link_hash_entry *)
971 objalloc_alloc ((struct objalloc *) htab->loc_hash_memory,
972 sizeof (struct elf_x86_64_link_hash_entry));
973 if (ret)
974 {
975 memset (ret, 0, sizeof (*ret));
976 ret->elf.indx = sec->id;
977 ret->elf.dynstr_index = htab->r_sym (rel->r_info);
978 ret->elf.dynindx = -1;
979 ret->plt_got.offset = (bfd_vma) -1;
980 *slot = ret;
981 }
982 return &ret->elf;
983 }
984
985 /* Destroy an X86-64 ELF linker hash table. */
986
987 static void
988 elf_x86_64_link_hash_table_free (bfd *obfd)
989 {
990 struct elf_x86_64_link_hash_table *htab
991 = (struct elf_x86_64_link_hash_table *) obfd->link.hash;
992
993 if (htab->loc_hash_table)
994 htab_delete (htab->loc_hash_table);
995 if (htab->loc_hash_memory)
996 objalloc_free ((struct objalloc *) htab->loc_hash_memory);
997 _bfd_elf_link_hash_table_free (obfd);
998 }
999
1000 /* Create an X86-64 ELF linker hash table. */
1001
1002 static struct bfd_link_hash_table *
1003 elf_x86_64_link_hash_table_create (bfd *abfd)
1004 {
1005 struct elf_x86_64_link_hash_table *ret;
1006 bfd_size_type amt = sizeof (struct elf_x86_64_link_hash_table);
1007
1008 ret = (struct elf_x86_64_link_hash_table *) bfd_zmalloc (amt);
1009 if (ret == NULL)
1010 return NULL;
1011
1012 if (!_bfd_elf_link_hash_table_init (&ret->elf, abfd,
1013 elf_x86_64_link_hash_newfunc,
1014 sizeof (struct elf_x86_64_link_hash_entry),
1015 X86_64_ELF_DATA))
1016 {
1017 free (ret);
1018 return NULL;
1019 }
1020
1021 if (ABI_64_P (abfd))
1022 {
1023 ret->r_info = elf64_r_info;
1024 ret->r_sym = elf64_r_sym;
1025 ret->pointer_r_type = R_X86_64_64;
1026 ret->dynamic_interpreter = ELF64_DYNAMIC_INTERPRETER;
1027 ret->dynamic_interpreter_size = sizeof ELF64_DYNAMIC_INTERPRETER;
1028 }
1029 else
1030 {
1031 ret->r_info = elf32_r_info;
1032 ret->r_sym = elf32_r_sym;
1033 ret->pointer_r_type = R_X86_64_32;
1034 ret->dynamic_interpreter = ELF32_DYNAMIC_INTERPRETER;
1035 ret->dynamic_interpreter_size = sizeof ELF32_DYNAMIC_INTERPRETER;
1036 }
1037
1038 ret->loc_hash_table = htab_try_create (1024,
1039 elf_x86_64_local_htab_hash,
1040 elf_x86_64_local_htab_eq,
1041 NULL);
1042 ret->loc_hash_memory = objalloc_create ();
1043 if (!ret->loc_hash_table || !ret->loc_hash_memory)
1044 {
1045 elf_x86_64_link_hash_table_free (abfd);
1046 return NULL;
1047 }
1048 ret->elf.root.hash_table_free = elf_x86_64_link_hash_table_free;
1049
1050 return &ret->elf.root;
1051 }
1052
1053 /* Create .plt, .rela.plt, .got, .got.plt, .rela.got, .dynbss, and
1054 .rela.bss sections in DYNOBJ, and set up shortcuts to them in our
1055 hash table. */
1056
1057 static bfd_boolean
1058 elf_x86_64_create_dynamic_sections (bfd *dynobj,
1059 struct bfd_link_info *info)
1060 {
1061 struct elf_x86_64_link_hash_table *htab;
1062
1063 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
1064 return FALSE;
1065
1066 htab = elf_x86_64_hash_table (info);
1067 if (htab == NULL)
1068 return FALSE;
1069
1070 htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
1071 if (!htab->sdynbss)
1072 abort ();
1073
1074 if (info->executable)
1075 {
1076 /* Always allow copy relocs for building executables. */
1077 asection *s = bfd_get_linker_section (dynobj, ".rela.bss");
1078 if (s == NULL)
1079 {
1080 const struct elf_backend_data *bed = get_elf_backend_data (dynobj);
1081 s = bfd_make_section_anyway_with_flags (dynobj,
1082 ".rela.bss",
1083 (bed->dynamic_sec_flags
1084 | SEC_READONLY));
1085 if (s == NULL
1086 || ! bfd_set_section_alignment (dynobj, s,
1087 bed->s->log_file_align))
1088 return FALSE;
1089 }
1090 htab->srelbss = s;
1091 }
1092
1093 if (!info->no_ld_generated_unwind_info
1094 && htab->plt_eh_frame == NULL
1095 && htab->elf.splt != NULL)
1096 {
1097 flagword flags = (SEC_ALLOC | SEC_LOAD | SEC_READONLY
1098 | SEC_HAS_CONTENTS | SEC_IN_MEMORY
1099 | SEC_LINKER_CREATED);
1100 htab->plt_eh_frame
1101 = bfd_make_section_anyway_with_flags (dynobj, ".eh_frame", flags);
1102 if (htab->plt_eh_frame == NULL
1103 || !bfd_set_section_alignment (dynobj, htab->plt_eh_frame, 3))
1104 return FALSE;
1105 }
1106 return TRUE;
1107 }
1108
1109 /* Copy the extra info we tack onto an elf_link_hash_entry. */
1110
1111 static void
1112 elf_x86_64_copy_indirect_symbol (struct bfd_link_info *info,
1113 struct elf_link_hash_entry *dir,
1114 struct elf_link_hash_entry *ind)
1115 {
1116 struct elf_x86_64_link_hash_entry *edir, *eind;
1117
1118 edir = (struct elf_x86_64_link_hash_entry *) dir;
1119 eind = (struct elf_x86_64_link_hash_entry *) ind;
1120
1121 if (!edir->has_bnd_reloc)
1122 edir->has_bnd_reloc = eind->has_bnd_reloc;
1123
1124 if (eind->dyn_relocs != NULL)
1125 {
1126 if (edir->dyn_relocs != NULL)
1127 {
1128 struct elf_dyn_relocs **pp;
1129 struct elf_dyn_relocs *p;
1130
1131 /* Add reloc counts against the indirect sym to the direct sym
1132 list. Merge any entries against the same section. */
1133 for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
1134 {
1135 struct elf_dyn_relocs *q;
1136
1137 for (q = edir->dyn_relocs; q != NULL; q = q->next)
1138 if (q->sec == p->sec)
1139 {
1140 q->pc_count += p->pc_count;
1141 q->count += p->count;
1142 *pp = p->next;
1143 break;
1144 }
1145 if (q == NULL)
1146 pp = &p->next;
1147 }
1148 *pp = edir->dyn_relocs;
1149 }
1150
1151 edir->dyn_relocs = eind->dyn_relocs;
1152 eind->dyn_relocs = NULL;
1153 }
1154
1155 if (ind->root.type == bfd_link_hash_indirect
1156 && dir->got.refcount <= 0)
1157 {
1158 edir->tls_type = eind->tls_type;
1159 eind->tls_type = GOT_UNKNOWN;
1160 }
1161
1162 if (ELIMINATE_COPY_RELOCS
1163 && ind->root.type != bfd_link_hash_indirect
1164 && dir->dynamic_adjusted)
1165 {
1166 /* If called to transfer flags for a weakdef during processing
1167 of elf_adjust_dynamic_symbol, don't copy non_got_ref.
1168 We clear it ourselves for ELIMINATE_COPY_RELOCS. */
1169 dir->ref_dynamic |= ind->ref_dynamic;
1170 dir->ref_regular |= ind->ref_regular;
1171 dir->ref_regular_nonweak |= ind->ref_regular_nonweak;
1172 dir->needs_plt |= ind->needs_plt;
1173 dir->pointer_equality_needed |= ind->pointer_equality_needed;
1174 }
1175 else
1176 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
1177 }
1178
1179 static bfd_boolean
1180 elf64_x86_64_elf_object_p (bfd *abfd)
1181 {
1182 /* Set the right machine number for an x86-64 elf64 file. */
1183 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64);
1184 return TRUE;
1185 }
1186
1187 static bfd_boolean
1188 elf32_x86_64_elf_object_p (bfd *abfd)
1189 {
1190 /* Set the right machine number for an x86-64 elf32 file. */
1191 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32);
1192 return TRUE;
1193 }
1194
1195 /* Return TRUE if the TLS access code sequence supports the transition
1196 from R_TYPE. */
1197
1198 static bfd_boolean
1199 elf_x86_64_check_tls_transition (bfd *abfd,
1200 struct bfd_link_info *info,
1201 asection *sec,
1202 bfd_byte *contents,
1203 Elf_Internal_Shdr *symtab_hdr,
1204 struct elf_link_hash_entry **sym_hashes,
1205 unsigned int r_type,
1206 const Elf_Internal_Rela *rel,
1207 const Elf_Internal_Rela *relend)
1208 {
1209 unsigned int val;
1210 unsigned long r_symndx;
1211 bfd_boolean largepic = FALSE;
1212 struct elf_link_hash_entry *h;
1213 bfd_vma offset;
1214 struct elf_x86_64_link_hash_table *htab;
1215
1216 /* Get the section contents. */
1217 if (contents == NULL)
1218 {
1219 if (elf_section_data (sec)->this_hdr.contents != NULL)
1220 contents = elf_section_data (sec)->this_hdr.contents;
1221 else
1222 {
1223 /* FIXME: How to better handle error condition? */
1224 if (!bfd_malloc_and_get_section (abfd, sec, &contents))
1225 return FALSE;
1226
1227 /* Cache the section contents for elf_link_input_bfd. */
1228 elf_section_data (sec)->this_hdr.contents = contents;
1229 }
1230 }
1231
1232 htab = elf_x86_64_hash_table (info);
1233 offset = rel->r_offset;
1234 switch (r_type)
1235 {
1236 case R_X86_64_TLSGD:
1237 case R_X86_64_TLSLD:
1238 if ((rel + 1) >= relend)
1239 return FALSE;
1240
1241 if (r_type == R_X86_64_TLSGD)
1242 {
1243 /* Check transition from GD access model. For 64bit, only
1244 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1245 .word 0x6666; rex64; call __tls_get_addr
1246 can transit to different access model. For 32bit, only
1247 leaq foo@tlsgd(%rip), %rdi
1248 .word 0x6666; rex64; call __tls_get_addr
1249 can transit to different access model. For largepic
1250 we also support:
1251 leaq foo@tlsgd(%rip), %rdi
1252 movabsq $__tls_get_addr@pltoff, %rax
1253 addq %rbx, %rax
1254 call *%rax. */
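/* If the transition is later performed in elf_x86_64_relocate_section,
   this sequence is rewritten in place; for the 64-bit case, roughly:
	GD->LE:  movq %fs:0, %rax; leaq foo@tpoff(%rax), %rax
	GD->IE:  movq %fs:0, %rax; addq foo@gottpoff(%rip), %rax
   which is why the exact instruction bytes are verified here first.  */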
1255
1256 static const unsigned char call[] = { 0x66, 0x66, 0x48, 0xe8 };
1257 static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d };
1258
1259 if ((offset + 12) > sec->size)
1260 return FALSE;
1261
1262 if (memcmp (contents + offset + 4, call, 4) != 0)
1263 {
1264 if (!ABI_64_P (abfd)
1265 || (offset + 19) > sec->size
1266 || offset < 3
1267 || memcmp (contents + offset - 3, leaq + 1, 3) != 0
1268 || memcmp (contents + offset + 4, "\x48\xb8", 2) != 0
1269 || memcmp (contents + offset + 14, "\x48\x01\xd8\xff\xd0", 5)
1270 != 0)
1271 return FALSE;
1272 largepic = TRUE;
1273 }
1274 else if (ABI_64_P (abfd))
1275 {
1276 if (offset < 4
1277 || memcmp (contents + offset - 4, leaq, 4) != 0)
1278 return FALSE;
1279 }
1280 else
1281 {
1282 if (offset < 3
1283 || memcmp (contents + offset - 3, leaq + 1, 3) != 0)
1284 return FALSE;
1285 }
1286 }
1287 else
1288 {
1289 /* Check transition from LD access model. Only
1290 leaq foo@tlsld(%rip), %rdi;
1291 call __tls_get_addr
1292 can transit to different access model. For largepic
1293 we also support:
1294 leaq foo@tlsld(%rip), %rdi
1295 movabsq $__tls_get_addr@pltoff, %rax
1296 addq %rbx, %rax
1297 call *%rax. */
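/* If the LD->LE transition is later performed, the leaq/call pair is
   overwritten in place, roughly with
	.word 0x6666; .byte 0x66; movq %fs:0, %rax
   using redundant prefixes to fill the original 12 bytes, so the byte
   checks below make sure nothing else gets rewritten.  */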
1298
1299 static const unsigned char lea[] = { 0x48, 0x8d, 0x3d };
1300
1301 if (offset < 3 || (offset + 9) > sec->size)
1302 return FALSE;
1303
1304 if (memcmp (contents + offset - 3, lea, 3) != 0)
1305 return FALSE;
1306
1307 if (0xe8 != *(contents + offset + 4))
1308 {
1309 if (!ABI_64_P (abfd)
1310 || (offset + 19) > sec->size
1311 || memcmp (contents + offset + 4, "\x48\xb8", 2) != 0
1312 || memcmp (contents + offset + 14, "\x48\x01\xd8\xff\xd0", 5)
1313 != 0)
1314 return FALSE;
1315 largepic = TRUE;
1316 }
1317 }
1318
1319 r_symndx = htab->r_sym (rel[1].r_info);
1320 if (r_symndx < symtab_hdr->sh_info)
1321 return FALSE;
1322
1323 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1324 /* Use strncmp to check __tls_get_addr since __tls_get_addr
1325 may be versioned. */
1326 return (h != NULL
1327 && h->root.root.string != NULL
1328 && (largepic
1329 ? ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PLTOFF64
1330 : (ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PC32
1331 || ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PLT32))
1332 && (strncmp (h->root.root.string,
1333 "__tls_get_addr", 14) == 0));
1334
1335 case R_X86_64_GOTTPOFF:
1336 /* Check transition from IE access model:
1337 mov foo@gottpoff(%rip), %reg
1338 add foo@gottpoff(%rip), %reg
1339 */
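/* If the IE->LE transition is performed, the GOT load is rewritten into
   an immediate form, e.g.
	movq foo@gottpoff(%rip), %reg  ->  movq $foo@tpoff, %reg
   so the REX prefix, opcode and ModRM byte are validated below before
   the transition is allowed.  */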
1340
1341 /* Check REX prefix first. */
1342 if (offset >= 3 && (offset + 4) <= sec->size)
1343 {
1344 val = bfd_get_8 (abfd, contents + offset - 3);
1345 if (val != 0x48 && val != 0x4c)
1346 {
1347 /* X32 may have 0x44 REX prefix or no REX prefix. */
1348 if (ABI_64_P (abfd))
1349 return FALSE;
1350 }
1351 }
1352 else
1353 {
1354 /* X32 may not have any REX prefix. */
1355 if (ABI_64_P (abfd))
1356 return FALSE;
1357 if (offset < 2 || (offset + 3) > sec->size)
1358 return FALSE;
1359 }
1360
1361 val = bfd_get_8 (abfd, contents + offset - 2);
1362 if (val != 0x8b && val != 0x03)
1363 return FALSE;
1364
1365 val = bfd_get_8 (abfd, contents + offset - 1);
1366 return (val & 0xc7) == 5;
1367
1368 case R_X86_64_GOTPC32_TLSDESC:
1369 /* Check transition from GDesc access model:
1370 leaq x@tlsdesc(%rip), %rax
1371
1372 Make sure it's a leaq adding rip to a 32-bit offset
1373 into any register, although it's probably almost always
1374 going to be rax. */
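/* If relaxed, the leaq is rewritten to a movq: from x@gottpoff(%rip)
   for GDesc->IE, or of an immediate $x@tpoff for GDesc->LE, and the
   matching R_X86_64_TLSDESC_CALL site below becomes a two-byte nop
   (xchg %ax,%ax).  */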
1375
1376 if (offset < 3 || (offset + 4) > sec->size)
1377 return FALSE;
1378
1379 val = bfd_get_8 (abfd, contents + offset - 3);
1380 if ((val & 0xfb) != 0x48)
1381 return FALSE;
1382
1383 if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d)
1384 return FALSE;
1385
1386 val = bfd_get_8 (abfd, contents + offset - 1);
1387 return (val & 0xc7) == 0x05;
1388
1389 case R_X86_64_TLSDESC_CALL:
1390 /* Check transition from GDesc access model:
1391 call *x@tlsdesc(%rax)
1392 */
1393 if (offset + 2 <= sec->size)
1394 {
1395 /* Make sure that it's a call *x@tlsdesc(%rax). */
1396 static const unsigned char call[] = { 0xff, 0x10 };
1397 return memcmp (contents + offset, call, 2) == 0;
1398 }
1399
1400 return FALSE;
1401
1402 default:
1403 abort ();
1404 }
1405 }
1406
1407 /* Return TRUE if the TLS access transition is OK or no transition
1408 will be performed. Update R_TYPE if there is a transition. */
1409
1410 static bfd_boolean
1411 elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd,
1412 asection *sec, bfd_byte *contents,
1413 Elf_Internal_Shdr *symtab_hdr,
1414 struct elf_link_hash_entry **sym_hashes,
1415 unsigned int *r_type, int tls_type,
1416 const Elf_Internal_Rela *rel,
1417 const Elf_Internal_Rela *relend,
1418 struct elf_link_hash_entry *h,
1419 unsigned long r_symndx)
1420 {
1421 unsigned int from_type = *r_type;
1422 unsigned int to_type = from_type;
1423 bfd_boolean check = TRUE;
1424
1425 /* Skip TLS transition for functions. */
1426 if (h != NULL
1427 && (h->type == STT_FUNC
1428 || h->type == STT_GNU_IFUNC))
1429 return TRUE;
1430
1431 switch (from_type)
1432 {
1433 case R_X86_64_TLSGD:
1434 case R_X86_64_GOTPC32_TLSDESC:
1435 case R_X86_64_TLSDESC_CALL:
1436 case R_X86_64_GOTTPOFF:
1437 if (info->executable)
1438 {
1439 if (h == NULL)
1440 to_type = R_X86_64_TPOFF32;
1441 else
1442 to_type = R_X86_64_GOTTPOFF;
1443 }
1444
1445 /* When we are called from elf_x86_64_relocate_section,
1446 CONTENTS isn't NULL and there may be additional transitions
1447 based on TLS_TYPE. */
1448 if (contents != NULL)
1449 {
1450 unsigned int new_to_type = to_type;
1451
1452 if (info->executable
1453 && h != NULL
1454 && h->dynindx == -1
1455 && tls_type == GOT_TLS_IE)
1456 new_to_type = R_X86_64_TPOFF32;
1457
1458 if (to_type == R_X86_64_TLSGD
1459 || to_type == R_X86_64_GOTPC32_TLSDESC
1460 || to_type == R_X86_64_TLSDESC_CALL)
1461 {
1462 if (tls_type == GOT_TLS_IE)
1463 new_to_type = R_X86_64_GOTTPOFF;
1464 }
1465
1466 /* We checked the transition before when we were called from
1467 elf_x86_64_check_relocs. We only want to check the new
1468 transition which hasn't been checked before. */
1469 check = new_to_type != to_type && from_type == to_type;
1470 to_type = new_to_type;
1471 }
1472
1473 break;
1474
1475 case R_X86_64_TLSLD:
1476 if (info->executable)
1477 to_type = R_X86_64_TPOFF32;
1478 break;
1479
1480 default:
1481 return TRUE;
1482 }
1483
1484 /* Return TRUE if there is no transition. */
1485 if (from_type == to_type)
1486 return TRUE;
1487
1488 /* Check if the transition can be performed. */
1489 if (check
1490 && ! elf_x86_64_check_tls_transition (abfd, info, sec, contents,
1491 symtab_hdr, sym_hashes,
1492 from_type, rel, relend))
1493 {
1494 reloc_howto_type *from, *to;
1495 const char *name;
1496
1497 from = elf_x86_64_rtype_to_howto (abfd, from_type);
1498 to = elf_x86_64_rtype_to_howto (abfd, to_type);
1499
1500 if (h)
1501 name = h->root.root.string;
1502 else
1503 {
1504 struct elf_x86_64_link_hash_table *htab;
1505
1506 htab = elf_x86_64_hash_table (info);
1507 if (htab == NULL)
1508 name = "*unknown*";
1509 else
1510 {
1511 Elf_Internal_Sym *isym;
1512
1513 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1514 abfd, r_symndx);
1515 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1516 }
1517 }
1518
1519 (*_bfd_error_handler)
1520 (_("%B: TLS transition from %s to %s against `%s' at 0x%lx "
1521 "in section `%A' failed"),
1522 abfd, sec, from->name, to->name, name,
1523 (unsigned long) rel->r_offset);
1524 bfd_set_error (bfd_error_bad_value);
1525 return FALSE;
1526 }
1527
1528 *r_type = to_type;
1529 return TRUE;
1530 }
1531
1532 /* Look through the relocs for a section during the first phase, and
1533 calculate needed space in the global offset table, procedure
1534 linkage table, and dynamic reloc sections. */
1535
1536 static bfd_boolean
1537 elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info,
1538 asection *sec,
1539 const Elf_Internal_Rela *relocs)
1540 {
1541 struct elf_x86_64_link_hash_table *htab;
1542 Elf_Internal_Shdr *symtab_hdr;
1543 struct elf_link_hash_entry **sym_hashes;
1544 const Elf_Internal_Rela *rel;
1545 const Elf_Internal_Rela *rel_end;
1546 asection *sreloc;
1547 bfd_boolean use_plt_got;
1548
1549 if (info->relocatable)
1550 return TRUE;
1551
1552 BFD_ASSERT (is_x86_64_elf (abfd));
1553
1554 htab = elf_x86_64_hash_table (info);
1555 if (htab == NULL)
1556 return FALSE;
1557
1558 use_plt_got = get_elf_x86_64_backend_data (abfd) == &elf_x86_64_arch_bed;
1559
1560 symtab_hdr = &elf_symtab_hdr (abfd);
1561 sym_hashes = elf_sym_hashes (abfd);
1562
1563 sreloc = NULL;
1564
1565 rel_end = relocs + sec->reloc_count;
1566 for (rel = relocs; rel < rel_end; rel++)
1567 {
1568 unsigned int r_type;
1569 unsigned long r_symndx;
1570 struct elf_link_hash_entry *h;
1571 Elf_Internal_Sym *isym;
1572 const char *name;
1573 bfd_boolean size_reloc;
1574
1575 r_symndx = htab->r_sym (rel->r_info);
1576 r_type = ELF32_R_TYPE (rel->r_info);
1577
1578 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
1579 {
1580 (*_bfd_error_handler) (_("%B: bad symbol index: %d"),
1581 abfd, r_symndx);
1582 return FALSE;
1583 }
1584
1585 if (r_symndx < symtab_hdr->sh_info)
1586 {
1587 /* A local symbol. */
1588 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1589 abfd, r_symndx);
1590 if (isym == NULL)
1591 return FALSE;
1592
1593 /* Check relocation against local STT_GNU_IFUNC symbol. */
1594 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
1595 {
1596 h = elf_x86_64_get_local_sym_hash (htab, abfd, rel,
1597 TRUE);
1598 if (h == NULL)
1599 return FALSE;
1600
1601 /* Fake a STT_GNU_IFUNC symbol. */
1602 h->type = STT_GNU_IFUNC;
1603 h->def_regular = 1;
1604 h->ref_regular = 1;
1605 h->forced_local = 1;
1606 h->root.type = bfd_link_hash_defined;
1607 }
1608 else
1609 h = NULL;
1610 }
1611 else
1612 {
1613 isym = NULL;
1614 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1615 while (h->root.type == bfd_link_hash_indirect
1616 || h->root.type == bfd_link_hash_warning)
1617 h = (struct elf_link_hash_entry *) h->root.u.i.link;
1618 }
1619
1620 /* Check for invalid x32 relocations. */
1621 if (!ABI_64_P (abfd))
1622 switch (r_type)
1623 {
1624 default:
1625 break;
1626
1627 case R_X86_64_DTPOFF64:
1628 case R_X86_64_TPOFF64:
1629 case R_X86_64_PC64:
1630 case R_X86_64_GOTOFF64:
1631 case R_X86_64_GOT64:
1632 case R_X86_64_GOTPCREL64:
1633 case R_X86_64_GOTPC64:
1634 case R_X86_64_GOTPLT64:
1635 case R_X86_64_PLTOFF64:
1636 {
1637 if (h)
1638 name = h->root.root.string;
1639 else
1640 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1641 NULL);
1642 (*_bfd_error_handler)
1643 (_("%B: relocation %s against symbol `%s' isn't "
1644 "supported in x32 mode"), abfd,
1645 x86_64_elf_howto_table[r_type].name, name);
1646 bfd_set_error (bfd_error_bad_value);
1647 return FALSE;
1648 }
1649 break;
1650 }
1651
1652 if (h != NULL)
1653 {
1654 /* Create the ifunc sections for static executables. If we
1655 never see an indirect function symbol nor are we building
1656 a static executable, those sections will be empty and
1657 won't appear in the output. */
1658 switch (r_type)
1659 {
1660 default:
1661 break;
1662
1663 case R_X86_64_PC32_BND:
1664 case R_X86_64_PLT32_BND:
1665 case R_X86_64_PC32:
1666 case R_X86_64_PLT32:
1667 case R_X86_64_32:
1668 case R_X86_64_64:
1669 /* MPX PLT is supported only if elf_x86_64_arch_bed
1670 is used in 64-bit mode. */
1671 if (ABI_64_P (abfd)
1672 && info->bndplt
1673 && (get_elf_x86_64_backend_data (abfd)
1674 == &elf_x86_64_arch_bed))
1675 {
1676 elf_x86_64_hash_entry (h)->has_bnd_reloc = 1;
1677
1678 /* Create the second PLT for Intel MPX support. */
1679 if (htab->plt_bnd == NULL)
1680 {
1681 unsigned int plt_bnd_align;
1682 const struct elf_backend_data *bed;
1683
1684 bed = get_elf_backend_data (info->output_bfd);
1685 BFD_ASSERT (sizeof (elf_x86_64_bnd_plt2_entry) == 8
1686 && (sizeof (elf_x86_64_bnd_plt2_entry)
1687 == sizeof (elf_x86_64_legacy_plt2_entry)));
1688 plt_bnd_align = 3;
1689
1690 if (htab->elf.dynobj == NULL)
1691 htab->elf.dynobj = abfd;
1692 htab->plt_bnd
1693 = bfd_make_section_anyway_with_flags (htab->elf.dynobj,
1694 ".plt.bnd",
1695 (bed->dynamic_sec_flags
1696 | SEC_ALLOC
1697 | SEC_CODE
1698 | SEC_LOAD
1699 | SEC_READONLY));
1700 if (htab->plt_bnd == NULL
1701 || !bfd_set_section_alignment (htab->elf.dynobj,
1702 htab->plt_bnd,
1703 plt_bnd_align))
1704 return FALSE;
1705 }
1706 }
1707
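/* Fall through.  */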
1708 case R_X86_64_32S:
1709 case R_X86_64_PC64:
1710 case R_X86_64_GOTPCREL:
1711 case R_X86_64_GOTPCREL64:
1712 if (htab->elf.dynobj == NULL)
1713 htab->elf.dynobj = abfd;
1714 if (!_bfd_elf_create_ifunc_sections (htab->elf.dynobj, info))
1715 return FALSE;
1716 break;
1717 }
1718
1719 /* It is referenced by a non-shared object. */
1720 h->ref_regular = 1;
1721 h->root.non_ir_ref = 1;
1722 }
1723
1724 if (! elf_x86_64_tls_transition (info, abfd, sec, NULL,
1725 symtab_hdr, sym_hashes,
1726 &r_type, GOT_UNKNOWN,
1727 rel, rel_end, h, r_symndx))
1728 return FALSE;
1729
1730 switch (r_type)
1731 {
1732 case R_X86_64_TLSLD:
1733 htab->tls_ld_got.refcount += 1;
1734 goto create_got;
1735
1736 case R_X86_64_TPOFF32:
1737 if (!info->executable && ABI_64_P (abfd))
1738 {
1739 if (h)
1740 name = h->root.root.string;
1741 else
1742 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1743 NULL);
1744 (*_bfd_error_handler)
1745 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
1746 abfd,
1747 x86_64_elf_howto_table[r_type].name, name);
1748 bfd_set_error (bfd_error_bad_value);
1749 return FALSE;
1750 }
1751 break;
1752
1753 case R_X86_64_GOTTPOFF:
1754 if (!info->executable)
1755 info->flags |= DF_STATIC_TLS;
1756 /* Fall through */
1757
1758 case R_X86_64_GOT32:
1759 case R_X86_64_GOTPCREL:
1760 case R_X86_64_TLSGD:
1761 case R_X86_64_GOT64:
1762 case R_X86_64_GOTPCREL64:
1763 case R_X86_64_GOTPLT64:
1764 case R_X86_64_GOTPC32_TLSDESC:
1765 case R_X86_64_TLSDESC_CALL:
1766 /* This symbol requires a global offset table entry. */
1767 {
1768 int tls_type, old_tls_type;
1769
1770 switch (r_type)
1771 {
1772 default: tls_type = GOT_NORMAL; break;
1773 case R_X86_64_TLSGD: tls_type = GOT_TLS_GD; break;
1774 case R_X86_64_GOTTPOFF: tls_type = GOT_TLS_IE; break;
1775 case R_X86_64_GOTPC32_TLSDESC:
1776 case R_X86_64_TLSDESC_CALL:
1777 tls_type = GOT_TLS_GDESC; break;
1778 }
1779
1780 if (h != NULL)
1781 {
1782 h->got.refcount += 1;
1783 old_tls_type = elf_x86_64_hash_entry (h)->tls_type;
1784 }
1785 else
1786 {
1787 bfd_signed_vma *local_got_refcounts;
1788
1789 /* This is a global offset table entry for a local symbol. */
1790 local_got_refcounts = elf_local_got_refcounts (abfd);
1791 if (local_got_refcounts == NULL)
1792 {
1793 bfd_size_type size;
1794
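/* One allocation holds three parallel arrays indexed by local symbol: GOT reference counts (bfd_signed_vma), TLSDESC GOT offsets (bfd_vma) and GOT TLS types (char).  */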
1795 size = symtab_hdr->sh_info;
1796 size *= sizeof (bfd_signed_vma)
1797 + sizeof (bfd_vma) + sizeof (char);
1798 local_got_refcounts = ((bfd_signed_vma *)
1799 bfd_zalloc (abfd, size));
1800 if (local_got_refcounts == NULL)
1801 return FALSE;
1802 elf_local_got_refcounts (abfd) = local_got_refcounts;
1803 elf_x86_64_local_tlsdesc_gotent (abfd)
1804 = (bfd_vma *) (local_got_refcounts + symtab_hdr->sh_info);
1805 elf_x86_64_local_got_tls_type (abfd)
1806 = (char *) (local_got_refcounts + 2 * symtab_hdr->sh_info);
1807 }
1808 local_got_refcounts[r_symndx] += 1;
1809 old_tls_type
1810 = elf_x86_64_local_got_tls_type (abfd) [r_symndx];
1811 }
1812
1813 /* If a TLS symbol is accessed using IE at least once,
1814 there is no point in using a dynamic model for it. */
1815 if (old_tls_type != tls_type && old_tls_type != GOT_UNKNOWN
1816 && (! GOT_TLS_GD_ANY_P (old_tls_type)
1817 || tls_type != GOT_TLS_IE))
1818 {
1819 if (old_tls_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (tls_type))
1820 tls_type = old_tls_type;
1821 else if (GOT_TLS_GD_ANY_P (old_tls_type)
1822 && GOT_TLS_GD_ANY_P (tls_type))
1823 tls_type |= old_tls_type;
1824 else
1825 {
1826 if (h)
1827 name = h->root.root.string;
1828 else
1829 name = bfd_elf_sym_name (abfd, symtab_hdr,
1830 isym, NULL);
1831 (*_bfd_error_handler)
1832 (_("%B: '%s' accessed both as normal and thread local symbol"),
1833 abfd, name);
1834 bfd_set_error (bfd_error_bad_value);
1835 return FALSE;
1836 }
1837 }
1838
1839 if (old_tls_type != tls_type)
1840 {
1841 if (h != NULL)
1842 elf_x86_64_hash_entry (h)->tls_type = tls_type;
1843 else
1844 elf_x86_64_local_got_tls_type (abfd) [r_symndx] = tls_type;
1845 }
1846 }
1847 /* Fall through */
1848
1849 case R_X86_64_GOTOFF64:
1850 case R_X86_64_GOTPC32:
1851 case R_X86_64_GOTPC64:
1852 create_got:
1853 if (htab->elf.sgot == NULL)
1854 {
1855 if (htab->elf.dynobj == NULL)
1856 htab->elf.dynobj = abfd;
1857 if (!_bfd_elf_create_got_section (htab->elf.dynobj,
1858 info))
1859 return FALSE;
1860 }
1861 break;
1862
1863 case R_X86_64_PLT32:
1864 case R_X86_64_PLT32_BND:
1865 /* This symbol requires a procedure linkage table entry. We
1866 actually build the entry in adjust_dynamic_symbol,
1867 because this might be a case of linking PIC code which is
1868 never referenced by a dynamic object, in which case we
1869 don't need to generate a procedure linkage table entry
1870 after all. */
1871
1872 /* If this is a local symbol, we resolve it directly without
1873 creating a procedure linkage table entry. */
1874 if (h == NULL)
1875 continue;
1876
1877 h->needs_plt = 1;
1878 h->plt.refcount += 1;
1879 break;
1880
1881 case R_X86_64_PLTOFF64:
1882 /* This tries to form the 'address' of a function relative
1883 to the GOT. For global symbols we need a PLT entry. */
1884 if (h != NULL)
1885 {
1886 h->needs_plt = 1;
1887 h->plt.refcount += 1;
1888 }
1889 goto create_got;
1890
1891 case R_X86_64_SIZE32:
1892 case R_X86_64_SIZE64:
1893 size_reloc = TRUE;
1894 goto do_size;
1895
1896 case R_X86_64_32:
1897 if (!ABI_64_P (abfd))
1898 goto pointer;
1899 case R_X86_64_8:
1900 case R_X86_64_16:
1901 case R_X86_64_32S:
1902 /* Let's help debug shared library creation. These relocs
1903 cannot be used in shared libs. Don't error out for
1904 sections we don't care about, such as debug sections or
1905 non-constant sections. */
1906 if (info->shared
1907 && (sec->flags & SEC_ALLOC) != 0
1908 && (sec->flags & SEC_READONLY) != 0)
1909 {
1910 if (h)
1911 name = h->root.root.string;
1912 else
1913 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1914 (*_bfd_error_handler)
1915 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
1916 abfd, x86_64_elf_howto_table[r_type].name, name);
1917 bfd_set_error (bfd_error_bad_value);
1918 return FALSE;
1919 }
1920 /* Fall through. */
1921
1922 case R_X86_64_PC8:
1923 case R_X86_64_PC16:
1924 case R_X86_64_PC32:
1925 case R_X86_64_PC32_BND:
1926 case R_X86_64_PC64:
1927 case R_X86_64_64:
1928 pointer:
1929 if (h != NULL && info->executable)
1930 {
1931 /* If this reloc is in a read-only section, we might
1932 need a copy reloc. We can't check reliably at this
1933 stage whether the section is read-only, as input
1934 sections have not yet been mapped to output sections.
1935 Tentatively set the flag for now, and correct in
1936 adjust_dynamic_symbol. */
1937 h->non_got_ref = 1;
1938
1939 /* We may need a .plt entry if the function this reloc
1940 refers to is in a shared lib. */
1941 h->plt.refcount += 1;
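/* A non-PC-relative reference may be taking the function's address, so pointer equality must be preserved, i.e. the PLT entry cannot simply stand in for the symbol.  */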
1942 if (r_type != R_X86_64_PC32
1943 && r_type != R_X86_64_PC32_BND
1944 && r_type != R_X86_64_PC64)
1945 h->pointer_equality_needed = 1;
1946 }
1947
1948 size_reloc = FALSE;
1949 do_size:
1950 /* If we are creating a shared library, and this is a reloc
1951 against a global symbol, or a non PC relative reloc
1952 against a local symbol, then we need to copy the reloc
1953 into the shared library. However, if we are linking with
1954 -Bsymbolic, we do not need to copy a reloc against a
1955 global symbol which is defined in an object we are
1956 including in the link (i.e., DEF_REGULAR is set). At
1957 this point we have not seen all the input files, so it is
1958 possible that DEF_REGULAR is not set now but will be set
1959 later (it is never cleared). In case of a weak definition,
1960 DEF_REGULAR may be cleared later by a strong definition in
1961 a shared library. We account for that possibility below by
1962 storing information in the dyn_relocs field of the hash
1963 table entry. A similar situation occurs when creating
1964 shared libraries and symbol visibility changes render the
1965 symbol local.
1966
1967 If on the other hand, we are creating an executable, we
1968 may need to keep relocations for symbols satisfied by a
1969 dynamic library if we manage to avoid copy relocs for the
1970 symbol. */
1971 if ((info->shared
1972 && (sec->flags & SEC_ALLOC) != 0
1973 && (! IS_X86_64_PCREL_TYPE (r_type)
1974 || (h != NULL
1975 && (! SYMBOLIC_BIND (info, h)
1976 || h->root.type == bfd_link_hash_defweak
1977 || !h->def_regular))))
1978 || (ELIMINATE_COPY_RELOCS
1979 && !info->shared
1980 && (sec->flags & SEC_ALLOC) != 0
1981 && h != NULL
1982 && (h->root.type == bfd_link_hash_defweak
1983 || !h->def_regular)))
1984 {
1985 struct elf_dyn_relocs *p;
1986 struct elf_dyn_relocs **head;
1987
1988 /* We must copy these reloc types into the output file.
1989 Create a reloc section in dynobj and make room for
1990 this reloc. */
1991 if (sreloc == NULL)
1992 {
1993 if (htab->elf.dynobj == NULL)
1994 htab->elf.dynobj = abfd;
1995
1996 sreloc = _bfd_elf_make_dynamic_reloc_section
1997 (sec, htab->elf.dynobj, ABI_64_P (abfd) ? 3 : 2,
1998 abfd, /*rela?*/ TRUE);
1999
2000 if (sreloc == NULL)
2001 return FALSE;
2002 }
2003
2004 /* If this is a global symbol, we count the number of
2005 relocations we need for this symbol. */
2006 if (h != NULL)
2007 {
2008 head = &((struct elf_x86_64_link_hash_entry *) h)->dyn_relocs;
2009 }
2010 else
2011 {
2012 /* Track dynamic relocs needed for local syms too.
2013 We really need local syms available to do this
2014 easily. Oh well. */
2015 asection *s;
2016 void **vpp;
2017
2018 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2019 abfd, r_symndx);
2020 if (isym == NULL)
2021 return FALSE;
2022
2023 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
2024 if (s == NULL)
2025 s = sec;
2026
2027 /* Beware of type punned pointers vs strict aliasing
2028 rules. */
2029 vpp = &(elf_section_data (s)->local_dynrel);
2030 head = (struct elf_dyn_relocs **)vpp;
2031 }
2032
2033 p = *head;
2034 if (p == NULL || p->sec != sec)
2035 {
2036 bfd_size_type amt = sizeof *p;
2037
2038 p = ((struct elf_dyn_relocs *)
2039 bfd_alloc (htab->elf.dynobj, amt));
2040 if (p == NULL)
2041 return FALSE;
2042 p->next = *head;
2043 *head = p;
2044 p->sec = sec;
2045 p->count = 0;
2046 p->pc_count = 0;
2047 }
2048
2049 p->count += 1;
2050 /* Count size relocation as PC-relative relocation. */
2051 if (IS_X86_64_PCREL_TYPE (r_type) || size_reloc)
2052 p->pc_count += 1;
2053 }
2054 break;
2055
2056 /* This relocation describes the C++ object vtable hierarchy.
2057 Reconstruct it for later use during GC. */
2058 case R_X86_64_GNU_VTINHERIT:
2059 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
2060 return FALSE;
2061 break;
2062
2063 /* This relocation describes which C++ vtable entries are actually
2064 used. Record for later use during GC. */
2065 case R_X86_64_GNU_VTENTRY:
2066 BFD_ASSERT (h != NULL);
2067 if (h != NULL
2068 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
2069 return FALSE;
2070 break;
2071
2072 default:
2073 break;
2074 }
2075
2076 if (use_plt_got
2077 && h != NULL
2078 && h->plt.refcount > 0
2079 && h->got.refcount > 0
2080 && htab->plt_got == NULL)
2081 {
2082 /* Create the GOT procedure linkage table. */
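/* A symbol with both PLT and GOT references can use a .plt.got entry that jumps indirectly through its regular GOT slot, avoiding a separate .got.plt slot and lazy-binding relocation.  */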
2083 unsigned int plt_got_align;
2084 const struct elf_backend_data *bed;
2085
2086 bed = get_elf_backend_data (info->output_bfd);
2087 BFD_ASSERT (sizeof (elf_x86_64_legacy_plt2_entry) == 8
2088 && (sizeof (elf_x86_64_bnd_plt2_entry)
2089 == sizeof (elf_x86_64_legacy_plt2_entry)));
2090 plt_got_align = 3;
2091
2092 if (htab->elf.dynobj == NULL)
2093 htab->elf.dynobj = abfd;
2094 htab->plt_got
2095 = bfd_make_section_anyway_with_flags (htab->elf.dynobj,
2096 ".plt.got",
2097 (bed->dynamic_sec_flags
2098 | SEC_ALLOC
2099 | SEC_CODE
2100 | SEC_LOAD
2101 | SEC_READONLY));
2102 if (htab->plt_got == NULL
2103 || !bfd_set_section_alignment (htab->elf.dynobj,
2104 htab->plt_got,
2105 plt_got_align))
2106 return FALSE;
2107 }
2108 }
2109
2110 return TRUE;
2111 }
2112
2113 /* Return the section that should be marked against GC for a given
2114 relocation. */
2115
2116 static asection *
2117 elf_x86_64_gc_mark_hook (asection *sec,
2118 struct bfd_link_info *info,
2119 Elf_Internal_Rela *rel,
2120 struct elf_link_hash_entry *h,
2121 Elf_Internal_Sym *sym)
2122 {
2123 if (h != NULL)
2124 switch (ELF32_R_TYPE (rel->r_info))
2125 {
2126 case R_X86_64_GNU_VTINHERIT:
2127 case R_X86_64_GNU_VTENTRY:
2128 return NULL;
2129 }
2130
2131 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
2132 }
2133
2134 /* Update the got entry reference counts for the section being removed. */
2135
2136 static bfd_boolean
2137 elf_x86_64_gc_sweep_hook (bfd *abfd, struct bfd_link_info *info,
2138 asection *sec,
2139 const Elf_Internal_Rela *relocs)
2140 {
2141 struct elf_x86_64_link_hash_table *htab;
2142 Elf_Internal_Shdr *symtab_hdr;
2143 struct elf_link_hash_entry **sym_hashes;
2144 bfd_signed_vma *local_got_refcounts;
2145 const Elf_Internal_Rela *rel, *relend;
2146
2147 if (info->relocatable)
2148 return TRUE;
2149
2150 htab = elf_x86_64_hash_table (info);
2151 if (htab == NULL)
2152 return FALSE;
2153
2154 elf_section_data (sec)->local_dynrel = NULL;
2155
2156 symtab_hdr = &elf_symtab_hdr (abfd);
2157 sym_hashes = elf_sym_hashes (abfd);
2158 local_got_refcounts = elf_local_got_refcounts (abfd);
2159
2160 htab = elf_x86_64_hash_table (info);
2161 relend = relocs + sec->reloc_count;
2162 for (rel = relocs; rel < relend; rel++)
2163 {
2164 unsigned long r_symndx;
2165 unsigned int r_type;
2166 struct elf_link_hash_entry *h = NULL;
2167
2168 r_symndx = htab->r_sym (rel->r_info);
2169 if (r_symndx >= symtab_hdr->sh_info)
2170 {
2171 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
2172 while (h->root.type == bfd_link_hash_indirect
2173 || h->root.type == bfd_link_hash_warning)
2174 h = (struct elf_link_hash_entry *) h->root.u.i.link;
2175 }
2176 else
2177 {
2178 /* A local symbol. */
2179 Elf_Internal_Sym *isym;
2180
2181 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2182 abfd, r_symndx);
2183
2184 /* Check relocation against local STT_GNU_IFUNC symbol. */
2185 if (isym != NULL
2186 && ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
2187 {
2188 h = elf_x86_64_get_local_sym_hash (htab, abfd, rel, FALSE);
2189 if (h == NULL)
2190 abort ();
2191 }
2192 }
2193
2194 if (h)
2195 {
2196 struct elf_x86_64_link_hash_entry *eh;
2197 struct elf_dyn_relocs **pp;
2198 struct elf_dyn_relocs *p;
2199
2200 eh = (struct elf_x86_64_link_hash_entry *) h;
2201
2202 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; pp = &p->next)
2203 if (p->sec == sec)
2204 {
2205 /* Everything must go for SEC. */
2206 *pp = p->next;
2207 break;
2208 }
2209 }
2210
2211 r_type = ELF32_R_TYPE (rel->r_info);
2212 if (! elf_x86_64_tls_transition (info, abfd, sec, NULL,
2213 symtab_hdr, sym_hashes,
2214 &r_type, GOT_UNKNOWN,
2215 rel, relend, h, r_symndx))
2216 return FALSE;
2217
2218 switch (r_type)
2219 {
2220 case R_X86_64_TLSLD:
2221 if (htab->tls_ld_got.refcount > 0)
2222 htab->tls_ld_got.refcount -= 1;
2223 break;
2224
2225 case R_X86_64_TLSGD:
2226 case R_X86_64_GOTPC32_TLSDESC:
2227 case R_X86_64_TLSDESC_CALL:
2228 case R_X86_64_GOTTPOFF:
2229 case R_X86_64_GOT32:
2230 case R_X86_64_GOTPCREL:
2231 case R_X86_64_GOT64:
2232 case R_X86_64_GOTPCREL64:
2233 case R_X86_64_GOTPLT64:
2234 if (h != NULL)
2235 {
2236 if (h->got.refcount > 0)
2237 h->got.refcount -= 1;
2238 if (h->type == STT_GNU_IFUNC)
2239 {
2240 if (h->plt.refcount > 0)
2241 h->plt.refcount -= 1;
2242 }
2243 }
2244 else if (local_got_refcounts != NULL)
2245 {
2246 if (local_got_refcounts[r_symndx] > 0)
2247 local_got_refcounts[r_symndx] -= 1;
2248 }
2249 break;
2250
2251 case R_X86_64_8:
2252 case R_X86_64_16:
2253 case R_X86_64_32:
2254 case R_X86_64_64:
2255 case R_X86_64_32S:
2256 case R_X86_64_PC8:
2257 case R_X86_64_PC16:
2258 case R_X86_64_PC32:
2259 case R_X86_64_PC32_BND:
2260 case R_X86_64_PC64:
2261 case R_X86_64_SIZE32:
2262 case R_X86_64_SIZE64:
2263 if (info->shared
2264 && (h == NULL || h->type != STT_GNU_IFUNC))
2265 break;
2266 /* Fall thru */
2267
2268 case R_X86_64_PLT32:
2269 case R_X86_64_PLT32_BND:
2270 case R_X86_64_PLTOFF64:
2271 if (h != NULL)
2272 {
2273 if (h->plt.refcount > 0)
2274 h->plt.refcount -= 1;
2275 }
2276 break;
2277
2278 default:
2279 break;
2280 }
2281 }
2282
2283 return TRUE;
2284 }
2285
2286 /* Adjust a symbol defined by a dynamic object and referenced by a
2287 regular object. The current definition is in some section of the
2288 dynamic object, but we're not including those sections. We have to
2289 change the definition to something the rest of the link can
2290 understand. */
2291
2292 static bfd_boolean
2293 elf_x86_64_adjust_dynamic_symbol (struct bfd_link_info *info,
2294 struct elf_link_hash_entry *h)
2295 {
2296 struct elf_x86_64_link_hash_table *htab;
2297 asection *s;
2298 struct elf_x86_64_link_hash_entry *eh;
2299 struct elf_dyn_relocs *p;
2300
2301 /* STT_GNU_IFUNC symbol must go through PLT. */
2302 if (h->type == STT_GNU_IFUNC)
2303 {
2304 /* All local STT_GNU_IFUNC references must be treated as local
2305 calls via local PLT. */
2306 if (h->ref_regular
2307 && SYMBOL_CALLS_LOCAL (info, h))
2308 {
2309 bfd_size_type pc_count = 0, count = 0;
2310 struct elf_dyn_relocs **pp;
2311
2312 eh = (struct elf_x86_64_link_hash_entry *) h;
2313 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
2314 {
2315 pc_count += p->pc_count;
2316 p->count -= p->pc_count;
2317 p->pc_count = 0;
2318 count += p->count;
2319 if (p->count == 0)
2320 *pp = p->next;
2321 else
2322 pp = &p->next;
2323 }
2324
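/* Any remaining reference means this local ifunc needs a PLT entry; the PC-relative relocs were dropped above because they resolve directly through that PLT.  */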
2325 if (pc_count || count)
2326 {
2327 h->needs_plt = 1;
2328 h->non_got_ref = 1;
2329 if (h->plt.refcount <= 0)
2330 h->plt.refcount = 1;
2331 else
2332 h->plt.refcount += 1;
2333 }
2334 }
2335
2336 if (h->plt.refcount <= 0)
2337 {
2338 h->plt.offset = (bfd_vma) -1;
2339 h->needs_plt = 0;
2340 }
2341 return TRUE;
2342 }
2343
2344 /* If this is a function, put it in the procedure linkage table. We
2345 will fill in the contents of the procedure linkage table later,
2346 when we know the address of the .got section. */
2347 if (h->type == STT_FUNC
2348 || h->needs_plt)
2349 {
2350 if (h->plt.refcount <= 0
2351 || SYMBOL_CALLS_LOCAL (info, h)
2352 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
2353 && h->root.type == bfd_link_hash_undefweak))
2354 {
2355 /* This case can occur if we saw a PLT32 reloc in an input
2356 file, but the symbol was never referred to by a dynamic
2357 object, or if all references were garbage collected. In
2358 such a case, we don't actually need to build a procedure
2359 linkage table, and we can just do a PC32 reloc instead. */
2360 h->plt.offset = (bfd_vma) -1;
2361 h->needs_plt = 0;
2362 }
2363
2364 return TRUE;
2365 }
2366 else
2367 /* It's possible that we incorrectly decided a .plt reloc was
2368 needed for an R_X86_64_PC32 reloc to a non-function sym in
2369 check_relocs. We can't decide accurately between function and
2370 non-function syms in check_relocs; objects loaded later in
2371 the link may change h->type. So fix it now. */
2372 h->plt.offset = (bfd_vma) -1;
2373
2374 /* If this is a weak symbol, and there is a real definition, the
2375 processor independent code will have arranged for us to see the
2376 real definition first, and we can just use the same value. */
2377 if (h->u.weakdef != NULL)
2378 {
2379 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
2380 || h->u.weakdef->root.type == bfd_link_hash_defweak);
2381 h->root.u.def.section = h->u.weakdef->root.u.def.section;
2382 h->root.u.def.value = h->u.weakdef->root.u.def.value;
2383 if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
2384 {
2385 eh = (struct elf_x86_64_link_hash_entry *) h;
2386 h->non_got_ref = h->u.weakdef->non_got_ref;
2387 eh->needs_copy = h->u.weakdef->needs_copy;
2388 }
2389 return TRUE;
2390 }
2391
2392 /* This is a reference to a symbol defined by a dynamic object which
2393 is not a function. */
2394
2395 /* If we are creating a shared library, we must presume that the
2396 only references to the symbol are via the global offset table.
2397 For such cases we need not do anything here; the relocations will
2398 be handled correctly by relocate_section. */
2399 if (!info->executable)
2400 return TRUE;
2401
2402 /* If there are no references to this symbol that do not use the
2403 GOT, we don't need to generate a copy reloc. */
2404 if (!h->non_got_ref)
2405 return TRUE;
2406
2407 /* If -z nocopyreloc was given, we won't generate them either. */
2408 if (info->nocopyreloc)
2409 {
2410 h->non_got_ref = 0;
2411 return TRUE;
2412 }
2413
2414 if (ELIMINATE_COPY_RELOCS)
2415 {
2416 eh = (struct elf_x86_64_link_hash_entry *) h;
2417 for (p = eh->dyn_relocs; p != NULL; p = p->next)
2418 {
2419 s = p->sec->output_section;
2420 if (s != NULL && (s->flags & SEC_READONLY) != 0)
2421 break;
2422 }
2423
2424 /* If we didn't find any dynamic relocs in read-only sections, then
2425 we'll be keeping the dynamic relocs and avoiding the copy reloc. */
2426 if (p == NULL)
2427 {
2428 h->non_got_ref = 0;
2429 return TRUE;
2430 }
2431 }
2432
2433 /* We must allocate the symbol in our .dynbss section, which will
2434 become part of the .bss section of the executable. There will be
2435 an entry for this symbol in the .dynsym section. The dynamic
2436 object will contain position independent code, so all references
2437 from the dynamic object to this symbol will go through the global
2438 offset table. The dynamic linker will use the .dynsym entry to
2439 determine the address it must put in the global offset table, so
2440 both the dynamic object and the regular object will refer to the
2441 same memory location for the variable. */
2442
2443 htab = elf_x86_64_hash_table (info);
2444 if (htab == NULL)
2445 return FALSE;
2446
2447 /* We must generate a R_X86_64_COPY reloc to tell the dynamic linker
2448 to copy the initial value out of the dynamic object and into the
2449 runtime process image. */
2450 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
2451 {
2452 const struct elf_backend_data *bed;
2453 bed = get_elf_backend_data (info->output_bfd);
2454 htab->srelbss->size += bed->s->sizeof_rela;
2455 h->needs_copy = 1;
2456 }
2457
2458 s = htab->sdynbss;
2459
2460 return _bfd_elf_adjust_dynamic_copy (info, h, s);
2461 }
2462
2463 /* Allocate space in .plt, .got and associated reloc sections for
2464 dynamic relocs. */
2465
2466 static bfd_boolean
2467 elf_x86_64_allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf)
2468 {
2469 struct bfd_link_info *info;
2470 struct elf_x86_64_link_hash_table *htab;
2471 struct elf_x86_64_link_hash_entry *eh;
2472 struct elf_dyn_relocs *p;
2473 const struct elf_backend_data *bed;
2474 unsigned int plt_entry_size;
2475
2476 if (h->root.type == bfd_link_hash_indirect)
2477 return TRUE;
2478
2479 eh = (struct elf_x86_64_link_hash_entry *) h;
2480
2481 info = (struct bfd_link_info *) inf;
2482 htab = elf_x86_64_hash_table (info);
2483 if (htab == NULL)
2484 return FALSE;
2485 bed = get_elf_backend_data (info->output_bfd);
2486 plt_entry_size = GET_PLT_ENTRY_SIZE (info->output_bfd);
2487
2488 /* We can't use the GOT PLT if pointer equality is needed since
2489 finish_dynamic_symbol won't clear the symbol value and the dynamic
2490 linker won't update the GOT slot. We will get into an infinite
2491 loop at run-time. */
2492 if (htab->plt_got != NULL
2493 && h->type != STT_GNU_IFUNC
2494 && !h->pointer_equality_needed
2495 && h->plt.refcount > 0
2496 && h->got.refcount > 0)
2497 {
2498 /* Don't use the regular PLT if there are both GOT and GOTPLT
2499 relocations. */
2500 h->plt.offset = (bfd_vma) -1;
2501
2502 /* Use the GOT PLT. */
2503 eh->plt_got.refcount = 1;
2504 }
2505
2506 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle it
2507 here if it is defined and referenced in a non-shared object. */
2508 if (h->type == STT_GNU_IFUNC
2509 && h->def_regular)
2510 {
2511 if (_bfd_elf_allocate_ifunc_dyn_relocs (info, h,
2512 &eh->dyn_relocs,
2513 plt_entry_size,
2514 plt_entry_size,
2515 GOT_ENTRY_SIZE))
2516 {
2517 asection *s = htab->plt_bnd;
2518 if (h->plt.offset != (bfd_vma) -1 && s != NULL)
2519 {
2520 /* Use the .plt.bnd section if it is created. */
2521 eh->plt_bnd.offset = s->size;
2522
2523 /* Make room for this entry in the .plt.bnd section. */
2524 s->size += sizeof (elf_x86_64_legacy_plt2_entry);
2525 }
2526
2527 return TRUE;
2528 }
2529 else
2530 return FALSE;
2531 }
2532 else if (htab->elf.dynamic_sections_created
2533 && (h->plt.refcount > 0 || eh->plt_got.refcount > 0))
2534 {
2535 bfd_boolean use_plt_got = eh->plt_got.refcount > 0;
2536
2537 /* Make sure this symbol is output as a dynamic symbol.
2538 Undefined weak syms won't yet be marked as dynamic. */
2539 if (h->dynindx == -1
2540 && !h->forced_local)
2541 {
2542 if (! bfd_elf_link_record_dynamic_symbol (info, h))
2543 return FALSE;
2544 }
2545
2546 if (info->shared
2547 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
2548 {
2549 asection *s = htab->elf.splt;
2550 asection *bnd_s = htab->plt_bnd;
2551 asection *got_s = htab->plt_got;
2552
2553 /* If this is the first .plt entry, make room for the special
2554 first entry. */
2555 if (s->size == 0)
2556 s->size = plt_entry_size;
2557
2558 if (use_plt_got)
2559 eh->plt_got.offset = got_s->size;
2560 else
2561 {
2562 h->plt.offset = s->size;
2563 if (bnd_s)
2564 eh->plt_bnd.offset = bnd_s->size;
2565 }
2566
2567 /* If this symbol is not defined in a regular file, and we are
2568 not generating a shared library, then set the symbol to this
2569 location in the .plt. This is required to make function
2570 pointers compare as equal between the normal executable and
2571 the shared library. */
2572 if (! info->shared
2573 && !h->def_regular)
2574 {
2575 if (use_plt_got)
2576 {
2577 /* We need to make a call to the entry of the GOT PLT
2578 instead of the regular PLT entry. */
2579 h->root.u.def.section = got_s;
2580 h->root.u.def.value = eh->plt_got.offset;
2581 }
2582 else
2583 {
2584 if (bnd_s)
2585 {
2586 /* We need to make a call to the entry of the second
2587 PLT instead of the regular PLT entry. */
2588 h->root.u.def.section = bnd_s;
2589 h->root.u.def.value = eh->plt_bnd.offset;
2590 }
2591 else
2592 {
2593 h->root.u.def.section = s;
2594 h->root.u.def.value = h->plt.offset;
2595 }
2596 }
2597 }
2598
2599 /* Make room for this entry. */
2600 if (use_plt_got)
2601 got_s->size += sizeof (elf_x86_64_legacy_plt2_entry);
2602 else
2603 {
2604 s->size += plt_entry_size;
2605 if (bnd_s)
2606 bnd_s->size += sizeof (elf_x86_64_legacy_plt2_entry);
2607
2608 /* We also need to make an entry in the .got.plt section,
2609 which will be placed in the .got section by the linker
2610 script. */
2611 htab->elf.sgotplt->size += GOT_ENTRY_SIZE;
2612
2613 /* We also need to make an entry in the .rela.plt
2614 section. */
2615 htab->elf.srelplt->size += bed->s->sizeof_rela;
2616 htab->elf.srelplt->reloc_count++;
2617 }
2618 }
2619 else
2620 {
2621 h->plt.offset = (bfd_vma) -1;
2622 h->needs_plt = 0;
2623 }
2624 }
2625 else
2626 {
2627 h->plt.offset = (bfd_vma) -1;
2628 h->needs_plt = 0;
2629 }
2630
2631 eh->tlsdesc_got = (bfd_vma) -1;
2632
2633 /* If an R_X86_64_GOTTPOFF symbol is now local to the binary,
2634 make it an R_X86_64_TPOFF32 requiring no GOT entry. */
2635 if (h->got.refcount > 0
2636 && info->executable
2637 && h->dynindx == -1
2638 && elf_x86_64_hash_entry (h)->tls_type == GOT_TLS_IE)
2639 {
2640 h->got.offset = (bfd_vma) -1;
2641 }
2642 else if (h->got.refcount > 0)
2643 {
2644 asection *s;
2645 bfd_boolean dyn;
2646 int tls_type = elf_x86_64_hash_entry (h)->tls_type;
2647
2648 /* Make sure this symbol is output as a dynamic symbol.
2649 Undefined weak syms won't yet be marked as dynamic. */
2650 if (h->dynindx == -1
2651 && !h->forced_local)
2652 {
2653 if (! bfd_elf_link_record_dynamic_symbol (info, h))
2654 return FALSE;
2655 }
2656
2657 if (GOT_TLS_GDESC_P (tls_type))
2658 {
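/* TLS descriptors live in the .got.plt section; record the descriptor's offset and mark h->got.offset with (bfd_vma) -2 so that a symbol needing only a descriptor gets no regular .got slot.  */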
2659 eh->tlsdesc_got = htab->elf.sgotplt->size
2660 - elf_x86_64_compute_jump_table_size (htab);
2661 htab->elf.sgotplt->size += 2 * GOT_ENTRY_SIZE;
2662 h->got.offset = (bfd_vma) -2;
2663 }
2664 if (! GOT_TLS_GDESC_P (tls_type)
2665 || GOT_TLS_GD_P (tls_type))
2666 {
2667 s = htab->elf.sgot;
2668 h->got.offset = s->size;
2669 s->size += GOT_ENTRY_SIZE;
2670 if (GOT_TLS_GD_P (tls_type))
2671 s->size += GOT_ENTRY_SIZE;
2672 }
2673 dyn = htab->elf.dynamic_sections_created;
2674 /* R_X86_64_TLSGD needs one dynamic relocation if local symbol
2675 and two if global.
2676 R_X86_64_GOTTPOFF needs one dynamic relocation. */
2677 if ((GOT_TLS_GD_P (tls_type) && h->dynindx == -1)
2678 || tls_type == GOT_TLS_IE)
2679 htab->elf.srelgot->size += bed->s->sizeof_rela;
2680 else if (GOT_TLS_GD_P (tls_type))
2681 htab->elf.srelgot->size += 2 * bed->s->sizeof_rela;
2682 else if (! GOT_TLS_GDESC_P (tls_type)
2683 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
2684 || h->root.type != bfd_link_hash_undefweak)
2685 && (info->shared
2686 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
2687 htab->elf.srelgot->size += bed->s->sizeof_rela;
2688 if (GOT_TLS_GDESC_P (tls_type))
2689 {
2690 htab->elf.srelplt->size += bed->s->sizeof_rela;
2691 htab->tlsdesc_plt = (bfd_vma) -1;
2692 }
2693 }
2694 else
2695 h->got.offset = (bfd_vma) -1;
2696
2697 if (eh->dyn_relocs == NULL)
2698 return TRUE;
2699
2700 /* In the shared -Bsymbolic case, discard space allocated for
2701 dynamic pc-relative relocs against symbols which turn out to be
2702 defined in regular objects. For the normal shared case, discard
2703 space for pc-relative relocs that have become local due to symbol
2704 visibility changes. */
2705
2706 if (info->shared)
2707 {
2708 /* Relocs that use pc_count are those that appear on a call
2709 insn, or certain REL relocs that can be generated via assembly.
2710 We want calls to protected symbols to resolve directly to the
2711 function rather than going via the plt. If people want
2712 function pointer comparisons to work as expected then they
2713 should avoid writing weird assembly. */
2714 if (SYMBOL_CALLS_LOCAL (info, h))
2715 {
2716 struct elf_dyn_relocs **pp;
2717
2718 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
2719 {
2720 p->count -= p->pc_count;
2721 p->pc_count = 0;
2722 if (p->count == 0)
2723 *pp = p->next;
2724 else
2725 pp = &p->next;
2726 }
2727 }
2728
2729 /* Also discard relocs on undefined weak syms with non-default
2730 visibility. */
2731 if (eh->dyn_relocs != NULL)
2732 {
2733 if (h->root.type == bfd_link_hash_undefweak)
2734 {
2735 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
2736 eh->dyn_relocs = NULL;
2737
2738 /* Make sure undefined weak symbols are output as a dynamic
2739 symbol in PIEs. */
2740 else if (h->dynindx == -1
2741 && ! h->forced_local
2742 && ! bfd_elf_link_record_dynamic_symbol (info, h))
2743 return FALSE;
2744 }
2745 /* For PIE, discard space for pc-relative relocs against
2746 symbols which turn out to need copy relocs. */
2747 else if (info->executable
2748 && (h->needs_copy || eh->needs_copy)
2749 && h->def_dynamic
2750 && !h->def_regular)
2751 {
2752 struct elf_dyn_relocs **pp;
2753
2754 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
2755 {
2756 if (p->pc_count != 0)
2757 *pp = p->next;
2758 else
2759 pp = &p->next;
2760 }
2761 }
2762 }
2763 }
2764 else if (ELIMINATE_COPY_RELOCS)
2765 {
2766 /* For the non-shared case, discard space for relocs against
2767 symbols which turn out to need copy relocs or are not
2768 dynamic. */
2769
2770 if (!h->non_got_ref
2771 && ((h->def_dynamic
2772 && !h->def_regular)
2773 || (htab->elf.dynamic_sections_created
2774 && (h->root.type == bfd_link_hash_undefweak
2775 || h->root.type == bfd_link_hash_undefined))))
2776 {
2777 /* Make sure this symbol is output as a dynamic symbol.
2778 Undefined weak syms won't yet be marked as dynamic. */
2779 if (h->dynindx == -1
2780 && ! h->forced_local
2781 && ! bfd_elf_link_record_dynamic_symbol (info, h))
2782 return FALSE;
2783
2784 /* If that succeeded, we know we'll be keeping all the
2785 relocs. */
2786 if (h->dynindx != -1)
2787 goto keep;
2788 }
2789
2790 eh->dyn_relocs = NULL;
2791
2792 keep: ;
2793 }
2794
2795 /* Finally, allocate space. */
2796 for (p = eh->dyn_relocs; p != NULL; p = p->next)
2797 {
2798 asection * sreloc;
2799
2800 sreloc = elf_section_data (p->sec)->sreloc;
2801
2802 BFD_ASSERT (sreloc != NULL);
2803
2804 sreloc->size += p->count * bed->s->sizeof_rela;
2805 }
2806
2807 return TRUE;
2808 }
2809
2810 /* Allocate space in .plt, .got and associated reloc sections for
2811 local dynamic relocs. */
2812
2813 static bfd_boolean
2814 elf_x86_64_allocate_local_dynrelocs (void **slot, void *inf)
2815 {
2816 struct elf_link_hash_entry *h
2817 = (struct elf_link_hash_entry *) *slot;
2818
2819 if (h->type != STT_GNU_IFUNC
2820 || !h->def_regular
2821 || !h->ref_regular
2822 || !h->forced_local
2823 || h->root.type != bfd_link_hash_defined)
2824 abort ();
2825
2826 return elf_x86_64_allocate_dynrelocs (h, inf);
2827 }
2828
2829 /* Find any dynamic relocs that apply to read-only sections. */
2830
2831 static bfd_boolean
2832 elf_x86_64_readonly_dynrelocs (struct elf_link_hash_entry *h,
2833 void * inf)
2834 {
2835 struct elf_x86_64_link_hash_entry *eh;
2836 struct elf_dyn_relocs *p;
2837
2838 /* Skip local IFUNC symbols. */
2839 if (h->forced_local && h->type == STT_GNU_IFUNC)
2840 return TRUE;
2841
2842 eh = (struct elf_x86_64_link_hash_entry *) h;
2843 for (p = eh->dyn_relocs; p != NULL; p = p->next)
2844 {
2845 asection *s = p->sec->output_section;
2846
2847 if (s != NULL && (s->flags & SEC_READONLY) != 0)
2848 {
2849 struct bfd_link_info *info = (struct bfd_link_info *) inf;
2850
2851 info->flags |= DF_TEXTREL;
2852
2853 if (info->warn_shared_textrel && info->shared)
2854 info->callbacks->einfo (_("%P: %B: warning: relocation against `%s' in readonly section `%A'.\n"),
2855 p->sec->owner, h->root.root.string,
2856 p->sec);
2857
2858 /* Not an error, just cut short the traversal. */
2859 return FALSE;
2860 }
2861 }
2862 return TRUE;
2863 }
2864
2865 /* Convert
2866 mov foo@GOTPCREL(%rip), %reg
2867 to
2868 lea foo(%rip), %reg
2869 when the symbol foo resolves locally. */
2870
2871 static bfd_boolean
2872 elf_x86_64_convert_mov_to_lea (bfd *abfd, asection *sec,
2873 struct bfd_link_info *link_info)
2874 {
2875 Elf_Internal_Shdr *symtab_hdr;
2876 Elf_Internal_Rela *internal_relocs;
2877 Elf_Internal_Rela *irel, *irelend;
2878 bfd_byte *contents;
2879 struct elf_x86_64_link_hash_table *htab;
2880 bfd_boolean changed_contents;
2881 bfd_boolean changed_relocs;
2882 bfd_signed_vma *local_got_refcounts;
2883
2884 /* Don't even try to convert non-ELF outputs. */
2885 if (!is_elf_hash_table (link_info->hash))
2886 return FALSE;
2887
2888 /* Nothing to do if there is no code, no relocations or no output. */
2889 if ((sec->flags & (SEC_CODE | SEC_RELOC)) != (SEC_CODE | SEC_RELOC)
2890 || sec->reloc_count == 0
2891 || bfd_is_abs_section (sec->output_section))
2892 return TRUE;
2893
2894 symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
2895
2896 /* Load the relocations for this section. */
2897 internal_relocs = (_bfd_elf_link_read_relocs
2898 (abfd, sec, NULL, (Elf_Internal_Rela *) NULL,
2899 link_info->keep_memory));
2900 if (internal_relocs == NULL)
2901 return FALSE;
2902
2903 htab = elf_x86_64_hash_table (link_info);
2904 changed_contents = FALSE;
2905 changed_relocs = FALSE;
2906 local_got_refcounts = elf_local_got_refcounts (abfd);
2907
2908 /* Get the section contents. */
2909 if (elf_section_data (sec)->this_hdr.contents != NULL)
2910 contents = elf_section_data (sec)->this_hdr.contents;
2911 else
2912 {
2913 if (!bfd_malloc_and_get_section (abfd, sec, &contents))
2914 goto error_return;
2915 }
2916
2917 irelend = internal_relocs + sec->reloc_count;
2918 for (irel = internal_relocs; irel < irelend; irel++)
2919 {
2920 unsigned int r_type = ELF32_R_TYPE (irel->r_info);
2921 unsigned int r_symndx = htab->r_sym (irel->r_info);
2922 unsigned int indx;
2923 struct elf_link_hash_entry *h;
2924
2925 if (r_type != R_X86_64_GOTPCREL)
2926 continue;
2927
2928 /* Get the symbol referred to by the reloc. */
2929 if (r_symndx < symtab_hdr->sh_info)
2930 {
2931 Elf_Internal_Sym *isym;
2932
2933 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2934 abfd, r_symndx);
2935
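/* The conversion only rewrites the opcode byte: 0x8b (mov) becomes 0x8d (lea), keeping the same ModRM byte and 32-bit PC-relative displacement, so only the relocation type changes from GOTPCREL to PC32.  */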
2936 /* STT_GNU_IFUNC must keep R_X86_64_GOTPCREL relocation. */
2937 if (ELF_ST_TYPE (isym->st_info) != STT_GNU_IFUNC
2938 && irel->r_offset >= 2
2939 && bfd_get_8 (input_bfd,
2940 contents + irel->r_offset - 2) == 0x8b)
2941 {
2942 bfd_put_8 (output_bfd, 0x8d,
2943 contents + irel->r_offset - 2);
2944 irel->r_info = htab->r_info (r_symndx, R_X86_64_PC32);
2945 if (local_got_refcounts != NULL
2946 && local_got_refcounts[r_symndx] > 0)
2947 local_got_refcounts[r_symndx] -= 1;
2948 changed_contents = TRUE;
2949 changed_relocs = TRUE;
2950 }
2951 continue;
2952 }
2953
2954 indx = r_symndx - symtab_hdr->sh_info;
2955 h = elf_sym_hashes (abfd)[indx];
2956 BFD_ASSERT (h != NULL);
2957
2958 while (h->root.type == bfd_link_hash_indirect
2959 || h->root.type == bfd_link_hash_warning)
2960 h = (struct elf_link_hash_entry *) h->root.u.i.link;
2961
2962 /* STT_GNU_IFUNC must keep R_X86_64_GOTPCREL relocation. We also
2963 avoid optimizing _DYNAMIC since ld.so may use its link-time
2964 address. */
2965 if (h->def_regular
2966 && h->type != STT_GNU_IFUNC
2967 && h != htab->elf.hdynamic
2968 && SYMBOL_REFERENCES_LOCAL (link_info, h)
2969 && irel->r_offset >= 2
2970 && bfd_get_8 (input_bfd,
2971 contents + irel->r_offset - 2) == 0x8b)
2972 {
2973 bfd_put_8 (output_bfd, 0x8d,
2974 contents + irel->r_offset - 2);
2975 irel->r_info = htab->r_info (r_symndx, R_X86_64_PC32);
2976 if (h->got.refcount > 0)
2977 h->got.refcount -= 1;
2978 changed_contents = TRUE;
2979 changed_relocs = TRUE;
2980 }
2981 }
2982
2983 if (contents != NULL
2984 && elf_section_data (sec)->this_hdr.contents != contents)
2985 {
2986 if (!changed_contents && !link_info->keep_memory)
2987 free (contents);
2988 else
2989 {
2990 /* Cache the section contents for elf_link_input_bfd. */
2991 elf_section_data (sec)->this_hdr.contents = contents;
2992 }
2993 }
2994
2995 if (elf_section_data (sec)->relocs != internal_relocs)
2996 {
2997 if (!changed_relocs)
2998 free (internal_relocs);
2999 else
3000 elf_section_data (sec)->relocs = internal_relocs;
3001 }
3002
3003 return TRUE;
3004
3005 error_return:
3006 if (contents != NULL
3007 && elf_section_data (sec)->this_hdr.contents != contents)
3008 free (contents);
3009 if (internal_relocs != NULL
3010 && elf_section_data (sec)->relocs != internal_relocs)
3011 free (internal_relocs);
3012 return FALSE;
3013 }
3014
3015 /* Set the sizes of the dynamic sections. */
3016
3017 static bfd_boolean
3018 elf_x86_64_size_dynamic_sections (bfd *output_bfd,
3019 struct bfd_link_info *info)
3020 {
3021 struct elf_x86_64_link_hash_table *htab;
3022 bfd *dynobj;
3023 asection *s;
3024 bfd_boolean relocs;
3025 bfd *ibfd;
3026 const struct elf_backend_data *bed;
3027
3028 htab = elf_x86_64_hash_table (info);
3029 if (htab == NULL)
3030 return FALSE;
3031 bed = get_elf_backend_data (output_bfd);
3032
3033 dynobj = htab->elf.dynobj;
3034 if (dynobj == NULL)
3035 abort ();
3036
3037 if (htab->elf.dynamic_sections_created)
3038 {
3039 /* Set the contents of the .interp section to the interpreter. */
3040 if (info->executable)
3041 {
3042 s = bfd_get_linker_section (dynobj, ".interp");
3043 if (s == NULL)
3044 abort ();
3045 s->size = htab->dynamic_interpreter_size;
3046 s->contents = (unsigned char *) htab->dynamic_interpreter;
3047 }
3048 }
3049
3050 /* Set up .got offsets for local syms, and space for local dynamic
3051 relocs. */
3052 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
3053 {
3054 bfd_signed_vma *local_got;
3055 bfd_signed_vma *end_local_got;
3056 char *local_tls_type;
3057 bfd_vma *local_tlsdesc_gotent;
3058 bfd_size_type locsymcount;
3059 Elf_Internal_Shdr *symtab_hdr;
3060 asection *srel;
3061
3062 if (! is_x86_64_elf (ibfd))
3063 continue;
3064
3065 for (s = ibfd->sections; s != NULL; s = s->next)
3066 {
3067 struct elf_dyn_relocs *p;
3068
3069 if (!elf_x86_64_convert_mov_to_lea (ibfd, s, info))
3070 return FALSE;
3071
3072 for (p = (struct elf_dyn_relocs *)
3073 (elf_section_data (s)->local_dynrel);
3074 p != NULL;
3075 p = p->next)
3076 {
3077 if (!bfd_is_abs_section (p->sec)
3078 && bfd_is_abs_section (p->sec->output_section))
3079 {
3080 /* Input section has been discarded, either because
3081 it is a copy of a linkonce section or due to
3082 linker script /DISCARD/, so we'll be discarding
3083 the relocs too. */
3084 }
3085 else if (p->count != 0)
3086 {
3087 srel = elf_section_data (p->sec)->sreloc;
3088 srel->size += p->count * bed->s->sizeof_rela;
3089 if ((p->sec->output_section->flags & SEC_READONLY) != 0
3090 && (info->flags & DF_TEXTREL) == 0)
3091 {
3092 info->flags |= DF_TEXTREL;
3093 if (info->warn_shared_textrel && info->shared)
3094 info->callbacks->einfo (_("%P: %B: warning: relocation in readonly section `%A'.\n"),
3095 p->sec->owner, p->sec);
3096 }
3097 }
3098 }
3099 }
3100
3101 local_got = elf_local_got_refcounts (ibfd);
3102 if (!local_got)
3103 continue;
3104
3105 symtab_hdr = &elf_symtab_hdr (ibfd);
3106 locsymcount = symtab_hdr->sh_info;
3107 end_local_got = local_got + locsymcount;
3108 local_tls_type = elf_x86_64_local_got_tls_type (ibfd);
3109 local_tlsdesc_gotent = elf_x86_64_local_tlsdesc_gotent (ibfd);
3110 s = htab->elf.sgot;
3111 srel = htab->elf.srelgot;
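/* Size the .got, .got.plt and .rela.got contributions for every local symbol with a positive GOT reference count, mirroring what elf_x86_64_allocate_dynrelocs does for global symbols.  */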
3112 for (; local_got < end_local_got;
3113 ++local_got, ++local_tls_type, ++local_tlsdesc_gotent)
3114 {
3115 *local_tlsdesc_gotent = (bfd_vma) -1;
3116 if (*local_got > 0)
3117 {
3118 if (GOT_TLS_GDESC_P (*local_tls_type))
3119 {
3120 *local_tlsdesc_gotent = htab->elf.sgotplt->size
3121 - elf_x86_64_compute_jump_table_size (htab);
3122 htab->elf.sgotplt->size += 2 * GOT_ENTRY_SIZE;
3123 *local_got = (bfd_vma) -2;
3124 }
3125 if (! GOT_TLS_GDESC_P (*local_tls_type)
3126 || GOT_TLS_GD_P (*local_tls_type))
3127 {
3128 *local_got = s->size;
3129 s->size += GOT_ENTRY_SIZE;
3130 if (GOT_TLS_GD_P (*local_tls_type))
3131 s->size += GOT_ENTRY_SIZE;
3132 }
3133 if (info->shared
3134 || GOT_TLS_GD_ANY_P (*local_tls_type)
3135 || *local_tls_type == GOT_TLS_IE)
3136 {
3137 if (GOT_TLS_GDESC_P (*local_tls_type))
3138 {
3139 htab->elf.srelplt->size
3140 += bed->s->sizeof_rela;
3141 htab->tlsdesc_plt = (bfd_vma) -1;
3142 }
3143 if (! GOT_TLS_GDESC_P (*local_tls_type)
3144 || GOT_TLS_GD_P (*local_tls_type))
3145 srel->size += bed->s->sizeof_rela;
3146 }
3147 }
3148 else
3149 *local_got = (bfd_vma) -1;
3150 }
3151 }
3152
3153 if (htab->tls_ld_got.refcount > 0)
3154 {
3155 /* Allocate 2 got entries and 1 dynamic reloc for R_X86_64_TLSLD
3156 relocs. */
3157 htab->tls_ld_got.offset = htab->elf.sgot->size;
3158 htab->elf.sgot->size += 2 * GOT_ENTRY_SIZE;
3159 htab->elf.srelgot->size += bed->s->sizeof_rela;
3160 }
3161 else
3162 htab->tls_ld_got.offset = -1;
3163
3164 /* Allocate global sym .plt and .got entries, and space for global
3165 sym dynamic relocs. */
3166 elf_link_hash_traverse (&htab->elf, elf_x86_64_allocate_dynrelocs,
3167 info);
3168
3169 /* Allocate .plt and .got entries, and space for local symbols. */
3170 htab_traverse (htab->loc_hash_table,
3171 elf_x86_64_allocate_local_dynrelocs,
3172 info);
3173
3174 /* For every jump slot reserved in the sgotplt, reloc_count is
3175 incremented. However, when we reserve space for TLS descriptors,
3176 it's not incremented, so in order to compute the space reserved
3177 for them, it suffices to multiply the reloc count by the jump
3178 slot size.
3179
3180 PR ld/13302: We start next_irelative_index at the end of .rela.plt
3181 so that R_X86_64_IRELATIVE entries come last. */
3182 if (htab->elf.srelplt)
3183 {
3184 htab->sgotplt_jump_table_size
3185 = elf_x86_64_compute_jump_table_size (htab);
3186 htab->next_irelative_index = htab->elf.srelplt->reloc_count - 1;
3187 }
3188 else if (htab->elf.irelplt)
3189 htab->next_irelative_index = htab->elf.irelplt->reloc_count - 1;
3190
3191 if (htab->tlsdesc_plt)
3192 {
3193 /* If we're not using lazy TLS relocations, don't generate the
3194 PLT and GOT entries they require. */
3195 if ((info->flags & DF_BIND_NOW))
3196 htab->tlsdesc_plt = 0;
3197 else
3198 {
3199 htab->tlsdesc_got = htab->elf.sgot->size;
3200 htab->elf.sgot->size += GOT_ENTRY_SIZE;
3201 /* Reserve room for the initial entry.
3202 FIXME: we could probably do away with it in this case. */
3203 if (htab->elf.splt->size == 0)
3204 htab->elf.splt->size += GET_PLT_ENTRY_SIZE (output_bfd);
3205 htab->tlsdesc_plt = htab->elf.splt->size;
3206 htab->elf.splt->size += GET_PLT_ENTRY_SIZE (output_bfd);
3207 }
3208 }
3209
3210 if (htab->elf.sgotplt)
3211 {
3212 /* Don't allocate the .got.plt section if there are neither GOT nor PLT
3213 entries and there is no reference to _GLOBAL_OFFSET_TABLE_. */
3214 if ((htab->elf.hgot == NULL
3215 || !htab->elf.hgot->ref_regular_nonweak)
3216 && (htab->elf.sgotplt->size
3217 == get_elf_backend_data (output_bfd)->got_header_size)
3218 && (htab->elf.splt == NULL
3219 || htab->elf.splt->size == 0)
3220 && (htab->elf.sgot == NULL
3221 || htab->elf.sgot->size == 0)
3222 && (htab->elf.iplt == NULL
3223 || htab->elf.iplt->size == 0)
3224 && (htab->elf.igotplt == NULL
3225 || htab->elf.igotplt->size == 0))
3226 htab->elf.sgotplt->size = 0;
3227 }
3228
3229 if (htab->plt_eh_frame != NULL
3230 && htab->elf.splt != NULL
3231 && htab->elf.splt->size != 0
3232 && !bfd_is_abs_section (htab->elf.splt->output_section)
3233 && _bfd_elf_eh_frame_present (info))
3234 {
3235 const struct elf_x86_64_backend_data *arch_data
3236 = get_elf_x86_64_arch_data (bed);
3237 htab->plt_eh_frame->size = arch_data->eh_frame_plt_size;
3238 }
3239
3240 /* We now have determined the sizes of the various dynamic sections.
3241 Allocate memory for them. */
3242 relocs = FALSE;
3243 for (s = dynobj->sections; s != NULL; s = s->next)
3244 {
3245 if ((s->flags & SEC_LINKER_CREATED) == 0)
3246 continue;
3247
3248 if (s == htab->elf.splt
3249 || s == htab->elf.sgot
3250 || s == htab->elf.sgotplt
3251 || s == htab->elf.iplt
3252 || s == htab->elf.igotplt
3253 || s == htab->plt_bnd
3254 || s == htab->plt_got
3255 || s == htab->plt_eh_frame
3256 || s == htab->sdynbss)
3257 {
3258 /* Strip this section if we don't need it; see the
3259 comment below. */
3260 }
3261 else if (CONST_STRNEQ (bfd_get_section_name (dynobj, s), ".rela"))
3262 {
3263 if (s->size != 0 && s != htab->elf.srelplt)
3264 relocs = TRUE;
3265
3266 /* We use the reloc_count field as a counter if we need
3267 to copy relocs into the output file. */
3268 if (s != htab->elf.srelplt)
3269 s->reloc_count = 0;
3270 }
3271 else
3272 {
3273 /* It's not one of our sections, so don't allocate space. */
3274 continue;
3275 }
3276
3277 if (s->size == 0)
3278 {
3279 /* If we don't need this section, strip it from the
3280 output file. This is mostly to handle .rela.bss and
3281 .rela.plt. We must create both sections in
3282 create_dynamic_sections, because they must be created
3283 before the linker maps input sections to output
3284 sections. The linker does that before
3285 adjust_dynamic_symbol is called, and it is that
3286 function which decides whether anything needs to go
3287 into these sections. */
3288
3289 s->flags |= SEC_EXCLUDE;
3290 continue;
3291 }
3292
3293 if ((s->flags & SEC_HAS_CONTENTS) == 0)
3294 continue;
3295
3296 /* Allocate memory for the section contents. We use bfd_zalloc
3297 here in case unused entries are not reclaimed before the
3298 section's contents are written out. This should not happen,
3299 but this way if it does, we get a R_X86_64_NONE reloc instead
3300 of garbage. */
3301 s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
3302 if (s->contents == NULL)
3303 return FALSE;
3304 }
3305
3306 if (htab->plt_eh_frame != NULL
3307 && htab->plt_eh_frame->contents != NULL)
3308 {
3309 const struct elf_x86_64_backend_data *arch_data
3310 = get_elf_x86_64_arch_data (bed);
3311
3312 memcpy (htab->plt_eh_frame->contents,
3313 arch_data->eh_frame_plt, htab->plt_eh_frame->size);
3314 bfd_put_32 (dynobj, htab->elf.splt->size,
3315 htab->plt_eh_frame->contents + PLT_FDE_LEN_OFFSET);
3316 }
3317
3318 if (htab->elf.dynamic_sections_created)
3319 {
3320 /* Add some entries to the .dynamic section. We fill in the
3321 values later, in elf_x86_64_finish_dynamic_sections, but we
3322 must add the entries now so that we get the correct size for
3323 the .dynamic section. The DT_DEBUG entry is filled in by the
3324 dynamic linker and used by the debugger. */
3325 #define add_dynamic_entry(TAG, VAL) \
3326 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
3327
3328 if (info->executable)
3329 {
3330 if (!add_dynamic_entry (DT_DEBUG, 0))
3331 return FALSE;
3332 }
3333
3334 if (htab->elf.splt->size != 0)
3335 {
3336 if (!add_dynamic_entry (DT_PLTGOT, 0)
3337 || !add_dynamic_entry (DT_PLTRELSZ, 0)
3338 || !add_dynamic_entry (DT_PLTREL, DT_RELA)
3339 || !add_dynamic_entry (DT_JMPREL, 0))
3340 return FALSE;
3341
3342 if (htab->tlsdesc_plt
3343 && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
3344 || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
3345 return FALSE;
3346 }
3347
3348 if (relocs)
3349 {
3350 if (!add_dynamic_entry (DT_RELA, 0)
3351 || !add_dynamic_entry (DT_RELASZ, 0)
3352 || !add_dynamic_entry (DT_RELAENT, bed->s->sizeof_rela))
3353 return FALSE;
3354
3355 /* If any dynamic relocs apply to a read-only section,
3356 then we need a DT_TEXTREL entry. */
3357 if ((info->flags & DF_TEXTREL) == 0)
3358 elf_link_hash_traverse (&htab->elf,
3359 elf_x86_64_readonly_dynrelocs,
3360 info);
3361
3362 if ((info->flags & DF_TEXTREL) != 0)
3363 {
3364 if (!add_dynamic_entry (DT_TEXTREL, 0))
3365 return FALSE;
3366 }
3367 }
3368 }
3369 #undef add_dynamic_entry
3370
3371 return TRUE;
3372 }
3373
3374 static bfd_boolean
3375 elf_x86_64_always_size_sections (bfd *output_bfd,
3376 struct bfd_link_info *info)
3377 {
3378 asection *tls_sec = elf_hash_table (info)->tls_sec;
3379
3380 if (tls_sec)
3381 {
3382 struct elf_link_hash_entry *tlsbase;
3383
3384 tlsbase = elf_link_hash_lookup (elf_hash_table (info),
3385 "_TLS_MODULE_BASE_",
3386 FALSE, FALSE, FALSE);
3387
3388 if (tlsbase && tlsbase->type == STT_TLS)
3389 {
3390 struct elf_x86_64_link_hash_table *htab;
3391 struct bfd_link_hash_entry *bh = NULL;
3392 const struct elf_backend_data *bed
3393 = get_elf_backend_data (output_bfd);
3394
3395 htab = elf_x86_64_hash_table (info);
3396 if (htab == NULL)
3397 return FALSE;
3398
3399 if (!(_bfd_generic_link_add_one_symbol
3400 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
3401 tls_sec, 0, NULL, FALSE,
3402 bed->collect, &bh)))
3403 return FALSE;
3404
3405 htab->tls_module_base = bh;
3406
3407 tlsbase = (struct elf_link_hash_entry *)bh;
3408 tlsbase->def_regular = 1;
3409 tlsbase->other = STV_HIDDEN;
3410 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
3411 }
3412 }
3413
3414 return TRUE;
3415 }
3416
3417 /* _TLS_MODULE_BASE_ needs to be treated specially when linking
3418 executables. Rather than setting it to the beginning of the TLS
3419 section, we have to set it to the end. This function may be called
3420 multiple times; it is idempotent. */
3421
3422 static void
3423 elf_x86_64_set_tls_module_base (struct bfd_link_info *info)
3424 {
3425 struct elf_x86_64_link_hash_table *htab;
3426 struct bfd_link_hash_entry *base;
3427
3428 if (!info->executable)
3429 return;
3430
3431 htab = elf_x86_64_hash_table (info);
3432 if (htab == NULL)
3433 return;
3434
3435 base = htab->tls_module_base;
3436 if (base == NULL)
3437 return;
3438
3439 base->u.def.value = htab->elf.tls_size;
3440 }
3441
3442 /* Return the base VMA address which should be subtracted from real addresses
3443 when resolving @dtpoff relocation.
3444 This is PT_TLS segment p_vaddr. */
3445
3446 static bfd_vma
3447 elf_x86_64_dtpoff_base (struct bfd_link_info *info)
3448 {
3449 /* If tls_sec is NULL, we should have signalled an error already. */
3450 if (elf_hash_table (info)->tls_sec == NULL)
3451 return 0;
3452 return elf_hash_table (info)->tls_sec->vma;
3453 }
3454
3455 /* Return the relocation value for @tpoff relocation
3456 if STT_TLS virtual address is ADDRESS. */
3457
3458 static bfd_vma
3459 elf_x86_64_tpoff (struct bfd_link_info *info, bfd_vma address)
3460 {
3461 struct elf_link_hash_table *htab = elf_hash_table (info);
3462 const struct elf_backend_data *bed = get_elf_backend_data (info->output_bfd);
3463 bfd_vma static_tls_size;
3464
3465 /* If tls_segment is NULL, we should have signalled an error already. */
3466 if (htab->tls_sec == NULL)
3467 return 0;
3468
3469 /* Consider special static TLS alignment requirements. */
3470 static_tls_size = BFD_ALIGN (htab->tls_size, bed->static_tls_alignment);
3471 return address - static_tls_size - htab->tls_sec->vma;
3472 }
3473
3474 /* Is the instruction before OFFSET in CONTENTS a 32bit relative
3475 branch? */
3476
3477 static bfd_boolean
3478 is_32bit_relative_branch (bfd_byte *contents, bfd_vma offset)
3479 {
3480 /* Opcode Instruction
3481 0xe8 call
3482 0xe9 jump
3483 0x0f 0x8x conditional jump */
3484 return ((offset > 0
3485 && (contents [offset - 1] == 0xe8
3486 || contents [offset - 1] == 0xe9))
3487 || (offset > 1
3488 && contents [offset - 2] == 0x0f
3489 && (contents [offset - 1] & 0xf0) == 0x80));
3490 }
3491
3492 /* Relocate an x86_64 ELF section. */
3493
3494 static bfd_boolean
3495 elf_x86_64_relocate_section (bfd *output_bfd,
3496 struct bfd_link_info *info,
3497 bfd *input_bfd,
3498 asection *input_section,
3499 bfd_byte *contents,
3500 Elf_Internal_Rela *relocs,
3501 Elf_Internal_Sym *local_syms,
3502 asection **local_sections)
3503 {
3504 struct elf_x86_64_link_hash_table *htab;
3505 Elf_Internal_Shdr *symtab_hdr;
3506 struct elf_link_hash_entry **sym_hashes;
3507 bfd_vma *local_got_offsets;
3508 bfd_vma *local_tlsdesc_gotents;
3509 Elf_Internal_Rela *rel;
3510 Elf_Internal_Rela *relend;
3511 const unsigned int plt_entry_size = GET_PLT_ENTRY_SIZE (info->output_bfd);
3512
3513 BFD_ASSERT (is_x86_64_elf (input_bfd));
3514
3515 htab = elf_x86_64_hash_table (info);
3516 if (htab == NULL)
3517 return FALSE;
3518 symtab_hdr = &elf_symtab_hdr (input_bfd);
3519 sym_hashes = elf_sym_hashes (input_bfd);
3520 local_got_offsets = elf_local_got_offsets (input_bfd);
3521 local_tlsdesc_gotents = elf_x86_64_local_tlsdesc_gotent (input_bfd);
3522
3523 elf_x86_64_set_tls_module_base (info);
3524
3525 rel = relocs;
3526 relend = relocs + input_section->reloc_count;
3527 for (; rel < relend; rel++)
3528 {
3529 unsigned int r_type;
3530 reloc_howto_type *howto;
3531 unsigned long r_symndx;
3532 struct elf_link_hash_entry *h;
3533 struct elf_x86_64_link_hash_entry *eh;
3534 Elf_Internal_Sym *sym;
3535 asection *sec;
3536 bfd_vma off, offplt, plt_offset;
3537 bfd_vma relocation;
3538 bfd_boolean unresolved_reloc;
3539 bfd_reloc_status_type r;
3540 int tls_type;
3541 asection *base_got, *resolved_plt;
3542 bfd_vma st_size;
3543
3544 r_type = ELF32_R_TYPE (rel->r_info);
3545 if (r_type == (int) R_X86_64_GNU_VTINHERIT
3546 || r_type == (int) R_X86_64_GNU_VTENTRY)
3547 continue;
3548
3549 if (r_type >= (int) R_X86_64_standard)
3550 {
3551 (*_bfd_error_handler)
3552 (_("%B: unrecognized relocation (0x%x) in section `%A'"),
3553 input_bfd, input_section, r_type);
3554 bfd_set_error (bfd_error_bad_value);
3555 return FALSE;
3556 }
3557
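/* For x32, R_X86_64_32 is described by the alternative howto stored as the last entry of x86_64_elf_howto_table.  */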
3558 if (r_type != (int) R_X86_64_32
3559 || ABI_64_P (output_bfd))
3560 howto = x86_64_elf_howto_table + r_type;
3561 else
3562 howto = (x86_64_elf_howto_table
3563 + ARRAY_SIZE (x86_64_elf_howto_table) - 1);
3564 r_symndx = htab->r_sym (rel->r_info);
3565 h = NULL;
3566 sym = NULL;
3567 sec = NULL;
3568 unresolved_reloc = FALSE;
3569 if (r_symndx < symtab_hdr->sh_info)
3570 {
3571 sym = local_syms + r_symndx;
3572 sec = local_sections[r_symndx];
3573
3574 relocation = _bfd_elf_rela_local_sym (output_bfd, sym,
3575 &sec, rel);
3576 st_size = sym->st_size;
3577
3578 /* Relocate against local STT_GNU_IFUNC symbol. */
3579 if (!info->relocatable
3580 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
3581 {
3582 h = elf_x86_64_get_local_sym_hash (htab, input_bfd,
3583 rel, FALSE);
3584 if (h == NULL)
3585 abort ();
3586
3587 /* Set STT_GNU_IFUNC symbol value. */
3588 h->root.u.def.value = sym->st_value;
3589 h->root.u.def.section = sec;
3590 }
3591 }
3592 else
3593 {
3594 bfd_boolean warned ATTRIBUTE_UNUSED;
3595 bfd_boolean ignored ATTRIBUTE_UNUSED;
3596
3597 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
3598 r_symndx, symtab_hdr, sym_hashes,
3599 h, sec, relocation,
3600 unresolved_reloc, warned, ignored);
3601 st_size = h->size;
3602 }
3603
3604 if (sec != NULL && discarded_section (sec))
3605 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
3606 rel, 1, relend, howto, 0, contents);
3607
3608 if (info->relocatable)
3609 continue;
3610
3611 if (rel->r_addend == 0 && !ABI_64_P (output_bfd))
3612 {
3613 if (r_type == R_X86_64_64)
3614 {
3615 /* For x32, treat R_X86_64_64 like R_X86_64_32 and
3616 zero-extend it to 64bit if addend is zero. */
3617 r_type = R_X86_64_32;
3618 memset (contents + rel->r_offset + 4, 0, 4);
3619 }
3620 else if (r_type == R_X86_64_SIZE64)
3621 {
3622 /* For x32, treat R_X86_64_SIZE64 like R_X86_64_SIZE32 and
3623 zero-extend it to 64bit if addend is zero. */
3624 r_type = R_X86_64_SIZE32;
3625 memset (contents + rel->r_offset + 4, 0, 4);
3626 }
3627 }
3628
3629 eh = (struct elf_x86_64_link_hash_entry *) h;
3630
3631 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
3632 it here if it is defined in a non-shared object. */
3633 if (h != NULL
3634 && h->type == STT_GNU_IFUNC
3635 && h->def_regular)
3636 {
3637 bfd_vma plt_index;
3638 const char *name;
3639
3640 if ((input_section->flags & SEC_ALLOC) == 0
3641 || h->plt.offset == (bfd_vma) -1)
3642 abort ();
3643
3644 /* STT_GNU_IFUNC symbol must go through PLT. */
3645 if (htab->elf.splt != NULL)
3646 {
3647 if (htab->plt_bnd != NULL)
3648 {
3649 resolved_plt = htab->plt_bnd;
3650 plt_offset = eh->plt_bnd.offset;
3651 }
3652 else
3653 {
3654 resolved_plt = htab->elf.splt;
3655 plt_offset = h->plt.offset;
3656 }
3657 }
3658 else
3659 {
3660 resolved_plt = htab->elf.iplt;
3661 plt_offset = h->plt.offset;
3662 }
3663
3664 relocation = (resolved_plt->output_section->vma
3665 + resolved_plt->output_offset + plt_offset);
3666
3667 switch (r_type)
3668 {
3669 default:
3670 if (h->root.root.string)
3671 name = h->root.root.string;
3672 else
3673 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
3674 NULL);
3675 (*_bfd_error_handler)
3676 (_("%B: relocation %s against STT_GNU_IFUNC "
3677 "symbol `%s' isn't handled by %s"), input_bfd,
3678 x86_64_elf_howto_table[r_type].name,
3679 name, __FUNCTION__);
3680 bfd_set_error (bfd_error_bad_value);
3681 return FALSE;
3682
3683 case R_X86_64_32S:
3684 if (info->shared)
3685 abort ();
3686 goto do_relocation;
3687
3688 case R_X86_64_32:
3689 if (ABI_64_P (output_bfd))
3690 goto do_relocation;
3691 /* FALLTHROUGH */
3692 case R_X86_64_64:
3693 if (rel->r_addend != 0)
3694 {
3695 if (h->root.root.string)
3696 name = h->root.root.string;
3697 else
3698 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
3699 sym, NULL);
3700 (*_bfd_error_handler)
3701 (_("%B: relocation %s against STT_GNU_IFUNC "
3702 "symbol `%s' has non-zero addend: %d"),
3703 input_bfd, x86_64_elf_howto_table[r_type].name,
3704 name, rel->r_addend);
3705 bfd_set_error (bfd_error_bad_value);
3706 return FALSE;
3707 }
3708
3709 /* Generate a dynamic relocation only when there is a
3710 non-GOT reference in a shared object. */
3711 if (info->shared && h->non_got_ref)
3712 {
3713 Elf_Internal_Rela outrel;
3714 asection *sreloc;
3715
3716 /* Need a dynamic relocation to get the real function
3717 address. */
3718 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
3719 info,
3720 input_section,
3721 rel->r_offset);
3722 if (outrel.r_offset == (bfd_vma) -1
3723 || outrel.r_offset == (bfd_vma) -2)
3724 abort ();
3725
3726 outrel.r_offset += (input_section->output_section->vma
3727 + input_section->output_offset);
3728
3729 if (h->dynindx == -1
3730 || h->forced_local
3731 || info->executable)
3732 {
3733 /* This symbol is resolved locally. */
3734 outrel.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
3735 outrel.r_addend = (h->root.u.def.value
3736 + h->root.u.def.section->output_section->vma
3737 + h->root.u.def.section->output_offset);
3738 }
3739 else
3740 {
3741 outrel.r_info = htab->r_info (h->dynindx, r_type);
3742 outrel.r_addend = 0;
3743 }
3744
3745 sreloc = htab->elf.irelifunc;
3746 elf_append_rela (output_bfd, sreloc, &outrel);
3747
3748 /* If this reloc is against an external symbol, we
3749 do not want to fiddle with the addend. Otherwise,
3750 we need to include the symbol value so that it
3751 becomes an addend for the dynamic reloc. For an
3752 internal symbol, the addend has already been set above. */
3753 continue;
3754 }
3755 /* FALLTHROUGH */
3756 case R_X86_64_PC32:
3757 case R_X86_64_PC32_BND:
3758 case R_X86_64_PC64:
3759 case R_X86_64_PLT32:
3760 case R_X86_64_PLT32_BND:
3761 goto do_relocation;
3762
3763 case R_X86_64_GOTPCREL:
3764 case R_X86_64_GOTPCREL64:
3765 base_got = htab->elf.sgot;
3766 off = h->got.offset;
3767
3768 if (base_got == NULL)
3769 abort ();
3770
3771 if (off == (bfd_vma) -1)
3772 {
3773 /* We can't use h->got.offset here to save state, or
3774 even just remember the offset, as finish_dynamic_symbol
3775 would use that as offset into .got. */
3776
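/* When there is a regular PLT, PLT entry 0 is reserved and the
   first three .got.plt slots belong to the dynamic linker, hence
   the -1 and +3 adjustments below; the static .iplt/.igot.plt
   have no reserved entries. */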
3777 if (htab->elf.splt != NULL)
3778 {
3779 plt_index = h->plt.offset / plt_entry_size - 1;
3780 off = (plt_index + 3) * GOT_ENTRY_SIZE;
3781 base_got = htab->elf.sgotplt;
3782 }
3783 else
3784 {
3785 plt_index = h->plt.offset / plt_entry_size;
3786 off = plt_index * GOT_ENTRY_SIZE;
3787 base_got = htab->elf.igotplt;
3788 }
3789
3790 if (h->dynindx == -1
3791 || h->forced_local
3792 || info->symbolic)
3793 {
3794 /* This references the local definition. We must
3795 initialize this entry in the global offset table.
3796 Since the offset must always be a multiple of 8,
3797 we use the least significant bit to record
3798 whether we have initialized it already.
3799
3800 When doing a dynamic link, we create a .rela.got
3801 relocation entry to initialize the value. This
3802 is done in the finish_dynamic_symbol routine. */
3803 if ((off & 1) != 0)
3804 off &= ~1;
3805 else
3806 {
3807 bfd_put_64 (output_bfd, relocation,
3808 base_got->contents + off);
3809 /* Note that this is harmless for the GOTPLT64
3810 case, as -1 | 1 still is -1. */
3811 h->got.offset |= 1;
3812 }
3813 }
3814 }
3815
3816 relocation = (base_got->output_section->vma
3817 + base_got->output_offset + off);
3818
3819 goto do_relocation;
3820 }
3821 }
3822
3823 /* When generating a shared object, the relocations handled here are
3824 copied into the output file to be resolved at run time. */
3825 switch (r_type)
3826 {
3827 case R_X86_64_GOT32:
3828 case R_X86_64_GOT64:
3829 /* Relocation is to the entry for this symbol in the global
3830 offset table. */
3831 case R_X86_64_GOTPCREL:
3832 case R_X86_64_GOTPCREL64:
3833 /* Use global offset table entry as symbol value. */
3834 case R_X86_64_GOTPLT64:
3835 /* This is obsolete and treated the same as GOT64. */
3836 base_got = htab->elf.sgot;
3837
3838 if (htab->elf.sgot == NULL)
3839 abort ();
3840
3841 if (h != NULL)
3842 {
3843 bfd_boolean dyn;
3844
3845 off = h->got.offset;
3846 if (h->needs_plt
3847 && h->plt.offset != (bfd_vma)-1
3848 && off == (bfd_vma)-1)
3849 {
3850 /* We can't use h->got.offset here to save
3851 state, or even just remember the offset, as
3852 finish_dynamic_symbol would use that as offset into
3853 .got. */
3854 bfd_vma plt_index = h->plt.offset / plt_entry_size - 1;
3855 off = (plt_index + 3) * GOT_ENTRY_SIZE;
3856 base_got = htab->elf.sgotplt;
3857 }
3858
3859 dyn = htab->elf.dynamic_sections_created;
3860
3861 if (! WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
3862 || (info->shared
3863 && SYMBOL_REFERENCES_LOCAL (info, h))
3864 || (ELF_ST_VISIBILITY (h->other)
3865 && h->root.type == bfd_link_hash_undefweak))
3866 {
3867 /* This is actually a static link, or it is a -Bsymbolic
3868 link and the symbol is defined locally, or the symbol
3869 was forced to be local because of a version file. We
3870 must initialize this entry in the global offset table.
3871 Since the offset must always be a multiple of 8, we
3872 use the least significant bit to record whether we
3873 have initialized it already.
3874
3875 When doing a dynamic link, we create a .rela.got
3876 relocation entry to initialize the value. This is
3877 done in the finish_dynamic_symbol routine. */
3878 if ((off & 1) != 0)
3879 off &= ~1;
3880 else
3881 {
3882 bfd_put_64 (output_bfd, relocation,
3883 base_got->contents + off);
3884 /* Note that this is harmless for the GOTPLT64 case,
3885 as -1 | 1 still is -1. */
3886 h->got.offset |= 1;
3887 }
3888 }
3889 else
3890 unresolved_reloc = FALSE;
3891 }
3892 else
3893 {
3894 if (local_got_offsets == NULL)
3895 abort ();
3896
3897 off = local_got_offsets[r_symndx];
3898
3899 /* The offset must always be a multiple of 8. We use
3900 the least significant bit to record whether we have
3901 already generated the necessary reloc. */
3902 if ((off & 1) != 0)
3903 off &= ~1;
3904 else
3905 {
3906 bfd_put_64 (output_bfd, relocation,
3907 base_got->contents + off);
3908
3909 if (info->shared)
3910 {
3911 asection *s;
3912 Elf_Internal_Rela outrel;
3913
3914 /* We need to generate a R_X86_64_RELATIVE reloc
3915 for the dynamic linker. */
3916 s = htab->elf.srelgot;
3917 if (s == NULL)
3918 abort ();
3919
3920 outrel.r_offset = (base_got->output_section->vma
3921 + base_got->output_offset
3922 + off);
3923 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
3924 outrel.r_addend = relocation;
3925 elf_append_rela (output_bfd, s, &outrel);
3926 }
3927
3928 local_got_offsets[r_symndx] |= 1;
3929 }
3930 }
3931
3932 if (off >= (bfd_vma) -2)
3933 abort ();
3934
3935 relocation = base_got->output_section->vma
3936 + base_got->output_offset + off;
3937 if (r_type != R_X86_64_GOTPCREL && r_type != R_X86_64_GOTPCREL64)
3938 relocation -= htab->elf.sgotplt->output_section->vma
3939 - htab->elf.sgotplt->output_offset;
3940
3941 break;
3942
3943 case R_X86_64_GOTOFF64:
3944 /* Relocation is relative to the start of the global offset
3945 table. */
3946
3947 /* Check to make sure it isn't a protected function symbol
3948 for a shared library, since it may not be local when used
3949 as a function address. */
3950 if (!info->executable
3951 && h
3952 && !SYMBOLIC_BIND (info, h)
3953 && h->def_regular
3954 && h->type == STT_FUNC
3955 && ELF_ST_VISIBILITY (h->other) == STV_PROTECTED)
3956 {
3957 (*_bfd_error_handler)
3958 (_("%B: relocation R_X86_64_GOTOFF64 against protected function `%s' can not be used when making a shared object"),
3959 input_bfd, h->root.root.string);
3960 bfd_set_error (bfd_error_bad_value);
3961 return FALSE;
3962 }
3963
3964 /* Note that sgot is not involved in this
3965 calculation. We always want the start of .got.plt. If we
3966 defined _GLOBAL_OFFSET_TABLE_ in a different way, as is
3967 permitted by the ABI, we might have to change this
3968 calculation. */
3969 relocation -= htab->elf.sgotplt->output_section->vma
3970 + htab->elf.sgotplt->output_offset;
3971 break;
3972
3973 case R_X86_64_GOTPC32:
3974 case R_X86_64_GOTPC64:
3975 /* Use global offset table as symbol value. */
3976 relocation = htab->elf.sgotplt->output_section->vma
3977 + htab->elf.sgotplt->output_offset;
3978 unresolved_reloc = FALSE;
3979 break;
3980
3981 case R_X86_64_PLTOFF64:
3982 /* Relocation is PLT entry relative to GOT. For local
3983 symbols it's the symbol itself relative to GOT. */
3984 if (h != NULL
3985 /* See PLT32 handling. */
3986 && h->plt.offset != (bfd_vma) -1
3987 && htab->elf.splt != NULL)
3988 {
3989 if (htab->plt_bnd != NULL)
3990 {
3991 resolved_plt = htab->plt_bnd;
3992 plt_offset = eh->plt_bnd.offset;
3993 }
3994 else
3995 {
3996 resolved_plt = htab->elf.splt;
3997 plt_offset = h->plt.offset;
3998 }
3999
4000 relocation = (resolved_plt->output_section->vma
4001 + resolved_plt->output_offset
4002 + plt_offset);
4003 unresolved_reloc = FALSE;
4004 }
4005
4006 relocation -= htab->elf.sgotplt->output_section->vma
4007 + htab->elf.sgotplt->output_offset;
4008 break;
4009
4010 case R_X86_64_PLT32:
4011 case R_X86_64_PLT32_BND:
4012 /* Relocation is to the entry for this symbol in the
4013 procedure linkage table. */
4014
4015 /* Resolve a PLT32 reloc against a local symbol directly,
4016 without using the procedure linkage table. */
4017 if (h == NULL)
4018 break;
4019
4020 if ((h->plt.offset == (bfd_vma) -1
4021 && eh->plt_got.offset == (bfd_vma) -1)
4022 || htab->elf.splt == NULL)
4023 {
4024 /* We didn't make a PLT entry for this symbol. This
4025 happens when statically linking PIC code, or when
4026 using -Bsymbolic. */
4027 break;
4028 }
4029
4030 if (h->plt.offset != (bfd_vma) -1)
4031 {
4032 if (htab->plt_bnd != NULL)
4033 {
4034 resolved_plt = htab->plt_bnd;
4035 plt_offset = eh->plt_bnd.offset;
4036 }
4037 else
4038 {
4039 resolved_plt = htab->elf.splt;
4040 plt_offset = h->plt.offset;
4041 }
4042 }
4043 else
4044 {
4045 /* Use the GOT PLT. */
4046 resolved_plt = htab->plt_got;
4047 plt_offset = eh->plt_got.offset;
4048 }
4049
4050 relocation = (resolved_plt->output_section->vma
4051 + resolved_plt->output_offset
4052 + plt_offset);
4053 unresolved_reloc = FALSE;
4054 break;
4055
4056 case R_X86_64_SIZE32:
4057 case R_X86_64_SIZE64:
4058 /* Set to symbol size. */
4059 relocation = st_size;
4060 goto direct;
4061
4062 case R_X86_64_PC8:
4063 case R_X86_64_PC16:
4064 case R_X86_64_PC32:
4065 case R_X86_64_PC32_BND:
4066 if (info->shared
4067 && (input_section->flags & SEC_ALLOC) != 0
4068 && (input_section->flags & SEC_READONLY) != 0
4069 && h != NULL)
4070 {
4071 bfd_boolean fail = FALSE;
4072 bfd_boolean branch
4073 = ((r_type == R_X86_64_PC32
4074 || r_type == R_X86_64_PC32_BND)
4075 && is_32bit_relative_branch (contents, rel->r_offset));
4076
4077 if (SYMBOL_REFERENCES_LOCAL (info, h))
4078 {
4079 /* Symbol is referenced locally. Make sure it is
4080 defined locally or for a branch. */
4081 fail = !h->def_regular && !branch;
4082 }
4083 else if (!(info->executable
4084 && (h->needs_copy || eh->needs_copy)))
4085 {
4086 /* Symbol doesn't need copy reloc and isn't referenced
4087 locally. We only allow branch to symbol with
4088 non-default visibility. */
4089 fail = (!branch
4090 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT);
4091 }
4092
4093 if (fail)
4094 {
4095 const char *fmt;
4096 const char *v;
4097 const char *pic = "";
4098
4099 switch (ELF_ST_VISIBILITY (h->other))
4100 {
4101 case STV_HIDDEN:
4102 v = _("hidden symbol");
4103 break;
4104 case STV_INTERNAL:
4105 v = _("internal symbol");
4106 break;
4107 case STV_PROTECTED:
4108 v = _("protected symbol");
4109 break;
4110 default:
4111 v = _("symbol");
4112 pic = _("; recompile with -fPIC");
4113 break;
4114 }
4115
4116 if (h->def_regular)
4117 fmt = _("%B: relocation %s against %s `%s' can not be used when making a shared object%s");
4118 else
4119 fmt = _("%B: relocation %s against undefined %s `%s' can not be used when making a shared object%s");
4120
4121 (*_bfd_error_handler) (fmt, input_bfd,
4122 x86_64_elf_howto_table[r_type].name,
4123 v, h->root.root.string, pic);
4124 bfd_set_error (bfd_error_bad_value);
4125 return FALSE;
4126 }
4127 }
4128 /* Fall through. */
4129
4130 case R_X86_64_8:
4131 case R_X86_64_16:
4132 case R_X86_64_32:
4133 case R_X86_64_PC64:
4134 case R_X86_64_64:
4135 /* FIXME: The ABI says the linker should make sure the value is
4136 the same when it's zero-extended to 64 bits. */
4137
4138 direct:
4139 if ((input_section->flags & SEC_ALLOC) == 0)
4140 break;
4141
4142 /* Don't copy a pc-relative relocation into the output file
4143 if the symbol needs copy reloc. */
4144 if ((info->shared
4145 && !(info->executable
4146 && h != NULL
4147 && (h->needs_copy || eh->needs_copy)
4148 && IS_X86_64_PCREL_TYPE (r_type))
4149 && (h == NULL
4150 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4151 || h->root.type != bfd_link_hash_undefweak)
4152 && ((! IS_X86_64_PCREL_TYPE (r_type)
4153 && r_type != R_X86_64_SIZE32
4154 && r_type != R_X86_64_SIZE64)
4155 || ! SYMBOL_CALLS_LOCAL (info, h)))
4156 || (ELIMINATE_COPY_RELOCS
4157 && !info->shared
4158 && h != NULL
4159 && h->dynindx != -1
4160 && !h->non_got_ref
4161 && ((h->def_dynamic
4162 && !h->def_regular)
4163 || h->root.type == bfd_link_hash_undefweak
4164 || h->root.type == bfd_link_hash_undefined)))
4165 {
4166 Elf_Internal_Rela outrel;
4167 bfd_boolean skip, relocate;
4168 asection *sreloc;
4169
4170 /* When generating a shared object, these relocations
4171 are copied into the output file to be resolved at run
4172 time. */
4173 skip = FALSE;
4174 relocate = FALSE;
4175
4176 outrel.r_offset =
4177 _bfd_elf_section_offset (output_bfd, info, input_section,
4178 rel->r_offset);
4179 if (outrel.r_offset == (bfd_vma) -1)
4180 skip = TRUE;
4181 else if (outrel.r_offset == (bfd_vma) -2)
4182 skip = TRUE, relocate = TRUE;
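/* SKIP means the place was discarded, so only a nulled-out
   dynamic reloc (the memset below) is emitted; RELOCATE means
   the in-place relocation further down must still be applied
   to the section contents. */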
4183
4184 outrel.r_offset += (input_section->output_section->vma
4185 + input_section->output_offset);
4186
4187 if (skip)
4188 memset (&outrel, 0, sizeof outrel);
4189
4190 /* h->dynindx may be -1 if this symbol was marked to
4191 become local. */
4192 else if (h != NULL
4193 && h->dynindx != -1
4194 && (IS_X86_64_PCREL_TYPE (r_type)
4195 || ! info->shared
4196 || ! SYMBOLIC_BIND (info, h)
4197 || ! h->def_regular))
4198 {
4199 outrel.r_info = htab->r_info (h->dynindx, r_type);
4200 outrel.r_addend = rel->r_addend;
4201 }
4202 else
4203 {
4204 /* This symbol is local, or marked to become local. */
4205 if (r_type == htab->pointer_r_type)
4206 {
4207 relocate = TRUE;
4208 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4209 outrel.r_addend = relocation + rel->r_addend;
4210 }
4211 else if (r_type == R_X86_64_64
4212 && !ABI_64_P (output_bfd))
4213 {
4214 relocate = TRUE;
4215 outrel.r_info = htab->r_info (0,
4216 R_X86_64_RELATIVE64);
4217 outrel.r_addend = relocation + rel->r_addend;
4218 /* Check addend overflow. */
4219 if ((outrel.r_addend & 0x80000000)
4220 != (rel->r_addend & 0x80000000))
4221 {
4222 const char *name;
4223 int addend = rel->r_addend;
4224 if (h && h->root.root.string)
4225 name = h->root.root.string;
4226 else
4227 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
4228 sym, NULL);
4229 if (addend < 0)
4230 (*_bfd_error_handler)
4231 (_("%B: addend -0x%x in relocation %s against "
4232 "symbol `%s' at 0x%lx in section `%A' is "
4233 "out of range"),
4234 input_bfd, input_section, addend,
4235 x86_64_elf_howto_table[r_type].name,
4236 name, (unsigned long) rel->r_offset);
4237 else
4238 (*_bfd_error_handler)
4239 (_("%B: addend 0x%x in relocation %s against "
4240 "symbol `%s' at 0x%lx in section `%A' is "
4241 "out of range"),
4242 input_bfd, input_section, addend,
4243 x86_64_elf_howto_table[r_type].name,
4244 name, (unsigned long) rel->r_offset);
4245 bfd_set_error (bfd_error_bad_value);
4246 return FALSE;
4247 }
4248 }
4249 else
4250 {
4251 long sindx;
4252
4253 if (bfd_is_abs_section (sec))
4254 sindx = 0;
4255 else if (sec == NULL || sec->owner == NULL)
4256 {
4257 bfd_set_error (bfd_error_bad_value);
4258 return FALSE;
4259 }
4260 else
4261 {
4262 asection *osec;
4263
4264 /* We are turning this relocation into one
4265 against a section symbol. It would be
4266 proper to subtract the symbol's value,
4267 osec->vma, from the emitted reloc addend,
4268 but ld.so expects buggy relocs. */
4269 osec = sec->output_section;
4270 sindx = elf_section_data (osec)->dynindx;
4271 if (sindx == 0)
4272 {
4273 asection *oi = htab->elf.text_index_section;
4274 sindx = elf_section_data (oi)->dynindx;
4275 }
4276 BFD_ASSERT (sindx != 0);
4277 }
4278
4279 outrel.r_info = htab->r_info (sindx, r_type);
4280 outrel.r_addend = relocation + rel->r_addend;
4281 }
4282 }
4283
4284 sreloc = elf_section_data (input_section)->sreloc;
4285
4286 if (sreloc == NULL || sreloc->contents == NULL)
4287 {
4288 r = bfd_reloc_notsupported;
4289 goto check_relocation_error;
4290 }
4291
4292 elf_append_rela (output_bfd, sreloc, &outrel);
4293
4294 /* If this reloc is against an external symbol, we do
4295 not want to fiddle with the addend. Otherwise, we
4296 need to include the symbol value so that it becomes
4297 an addend for the dynamic reloc. */
4298 if (! relocate)
4299 continue;
4300 }
4301
4302 break;
4303
4304 case R_X86_64_TLSGD:
4305 case R_X86_64_GOTPC32_TLSDESC:
4306 case R_X86_64_TLSDESC_CALL:
4307 case R_X86_64_GOTTPOFF:
4308 tls_type = GOT_UNKNOWN;
4309 if (h == NULL && local_got_offsets)
4310 tls_type = elf_x86_64_local_got_tls_type (input_bfd) [r_symndx];
4311 else if (h != NULL)
4312 tls_type = elf_x86_64_hash_entry (h)->tls_type;
4313
4314 if (! elf_x86_64_tls_transition (info, input_bfd,
4315 input_section, contents,
4316 symtab_hdr, sym_hashes,
4317 &r_type, tls_type, rel,
4318 relend, h, r_symndx))
4319 return FALSE;
4320
4321 if (r_type == R_X86_64_TPOFF32)
4322 {
4323 bfd_vma roff = rel->r_offset;
4324
4325 BFD_ASSERT (! unresolved_reloc);
4326
4327 if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSGD)
4328 {
4329 /* GD->LE transition. For 64bit, change
4330 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
4331 .word 0x6666; rex64; call __tls_get_addr
4332 into:
4333 movq %fs:0, %rax
4334 leaq foo@tpoff(%rax), %rax
4335 For 32bit, change
4336 leaq foo@tlsgd(%rip), %rdi
4337 .word 0x6666; rex64; call __tls_get_addr
4338 into:
4339 movl %fs:0, %eax
4340 leaq foo@tpoff(%rax), %rax
4341 For largepic, change:
4342 leaq foo@tlsgd(%rip), %rdi
4343 movabsq $__tls_get_addr@pltoff, %rax
4344 addq %rbx, %rax
4345 call *%rax
4346 into:
4347 movq %fs:0, %rax
4348 leaq foo@tpoff(%rax), %rax
4349 nopw 0x0(%rax,%rax,1) */
4350 int largepic = 0;
4351 if (ABI_64_P (output_bfd)
4352 && contents[roff + 5] == (bfd_byte) '\xb8')
4353 {
4354 memcpy (contents + roff - 3,
4355 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80"
4356 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
4357 largepic = 1;
4358 }
4359 else if (ABI_64_P (output_bfd))
4360 memcpy (contents + roff - 4,
4361 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
4362 16);
4363 else
4364 memcpy (contents + roff - 3,
4365 "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
4366 15);
4367 bfd_put_32 (output_bfd,
4368 elf_x86_64_tpoff (info, relocation),
4369 contents + roff + 8 + largepic);
4370 /* Skip R_X86_64_PC32/R_X86_64_PLT32/R_X86_64_PLTOFF64. */
4371 rel++;
4372 continue;
4373 }
4374 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTPC32_TLSDESC)
4375 {
4376 /* GDesc -> LE transition.
4377 It's originally something like:
4378 leaq x@tlsdesc(%rip), %rax
4379
4380 Change it to:
4381 movl $x@tpoff, %rax. */
4382
4383 unsigned int val, type;
4384
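/* TYPE is the old REX prefix and VAL the old ModRM byte of the
   leaq. The new movq $imm32, %reg is REX.W + 0xc7 /0 with a
   register-direct ModRM, so the old REX.R bit becomes REX.B and
   the old reg field moves into the rm field. */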
4385 type = bfd_get_8 (input_bfd, contents + roff - 3);
4386 val = bfd_get_8 (input_bfd, contents + roff - 1);
4387 bfd_put_8 (output_bfd, 0x48 | ((type >> 2) & 1),
4388 contents + roff - 3);
4389 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
4390 bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7),
4391 contents + roff - 1);
4392 bfd_put_32 (output_bfd,
4393 elf_x86_64_tpoff (info, relocation),
4394 contents + roff);
4395 continue;
4396 }
4397 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSDESC_CALL)
4398 {
4399 /* GDesc -> LE transition.
4400 It's originally:
4401 call *(%rax)
4402 Turn it into:
4403 xchg %ax,%ax. */
4404 bfd_put_8 (output_bfd, 0x66, contents + roff);
4405 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
4406 continue;
4407 }
4408 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTTPOFF)
4409 {
4410 /* IE->LE transition:
4411 For 64bit, originally it can be one of:
4412 movq foo@gottpoff(%rip), %reg
4413 addq foo@gottpoff(%rip), %reg
4414 We change it into:
4415 movq $foo, %reg
4416 leaq foo(%reg), %reg
4417 addq $foo, %reg.
4418 For 32bit, originally it can be one of:
4419 movq foo@gottpoff(%rip), %reg
4420 addl foo@gottpoff(%rip), %reg
4421 We change it into:
4422 movq $foo, %reg
4423 leal foo(%reg), %reg
4424 addl $foo, %reg. */
4425
4426 unsigned int val, type, reg;
4427
4428 if (roff >= 3)
4429 val = bfd_get_8 (input_bfd, contents + roff - 3);
4430 else
4431 val = 0;
4432 type = bfd_get_8 (input_bfd, contents + roff - 2);
4433 reg = bfd_get_8 (input_bfd, contents + roff - 1);
4434 reg >>= 3;
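/* VAL is the REX prefix (if any), TYPE the opcode and REG the
   ModRM reg field of the original instruction. movq becomes
   movq $imm32, %reg (0xc7 /0), so REX.R turns into REX.B.
   addq/addl normally becomes leaq/leal foo(%reg), %reg, which
   puts the register in both the reg and rm fields (REX.R plus
   REX.B); but when REG is 4 (%rsp/%r12) that rm encoding would
   require a SIB byte, so an add with an immediate (0x81 /0) is
   used instead. */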
4435 if (type == 0x8b)
4436 {
4437 /* movq */
4438 if (val == 0x4c)
4439 bfd_put_8 (output_bfd, 0x49,
4440 contents + roff - 3);
4441 else if (!ABI_64_P (output_bfd) && val == 0x44)
4442 bfd_put_8 (output_bfd, 0x41,
4443 contents + roff - 3);
4444 bfd_put_8 (output_bfd, 0xc7,
4445 contents + roff - 2);
4446 bfd_put_8 (output_bfd, 0xc0 | reg,
4447 contents + roff - 1);
4448 }
4449 else if (reg == 4)
4450 {
4451 /* addq/addl -> addq/addl - addressing with %rsp/%r12
4452 is special */
4453 if (val == 0x4c)
4454 bfd_put_8 (output_bfd, 0x49,
4455 contents + roff - 3);
4456 else if (!ABI_64_P (output_bfd) && val == 0x44)
4457 bfd_put_8 (output_bfd, 0x41,
4458 contents + roff - 3);
4459 bfd_put_8 (output_bfd, 0x81,
4460 contents + roff - 2);
4461 bfd_put_8 (output_bfd, 0xc0 | reg,
4462 contents + roff - 1);
4463 }
4464 else
4465 {
4466 /* addq/addl -> leaq/leal */
4467 if (val == 0x4c)
4468 bfd_put_8 (output_bfd, 0x4d,
4469 contents + roff - 3);
4470 else if (!ABI_64_P (output_bfd) && val == 0x44)
4471 bfd_put_8 (output_bfd, 0x45,
4472 contents + roff - 3);
4473 bfd_put_8 (output_bfd, 0x8d,
4474 contents + roff - 2);
4475 bfd_put_8 (output_bfd, 0x80 | reg | (reg << 3),
4476 contents + roff - 1);
4477 }
4478 bfd_put_32 (output_bfd,
4479 elf_x86_64_tpoff (info, relocation),
4480 contents + roff);
4481 continue;
4482 }
4483 else
4484 BFD_ASSERT (FALSE);
4485 }
4486
4487 if (htab->elf.sgot == NULL)
4488 abort ();
4489
4490 if (h != NULL)
4491 {
4492 off = h->got.offset;
4493 offplt = elf_x86_64_hash_entry (h)->tlsdesc_got;
4494 }
4495 else
4496 {
4497 if (local_got_offsets == NULL)
4498 abort ();
4499
4500 off = local_got_offsets[r_symndx];
4501 offplt = local_tlsdesc_gotents[r_symndx];
4502 }
4503
4504 if ((off & 1) != 0)
4505 off &= ~1;
4506 else
4507 {
4508 Elf_Internal_Rela outrel;
4509 int dr_type, indx;
4510 asection *sreloc;
4511
4512 if (htab->elf.srelgot == NULL)
4513 abort ();
4514
4515 indx = h && h->dynindx != -1 ? h->dynindx : 0;
4516
4517 if (GOT_TLS_GDESC_P (tls_type))
4518 {
4519 outrel.r_info = htab->r_info (indx, R_X86_64_TLSDESC);
4520 BFD_ASSERT (htab->sgotplt_jump_table_size + offplt
4521 + 2 * GOT_ENTRY_SIZE <= htab->elf.sgotplt->size);
4522 outrel.r_offset = (htab->elf.sgotplt->output_section->vma
4523 + htab->elf.sgotplt->output_offset
4524 + offplt
4525 + htab->sgotplt_jump_table_size);
4526 sreloc = htab->elf.srelplt;
4527 if (indx == 0)
4528 outrel.r_addend = relocation - elf_x86_64_dtpoff_base (info);
4529 else
4530 outrel.r_addend = 0;
4531 elf_append_rela (output_bfd, sreloc, &outrel);
4532 }
4533
4534 sreloc = htab->elf.srelgot;
4535
4536 outrel.r_offset = (htab->elf.sgot->output_section->vma
4537 + htab->elf.sgot->output_offset + off);
4538
4539 if (GOT_TLS_GD_P (tls_type))
4540 dr_type = R_X86_64_DTPMOD64;
4541 else if (GOT_TLS_GDESC_P (tls_type))
4542 goto dr_done;
4543 else
4544 dr_type = R_X86_64_TPOFF64;
4545
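/* A GD GOT slot is a pair of entries: DTPMOD64 covers the first
   and DTPOFF64 the second (filled in below), while IE uses a
   single TPOFF64 entry; pure GDesc slots live in .got.plt and
   were handled above. */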
4546 bfd_put_64 (output_bfd, 0, htab->elf.sgot->contents + off);
4547 outrel.r_addend = 0;
4548 if ((dr_type == R_X86_64_TPOFF64
4549 || dr_type == R_X86_64_TLSDESC) && indx == 0)
4550 outrel.r_addend = relocation - elf_x86_64_dtpoff_base (info);
4551 outrel.r_info = htab->r_info (indx, dr_type);
4552
4553 elf_append_rela (output_bfd, sreloc, &outrel);
4554
4555 if (GOT_TLS_GD_P (tls_type))
4556 {
4557 if (indx == 0)
4558 {
4559 BFD_ASSERT (! unresolved_reloc);
4560 bfd_put_64 (output_bfd,
4561 relocation - elf_x86_64_dtpoff_base (info),
4562 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
4563 }
4564 else
4565 {
4566 bfd_put_64 (output_bfd, 0,
4567 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
4568 outrel.r_info = htab->r_info (indx,
4569 R_X86_64_DTPOFF64);
4570 outrel.r_offset += GOT_ENTRY_SIZE;
4571 elf_append_rela (output_bfd, sreloc,
4572 &outrel);
4573 }
4574 }
4575
4576 dr_done:
4577 if (h != NULL)
4578 h->got.offset |= 1;
4579 else
4580 local_got_offsets[r_symndx] |= 1;
4581 }
4582
4583 if (off >= (bfd_vma) -2
4584 && ! GOT_TLS_GDESC_P (tls_type))
4585 abort ();
4586 if (r_type == ELF32_R_TYPE (rel->r_info))
4587 {
4588 if (r_type == R_X86_64_GOTPC32_TLSDESC
4589 || r_type == R_X86_64_TLSDESC_CALL)
4590 relocation = htab->elf.sgotplt->output_section->vma
4591 + htab->elf.sgotplt->output_offset
4592 + offplt + htab->sgotplt_jump_table_size;
4593 else
4594 relocation = htab->elf.sgot->output_section->vma
4595 + htab->elf.sgot->output_offset + off;
4596 unresolved_reloc = FALSE;
4597 }
4598 else
4599 {
4600 bfd_vma roff = rel->r_offset;
4601
4602 if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSGD)
4603 {
4604 /* GD->IE transition. For 64bit, change
4605 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
4606 .word 0x6666; rex64; call __tls_get_addr@plt
4607 into:
4608 movq %fs:0, %rax
4609 addq foo@gottpoff(%rip), %rax
4610 For 32bit, change
4611 leaq foo@tlsgd(%rip), %rdi
4612 .word 0x6666; rex64; call __tls_get_addr@plt
4613 into:
4614 movl %fs:0, %eax
4615 addq foo@gottpoff(%rip), %rax
4616 For largepic, change:
4617 leaq foo@tlsgd(%rip), %rdi
4618 movabsq $__tls_get_addr@pltoff, %rax
4619 addq %rbx, %rax
4620 call *%rax
4621 into:
4622 movq %fs:0, %rax
4623 addq foo@gottpoff(%rax), %rax
4624 nopw 0x0(%rax,%rax,1) */
4625 int largepic = 0;
4626 if (ABI_64_P (output_bfd)
4627 && contents[roff + 5] == (bfd_byte) '\xb8')
4628 {
4629 memcpy (contents + roff - 3,
4630 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05"
4631 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
4632 largepic = 1;
4633 }
4634 else if (ABI_64_P (output_bfd))
4635 memcpy (contents + roff - 4,
4636 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
4637 16);
4638 else
4639 memcpy (contents + roff - 3,
4640 "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
4641 15);
4642
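/* The 4-byte @gottpoff displacement of the new addq sits at
   roff + 8 (+ largepic) and, like any PC-relative operand, is
   measured from the end of that instruction at roff + 12
   (+ largepic), which the calculation below accounts for. */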
4643 relocation = (htab->elf.sgot->output_section->vma
4644 + htab->elf.sgot->output_offset + off
4645 - roff
4646 - largepic
4647 - input_section->output_section->vma
4648 - input_section->output_offset
4649 - 12);
4650 bfd_put_32 (output_bfd, relocation,
4651 contents + roff + 8 + largepic);
4652 /* Skip R_X86_64_PLT32/R_X86_64_PLTOFF64. */
4653 rel++;
4654 continue;
4655 }
4656 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTPC32_TLSDESC)
4657 {
4658 /* GDesc -> IE transition.
4659 It's originally something like:
4660 leaq x@tlsdesc(%rip), %rax
4661
4662 Change it to:
4663 movq x@gottpoff(%rip), %rax # before xchg %ax,%ax. */
4664
4665 /* Now modify the instruction as appropriate. To
4666 turn a leaq into a movq in the form we use it, it
4667 suffices to change the second byte from 0x8d to
4668 0x8b. */
4669 bfd_put_8 (output_bfd, 0x8b, contents + roff - 2);
4670
4671 bfd_put_32 (output_bfd,
4672 htab->elf.sgot->output_section->vma
4673 + htab->elf.sgot->output_offset + off
4674 - rel->r_offset
4675 - input_section->output_section->vma
4676 - input_section->output_offset
4677 - 4,
4678 contents + roff);
4679 continue;
4680 }
4681 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSDESC_CALL)
4682 {
4683 /* GDesc -> IE transition.
4684 It's originally:
4685 call *(%rax)
4686
4687 Change it to:
4688 xchg %ax, %ax. */
4689
4690 bfd_put_8 (output_bfd, 0x66, contents + roff);
4691 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
4692 continue;
4693 }
4694 else
4695 BFD_ASSERT (FALSE);
4696 }
4697 break;
4698
4699 case R_X86_64_TLSLD:
4700 if (! elf_x86_64_tls_transition (info, input_bfd,
4701 input_section, contents,
4702 symtab_hdr, sym_hashes,
4703 &r_type, GOT_UNKNOWN,
4704 rel, relend, h, r_symndx))
4705 return FALSE;
4706
4707 if (r_type != R_X86_64_TLSLD)
4708 {
4709 /* LD->LE transition:
4710 leaq foo@tlsld(%rip), %rdi; call __tls_get_addr.
4711 For 64bit, we change it into:
4712 .word 0x6666; .byte 0x66; movq %fs:0, %rax.
4713 For 32bit, we change it into:
4714 nopl 0x0(%rax); movl %fs:0, %eax.
4715 For largepic, change:
4716 leaq foo@tlsgd(%rip), %rdi
4717 movabsq $__tls_get_addr@pltoff, %rax
4718 addq %rbx, %rax
4719 call *%rax
4720 into:
4721 data32 data32 data32 nopw %cs:0x0(%rax,%rax,1)
4722 movq %fs:0, %rax */
4723
4724 BFD_ASSERT (r_type == R_X86_64_TPOFF32);
4725 if (ABI_64_P (output_bfd)
4726 && contents[rel->r_offset + 5] == (bfd_byte) '\xb8')
4727 memcpy (contents + rel->r_offset - 3,
4728 "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0"
4729 "\x64\x48\x8b\x04\x25\0\0\0", 22);
4730 else if (ABI_64_P (output_bfd))
4731 memcpy (contents + rel->r_offset - 3,
4732 "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12);
4733 else
4734 memcpy (contents + rel->r_offset - 3,
4735 "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12);
4736 /* Skip R_X86_64_PC32/R_X86_64_PLT32/R_X86_64_PLTOFF64. */
4737 rel++;
4738 continue;
4739 }
4740
4741 if (htab->elf.sgot == NULL)
4742 abort ();
4743
4744 off = htab->tls_ld_got.offset;
4745 if (off & 1)
4746 off &= ~1;
4747 else
4748 {
4749 Elf_Internal_Rela outrel;
4750
4751 if (htab->elf.srelgot == NULL)
4752 abort ();
4753
4754 outrel.r_offset = (htab->elf.sgot->output_section->vma
4755 + htab->elf.sgot->output_offset + off);
4756
4757 bfd_put_64 (output_bfd, 0,
4758 htab->elf.sgot->contents + off);
4759 bfd_put_64 (output_bfd, 0,
4760 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
4761 outrel.r_info = htab->r_info (0, R_X86_64_DTPMOD64);
4762 outrel.r_addend = 0;
4763 elf_append_rela (output_bfd, htab->elf.srelgot,
4764 &outrel);
4765 htab->tls_ld_got.offset |= 1;
4766 }
4767 relocation = htab->elf.sgot->output_section->vma
4768 + htab->elf.sgot->output_offset + off;
4769 unresolved_reloc = FALSE;
4770 break;
4771
4772 case R_X86_64_DTPOFF32:
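/* In an executable, a DTPOFF32 in a code section goes with the
   LD->LE transition above, so the TP offset is wanted rather
   than the DTP offset; non-code sections (e.g. debug info) keep
   the DTP offset. */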
4773 if (!info->executable || (input_section->flags & SEC_CODE) == 0)
4774 relocation -= elf_x86_64_dtpoff_base (info);
4775 else
4776 relocation = elf_x86_64_tpoff (info, relocation);
4777 break;
4778
4779 case R_X86_64_TPOFF32:
4780 case R_X86_64_TPOFF64:
4781 BFD_ASSERT (info->executable);
4782 relocation = elf_x86_64_tpoff (info, relocation);
4783 break;
4784
4785 case R_X86_64_DTPOFF64:
4786 BFD_ASSERT ((input_section->flags & SEC_CODE) == 0);
4787 relocation -= elf_x86_64_dtpoff_base (info);
4788 break;
4789
4790 default:
4791 break;
4792 }
4793
4794 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
4795 because such sections are not SEC_ALLOC and thus ld.so will
4796 not process them. */
4797 if (unresolved_reloc
4798 && !((input_section->flags & SEC_DEBUGGING) != 0
4799 && h->def_dynamic)
4800 && _bfd_elf_section_offset (output_bfd, info, input_section,
4801 rel->r_offset) != (bfd_vma) -1)
4802 {
4803 (*_bfd_error_handler)
4804 (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
4805 input_bfd,
4806 input_section,
4807 (long) rel->r_offset,
4808 howto->name,
4809 h->root.root.string);
4810 return FALSE;
4811 }
4812
4813 do_relocation:
4814 r = _bfd_final_link_relocate (howto, input_bfd, input_section,
4815 contents, rel->r_offset,
4816 relocation, rel->r_addend);
4817
4818 check_relocation_error:
4819 if (r != bfd_reloc_ok)
4820 {
4821 const char *name;
4822
4823 if (h != NULL)
4824 name = h->root.root.string;
4825 else
4826 {
4827 name = bfd_elf_string_from_elf_section (input_bfd,
4828 symtab_hdr->sh_link,
4829 sym->st_name);
4830 if (name == NULL)
4831 return FALSE;
4832 if (*name == '\0')
4833 name = bfd_section_name (input_bfd, sec);
4834 }
4835
4836 if (r == bfd_reloc_overflow)
4837 {
4838 if (! ((*info->callbacks->reloc_overflow)
4839 (info, (h ? &h->root : NULL), name, howto->name,
4840 (bfd_vma) 0, input_bfd, input_section,
4841 rel->r_offset)))
4842 return FALSE;
4843 }
4844 else
4845 {
4846 (*_bfd_error_handler)
4847 (_("%B(%A+0x%lx): reloc against `%s': error %d"),
4848 input_bfd, input_section,
4849 (long) rel->r_offset, name, (int) r);
4850 return FALSE;
4851 }
4852 }
4853 }
4854
4855 return TRUE;
4856 }
4857
4858 /* Finish up dynamic symbol handling. We set the contents of various
4859 dynamic sections here. */
4860
4861 static bfd_boolean
4862 elf_x86_64_finish_dynamic_symbol (bfd *output_bfd,
4863 struct bfd_link_info *info,
4864 struct elf_link_hash_entry *h,
4865 Elf_Internal_Sym *sym ATTRIBUTE_UNUSED)
4866 {
4867 struct elf_x86_64_link_hash_table *htab;
4868 const struct elf_x86_64_backend_data *abed;
4869 bfd_boolean use_plt_bnd;
4870 struct elf_x86_64_link_hash_entry *eh;
4871
4872 htab = elf_x86_64_hash_table (info);
4873 if (htab == NULL)
4874 return FALSE;
4875
4876 /* Use MPX backend data in case of BND relocation. Use .plt_bnd
4877 section only if there is .plt section. */
4878 use_plt_bnd = htab->elf.splt != NULL && htab->plt_bnd != NULL;
4879 abed = (use_plt_bnd
4880 ? &elf_x86_64_bnd_arch_bed
4881 : get_elf_x86_64_backend_data (output_bfd));
4882
4883 eh = (struct elf_x86_64_link_hash_entry *) h;
4884
4885 if (h->plt.offset != (bfd_vma) -1)
4886 {
4887 bfd_vma plt_index;
4888 bfd_vma got_offset, plt_offset, plt_plt_offset, plt_got_offset;
4889 bfd_vma plt_plt_insn_end, plt_got_insn_size;
4890 Elf_Internal_Rela rela;
4891 bfd_byte *loc;
4892 asection *plt, *gotplt, *relplt, *resolved_plt;
4893 const struct elf_backend_data *bed;
4894 bfd_vma plt_got_pcrel_offset;
4895
4896 /* When building a static executable, use .iplt, .igot.plt and
4897 .rela.iplt sections for STT_GNU_IFUNC symbols. */
4898 if (htab->elf.splt != NULL)
4899 {
4900 plt = htab->elf.splt;
4901 gotplt = htab->elf.sgotplt;
4902 relplt = htab->elf.srelplt;
4903 }
4904 else
4905 {
4906 plt = htab->elf.iplt;
4907 gotplt = htab->elf.igotplt;
4908 relplt = htab->elf.irelplt;
4909 }
4910
4911 /* This symbol has an entry in the procedure linkage table. Set
4912 it up. */
4913 if ((h->dynindx == -1
4914 && !((h->forced_local || info->executable)
4915 && h->def_regular
4916 && h->type == STT_GNU_IFUNC))
4917 || plt == NULL
4918 || gotplt == NULL
4919 || relplt == NULL)
4920 abort ();
4921
4922 /* Get the index in the procedure linkage table which
4923 corresponds to this symbol. This is the index of this symbol
4924 in all the symbols for which we are making plt entries. The
4925 first entry in the procedure linkage table is reserved.
4926
4927 Get the offset into the .got table of the entry that
4928 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
4929 bytes. The first three are reserved for the dynamic linker.
4930
4931 For static executables, we don't reserve anything. */
4932
4933 if (plt == htab->elf.splt)
4934 {
4935 got_offset = h->plt.offset / abed->plt_entry_size - 1;
4936 got_offset = (got_offset + 3) * GOT_ENTRY_SIZE;
4937 }
4938 else
4939 {
4940 got_offset = h->plt.offset / abed->plt_entry_size;
4941 got_offset = got_offset * GOT_ENTRY_SIZE;
4942 }
4943
4944 plt_plt_insn_end = abed->plt_plt_insn_end;
4945 plt_plt_offset = abed->plt_plt_offset;
4946 plt_got_insn_size = abed->plt_got_insn_size;
4947 plt_got_offset = abed->plt_got_offset;
4948 if (use_plt_bnd)
4949 {
4950 /* Use the second PLT with BND relocations. */
4951 const bfd_byte *plt_entry, *plt2_entry;
4952
4953 if (eh->has_bnd_reloc)
4954 {
4955 plt_entry = elf_x86_64_bnd_plt_entry;
4956 plt2_entry = elf_x86_64_bnd_plt2_entry;
4957 }
4958 else
4959 {
4960 plt_entry = elf_x86_64_legacy_plt_entry;
4961 plt2_entry = elf_x86_64_legacy_plt2_entry;
4962
4963 /* Subtract 1 since there is no BND prefix. */
4964 plt_plt_insn_end -= 1;
4965 plt_plt_offset -= 1;
4966 plt_got_insn_size -= 1;
4967 plt_got_offset -= 1;
4968 }
4969
4970 BFD_ASSERT (sizeof (elf_x86_64_bnd_plt_entry)
4971 == sizeof (elf_x86_64_legacy_plt_entry));
4972
4973 /* Fill in the entry in the procedure linkage table. */
4974 memcpy (plt->contents + h->plt.offset,
4975 plt_entry, sizeof (elf_x86_64_legacy_plt_entry));
4976 /* Fill in the entry in the second PLT. */
4977 memcpy (htab->plt_bnd->contents + eh->plt_bnd.offset,
4978 plt2_entry, sizeof (elf_x86_64_legacy_plt2_entry));
4979
4980 resolved_plt = htab->plt_bnd;
4981 plt_offset = eh->plt_bnd.offset;
4982 }
4983 else
4984 {
4985 /* Fill in the entry in the procedure linkage table. */
4986 memcpy (plt->contents + h->plt.offset, abed->plt_entry,
4987 abed->plt_entry_size);
4988
4989 resolved_plt = plt;
4990 plt_offset = h->plt.offset;
4991 }
4992
4993 /* Fill in the relocated fields of the PLT entry. */
4994
4995 /* Put in the offset to the GOT entry for the PC-relative instruction
4996 that refers to it, subtracting the size of that instruction. */
4997 plt_got_pcrel_offset = (gotplt->output_section->vma
4998 + gotplt->output_offset
4999 + got_offset
5000 - resolved_plt->output_section->vma
5001 - resolved_plt->output_offset
5002 - plt_offset
5003 - plt_got_insn_size);
5004
5005 /* Check PC-relative offset overflow in PLT entry. */
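/* Adding 0x80000000 maps the valid signed 32-bit range onto
   [0, 0xffffffff], so anything larger means the displacement
   does not fit in the 4-byte field of the PLT entry. */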
5006 if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff)
5007 info->callbacks->einfo (_("%F%B: PC-relative offset overflow in PLT entry for `%s'\n"),
5008 output_bfd, h->root.root.string);
5009
5010 bfd_put_32 (output_bfd, plt_got_pcrel_offset,
5011 resolved_plt->contents + plt_offset + plt_got_offset);
5012
5013 /* Fill in the entry in the global offset table; initially this
5014 points to the second part of the PLT entry. */
5015 bfd_put_64 (output_bfd, (plt->output_section->vma
5016 + plt->output_offset
5017 + h->plt.offset + abed->plt_lazy_offset),
5018 gotplt->contents + got_offset);
5019
5020 /* Fill in the entry in the .rela.plt section. */
5021 rela.r_offset = (gotplt->output_section->vma
5022 + gotplt->output_offset
5023 + got_offset);
5024 if (h->dynindx == -1
5025 || ((info->executable
5026 || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
5027 && h->def_regular
5028 && h->type == STT_GNU_IFUNC))
5029 {
5030 /* If an STT_GNU_IFUNC symbol is locally defined, generate
5031 R_X86_64_IRELATIVE instead of R_X86_64_JUMP_SLOT. */
5032 rela.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
5033 rela.r_addend = (h->root.u.def.value
5034 + h->root.u.def.section->output_section->vma
5035 + h->root.u.def.section->output_offset);
5036 /* R_X86_64_IRELATIVE comes last. */
5037 plt_index = htab->next_irelative_index--;
5038 }
5039 else
5040 {
5041 rela.r_info = htab->r_info (h->dynindx, R_X86_64_JUMP_SLOT);
5042 rela.r_addend = 0;
5043 plt_index = htab->next_jump_slot_index++;
5044 }
5045
5046 /* Don't fill PLT entry for static executables. */
5047 if (plt == htab->elf.splt)
5048 {
5049 bfd_vma plt0_offset = h->plt.offset + plt_plt_insn_end;
5050
5051 /* Put relocation index. */
5052 bfd_put_32 (output_bfd, plt_index,
5053 plt->contents + h->plt.offset + abed->plt_reloc_offset);
5054
5055 /* Put offset for jmp .PLT0 and check for overflow. We don't
5056 check relocation index for overflow since branch displacement
5057 will overflow first. */
5058 if (plt0_offset > 0x80000000)
5059 info->callbacks->einfo (_("%F%B: branch displacement overflow in PLT entry for `%s'\n"),
5060 output_bfd, h->root.root.string);
5061 bfd_put_32 (output_bfd, - plt0_offset,
5062 plt->contents + h->plt.offset + plt_plt_offset);
5063 }
5064
5065 bed = get_elf_backend_data (output_bfd);
5066 loc = relplt->contents + plt_index * bed->s->sizeof_rela;
5067 bed->s->swap_reloca_out (output_bfd, &rela, loc);
5068 }
5069 else if (eh->plt_got.offset != (bfd_vma) -1)
5070 {
5071 bfd_vma got_offset, plt_offset, plt_got_offset, plt_got_insn_size;
5072 asection *plt, *got;
5073 bfd_boolean got_after_plt;
5074 int32_t got_pcrel_offset;
5075 const bfd_byte *got_plt_entry;
5076
5077 /* Set the entry in the GOT procedure linkage table. */
5078 plt = htab->plt_got;
5079 got = htab->elf.sgot;
5080 got_offset = h->got.offset;
5081
5082 if (got_offset == (bfd_vma) -1
5083 || h->type == STT_GNU_IFUNC
5084 || plt == NULL
5085 || got == NULL)
5086 abort ();
5087
5088 /* Use the second PLT entry template for the GOT PLT since they
5089 are identical. */
5090 plt_got_insn_size = elf_x86_64_bnd_arch_bed.plt_got_insn_size;
5091 plt_got_offset = elf_x86_64_bnd_arch_bed.plt_got_offset;
5092 if (eh->has_bnd_reloc)
5093 got_plt_entry = elf_x86_64_bnd_plt2_entry;
5094 else
5095 {
5096 got_plt_entry = elf_x86_64_legacy_plt2_entry;
5097
5098 /* Subtract 1 since there is no BND prefix. */
5099 plt_got_insn_size -= 1;
5100 plt_got_offset -= 1;
5101 }
5102
5103 /* Fill in the entry in the GOT procedure linkage table. */
5104 plt_offset = eh->plt_got.offset;
5105 memcpy (plt->contents + plt_offset,
5106 got_plt_entry, sizeof (elf_x86_64_legacy_plt2_entry));
5107
5108 /* Put in the offset to the GOT entry for the PC-relative instruction
5109 that refers to it, subtracting the size of that instruction. */
5110 got_pcrel_offset = (got->output_section->vma
5111 + got->output_offset
5112 + got_offset
5113 - plt->output_section->vma
5114 - plt->output_offset
5115 - plt_offset
5116 - plt_got_insn_size);
5117
5118 /* Check PC-relative offset overflow in GOT PLT entry. */
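/* got_pcrel_offset is only 32 bits wide; if .got is laid out
   after .plt the true displacement is positive (and negative
   otherwise), so a flipped sign means it overflowed. */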
5119 got_after_plt = got->output_section->vma > plt->output_section->vma;
5120 if ((got_after_plt && got_pcrel_offset < 0)
5121 || (!got_after_plt && got_pcrel_offset > 0))
5122 info->callbacks->einfo (_("%F%B: PC-relative offset overflow in GOT PLT entry for `%s'\n"),
5123 output_bfd, h->root.root.string);
5124
5125 bfd_put_32 (output_bfd, got_pcrel_offset,
5126 plt->contents + plt_offset + plt_got_offset);
5127 }
5128
5129 if (!h->def_regular
5130 && (h->plt.offset != (bfd_vma) -1
5131 || eh->plt_got.offset != (bfd_vma) -1))
5132 {
5133 /* Mark the symbol as undefined, rather than as defined in
5134 the .plt section. Leave the value if there were any
5135 relocations where pointer equality matters (this is a clue
5136 for the dynamic linker, to make function pointer
5137 comparisons work between an application and shared
5138 library), otherwise set it to zero. If a function is only
5139 called from a binary, there is no need to slow down
5140 shared libraries because of that. */
5141 sym->st_shndx = SHN_UNDEF;
5142 if (!h->pointer_equality_needed)
5143 sym->st_value = 0;
5144 }
5145
5146 if (h->got.offset != (bfd_vma) -1
5147 && ! GOT_TLS_GD_ANY_P (elf_x86_64_hash_entry (h)->tls_type)
5148 && elf_x86_64_hash_entry (h)->tls_type != GOT_TLS_IE)
5149 {
5150 Elf_Internal_Rela rela;
5151
5152 /* This symbol has an entry in the global offset table. Set it
5153 up. */
5154 if (htab->elf.sgot == NULL || htab->elf.srelgot == NULL)
5155 abort ();
5156
5157 rela.r_offset = (htab->elf.sgot->output_section->vma
5158 + htab->elf.sgot->output_offset
5159 + (h->got.offset &~ (bfd_vma) 1));
5160
5161 /* If this is a static link, or it is a -Bsymbolic link and the
5162 symbol is defined locally or was forced to be local because
5163 of a version file, we just want to emit a RELATIVE reloc.
5164 The entry in the global offset table will already have been
5165 initialized in the relocate_section function. */
5166 if (h->def_regular
5167 && h->type == STT_GNU_IFUNC)
5168 {
5169 if (info->shared)
5170 {
5171 /* Generate R_X86_64_GLOB_DAT. */
5172 goto do_glob_dat;
5173 }
5174 else
5175 {
5176 asection *plt;
5177
5178 if (!h->pointer_equality_needed)
5179 abort ();
5180
5181 /* For a non-shared object, we can't use .got.plt, which
5182 contains the real function address, if we need pointer
5183 equality. We load the GOT entry with the PLT entry. */
5184 plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt;
5185 bfd_put_64 (output_bfd, (plt->output_section->vma
5186 + plt->output_offset
5187 + h->plt.offset),
5188 htab->elf.sgot->contents + h->got.offset);
5189 return TRUE;
5190 }
5191 }
5192 else if (info->shared
5193 && SYMBOL_REFERENCES_LOCAL (info, h))
5194 {
5195 if (!h->def_regular)
5196 return FALSE;
5197 BFD_ASSERT((h->got.offset & 1) != 0);
5198 rela.r_info = htab->r_info (0, R_X86_64_RELATIVE);
5199 rela.r_addend = (h->root.u.def.value
5200 + h->root.u.def.section->output_section->vma
5201 + h->root.u.def.section->output_offset);
5202 }
5203 else
5204 {
5205 BFD_ASSERT((h->got.offset & 1) == 0);
5206 do_glob_dat:
5207 bfd_put_64 (output_bfd, (bfd_vma) 0,
5208 htab->elf.sgot->contents + h->got.offset);
5209 rela.r_info = htab->r_info (h->dynindx, R_X86_64_GLOB_DAT);
5210 rela.r_addend = 0;
5211 }
5212
5213 elf_append_rela (output_bfd, htab->elf.srelgot, &rela);
5214 }
5215
5216 if (h->needs_copy)
5217 {
5218 Elf_Internal_Rela rela;
5219
5220 /* This symbol needs a copy reloc. Set it up. */
5221
5222 if (h->dynindx == -1
5223 || (h->root.type != bfd_link_hash_defined
5224 && h->root.type != bfd_link_hash_defweak)
5225 || htab->srelbss == NULL)
5226 abort ();
5227
5228 rela.r_offset = (h->root.u.def.value
5229 + h->root.u.def.section->output_section->vma
5230 + h->root.u.def.section->output_offset);
5231 rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY);
5232 rela.r_addend = 0;
5233 elf_append_rela (output_bfd, htab->srelbss, &rela);
5234 }
5235
5236 return TRUE;
5237 }
5238
5239 /* Finish up local dynamic symbol handling. We set the contents of
5240 various dynamic sections here. */
5241
5242 static bfd_boolean
5243 elf_x86_64_finish_local_dynamic_symbol (void **slot, void *inf)
5244 {
5245 struct elf_link_hash_entry *h
5246 = (struct elf_link_hash_entry *) *slot;
5247 struct bfd_link_info *info
5248 = (struct bfd_link_info *) inf;
5249
5250 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
5251 info, h, NULL);
5252 }
5253
5254 /* Used to decide how to sort relocs in an optimal manner for the
5255 dynamic linker, before writing them out. */
5256
5257 static enum elf_reloc_type_class
5258 elf_x86_64_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
5259 const asection *rel_sec ATTRIBUTE_UNUSED,
5260 const Elf_Internal_Rela *rela)
5261 {
5262 switch ((int) ELF32_R_TYPE (rela->r_info))
5263 {
5264 case R_X86_64_RELATIVE:
5265 case R_X86_64_RELATIVE64:
5266 return reloc_class_relative;
5267 case R_X86_64_JUMP_SLOT:
5268 return reloc_class_plt;
5269 case R_X86_64_COPY:
5270 return reloc_class_copy;
5271 default:
5272 return reloc_class_normal;
5273 }
5274 }
5275
5276 /* Finish up the dynamic sections. */
5277
5278 static bfd_boolean
5279 elf_x86_64_finish_dynamic_sections (bfd *output_bfd,
5280 struct bfd_link_info *info)
5281 {
5282 struct elf_x86_64_link_hash_table *htab;
5283 bfd *dynobj;
5284 asection *sdyn;
5285 const struct elf_x86_64_backend_data *abed;
5286
5287 htab = elf_x86_64_hash_table (info);
5288 if (htab == NULL)
5289 return FALSE;
5290
5291 /* Use MPX backend data in case of BND relocation. Use .plt_bnd
5292 section only if there is .plt section. */
5293 abed = (htab->elf.splt != NULL && htab->plt_bnd != NULL
5294 ? &elf_x86_64_bnd_arch_bed
5295 : get_elf_x86_64_backend_data (output_bfd));
5296
5297 dynobj = htab->elf.dynobj;
5298 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
5299
5300 if (htab->elf.dynamic_sections_created)
5301 {
5302 bfd_byte *dyncon, *dynconend;
5303 const struct elf_backend_data *bed;
5304 bfd_size_type sizeof_dyn;
5305
5306 if (sdyn == NULL || htab->elf.sgot == NULL)
5307 abort ();
5308
5309 bed = get_elf_backend_data (dynobj);
5310 sizeof_dyn = bed->s->sizeof_dyn;
5311 dyncon = sdyn->contents;
5312 dynconend = sdyn->contents + sdyn->size;
5313 for (; dyncon < dynconend; dyncon += sizeof_dyn)
5314 {
5315 Elf_Internal_Dyn dyn;
5316 asection *s;
5317
5318 (*bed->s->swap_dyn_in) (dynobj, dyncon, &dyn);
5319
5320 switch (dyn.d_tag)
5321 {
5322 default:
5323 continue;
5324
5325 case DT_PLTGOT:
5326 s = htab->elf.sgotplt;
5327 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
5328 break;
5329
5330 case DT_JMPREL:
5331 dyn.d_un.d_ptr = htab->elf.srelplt->output_section->vma;
5332 break;
5333
5334 case DT_PLTRELSZ:
5335 s = htab->elf.srelplt->output_section;
5336 dyn.d_un.d_val = s->size;
5337 break;
5338
5339 case DT_RELASZ:
5340 /* The procedure linkage table relocs (DT_JMPREL) should
5341 not be included in the overall relocs (DT_RELA).
5342 Therefore, we override the DT_RELASZ entry here to
5343 make it not include the JMPREL relocs. Since the
5344 linker script arranges for .rela.plt to follow all
5345 other relocation sections, we don't have to worry
5346 about changing the DT_RELA entry. */
5347 if (htab->elf.srelplt != NULL)
5348 {
5349 s = htab->elf.srelplt->output_section;
5350 dyn.d_un.d_val -= s->size;
5351 }
5352 break;
5353
5354 case DT_TLSDESC_PLT:
5355 s = htab->elf.splt;
5356 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
5357 + htab->tlsdesc_plt;
5358 break;
5359
5360 case DT_TLSDESC_GOT:
5361 s = htab->elf.sgot;
5362 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
5363 + htab->tlsdesc_got;
5364 break;
5365 }
5366
5367 (*bed->s->swap_dyn_out) (output_bfd, &dyn, dyncon);
5368 }
5369
5370 /* Fill in the special first entry in the procedure linkage table. */
5371 if (htab->elf.splt && htab->elf.splt->size > 0)
5372 {
5373 /* Fill in the first entry in the procedure linkage table. */
5374 memcpy (htab->elf.splt->contents,
5375 abed->plt0_entry, abed->plt_entry_size);
5376 /* Add the offset for pushq GOT+8(%rip); the instruction is
5377 6 bytes long, so subtract 6 to make the offset relative to its end. */
5378 bfd_put_32 (output_bfd,
5379 (htab->elf.sgotplt->output_section->vma
5380 + htab->elf.sgotplt->output_offset
5381 + 8
5382 - htab->elf.splt->output_section->vma
5383 - htab->elf.splt->output_offset
5384 - 6),
5385 htab->elf.splt->contents + abed->plt0_got1_offset);
5386 /* Add offset for the PC-relative instruction accessing GOT+16,
5387 subtracting the offset to the end of that instruction. */
5388 bfd_put_32 (output_bfd,
5389 (htab->elf.sgotplt->output_section->vma
5390 + htab->elf.sgotplt->output_offset
5391 + 16
5392 - htab->elf.splt->output_section->vma
5393 - htab->elf.splt->output_offset
5394 - abed->plt0_got2_insn_end),
5395 htab->elf.splt->contents + abed->plt0_got2_offset);
5396
5397 elf_section_data (htab->elf.splt->output_section)
5398 ->this_hdr.sh_entsize = abed->plt_entry_size;
5399
5400 if (htab->tlsdesc_plt)
5401 {
5402 bfd_put_64 (output_bfd, (bfd_vma) 0,
5403 htab->elf.sgot->contents + htab->tlsdesc_got);
5404
5405 memcpy (htab->elf.splt->contents + htab->tlsdesc_plt,
5406 abed->plt0_entry, abed->plt_entry_size);
5407
5408 /* Add the offset for pushq GOT+8(%rip); the
5409 instruction is 6 bytes long, so subtract 6 to make the offset relative to its end. */
5410 bfd_put_32 (output_bfd,
5411 (htab->elf.sgotplt->output_section->vma
5412 + htab->elf.sgotplt->output_offset
5413 + 8
5414 - htab->elf.splt->output_section->vma
5415 - htab->elf.splt->output_offset
5416 - htab->tlsdesc_plt
5417 - 6),
5418 htab->elf.splt->contents
5419 + htab->tlsdesc_plt + abed->plt0_got1_offset);
5420 /* Add the offset for the PC-relative instruction accessing GOT+TDG,
5421 where TDG stands for htab->tlsdesc_got, subtracting the offset
5422 to the end of that instruction. */
5423 bfd_put_32 (output_bfd,
5424 (htab->elf.sgot->output_section->vma
5425 + htab->elf.sgot->output_offset
5426 + htab->tlsdesc_got
5427 - htab->elf.splt->output_section->vma
5428 - htab->elf.splt->output_offset
5429 - htab->tlsdesc_plt
5430 - abed->plt0_got2_insn_end),
5431 htab->elf.splt->contents
5432 + htab->tlsdesc_plt + abed->plt0_got2_offset);
5433 }
5434 }
5435 }
5436
5437 if (htab->plt_bnd != NULL)
5438 elf_section_data (htab->plt_bnd->output_section)
5439 ->this_hdr.sh_entsize = sizeof (elf_x86_64_bnd_plt2_entry);
5440
5441 if (htab->elf.sgotplt)
5442 {
5443 if (bfd_is_abs_section (htab->elf.sgotplt->output_section))
5444 {
5445 (*_bfd_error_handler)
5446 (_("discarded output section: `%A'"), htab->elf.sgotplt);
5447 return FALSE;
5448 }
5449
5450 /* Fill in the first three entries in the global offset table. */
5451 if (htab->elf.sgotplt->size > 0)
5452 {
5453 /* Set the first entry in the global offset table to the address of
5454 the dynamic section. */
5455 if (sdyn == NULL)
5456 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents);
5457 else
5458 bfd_put_64 (output_bfd,
5459 sdyn->output_section->vma + sdyn->output_offset,
5460 htab->elf.sgotplt->contents);
5461 /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
5462 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents + GOT_ENTRY_SIZE);
5463 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents + GOT_ENTRY_SIZE*2);
5464 }
5465
5466 elf_section_data (htab->elf.sgotplt->output_section)->this_hdr.sh_entsize =
5467 GOT_ENTRY_SIZE;
5468 }
5469
5470 /* Adjust .eh_frame for .plt section. */
5471 if (htab->plt_eh_frame != NULL
5472 && htab->plt_eh_frame->contents != NULL)
5473 {
5474 if (htab->elf.splt != NULL
5475 && htab->elf.splt->size != 0
5476 && (htab->elf.splt->flags & SEC_EXCLUDE) == 0
5477 && htab->elf.splt->output_section != NULL
5478 && htab->plt_eh_frame->output_section != NULL)
5479 {
5480 bfd_vma plt_start = htab->elf.splt->output_section->vma;
5481 bfd_vma eh_frame_start = htab->plt_eh_frame->output_section->vma
5482 + htab->plt_eh_frame->output_offset
5483 + PLT_FDE_START_OFFSET;
5484 bfd_put_signed_32 (dynobj, plt_start - eh_frame_start,
5485 htab->plt_eh_frame->contents
5486 + PLT_FDE_START_OFFSET);
5487 }
5488 if (htab->plt_eh_frame->sec_info_type == SEC_INFO_TYPE_EH_FRAME)
5489 {
5490 if (! _bfd_elf_write_section_eh_frame (output_bfd, info,
5491 htab->plt_eh_frame,
5492 htab->plt_eh_frame->contents))
5493 return FALSE;
5494 }
5495 }
5496
5497 if (htab->elf.sgot && htab->elf.sgot->size > 0)
5498 elf_section_data (htab->elf.sgot->output_section)->this_hdr.sh_entsize
5499 = GOT_ENTRY_SIZE;
5500
5501 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
5502 htab_traverse (htab->loc_hash_table,
5503 elf_x86_64_finish_local_dynamic_symbol,
5504 info);
5505
5506 return TRUE;
5507 }
5508
5509 /* Return an array of PLT entry symbol values. */
5510
5511 static bfd_vma *
5512 elf_x86_64_get_plt_sym_val (bfd *abfd, asymbol **dynsyms, asection *plt,
5513 asection *relplt)
5514 {
5515 bfd_boolean (*slurp_relocs) (bfd *, asection *, asymbol **, bfd_boolean);
5516 arelent *p;
5517 long count, i;
5518 bfd_vma *plt_sym_val;
5519 bfd_vma plt_offset;
5520 bfd_byte *plt_contents;
5521 const struct elf_x86_64_backend_data *bed;
5522 Elf_Internal_Shdr *hdr;
5523 asection *plt_bnd;
5524
5525 /* Get the .plt section contents. PLT passed down may point to the
5526 .plt.bnd section. Make sure that PLT always points to the .plt
5527 section. */
5528 plt_bnd = bfd_get_section_by_name (abfd, ".plt.bnd");
5529 if (plt_bnd)
5530 {
5531 if (plt != plt_bnd)
5532 abort ();
5533 plt = bfd_get_section_by_name (abfd, ".plt");
5534 if (plt == NULL)
5535 abort ();
5536 bed = &elf_x86_64_bnd_arch_bed;
5537 }
5538 else
5539 bed = get_elf_x86_64_backend_data (abfd);
5540
5541 plt_contents = (bfd_byte *) bfd_malloc (plt->size);
5542 if (plt_contents == NULL)
5543 return NULL;
5544 if (!bfd_get_section_contents (abfd, (asection *) plt,
5545 plt_contents, 0, plt->size))
5546 {
5547 bad_return:
5548 free (plt_contents);
5549 return NULL;
5550 }
5551
5552 slurp_relocs = get_elf_backend_data (abfd)->s->slurp_reloc_table;
5553 if (! (*slurp_relocs) (abfd, relplt, dynsyms, TRUE))
5554 goto bad_return;
5555
5556 hdr = &elf_section_data (relplt)->this_hdr;
5557 count = relplt->size / hdr->sh_entsize;
5558
5559 plt_sym_val = (bfd_vma *) bfd_malloc (sizeof (bfd_vma) * count);
5560 if (plt_sym_val == NULL)
5561 goto bad_return;
5562
5563 for (i = 0; i < count; i++)
5564 plt_sym_val[i] = -1;
5565
5566 plt_offset = bed->plt_entry_size;
5567 p = relplt->relocation;
5568 for (i = 0; i < count; i++, p++)
5569 {
5570 long reloc_index;
5571
5572 /* Skip unknown relocation. */
5573 if (p->howto == NULL)
5574 continue;
5575
5576 if (p->howto->type != R_X86_64_JUMP_SLOT
5577 && p->howto->type != R_X86_64_IRELATIVE)
5578 continue;
5579
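/* Read the .rela.plt index back out of the pushq immediate
   inside this PLT entry; it tells us which relocation, and thus
   which symbol, the entry belongs to. */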
5580 reloc_index = H_GET_32 (abfd, (plt_contents + plt_offset
5581 + bed->plt_reloc_offset));
5582 if (reloc_index >= count)
5583 abort ();
5584 if (plt_bnd)
5585 {
5586 /* This is the index in .plt section. */
5587 long plt_index = plt_offset / bed->plt_entry_size;
5588 /* Store VMA + the offset in .plt.bnd section. */
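/* PLT0 has no counterpart in .plt.bnd, so entry N of .plt maps to entry
   N - 1 of .plt.bnd, each stub being
   sizeof (elf_x86_64_legacy_plt2_entry) bytes long.  */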
5589 plt_sym_val[reloc_index] =
5590 (plt_bnd->vma
5591 + (plt_index - 1) * sizeof (elf_x86_64_legacy_plt2_entry));
5592 }
5593 else
5594 plt_sym_val[reloc_index] = plt->vma + plt_offset;
5595 plt_offset += bed->plt_entry_size;
5596 }
5597
5598 free (plt_contents);
5599
5600 return plt_sym_val;
5601 }
5602
5603 /* Similar to _bfd_elf_get_synthetic_symtab, with .plt.bnd section
5604 support. */
5605
5606 static long
5607 elf_x86_64_get_synthetic_symtab (bfd *abfd,
5608 long symcount,
5609 asymbol **syms,
5610 long dynsymcount,
5611 asymbol **dynsyms,
5612 asymbol **ret)
5613 {
5614 /* Pass the .plt.bnd section to _bfd_elf_ifunc_get_synthetic_symtab
5615 as PLT if it exists. */
5616 asection *plt = bfd_get_section_by_name (abfd, ".plt.bnd");
5617 if (plt == NULL)
5618 plt = bfd_get_section_by_name (abfd, ".plt");
5619 return _bfd_elf_ifunc_get_synthetic_symtab (abfd, symcount, syms,
5620 dynsymcount, dynsyms, ret,
5621 plt,
5622 elf_x86_64_get_plt_sym_val);
5623 }
5624
5625 /* Handle an x86-64 specific section when reading an object file. This
5626 is called when elfcode.h finds a section with an unknown type. */
5627
5628 static bfd_boolean
5629 elf_x86_64_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr,
5630 const char *name, int shindex)
5631 {
5632 if (hdr->sh_type != SHT_X86_64_UNWIND)
5633 return FALSE;
5634
5635 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
5636 return FALSE;
5637
5638 return TRUE;
5639 }
5640
5641 /* Hook called by the linker routine which adds symbols from an object
5642 file. We use it to put SHN_X86_64_LCOMMON items in .lbss, instead
5643 of .bss. */
5644
5645 static bfd_boolean
5646 elf_x86_64_add_symbol_hook (bfd *abfd,
5647 struct bfd_link_info *info,
5648 Elf_Internal_Sym *sym,
5649 const char **namep ATTRIBUTE_UNUSED,
5650 flagword *flagsp ATTRIBUTE_UNUSED,
5651 asection **secp,
5652 bfd_vma *valp)
5653 {
5654 asection *lcomm;
5655
5656 switch (sym->st_shndx)
5657 {
5658 case SHN_X86_64_LCOMMON:
5659 lcomm = bfd_get_section_by_name (abfd, "LARGE_COMMON");
5660 if (lcomm == NULL)
5661 {
5662 lcomm = bfd_make_section_with_flags (abfd,
5663 "LARGE_COMMON",
5664 (SEC_ALLOC
5665 | SEC_IS_COMMON
5666 | SEC_LINKER_CREATED));
5667 if (lcomm == NULL)
5668 return FALSE;
5669 elf_section_flags (lcomm) |= SHF_X86_64_LARGE;
5670 }
5671 *secp = lcomm;
5672 *valp = sym->st_size;
5673 return TRUE;
5674 }
5675
5676 if ((ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC
5677 || ELF_ST_BIND (sym->st_info) == STB_GNU_UNIQUE)
5678 && (abfd->flags & DYNAMIC) == 0
5679 && bfd_get_flavour (info->output_bfd) == bfd_target_elf_flavour)
5680 elf_tdata (info->output_bfd)->has_gnu_symbols = TRUE;
5681
5682 return TRUE;
5683 }
5684
5685
5686 /* Given a BFD section, try to locate the corresponding ELF section
5687 index. */
5688
5689 static bfd_boolean
5690 elf_x86_64_elf_section_from_bfd_section (bfd *abfd ATTRIBUTE_UNUSED,
5691 asection *sec, int *index_return)
5692 {
5693 if (sec == &_bfd_elf_large_com_section)
5694 {
5695 *index_return = SHN_X86_64_LCOMMON;
5696 return TRUE;
5697 }
5698 return FALSE;
5699 }
5700
5701 /* Process a symbol. */
5702
5703 static void
5704 elf_x86_64_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
5705 asymbol *asym)
5706 {
5707 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
5708
5709 switch (elfsym->internal_elf_sym.st_shndx)
5710 {
5711 case SHN_X86_64_LCOMMON:
5712 asym->section = &_bfd_elf_large_com_section;
5713 asym->value = elfsym->internal_elf_sym.st_size;
5714 /* A common symbol doesn't set BSF_GLOBAL. */
5715 asym->flags &= ~BSF_GLOBAL;
5716 break;
5717 }
5718 }
5719
5720 static bfd_boolean
5721 elf_x86_64_common_definition (Elf_Internal_Sym *sym)
5722 {
5723 return (sym->st_shndx == SHN_COMMON
5724 || sym->st_shndx == SHN_X86_64_LCOMMON);
5725 }
5726
5727 static unsigned int
5728 elf_x86_64_common_section_index (asection *sec)
5729 {
5730 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
5731 return SHN_COMMON;
5732 else
5733 return SHN_X86_64_LCOMMON;
5734 }
5735
5736 static asection *
5737 elf_x86_64_common_section (asection *sec)
5738 {
5739 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
5740 return bfd_com_section_ptr;
5741 else
5742 return &_bfd_elf_large_com_section;
5743 }
5744
5745 static bfd_boolean
5746 elf_x86_64_merge_symbol (struct elf_link_hash_entry *h,
5747 const Elf_Internal_Sym *sym,
5748 asection **psec,
5749 bfd_boolean newdef,
5750 bfd_boolean olddef,
5751 bfd *oldbfd,
5752 const asection *oldsec)
5753 {
5754 /* A normal common symbol and a large common symbol result in a
5755 normal common symbol. We turn the large common symbol into a
5756 normal one. */
5757 if (!olddef
5758 && h->root.type == bfd_link_hash_common
5759 && !newdef
5760 && bfd_is_com_section (*psec)
5761 && oldsec != *psec)
5762 {
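/* Two cases to fix up: a new normal common meeting an old large common,
   and a new large common meeting an old normal common.  Either way the
   result must end up in a normal common section.  */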
5763 if (sym->st_shndx == SHN_COMMON
5764 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) != 0)
5765 {
5766 h->root.u.c.p->section
5767 = bfd_make_section_old_way (oldbfd, "COMMON");
5768 h->root.u.c.p->section->flags = SEC_ALLOC;
5769 }
5770 else if (sym->st_shndx == SHN_X86_64_LCOMMON
5771 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) == 0)
5772 *psec = bfd_com_section_ptr;
5773 }
5774
5775 return TRUE;
5776 }
5777
5778 static int
5779 elf_x86_64_additional_program_headers (bfd *abfd,
5780 struct bfd_link_info *info ATTRIBUTE_UNUSED)
5781 {
5782 asection *s;
5783 int count = 0;
5784
5785 /* Check to see if we need a large readonly segment. */
5786 s = bfd_get_section_by_name (abfd, ".lrodata");
5787 if (s && (s->flags & SEC_LOAD))
5788 count++;
5789
5790 /* Check to see if we need a large data segment. Since the .lbss
5791 section is placed right after the .bss section, there should be no
5792 need for a large data segment just because of .lbss. */
5793 s = bfd_get_section_by_name (abfd, ".ldata");
5794 if (s && (s->flags & SEC_LOAD))
5795 count++;
5796
5797 return count;
5798 }
5799
5800 /* Return TRUE if symbol should be hashed in the `.gnu.hash' section. */
5801
5802 static bfd_boolean
5803 elf_x86_64_hash_symbol (struct elf_link_hash_entry *h)
5804 {
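/* A symbol that only has a PLT entry, is not defined by a regular object,
   and never has its address taken is reached solely through that PLT
   entry, so it is never a candidate for lookups in this object's
   .gnu.hash table.  */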
5805 if (h->plt.offset != (bfd_vma) -1
5806 && !h->def_regular
5807 && !h->pointer_equality_needed)
5808 return FALSE;
5809
5810 return _bfd_elf_hash_symbol (h);
5811 }
5812
5813 /* Return TRUE iff relocations for INPUT are compatible with OUTPUT. */
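/* The x32 and 64-bit x86-64 backends share relocation handling, so on top
   of the generic check we also insist that input and output use the same
   ELF class.  */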
5814
5815 static bfd_boolean
5816 elf_x86_64_relocs_compatible (const bfd_target *input,
5817 const bfd_target *output)
5818 {
5819 return ((xvec_get_elf_backend_data (input)->s->elfclass
5820 == xvec_get_elf_backend_data (output)->s->elfclass)
5821 && _bfd_elf_relocs_compatible (input, output));
5822 }
5823
5824 static const struct bfd_elf_special_section
5825 elf_x86_64_special_sections[]=
5826 {
5827 { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5828 { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
5829 { STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE},
5830 { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5831 { STRING_COMMA_LEN (".ldata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5832 { STRING_COMMA_LEN (".lrodata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
5833 { NULL, 0, 0, 0, 0 }
5834 };
5835
5836 #define TARGET_LITTLE_SYM x86_64_elf64_vec
5837 #define TARGET_LITTLE_NAME "elf64-x86-64"
5838 #define ELF_ARCH bfd_arch_i386
5839 #define ELF_TARGET_ID X86_64_ELF_DATA
5840 #define ELF_MACHINE_CODE EM_X86_64
5841 #define ELF_MAXPAGESIZE 0x200000
5842 #define ELF_MINPAGESIZE 0x1000
5843 #define ELF_COMMONPAGESIZE 0x1000
5844
5845 #define elf_backend_can_gc_sections 1
5846 #define elf_backend_can_refcount 1
5847 #define elf_backend_want_got_plt 1
5848 #define elf_backend_plt_readonly 1
5849 #define elf_backend_want_plt_sym 0
5850 #define elf_backend_got_header_size (GOT_ENTRY_SIZE*3)
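/* The GOT header is the three reserved entries: the address of _DYNAMIC
   plus the two slots the dynamic linker fills in for lazy PLT
   resolution.  */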
5851 #define elf_backend_rela_normal 1
5852 #define elf_backend_plt_alignment 4
5853
5854 #define elf_info_to_howto elf_x86_64_info_to_howto
5855
5856 #define bfd_elf64_bfd_link_hash_table_create \
5857 elf_x86_64_link_hash_table_create
5858 #define bfd_elf64_bfd_reloc_type_lookup elf_x86_64_reloc_type_lookup
5859 #define bfd_elf64_bfd_reloc_name_lookup \
5860 elf_x86_64_reloc_name_lookup
5861
5862 #define elf_backend_adjust_dynamic_symbol elf_x86_64_adjust_dynamic_symbol
5863 #define elf_backend_relocs_compatible elf_x86_64_relocs_compatible
5864 #define elf_backend_check_relocs elf_x86_64_check_relocs
5865 #define elf_backend_copy_indirect_symbol elf_x86_64_copy_indirect_symbol
5866 #define elf_backend_create_dynamic_sections elf_x86_64_create_dynamic_sections
5867 #define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections
5868 #define elf_backend_finish_dynamic_symbol elf_x86_64_finish_dynamic_symbol
5869 #define elf_backend_gc_mark_hook elf_x86_64_gc_mark_hook
5870 #define elf_backend_gc_sweep_hook elf_x86_64_gc_sweep_hook
5871 #define elf_backend_grok_prstatus elf_x86_64_grok_prstatus
5872 #define elf_backend_grok_psinfo elf_x86_64_grok_psinfo
5873 #ifdef CORE_HEADER
5874 #define elf_backend_write_core_note elf_x86_64_write_core_note
5875 #endif
5876 #define elf_backend_reloc_type_class elf_x86_64_reloc_type_class
5877 #define elf_backend_relocate_section elf_x86_64_relocate_section
5878 #define elf_backend_size_dynamic_sections elf_x86_64_size_dynamic_sections
5879 #define elf_backend_always_size_sections elf_x86_64_always_size_sections
5880 #define elf_backend_init_index_section _bfd_elf_init_1_index_section
5881 #define elf_backend_object_p elf64_x86_64_elf_object_p
5882 #define bfd_elf64_mkobject elf_x86_64_mkobject
5883 #define bfd_elf64_get_synthetic_symtab elf_x86_64_get_synthetic_symtab
5884
5885 #define elf_backend_section_from_shdr \
5886 elf_x86_64_section_from_shdr
5887
5888 #define elf_backend_section_from_bfd_section \
5889 elf_x86_64_elf_section_from_bfd_section
5890 #define elf_backend_add_symbol_hook \
5891 elf_x86_64_add_symbol_hook
5892 #define elf_backend_symbol_processing \
5893 elf_x86_64_symbol_processing
5894 #define elf_backend_common_section_index \
5895 elf_x86_64_common_section_index
5896 #define elf_backend_common_section \
5897 elf_x86_64_common_section
5898 #define elf_backend_common_definition \
5899 elf_x86_64_common_definition
5900 #define elf_backend_merge_symbol \
5901 elf_x86_64_merge_symbol
5902 #define elf_backend_special_sections \
5903 elf_x86_64_special_sections
5904 #define elf_backend_additional_program_headers \
5905 elf_x86_64_additional_program_headers
5906 #define elf_backend_hash_symbol \
5907 elf_x86_64_hash_symbol
5908
5909 #include "elf64-target.h"
5910
5911 /* FreeBSD support. */
5912
5913 #undef TARGET_LITTLE_SYM
5914 #define TARGET_LITTLE_SYM x86_64_elf64_fbsd_vec
5915 #undef TARGET_LITTLE_NAME
5916 #define TARGET_LITTLE_NAME "elf64-x86-64-freebsd"
5917
5918 #undef ELF_OSABI
5919 #define ELF_OSABI ELFOSABI_FREEBSD
5920
5921 #undef elf64_bed
5922 #define elf64_bed elf64_x86_64_fbsd_bed
5923
5924 #include "elf64-target.h"
5925
5926 /* Solaris 2 support. */
5927
5928 #undef TARGET_LITTLE_SYM
5929 #define TARGET_LITTLE_SYM x86_64_elf64_sol2_vec
5930 #undef TARGET_LITTLE_NAME
5931 #define TARGET_LITTLE_NAME "elf64-x86-64-sol2"
5932
5933 /* Restore default: we cannot use ELFOSABI_SOLARIS, otherwise ELFOSABI_NONE
5934 objects won't be recognized. */
5935 #undef ELF_OSABI
5936
5937 #undef elf64_bed
5938 #define elf64_bed elf64_x86_64_sol2_bed
5939
5940 /* The 64-bit static TLS arena size is rounded to the nearest 16-byte
5941 boundary. */
5942 #undef elf_backend_static_tls_alignment
5943 #define elf_backend_static_tls_alignment 16
5944
5945 /* The Solaris 2 ABI requires a plt symbol on all platforms.
5946
5947 Cf. Linker and Libraries Guide, Ch. 2, Link-Editor, Generating the Output
5948 File, p.63. */
5949 #undef elf_backend_want_plt_sym
5950 #define elf_backend_want_plt_sym 1
5951
5952 #include "elf64-target.h"
5953
5954 /* Native Client support. */
5955
5956 static bfd_boolean
5957 elf64_x86_64_nacl_elf_object_p (bfd *abfd)
5958 {
5959 /* Set the right machine number for a NaCl x86-64 ELF64 file. */
5960 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64_nacl);
5961 return TRUE;
5962 }
5963
5964 #undef TARGET_LITTLE_SYM
5965 #define TARGET_LITTLE_SYM x86_64_elf64_nacl_vec
5966 #undef TARGET_LITTLE_NAME
5967 #define TARGET_LITTLE_NAME "elf64-x86-64-nacl"
5968 #undef elf64_bed
5969 #define elf64_bed elf64_x86_64_nacl_bed
5970
5971 #undef ELF_MAXPAGESIZE
5972 #undef ELF_MINPAGESIZE
5973 #undef ELF_COMMONPAGESIZE
5974 #define ELF_MAXPAGESIZE 0x10000
5975 #define ELF_MINPAGESIZE 0x10000
5976 #define ELF_COMMONPAGESIZE 0x10000
5977
5978 /* Restore defaults. */
5979 #undef ELF_OSABI
5980 #undef elf_backend_static_tls_alignment
5981 #undef elf_backend_want_plt_sym
5982 #define elf_backend_want_plt_sym 0
5983
5984 /* NaCl uses substantially different PLT entries for the same effects. */
5985
5986 #undef elf_backend_plt_alignment
5987 #define elf_backend_plt_alignment 5
5988 #define NACL_PLT_ENTRY_SIZE 64
5989 #define NACLMASK 0xe0 /* 32-byte alignment mask. */
5990
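/* NaCl requires indirect branch targets to be 32-byte aligned and inside
   the sandbox: the and/add/jmpq sequences below mask the target to a
   32-byte boundary with NACLMASK (the 32-bit and also clears the upper
   half of %r11) and rebase it on the sandbox pointer kept in %r15.  */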
5991 static const bfd_byte elf_x86_64_nacl_plt0_entry[NACL_PLT_ENTRY_SIZE] =
5992 {
5993 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
5994 0x4c, 0x8b, 0x1d, 16, 0, 0, 0, /* mov GOT+16(%rip), %r11 */
5995 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
5996 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
5997 0x41, 0xff, 0xe3, /* jmpq *%r11 */
5998
5999 /* 9-byte nop sequence to pad out to the next 32-byte boundary. */
6000 0x66, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw 0x0(%rax,%rax,1) */
6001
6002 /* 32 bytes of nop to pad out to the standard size. */
6003 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
6004 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6005 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
6006 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6007 0x66, /* excess data32 prefix */
6008 0x90 /* nop */
6009 };
6010
6011 static const bfd_byte elf_x86_64_nacl_plt_entry[NACL_PLT_ENTRY_SIZE] =
6012 {
6013 0x4c, 0x8b, 0x1d, 0, 0, 0, 0, /* mov name@GOTPCREL(%rip),%r11 */
6014 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
6015 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
6016 0x41, 0xff, 0xe3, /* jmpq *%r11 */
6017
6018 /* 15-byte nop sequence to pad out to the next 32-byte boundary. */
6019 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
6020 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6021
6022 /* Lazy GOT entries point here (32-byte aligned). */
6023 0x68, /* pushq immediate */
6024 0, 0, 0, 0, /* replaced with index into relocation table. */
6025 0xe9, /* jmp relative */
6026 0, 0, 0, 0, /* replaced with offset to start of .plt0. */
6027
6028 /* 22 bytes of nop to pad out to the standard size. */
6029 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
6030 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6031 0x0f, 0x1f, 0x80, 0, 0, 0, 0, /* nopl 0x0(%rax) */
6032 };
6033
6034 /* .eh_frame covering the .plt section. */
6035
6036 static const bfd_byte elf_x86_64_nacl_eh_frame_plt[] =
6037 {
6038 #if (PLT_CIE_LENGTH != 20 \
6039 || PLT_FDE_LENGTH != 36 \
6040 || PLT_FDE_START_OFFSET != 4 + PLT_CIE_LENGTH + 8 \
6041 || PLT_FDE_LEN_OFFSET != 4 + PLT_CIE_LENGTH + 12)
6042 # error "Need elf_x86_64_backend_data parameters for eh_frame_plt offsets!"
6043 #endif
6044 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
6045 0, 0, 0, 0, /* CIE ID */
6046 1, /* CIE version */
6047 'z', 'R', 0, /* Augmentation string */
6048 1, /* Code alignment factor */
6049 0x78, /* Data alignment factor */
6050 16, /* Return address column */
6051 1, /* Augmentation size */
6052 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
6053 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
6054 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
6055 DW_CFA_nop, DW_CFA_nop,
6056
6057 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
6058 PLT_CIE_LENGTH + 8, 0, 0, 0,/* CIE pointer */
6059 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
6060 0, 0, 0, 0, /* .plt size goes here */
6061 0, /* Augmentation size */
6062 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
6063 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
6064 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
6065 DW_CFA_advance_loc + 58, /* DW_CFA_advance_loc: 58 to __PLT__+64 */
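/* From __PLT__+64 on, i.e. in the regular 64-byte PLT entries, the CFA is
   %rsp + 8, plus another 8 once the lazy-binding pushq at entry offset 32
   has executed; the expression below tests this as (%rip & 63) >= 37.  */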
6066 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
6067 13, /* Block length */
6068 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
6069 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
6070 DW_OP_const1u, 63, DW_OP_and, DW_OP_const1u, 37, DW_OP_ge,
6071 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
6072 DW_CFA_nop, DW_CFA_nop
6073 };
6074
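/* The *_offset fields below are byte offsets into the NaCl PLT templates
   above; e.g. plt_reloc_offset 33 is the pushq immediate and
   plt_plt_offset 38 the jmp displacement in elf_x86_64_nacl_plt_entry.  */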
6075 static const struct elf_x86_64_backend_data elf_x86_64_nacl_arch_bed =
6076 {
6077 elf_x86_64_nacl_plt0_entry, /* plt0_entry */
6078 elf_x86_64_nacl_plt_entry, /* plt_entry */
6079 NACL_PLT_ENTRY_SIZE, /* plt_entry_size */
6080 2, /* plt0_got1_offset */
6081 9, /* plt0_got2_offset */
6082 13, /* plt0_got2_insn_end */
6083 3, /* plt_got_offset */
6084 33, /* plt_reloc_offset */
6085 38, /* plt_plt_offset */
6086 7, /* plt_got_insn_size */
6087 42, /* plt_plt_insn_end */
6088 32, /* plt_lazy_offset */
6089 elf_x86_64_nacl_eh_frame_plt, /* eh_frame_plt */
6090 sizeof (elf_x86_64_nacl_eh_frame_plt), /* eh_frame_plt_size */
6091 };
6092
6093 #undef elf_backend_arch_data
6094 #define elf_backend_arch_data &elf_x86_64_nacl_arch_bed
6095
6096 #undef elf_backend_object_p
6097 #define elf_backend_object_p elf64_x86_64_nacl_elf_object_p
6098 #undef elf_backend_modify_segment_map
6099 #define elf_backend_modify_segment_map nacl_modify_segment_map
6100 #undef elf_backend_modify_program_headers
6101 #define elf_backend_modify_program_headers nacl_modify_program_headers
6102 #undef elf_backend_final_write_processing
6103 #define elf_backend_final_write_processing nacl_final_write_processing
6104
6105 #include "elf64-target.h"
6106
6107 /* Native Client x32 support. */
6108
6109 static bfd_boolean
6110 elf32_x86_64_nacl_elf_object_p (bfd *abfd)
6111 {
6112 /* Set the right machine number for a NaCl x86-64 ELF32 file. */
6113 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32_nacl);
6114 return TRUE;
6115 }
6116
6117 #undef TARGET_LITTLE_SYM
6118 #define TARGET_LITTLE_SYM x86_64_elf32_nacl_vec
6119 #undef TARGET_LITTLE_NAME
6120 #define TARGET_LITTLE_NAME "elf32-x86-64-nacl"
6121 #undef elf32_bed
6122 #define elf32_bed elf32_x86_64_nacl_bed
6123
6124 #define bfd_elf32_bfd_link_hash_table_create \
6125 elf_x86_64_link_hash_table_create
6126 #define bfd_elf32_bfd_reloc_type_lookup \
6127 elf_x86_64_reloc_type_lookup
6128 #define bfd_elf32_bfd_reloc_name_lookup \
6129 elf_x86_64_reloc_name_lookup
6130 #define bfd_elf32_mkobject \
6131 elf_x86_64_mkobject
6132 #define bfd_elf32_get_synthetic_symtab \
6133 elf_x86_64_get_synthetic_symtab
6134
6135 #undef elf_backend_object_p
6136 #define elf_backend_object_p \
6137 elf32_x86_64_nacl_elf_object_p
6138
6139 #undef elf_backend_bfd_from_remote_memory
6140 #define elf_backend_bfd_from_remote_memory \
6141 _bfd_elf32_bfd_from_remote_memory
6142
6143 #undef elf_backend_size_info
6144 #define elf_backend_size_info \
6145 _bfd_elf32_size_info
6146
6147 #include "elf32-target.h"
6148
6149 /* Restore defaults. */
6150 #undef elf_backend_object_p
6151 #define elf_backend_object_p elf64_x86_64_elf_object_p
6152 #undef elf_backend_bfd_from_remote_memory
6153 #undef elf_backend_size_info
6154 #undef elf_backend_modify_segment_map
6155 #undef elf_backend_modify_program_headers
6156 #undef elf_backend_final_write_processing
6157
6158 /* Intel L1OM support. */
6159
6160 static bfd_boolean
6161 elf64_l1om_elf_object_p (bfd *abfd)
6162 {
6163 /* Set the right machine number for an L1OM ELF64 file. */
6164 bfd_default_set_arch_mach (abfd, bfd_arch_l1om, bfd_mach_l1om);
6165 return TRUE;
6166 }
6167
6168 #undef TARGET_LITTLE_SYM
6169 #define TARGET_LITTLE_SYM l1om_elf64_vec
6170 #undef TARGET_LITTLE_NAME
6171 #define TARGET_LITTLE_NAME "elf64-l1om"
6172 #undef ELF_ARCH
6173 #define ELF_ARCH bfd_arch_l1om
6174
6175 #undef ELF_MACHINE_CODE
6176 #define ELF_MACHINE_CODE EM_L1OM
6177
6178 #undef ELF_OSABI
6179
6180 #undef elf64_bed
6181 #define elf64_bed elf64_l1om_bed
6182
6183 #undef elf_backend_object_p
6184 #define elf_backend_object_p elf64_l1om_elf_object_p
6185
6186 /* Restore defaults. */
6187 #undef ELF_MAXPAGESIZE
6188 #undef ELF_MINPAGESIZE
6189 #undef ELF_COMMONPAGESIZE
6190 #define ELF_MAXPAGESIZE 0x200000
6191 #define ELF_MINPAGESIZE 0x1000
6192 #define ELF_COMMONPAGESIZE 0x1000
6193 #undef elf_backend_plt_alignment
6194 #define elf_backend_plt_alignment 4
6195 #undef elf_backend_arch_data
6196 #define elf_backend_arch_data &elf_x86_64_arch_bed
6197
6198 #include "elf64-target.h"
6199
6200 /* FreeBSD L1OM support. */
6201
6202 #undef TARGET_LITTLE_SYM
6203 #define TARGET_LITTLE_SYM l1om_elf64_fbsd_vec
6204 #undef TARGET_LITTLE_NAME
6205 #define TARGET_LITTLE_NAME "elf64-l1om-freebsd"
6206
6207 #undef ELF_OSABI
6208 #define ELF_OSABI ELFOSABI_FREEBSD
6209
6210 #undef elf64_bed
6211 #define elf64_bed elf64_l1om_fbsd_bed
6212
6213 #include "elf64-target.h"
6214
6215 /* Intel K1OM support. */
6216
6217 static bfd_boolean
6218 elf64_k1om_elf_object_p (bfd *abfd)
6219 {
6220 /* Set the right machine number for a K1OM ELF64 file. */
6221 bfd_default_set_arch_mach (abfd, bfd_arch_k1om, bfd_mach_k1om);
6222 return TRUE;
6223 }
6224
6225 #undef TARGET_LITTLE_SYM
6226 #define TARGET_LITTLE_SYM k1om_elf64_vec
6227 #undef TARGET_LITTLE_NAME
6228 #define TARGET_LITTLE_NAME "elf64-k1om"
6229 #undef ELF_ARCH
6230 #define ELF_ARCH bfd_arch_k1om
6231
6232 #undef ELF_MACHINE_CODE
6233 #define ELF_MACHINE_CODE EM_K1OM
6234
6235 #undef ELF_OSABI
6236
6237 #undef elf64_bed
6238 #define elf64_bed elf64_k1om_bed
6239
6240 #undef elf_backend_object_p
6241 #define elf_backend_object_p elf64_k1om_elf_object_p
6242
6243 #undef elf_backend_static_tls_alignment
6244
6245 #undef elf_backend_want_plt_sym
6246 #define elf_backend_want_plt_sym 0
6247
6248 #include "elf64-target.h"
6249
6250 /* FreeBSD K1OM support. */
6251
6252 #undef TARGET_LITTLE_SYM
6253 #define TARGET_LITTLE_SYM k1om_elf64_fbsd_vec
6254 #undef TARGET_LITTLE_NAME
6255 #define TARGET_LITTLE_NAME "elf64-k1om-freebsd"
6256
6257 #undef ELF_OSABI
6258 #define ELF_OSABI ELFOSABI_FREEBSD
6259
6260 #undef elf64_bed
6261 #define elf64_bed elf64_k1om_fbsd_bed
6262
6263 #include "elf64-target.h"
6264
6265 /* 32-bit x86-64 support. */
6266
6267 #undef TARGET_LITTLE_SYM
6268 #define TARGET_LITTLE_SYM x86_64_elf32_vec
6269 #undef TARGET_LITTLE_NAME
6270 #define TARGET_LITTLE_NAME "elf32-x86-64"
6271 #undef elf32_bed
6272
6273 #undef ELF_ARCH
6274 #define ELF_ARCH bfd_arch_i386
6275
6276 #undef ELF_MACHINE_CODE
6277 #define ELF_MACHINE_CODE EM_X86_64
6278
6279 #undef ELF_OSABI
6280
6281 #undef elf_backend_object_p
6282 #define elf_backend_object_p \
6283 elf32_x86_64_elf_object_p
6284
6285 #undef elf_backend_bfd_from_remote_memory
6286 #define elf_backend_bfd_from_remote_memory \
6287 _bfd_elf32_bfd_from_remote_memory
6288
6289 #undef elf_backend_size_info
6290 #define elf_backend_size_info \
6291 _bfd_elf32_size_info
6292
6293 #include "elf32-target.h"