Skip PLT for function pointer initialization
bfd/elf64-x86-64.c (deliverable/binutils-gdb.git)
1 /* X86-64 specific support for ELF
2 Copyright (C) 2000-2015 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka <jh@suse.cz>.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "sysdep.h"
23 #include "bfd.h"
24 #include "bfdlink.h"
25 #include "libbfd.h"
26 #include "elf-bfd.h"
27 #include "elf-nacl.h"
28 #include "bfd_stdint.h"
29 #include "objalloc.h"
30 #include "hashtab.h"
31 #include "dwarf2.h"
32 #include "libiberty.h"
33
34 #include "elf/x86-64.h"
35
36 #ifdef CORE_HEADER
37 #include <stdarg.h>
38 #include CORE_HEADER
39 #endif
40
41 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
42 #define MINUS_ONE (~ (bfd_vma) 0)
43
 44 /* Since 32-bit and 64-bit x86-64 encode the relocation type in an
 45 identical manner, we use ELF32_R_TYPE instead of ELF64_R_TYPE to get
 46 the relocation type. We also use ELF_ST_TYPE instead of ELF64_ST_TYPE
 47 since they are the same. */
48
49 #define ABI_64_P(abfd) \
50 (get_elf_backend_data (abfd)->s->elfclass == ELFCLASS64)
51
52 /* The relocation "howto" table. Order of fields:
53 type, rightshift, size, bitsize, pc_relative, bitpos, complain_on_overflow,
54 special_function, name, partial_inplace, src_mask, dst_mask, pcrel_offset. */
55 static reloc_howto_type x86_64_elf_howto_table[] =
56 {
57 HOWTO(R_X86_64_NONE, 0, 3, 0, FALSE, 0, complain_overflow_dont,
58 bfd_elf_generic_reloc, "R_X86_64_NONE", FALSE, 0x00000000, 0x00000000,
59 FALSE),
60 HOWTO(R_X86_64_64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
61 bfd_elf_generic_reloc, "R_X86_64_64", FALSE, MINUS_ONE, MINUS_ONE,
62 FALSE),
63 HOWTO(R_X86_64_PC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
64 bfd_elf_generic_reloc, "R_X86_64_PC32", FALSE, 0xffffffff, 0xffffffff,
65 TRUE),
66 HOWTO(R_X86_64_GOT32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
67 bfd_elf_generic_reloc, "R_X86_64_GOT32", FALSE, 0xffffffff, 0xffffffff,
68 FALSE),
69 HOWTO(R_X86_64_PLT32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
70 bfd_elf_generic_reloc, "R_X86_64_PLT32", FALSE, 0xffffffff, 0xffffffff,
71 TRUE),
72 HOWTO(R_X86_64_COPY, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
73 bfd_elf_generic_reloc, "R_X86_64_COPY", FALSE, 0xffffffff, 0xffffffff,
74 FALSE),
75 HOWTO(R_X86_64_GLOB_DAT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
76 bfd_elf_generic_reloc, "R_X86_64_GLOB_DAT", FALSE, MINUS_ONE,
77 MINUS_ONE, FALSE),
78 HOWTO(R_X86_64_JUMP_SLOT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
79 bfd_elf_generic_reloc, "R_X86_64_JUMP_SLOT", FALSE, MINUS_ONE,
80 MINUS_ONE, FALSE),
81 HOWTO(R_X86_64_RELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
82 bfd_elf_generic_reloc, "R_X86_64_RELATIVE", FALSE, MINUS_ONE,
83 MINUS_ONE, FALSE),
84 HOWTO(R_X86_64_GOTPCREL, 0, 2, 32, TRUE, 0, complain_overflow_signed,
85 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL", FALSE, 0xffffffff,
86 0xffffffff, TRUE),
87 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
88 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
89 FALSE),
90 HOWTO(R_X86_64_32S, 0, 2, 32, FALSE, 0, complain_overflow_signed,
91 bfd_elf_generic_reloc, "R_X86_64_32S", FALSE, 0xffffffff, 0xffffffff,
92 FALSE),
93 HOWTO(R_X86_64_16, 0, 1, 16, FALSE, 0, complain_overflow_bitfield,
94 bfd_elf_generic_reloc, "R_X86_64_16", FALSE, 0xffff, 0xffff, FALSE),
95 HOWTO(R_X86_64_PC16,0, 1, 16, TRUE, 0, complain_overflow_bitfield,
96 bfd_elf_generic_reloc, "R_X86_64_PC16", FALSE, 0xffff, 0xffff, TRUE),
97 HOWTO(R_X86_64_8, 0, 0, 8, FALSE, 0, complain_overflow_bitfield,
98 bfd_elf_generic_reloc, "R_X86_64_8", FALSE, 0xff, 0xff, FALSE),
99 HOWTO(R_X86_64_PC8, 0, 0, 8, TRUE, 0, complain_overflow_signed,
100 bfd_elf_generic_reloc, "R_X86_64_PC8", FALSE, 0xff, 0xff, TRUE),
101 HOWTO(R_X86_64_DTPMOD64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
102 bfd_elf_generic_reloc, "R_X86_64_DTPMOD64", FALSE, MINUS_ONE,
103 MINUS_ONE, FALSE),
104 HOWTO(R_X86_64_DTPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
105 bfd_elf_generic_reloc, "R_X86_64_DTPOFF64", FALSE, MINUS_ONE,
106 MINUS_ONE, FALSE),
107 HOWTO(R_X86_64_TPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
108 bfd_elf_generic_reloc, "R_X86_64_TPOFF64", FALSE, MINUS_ONE,
109 MINUS_ONE, FALSE),
110 HOWTO(R_X86_64_TLSGD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
111 bfd_elf_generic_reloc, "R_X86_64_TLSGD", FALSE, 0xffffffff,
112 0xffffffff, TRUE),
113 HOWTO(R_X86_64_TLSLD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
114 bfd_elf_generic_reloc, "R_X86_64_TLSLD", FALSE, 0xffffffff,
115 0xffffffff, TRUE),
116 HOWTO(R_X86_64_DTPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
117 bfd_elf_generic_reloc, "R_X86_64_DTPOFF32", FALSE, 0xffffffff,
118 0xffffffff, FALSE),
119 HOWTO(R_X86_64_GOTTPOFF, 0, 2, 32, TRUE, 0, complain_overflow_signed,
120 bfd_elf_generic_reloc, "R_X86_64_GOTTPOFF", FALSE, 0xffffffff,
121 0xffffffff, TRUE),
122 HOWTO(R_X86_64_TPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
123 bfd_elf_generic_reloc, "R_X86_64_TPOFF32", FALSE, 0xffffffff,
124 0xffffffff, FALSE),
125 HOWTO(R_X86_64_PC64, 0, 4, 64, TRUE, 0, complain_overflow_bitfield,
126 bfd_elf_generic_reloc, "R_X86_64_PC64", FALSE, MINUS_ONE, MINUS_ONE,
127 TRUE),
128 HOWTO(R_X86_64_GOTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
129 bfd_elf_generic_reloc, "R_X86_64_GOTOFF64",
130 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
131 HOWTO(R_X86_64_GOTPC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
132 bfd_elf_generic_reloc, "R_X86_64_GOTPC32",
133 FALSE, 0xffffffff, 0xffffffff, TRUE),
134 HOWTO(R_X86_64_GOT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
135 bfd_elf_generic_reloc, "R_X86_64_GOT64", FALSE, MINUS_ONE, MINUS_ONE,
136 FALSE),
137 HOWTO(R_X86_64_GOTPCREL64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
138 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL64", FALSE, MINUS_ONE,
139 MINUS_ONE, TRUE),
140 HOWTO(R_X86_64_GOTPC64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
141 bfd_elf_generic_reloc, "R_X86_64_GOTPC64",
142 FALSE, MINUS_ONE, MINUS_ONE, TRUE),
143 HOWTO(R_X86_64_GOTPLT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
144 bfd_elf_generic_reloc, "R_X86_64_GOTPLT64", FALSE, MINUS_ONE,
145 MINUS_ONE, FALSE),
146 HOWTO(R_X86_64_PLTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
147 bfd_elf_generic_reloc, "R_X86_64_PLTOFF64", FALSE, MINUS_ONE,
148 MINUS_ONE, FALSE),
149 HOWTO(R_X86_64_SIZE32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
150 bfd_elf_generic_reloc, "R_X86_64_SIZE32", FALSE, 0xffffffff, 0xffffffff,
151 FALSE),
152 HOWTO(R_X86_64_SIZE64, 0, 4, 64, FALSE, 0, complain_overflow_unsigned,
153 bfd_elf_generic_reloc, "R_X86_64_SIZE64", FALSE, MINUS_ONE, MINUS_ONE,
154 FALSE),
155 HOWTO(R_X86_64_GOTPC32_TLSDESC, 0, 2, 32, TRUE, 0,
156 complain_overflow_bitfield, bfd_elf_generic_reloc,
157 "R_X86_64_GOTPC32_TLSDESC",
158 FALSE, 0xffffffff, 0xffffffff, TRUE),
159 HOWTO(R_X86_64_TLSDESC_CALL, 0, 0, 0, FALSE, 0,
160 complain_overflow_dont, bfd_elf_generic_reloc,
161 "R_X86_64_TLSDESC_CALL",
162 FALSE, 0, 0, FALSE),
163 HOWTO(R_X86_64_TLSDESC, 0, 4, 64, FALSE, 0,
164 complain_overflow_bitfield, bfd_elf_generic_reloc,
165 "R_X86_64_TLSDESC",
166 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
167 HOWTO(R_X86_64_IRELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
168 bfd_elf_generic_reloc, "R_X86_64_IRELATIVE", FALSE, MINUS_ONE,
169 MINUS_ONE, FALSE),
170 HOWTO(R_X86_64_RELATIVE64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
171 bfd_elf_generic_reloc, "R_X86_64_RELATIVE64", FALSE, MINUS_ONE,
172 MINUS_ONE, FALSE),
173 HOWTO(R_X86_64_PC32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
174 bfd_elf_generic_reloc, "R_X86_64_PC32_BND", FALSE, 0xffffffff, 0xffffffff,
175 TRUE),
176 HOWTO(R_X86_64_PLT32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
177 bfd_elf_generic_reloc, "R_X86_64_PLT32_BND", FALSE, 0xffffffff, 0xffffffff,
178 TRUE),
179
180 /* We have a gap in the reloc numbers here.
181 R_X86_64_standard counts the number up to this point, and
182 R_X86_64_vt_offset is the value to subtract from a reloc type of
183 R_X86_64_GNU_VT* to form an index into this table. */
184 #define R_X86_64_standard (R_X86_64_PLT32_BND + 1)
185 #define R_X86_64_vt_offset (R_X86_64_GNU_VTINHERIT - R_X86_64_standard)
186
187 /* GNU extension to record C++ vtable hierarchy. */
188 HOWTO (R_X86_64_GNU_VTINHERIT, 0, 4, 0, FALSE, 0, complain_overflow_dont,
189 NULL, "R_X86_64_GNU_VTINHERIT", FALSE, 0, 0, FALSE),
190
191 /* GNU extension to record C++ vtable member usage. */
192 HOWTO (R_X86_64_GNU_VTENTRY, 0, 4, 0, FALSE, 0, complain_overflow_dont,
193 _bfd_elf_rel_vtable_reloc_fn, "R_X86_64_GNU_VTENTRY", FALSE, 0, 0,
194 FALSE),
195
196 /* Use complain_overflow_bitfield on R_X86_64_32 for x32. */
197 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
198 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
199 FALSE)
200 };
201
202 #define IS_X86_64_PCREL_TYPE(TYPE) \
203 ( ((TYPE) == R_X86_64_PC8) \
204 || ((TYPE) == R_X86_64_PC16) \
205 || ((TYPE) == R_X86_64_PC32) \
206 || ((TYPE) == R_X86_64_PC32_BND) \
207 || ((TYPE) == R_X86_64_PC64))
208
209 /* Map BFD relocs to the x86_64 elf relocs. */
210 struct elf_reloc_map
211 {
212 bfd_reloc_code_real_type bfd_reloc_val;
213 unsigned char elf_reloc_val;
214 };
215
216 static const struct elf_reloc_map x86_64_reloc_map[] =
217 {
218 { BFD_RELOC_NONE, R_X86_64_NONE, },
219 { BFD_RELOC_64, R_X86_64_64, },
220 { BFD_RELOC_32_PCREL, R_X86_64_PC32, },
221 { BFD_RELOC_X86_64_GOT32, R_X86_64_GOT32,},
222 { BFD_RELOC_X86_64_PLT32, R_X86_64_PLT32,},
223 { BFD_RELOC_X86_64_COPY, R_X86_64_COPY, },
224 { BFD_RELOC_X86_64_GLOB_DAT, R_X86_64_GLOB_DAT, },
225 { BFD_RELOC_X86_64_JUMP_SLOT, R_X86_64_JUMP_SLOT, },
226 { BFD_RELOC_X86_64_RELATIVE, R_X86_64_RELATIVE, },
227 { BFD_RELOC_X86_64_GOTPCREL, R_X86_64_GOTPCREL, },
228 { BFD_RELOC_32, R_X86_64_32, },
229 { BFD_RELOC_X86_64_32S, R_X86_64_32S, },
230 { BFD_RELOC_16, R_X86_64_16, },
231 { BFD_RELOC_16_PCREL, R_X86_64_PC16, },
232 { BFD_RELOC_8, R_X86_64_8, },
233 { BFD_RELOC_8_PCREL, R_X86_64_PC8, },
234 { BFD_RELOC_X86_64_DTPMOD64, R_X86_64_DTPMOD64, },
235 { BFD_RELOC_X86_64_DTPOFF64, R_X86_64_DTPOFF64, },
236 { BFD_RELOC_X86_64_TPOFF64, R_X86_64_TPOFF64, },
237 { BFD_RELOC_X86_64_TLSGD, R_X86_64_TLSGD, },
238 { BFD_RELOC_X86_64_TLSLD, R_X86_64_TLSLD, },
239 { BFD_RELOC_X86_64_DTPOFF32, R_X86_64_DTPOFF32, },
240 { BFD_RELOC_X86_64_GOTTPOFF, R_X86_64_GOTTPOFF, },
241 { BFD_RELOC_X86_64_TPOFF32, R_X86_64_TPOFF32, },
242 { BFD_RELOC_64_PCREL, R_X86_64_PC64, },
243 { BFD_RELOC_X86_64_GOTOFF64, R_X86_64_GOTOFF64, },
244 { BFD_RELOC_X86_64_GOTPC32, R_X86_64_GOTPC32, },
245 { BFD_RELOC_X86_64_GOT64, R_X86_64_GOT64, },
246 { BFD_RELOC_X86_64_GOTPCREL64,R_X86_64_GOTPCREL64, },
247 { BFD_RELOC_X86_64_GOTPC64, R_X86_64_GOTPC64, },
248 { BFD_RELOC_X86_64_GOTPLT64, R_X86_64_GOTPLT64, },
249 { BFD_RELOC_X86_64_PLTOFF64, R_X86_64_PLTOFF64, },
250 { BFD_RELOC_SIZE32, R_X86_64_SIZE32, },
251 { BFD_RELOC_SIZE64, R_X86_64_SIZE64, },
252 { BFD_RELOC_X86_64_GOTPC32_TLSDESC, R_X86_64_GOTPC32_TLSDESC, },
253 { BFD_RELOC_X86_64_TLSDESC_CALL, R_X86_64_TLSDESC_CALL, },
254 { BFD_RELOC_X86_64_TLSDESC, R_X86_64_TLSDESC, },
255 { BFD_RELOC_X86_64_IRELATIVE, R_X86_64_IRELATIVE, },
256 { BFD_RELOC_X86_64_PC32_BND, R_X86_64_PC32_BND,},
257 { BFD_RELOC_X86_64_PLT32_BND, R_X86_64_PLT32_BND,},
258 { BFD_RELOC_VTABLE_INHERIT, R_X86_64_GNU_VTINHERIT, },
259 { BFD_RELOC_VTABLE_ENTRY, R_X86_64_GNU_VTENTRY, },
260 };
261
262 static reloc_howto_type *
263 elf_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type)
264 {
265 unsigned i;
266
267 if (r_type == (unsigned int) R_X86_64_32)
268 {
269 if (ABI_64_P (abfd))
270 i = r_type;
271 else
272 i = ARRAY_SIZE (x86_64_elf_howto_table) - 1;
273 }
274 else if (r_type < (unsigned int) R_X86_64_GNU_VTINHERIT
275 || r_type >= (unsigned int) R_X86_64_max)
276 {
277 if (r_type >= (unsigned int) R_X86_64_standard)
278 {
279 (*_bfd_error_handler) (_("%B: invalid relocation type %d"),
280 abfd, (int) r_type);
281 r_type = R_X86_64_NONE;
282 }
283 i = r_type;
284 }
285 else
286 i = r_type - (unsigned int) R_X86_64_vt_offset;
287 BFD_ASSERT (x86_64_elf_howto_table[i].type == r_type);
288 return &x86_64_elf_howto_table[i];
289 }
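
/* For illustration, a rough sketch of how the lookup above maps a
   relocation type onto a table index (assuming the relocation numbers
   from elf/x86-64.h):

     R_X86_64_PC32 and the other "standard" types index the table
       directly;
     R_X86_64_GNU_VTINHERIT and R_X86_64_GNU_VTENTRY are shifted down
       by R_X86_64_vt_offset, so they land on the two entries that
       follow the R_X86_64_standard gap;
     R_X86_64_32 for x32 is redirected to the very last entry, the
       variant that uses complain_overflow_bitfield.  */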
290
291 /* Given a BFD reloc type, return a HOWTO structure. */
292 static reloc_howto_type *
293 elf_x86_64_reloc_type_lookup (bfd *abfd,
294 bfd_reloc_code_real_type code)
295 {
296 unsigned int i;
297
298 for (i = 0; i < sizeof (x86_64_reloc_map) / sizeof (struct elf_reloc_map);
299 i++)
300 {
301 if (x86_64_reloc_map[i].bfd_reloc_val == code)
302 return elf_x86_64_rtype_to_howto (abfd,
303 x86_64_reloc_map[i].elf_reloc_val);
304 }
305 return NULL;
306 }
307
308 static reloc_howto_type *
309 elf_x86_64_reloc_name_lookup (bfd *abfd,
310 const char *r_name)
311 {
312 unsigned int i;
313
314 if (!ABI_64_P (abfd) && strcasecmp (r_name, "R_X86_64_32") == 0)
315 {
316 /* Get x32 R_X86_64_32. */
317 reloc_howto_type *reloc
318 = &x86_64_elf_howto_table[ARRAY_SIZE (x86_64_elf_howto_table) - 1];
319 BFD_ASSERT (reloc->type == (unsigned int) R_X86_64_32);
320 return reloc;
321 }
322
323 for (i = 0; i < ARRAY_SIZE (x86_64_elf_howto_table); i++)
324 if (x86_64_elf_howto_table[i].name != NULL
325 && strcasecmp (x86_64_elf_howto_table[i].name, r_name) == 0)
326 return &x86_64_elf_howto_table[i];
327
328 return NULL;
329 }
330
331 /* Given an x86_64 ELF reloc type, fill in an arelent structure. */
332
333 static void
334 elf_x86_64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *cache_ptr,
335 Elf_Internal_Rela *dst)
336 {
337 unsigned r_type;
338
339 r_type = ELF32_R_TYPE (dst->r_info);
340 cache_ptr->howto = elf_x86_64_rtype_to_howto (abfd, r_type);
341 BFD_ASSERT (r_type == cache_ptr->howto->type);
342 }
343 \f
344 /* Support for core dump NOTE sections. */
345 static bfd_boolean
346 elf_x86_64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
347 {
348 int offset;
349 size_t size;
350
351 switch (note->descsz)
352 {
353 default:
354 return FALSE;
355
 356 case 296: /* sizeof(struct elf_prstatus) on Linux/x32 */
357 /* pr_cursig */
358 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
359
360 /* pr_pid */
361 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
362
363 /* pr_reg */
364 offset = 72;
365 size = 216;
366
367 break;
368
 369 case 336: /* sizeof(struct elf_prstatus) on Linux/x86_64 */
370 /* pr_cursig */
371 elf_tdata (abfd)->core->signal
372 = bfd_get_16 (abfd, note->descdata + 12);
373
374 /* pr_pid */
375 elf_tdata (abfd)->core->lwpid
376 = bfd_get_32 (abfd, note->descdata + 32);
377
378 /* pr_reg */
379 offset = 112;
380 size = 216;
381
382 break;
383 }
384
385 /* Make a ".reg/999" section. */
386 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
387 size, note->descpos + offset);
388 }
389
390 static bfd_boolean
391 elf_x86_64_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
392 {
393 switch (note->descsz)
394 {
395 default:
396 return FALSE;
397
398 case 124: /* sizeof(struct elf_prpsinfo) on Linux/x32 */
399 elf_tdata (abfd)->core->pid
400 = bfd_get_32 (abfd, note->descdata + 12);
401 elf_tdata (abfd)->core->program
402 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
403 elf_tdata (abfd)->core->command
404 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
405 break;
406
407 case 136: /* sizeof(struct elf_prpsinfo) on Linux/x86_64 */
408 elf_tdata (abfd)->core->pid
409 = bfd_get_32 (abfd, note->descdata + 24);
410 elf_tdata (abfd)->core->program
411 = _bfd_elfcore_strndup (abfd, note->descdata + 40, 16);
412 elf_tdata (abfd)->core->command
413 = _bfd_elfcore_strndup (abfd, note->descdata + 56, 80);
414 }
415
 416 /* Note that for some reason, a spurious space is tacked
 417 onto the end of the args in some implementations (at least
 418 one, anyway), so strip it off if it exists. */
419
420 {
421 char *command = elf_tdata (abfd)->core->command;
422 int n = strlen (command);
423
424 if (0 < n && command[n - 1] == ' ')
425 command[n - 1] = '\0';
426 }
427
428 return TRUE;
429 }
430
431 #ifdef CORE_HEADER
432 static char *
433 elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz,
434 int note_type, ...)
435 {
436 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
437 va_list ap;
438 const char *fname, *psargs;
439 long pid;
440 int cursig;
441 const void *gregs;
442
443 switch (note_type)
444 {
445 default:
446 return NULL;
447
448 case NT_PRPSINFO:
449 va_start (ap, note_type);
450 fname = va_arg (ap, const char *);
451 psargs = va_arg (ap, const char *);
452 va_end (ap);
453
454 if (bed->s->elfclass == ELFCLASS32)
455 {
456 prpsinfo32_t data;
457 memset (&data, 0, sizeof (data));
458 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
459 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
460 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
461 &data, sizeof (data));
462 }
463 else
464 {
465 prpsinfo64_t data;
466 memset (&data, 0, sizeof (data));
467 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
468 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
469 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
470 &data, sizeof (data));
471 }
472 /* NOTREACHED */
473
474 case NT_PRSTATUS:
475 va_start (ap, note_type);
476 pid = va_arg (ap, long);
477 cursig = va_arg (ap, int);
478 gregs = va_arg (ap, const void *);
479 va_end (ap);
480
481 if (bed->s->elfclass == ELFCLASS32)
482 {
483 if (bed->elf_machine_code == EM_X86_64)
484 {
485 prstatusx32_t prstat;
486 memset (&prstat, 0, sizeof (prstat));
487 prstat.pr_pid = pid;
488 prstat.pr_cursig = cursig;
489 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
490 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
491 &prstat, sizeof (prstat));
492 }
493 else
494 {
495 prstatus32_t prstat;
496 memset (&prstat, 0, sizeof (prstat));
497 prstat.pr_pid = pid;
498 prstat.pr_cursig = cursig;
499 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
500 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
501 &prstat, sizeof (prstat));
502 }
503 }
504 else
505 {
506 prstatus64_t prstat;
507 memset (&prstat, 0, sizeof (prstat));
508 prstat.pr_pid = pid;
509 prstat.pr_cursig = cursig;
510 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
511 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
512 &prstat, sizeof (prstat));
513 }
514 }
515 /* NOTREACHED */
516 }
517 #endif
518 \f
519 /* Functions for the x86-64 ELF linker. */
520
521 /* The name of the dynamic interpreter. This is put in the .interp
522 section. */
523
524 #define ELF64_DYNAMIC_INTERPRETER "/lib/ld64.so.1"
525 #define ELF32_DYNAMIC_INTERPRETER "/lib/ldx32.so.1"
526
527 /* If ELIMINATE_COPY_RELOCS is non-zero, the linker will try to avoid
528 copying dynamic variables from a shared lib into an app's dynbss
529 section, and instead use a dynamic relocation to point into the
530 shared lib. */
531 #define ELIMINATE_COPY_RELOCS 1
532
533 /* The size in bytes of an entry in the global offset table. */
534
535 #define GOT_ENTRY_SIZE 8
536
537 /* The size in bytes of an entry in the procedure linkage table. */
538
539 #define PLT_ENTRY_SIZE 16
540
541 /* The first entry in a procedure linkage table looks like this. See the
542 SVR4 ABI i386 supplement and the x86-64 ABI to see how this works. */
543
544 static const bfd_byte elf_x86_64_plt0_entry[PLT_ENTRY_SIZE] =
545 {
546 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
547 0xff, 0x25, 16, 0, 0, 0, /* jmpq *GOT+16(%rip) */
548 0x0f, 0x1f, 0x40, 0x00 /* nopl 0(%rax) */
549 };
550
551 /* Subsequent entries in a procedure linkage table look like this. */
552
553 static const bfd_byte elf_x86_64_plt_entry[PLT_ENTRY_SIZE] =
554 {
555 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
556 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
557 0x68, /* pushq immediate */
558 0, 0, 0, 0, /* replaced with index into relocation table. */
559 0xe9, /* jmp relative */
560 0, 0, 0, 0 /* replaced with offset to start of .plt0. */
561 };
562
 563 /* The first entry in a procedure linkage table with BND relocations
 564 looks like this. */
565
566 static const bfd_byte elf_x86_64_bnd_plt0_entry[PLT_ENTRY_SIZE] =
567 {
568 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
569 0xf2, 0xff, 0x25, 16, 0, 0, 0, /* bnd jmpq *GOT+16(%rip) */
570 0x0f, 0x1f, 0 /* nopl (%rax) */
571 };
572
573 /* Subsequent entries for legacy branches in a procedure linkage table
574 with BND relocations look like this. */
575
576 static const bfd_byte elf_x86_64_legacy_plt_entry[PLT_ENTRY_SIZE] =
577 {
578 0x68, 0, 0, 0, 0, /* pushq immediate */
579 0xe9, 0, 0, 0, 0, /* jmpq relative */
580 0x66, 0x0f, 0x1f, 0x44, 0, 0 /* nopw (%rax,%rax,1) */
581 };
582
 583 /* Subsequent entries for branches with BND prefix in a procedure linkage
 584 table with BND relocations look like this. */
585
586 static const bfd_byte elf_x86_64_bnd_plt_entry[PLT_ENTRY_SIZE] =
587 {
588 0x68, 0, 0, 0, 0, /* pushq immediate */
589 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
590 0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */
591 };
592
593 /* Entries for legacy branches in the second procedure linkage table
594 look like this. */
595
596 static const bfd_byte elf_x86_64_legacy_plt2_entry[8] =
597 {
598 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
599 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
600 0x66, 0x90 /* xchg %ax,%ax */
601 };
602
603 /* Entries for branches with BND prefix in the second procedure linkage
604 table look like this. */
605
606 static const bfd_byte elf_x86_64_bnd_plt2_entry[8] =
607 {
608 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
609 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
610 0x90 /* nop */
611 };
612
613 /* .eh_frame covering the .plt section. */
614
615 static const bfd_byte elf_x86_64_eh_frame_plt[] =
616 {
617 #define PLT_CIE_LENGTH 20
618 #define PLT_FDE_LENGTH 36
619 #define PLT_FDE_START_OFFSET 4 + PLT_CIE_LENGTH + 8
620 #define PLT_FDE_LEN_OFFSET 4 + PLT_CIE_LENGTH + 12
621 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
622 0, 0, 0, 0, /* CIE ID */
623 1, /* CIE version */
624 'z', 'R', 0, /* Augmentation string */
625 1, /* Code alignment factor */
626 0x78, /* Data alignment factor */
627 16, /* Return address column */
628 1, /* Augmentation size */
629 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
630 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
631 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
632 DW_CFA_nop, DW_CFA_nop,
633
634 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
635 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
636 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
637 0, 0, 0, 0, /* .plt size goes here */
638 0, /* Augmentation size */
639 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
640 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
641 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
642 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
643 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
644 11, /* Block length */
645 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
646 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
647 DW_OP_lit15, DW_OP_and, DW_OP_lit11, DW_OP_ge,
648 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
649 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
650 };
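
/* A rough reading of the CFA expression above, for illustration: the
   unwinder evaluates

     CFA = %rsp + 8 + 8 * ((%rip & 15) >= 11)

   i.e. it pushes %rsp + 8 and %rip, masks the low four bits of %rip,
   compares them against 11 (the offset of the final jmp within each
   16-byte PLT entry), scales the 0/1 result by 8 and adds it in.  The
   CFA is therefore %rsp + 8 before the pushq inside a PLT entry has
   executed and %rsp + 16 afterwards.  */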
651
652 /* Architecture-specific backend data for x86-64. */
653
654 struct elf_x86_64_backend_data
655 {
656 /* Templates for the initial PLT entry and for subsequent entries. */
657 const bfd_byte *plt0_entry;
658 const bfd_byte *plt_entry;
659 unsigned int plt_entry_size; /* Size of each PLT entry. */
660
661 /* Offsets into plt0_entry that are to be replaced with GOT[1] and GOT[2]. */
662 unsigned int plt0_got1_offset;
663 unsigned int plt0_got2_offset;
664
665 /* Offset of the end of the PC-relative instruction containing
666 plt0_got2_offset. */
667 unsigned int plt0_got2_insn_end;
668
669 /* Offsets into plt_entry that are to be replaced with... */
670 unsigned int plt_got_offset; /* ... address of this symbol in .got. */
671 unsigned int plt_reloc_offset; /* ... offset into relocation table. */
672 unsigned int plt_plt_offset; /* ... offset to start of .plt. */
673
674 /* Length of the PC-relative instruction containing plt_got_offset. */
675 unsigned int plt_got_insn_size;
676
677 /* Offset of the end of the PC-relative jump to plt0_entry. */
678 unsigned int plt_plt_insn_end;
679
680 /* Offset into plt_entry where the initial value of the GOT entry points. */
681 unsigned int plt_lazy_offset;
682
683 /* .eh_frame covering the .plt section. */
684 const bfd_byte *eh_frame_plt;
685 unsigned int eh_frame_plt_size;
686 };
687
688 #define get_elf_x86_64_arch_data(bed) \
689 ((const struct elf_x86_64_backend_data *) (bed)->arch_data)
690
691 #define get_elf_x86_64_backend_data(abfd) \
692 get_elf_x86_64_arch_data (get_elf_backend_data (abfd))
693
694 #define GET_PLT_ENTRY_SIZE(abfd) \
695 get_elf_x86_64_backend_data (abfd)->plt_entry_size
696
697 /* These are the standard parameters. */
698 static const struct elf_x86_64_backend_data elf_x86_64_arch_bed =
699 {
700 elf_x86_64_plt0_entry, /* plt0_entry */
701 elf_x86_64_plt_entry, /* plt_entry */
702 sizeof (elf_x86_64_plt_entry), /* plt_entry_size */
703 2, /* plt0_got1_offset */
704 8, /* plt0_got2_offset */
705 12, /* plt0_got2_insn_end */
706 2, /* plt_got_offset */
707 7, /* plt_reloc_offset */
708 12, /* plt_plt_offset */
709 6, /* plt_got_insn_size */
710 PLT_ENTRY_SIZE, /* plt_plt_insn_end */
711 6, /* plt_lazy_offset */
712 elf_x86_64_eh_frame_plt, /* eh_frame_plt */
713 sizeof (elf_x86_64_eh_frame_plt), /* eh_frame_plt_size */
714 };
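
/* For illustration, roughly how these offsets line up with the
   elf_x86_64_plt_entry template above (the actual values are patched
   in later, when the PLT contents are written out):

     plt_got_offset = 2      the 4-byte operand of "jmpq *...(%rip)"
                             starts at byte 2 of the entry;
     plt_got_insn_size = 6   that jmpq is 6 bytes long, so the operand
                             stored there is roughly
                               GOT slot address - (PLT entry address + 6);
     plt_reloc_offset = 7    the pushq immediate (the .rela.plt index)
                             starts at byte 7;
     plt_plt_offset = 12     the operand of the final jmp back to PLT0
                             starts at byte 12.  */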
715
716 static const struct elf_x86_64_backend_data elf_x86_64_bnd_arch_bed =
717 {
718 elf_x86_64_bnd_plt0_entry, /* plt0_entry */
719 elf_x86_64_bnd_plt_entry, /* plt_entry */
720 sizeof (elf_x86_64_bnd_plt_entry), /* plt_entry_size */
721 2, /* plt0_got1_offset */
722 1+8, /* plt0_got2_offset */
723 1+12, /* plt0_got2_insn_end */
724 1+2, /* plt_got_offset */
725 1, /* plt_reloc_offset */
726 7, /* plt_plt_offset */
727 1+6, /* plt_got_insn_size */
728 11, /* plt_plt_insn_end */
729 0, /* plt_lazy_offset */
730 elf_x86_64_eh_frame_plt, /* eh_frame_plt */
731 sizeof (elf_x86_64_eh_frame_plt), /* eh_frame_plt_size */
732 };
733
734 #define elf_backend_arch_data &elf_x86_64_arch_bed
735
736 /* x86-64 ELF linker hash entry. */
737
738 struct elf_x86_64_link_hash_entry
739 {
740 struct elf_link_hash_entry elf;
741
742 /* Track dynamic relocs copied for this symbol. */
743 struct elf_dyn_relocs *dyn_relocs;
744
745 #define GOT_UNKNOWN 0
746 #define GOT_NORMAL 1
747 #define GOT_TLS_GD 2
748 #define GOT_TLS_IE 3
749 #define GOT_TLS_GDESC 4
750 #define GOT_TLS_GD_BOTH_P(type) \
751 ((type) == (GOT_TLS_GD | GOT_TLS_GDESC))
752 #define GOT_TLS_GD_P(type) \
753 ((type) == GOT_TLS_GD || GOT_TLS_GD_BOTH_P (type))
754 #define GOT_TLS_GDESC_P(type) \
755 ((type) == GOT_TLS_GDESC || GOT_TLS_GD_BOTH_P (type))
756 #define GOT_TLS_GD_ANY_P(type) \
757 (GOT_TLS_GD_P (type) || GOT_TLS_GDESC_P (type))
758 unsigned char tls_type;
759
760 /* TRUE if a weak symbol with a real definition needs a copy reloc.
761 When there is a weak symbol with a real definition, the processor
762 independent code will have arranged for us to see the real
763 definition first. We need to copy the needs_copy bit from the
764 real definition and check it when allowing copy reloc in PIE. */
765 unsigned int needs_copy : 1;
766
767 /* TRUE if symbol has at least one BND relocation. */
768 unsigned int has_bnd_reloc : 1;
769
 770 /* Reference count of C/C++ function pointer relocations in read-write
 771 sections which can be resolved at run-time. */
772 bfd_signed_vma func_pointer_refcount;
773
774 /* Information about the GOT PLT entry. Filled when there are both
775 GOT and PLT relocations against the same function. */
776 union gotplt_union plt_got;
777
778 /* Information about the second PLT entry. Filled when has_bnd_reloc is
779 set. */
780 union gotplt_union plt_bnd;
781
782 /* Offset of the GOTPLT entry reserved for the TLS descriptor,
783 starting at the end of the jump table. */
784 bfd_vma tlsdesc_got;
785 };
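
/* For illustration, how the tls_type values above combine: GOT_TLS_GD
   (2) and GOT_TLS_GDESC (4) can both be set for one symbol, giving the
   value 6 that GOT_TLS_GD_BOTH_P tests for.  GOT_TLS_GD_P and
   GOT_TLS_GDESC_P each accept their own value as well as the combined
   one, and GOT_TLS_GD_ANY_P accepts any of the three.  */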
786
787 #define elf_x86_64_hash_entry(ent) \
788 ((struct elf_x86_64_link_hash_entry *)(ent))
789
790 struct elf_x86_64_obj_tdata
791 {
792 struct elf_obj_tdata root;
793
794 /* tls_type for each local got entry. */
795 char *local_got_tls_type;
796
797 /* GOTPLT entries for TLS descriptors. */
798 bfd_vma *local_tlsdesc_gotent;
799 };
800
801 #define elf_x86_64_tdata(abfd) \
802 ((struct elf_x86_64_obj_tdata *) (abfd)->tdata.any)
803
804 #define elf_x86_64_local_got_tls_type(abfd) \
805 (elf_x86_64_tdata (abfd)->local_got_tls_type)
806
807 #define elf_x86_64_local_tlsdesc_gotent(abfd) \
808 (elf_x86_64_tdata (abfd)->local_tlsdesc_gotent)
809
810 #define is_x86_64_elf(bfd) \
811 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
812 && elf_tdata (bfd) != NULL \
813 && elf_object_id (bfd) == X86_64_ELF_DATA)
814
815 static bfd_boolean
816 elf_x86_64_mkobject (bfd *abfd)
817 {
818 return bfd_elf_allocate_object (abfd, sizeof (struct elf_x86_64_obj_tdata),
819 X86_64_ELF_DATA);
820 }
821
822 /* x86-64 ELF linker hash table. */
823
824 struct elf_x86_64_link_hash_table
825 {
826 struct elf_link_hash_table elf;
827
828 /* Short-cuts to get to dynamic linker sections. */
829 asection *sdynbss;
830 asection *srelbss;
831 asection *plt_eh_frame;
832 asection *plt_bnd;
833 asection *plt_got;
834
835 union
836 {
837 bfd_signed_vma refcount;
838 bfd_vma offset;
839 } tls_ld_got;
840
841 /* The amount of space used by the jump slots in the GOT. */
842 bfd_vma sgotplt_jump_table_size;
843
844 /* Small local sym cache. */
845 struct sym_cache sym_cache;
846
847 bfd_vma (*r_info) (bfd_vma, bfd_vma);
848 bfd_vma (*r_sym) (bfd_vma);
849 unsigned int pointer_r_type;
850 const char *dynamic_interpreter;
851 int dynamic_interpreter_size;
852
853 /* _TLS_MODULE_BASE_ symbol. */
854 struct bfd_link_hash_entry *tls_module_base;
855
856 /* Used by local STT_GNU_IFUNC symbols. */
857 htab_t loc_hash_table;
858 void * loc_hash_memory;
859
860 /* The offset into splt of the PLT entry for the TLS descriptor
861 resolver. Special values are 0, if not necessary (or not found
862 to be necessary yet), and -1 if needed but not determined
863 yet. */
864 bfd_vma tlsdesc_plt;
865 /* The offset into sgot of the GOT entry used by the PLT entry
866 above. */
867 bfd_vma tlsdesc_got;
868
869 /* The index of the next R_X86_64_JUMP_SLOT entry in .rela.plt. */
870 bfd_vma next_jump_slot_index;
871 /* The index of the next R_X86_64_IRELATIVE entry in .rela.plt. */
872 bfd_vma next_irelative_index;
873 };
874
875 /* Get the x86-64 ELF linker hash table from a link_info structure. */
876
877 #define elf_x86_64_hash_table(p) \
878 (elf_hash_table_id ((struct elf_link_hash_table *) ((p)->hash)) \
879 == X86_64_ELF_DATA ? ((struct elf_x86_64_link_hash_table *) ((p)->hash)) : NULL)
880
881 #define elf_x86_64_compute_jump_table_size(htab) \
882 ((htab)->elf.srelplt->reloc_count * GOT_ENTRY_SIZE)
883
884 /* Create an entry in an x86-64 ELF linker hash table. */
885
886 static struct bfd_hash_entry *
887 elf_x86_64_link_hash_newfunc (struct bfd_hash_entry *entry,
888 struct bfd_hash_table *table,
889 const char *string)
890 {
891 /* Allocate the structure if it has not already been allocated by a
892 subclass. */
893 if (entry == NULL)
894 {
895 entry = (struct bfd_hash_entry *)
896 bfd_hash_allocate (table,
897 sizeof (struct elf_x86_64_link_hash_entry));
898 if (entry == NULL)
899 return entry;
900 }
901
902 /* Call the allocation method of the superclass. */
903 entry = _bfd_elf_link_hash_newfunc (entry, table, string);
904 if (entry != NULL)
905 {
906 struct elf_x86_64_link_hash_entry *eh;
907
908 eh = (struct elf_x86_64_link_hash_entry *) entry;
909 eh->dyn_relocs = NULL;
910 eh->tls_type = GOT_UNKNOWN;
911 eh->needs_copy = 0;
912 eh->has_bnd_reloc = 0;
913 eh->func_pointer_refcount = 0;
914 eh->plt_bnd.offset = (bfd_vma) -1;
915 eh->plt_got.offset = (bfd_vma) -1;
916 eh->tlsdesc_got = (bfd_vma) -1;
917 }
918
919 return entry;
920 }
921
 922 /* Compute a hash of a local hash entry. We use elf_link_hash_entry
 923 for local symbols so that we can handle local STT_GNU_IFUNC symbols
 924 as global symbols. We reuse indx and dynstr_index for the local
 925 symbol hash since they aren't used by global symbols in this backend. */
926
927 static hashval_t
928 elf_x86_64_local_htab_hash (const void *ptr)
929 {
930 struct elf_link_hash_entry *h
931 = (struct elf_link_hash_entry *) ptr;
932 return ELF_LOCAL_SYMBOL_HASH (h->indx, h->dynstr_index);
933 }
934
935 /* Compare local hash entries. */
936
937 static int
938 elf_x86_64_local_htab_eq (const void *ptr1, const void *ptr2)
939 {
940 struct elf_link_hash_entry *h1
941 = (struct elf_link_hash_entry *) ptr1;
942 struct elf_link_hash_entry *h2
943 = (struct elf_link_hash_entry *) ptr2;
944
945 return h1->indx == h2->indx && h1->dynstr_index == h2->dynstr_index;
946 }
947
 948 /* Find and/or create a hash entry for a local symbol. */
949
950 static struct elf_link_hash_entry *
951 elf_x86_64_get_local_sym_hash (struct elf_x86_64_link_hash_table *htab,
952 bfd *abfd, const Elf_Internal_Rela *rel,
953 bfd_boolean create)
954 {
955 struct elf_x86_64_link_hash_entry e, *ret;
956 asection *sec = abfd->sections;
957 hashval_t h = ELF_LOCAL_SYMBOL_HASH (sec->id,
958 htab->r_sym (rel->r_info));
959 void **slot;
960
961 e.elf.indx = sec->id;
962 e.elf.dynstr_index = htab->r_sym (rel->r_info);
963 slot = htab_find_slot_with_hash (htab->loc_hash_table, &e, h,
964 create ? INSERT : NO_INSERT);
965
966 if (!slot)
967 return NULL;
968
969 if (*slot)
970 {
971 ret = (struct elf_x86_64_link_hash_entry *) *slot;
972 return &ret->elf;
973 }
974
975 ret = (struct elf_x86_64_link_hash_entry *)
976 objalloc_alloc ((struct objalloc *) htab->loc_hash_memory,
977 sizeof (struct elf_x86_64_link_hash_entry));
978 if (ret)
979 {
980 memset (ret, 0, sizeof (*ret));
981 ret->elf.indx = sec->id;
982 ret->elf.dynstr_index = htab->r_sym (rel->r_info);
983 ret->elf.dynindx = -1;
984 ret->func_pointer_refcount = 0;
985 ret->plt_got.offset = (bfd_vma) -1;
986 *slot = ret;
987 }
988 return &ret->elf;
989 }
990
991 /* Destroy an X86-64 ELF linker hash table. */
992
993 static void
994 elf_x86_64_link_hash_table_free (bfd *obfd)
995 {
996 struct elf_x86_64_link_hash_table *htab
997 = (struct elf_x86_64_link_hash_table *) obfd->link.hash;
998
999 if (htab->loc_hash_table)
1000 htab_delete (htab->loc_hash_table);
1001 if (htab->loc_hash_memory)
1002 objalloc_free ((struct objalloc *) htab->loc_hash_memory);
1003 _bfd_elf_link_hash_table_free (obfd);
1004 }
1005
1006 /* Create an X86-64 ELF linker hash table. */
1007
1008 static struct bfd_link_hash_table *
1009 elf_x86_64_link_hash_table_create (bfd *abfd)
1010 {
1011 struct elf_x86_64_link_hash_table *ret;
1012 bfd_size_type amt = sizeof (struct elf_x86_64_link_hash_table);
1013
1014 ret = (struct elf_x86_64_link_hash_table *) bfd_zmalloc (amt);
1015 if (ret == NULL)
1016 return NULL;
1017
1018 if (!_bfd_elf_link_hash_table_init (&ret->elf, abfd,
1019 elf_x86_64_link_hash_newfunc,
1020 sizeof (struct elf_x86_64_link_hash_entry),
1021 X86_64_ELF_DATA))
1022 {
1023 free (ret);
1024 return NULL;
1025 }
1026
1027 if (ABI_64_P (abfd))
1028 {
1029 ret->r_info = elf64_r_info;
1030 ret->r_sym = elf64_r_sym;
1031 ret->pointer_r_type = R_X86_64_64;
1032 ret->dynamic_interpreter = ELF64_DYNAMIC_INTERPRETER;
1033 ret->dynamic_interpreter_size = sizeof ELF64_DYNAMIC_INTERPRETER;
1034 }
1035 else
1036 {
1037 ret->r_info = elf32_r_info;
1038 ret->r_sym = elf32_r_sym;
1039 ret->pointer_r_type = R_X86_64_32;
1040 ret->dynamic_interpreter = ELF32_DYNAMIC_INTERPRETER;
1041 ret->dynamic_interpreter_size = sizeof ELF32_DYNAMIC_INTERPRETER;
1042 }
1043
1044 ret->loc_hash_table = htab_try_create (1024,
1045 elf_x86_64_local_htab_hash,
1046 elf_x86_64_local_htab_eq,
1047 NULL);
1048 ret->loc_hash_memory = objalloc_create ();
1049 if (!ret->loc_hash_table || !ret->loc_hash_memory)
1050 {
1051 elf_x86_64_link_hash_table_free (abfd);
1052 return NULL;
1053 }
1054 ret->elf.root.hash_table_free = elf_x86_64_link_hash_table_free;
1055
1056 return &ret->elf.root;
1057 }
1058
1059 /* Create .plt, .rela.plt, .got, .got.plt, .rela.got, .dynbss, and
1060 .rela.bss sections in DYNOBJ, and set up shortcuts to them in our
1061 hash table. */
1062
1063 static bfd_boolean
1064 elf_x86_64_create_dynamic_sections (bfd *dynobj,
1065 struct bfd_link_info *info)
1066 {
1067 struct elf_x86_64_link_hash_table *htab;
1068
1069 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
1070 return FALSE;
1071
1072 htab = elf_x86_64_hash_table (info);
1073 if (htab == NULL)
1074 return FALSE;
1075
1076 htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
1077 if (!htab->sdynbss)
1078 abort ();
1079
1080 if (bfd_link_executable (info))
1081 {
1082 /* Always allow copy relocs for building executables. */
1083 asection *s = bfd_get_linker_section (dynobj, ".rela.bss");
1084 if (s == NULL)
1085 {
1086 const struct elf_backend_data *bed = get_elf_backend_data (dynobj);
1087 s = bfd_make_section_anyway_with_flags (dynobj,
1088 ".rela.bss",
1089 (bed->dynamic_sec_flags
1090 | SEC_READONLY));
1091 if (s == NULL
1092 || ! bfd_set_section_alignment (dynobj, s,
1093 bed->s->log_file_align))
1094 return FALSE;
1095 }
1096 htab->srelbss = s;
1097 }
1098
1099 if (!info->no_ld_generated_unwind_info
1100 && htab->plt_eh_frame == NULL
1101 && htab->elf.splt != NULL)
1102 {
1103 flagword flags = (SEC_ALLOC | SEC_LOAD | SEC_READONLY
1104 | SEC_HAS_CONTENTS | SEC_IN_MEMORY
1105 | SEC_LINKER_CREATED);
1106 htab->plt_eh_frame
1107 = bfd_make_section_anyway_with_flags (dynobj, ".eh_frame", flags);
1108 if (htab->plt_eh_frame == NULL
1109 || !bfd_set_section_alignment (dynobj, htab->plt_eh_frame, 3))
1110 return FALSE;
1111 }
1112 return TRUE;
1113 }
1114
1115 /* Copy the extra info we tack onto an elf_link_hash_entry. */
1116
1117 static void
1118 elf_x86_64_copy_indirect_symbol (struct bfd_link_info *info,
1119 struct elf_link_hash_entry *dir,
1120 struct elf_link_hash_entry *ind)
1121 {
1122 struct elf_x86_64_link_hash_entry *edir, *eind;
1123
1124 edir = (struct elf_x86_64_link_hash_entry *) dir;
1125 eind = (struct elf_x86_64_link_hash_entry *) ind;
1126
1127 if (!edir->has_bnd_reloc)
1128 edir->has_bnd_reloc = eind->has_bnd_reloc;
1129
1130 if (eind->dyn_relocs != NULL)
1131 {
1132 if (edir->dyn_relocs != NULL)
1133 {
1134 struct elf_dyn_relocs **pp;
1135 struct elf_dyn_relocs *p;
1136
1137 /* Add reloc counts against the indirect sym to the direct sym
1138 list. Merge any entries against the same section. */
1139 for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
1140 {
1141 struct elf_dyn_relocs *q;
1142
1143 for (q = edir->dyn_relocs; q != NULL; q = q->next)
1144 if (q->sec == p->sec)
1145 {
1146 q->pc_count += p->pc_count;
1147 q->count += p->count;
1148 *pp = p->next;
1149 break;
1150 }
1151 if (q == NULL)
1152 pp = &p->next;
1153 }
1154 *pp = edir->dyn_relocs;
1155 }
1156
1157 edir->dyn_relocs = eind->dyn_relocs;
1158 eind->dyn_relocs = NULL;
1159 }
1160
1161 if (ind->root.type == bfd_link_hash_indirect
1162 && dir->got.refcount <= 0)
1163 {
1164 edir->tls_type = eind->tls_type;
1165 eind->tls_type = GOT_UNKNOWN;
1166 }
1167
1168 if (ELIMINATE_COPY_RELOCS
1169 && ind->root.type != bfd_link_hash_indirect
1170 && dir->dynamic_adjusted)
1171 {
1172 /* If called to transfer flags for a weakdef during processing
1173 of elf_adjust_dynamic_symbol, don't copy non_got_ref.
1174 We clear it ourselves for ELIMINATE_COPY_RELOCS. */
1175 dir->ref_dynamic |= ind->ref_dynamic;
1176 dir->ref_regular |= ind->ref_regular;
1177 dir->ref_regular_nonweak |= ind->ref_regular_nonweak;
1178 dir->needs_plt |= ind->needs_plt;
1179 dir->pointer_equality_needed |= ind->pointer_equality_needed;
1180 }
1181 else
1182 {
1183 if (eind->func_pointer_refcount > 0)
1184 {
1185 edir->func_pointer_refcount += eind->func_pointer_refcount;
1186 eind->func_pointer_refcount = 0;
1187 }
1188
1189 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
1190 }
1191 }
1192
1193 static bfd_boolean
1194 elf64_x86_64_elf_object_p (bfd *abfd)
1195 {
1196 /* Set the right machine number for an x86-64 elf64 file. */
1197 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64);
1198 return TRUE;
1199 }
1200
1201 static bfd_boolean
1202 elf32_x86_64_elf_object_p (bfd *abfd)
1203 {
1204 /* Set the right machine number for an x86-64 elf32 file. */
1205 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32);
1206 return TRUE;
1207 }
1208
 1209 /* Return TRUE if the TLS access code sequence supports a transition
 1210 from R_TYPE. */
1211
1212 static bfd_boolean
1213 elf_x86_64_check_tls_transition (bfd *abfd,
1214 struct bfd_link_info *info,
1215 asection *sec,
1216 bfd_byte *contents,
1217 Elf_Internal_Shdr *symtab_hdr,
1218 struct elf_link_hash_entry **sym_hashes,
1219 unsigned int r_type,
1220 const Elf_Internal_Rela *rel,
1221 const Elf_Internal_Rela *relend)
1222 {
1223 unsigned int val;
1224 unsigned long r_symndx;
1225 bfd_boolean largepic = FALSE;
1226 struct elf_link_hash_entry *h;
1227 bfd_vma offset;
1228 struct elf_x86_64_link_hash_table *htab;
1229
1230 /* Get the section contents. */
1231 if (contents == NULL)
1232 {
1233 if (elf_section_data (sec)->this_hdr.contents != NULL)
1234 contents = elf_section_data (sec)->this_hdr.contents;
1235 else
1236 {
1237 /* FIXME: How to better handle error condition? */
1238 if (!bfd_malloc_and_get_section (abfd, sec, &contents))
1239 return FALSE;
1240
1241 /* Cache the section contents for elf_link_input_bfd. */
1242 elf_section_data (sec)->this_hdr.contents = contents;
1243 }
1244 }
1245
1246 htab = elf_x86_64_hash_table (info);
1247 offset = rel->r_offset;
1248 switch (r_type)
1249 {
1250 case R_X86_64_TLSGD:
1251 case R_X86_64_TLSLD:
1252 if ((rel + 1) >= relend)
1253 return FALSE;
1254
1255 if (r_type == R_X86_64_TLSGD)
1256 {
1257 /* Check transition from GD access model. For 64bit, only
1258 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1259 .word 0x6666; rex64; call __tls_get_addr
 1260 can transition to a different access model. For 32bit, only
1261 leaq foo@tlsgd(%rip), %rdi
1262 .word 0x6666; rex64; call __tls_get_addr
 1263 can transition to a different access model. For largepic
1264 we also support:
1265 leaq foo@tlsgd(%rip), %rdi
1266 movabsq $__tls_get_addr@pltoff, %rax
 1267 addq %rbx, %rax
1268 call *%rax. */
1269
1270 static const unsigned char call[] = { 0x66, 0x66, 0x48, 0xe8 };
1271 static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d };
1272
1273 if ((offset + 12) > sec->size)
1274 return FALSE;
1275
1276 if (memcmp (contents + offset + 4, call, 4) != 0)
1277 {
1278 if (!ABI_64_P (abfd)
1279 || (offset + 19) > sec->size
1280 || offset < 3
1281 || memcmp (contents + offset - 3, leaq + 1, 3) != 0
1282 || memcmp (contents + offset + 4, "\x48\xb8", 2) != 0
1283 || memcmp (contents + offset + 14, "\x48\x01\xd8\xff\xd0", 5)
1284 != 0)
1285 return FALSE;
1286 largepic = TRUE;
1287 }
1288 else if (ABI_64_P (abfd))
1289 {
1290 if (offset < 4
1291 || memcmp (contents + offset - 4, leaq, 4) != 0)
1292 return FALSE;
1293 }
1294 else
1295 {
1296 if (offset < 3
1297 || memcmp (contents + offset - 3, leaq + 1, 3) != 0)
1298 return FALSE;
1299 }
1300 }
1301 else
1302 {
1303 /* Check transition from LD access model. Only
1304 leaq foo@tlsld(%rip), %rdi;
1305 call __tls_get_addr
 1306 can transition to a different access model. For largepic
1307 we also support:
1308 leaq foo@tlsld(%rip), %rdi
1309 movabsq $__tls_get_addr@pltoff, %rax
 1310 addq %rbx, %rax
1311 call *%rax. */
1312
1313 static const unsigned char lea[] = { 0x48, 0x8d, 0x3d };
1314
1315 if (offset < 3 || (offset + 9) > sec->size)
1316 return FALSE;
1317
1318 if (memcmp (contents + offset - 3, lea, 3) != 0)
1319 return FALSE;
1320
1321 if (0xe8 != *(contents + offset + 4))
1322 {
1323 if (!ABI_64_P (abfd)
1324 || (offset + 19) > sec->size
1325 || memcmp (contents + offset + 4, "\x48\xb8", 2) != 0
1326 || memcmp (contents + offset + 14, "\x48\x01\xd8\xff\xd0", 5)
1327 != 0)
1328 return FALSE;
1329 largepic = TRUE;
1330 }
1331 }
1332
1333 r_symndx = htab->r_sym (rel[1].r_info);
1334 if (r_symndx < symtab_hdr->sh_info)
1335 return FALSE;
1336
1337 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1338 /* Use strncmp to check __tls_get_addr since __tls_get_addr
1339 may be versioned. */
1340 return (h != NULL
1341 && h->root.root.string != NULL
1342 && (largepic
1343 ? ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PLTOFF64
1344 : (ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PC32
1345 || ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PLT32))
1346 && (strncmp (h->root.root.string,
1347 "__tls_get_addr", 14) == 0));
1348
1349 case R_X86_64_GOTTPOFF:
1350 /* Check transition from IE access model:
1351 mov foo@gottpoff(%rip), %reg
1352 add foo@gottpoff(%rip), %reg
1353 */
1354
1355 /* Check REX prefix first. */
1356 if (offset >= 3 && (offset + 4) <= sec->size)
1357 {
1358 val = bfd_get_8 (abfd, contents + offset - 3);
1359 if (val != 0x48 && val != 0x4c)
1360 {
1361 /* X32 may have 0x44 REX prefix or no REX prefix. */
1362 if (ABI_64_P (abfd))
1363 return FALSE;
1364 }
1365 }
1366 else
1367 {
1368 /* X32 may not have any REX prefix. */
1369 if (ABI_64_P (abfd))
1370 return FALSE;
1371 if (offset < 2 || (offset + 3) > sec->size)
1372 return FALSE;
1373 }
1374
1375 val = bfd_get_8 (abfd, contents + offset - 2);
1376 if (val != 0x8b && val != 0x03)
1377 return FALSE;
1378
1379 val = bfd_get_8 (abfd, contents + offset - 1);
1380 return (val & 0xc7) == 5;
1381
1382 case R_X86_64_GOTPC32_TLSDESC:
1383 /* Check transition from GDesc access model:
1384 leaq x@tlsdesc(%rip), %rax
1385
1386 Make sure it's a leaq adding rip to a 32-bit offset
1387 into any register, although it's probably almost always
1388 going to be rax. */
1389
1390 if (offset < 3 || (offset + 4) > sec->size)
1391 return FALSE;
1392
1393 val = bfd_get_8 (abfd, contents + offset - 3);
1394 if ((val & 0xfb) != 0x48)
1395 return FALSE;
1396
1397 if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d)
1398 return FALSE;
1399
1400 val = bfd_get_8 (abfd, contents + offset - 1);
1401 return (val & 0xc7) == 0x05;
1402
1403 case R_X86_64_TLSDESC_CALL:
1404 /* Check transition from GDesc access model:
1405 call *x@tlsdesc(%rax)
1406 */
1407 if (offset + 2 <= sec->size)
1408 {
1409 /* Make sure that it's a call *x@tlsdesc(%rax). */
1410 static const unsigned char call[] = { 0xff, 0x10 };
1411 return memcmp (contents + offset, call, 2) == 0;
1412 }
1413
1414 return FALSE;
1415
1416 default:
1417 abort ();
1418 }
1419 }
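
/* To make the GD check above concrete (a sketch following the byte
   patterns it compares against): r_offset points at the 4-byte @tlsgd
   operand, so for the 64-bit sequence

       .byte 0x66; leaq foo@tlsgd(%rip), %rdi
       .word 0x6666; rex64; call __tls_get_addr

   the bytes at r_offset - 4 are 0x66 0x48 0x8d 0x3d (the prefixed leaq
   opcode bytes) and the bytes at r_offset + 4 are 0x66 0x66 0x48 0xe8
   (the padded call), which is exactly what the "leaq" and "call"
   arrays in the R_X86_64_TLSGD case test.  */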
1420
1421 /* Return TRUE if the TLS access transition is OK or no transition
1422 will be performed. Update R_TYPE if there is a transition. */
1423
1424 static bfd_boolean
1425 elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd,
1426 asection *sec, bfd_byte *contents,
1427 Elf_Internal_Shdr *symtab_hdr,
1428 struct elf_link_hash_entry **sym_hashes,
1429 unsigned int *r_type, int tls_type,
1430 const Elf_Internal_Rela *rel,
1431 const Elf_Internal_Rela *relend,
1432 struct elf_link_hash_entry *h,
1433 unsigned long r_symndx)
1434 {
1435 unsigned int from_type = *r_type;
1436 unsigned int to_type = from_type;
1437 bfd_boolean check = TRUE;
1438
1439 /* Skip TLS transition for functions. */
1440 if (h != NULL
1441 && (h->type == STT_FUNC
1442 || h->type == STT_GNU_IFUNC))
1443 return TRUE;
1444
1445 switch (from_type)
1446 {
1447 case R_X86_64_TLSGD:
1448 case R_X86_64_GOTPC32_TLSDESC:
1449 case R_X86_64_TLSDESC_CALL:
1450 case R_X86_64_GOTTPOFF:
1451 if (bfd_link_executable (info))
1452 {
1453 if (h == NULL)
1454 to_type = R_X86_64_TPOFF32;
1455 else
1456 to_type = R_X86_64_GOTTPOFF;
1457 }
1458
1459 /* When we are called from elf_x86_64_relocate_section,
1460 CONTENTS isn't NULL and there may be additional transitions
1461 based on TLS_TYPE. */
1462 if (contents != NULL)
1463 {
1464 unsigned int new_to_type = to_type;
1465
1466 if (bfd_link_executable (info)
1467 && h != NULL
1468 && h->dynindx == -1
1469 && tls_type == GOT_TLS_IE)
1470 new_to_type = R_X86_64_TPOFF32;
1471
1472 if (to_type == R_X86_64_TLSGD
1473 || to_type == R_X86_64_GOTPC32_TLSDESC
1474 || to_type == R_X86_64_TLSDESC_CALL)
1475 {
1476 if (tls_type == GOT_TLS_IE)
1477 new_to_type = R_X86_64_GOTTPOFF;
1478 }
1479
1480 /* We checked the transition before when we were called from
1481 elf_x86_64_check_relocs. We only want to check the new
1482 transition which hasn't been checked before. */
1483 check = new_to_type != to_type && from_type == to_type;
1484 to_type = new_to_type;
1485 }
1486
1487 break;
1488
1489 case R_X86_64_TLSLD:
1490 if (bfd_link_executable (info))
1491 to_type = R_X86_64_TPOFF32;
1492 break;
1493
1494 default:
1495 return TRUE;
1496 }
1497
1498 /* Return TRUE if there is no transition. */
1499 if (from_type == to_type)
1500 return TRUE;
1501
1502 /* Check if the transition can be performed. */
1503 if (check
1504 && ! elf_x86_64_check_tls_transition (abfd, info, sec, contents,
1505 symtab_hdr, sym_hashes,
1506 from_type, rel, relend))
1507 {
1508 reloc_howto_type *from, *to;
1509 const char *name;
1510
1511 from = elf_x86_64_rtype_to_howto (abfd, from_type);
1512 to = elf_x86_64_rtype_to_howto (abfd, to_type);
1513
1514 if (h)
1515 name = h->root.root.string;
1516 else
1517 {
1518 struct elf_x86_64_link_hash_table *htab;
1519
1520 htab = elf_x86_64_hash_table (info);
1521 if (htab == NULL)
1522 name = "*unknown*";
1523 else
1524 {
1525 Elf_Internal_Sym *isym;
1526
1527 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1528 abfd, r_symndx);
1529 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1530 }
1531 }
1532
1533 (*_bfd_error_handler)
1534 (_("%B: TLS transition from %s to %s against `%s' at 0x%lx "
1535 "in section `%A' failed"),
1536 abfd, sec, from->name, to->name, name,
1537 (unsigned long) rel->r_offset);
1538 bfd_set_error (bfd_error_bad_value);
1539 return FALSE;
1540 }
1541
1542 *r_type = to_type;
1543 return TRUE;
1544 }
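
/* For illustration, the decisions made above when linking an
   executable (a sketch; the matching instruction rewrites are applied
   later, during relocation processing): R_X86_64_TLSGD against a
   symbol with no dynamic symbol entry becomes R_X86_64_TPOFF32
   (GD -> LE), and the GD code sequence is eventually rewritten into
   something along the lines of

       movq %fs:0, %rax
       leaq foo@tpoff(%rax), %rax

   while R_X86_64_TLSGD against a symbol that may still be resolved
   dynamically only relaxes to R_X86_64_GOTTPOFF (GD -> IE).  Likewise
   R_X86_64_TLSLD becomes R_X86_64_TPOFF32 in an executable.  */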
1545
1546 /* Rename some of the generic section flags to better document how they
1547 are used here. */
1548 #define need_convert_mov_to_lea sec_flg0
1549
1550 /* Look through the relocs for a section during the first phase, and
1551 calculate needed space in the global offset table, procedure
1552 linkage table, and dynamic reloc sections. */
1553
1554 static bfd_boolean
1555 elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info,
1556 asection *sec,
1557 const Elf_Internal_Rela *relocs)
1558 {
1559 struct elf_x86_64_link_hash_table *htab;
1560 Elf_Internal_Shdr *symtab_hdr;
1561 struct elf_link_hash_entry **sym_hashes;
1562 const Elf_Internal_Rela *rel;
1563 const Elf_Internal_Rela *rel_end;
1564 asection *sreloc;
1565 bfd_boolean use_plt_got;
1566
1567 if (bfd_link_relocatable (info))
1568 return TRUE;
1569
1570 BFD_ASSERT (is_x86_64_elf (abfd));
1571
1572 htab = elf_x86_64_hash_table (info);
1573 if (htab == NULL)
1574 return FALSE;
1575
1576 use_plt_got = get_elf_x86_64_backend_data (abfd) == &elf_x86_64_arch_bed;
1577
1578 symtab_hdr = &elf_symtab_hdr (abfd);
1579 sym_hashes = elf_sym_hashes (abfd);
1580
1581 sreloc = NULL;
1582
1583 rel_end = relocs + sec->reloc_count;
1584 for (rel = relocs; rel < rel_end; rel++)
1585 {
1586 unsigned int r_type;
1587 unsigned long r_symndx;
1588 struct elf_link_hash_entry *h;
1589 Elf_Internal_Sym *isym;
1590 const char *name;
1591 bfd_boolean size_reloc;
1592
1593 r_symndx = htab->r_sym (rel->r_info);
1594 r_type = ELF32_R_TYPE (rel->r_info);
1595
1596 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
1597 {
1598 (*_bfd_error_handler) (_("%B: bad symbol index: %d"),
1599 abfd, r_symndx);
1600 return FALSE;
1601 }
1602
1603 if (r_symndx < symtab_hdr->sh_info)
1604 {
1605 /* A local symbol. */
1606 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1607 abfd, r_symndx);
1608 if (isym == NULL)
1609 return FALSE;
1610
1611 /* Check relocation against local STT_GNU_IFUNC symbol. */
1612 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
1613 {
1614 h = elf_x86_64_get_local_sym_hash (htab, abfd, rel,
1615 TRUE);
1616 if (h == NULL)
1617 return FALSE;
1618
1619 /* Fake a STT_GNU_IFUNC symbol. */
1620 h->type = STT_GNU_IFUNC;
1621 h->def_regular = 1;
1622 h->ref_regular = 1;
1623 h->forced_local = 1;
1624 h->root.type = bfd_link_hash_defined;
1625 }
1626 else
1627 h = NULL;
1628 }
1629 else
1630 {
1631 isym = NULL;
1632 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1633 while (h->root.type == bfd_link_hash_indirect
1634 || h->root.type == bfd_link_hash_warning)
1635 h = (struct elf_link_hash_entry *) h->root.u.i.link;
1636 }
1637
1638 /* Check invalid x32 relocations. */
1639 if (!ABI_64_P (abfd))
1640 switch (r_type)
1641 {
1642 default:
1643 break;
1644
1645 case R_X86_64_DTPOFF64:
1646 case R_X86_64_TPOFF64:
1647 case R_X86_64_PC64:
1648 case R_X86_64_GOTOFF64:
1649 case R_X86_64_GOT64:
1650 case R_X86_64_GOTPCREL64:
1651 case R_X86_64_GOTPC64:
1652 case R_X86_64_GOTPLT64:
1653 case R_X86_64_PLTOFF64:
1654 {
1655 if (h)
1656 name = h->root.root.string;
1657 else
1658 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1659 NULL);
1660 (*_bfd_error_handler)
1661 (_("%B: relocation %s against symbol `%s' isn't "
1662 "supported in x32 mode"), abfd,
1663 x86_64_elf_howto_table[r_type].name, name);
1664 bfd_set_error (bfd_error_bad_value);
1665 return FALSE;
1666 }
1667 break;
1668 }
1669
1670 if (h != NULL)
1671 {
 1672 /* Create the ifunc sections for static executables. If we
 1673 never see an indirect function symbol and are not building
 1674 a static executable, those sections will be empty and
 1675 won't appear in the output. */
1676 switch (r_type)
1677 {
1678 default:
1679 break;
1680
1681 case R_X86_64_PC32_BND:
1682 case R_X86_64_PLT32_BND:
1683 case R_X86_64_PC32:
1684 case R_X86_64_PLT32:
1685 case R_X86_64_32:
1686 case R_X86_64_64:
1687 /* MPX PLT is supported only if elf_x86_64_arch_bed
1688 is used in 64-bit mode. */
1689 if (ABI_64_P (abfd)
1690 && info->bndplt
1691 && (get_elf_x86_64_backend_data (abfd)
1692 == &elf_x86_64_arch_bed))
1693 {
1694 elf_x86_64_hash_entry (h)->has_bnd_reloc = 1;
1695
1696 /* Create the second PLT for Intel MPX support. */
1697 if (htab->plt_bnd == NULL)
1698 {
1699 unsigned int plt_bnd_align;
1700 const struct elf_backend_data *bed;
1701
1702 bed = get_elf_backend_data (info->output_bfd);
1703 BFD_ASSERT (sizeof (elf_x86_64_bnd_plt2_entry) == 8
1704 && (sizeof (elf_x86_64_bnd_plt2_entry)
1705 == sizeof (elf_x86_64_legacy_plt2_entry)));
1706 plt_bnd_align = 3;
1707
1708 if (htab->elf.dynobj == NULL)
1709 htab->elf.dynobj = abfd;
1710 htab->plt_bnd
1711 = bfd_make_section_anyway_with_flags (htab->elf.dynobj,
1712 ".plt.bnd",
1713 (bed->dynamic_sec_flags
1714 | SEC_ALLOC
1715 | SEC_CODE
1716 | SEC_LOAD
1717 | SEC_READONLY));
1718 if (htab->plt_bnd == NULL
1719 || !bfd_set_section_alignment (htab->elf.dynobj,
1720 htab->plt_bnd,
1721 plt_bnd_align))
1722 return FALSE;
1723 }
1724 }
1725
1726 case R_X86_64_32S:
1727 case R_X86_64_PC64:
1728 case R_X86_64_GOTPCREL:
1729 case R_X86_64_GOTPCREL64:
1730 if (htab->elf.dynobj == NULL)
1731 htab->elf.dynobj = abfd;
1732 if (!_bfd_elf_create_ifunc_sections (htab->elf.dynobj, info))
1733 return FALSE;
1734 break;
1735 }
1736
1737 /* It is referenced by a non-shared object. */
1738 h->ref_regular = 1;
1739 h->root.non_ir_ref = 1;
1740
1741 if (h->type == STT_GNU_IFUNC)
1742 elf_tdata (info->output_bfd)->has_gnu_symbols
1743 |= elf_gnu_symbol_ifunc;
1744 }
1745
1746 if (! elf_x86_64_tls_transition (info, abfd, sec, NULL,
1747 symtab_hdr, sym_hashes,
1748 &r_type, GOT_UNKNOWN,
1749 rel, rel_end, h, r_symndx))
1750 return FALSE;
1751
1752 switch (r_type)
1753 {
1754 case R_X86_64_TLSLD:
1755 htab->tls_ld_got.refcount += 1;
1756 goto create_got;
1757
1758 case R_X86_64_TPOFF32:
1759 if (!bfd_link_executable (info) && ABI_64_P (abfd))
1760 {
1761 if (h)
1762 name = h->root.root.string;
1763 else
1764 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1765 NULL);
1766 (*_bfd_error_handler)
1767 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
1768 abfd,
1769 x86_64_elf_howto_table[r_type].name, name);
1770 bfd_set_error (bfd_error_bad_value);
1771 return FALSE;
1772 }
1773 break;
1774
1775 case R_X86_64_GOTTPOFF:
1776 if (!bfd_link_executable (info))
1777 info->flags |= DF_STATIC_TLS;
1778 /* Fall through */
1779
1780 case R_X86_64_GOT32:
1781 case R_X86_64_GOTPCREL:
1782 case R_X86_64_TLSGD:
1783 case R_X86_64_GOT64:
1784 case R_X86_64_GOTPCREL64:
1785 case R_X86_64_GOTPLT64:
1786 case R_X86_64_GOTPC32_TLSDESC:
1787 case R_X86_64_TLSDESC_CALL:
1788 /* This symbol requires a global offset table entry. */
1789 {
1790 int tls_type, old_tls_type;
1791
1792 switch (r_type)
1793 {
1794 default: tls_type = GOT_NORMAL; break;
1795 case R_X86_64_TLSGD: tls_type = GOT_TLS_GD; break;
1796 case R_X86_64_GOTTPOFF: tls_type = GOT_TLS_IE; break;
1797 case R_X86_64_GOTPC32_TLSDESC:
1798 case R_X86_64_TLSDESC_CALL:
1799 tls_type = GOT_TLS_GDESC; break;
1800 }
1801
1802 if (h != NULL)
1803 {
1804 h->got.refcount += 1;
1805 old_tls_type = elf_x86_64_hash_entry (h)->tls_type;
1806 }
1807 else
1808 {
1809 bfd_signed_vma *local_got_refcounts;
1810
1811 /* This is a global offset table entry for a local symbol. */
1812 local_got_refcounts = elf_local_got_refcounts (abfd);
1813 if (local_got_refcounts == NULL)
1814 {
1815 bfd_size_type size;
1816
1817 size = symtab_hdr->sh_info;
1818 size *= sizeof (bfd_signed_vma)
1819 + sizeof (bfd_vma) + sizeof (char);
1820 local_got_refcounts = ((bfd_signed_vma *)
1821 bfd_zalloc (abfd, size));
1822 if (local_got_refcounts == NULL)
1823 return FALSE;
1824 elf_local_got_refcounts (abfd) = local_got_refcounts;
1825 elf_x86_64_local_tlsdesc_gotent (abfd)
1826 = (bfd_vma *) (local_got_refcounts + symtab_hdr->sh_info);
1827 elf_x86_64_local_got_tls_type (abfd)
1828 = (char *) (local_got_refcounts + 2 * symtab_hdr->sh_info);
1829 }
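/* Added sketch (not in the original source): the single bfd_zalloc
   block above is carved into three per-local-symbol arrays indexed
   by r_symndx:
     [0, sh_info)           bfd_signed_vma GOT reference counts
     [sh_info, 2*sh_info)   bfd_vma TLSDESC GOT offsets
     then sh_info bytes     char TLS types (GOT_TLS_* values)
   The pointer arithmetic relies on bfd_signed_vma and bfd_vma having
   the same size. */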
1830 local_got_refcounts[r_symndx] += 1;
1831 old_tls_type
1832 = elf_x86_64_local_got_tls_type (abfd) [r_symndx];
1833 }
1834
1835 /* If a TLS symbol is accessed using IE at least once,
1836 there is no point in using a dynamic model for it. */
1837 if (old_tls_type != tls_type && old_tls_type != GOT_UNKNOWN
1838 && (! GOT_TLS_GD_ANY_P (old_tls_type)
1839 || tls_type != GOT_TLS_IE))
1840 {
1841 if (old_tls_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (tls_type))
1842 tls_type = old_tls_type;
1843 else if (GOT_TLS_GD_ANY_P (old_tls_type)
1844 && GOT_TLS_GD_ANY_P (tls_type))
1845 tls_type |= old_tls_type;
1846 else
1847 {
1848 if (h)
1849 name = h->root.root.string;
1850 else
1851 name = bfd_elf_sym_name (abfd, symtab_hdr,
1852 isym, NULL);
1853 (*_bfd_error_handler)
1854 (_("%B: '%s' accessed both as normal and thread local symbol"),
1855 abfd, name);
1856 bfd_set_error (bfd_error_bad_value);
1857 return FALSE;
1858 }
1859 }
1860
1861 if (old_tls_type != tls_type)
1862 {
1863 if (h != NULL)
1864 elf_x86_64_hash_entry (h)->tls_type = tls_type;
1865 else
1866 elf_x86_64_local_got_tls_type (abfd) [r_symndx] = tls_type;
1867 }
1868 }
1869 /* Fall through */
1870
1871 case R_X86_64_GOTOFF64:
1872 case R_X86_64_GOTPC32:
1873 case R_X86_64_GOTPC64:
1874 create_got:
1875 if (htab->elf.sgot == NULL)
1876 {
1877 if (htab->elf.dynobj == NULL)
1878 htab->elf.dynobj = abfd;
1879 if (!_bfd_elf_create_got_section (htab->elf.dynobj,
1880 info))
1881 return FALSE;
1882 }
1883 break;
1884
1885 case R_X86_64_PLT32:
1886 case R_X86_64_PLT32_BND:
1887 /* This symbol requires a procedure linkage table entry. We
1888 actually build the entry in adjust_dynamic_symbol,
1889 because this might be a case of linking PIC code which is
1890 never referenced by a dynamic object, in which case we
1891 don't need to generate a procedure linkage table entry
1892 after all. */
1893
1894 /* If this is a local symbol, we resolve it directly without
1895 creating a procedure linkage table entry. */
1896 if (h == NULL)
1897 continue;
1898
1899 h->needs_plt = 1;
1900 h->plt.refcount += 1;
1901 break;
1902
1903 case R_X86_64_PLTOFF64:
1904 /* This tries to form the 'address' of a function relative
1905 to GOT. For global symbols we need a PLT entry. */
1906 if (h != NULL)
1907 {
1908 h->needs_plt = 1;
1909 h->plt.refcount += 1;
1910 }
1911 goto create_got;
1912
1913 case R_X86_64_SIZE32:
1914 case R_X86_64_SIZE64:
1915 size_reloc = TRUE;
1916 goto do_size;
1917
1918 case R_X86_64_32:
1919 if (!ABI_64_P (abfd))
1920 goto pointer;
1921 case R_X86_64_8:
1922 case R_X86_64_16:
1923 case R_X86_64_32S:
1924 /* Let's help debug shared library creation. These relocs
1925 cannot be used in shared libs. Don't error out for
1926 sections we don't care about, such as debug sections or
1927 non-constant sections. */
1928 if (bfd_link_pic (info)
1929 && (sec->flags & SEC_ALLOC) != 0
1930 && (sec->flags & SEC_READONLY) != 0)
1931 {
1932 if (h)
1933 name = h->root.root.string;
1934 else
1935 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1936 (*_bfd_error_handler)
1937 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
1938 abfd, x86_64_elf_howto_table[r_type].name, name);
1939 bfd_set_error (bfd_error_bad_value);
1940 return FALSE;
1941 }
1942 /* Fall through. */
1943
1944 case R_X86_64_PC8:
1945 case R_X86_64_PC16:
1946 case R_X86_64_PC32:
1947 case R_X86_64_PC32_BND:
1948 case R_X86_64_PC64:
1949 case R_X86_64_64:
1950 pointer:
1951 if (h != NULL && bfd_link_executable (info))
1952 {
1953 /* If this reloc is in a read-only section, we might
1954 need a copy reloc. We can't check reliably at this
1955 stage whether the section is read-only, as input
1956 sections have not yet been mapped to output sections.
1957 Tentatively set the flag for now, and correct in
1958 adjust_dynamic_symbol. */
1959 h->non_got_ref = 1;
1960
1961 /* We may need a .plt entry if the function this reloc
1962 refers to is in a shared lib. */
1963 h->plt.refcount += 1;
1964 if (r_type != R_X86_64_PC32
1965 && r_type != R_X86_64_PC32_BND
1966 && r_type != R_X86_64_PC64)
1967 {
1968 h->pointer_equality_needed = 1;
1969 /* At run-time, R_X86_64_64 can be resolved for both
1970 x86-64 and x32. But R_X86_64_32 and R_X86_64_32S
1971 can only be resolved for x32. */
1972 if ((sec->flags & SEC_READONLY) == 0
1973 && (r_type == R_X86_64_64
1974 || (!ABI_64_P (abfd)
1975 && (r_type == R_X86_64_32
1976 || r_type == R_X86_64_32S))))
1977 {
1978 struct elf_x86_64_link_hash_entry *eh
1979 = (struct elf_x86_64_link_hash_entry *) h;
1980 eh->func_pointer_refcount += 1;
1981 }
1982 }
1983 }
1984
1985 size_reloc = FALSE;
1986 do_size:
1987 /* If we are creating a shared library, and this is a reloc
1988 against a global symbol, or a non PC relative reloc
1989 against a local symbol, then we need to copy the reloc
1990 into the shared library. However, if we are linking with
1991 -Bsymbolic, we do not need to copy a reloc against a
1992 global symbol which is defined in an object we are
1993 including in the link (i.e., DEF_REGULAR is set). At
1994 this point we have not seen all the input files, so it is
1995 possible that DEF_REGULAR is not set now but will be set
1996 later (it is never cleared). In case of a weak definition,
1997 DEF_REGULAR may be cleared later by a strong definition in
1998 a shared library. We account for that possibility below by
1999 storing information in the dyn_relocs field of the hash
2000 table entry. A similar situation occurs when creating
2001 shared libraries and symbol visibility changes render the
2002 symbol local.
2003
2004 If on the other hand, we are creating an executable, we
2005 may need to keep relocations for symbols satisfied by a
2006 dynamic library if we manage to avoid copy relocs for the
2007 symbol. */
2008 if ((bfd_link_pic (info)
2009 && (sec->flags & SEC_ALLOC) != 0
2010 && (! IS_X86_64_PCREL_TYPE (r_type)
2011 || (h != NULL
2012 && (! SYMBOLIC_BIND (info, h)
2013 || h->root.type == bfd_link_hash_defweak
2014 || !h->def_regular))))
2015 || (ELIMINATE_COPY_RELOCS
2016 && !bfd_link_pic (info)
2017 && (sec->flags & SEC_ALLOC) != 0
2018 && h != NULL
2019 && (h->root.type == bfd_link_hash_defweak
2020 || !h->def_regular)))
2021 {
2022 struct elf_dyn_relocs *p;
2023 struct elf_dyn_relocs **head;
2024
2025 /* We must copy these reloc types into the output file.
2026 Create a reloc section in dynobj and make room for
2027 this reloc. */
2028 if (sreloc == NULL)
2029 {
2030 if (htab->elf.dynobj == NULL)
2031 htab->elf.dynobj = abfd;
2032
2033 sreloc = _bfd_elf_make_dynamic_reloc_section
2034 (sec, htab->elf.dynobj, ABI_64_P (abfd) ? 3 : 2,
2035 abfd, /*rela?*/ TRUE);
2036
2037 if (sreloc == NULL)
2038 return FALSE;
2039 }
2040
2041 /* If this is a global symbol, we count the number of
2042 relocations we need for this symbol. */
2043 if (h != NULL)
2044 {
2045 head = &((struct elf_x86_64_link_hash_entry *) h)->dyn_relocs;
2046 }
2047 else
2048 {
2049 /* Track dynamic relocs needed for local syms too.
2050 We really need local syms available to do this
2051 easily. Oh well. */
2052 asection *s;
2053 void **vpp;
2054
2055 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2056 abfd, r_symndx);
2057 if (isym == NULL)
2058 return FALSE;
2059
2060 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
2061 if (s == NULL)
2062 s = sec;
2063
2064 /* Beware of type punned pointers vs strict aliasing
2065 rules. */
2066 vpp = &(elf_section_data (s)->local_dynrel);
2067 head = (struct elf_dyn_relocs **)vpp;
2068 }
2069
2070 p = *head;
2071 if (p == NULL || p->sec != sec)
2072 {
2073 bfd_size_type amt = sizeof *p;
2074
2075 p = ((struct elf_dyn_relocs *)
2076 bfd_alloc (htab->elf.dynobj, amt));
2077 if (p == NULL)
2078 return FALSE;
2079 p->next = *head;
2080 *head = p;
2081 p->sec = sec;
2082 p->count = 0;
2083 p->pc_count = 0;
2084 }
2085
2086 p->count += 1;
2087 /* Count size relocation as PC-relative relocation. */
2088 if (IS_X86_64_PCREL_TYPE (r_type) || size_reloc)
2089 p->pc_count += 1;
2090 }
2091 break;
2092
2093 /* This relocation describes the C++ object vtable hierarchy.
2094 Reconstruct it for later use during GC. */
2095 case R_X86_64_GNU_VTINHERIT:
2096 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
2097 return FALSE;
2098 break;
2099
2100 /* This relocation describes which C++ vtable entries are actually
2101 used. Record for later use during GC. */
2102 case R_X86_64_GNU_VTENTRY:
2103 BFD_ASSERT (h != NULL);
2104 if (h != NULL
2105 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
2106 return FALSE;
2107 break;
2108
2109 default:
2110 break;
2111 }
2112
2113 if (use_plt_got
2114 && h != NULL
2115 && h->plt.refcount > 0
2116 && (((info->flags & DF_BIND_NOW) && !h->pointer_equality_needed)
2117 || h->got.refcount > 0)
2118 && htab->plt_got == NULL)
2119 {
2120 /* Create the GOT procedure linkage table. */
2121 unsigned int plt_got_align;
2122 const struct elf_backend_data *bed;
2123
2124 bed = get_elf_backend_data (info->output_bfd);
2125 BFD_ASSERT (sizeof (elf_x86_64_legacy_plt2_entry) == 8
2126 && (sizeof (elf_x86_64_bnd_plt2_entry)
2127 == sizeof (elf_x86_64_legacy_plt2_entry)));
2128 plt_got_align = 3;
2129
2130 if (htab->elf.dynobj == NULL)
2131 htab->elf.dynobj = abfd;
2132 htab->plt_got
2133 = bfd_make_section_anyway_with_flags (htab->elf.dynobj,
2134 ".plt.got",
2135 (bed->dynamic_sec_flags
2136 | SEC_ALLOC
2137 | SEC_CODE
2138 | SEC_LOAD
2139 | SEC_READONLY));
2140 if (htab->plt_got == NULL
2141 || !bfd_set_section_alignment (htab->elf.dynobj,
2142 htab->plt_got,
2143 plt_got_align))
2144 return FALSE;
2145 }
2146
2147 if (r_type == R_X86_64_GOTPCREL
2148 && (h == NULL || h->type != STT_GNU_IFUNC))
2149 sec->need_convert_mov_to_lea = 1;
2150 }
2151
2152 return TRUE;
2153 }
2154
2155 /* Return the section that should be marked against GC for a given
2156 relocation. */
2157
2158 static asection *
2159 elf_x86_64_gc_mark_hook (asection *sec,
2160 struct bfd_link_info *info,
2161 Elf_Internal_Rela *rel,
2162 struct elf_link_hash_entry *h,
2163 Elf_Internal_Sym *sym)
2164 {
2165 if (h != NULL)
2166 switch (ELF32_R_TYPE (rel->r_info))
2167 {
2168 case R_X86_64_GNU_VTINHERIT:
2169 case R_X86_64_GNU_VTENTRY:
2170 return NULL;
2171 }
2172
2173 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
2174 }
2175
2176 /* Update the got entry reference counts for the section being removed. */
2177
2178 static bfd_boolean
2179 elf_x86_64_gc_sweep_hook (bfd *abfd, struct bfd_link_info *info,
2180 asection *sec,
2181 const Elf_Internal_Rela *relocs)
2182 {
2183 struct elf_x86_64_link_hash_table *htab;
2184 Elf_Internal_Shdr *symtab_hdr;
2185 struct elf_link_hash_entry **sym_hashes;
2186 bfd_signed_vma *local_got_refcounts;
2187 const Elf_Internal_Rela *rel, *relend;
2188
2189 if (bfd_link_relocatable (info))
2190 return TRUE;
2191
2192 htab = elf_x86_64_hash_table (info);
2193 if (htab == NULL)
2194 return FALSE;
2195
2196 elf_section_data (sec)->local_dynrel = NULL;
2197
2198 symtab_hdr = &elf_symtab_hdr (abfd);
2199 sym_hashes = elf_sym_hashes (abfd);
2200 local_got_refcounts = elf_local_got_refcounts (abfd);
2201
2202 htab = elf_x86_64_hash_table (info);
2203 relend = relocs + sec->reloc_count;
2204 for (rel = relocs; rel < relend; rel++)
2205 {
2206 unsigned long r_symndx;
2207 unsigned int r_type;
2208 struct elf_link_hash_entry *h = NULL;
2209 bfd_boolean pointer_reloc;
2210
2211 r_symndx = htab->r_sym (rel->r_info);
2212 if (r_symndx >= symtab_hdr->sh_info)
2213 {
2214 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
2215 while (h->root.type == bfd_link_hash_indirect
2216 || h->root.type == bfd_link_hash_warning)
2217 h = (struct elf_link_hash_entry *) h->root.u.i.link;
2218 }
2219 else
2220 {
2221 /* A local symbol. */
2222 Elf_Internal_Sym *isym;
2223
2224 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2225 abfd, r_symndx);
2226
2227 /* Check relocation against local STT_GNU_IFUNC symbol. */
2228 if (isym != NULL
2229 && ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
2230 {
2231 h = elf_x86_64_get_local_sym_hash (htab, abfd, rel, FALSE);
2232 if (h == NULL)
2233 abort ();
2234 }
2235 }
2236
2237 if (h)
2238 {
2239 struct elf_x86_64_link_hash_entry *eh;
2240 struct elf_dyn_relocs **pp;
2241 struct elf_dyn_relocs *p;
2242
2243 eh = (struct elf_x86_64_link_hash_entry *) h;
2244
2245 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; pp = &p->next)
2246 if (p->sec == sec)
2247 {
2248 /* Everything must go for SEC. */
2249 *pp = p->next;
2250 break;
2251 }
2252 }
2253
2254 r_type = ELF32_R_TYPE (rel->r_info);
2255 if (! elf_x86_64_tls_transition (info, abfd, sec, NULL,
2256 symtab_hdr, sym_hashes,
2257 &r_type, GOT_UNKNOWN,
2258 rel, relend, h, r_symndx))
2259 return FALSE;
2260
2261 pointer_reloc = FALSE;
2262 switch (r_type)
2263 {
2264 case R_X86_64_TLSLD:
2265 if (htab->tls_ld_got.refcount > 0)
2266 htab->tls_ld_got.refcount -= 1;
2267 break;
2268
2269 case R_X86_64_TLSGD:
2270 case R_X86_64_GOTPC32_TLSDESC:
2271 case R_X86_64_TLSDESC_CALL:
2272 case R_X86_64_GOTTPOFF:
2273 case R_X86_64_GOT32:
2274 case R_X86_64_GOTPCREL:
2275 case R_X86_64_GOT64:
2276 case R_X86_64_GOTPCREL64:
2277 case R_X86_64_GOTPLT64:
2278 if (h != NULL)
2279 {
2280 if (h->got.refcount > 0)
2281 h->got.refcount -= 1;
2282 if (h->type == STT_GNU_IFUNC)
2283 {
2284 if (h->plt.refcount > 0)
2285 h->plt.refcount -= 1;
2286 }
2287 }
2288 else if (local_got_refcounts != NULL)
2289 {
2290 if (local_got_refcounts[r_symndx] > 0)
2291 local_got_refcounts[r_symndx] -= 1;
2292 }
2293 break;
2294
2295 case R_X86_64_32:
2296 case R_X86_64_32S:
2297 pointer_reloc = !ABI_64_P (abfd);
2298 goto pointer;
2299
2300 case R_X86_64_64:
2301 pointer_reloc = TRUE;
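/* Fall through. */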
2302 case R_X86_64_8:
2303 case R_X86_64_16:
2304 case R_X86_64_PC8:
2305 case R_X86_64_PC16:
2306 case R_X86_64_PC32:
2307 case R_X86_64_PC32_BND:
2308 case R_X86_64_PC64:
2309 case R_X86_64_SIZE32:
2310 case R_X86_64_SIZE64:
2311 pointer:
2312 if (bfd_link_pic (info)
2313 && (h == NULL || h->type != STT_GNU_IFUNC))
2314 break;
2315 /* Fall thru */
2316
2317 case R_X86_64_PLT32:
2318 case R_X86_64_PLT32_BND:
2319 case R_X86_64_PLTOFF64:
2320 if (h != NULL)
2321 {
2322 if (h->plt.refcount > 0)
2323 h->plt.refcount -= 1;
2324 if (pointer_reloc && (sec->flags & SEC_READONLY) == 0)
2325 {
2326 struct elf_x86_64_link_hash_entry *eh
2327 = (struct elf_x86_64_link_hash_entry *) h;
2328 if (eh->func_pointer_refcount > 0)
2329 eh->func_pointer_refcount -= 1;
2330 }
2331 }
2332 break;
2333
2334 default:
2335 break;
2336 }
2337 }
2338
2339 return TRUE;
2340 }
2341
2342 /* Adjust a symbol defined by a dynamic object and referenced by a
2343 regular object. The current definition is in some section of the
2344 dynamic object, but we're not including those sections. We have to
2345 change the definition to something the rest of the link can
2346 understand. */
2347
2348 static bfd_boolean
2349 elf_x86_64_adjust_dynamic_symbol (struct bfd_link_info *info,
2350 struct elf_link_hash_entry *h)
2351 {
2352 struct elf_x86_64_link_hash_table *htab;
2353 asection *s;
2354 struct elf_x86_64_link_hash_entry *eh;
2355 struct elf_dyn_relocs *p;
2356
2357 /* STT_GNU_IFUNC symbol must go through PLT. */
2358 if (h->type == STT_GNU_IFUNC)
2359 {
2360 /* All local STT_GNU_IFUNC references must be treated as local
2361 calls via the local PLT. */
2362 if (h->ref_regular
2363 && SYMBOL_CALLS_LOCAL (info, h))
2364 {
2365 bfd_size_type pc_count = 0, count = 0;
2366 struct elf_dyn_relocs **pp;
2367
2368 eh = (struct elf_x86_64_link_hash_entry *) h;
2369 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
2370 {
2371 pc_count += p->pc_count;
2372 p->count -= p->pc_count;
2373 p->pc_count = 0;
2374 count += p->count;
2375 if (p->count == 0)
2376 *pp = p->next;
2377 else
2378 pp = &p->next;
2379 }
2380
2381 if (pc_count || count)
2382 {
2383 h->needs_plt = 1;
2384 h->non_got_ref = 1;
2385 if (h->plt.refcount <= 0)
2386 h->plt.refcount = 1;
2387 else
2388 h->plt.refcount += 1;
2389 }
2390 }
2391
2392 if (h->plt.refcount <= 0)
2393 {
2394 h->plt.offset = (bfd_vma) -1;
2395 h->needs_plt = 0;
2396 }
2397 return TRUE;
2398 }
2399
2400 /* If this is a function, put it in the procedure linkage table. We
2401 will fill in the contents of the procedure linkage table later,
2402 when we know the address of the .got section. */
2403 if (h->type == STT_FUNC
2404 || h->needs_plt)
2405 {
2406 if (h->plt.refcount <= 0
2407 || SYMBOL_CALLS_LOCAL (info, h)
2408 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
2409 && h->root.type == bfd_link_hash_undefweak))
2410 {
2411 /* This case can occur if we saw a PLT32 reloc in an input
2412 file, but the symbol was never referred to by a dynamic
2413 object, or if all references were garbage collected. In
2414 such a case, we don't actually need to build a procedure
2415 linkage table, and we can just do a PC32 reloc instead. */
2416 h->plt.offset = (bfd_vma) -1;
2417 h->needs_plt = 0;
2418 }
2419
2420 return TRUE;
2421 }
2422 else
2423 /* It's possible that we incorrectly decided a .plt reloc was
2424 needed for an R_X86_64_PC32 reloc to a non-function sym in
2425 check_relocs. We can't decide accurately between function and
2426 non-function syms in check_relocs; objects loaded later in
2427 the link may change h->type. So fix it now. */
2428 h->plt.offset = (bfd_vma) -1;
2429
2430 /* If this is a weak symbol, and there is a real definition, the
2431 processor independent code will have arranged for us to see the
2432 real definition first, and we can just use the same value. */
2433 if (h->u.weakdef != NULL)
2434 {
2435 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
2436 || h->u.weakdef->root.type == bfd_link_hash_defweak);
2437 h->root.u.def.section = h->u.weakdef->root.u.def.section;
2438 h->root.u.def.value = h->u.weakdef->root.u.def.value;
2439 if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
2440 {
2441 eh = (struct elf_x86_64_link_hash_entry *) h;
2442 h->non_got_ref = h->u.weakdef->non_got_ref;
2443 eh->needs_copy = h->u.weakdef->needs_copy;
2444 }
2445 return TRUE;
2446 }
2447
2448 /* This is a reference to a symbol defined by a dynamic object which
2449 is not a function. */
2450
2451 /* If we are creating a shared library, we must presume that the
2452 only references to the symbol are via the global offset table.
2453 For such cases we need not do anything here; the relocations will
2454 be handled correctly by relocate_section. */
2455 if (!bfd_link_executable (info))
2456 return TRUE;
2457
2458 /* If there are no references to this symbol that do not use the
2459 GOT, we don't need to generate a copy reloc. */
2460 if (!h->non_got_ref)
2461 return TRUE;
2462
2463 /* If -z nocopyreloc was given, we won't generate them either. */
2464 if (info->nocopyreloc)
2465 {
2466 h->non_got_ref = 0;
2467 return TRUE;
2468 }
2469
2470 if (ELIMINATE_COPY_RELOCS)
2471 {
2472 eh = (struct elf_x86_64_link_hash_entry *) h;
2473 for (p = eh->dyn_relocs; p != NULL; p = p->next)
2474 {
2475 s = p->sec->output_section;
2476 if (s != NULL && (s->flags & SEC_READONLY) != 0)
2477 break;
2478 }
2479
2480 /* If we didn't find any dynamic relocs in read-only sections, then
2481 we'll be keeping the dynamic relocs and avoiding the copy reloc. */
2482 if (p == NULL)
2483 {
2484 h->non_got_ref = 0;
2485 return TRUE;
2486 }
2487 }
2488
2489 /* We must allocate the symbol in our .dynbss section, which will
2490 become part of the .bss section of the executable. There will be
2491 an entry for this symbol in the .dynsym section. The dynamic
2492 object will contain position independent code, so all references
2493 from the dynamic object to this symbol will go through the global
2494 offset table. The dynamic linker will use the .dynsym entry to
2495 determine the address it must put in the global offset table, so
2496 both the dynamic object and the regular object will refer to the
2497 same memory location for the variable. */
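/* Illustrative example (an added sketch, not from the original
   source): a non-PIC executable doing `extern int counter; counter++;`
   against a `counter` defined only in a shared library gets space for
   `counter` reserved in .dynbss here, plus one R_X86_64_COPY
   relocation (sized just below) telling the dynamic linker to copy
   the shared library's initial value into the executable's image at
   startup. */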
2498
2499 htab = elf_x86_64_hash_table (info);
2500 if (htab == NULL)
2501 return FALSE;
2502
2503 /* We must generate a R_X86_64_COPY reloc to tell the dynamic linker
2504 to copy the initial value out of the dynamic object and into the
2505 runtime process image. */
2506 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
2507 {
2508 const struct elf_backend_data *bed;
2509 bed = get_elf_backend_data (info->output_bfd);
2510 htab->srelbss->size += bed->s->sizeof_rela;
2511 h->needs_copy = 1;
2512 }
2513
2514 s = htab->sdynbss;
2515
2516 return _bfd_elf_adjust_dynamic_copy (info, h, s);
2517 }
2518
2519 /* Allocate space in .plt, .got and associated reloc sections for
2520 dynamic relocs. */
2521
2522 static bfd_boolean
2523 elf_x86_64_allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf)
2524 {
2525 struct bfd_link_info *info;
2526 struct elf_x86_64_link_hash_table *htab;
2527 struct elf_x86_64_link_hash_entry *eh;
2528 struct elf_dyn_relocs *p;
2529 const struct elf_backend_data *bed;
2530 unsigned int plt_entry_size;
2531
2532 if (h->root.type == bfd_link_hash_indirect)
2533 return TRUE;
2534
2535 eh = (struct elf_x86_64_link_hash_entry *) h;
2536
2537 info = (struct bfd_link_info *) inf;
2538 htab = elf_x86_64_hash_table (info);
2539 if (htab == NULL)
2540 return FALSE;
2541 bed = get_elf_backend_data (info->output_bfd);
2542 plt_entry_size = GET_PLT_ENTRY_SIZE (info->output_bfd);
2543
2544 /* We can't use the GOT PLT if pointer equality is needed since
2545 finish_dynamic_symbol won't clear symbol value and the dynamic
2546 linker won't update the GOT slot. We will get into an infinite
2547 loop at run-time. */
2548 if (htab->plt_got != NULL
2549 && h->type != STT_GNU_IFUNC
2550 && !h->pointer_equality_needed
2551 && h->plt.refcount > 0
2552 && h->got.refcount > 0)
2553 {
2554 /* Don't use the regular PLT if there are both GOT and GOTPLT
2555 relocations. */
2556 h->plt.offset = (bfd_vma) -1;
2557
2558 /* Use the GOT PLT. */
2559 eh->plt_got.refcount = 1;
2560 }
2561
2562 /* Clear the reference count of function pointer relocations if
2563 symbol isn't a normal function. */
2564 if (h->type != STT_FUNC)
2565 eh->func_pointer_refcount = 0;
2566
2567 /* Since an STT_GNU_IFUNC symbol must go through the PLT, we handle
2568 it here if it is defined and referenced in a non-shared object. */
2569 if (h->type == STT_GNU_IFUNC
2570 && h->def_regular)
2571 {
2572 if (_bfd_elf_allocate_ifunc_dyn_relocs (info, h,
2573 &eh->dyn_relocs,
2574 plt_entry_size,
2575 plt_entry_size,
2576 GOT_ENTRY_SIZE))
2577 {
2578 asection *s = htab->plt_bnd;
2579 if (h->plt.offset != (bfd_vma) -1 && s != NULL)
2580 {
2581 /* Use the .plt.bnd section if it is created. */
2582 eh->plt_bnd.offset = s->size;
2583
2584 /* Make room for this entry in the .plt.bnd section. */
2585 s->size += sizeof (elf_x86_64_legacy_plt2_entry);
2586 }
2587
2588 return TRUE;
2589 }
2590 else
2591 return FALSE;
2592 }
2593 /* Don't create the PLT entry if there are only function pointer
2594 relocations which can be resolved at run-time. */
2595 else if (htab->elf.dynamic_sections_created
2596 && (h->plt.refcount > eh->func_pointer_refcount
2597 || eh->plt_got.refcount > 0))
2598 {
2599 bfd_boolean use_plt_got;
2600
2601 /* Clear the reference count of function pointer relocations
2602 if PLT is used. */
2603 eh->func_pointer_refcount = 0;
2604
2605 if ((info->flags & DF_BIND_NOW) && !h->pointer_equality_needed)
2606 {
2607 /* Don't use the regular PLT for DF_BIND_NOW. */
2608 h->plt.offset = (bfd_vma) -1;
2609
2610 /* Use the GOT PLT. */
2611 h->got.refcount = 1;
2612 eh->plt_got.refcount = 1;
2613 }
2614
2615 use_plt_got = eh->plt_got.refcount > 0;
2616
2617 /* Make sure this symbol is output as a dynamic symbol.
2618 Undefined weak syms won't yet be marked as dynamic. */
2619 if (h->dynindx == -1
2620 && !h->forced_local)
2621 {
2622 if (! bfd_elf_link_record_dynamic_symbol (info, h))
2623 return FALSE;
2624 }
2625
2626 if (bfd_link_pic (info)
2627 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
2628 {
2629 asection *s = htab->elf.splt;
2630 asection *bnd_s = htab->plt_bnd;
2631 asection *got_s = htab->plt_got;
2632
2633 /* If this is the first .plt entry, make room for the special
2634 first entry. The .plt section is used by prelink to undo
2635 prelinking for dynamic relocations. */
2636 if (s->size == 0)
2637 s->size = plt_entry_size;
2638
2639 if (use_plt_got)
2640 eh->plt_got.offset = got_s->size;
2641 else
2642 {
2643 h->plt.offset = s->size;
2644 if (bnd_s)
2645 eh->plt_bnd.offset = bnd_s->size;
2646 }
2647
2648 /* If this symbol is not defined in a regular file, and we are
2649 not generating a shared library, then set the symbol to this
2650 location in the .plt. This is required to make function
2651 pointers compare as equal between the normal executable and
2652 the shared library. */
2653 if (! bfd_link_pic (info)
2654 && !h->def_regular)
2655 {
2656 if (use_plt_got)
2657 {
2658 /* We need to make a call to the entry of the GOT PLT
2659 instead of regular PLT entry. */
2660 h->root.u.def.section = got_s;
2661 h->root.u.def.value = eh->plt_got.offset;
2662 }
2663 else
2664 {
2665 if (bnd_s)
2666 {
2667 /* We need to make a call to the entry of the second
2668 PLT instead of regular PLT entry. */
2669 h->root.u.def.section = bnd_s;
2670 h->root.u.def.value = eh->plt_bnd.offset;
2671 }
2672 else
2673 {
2674 h->root.u.def.section = s;
2675 h->root.u.def.value = h->plt.offset;
2676 }
2677 }
2678 }
2679
2680 /* Make room for this entry. */
2681 if (use_plt_got)
2682 got_s->size += sizeof (elf_x86_64_legacy_plt2_entry);
2683 else
2684 {
2685 s->size += plt_entry_size;
2686 if (bnd_s)
2687 bnd_s->size += sizeof (elf_x86_64_legacy_plt2_entry);
2688
2689 /* We also need to make an entry in the .got.plt section,
2690 which will be placed in the .got section by the linker
2691 script. */
2692 htab->elf.sgotplt->size += GOT_ENTRY_SIZE;
2693
2694 /* We also need to make an entry in the .rela.plt
2695 section. */
2696 htab->elf.srelplt->size += bed->s->sizeof_rela;
2697 htab->elf.srelplt->reloc_count++;
2698 }
2699 }
2700 else
2701 {
2702 h->plt.offset = (bfd_vma) -1;
2703 h->needs_plt = 0;
2704 }
2705 }
2706 else
2707 {
2708 h->plt.offset = (bfd_vma) -1;
2709 h->needs_plt = 0;
2710 }
2711
2712 eh->tlsdesc_got = (bfd_vma) -1;
2713
2714 /* If R_X86_64_GOTTPOFF symbol is now local to the binary,
2715 make it a R_X86_64_TPOFF32 requiring no GOT entry. */
2716 if (h->got.refcount > 0
2717 && bfd_link_executable (info)
2718 && h->dynindx == -1
2719 && elf_x86_64_hash_entry (h)->tls_type == GOT_TLS_IE)
2720 {
2721 h->got.offset = (bfd_vma) -1;
2722 }
2723 else if (h->got.refcount > 0)
2724 {
2725 asection *s;
2726 bfd_boolean dyn;
2727 int tls_type = elf_x86_64_hash_entry (h)->tls_type;
2728
2729 /* Make sure this symbol is output as a dynamic symbol.
2730 Undefined weak syms won't yet be marked as dynamic. */
2731 if (h->dynindx == -1
2732 && !h->forced_local)
2733 {
2734 if (! bfd_elf_link_record_dynamic_symbol (info, h))
2735 return FALSE;
2736 }
2737
2738 if (GOT_TLS_GDESC_P (tls_type))
2739 {
2740 eh->tlsdesc_got = htab->elf.sgotplt->size
2741 - elf_x86_64_compute_jump_table_size (htab);
2742 htab->elf.sgotplt->size += 2 * GOT_ENTRY_SIZE;
2743 h->got.offset = (bfd_vma) -2;
2744 }
2745 if (! GOT_TLS_GDESC_P (tls_type)
2746 || GOT_TLS_GD_P (tls_type))
2747 {
2748 s = htab->elf.sgot;
2749 h->got.offset = s->size;
2750 s->size += GOT_ENTRY_SIZE;
2751 if (GOT_TLS_GD_P (tls_type))
2752 s->size += GOT_ENTRY_SIZE;
2753 }
2754 dyn = htab->elf.dynamic_sections_created;
2755 /* R_X86_64_TLSGD needs one dynamic relocation if local symbol
2756 and two if global.
2757 R_X86_64_GOTTPOFF needs one dynamic relocation. */
2758 if ((GOT_TLS_GD_P (tls_type) && h->dynindx == -1)
2759 || tls_type == GOT_TLS_IE)
2760 htab->elf.srelgot->size += bed->s->sizeof_rela;
2761 else if (GOT_TLS_GD_P (tls_type))
2762 htab->elf.srelgot->size += 2 * bed->s->sizeof_rela;
2763 else if (! GOT_TLS_GDESC_P (tls_type)
2764 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
2765 || h->root.type != bfd_link_hash_undefweak)
2766 && (bfd_link_pic (info)
2767 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
2768 htab->elf.srelgot->size += bed->s->sizeof_rela;
2769 if (GOT_TLS_GDESC_P (tls_type))
2770 {
2771 htab->elf.srelplt->size += bed->s->sizeof_rela;
2772 htab->tlsdesc_plt = (bfd_vma) -1;
2773 }
2774 }
2775 else
2776 h->got.offset = (bfd_vma) -1;
2777
2778 if (eh->dyn_relocs == NULL)
2779 return TRUE;
2780
2781 /* In the shared -Bsymbolic case, discard space allocated for
2782 dynamic pc-relative relocs against symbols which turn out to be
2783 defined in regular objects. For the normal shared case, discard
2784 space for pc-relative relocs that have become local due to symbol
2785 visibility changes. */
2786
2787 if (bfd_link_pic (info))
2788 {
2789 /* Relocs that use pc_count are those that appear on a call
2790 insn, or certain REL relocs that can be generated via assembly.
2791 We want calls to protected symbols to resolve directly to the
2792 function rather than going via the plt. If people want
2793 function pointer comparisons to work as expected then they
2794 should avoid writing weird assembly. */
2795 if (SYMBOL_CALLS_LOCAL (info, h))
2796 {
2797 struct elf_dyn_relocs **pp;
2798
2799 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
2800 {
2801 p->count -= p->pc_count;
2802 p->pc_count = 0;
2803 if (p->count == 0)
2804 *pp = p->next;
2805 else
2806 pp = &p->next;
2807 }
2808 }
2809
2810 /* Also discard relocs on undefined weak syms with non-default
2811 visibility. */
2812 if (eh->dyn_relocs != NULL)
2813 {
2814 if (h->root.type == bfd_link_hash_undefweak)
2815 {
2816 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
2817 eh->dyn_relocs = NULL;
2818
2819 /* Make sure undefined weak symbols are output as dynamic
2820 symbols in PIEs. */
2821 else if (h->dynindx == -1
2822 && ! h->forced_local
2823 && ! bfd_elf_link_record_dynamic_symbol (info, h))
2824 return FALSE;
2825 }
2826 /* For PIE, discard space for pc-relative relocs against
2827 symbols which turn out to need copy relocs. */
2828 else if (bfd_link_executable (info)
2829 && (h->needs_copy || eh->needs_copy)
2830 && h->def_dynamic
2831 && !h->def_regular)
2832 {
2833 struct elf_dyn_relocs **pp;
2834
2835 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
2836 {
2837 if (p->pc_count != 0)
2838 *pp = p->next;
2839 else
2840 pp = &p->next;
2841 }
2842 }
2843 }
2844 }
2845 else if (ELIMINATE_COPY_RELOCS)
2846 {
2847 /* For the non-shared case, discard space for relocs against
2848 symbols which turn out to need copy relocs or are not
2849 dynamic. Keep dynamic relocations for run-time function
2850 pointer initialization. */
2851
2852 if ((!h->non_got_ref || eh->func_pointer_refcount > 0)
2853 && ((h->def_dynamic
2854 && !h->def_regular)
2855 || (htab->elf.dynamic_sections_created
2856 && (h->root.type == bfd_link_hash_undefweak
2857 || h->root.type == bfd_link_hash_undefined))))
2858 {
2859 /* Make sure this symbol is output as a dynamic symbol.
2860 Undefined weak syms won't yet be marked as dynamic. */
2861 if (h->dynindx == -1
2862 && ! h->forced_local
2863 && ! bfd_elf_link_record_dynamic_symbol (info, h))
2864 return FALSE;
2865
2866 /* If that succeeded, we know we'll be keeping all the
2867 relocs. */
2868 if (h->dynindx != -1)
2869 goto keep;
2870 }
2871
2872 eh->dyn_relocs = NULL;
2873 eh->func_pointer_refcount = 0;
2874
2875 keep: ;
2876 }
2877
2878 /* Finally, allocate space. */
2879 for (p = eh->dyn_relocs; p != NULL; p = p->next)
2880 {
2881 asection * sreloc;
2882
2883 sreloc = elf_section_data (p->sec)->sreloc;
2884
2885 BFD_ASSERT (sreloc != NULL);
2886
2887 sreloc->size += p->count * bed->s->sizeof_rela;
2888 }
2889
2890 return TRUE;
2891 }
2892
2893 /* Allocate space in .plt, .got and associated reloc sections for
2894 local dynamic relocs. */
2895
2896 static bfd_boolean
2897 elf_x86_64_allocate_local_dynrelocs (void **slot, void *inf)
2898 {
2899 struct elf_link_hash_entry *h
2900 = (struct elf_link_hash_entry *) *slot;
2901
2902 if (h->type != STT_GNU_IFUNC
2903 || !h->def_regular
2904 || !h->ref_regular
2905 || !h->forced_local
2906 || h->root.type != bfd_link_hash_defined)
2907 abort ();
2908
2909 return elf_x86_64_allocate_dynrelocs (h, inf);
2910 }
2911
2912 /* Find any dynamic relocs that apply to read-only sections. */
2913
2914 static bfd_boolean
2915 elf_x86_64_readonly_dynrelocs (struct elf_link_hash_entry *h,
2916 void * inf)
2917 {
2918 struct elf_x86_64_link_hash_entry *eh;
2919 struct elf_dyn_relocs *p;
2920
2921 /* Skip local IFUNC symbols. */
2922 if (h->forced_local && h->type == STT_GNU_IFUNC)
2923 return TRUE;
2924
2925 eh = (struct elf_x86_64_link_hash_entry *) h;
2926 for (p = eh->dyn_relocs; p != NULL; p = p->next)
2927 {
2928 asection *s = p->sec->output_section;
2929
2930 if (s != NULL && (s->flags & SEC_READONLY) != 0)
2931 {
2932 struct bfd_link_info *info = (struct bfd_link_info *) inf;
2933
2934 info->flags |= DF_TEXTREL;
2935
2936 if ((info->warn_shared_textrel && bfd_link_pic (info))
2937 || info->error_textrel)
2938 info->callbacks->einfo (_("%P: %B: warning: relocation against `%s' in readonly section `%A'\n"),
2939 p->sec->owner, h->root.root.string,
2940 p->sec);
2941
2942 /* Not an error, just cut short the traversal. */
2943 return FALSE;
2944 }
2945 }
2946 return TRUE;
2947 }
2948
2949 /* Convert
2950 mov foo@GOTPCREL(%rip), %reg
2951 to
2952 lea foo(%rip), %reg
2953 with the local symbol, foo. */
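/* Illustrative encoding sketch (added note; the register choice is an
   arbitrary example):

     before:  48 8b 05 xx xx xx xx    mov  foo@GOTPCREL(%rip), %rax
              (R_X86_64_GOTPCREL against the 32-bit displacement)
     after:   48 8d 05 xx xx xx xx    lea  foo(%rip), %rax
              (relocation rewritten to R_X86_64_PC32)

   Only the opcode byte (0x8b -> 0x8d) and the relocation type change;
   the ModRM byte and the displacement slot are reused. */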
2954
2955 static bfd_boolean
2956 elf_x86_64_convert_mov_to_lea (bfd *abfd, asection *sec,
2957 struct bfd_link_info *link_info)
2958 {
2959 Elf_Internal_Shdr *symtab_hdr;
2960 Elf_Internal_Rela *internal_relocs;
2961 Elf_Internal_Rela *irel, *irelend;
2962 bfd_byte *contents;
2963 struct elf_x86_64_link_hash_table *htab;
2964 bfd_boolean changed_contents;
2965 bfd_boolean changed_relocs;
2966 bfd_signed_vma *local_got_refcounts;
2967 bfd_vma maxpagesize;
2968
2969 /* Don't even try to convert non-ELF outputs. */
2970 if (!is_elf_hash_table (link_info->hash))
2971 return FALSE;
2972
2973 /* Nothing to do if there is no need or no output. */
2974 if ((sec->flags & (SEC_CODE | SEC_RELOC)) != (SEC_CODE | SEC_RELOC)
2975 || sec->need_convert_mov_to_lea == 0
2976 || bfd_is_abs_section (sec->output_section))
2977 return TRUE;
2978
2979 symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
2980
2981 /* Load the relocations for this section. */
2982 internal_relocs = (_bfd_elf_link_read_relocs
2983 (abfd, sec, NULL, (Elf_Internal_Rela *) NULL,
2984 link_info->keep_memory));
2985 if (internal_relocs == NULL)
2986 return FALSE;
2987
2988 htab = elf_x86_64_hash_table (link_info);
2989 changed_contents = FALSE;
2990 changed_relocs = FALSE;
2991 local_got_refcounts = elf_local_got_refcounts (abfd);
2992 maxpagesize = get_elf_backend_data (abfd)->maxpagesize;
2993
2994 /* Get the section contents. */
2995 if (elf_section_data (sec)->this_hdr.contents != NULL)
2996 contents = elf_section_data (sec)->this_hdr.contents;
2997 else
2998 {
2999 if (!bfd_malloc_and_get_section (abfd, sec, &contents))
3000 goto error_return;
3001 }
3002
3003 irelend = internal_relocs + sec->reloc_count;
3004 for (irel = internal_relocs; irel < irelend; irel++)
3005 {
3006 unsigned int r_type = ELF32_R_TYPE (irel->r_info);
3007 unsigned int r_symndx = htab->r_sym (irel->r_info);
3008 unsigned int indx;
3009 struct elf_link_hash_entry *h;
3010 asection *tsec;
3011 char symtype;
3012 bfd_vma toff, roff;
3013 enum {
3014 none, local, global
3015 } convert_mov_to_lea;
3016 unsigned int opcode;
3017
3018 if (r_type != R_X86_64_GOTPCREL)
3019 continue;
3020
3021 roff = irel->r_offset;
3022
3023 if (roff < 2)
3024 continue;
3025
3026 opcode = bfd_get_8 (abfd, contents + roff - 2);
3027
3028 /* PR ld/18591: Don't convert R_X86_64_GOTPCREL relocation if it
3029 isn't for mov instruction. */
3030 if (opcode != 0x8b)
3031 continue;
3032
3033 tsec = NULL;
3034 convert_mov_to_lea = none;
3035
3036 /* Get the symbol referred to by the reloc. */
3037 if (r_symndx < symtab_hdr->sh_info)
3038 {
3039 Elf_Internal_Sym *isym;
3040
3041 /* Silence older GCC warning. */
3042 h = NULL;
3043
3044 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
3045 abfd, r_symndx);
3046
3047 symtype = ELF_ST_TYPE (isym->st_info);
3048
3049 /* An STT_GNU_IFUNC symbol must keep its R_X86_64_GOTPCREL
3050 relocation; also skip relocations against undefined symbols. */
3051 if (symtype != STT_GNU_IFUNC && isym->st_shndx != SHN_UNDEF)
3052 {
3053 if (isym->st_shndx == SHN_ABS)
3054 tsec = bfd_abs_section_ptr;
3055 else if (isym->st_shndx == SHN_COMMON)
3056 tsec = bfd_com_section_ptr;
3057 else if (isym->st_shndx == SHN_X86_64_LCOMMON)
3058 tsec = &_bfd_elf_large_com_section;
3059 else
3060 tsec = bfd_section_from_elf_index (abfd, isym->st_shndx);
3061
3062 toff = isym->st_value;
3063 convert_mov_to_lea = local;
3064 }
3065 }
3066 else
3067 {
3068 indx = r_symndx - symtab_hdr->sh_info;
3069 h = elf_sym_hashes (abfd)[indx];
3070 BFD_ASSERT (h != NULL);
3071
3072 while (h->root.type == bfd_link_hash_indirect
3073 || h->root.type == bfd_link_hash_warning)
3074 h = (struct elf_link_hash_entry *) h->root.u.i.link;
3075
3076 /* STT_GNU_IFUNC must keep R_X86_64_GOTPCREL relocation. We also
3077 avoid optimizing _DYNAMIC since ld.so may use its link-time
3078 address. */
3079 if (h->def_regular
3080 && h->type != STT_GNU_IFUNC
3081 && h != htab->elf.hdynamic
3082 && SYMBOL_REFERENCES_LOCAL (link_info, h))
3083 {
3084 tsec = h->root.u.def.section;
3085 toff = h->root.u.def.value;
3086 symtype = h->type;
3087 convert_mov_to_lea = global;
3088 }
3089 }
3090
3091 if (convert_mov_to_lea == none)
3092 continue;
3093
3094 if (tsec->sec_info_type == SEC_INFO_TYPE_MERGE)
3095 {
3096 /* At this stage in linking, no SEC_MERGE symbol has been
3097 adjusted, so all references to such symbols need to be
3098 passed through _bfd_merged_section_offset. (Later, in
3099 relocate_section, all SEC_MERGE symbols *except* for
3100 section symbols have been adjusted.)
3101
3102 gas may reduce relocations against symbols in SEC_MERGE
3103 sections to a relocation against the section symbol when
3104 the original addend was zero. When the reloc is against
3105 a section symbol we should include the addend in the
3106 offset passed to _bfd_merged_section_offset, since the
3107 location of interest is the original symbol. On the
3108 other hand, an access to "sym+addend" where "sym" is not
3109 a section symbol should not include the addend; such an
3110 access is presumed to be an offset from "sym"; the
3111 location of interest is just "sym". */
3112 if (symtype == STT_SECTION)
3113 toff += irel->r_addend;
3114
3115 toff = _bfd_merged_section_offset (abfd, &tsec,
3116 elf_section_data (tsec)->sec_info,
3117 toff);
3118
3119 if (symtype != STT_SECTION)
3120 toff += irel->r_addend;
3121 }
3122 else
3123 toff += irel->r_addend;
3124
3125 /* Don't convert if R_X86_64_PC32 relocation overflows. */
3126 if (tsec->output_section == sec->output_section)
3127 {
3128 if ((toff - roff + 0x80000000) > 0xffffffff)
3129 continue;
3130 }
3131 else
3132 {
3133 asection *asect;
3134 bfd_size_type size;
3135
3136 /* At this point, we don't know the load addresses of either
3137 the TSEC section or the SEC section. We estimate the distance
3138 between SEC and TSEC. */
3139 size = 0;
3140 for (asect = sec->output_section;
3141 asect != NULL && asect != tsec->output_section;
3142 asect = asect->next)
3143 {
3144 asection *i;
3145 for (i = asect->output_section->map_head.s;
3146 i != NULL;
3147 i = i->map_head.s)
3148 {
3149 size = align_power (size, i->alignment_power);
3150 size += i->size;
3151 }
3152 }
3153
3154 /* Don't convert R_X86_64_GOTPCREL if TSEC isn't placed after
3155 SEC. */
3156 if (asect == NULL)
3157 continue;
3158
3159 /* Take PT_GNU_RELRO segment into account by adding
3160 maxpagesize. */
3161 if ((toff + size + maxpagesize - roff + 0x80000000)
3162 > 0xffffffff)
3163 continue;
3164 }
3165
3166 bfd_put_8 (abfd, 0x8d, contents + roff - 2);
3167 irel->r_info = htab->r_info (r_symndx, R_X86_64_PC32);
3168 changed_contents = TRUE;
3169 changed_relocs = TRUE;
3170
3171 if (convert_mov_to_lea == local)
3172 {
3173 if (local_got_refcounts != NULL
3174 && local_got_refcounts[r_symndx] > 0)
3175 local_got_refcounts[r_symndx] -= 1;
3176 }
3177 else
3178 {
3179 if (h->got.refcount > 0)
3180 h->got.refcount -= 1;
3181 }
3182 }
3183
3184 if (contents != NULL
3185 && elf_section_data (sec)->this_hdr.contents != contents)
3186 {
3187 if (!changed_contents && !link_info->keep_memory)
3188 free (contents);
3189 else
3190 {
3191 /* Cache the section contents for elf_link_input_bfd. */
3192 elf_section_data (sec)->this_hdr.contents = contents;
3193 }
3194 }
3195
3196 if (elf_section_data (sec)->relocs != internal_relocs)
3197 {
3198 if (!changed_relocs)
3199 free (internal_relocs);
3200 else
3201 elf_section_data (sec)->relocs = internal_relocs;
3202 }
3203
3204 return TRUE;
3205
3206 error_return:
3207 if (contents != NULL
3208 && elf_section_data (sec)->this_hdr.contents != contents)
3209 free (contents);
3210 if (internal_relocs != NULL
3211 && elf_section_data (sec)->relocs != internal_relocs)
3212 free (internal_relocs);
3213 return FALSE;
3214 }
3215
3216 /* Set the sizes of the dynamic sections. */
3217
3218 static bfd_boolean
3219 elf_x86_64_size_dynamic_sections (bfd *output_bfd,
3220 struct bfd_link_info *info)
3221 {
3222 struct elf_x86_64_link_hash_table *htab;
3223 bfd *dynobj;
3224 asection *s;
3225 bfd_boolean relocs;
3226 bfd *ibfd;
3227 const struct elf_backend_data *bed;
3228
3229 htab = elf_x86_64_hash_table (info);
3230 if (htab == NULL)
3231 return FALSE;
3232 bed = get_elf_backend_data (output_bfd);
3233
3234 dynobj = htab->elf.dynobj;
3235 if (dynobj == NULL)
3236 abort ();
3237
3238 if (htab->elf.dynamic_sections_created)
3239 {
3240 /* Set the contents of the .interp section to the interpreter. */
3241 if (bfd_link_executable (info))
3242 {
3243 s = bfd_get_linker_section (dynobj, ".interp");
3244 if (s == NULL)
3245 abort ();
3246 s->size = htab->dynamic_interpreter_size;
3247 s->contents = (unsigned char *) htab->dynamic_interpreter;
3248 }
3249 }
3250
3251 /* Set up .got offsets for local syms, and space for local dynamic
3252 relocs. */
3253 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
3254 {
3255 bfd_signed_vma *local_got;
3256 bfd_signed_vma *end_local_got;
3257 char *local_tls_type;
3258 bfd_vma *local_tlsdesc_gotent;
3259 bfd_size_type locsymcount;
3260 Elf_Internal_Shdr *symtab_hdr;
3261 asection *srel;
3262
3263 if (! is_x86_64_elf (ibfd))
3264 continue;
3265
3266 for (s = ibfd->sections; s != NULL; s = s->next)
3267 {
3268 struct elf_dyn_relocs *p;
3269
3270 if (!elf_x86_64_convert_mov_to_lea (ibfd, s, info))
3271 return FALSE;
3272
3273 for (p = (struct elf_dyn_relocs *)
3274 (elf_section_data (s)->local_dynrel);
3275 p != NULL;
3276 p = p->next)
3277 {
3278 if (!bfd_is_abs_section (p->sec)
3279 && bfd_is_abs_section (p->sec->output_section))
3280 {
3281 /* Input section has been discarded, either because
3282 it is a copy of a linkonce section or due to
3283 linker script /DISCARD/, so we'll be discarding
3284 the relocs too. */
3285 }
3286 else if (p->count != 0)
3287 {
3288 srel = elf_section_data (p->sec)->sreloc;
3289 srel->size += p->count * bed->s->sizeof_rela;
3290 if ((p->sec->output_section->flags & SEC_READONLY) != 0
3291 && (info->flags & DF_TEXTREL) == 0)
3292 {
3293 info->flags |= DF_TEXTREL;
3294 if ((info->warn_shared_textrel && bfd_link_pic (info))
3295 || info->error_textrel)
3296 info->callbacks->einfo (_("%P: %B: warning: relocation in readonly section `%A'\n"),
3297 p->sec->owner, p->sec);
3298 }
3299 }
3300 }
3301 }
3302
3303 local_got = elf_local_got_refcounts (ibfd);
3304 if (!local_got)
3305 continue;
3306
3307 symtab_hdr = &elf_symtab_hdr (ibfd);
3308 locsymcount = symtab_hdr->sh_info;
3309 end_local_got = local_got + locsymcount;
3310 local_tls_type = elf_x86_64_local_got_tls_type (ibfd);
3311 local_tlsdesc_gotent = elf_x86_64_local_tlsdesc_gotent (ibfd);
3312 s = htab->elf.sgot;
3313 srel = htab->elf.srelgot;
3314 for (; local_got < end_local_got;
3315 ++local_got, ++local_tls_type, ++local_tlsdesc_gotent)
3316 {
3317 *local_tlsdesc_gotent = (bfd_vma) -1;
3318 if (*local_got > 0)
3319 {
3320 if (GOT_TLS_GDESC_P (*local_tls_type))
3321 {
3322 *local_tlsdesc_gotent = htab->elf.sgotplt->size
3323 - elf_x86_64_compute_jump_table_size (htab);
3324 htab->elf.sgotplt->size += 2 * GOT_ENTRY_SIZE;
3325 *local_got = (bfd_vma) -2;
3326 }
3327 if (! GOT_TLS_GDESC_P (*local_tls_type)
3328 || GOT_TLS_GD_P (*local_tls_type))
3329 {
3330 *local_got = s->size;
3331 s->size += GOT_ENTRY_SIZE;
3332 if (GOT_TLS_GD_P (*local_tls_type))
3333 s->size += GOT_ENTRY_SIZE;
3334 }
3335 if (bfd_link_pic (info)
3336 || GOT_TLS_GD_ANY_P (*local_tls_type)
3337 || *local_tls_type == GOT_TLS_IE)
3338 {
3339 if (GOT_TLS_GDESC_P (*local_tls_type))
3340 {
3341 htab->elf.srelplt->size
3342 += bed->s->sizeof_rela;
3343 htab->tlsdesc_plt = (bfd_vma) -1;
3344 }
3345 if (! GOT_TLS_GDESC_P (*local_tls_type)
3346 || GOT_TLS_GD_P (*local_tls_type))
3347 srel->size += bed->s->sizeof_rela;
3348 }
3349 }
3350 else
3351 *local_got = (bfd_vma) -1;
3352 }
3353 }
3354
3355 if (htab->tls_ld_got.refcount > 0)
3356 {
3357 /* Allocate 2 got entries and 1 dynamic reloc for R_X86_64_TLSLD
3358 relocs. */
3359 htab->tls_ld_got.offset = htab->elf.sgot->size;
3360 htab->elf.sgot->size += 2 * GOT_ENTRY_SIZE;
3361 htab->elf.srelgot->size += bed->s->sizeof_rela;
3362 }
3363 else
3364 htab->tls_ld_got.offset = -1;
3365
3366 /* Allocate global sym .plt and .got entries, and space for global
3367 sym dynamic relocs. */
3368 elf_link_hash_traverse (&htab->elf, elf_x86_64_allocate_dynrelocs,
3369 info);
3370
3371 /* Allocate .plt and .got entries, and space for local symbols. */
3372 htab_traverse (htab->loc_hash_table,
3373 elf_x86_64_allocate_local_dynrelocs,
3374 info);
3375
3376 /* For every jump slot reserved in the sgotplt, reloc_count is
3377 incremented. However, when we reserve space for TLS descriptors,
3378 it's not incremented, so in order to compute the space reserved
3379 for them, it suffices to multiply the reloc count by the jump
3380 slot size.
3381
3382 PR ld/13302: We start next_irelative_index at the end of .rela.plt
3383 so that R_X86_64_IRELATIVE entries come last. */
3384 if (htab->elf.srelplt)
3385 {
3386 htab->sgotplt_jump_table_size
3387 = elf_x86_64_compute_jump_table_size (htab);
3388 htab->next_irelative_index = htab->elf.srelplt->reloc_count - 1;
3389 }
3390 else if (htab->elf.irelplt)
3391 htab->next_irelative_index = htab->elf.irelplt->reloc_count - 1;
3392
3393 if (htab->tlsdesc_plt)
3394 {
3395 /* If we're not using lazy TLS relocations, don't generate the
3396 PLT and GOT entries they require. */
3397 if ((info->flags & DF_BIND_NOW))
3398 htab->tlsdesc_plt = 0;
3399 else
3400 {
3401 htab->tlsdesc_got = htab->elf.sgot->size;
3402 htab->elf.sgot->size += GOT_ENTRY_SIZE;
3403 /* Reserve room for the initial entry.
3404 FIXME: we could probably do away with it in this case. */
3405 if (htab->elf.splt->size == 0)
3406 htab->elf.splt->size += GET_PLT_ENTRY_SIZE (output_bfd);
3407 htab->tlsdesc_plt = htab->elf.splt->size;
3408 htab->elf.splt->size += GET_PLT_ENTRY_SIZE (output_bfd);
3409 }
3410 }
3411
3412 if (htab->elf.sgotplt)
3413 {
3414 /* Don't allocate the .got.plt section if there are no GOT or PLT
3415 entries and there is no reference to _GLOBAL_OFFSET_TABLE_. */
3416 if ((htab->elf.hgot == NULL
3417 || !htab->elf.hgot->ref_regular_nonweak)
3418 && (htab->elf.sgotplt->size
3419 == get_elf_backend_data (output_bfd)->got_header_size)
3420 && (htab->elf.splt == NULL
3421 || htab->elf.splt->size == 0)
3422 && (htab->elf.sgot == NULL
3423 || htab->elf.sgot->size == 0)
3424 && (htab->elf.iplt == NULL
3425 || htab->elf.iplt->size == 0)
3426 && (htab->elf.igotplt == NULL
3427 || htab->elf.igotplt->size == 0))
3428 htab->elf.sgotplt->size = 0;
3429 }
3430
3431 if (htab->plt_eh_frame != NULL
3432 && htab->elf.splt != NULL
3433 && htab->elf.splt->size != 0
3434 && !bfd_is_abs_section (htab->elf.splt->output_section)
3435 && _bfd_elf_eh_frame_present (info))
3436 {
3437 const struct elf_x86_64_backend_data *arch_data
3438 = get_elf_x86_64_arch_data (bed);
3439 htab->plt_eh_frame->size = arch_data->eh_frame_plt_size;
3440 }
3441
3442 /* We now have determined the sizes of the various dynamic sections.
3443 Allocate memory for them. */
3444 relocs = FALSE;
3445 for (s = dynobj->sections; s != NULL; s = s->next)
3446 {
3447 if ((s->flags & SEC_LINKER_CREATED) == 0)
3448 continue;
3449
3450 if (s == htab->elf.splt
3451 || s == htab->elf.sgot
3452 || s == htab->elf.sgotplt
3453 || s == htab->elf.iplt
3454 || s == htab->elf.igotplt
3455 || s == htab->plt_bnd
3456 || s == htab->plt_got
3457 || s == htab->plt_eh_frame
3458 || s == htab->sdynbss)
3459 {
3460 /* Strip this section if we don't need it; see the
3461 comment below. */
3462 }
3463 else if (CONST_STRNEQ (bfd_get_section_name (dynobj, s), ".rela"))
3464 {
3465 if (s->size != 0 && s != htab->elf.srelplt)
3466 relocs = TRUE;
3467
3468 /* We use the reloc_count field as a counter if we need
3469 to copy relocs into the output file. */
3470 if (s != htab->elf.srelplt)
3471 s->reloc_count = 0;
3472 }
3473 else
3474 {
3475 /* It's not one of our sections, so don't allocate space. */
3476 continue;
3477 }
3478
3479 if (s->size == 0)
3480 {
3481 /* If we don't need this section, strip it from the
3482 output file. This is mostly to handle .rela.bss and
3483 .rela.plt. We must create both sections in
3484 create_dynamic_sections, because they must be created
3485 before the linker maps input sections to output
3486 sections. The linker does that before
3487 adjust_dynamic_symbol is called, and it is that
3488 function which decides whether anything needs to go
3489 into these sections. */
3490
3491 s->flags |= SEC_EXCLUDE;
3492 continue;
3493 }
3494
3495 if ((s->flags & SEC_HAS_CONTENTS) == 0)
3496 continue;
3497
3498 /* Allocate memory for the section contents. We use bfd_zalloc
3499 here in case unused entries are not reclaimed before the
3500 section's contents are written out. This should not happen,
3501 but this way if it does, we get a R_X86_64_NONE reloc instead
3502 of garbage. */
3503 s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
3504 if (s->contents == NULL)
3505 return FALSE;
3506 }
3507
3508 if (htab->plt_eh_frame != NULL
3509 && htab->plt_eh_frame->contents != NULL)
3510 {
3511 const struct elf_x86_64_backend_data *arch_data
3512 = get_elf_x86_64_arch_data (bed);
3513
3514 memcpy (htab->plt_eh_frame->contents,
3515 arch_data->eh_frame_plt, htab->plt_eh_frame->size);
3516 bfd_put_32 (dynobj, htab->elf.splt->size,
3517 htab->plt_eh_frame->contents + PLT_FDE_LEN_OFFSET);
3518 }
3519
3520 if (htab->elf.dynamic_sections_created)
3521 {
3522 /* Add some entries to the .dynamic section. We fill in the
3523 values later, in elf_x86_64_finish_dynamic_sections, but we
3524 must add the entries now so that we get the correct size for
3525 the .dynamic section. The DT_DEBUG entry is filled in by the
3526 dynamic linker and used by the debugger. */
3527 #define add_dynamic_entry(TAG, VAL) \
3528 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
3529
3530 if (bfd_link_executable (info))
3531 {
3532 if (!add_dynamic_entry (DT_DEBUG, 0))
3533 return FALSE;
3534 }
3535
3536 if (htab->elf.splt->size != 0)
3537 {
3538 /* DT_PLTGOT is used by prelink even if there is no PLT
3539 relocation. */
3540 if (!add_dynamic_entry (DT_PLTGOT, 0))
3541 return FALSE;
3542
3543 if (htab->elf.srelplt->size != 0)
3544 {
3545 if (!add_dynamic_entry (DT_PLTRELSZ, 0)
3546 || !add_dynamic_entry (DT_PLTREL, DT_RELA)
3547 || !add_dynamic_entry (DT_JMPREL, 0))
3548 return FALSE;
3549 }
3550
3551 if (htab->tlsdesc_plt
3552 && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
3553 || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
3554 return FALSE;
3555 }
3556
3557 if (relocs)
3558 {
3559 if (!add_dynamic_entry (DT_RELA, 0)
3560 || !add_dynamic_entry (DT_RELASZ, 0)
3561 || !add_dynamic_entry (DT_RELAENT, bed->s->sizeof_rela))
3562 return FALSE;
3563
3564 /* If any dynamic relocs apply to a read-only section,
3565 then we need a DT_TEXTREL entry. */
3566 if ((info->flags & DF_TEXTREL) == 0)
3567 elf_link_hash_traverse (&htab->elf,
3568 elf_x86_64_readonly_dynrelocs,
3569 info);
3570
3571 if ((info->flags & DF_TEXTREL) != 0)
3572 {
3573 if ((elf_tdata (output_bfd)->has_gnu_symbols
3574 & elf_gnu_symbol_ifunc) == elf_gnu_symbol_ifunc)
3575 {
3576 info->callbacks->einfo
3577 (_("%P%X: read-only segment has dynamic IFUNC relocations; recompile with -fPIC\n"));
3578 bfd_set_error (bfd_error_bad_value);
3579 return FALSE;
3580 }
3581
3582 if (!add_dynamic_entry (DT_TEXTREL, 0))
3583 return FALSE;
3584 }
3585 }
3586 }
3587 #undef add_dynamic_entry
3588
3589 return TRUE;
3590 }
3591
3592 static bfd_boolean
3593 elf_x86_64_always_size_sections (bfd *output_bfd,
3594 struct bfd_link_info *info)
3595 {
3596 asection *tls_sec = elf_hash_table (info)->tls_sec;
3597
3598 if (tls_sec)
3599 {
3600 struct elf_link_hash_entry *tlsbase;
3601
3602 tlsbase = elf_link_hash_lookup (elf_hash_table (info),
3603 "_TLS_MODULE_BASE_",
3604 FALSE, FALSE, FALSE);
3605
3606 if (tlsbase && tlsbase->type == STT_TLS)
3607 {
3608 struct elf_x86_64_link_hash_table *htab;
3609 struct bfd_link_hash_entry *bh = NULL;
3610 const struct elf_backend_data *bed
3611 = get_elf_backend_data (output_bfd);
3612
3613 htab = elf_x86_64_hash_table (info);
3614 if (htab == NULL)
3615 return FALSE;
3616
3617 if (!(_bfd_generic_link_add_one_symbol
3618 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
3619 tls_sec, 0, NULL, FALSE,
3620 bed->collect, &bh)))
3621 return FALSE;
3622
3623 htab->tls_module_base = bh;
3624
3625 tlsbase = (struct elf_link_hash_entry *)bh;
3626 tlsbase->def_regular = 1;
3627 tlsbase->other = STV_HIDDEN;
3628 tlsbase->root.linker_def = 1;
3629 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
3630 }
3631 }
3632
3633 return TRUE;
3634 }
3635
3636 /* _TLS_MODULE_BASE_ needs to be treated especially when linking
3637 executables. Rather than setting it to the beginning of the TLS
3638 section, we have to set it to the end. This function may be called
3639 multiple times; it is idempotent. */
3640
3641 static void
3642 elf_x86_64_set_tls_module_base (struct bfd_link_info *info)
3643 {
3644 struct elf_x86_64_link_hash_table *htab;
3645 struct bfd_link_hash_entry *base;
3646
3647 if (!bfd_link_executable (info))
3648 return;
3649
3650 htab = elf_x86_64_hash_table (info);
3651 if (htab == NULL)
3652 return;
3653
3654 base = htab->tls_module_base;
3655 if (base == NULL)
3656 return;
3657
3658 base->u.def.value = htab->elf.tls_size;
3659 }
3660
3661 /* Return the base VMA address which should be subtracted from real addresses
3662 when resolving @dtpoff relocation.
3663 This is PT_TLS segment p_vaddr. */
3664
3665 static bfd_vma
3666 elf_x86_64_dtpoff_base (struct bfd_link_info *info)
3667 {
3668 /* If tls_sec is NULL, we should have signalled an error already. */
3669 if (elf_hash_table (info)->tls_sec == NULL)
3670 return 0;
3671 return elf_hash_table (info)->tls_sec->vma;
3672 }
3673
3674 /* Return the relocation value for @tpoff relocation
3675 if STT_TLS virtual address is ADDRESS. */
3676
3677 static bfd_vma
3678 elf_x86_64_tpoff (struct bfd_link_info *info, bfd_vma address)
3679 {
3680 struct elf_link_hash_table *htab = elf_hash_table (info);
3681 const struct elf_backend_data *bed = get_elf_backend_data (info->output_bfd);
3682 bfd_vma static_tls_size;
3683
3684 /* If tls_segment is NULL, we should have signalled an error already. */
3685 if (htab->tls_sec == NULL)
3686 return 0;
3687
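/* On x86-64 the thread pointer points just past the static TLS block,
   so the @tpoff value computed here is a negative offset from the
   thread pointer.  */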
3688 /* Consider special static TLS alignment requirements. */
3689 static_tls_size = BFD_ALIGN (htab->tls_size, bed->static_tls_alignment);
3690 return address - static_tls_size - htab->tls_sec->vma;
3691 }
3692
3693 /* Is the instruction before OFFSET in CONTENTS a 32bit relative
3694 branch? */
3695
3696 static bfd_boolean
3697 is_32bit_relative_branch (bfd_byte *contents, bfd_vma offset)
3698 {
3699 /* Opcode Instruction
3700 0xe8 call
3701 0xe9 jump
3702 0x0f 0x8x conditional jump */
3703 return ((offset > 0
3704 && (contents [offset - 1] == 0xe8
3705 || contents [offset - 1] == 0xe9))
3706 || (offset > 1
3707 && contents [offset - 2] == 0x0f
3708 && (contents [offset - 1] & 0xf0) == 0x80));
3709 }
3710
3711 /* Relocate an x86_64 ELF section. */
3712
3713 static bfd_boolean
3714 elf_x86_64_relocate_section (bfd *output_bfd,
3715 struct bfd_link_info *info,
3716 bfd *input_bfd,
3717 asection *input_section,
3718 bfd_byte *contents,
3719 Elf_Internal_Rela *relocs,
3720 Elf_Internal_Sym *local_syms,
3721 asection **local_sections)
3722 {
3723 struct elf_x86_64_link_hash_table *htab;
3724 Elf_Internal_Shdr *symtab_hdr;
3725 struct elf_link_hash_entry **sym_hashes;
3726 bfd_vma *local_got_offsets;
3727 bfd_vma *local_tlsdesc_gotents;
3728 Elf_Internal_Rela *rel;
3729 Elf_Internal_Rela *relend;
3730 const unsigned int plt_entry_size = GET_PLT_ENTRY_SIZE (info->output_bfd);
3731
3732 BFD_ASSERT (is_x86_64_elf (input_bfd));
3733
3734 htab = elf_x86_64_hash_table (info);
3735 if (htab == NULL)
3736 return FALSE;
3737 symtab_hdr = &elf_symtab_hdr (input_bfd);
3738 sym_hashes = elf_sym_hashes (input_bfd);
3739 local_got_offsets = elf_local_got_offsets (input_bfd);
3740 local_tlsdesc_gotents = elf_x86_64_local_tlsdesc_gotent (input_bfd);
3741
3742 elf_x86_64_set_tls_module_base (info);
3743
3744 rel = relocs;
3745 relend = relocs + input_section->reloc_count;
3746 for (; rel < relend; rel++)
3747 {
3748 unsigned int r_type;
3749 reloc_howto_type *howto;
3750 unsigned long r_symndx;
3751 struct elf_link_hash_entry *h;
3752 struct elf_x86_64_link_hash_entry *eh;
3753 Elf_Internal_Sym *sym;
3754 asection *sec;
3755 bfd_vma off, offplt, plt_offset;
3756 bfd_vma relocation;
3757 bfd_boolean unresolved_reloc;
3758 bfd_reloc_status_type r;
3759 int tls_type;
3760 asection *base_got, *resolved_plt;
3761 bfd_vma st_size;
3762
3763 r_type = ELF32_R_TYPE (rel->r_info);
3764 if (r_type == (int) R_X86_64_GNU_VTINHERIT
3765 || r_type == (int) R_X86_64_GNU_VTENTRY)
3766 continue;
3767
3768 if (r_type >= (int) R_X86_64_standard)
3769 {
3770 (*_bfd_error_handler)
3771 (_("%B: unrecognized relocation (0x%x) in section `%A'"),
3772 input_bfd, input_section, r_type);
3773 bfd_set_error (bfd_error_bad_value);
3774 return FALSE;
3775 }
3776
3777 if (r_type != (int) R_X86_64_32
3778 || ABI_64_P (output_bfd))
3779 howto = x86_64_elf_howto_table + r_type;
3780 else
3781 howto = (x86_64_elf_howto_table
3782 + ARRAY_SIZE (x86_64_elf_howto_table) - 1);
3783 r_symndx = htab->r_sym (rel->r_info);
3784 h = NULL;
3785 sym = NULL;
3786 sec = NULL;
3787 unresolved_reloc = FALSE;
3788 if (r_symndx < symtab_hdr->sh_info)
3789 {
3790 sym = local_syms + r_symndx;
3791 sec = local_sections[r_symndx];
3792
3793 relocation = _bfd_elf_rela_local_sym (output_bfd, sym,
3794 &sec, rel);
3795 st_size = sym->st_size;
3796
3797 /* Relocate against local STT_GNU_IFUNC symbol. */
3798 if (!bfd_link_relocatable (info)
3799 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
3800 {
3801 h = elf_x86_64_get_local_sym_hash (htab, input_bfd,
3802 rel, FALSE);
3803 if (h == NULL)
3804 abort ();
3805
3806 /* Set STT_GNU_IFUNC symbol value. */
3807 h->root.u.def.value = sym->st_value;
3808 h->root.u.def.section = sec;
3809 }
3810 }
3811 else
3812 {
3813 bfd_boolean warned ATTRIBUTE_UNUSED;
3814 bfd_boolean ignored ATTRIBUTE_UNUSED;
3815
3816 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
3817 r_symndx, symtab_hdr, sym_hashes,
3818 h, sec, relocation,
3819 unresolved_reloc, warned, ignored);
3820 st_size = h->size;
3821 }
3822
3823 if (sec != NULL && discarded_section (sec))
3824 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
3825 rel, 1, relend, howto, 0, contents);
3826
3827 if (bfd_link_relocatable (info))
3828 continue;
3829
3830 if (rel->r_addend == 0 && !ABI_64_P (output_bfd))
3831 {
3832 if (r_type == R_X86_64_64)
3833 {
3834 /* For x32, treat R_X86_64_64 like R_X86_64_32 and
3835 zero-extend it to 64bit if addend is zero. */
3836 r_type = R_X86_64_32;
3837 memset (contents + rel->r_offset + 4, 0, 4);
3838 }
3839 else if (r_type == R_X86_64_SIZE64)
3840 {
3841 /* For x32, treat R_X86_64_SIZE64 like R_X86_64_SIZE32 and
3842 zero-extend it to 64bit if addend is zero. */
3843 r_type = R_X86_64_SIZE32;
3844 memset (contents + rel->r_offset + 4, 0, 4);
3845 }
3846 }
3847
3848 eh = (struct elf_x86_64_link_hash_entry *) h;
3849
3850 /* Since an STT_GNU_IFUNC symbol must go through the PLT, we handle
3851 it here if it is defined in a non-shared object. */
3852 if (h != NULL
3853 && h->type == STT_GNU_IFUNC
3854 && h->def_regular)
3855 {
3856 bfd_vma plt_index;
3857 const char *name;
3858
3859 if ((input_section->flags & SEC_ALLOC) == 0)
3860 {
3861 /* Dynamic relocs are not propagated for SEC_DEBUGGING
3862 sections because such sections are not SEC_ALLOC and
3863 thus ld.so will not process them. */
3864 if ((input_section->flags & SEC_DEBUGGING) != 0)
3865 continue;
3866 abort ();
3867 }
3868 else if (h->plt.offset == (bfd_vma) -1)
3869 abort ();
3870
3871 /* STT_GNU_IFUNC symbol must go through PLT. */
3872 if (htab->elf.splt != NULL)
3873 {
3874 if (htab->plt_bnd != NULL)
3875 {
3876 resolved_plt = htab->plt_bnd;
3877 plt_offset = eh->plt_bnd.offset;
3878 }
3879 else
3880 {
3881 resolved_plt = htab->elf.splt;
3882 plt_offset = h->plt.offset;
3883 }
3884 }
3885 else
3886 {
3887 resolved_plt = htab->elf.iplt;
3888 plt_offset = h->plt.offset;
3889 }
3890
3891 relocation = (resolved_plt->output_section->vma
3892 + resolved_plt->output_offset + plt_offset);
3893
3894 switch (r_type)
3895 {
3896 default:
3897 if (h->root.root.string)
3898 name = h->root.root.string;
3899 else
3900 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
3901 NULL);
3902 (*_bfd_error_handler)
3903 (_("%B: relocation %s against STT_GNU_IFUNC "
3904 "symbol `%s' isn't handled by %s"), input_bfd,
3905 x86_64_elf_howto_table[r_type].name,
3906 name, __FUNCTION__);
3907 bfd_set_error (bfd_error_bad_value);
3908 return FALSE;
3909
3910 case R_X86_64_32S:
3911 if (bfd_link_pic (info))
3912 abort ();
3913 goto do_relocation;
3914
3915 case R_X86_64_32:
3916 if (ABI_64_P (output_bfd))
3917 goto do_relocation;
3918 /* FALLTHROUGH */
3919 case R_X86_64_64:
3920 if (rel->r_addend != 0)
3921 {
3922 if (h->root.root.string)
3923 name = h->root.root.string;
3924 else
3925 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
3926 sym, NULL);
3927 (*_bfd_error_handler)
3928 (_("%B: relocation %s against STT_GNU_IFUNC "
3929 "symbol `%s' has non-zero addend: %d"),
3930 input_bfd, x86_64_elf_howto_table[r_type].name,
3931 name, rel->r_addend);
3932 bfd_set_error (bfd_error_bad_value);
3933 return FALSE;
3934 }
3935
3936 /* Generate a dynamic relocation only when there is a
3937 non-GOT reference in a shared object. */
3938 if (bfd_link_pic (info) && h->non_got_ref)
3939 {
3940 Elf_Internal_Rela outrel;
3941 asection *sreloc;
3942
3943 /* Need a dynamic relocation to get the real function
3944 address. */
3945 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
3946 info,
3947 input_section,
3948 rel->r_offset);
3949 if (outrel.r_offset == (bfd_vma) -1
3950 || outrel.r_offset == (bfd_vma) -2)
3951 abort ();
3952
3953 outrel.r_offset += (input_section->output_section->vma
3954 + input_section->output_offset);
3955
3956 if (h->dynindx == -1
3957 || h->forced_local
3958 || bfd_link_executable (info))
3959 {
3960 /* This symbol is resolved locally. */
3961 outrel.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
3962 outrel.r_addend = (h->root.u.def.value
3963 + h->root.u.def.section->output_section->vma
3964 + h->root.u.def.section->output_offset);
3965 }
3966 else
3967 {
3968 outrel.r_info = htab->r_info (h->dynindx, r_type);
3969 outrel.r_addend = 0;
3970 }
3971
3972 sreloc = htab->elf.irelifunc;
3973 elf_append_rela (output_bfd, sreloc, &outrel);
3974
3975 /* If this reloc is against an external symbol, we
3976 do not want to fiddle with the addend. Otherwise,
3977 we need to include the symbol value so that it
3978 becomes an addend for the dynamic reloc. For an
3979 internal symbol, we have updated the addend. */
3980 continue;
3981 }
3982 /* FALLTHROUGH */
3983 case R_X86_64_PC32:
3984 case R_X86_64_PC32_BND:
3985 case R_X86_64_PC64:
3986 case R_X86_64_PLT32:
3987 case R_X86_64_PLT32_BND:
3988 goto do_relocation;
3989
3990 case R_X86_64_GOTPCREL:
3991 case R_X86_64_GOTPCREL64:
3992 base_got = htab->elf.sgot;
3993 off = h->got.offset;
3994
3995 if (base_got == NULL)
3996 abort ();
3997
3998 if (off == (bfd_vma) -1)
3999 {
4000 /* We can't use h->got.offset here to save state, or
4001 even just remember the offset, as finish_dynamic_symbol
4002 would use that as offset into .got. */
4003
4004 if (htab->elf.splt != NULL)
4005 {
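/* PLT entry 0 is reserved, hence the -1 below; the first three
   .got.plt slots are reserved for the dynamic linker, hence the +3.  */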
4006 plt_index = h->plt.offset / plt_entry_size - 1;
4007 off = (plt_index + 3) * GOT_ENTRY_SIZE;
4008 base_got = htab->elf.sgotplt;
4009 }
4010 else
4011 {
4012 plt_index = h->plt.offset / plt_entry_size;
4013 off = plt_index * GOT_ENTRY_SIZE;
4014 base_got = htab->elf.igotplt;
4015 }
4016
4017 if (h->dynindx == -1
4018 || h->forced_local
4019 || info->symbolic)
4020 {
4021 /* This references the local definition. We must
4022 initialize this entry in the global offset table.
4023 Since the offset must always be a multiple of 8,
4024 we use the least significant bit to record
4025 whether we have initialized it already.
4026
4027 When doing a dynamic link, we create a .rela.got
4028 relocation entry to initialize the value. This
4029 is done in the finish_dynamic_symbol routine. */
4030 if ((off & 1) != 0)
4031 off &= ~1;
4032 else
4033 {
4034 bfd_put_64 (output_bfd, relocation,
4035 base_got->contents + off);
4036 /* Note that this is harmless for the GOTPLT64
4037 case, as -1 | 1 still is -1. */
4038 h->got.offset |= 1;
4039 }
4040 }
4041 }
4042
4043 relocation = (base_got->output_section->vma
4044 + base_got->output_offset + off);
4045
4046 goto do_relocation;
4047 }
4048 }
4049
4050 /* When generating a shared object, the relocations handled here are
4051 copied into the output file to be resolved at run time. */
4052 switch (r_type)
4053 {
4054 case R_X86_64_GOT32:
4055 case R_X86_64_GOT64:
4056 /* Relocation is to the entry for this symbol in the global
4057 offset table. */
4058 case R_X86_64_GOTPCREL:
4059 case R_X86_64_GOTPCREL64:
4060 /* Use global offset table entry as symbol value. */
4061 case R_X86_64_GOTPLT64:
4062 /* This is obsolete and treated the same as GOT64. */
4063 base_got = htab->elf.sgot;
4064
4065 if (htab->elf.sgot == NULL)
4066 abort ();
4067
4068 if (h != NULL)
4069 {
4070 bfd_boolean dyn;
4071
4072 off = h->got.offset;
4073 if (h->needs_plt
4074 && h->plt.offset != (bfd_vma)-1
4075 && off == (bfd_vma)-1)
4076 {
4077 /* We can't use h->got.offset here to save
4078 state, or even just remember the offset, as
4079 finish_dynamic_symbol would use that as offset into
4080 .got. */
4081 bfd_vma plt_index = h->plt.offset / plt_entry_size - 1;
4082 off = (plt_index + 3) * GOT_ENTRY_SIZE;
4083 base_got = htab->elf.sgotplt;
4084 }
4085
4086 dyn = htab->elf.dynamic_sections_created;
4087
4088 if (! WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, bfd_link_pic (info), h)
4089 || (bfd_link_pic (info)
4090 && SYMBOL_REFERENCES_LOCAL (info, h))
4091 || (ELF_ST_VISIBILITY (h->other)
4092 && h->root.type == bfd_link_hash_undefweak))
4093 {
4094 /* This is actually a static link, or it is a -Bsymbolic
4095 link and the symbol is defined locally, or the symbol
4096 was forced to be local because of a version file. We
4097 must initialize this entry in the global offset table.
4098 Since the offset must always be a multiple of 8, we
4099 use the least significant bit to record whether we
4100 have initialized it already.
4101
4102 When doing a dynamic link, we create a .rela.got
4103 relocation entry to initialize the value. This is
4104 done in the finish_dynamic_symbol routine. */
4105 if ((off & 1) != 0)
4106 off &= ~1;
4107 else
4108 {
4109 bfd_put_64 (output_bfd, relocation,
4110 base_got->contents + off);
4111 /* Note that this is harmless for the GOTPLT64 case,
4112 as -1 | 1 still is -1. */
4113 h->got.offset |= 1;
4114 }
4115 }
4116 else
4117 unresolved_reloc = FALSE;
4118 }
4119 else
4120 {
4121 if (local_got_offsets == NULL)
4122 abort ();
4123
4124 off = local_got_offsets[r_symndx];
4125
4126 /* The offset must always be a multiple of 8. We use
4127 the least significant bit to record whether we have
4128 already generated the necessary reloc. */
4129 if ((off & 1) != 0)
4130 off &= ~1;
4131 else
4132 {
4133 bfd_put_64 (output_bfd, relocation,
4134 base_got->contents + off);
4135
4136 if (bfd_link_pic (info))
4137 {
4138 asection *s;
4139 Elf_Internal_Rela outrel;
4140
4141 /* We need to generate a R_X86_64_RELATIVE reloc
4142 for the dynamic linker. */
4143 s = htab->elf.srelgot;
4144 if (s == NULL)
4145 abort ();
4146
4147 outrel.r_offset = (base_got->output_section->vma
4148 + base_got->output_offset
4149 + off);
4150 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4151 outrel.r_addend = relocation;
4152 elf_append_rela (output_bfd, s, &outrel);
4153 }
4154
4155 local_got_offsets[r_symndx] |= 1;
4156 }
4157 }
4158
4159 if (off >= (bfd_vma) -2)
4160 abort ();
4161
4162 relocation = base_got->output_section->vma
4163 + base_got->output_offset + off;
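/* GOT32, GOT64 and GOTPLT64 are GOT-relative, so the value is made
   relative to the GOT base here; GOTPCREL keeps the absolute address
   of the GOT entry for its PC-relative howto.  */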
4164 if (r_type != R_X86_64_GOTPCREL && r_type != R_X86_64_GOTPCREL64)
4165 relocation -= htab->elf.sgotplt->output_section->vma
4166 - htab->elf.sgotplt->output_offset;
4167
4168 break;
4169
4170 case R_X86_64_GOTOFF64:
4171 /* Relocation is relative to the start of the global offset
4172 table. */
4173
4174 /* Check that this isn't a protected function or data symbol in
4175 a shared library, since such a symbol may not be local when
4176 used as a function address or with a copy relocation. Also
4177 make sure that the symbol is referenced locally. */
4178 if (bfd_link_pic (info) && h)
4179 {
4180 if (!h->def_regular)
4181 {
4182 const char *v;
4183
4184 switch (ELF_ST_VISIBILITY (h->other))
4185 {
4186 case STV_HIDDEN:
4187 v = _("hidden symbol");
4188 break;
4189 case STV_INTERNAL:
4190 v = _("internal symbol");
4191 break;
4192 case STV_PROTECTED:
4193 v = _("protected symbol");
4194 break;
4195 default:
4196 v = _("symbol");
4197 break;
4198 }
4199
4200 (*_bfd_error_handler)
4201 (_("%B: relocation R_X86_64_GOTOFF64 against undefined %s `%s' can not be used when making a shared object"),
4202 input_bfd, v, h->root.root.string);
4203 bfd_set_error (bfd_error_bad_value);
4204 return FALSE;
4205 }
4206 else if (!bfd_link_executable (info)
4207 && !SYMBOL_REFERENCES_LOCAL (info, h)
4208 && (h->type == STT_FUNC
4209 || h->type == STT_OBJECT)
4210 && ELF_ST_VISIBILITY (h->other) == STV_PROTECTED)
4211 {
4212 (*_bfd_error_handler)
4213 (_("%B: relocation R_X86_64_GOTOFF64 against protected %s `%s' can not be used when making a shared object"),
4214 input_bfd,
4215 h->type == STT_FUNC ? "function" : "data",
4216 h->root.root.string);
4217 bfd_set_error (bfd_error_bad_value);
4218 return FALSE;
4219 }
4220 }
4221
4222 /* Note that sgot is not involved in this
4223 calculation. We always want the start of .got.plt. If we
4224 defined _GLOBAL_OFFSET_TABLE_ in a different way, as is
4225 permitted by the ABI, we might have to change this
4226 calculation. */
4227 relocation -= htab->elf.sgotplt->output_section->vma
4228 + htab->elf.sgotplt->output_offset;
4229 break;
4230
4231 case R_X86_64_GOTPC32:
4232 case R_X86_64_GOTPC64:
4233 /* Use global offset table as symbol value. */
4234 relocation = htab->elf.sgotplt->output_section->vma
4235 + htab->elf.sgotplt->output_offset;
4236 unresolved_reloc = FALSE;
4237 break;
4238
4239 case R_X86_64_PLTOFF64:
4240 /* Relocation is PLT entry relative to GOT. For local
4241 symbols it's the symbol itself relative to GOT. */
4242 if (h != NULL
4243 /* See PLT32 handling. */
4244 && h->plt.offset != (bfd_vma) -1
4245 && htab->elf.splt != NULL)
4246 {
4247 if (htab->plt_bnd != NULL)
4248 {
4249 resolved_plt = htab->plt_bnd;
4250 plt_offset = eh->plt_bnd.offset;
4251 }
4252 else
4253 {
4254 resolved_plt = htab->elf.splt;
4255 plt_offset = h->plt.offset;
4256 }
4257
4258 relocation = (resolved_plt->output_section->vma
4259 + resolved_plt->output_offset
4260 + plt_offset);
4261 unresolved_reloc = FALSE;
4262 }
4263
4264 relocation -= htab->elf.sgotplt->output_section->vma
4265 + htab->elf.sgotplt->output_offset;
4266 break;
4267
4268 case R_X86_64_PLT32:
4269 case R_X86_64_PLT32_BND:
4270 /* Relocation is to the entry for this symbol in the
4271 procedure linkage table. */
4272
4273 /* Resolve a PLT32 reloc against a local symbol directly,
4274 without using the procedure linkage table. */
4275 if (h == NULL)
4276 break;
4277
4278 if ((h->plt.offset == (bfd_vma) -1
4279 && eh->plt_got.offset == (bfd_vma) -1)
4280 || htab->elf.splt == NULL)
4281 {
4282 /* We didn't make a PLT entry for this symbol. This
4283 happens when statically linking PIC code, or when
4284 using -Bsymbolic. */
4285 break;
4286 }
4287
4288 if (h->plt.offset != (bfd_vma) -1)
4289 {
4290 if (htab->plt_bnd != NULL)
4291 {
4292 resolved_plt = htab->plt_bnd;
4293 plt_offset = eh->plt_bnd.offset;
4294 }
4295 else
4296 {
4297 resolved_plt = htab->elf.splt;
4298 plt_offset = h->plt.offset;
4299 }
4300 }
4301 else
4302 {
4303 /* Use the GOT PLT. */
4304 resolved_plt = htab->plt_got;
4305 plt_offset = eh->plt_got.offset;
4306 }
4307
4308 relocation = (resolved_plt->output_section->vma
4309 + resolved_plt->output_offset
4310 + plt_offset);
4311 unresolved_reloc = FALSE;
4312 break;
4313
4314 case R_X86_64_SIZE32:
4315 case R_X86_64_SIZE64:
4316 /* Set to symbol size. */
4317 relocation = st_size;
4318 goto direct;
4319
4320 case R_X86_64_PC8:
4321 case R_X86_64_PC16:
4322 case R_X86_64_PC32:
4323 case R_X86_64_PC32_BND:
4324 /* Don't complain about -fPIC if the symbol is undefined when
4325 building an executable. */
4326 if (bfd_link_pic (info)
4327 && (input_section->flags & SEC_ALLOC) != 0
4328 && (input_section->flags & SEC_READONLY) != 0
4329 && h != NULL
4330 && !(bfd_link_executable (info)
4331 && h->root.type == bfd_link_hash_undefined))
4332 {
4333 bfd_boolean fail = FALSE;
4334 bfd_boolean branch
4335 = ((r_type == R_X86_64_PC32
4336 || r_type == R_X86_64_PC32_BND)
4337 && is_32bit_relative_branch (contents, rel->r_offset));
4338
4339 if (SYMBOL_REFERENCES_LOCAL (info, h))
4340 {
4341 /* Symbol is referenced locally. Make sure it is
4342 either defined locally or used in a branch. */
4343 fail = !h->def_regular && !branch;
4344 }
4345 else if (!(bfd_link_executable (info)
4346 && (h->needs_copy || eh->needs_copy)))
4347 {
4348 /* Symbol doesn't need copy reloc and isn't referenced
4349 locally. We only allow branch to symbol with
4350 non-default visibility. */
4351 fail = (!branch
4352 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT);
4353 }
4354
4355 if (fail)
4356 {
4357 const char *fmt;
4358 const char *v;
4359 const char *pic = "";
4360
4361 switch (ELF_ST_VISIBILITY (h->other))
4362 {
4363 case STV_HIDDEN:
4364 v = _("hidden symbol");
4365 break;
4366 case STV_INTERNAL:
4367 v = _("internal symbol");
4368 break;
4369 case STV_PROTECTED:
4370 v = _("protected symbol");
4371 break;
4372 default:
4373 v = _("symbol");
4374 pic = _("; recompile with -fPIC");
4375 break;
4376 }
4377
4378 if (h->def_regular)
4379 fmt = _("%B: relocation %s against %s `%s' can not be used when making a shared object%s");
4380 else
4381 fmt = _("%B: relocation %s against undefined %s `%s' can not be used when making a shared object%s");
4382
4383 (*_bfd_error_handler) (fmt, input_bfd,
4384 x86_64_elf_howto_table[r_type].name,
4385 v, h->root.root.string, pic);
4386 bfd_set_error (bfd_error_bad_value);
4387 return FALSE;
4388 }
4389 }
4390 /* Fall through. */
4391
4392 case R_X86_64_8:
4393 case R_X86_64_16:
4394 case R_X86_64_32:
4395 case R_X86_64_PC64:
4396 case R_X86_64_64:
4397 /* FIXME: The ABI says the linker should make sure the value is
4398 the same when it's zero-extended to 64 bits. */
4399
4400 direct:
4401 if ((input_section->flags & SEC_ALLOC) == 0)
4402 break;
4403
4404 /* Don't copy a pc-relative relocation into the output file
4405 if the symbol needs a copy reloc or is undefined when
4406 building an executable. Copy dynamic function pointer
4407 relocations. */
4408 if ((bfd_link_pic (info)
4409 && !(bfd_link_executable (info)
4410 && h != NULL
4411 && (h->needs_copy
4412 || eh->needs_copy
4413 || h->root.type == bfd_link_hash_undefined)
4414 && IS_X86_64_PCREL_TYPE (r_type))
4415 && (h == NULL
4416 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4417 || h->root.type != bfd_link_hash_undefweak)
4418 && ((! IS_X86_64_PCREL_TYPE (r_type)
4419 && r_type != R_X86_64_SIZE32
4420 && r_type != R_X86_64_SIZE64)
4421 || ! SYMBOL_CALLS_LOCAL (info, h)))
4422 || (ELIMINATE_COPY_RELOCS
4423 && !bfd_link_pic (info)
4424 && h != NULL
4425 && h->dynindx != -1
4426 && (!h->non_got_ref || eh->func_pointer_refcount > 0)
4427 && ((h->def_dynamic
4428 && !h->def_regular)
4429 || h->root.type == bfd_link_hash_undefweak
4430 || h->root.type == bfd_link_hash_undefined)))
4431 {
4432 Elf_Internal_Rela outrel;
4433 bfd_boolean skip, relocate;
4434 asection *sreloc;
4435
4436 /* When generating a shared object, these relocations
4437 are copied into the output file to be resolved at run
4438 time. */
4439 skip = FALSE;
4440 relocate = FALSE;
4441
4442 outrel.r_offset =
4443 _bfd_elf_section_offset (output_bfd, info, input_section,
4444 rel->r_offset);
4445 if (outrel.r_offset == (bfd_vma) -1)
4446 skip = TRUE;
4447 else if (outrel.r_offset == (bfd_vma) -2)
4448 skip = TRUE, relocate = TRUE;
4449
4450 outrel.r_offset += (input_section->output_section->vma
4451 + input_section->output_offset);
4452
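/* A skipped location gets an all-zero reloc, i.e. R_X86_64_NONE
   against symbol index 0.  */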
4453 if (skip)
4454 memset (&outrel, 0, sizeof outrel);
4455
4456 /* h->dynindx may be -1 if this symbol was marked to
4457 become local. */
4458 else if (h != NULL
4459 && h->dynindx != -1
4460 && (IS_X86_64_PCREL_TYPE (r_type)
4461 || ! bfd_link_pic (info)
4462 || ! SYMBOLIC_BIND (info, h)
4463 || ! h->def_regular))
4464 {
4465 outrel.r_info = htab->r_info (h->dynindx, r_type);
4466 outrel.r_addend = rel->r_addend;
4467 }
4468 else
4469 {
4470 /* This symbol is local, or marked to become local. */
4471 if (r_type == htab->pointer_r_type)
4472 {
4473 relocate = TRUE;
4474 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4475 outrel.r_addend = relocation + rel->r_addend;
4476 }
4477 else if (r_type == R_X86_64_64
4478 && !ABI_64_P (output_bfd))
4479 {
4480 relocate = TRUE;
4481 outrel.r_info = htab->r_info (0,
4482 R_X86_64_RELATIVE64);
4483 outrel.r_addend = relocation + rel->r_addend;
4484 /* Check addend overflow. */
4485 if ((outrel.r_addend & 0x80000000)
4486 != (rel->r_addend & 0x80000000))
4487 {
4488 const char *name;
4489 int addend = rel->r_addend;
4490 if (h && h->root.root.string)
4491 name = h->root.root.string;
4492 else
4493 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
4494 sym, NULL);
4495 if (addend < 0)
4496 (*_bfd_error_handler)
4497 (_("%B: addend -0x%x in relocation %s against "
4498 "symbol `%s' at 0x%lx in section `%A' is "
4499 "out of range"),
4500 input_bfd, input_section, addend,
4501 x86_64_elf_howto_table[r_type].name,
4502 name, (unsigned long) rel->r_offset);
4503 else
4504 (*_bfd_error_handler)
4505 (_("%B: addend 0x%x in relocation %s against "
4506 "symbol `%s' at 0x%lx in section `%A' is "
4507 "out of range"),
4508 input_bfd, input_section, addend,
4509 x86_64_elf_howto_table[r_type].name,
4510 name, (unsigned long) rel->r_offset);
4511 bfd_set_error (bfd_error_bad_value);
4512 return FALSE;
4513 }
4514 }
4515 else
4516 {
4517 long sindx;
4518
4519 if (bfd_is_abs_section (sec))
4520 sindx = 0;
4521 else if (sec == NULL || sec->owner == NULL)
4522 {
4523 bfd_set_error (bfd_error_bad_value);
4524 return FALSE;
4525 }
4526 else
4527 {
4528 asection *osec;
4529
4530 /* We are turning this relocation into one
4531 against a section symbol. It would be
4532 proper to subtract the symbol's value,
4533 osec->vma, from the emitted reloc addend,
4534 but ld.so expects buggy relocs. */
4535 osec = sec->output_section;
4536 sindx = elf_section_data (osec)->dynindx;
4537 if (sindx == 0)
4538 {
4539 asection *oi = htab->elf.text_index_section;
4540 sindx = elf_section_data (oi)->dynindx;
4541 }
4542 BFD_ASSERT (sindx != 0);
4543 }
4544
4545 outrel.r_info = htab->r_info (sindx, r_type);
4546 outrel.r_addend = relocation + rel->r_addend;
4547 }
4548 }
4549
4550 sreloc = elf_section_data (input_section)->sreloc;
4551
4552 if (sreloc == NULL || sreloc->contents == NULL)
4553 {
4554 r = bfd_reloc_notsupported;
4555 goto check_relocation_error;
4556 }
4557
4558 elf_append_rela (output_bfd, sreloc, &outrel);
4559
4560 /* If this reloc is against an external symbol, we do
4561 not want to fiddle with the addend. Otherwise, we
4562 need to include the symbol value so that it becomes
4563 an addend for the dynamic reloc. */
4564 if (! relocate)
4565 continue;
4566 }
4567
4568 break;
4569
4570 case R_X86_64_TLSGD:
4571 case R_X86_64_GOTPC32_TLSDESC:
4572 case R_X86_64_TLSDESC_CALL:
4573 case R_X86_64_GOTTPOFF:
4574 tls_type = GOT_UNKNOWN;
4575 if (h == NULL && local_got_offsets)
4576 tls_type = elf_x86_64_local_got_tls_type (input_bfd) [r_symndx];
4577 else if (h != NULL)
4578 tls_type = elf_x86_64_hash_entry (h)->tls_type;
4579
4580 if (! elf_x86_64_tls_transition (info, input_bfd,
4581 input_section, contents,
4582 symtab_hdr, sym_hashes,
4583 &r_type, tls_type, rel,
4584 relend, h, r_symndx))
4585 return FALSE;
4586
4587 if (r_type == R_X86_64_TPOFF32)
4588 {
4589 bfd_vma roff = rel->r_offset;
4590
4591 BFD_ASSERT (! unresolved_reloc);
4592
4593 if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSGD)
4594 {
4595 /* GD->LE transition. For 64bit, change
4596 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
4597 .word 0x6666; rex64; call __tls_get_addr
4598 into:
4599 movq %fs:0, %rax
4600 leaq foo@tpoff(%rax), %rax
4601 For 32bit, change
4602 leaq foo@tlsgd(%rip), %rdi
4603 .word 0x6666; rex64; call __tls_get_addr
4604 into:
4605 movl %fs:0, %eax
4606 leaq foo@tpoff(%rax), %rax
4607 For largepic, change:
4608 leaq foo@tlsgd(%rip), %rdi
4609 movabsq $__tls_get_addr@pltoff, %rax
4610 addq %rbx, %rax
4611 call *%rax
4612 into:
4613 movq %fs:0, %rax
4614 leaq foo@tpoff(%rax), %rax
4615 nopw 0x0(%rax,%rax,1) */
4616 int largepic = 0;
4617 if (ABI_64_P (output_bfd)
4618 && contents[roff + 5] == (bfd_byte) '\xb8')
4619 {
4620 memcpy (contents + roff - 3,
4621 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80"
4622 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
4623 largepic = 1;
4624 }
4625 else if (ABI_64_P (output_bfd))
4626 memcpy (contents + roff - 4,
4627 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
4628 16);
4629 else
4630 memcpy (contents + roff - 3,
4631 "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
4632 15);
4633 bfd_put_32 (output_bfd,
4634 elf_x86_64_tpoff (info, relocation),
4635 contents + roff + 8 + largepic);
4636 /* Skip R_X86_64_PC32/R_X86_64_PLT32/R_X86_64_PLTOFF64. */
4637 rel++;
4638 continue;
4639 }
4640 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTPC32_TLSDESC)
4641 {
4642 /* GDesc -> LE transition.
4643 It's originally something like:
4644 leaq x@tlsdesc(%rip), %rax
4645
4646 Change it to:
4647 movl $x@tpoff, %rax. */
4648
4649 unsigned int val, type;
4650
4651 type = bfd_get_8 (input_bfd, contents + roff - 3);
4652 val = bfd_get_8 (input_bfd, contents + roff - 1);
4653 bfd_put_8 (output_bfd, 0x48 | ((type >> 2) & 1),
4654 contents + roff - 3);
4655 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
4656 bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7),
4657 contents + roff - 1);
4658 bfd_put_32 (output_bfd,
4659 elf_x86_64_tpoff (info, relocation),
4660 contents + roff);
4661 continue;
4662 }
4663 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSDESC_CALL)
4664 {
4665 /* GDesc -> LE transition.
4666 It's originally:
4667 call *(%rax)
4668 Turn it into:
4669 xchg %ax,%ax. */
4670 bfd_put_8 (output_bfd, 0x66, contents + roff);
4671 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
4672 continue;
4673 }
4674 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTTPOFF)
4675 {
4676 /* IE->LE transition:
4677 For 64bit, originally it can be one of:
4678 movq foo@gottpoff(%rip), %reg
4679 addq foo@gottpoff(%rip), %reg
4680 We change it into:
4681 movq $foo, %reg
4682 leaq foo(%reg), %reg
4683 addq $foo, %reg.
4684 For 32bit, originally it can be one of:
4685 movq foo@gottpoff(%rip), %reg
4686 addl foo@gottpoff(%rip), %reg
4687 We change it into:
4688 movq $foo, %reg
4689 leal foo(%reg), %reg
4690 addl $foo, %reg. */
4691
4692 unsigned int val, type, reg;
4693
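/* VAL is the REX prefix byte (if any), TYPE the opcode byte and
   REG the ModRM reg field of the instruction being converted.  */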
4694 if (roff >= 3)
4695 val = bfd_get_8 (input_bfd, contents + roff - 3);
4696 else
4697 val = 0;
4698 type = bfd_get_8 (input_bfd, contents + roff - 2);
4699 reg = bfd_get_8 (input_bfd, contents + roff - 1);
4700 reg >>= 3;
4701 if (type == 0x8b)
4702 {
4703 /* movq */
4704 if (val == 0x4c)
4705 bfd_put_8 (output_bfd, 0x49,
4706 contents + roff - 3);
4707 else if (!ABI_64_P (output_bfd) && val == 0x44)
4708 bfd_put_8 (output_bfd, 0x41,
4709 contents + roff - 3);
4710 bfd_put_8 (output_bfd, 0xc7,
4711 contents + roff - 2);
4712 bfd_put_8 (output_bfd, 0xc0 | reg,
4713 contents + roff - 1);
4714 }
4715 else if (reg == 4)
4716 {
4717 /* addq/addl -> addq/addl - addressing with %rsp/%r12
4718 is special */
4719 if (val == 0x4c)
4720 bfd_put_8 (output_bfd, 0x49,
4721 contents + roff - 3);
4722 else if (!ABI_64_P (output_bfd) && val == 0x44)
4723 bfd_put_8 (output_bfd, 0x41,
4724 contents + roff - 3);
4725 bfd_put_8 (output_bfd, 0x81,
4726 contents + roff - 2);
4727 bfd_put_8 (output_bfd, 0xc0 | reg,
4728 contents + roff - 1);
4729 }
4730 else
4731 {
4732 /* addq/addl -> leaq/leal */
4733 if (val == 0x4c)
4734 bfd_put_8 (output_bfd, 0x4d,
4735 contents + roff - 3);
4736 else if (!ABI_64_P (output_bfd) && val == 0x44)
4737 bfd_put_8 (output_bfd, 0x45,
4738 contents + roff - 3);
4739 bfd_put_8 (output_bfd, 0x8d,
4740 contents + roff - 2);
4741 bfd_put_8 (output_bfd, 0x80 | reg | (reg << 3),
4742 contents + roff - 1);
4743 }
4744 bfd_put_32 (output_bfd,
4745 elf_x86_64_tpoff (info, relocation),
4746 contents + roff);
4747 continue;
4748 }
4749 else
4750 BFD_ASSERT (FALSE);
4751 }
4752
4753 if (htab->elf.sgot == NULL)
4754 abort ();
4755
4756 if (h != NULL)
4757 {
4758 off = h->got.offset;
4759 offplt = elf_x86_64_hash_entry (h)->tlsdesc_got;
4760 }
4761 else
4762 {
4763 if (local_got_offsets == NULL)
4764 abort ();
4765
4766 off = local_got_offsets[r_symndx];
4767 offplt = local_tlsdesc_gotents[r_symndx];
4768 }
4769
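/* As for other GOT offsets, bit 0 records whether the entry has
   already been initialized.  */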
4770 if ((off & 1) != 0)
4771 off &= ~1;
4772 else
4773 {
4774 Elf_Internal_Rela outrel;
4775 int dr_type, indx;
4776 asection *sreloc;
4777
4778 if (htab->elf.srelgot == NULL)
4779 abort ();
4780
4781 indx = h && h->dynindx != -1 ? h->dynindx : 0;
4782
4783 if (GOT_TLS_GDESC_P (tls_type))
4784 {
4785 outrel.r_info = htab->r_info (indx, R_X86_64_TLSDESC);
4786 BFD_ASSERT (htab->sgotplt_jump_table_size + offplt
4787 + 2 * GOT_ENTRY_SIZE <= htab->elf.sgotplt->size);
4788 outrel.r_offset = (htab->elf.sgotplt->output_section->vma
4789 + htab->elf.sgotplt->output_offset
4790 + offplt
4791 + htab->sgotplt_jump_table_size);
4792 sreloc = htab->elf.srelplt;
4793 if (indx == 0)
4794 outrel.r_addend = relocation - elf_x86_64_dtpoff_base (info);
4795 else
4796 outrel.r_addend = 0;
4797 elf_append_rela (output_bfd, sreloc, &outrel);
4798 }
4799
4800 sreloc = htab->elf.srelgot;
4801
4802 outrel.r_offset = (htab->elf.sgot->output_section->vma
4803 + htab->elf.sgot->output_offset + off);
4804
4805 if (GOT_TLS_GD_P (tls_type))
4806 dr_type = R_X86_64_DTPMOD64;
4807 else if (GOT_TLS_GDESC_P (tls_type))
4808 goto dr_done;
4809 else
4810 dr_type = R_X86_64_TPOFF64;
4811
4812 bfd_put_64 (output_bfd, 0, htab->elf.sgot->contents + off);
4813 outrel.r_addend = 0;
4814 if ((dr_type == R_X86_64_TPOFF64
4815 || dr_type == R_X86_64_TLSDESC) && indx == 0)
4816 outrel.r_addend = relocation - elf_x86_64_dtpoff_base (info);
4817 outrel.r_info = htab->r_info (indx, dr_type);
4818
4819 elf_append_rela (output_bfd, sreloc, &outrel);
4820
4821 if (GOT_TLS_GD_P (tls_type))
4822 {
4823 if (indx == 0)
4824 {
4825 BFD_ASSERT (! unresolved_reloc);
4826 bfd_put_64 (output_bfd,
4827 relocation - elf_x86_64_dtpoff_base (info),
4828 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
4829 }
4830 else
4831 {
4832 bfd_put_64 (output_bfd, 0,
4833 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
4834 outrel.r_info = htab->r_info (indx,
4835 R_X86_64_DTPOFF64);
4836 outrel.r_offset += GOT_ENTRY_SIZE;
4837 elf_append_rela (output_bfd, sreloc,
4838 &outrel);
4839 }
4840 }
4841
4842 dr_done:
4843 if (h != NULL)
4844 h->got.offset |= 1;
4845 else
4846 local_got_offsets[r_symndx] |= 1;
4847 }
4848
4849 if (off >= (bfd_vma) -2
4850 && ! GOT_TLS_GDESC_P (tls_type))
4851 abort ();
4852 if (r_type == ELF32_R_TYPE (rel->r_info))
4853 {
4854 if (r_type == R_X86_64_GOTPC32_TLSDESC
4855 || r_type == R_X86_64_TLSDESC_CALL)
4856 relocation = htab->elf.sgotplt->output_section->vma
4857 + htab->elf.sgotplt->output_offset
4858 + offplt + htab->sgotplt_jump_table_size;
4859 else
4860 relocation = htab->elf.sgot->output_section->vma
4861 + htab->elf.sgot->output_offset + off;
4862 unresolved_reloc = FALSE;
4863 }
4864 else
4865 {
4866 bfd_vma roff = rel->r_offset;
4867
4868 if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSGD)
4869 {
4870 /* GD->IE transition. For 64bit, change
4871 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
4872 .word 0x6666; rex64; call __tls_get_addr@plt
4873 into:
4874 movq %fs:0, %rax
4875 addq foo@gottpoff(%rip), %rax
4876 For 32bit, change
4877 leaq foo@tlsgd(%rip), %rdi
4878 .word 0x6666; rex64; call __tls_get_addr@plt
4879 into:
4880 movl %fs:0, %eax
4881 addq foo@gottpoff(%rip), %rax
4882 For largepic, change:
4883 leaq foo@tlsgd(%rip), %rdi
4884 movabsq $__tls_get_addr@pltoff, %rax
4885 addq %rbx, %rax
4886 call *%rax
4887 into:
4888 movq %fs:0, %rax
4889 addq foo@gottpoff(%rax), %rax
4890 nopw 0x0(%rax,%rax,1) */
4891 int largepic = 0;
4892 if (ABI_64_P (output_bfd)
4893 && contents[roff + 5] == (bfd_byte) '\xb8')
4894 {
4895 memcpy (contents + roff - 3,
4896 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05"
4897 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
4898 largepic = 1;
4899 }
4900 else if (ABI_64_P (output_bfd))
4901 memcpy (contents + roff - 4,
4902 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
4903 16);
4904 else
4905 memcpy (contents + roff - 3,
4906 "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
4907 15);
4908
4909 relocation = (htab->elf.sgot->output_section->vma
4910 + htab->elf.sgot->output_offset + off
4911 - roff
4912 - largepic
4913 - input_section->output_section->vma
4914 - input_section->output_offset
4915 - 12);
4916 bfd_put_32 (output_bfd, relocation,
4917 contents + roff + 8 + largepic);
4918 /* Skip R_X86_64_PLT32/R_X86_64_PLTOFF64. */
4919 rel++;
4920 continue;
4921 }
4922 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTPC32_TLSDESC)
4923 {
4924 /* GDesc -> IE transition.
4925 It's originally something like:
4926 leaq x@tlsdesc(%rip), %rax
4927
4928 Change it to:
4929 movq x@gottpoff(%rip), %rax # before xchg %ax,%ax. */
4930
4931 /* Now modify the instruction as appropriate. To
4932 turn a leaq into a movq in the form we use it, it
4933 suffices to change the second byte from 0x8d to
4934 0x8b. */
4935 bfd_put_8 (output_bfd, 0x8b, contents + roff - 2);
4936
4937 bfd_put_32 (output_bfd,
4938 htab->elf.sgot->output_section->vma
4939 + htab->elf.sgot->output_offset + off
4940 - rel->r_offset
4941 - input_section->output_section->vma
4942 - input_section->output_offset
4943 - 4,
4944 contents + roff);
4945 continue;
4946 }
4947 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSDESC_CALL)
4948 {
4949 /* GDesc -> IE transition.
4950 It's originally:
4951 call *(%rax)
4952
4953 Change it to:
4954 xchg %ax, %ax. */
4955
4956 bfd_put_8 (output_bfd, 0x66, contents + roff);
4957 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
4958 continue;
4959 }
4960 else
4961 BFD_ASSERT (FALSE);
4962 }
4963 break;
4964
4965 case R_X86_64_TLSLD:
4966 if (! elf_x86_64_tls_transition (info, input_bfd,
4967 input_section, contents,
4968 symtab_hdr, sym_hashes,
4969 &r_type, GOT_UNKNOWN,
4970 rel, relend, h, r_symndx))
4971 return FALSE;
4972
4973 if (r_type != R_X86_64_TLSLD)
4974 {
4975 /* LD->LE transition:
4976 leaq foo@tlsld(%rip), %rdi; call __tls_get_addr.
4977 For 64bit, we change it into:
4978 .word 0x6666; .byte 0x66; movq %fs:0, %rax.
4979 For 32bit, we change it into:
4980 nopl 0x0(%rax); movl %fs:0, %eax.
4981 For largepic, change:
4982 leaq foo@tlsgd(%rip), %rdi
4983 movabsq $__tls_get_addr@pltoff, %rax
4984 addq %rbx, %rax
4985 call *%rax
4986 into:
4987 data32 data32 data32 nopw %cs:0x0(%rax,%rax,1)
4988 movq %fs:0, %rax */
4989
4990 BFD_ASSERT (r_type == R_X86_64_TPOFF32);
4991 if (ABI_64_P (output_bfd)
4992 && contents[rel->r_offset + 5] == (bfd_byte) '\xb8')
4993 memcpy (contents + rel->r_offset - 3,
4994 "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0"
4995 "\x64\x48\x8b\x04\x25\0\0\0", 22);
4996 else if (ABI_64_P (output_bfd))
4997 memcpy (contents + rel->r_offset - 3,
4998 "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12);
4999 else
5000 memcpy (contents + rel->r_offset - 3,
5001 "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12);
5002 /* Skip R_X86_64_PC32/R_X86_64_PLT32/R_X86_64_PLTOFF64. */
5003 rel++;
5004 continue;
5005 }
5006
5007 if (htab->elf.sgot == NULL)
5008 abort ();
5009
5010 off = htab->tls_ld_got.offset;
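/* A single GOT entry pair (module ID, offset 0) is shared by all
   R_X86_64_TLSLD relocations; bit 0 records whether it has been
   created yet.  */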
5011 if (off & 1)
5012 off &= ~1;
5013 else
5014 {
5015 Elf_Internal_Rela outrel;
5016
5017 if (htab->elf.srelgot == NULL)
5018 abort ();
5019
5020 outrel.r_offset = (htab->elf.sgot->output_section->vma
5021 + htab->elf.sgot->output_offset + off);
5022
5023 bfd_put_64 (output_bfd, 0,
5024 htab->elf.sgot->contents + off);
5025 bfd_put_64 (output_bfd, 0,
5026 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
5027 outrel.r_info = htab->r_info (0, R_X86_64_DTPMOD64);
5028 outrel.r_addend = 0;
5029 elf_append_rela (output_bfd, htab->elf.srelgot,
5030 &outrel);
5031 htab->tls_ld_got.offset |= 1;
5032 }
5033 relocation = htab->elf.sgot->output_section->vma
5034 + htab->elf.sgot->output_offset + off;
5035 unresolved_reloc = FALSE;
5036 break;
5037
5038 case R_X86_64_DTPOFF32:
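/* When building an executable, LD accesses in code sections have
   been transitioned to LE, so a @dtpoff there must be resolved as a
   @tpoff value; non-code (e.g. debug) sections keep the real
   @dtpoff.  */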
5039 if (!bfd_link_executable (info)
5040 || (input_section->flags & SEC_CODE) == 0)
5041 relocation -= elf_x86_64_dtpoff_base (info);
5042 else
5043 relocation = elf_x86_64_tpoff (info, relocation);
5044 break;
5045
5046 case R_X86_64_TPOFF32:
5047 case R_X86_64_TPOFF64:
5048 BFD_ASSERT (bfd_link_executable (info));
5049 relocation = elf_x86_64_tpoff (info, relocation);
5050 break;
5051
5052 case R_X86_64_DTPOFF64:
5053 BFD_ASSERT ((input_section->flags & SEC_CODE) == 0);
5054 relocation -= elf_x86_64_dtpoff_base (info);
5055 break;
5056
5057 default:
5058 break;
5059 }
5060
5061 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
5062 because such sections are not SEC_ALLOC and thus ld.so will
5063 not process them. */
5064 if (unresolved_reloc
5065 && !((input_section->flags & SEC_DEBUGGING) != 0
5066 && h->def_dynamic)
5067 && _bfd_elf_section_offset (output_bfd, info, input_section,
5068 rel->r_offset) != (bfd_vma) -1)
5069 {
5070 (*_bfd_error_handler)
5071 (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
5072 input_bfd,
5073 input_section,
5074 (long) rel->r_offset,
5075 howto->name,
5076 h->root.root.string);
5077 return FALSE;
5078 }
5079
5080 do_relocation:
5081 r = _bfd_final_link_relocate (howto, input_bfd, input_section,
5082 contents, rel->r_offset,
5083 relocation, rel->r_addend);
5084
5085 check_relocation_error:
5086 if (r != bfd_reloc_ok)
5087 {
5088 const char *name;
5089
5090 if (h != NULL)
5091 name = h->root.root.string;
5092 else
5093 {
5094 name = bfd_elf_string_from_elf_section (input_bfd,
5095 symtab_hdr->sh_link,
5096 sym->st_name);
5097 if (name == NULL)
5098 return FALSE;
5099 if (*name == '\0')
5100 name = bfd_section_name (input_bfd, sec);
5101 }
5102
5103 if (r == bfd_reloc_overflow)
5104 {
5105 if (! ((*info->callbacks->reloc_overflow)
5106 (info, (h ? &h->root : NULL), name, howto->name,
5107 (bfd_vma) 0, input_bfd, input_section,
5108 rel->r_offset)))
5109 return FALSE;
5110 }
5111 else
5112 {
5113 (*_bfd_error_handler)
5114 (_("%B(%A+0x%lx): reloc against `%s': error %d"),
5115 input_bfd, input_section,
5116 (long) rel->r_offset, name, (int) r);
5117 return FALSE;
5118 }
5119 }
5120 }
5121
5122 return TRUE;
5123 }
5124
5125 /* Finish up dynamic symbol handling. We set the contents of various
5126 dynamic sections here. */
5127
5128 static bfd_boolean
5129 elf_x86_64_finish_dynamic_symbol (bfd *output_bfd,
5130 struct bfd_link_info *info,
5131 struct elf_link_hash_entry *h,
5132 Elf_Internal_Sym *sym ATTRIBUTE_UNUSED)
5133 {
5134 struct elf_x86_64_link_hash_table *htab;
5135 const struct elf_x86_64_backend_data *abed;
5136 bfd_boolean use_plt_bnd;
5137 struct elf_x86_64_link_hash_entry *eh;
5138
5139 htab = elf_x86_64_hash_table (info);
5140 if (htab == NULL)
5141 return FALSE;
5142
5143 /* Use MPX backend data in case of BND relocations. Use the
5144 .plt_bnd section only if there is a .plt section. */
5145 use_plt_bnd = htab->elf.splt != NULL && htab->plt_bnd != NULL;
5146 abed = (use_plt_bnd
5147 ? &elf_x86_64_bnd_arch_bed
5148 : get_elf_x86_64_backend_data (output_bfd));
5149
5150 eh = (struct elf_x86_64_link_hash_entry *) h;
5151
5152 if (h->plt.offset != (bfd_vma) -1)
5153 {
5154 bfd_vma plt_index;
5155 bfd_vma got_offset, plt_offset, plt_plt_offset, plt_got_offset;
5156 bfd_vma plt_plt_insn_end, plt_got_insn_size;
5157 Elf_Internal_Rela rela;
5158 bfd_byte *loc;
5159 asection *plt, *gotplt, *relplt, *resolved_plt;
5160 const struct elf_backend_data *bed;
5161 bfd_vma plt_got_pcrel_offset;
5162
5163 /* When building a static executable, use .iplt, .igot.plt and
5164 .rela.iplt sections for STT_GNU_IFUNC symbols. */
5165 if (htab->elf.splt != NULL)
5166 {
5167 plt = htab->elf.splt;
5168 gotplt = htab->elf.sgotplt;
5169 relplt = htab->elf.srelplt;
5170 }
5171 else
5172 {
5173 plt = htab->elf.iplt;
5174 gotplt = htab->elf.igotplt;
5175 relplt = htab->elf.irelplt;
5176 }
5177
5178 /* This symbol has an entry in the procedure linkage table. Set
5179 it up. */
5180 if ((h->dynindx == -1
5181 && !((h->forced_local || bfd_link_executable (info))
5182 && h->def_regular
5183 && h->type == STT_GNU_IFUNC))
5184 || plt == NULL
5185 || gotplt == NULL
5186 || relplt == NULL)
5187 abort ();
5188
5189 /* Get the index in the procedure linkage table which
5190 corresponds to this symbol. This is the index of this symbol
5191 in all the symbols for which we are making plt entries. The
5192 first entry in the procedure linkage table is reserved.
5193
5194 Get the offset into the .got table of the entry that
5195 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
5196 bytes. The first three are reserved for the dynamic linker.
5197
5198 For static executables, we don't reserve anything. */
5199
5200 if (plt == htab->elf.splt)
5201 {
5202 got_offset = h->plt.offset / abed->plt_entry_size - 1;
5203 got_offset = (got_offset + 3) * GOT_ENTRY_SIZE;
5204 }
5205 else
5206 {
5207 got_offset = h->plt.offset / abed->plt_entry_size;
5208 got_offset = got_offset * GOT_ENTRY_SIZE;
5209 }
5210
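/* A lazy (non-BND) PLT entry has roughly the form
       jmpq  *name@GOTPCREL(%rip)  # disp32 patched at plt_got_offset
       pushq $index                # imm32 patched at plt_reloc_offset
       jmpq  .PLT0                 # disp32 patched at plt_plt_offset
   and the offsets taken from abed below locate those fields.  */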
5211 plt_plt_insn_end = abed->plt_plt_insn_end;
5212 plt_plt_offset = abed->plt_plt_offset;
5213 plt_got_insn_size = abed->plt_got_insn_size;
5214 plt_got_offset = abed->plt_got_offset;
5215 if (use_plt_bnd)
5216 {
5217 /* Use the second PLT with BND relocations. */
5218 const bfd_byte *plt_entry, *plt2_entry;
5219
5220 if (eh->has_bnd_reloc)
5221 {
5222 plt_entry = elf_x86_64_bnd_plt_entry;
5223 plt2_entry = elf_x86_64_bnd_plt2_entry;
5224 }
5225 else
5226 {
5227 plt_entry = elf_x86_64_legacy_plt_entry;
5228 plt2_entry = elf_x86_64_legacy_plt2_entry;
5229
5230 /* Subtract 1 since there is no BND prefix. */
5231 plt_plt_insn_end -= 1;
5232 plt_plt_offset -= 1;
5233 plt_got_insn_size -= 1;
5234 plt_got_offset -= 1;
5235 }
5236
5237 BFD_ASSERT (sizeof (elf_x86_64_bnd_plt_entry)
5238 == sizeof (elf_x86_64_legacy_plt_entry));
5239
5240 /* Fill in the entry in the procedure linkage table. */
5241 memcpy (plt->contents + h->plt.offset,
5242 plt_entry, sizeof (elf_x86_64_legacy_plt_entry));
5243 /* Fill in the entry in the second PLT. */
5244 memcpy (htab->plt_bnd->contents + eh->plt_bnd.offset,
5245 plt2_entry, sizeof (elf_x86_64_legacy_plt2_entry));
5246
5247 resolved_plt = htab->plt_bnd;
5248 plt_offset = eh->plt_bnd.offset;
5249 }
5250 else
5251 {
5252 /* Fill in the entry in the procedure linkage table. */
5253 memcpy (plt->contents + h->plt.offset, abed->plt_entry,
5254 abed->plt_entry_size);
5255
5256 resolved_plt = plt;
5257 plt_offset = h->plt.offset;
5258 }
5259
5260 /* Insert the relocation positions of the PLT section. */
5261 
5262 /* Store the offset of the GOT entry relative to the end of the
5263 PC-relative instruction that references it. */
5264 plt_got_pcrel_offset = (gotplt->output_section->vma
5265 + gotplt->output_offset
5266 + got_offset
5267 - resolved_plt->output_section->vma
5268 - resolved_plt->output_offset
5269 - plt_offset
5270 - plt_got_insn_size);
5271
5272 /* Check PC-relative offset overflow in PLT entry. */
5273 if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff)
5274 info->callbacks->einfo (_("%F%B: PC-relative offset overflow in PLT entry for `%s'\n"),
5275 output_bfd, h->root.root.string);
5276
5277 bfd_put_32 (output_bfd, plt_got_pcrel_offset,
5278 resolved_plt->contents + plt_offset + plt_got_offset);
5279
5280 /* Fill in the entry in the global offset table; initially this
5281 points to the second part of the PLT entry. */
5282 bfd_put_64 (output_bfd, (plt->output_section->vma
5283 + plt->output_offset
5284 + h->plt.offset + abed->plt_lazy_offset),
5285 gotplt->contents + got_offset);
5286
5287 /* Fill in the entry in the .rela.plt section. */
5288 rela.r_offset = (gotplt->output_section->vma
5289 + gotplt->output_offset
5290 + got_offset);
5291 if (h->dynindx == -1
5292 || ((bfd_link_executable (info)
5293 || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
5294 && h->def_regular
5295 && h->type == STT_GNU_IFUNC))
5296 {
5297 /* If an STT_GNU_IFUNC symbol is locally defined, generate
5298 R_X86_64_IRELATIVE instead of R_X86_64_JUMP_SLOT. */
5299 rela.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
5300 rela.r_addend = (h->root.u.def.value
5301 + h->root.u.def.section->output_section->vma
5302 + h->root.u.def.section->output_offset);
5303 /* R_X86_64_IRELATIVE comes last. */
5304 plt_index = htab->next_irelative_index--;
5305 }
5306 else
5307 {
5308 rela.r_info = htab->r_info (h->dynindx, R_X86_64_JUMP_SLOT);
5309 rela.r_addend = 0;
5310 plt_index = htab->next_jump_slot_index++;
5311 }
5312
5313 /* Don't fill PLT entry for static executables. */
5314 if (plt == htab->elf.splt)
5315 {
5316 bfd_vma plt0_offset = h->plt.offset + plt_plt_insn_end;
5317
5318 /* Put relocation index. */
5319 bfd_put_32 (output_bfd, plt_index,
5320 plt->contents + h->plt.offset + abed->plt_reloc_offset);
5321
5322 /* Put offset for jmp .PLT0 and check for overflow. We don't
5323 check relocation index for overflow since branch displacement
5324 will overflow first. */
5325 if (plt0_offset > 0x80000000)
5326 info->callbacks->einfo (_("%F%B: branch displacement overflow in PLT entry for `%s'\n"),
5327 output_bfd, h->root.root.string);
5328 bfd_put_32 (output_bfd, - plt0_offset,
5329 plt->contents + h->plt.offset + plt_plt_offset);
5330 }
5331
5332 bed = get_elf_backend_data (output_bfd);
5333 loc = relplt->contents + plt_index * bed->s->sizeof_rela;
5334 bed->s->swap_reloca_out (output_bfd, &rela, loc);
5335 }
5336 else if (eh->plt_got.offset != (bfd_vma) -1)
5337 {
5338 bfd_vma got_offset, plt_offset, plt_got_offset, plt_got_insn_size;
5339 asection *plt, *got;
5340 bfd_boolean got_after_plt;
5341 int32_t got_pcrel_offset;
5342 const bfd_byte *got_plt_entry;
5343
5344 /* Set the entry in the GOT procedure linkage table. */
5345 plt = htab->plt_got;
5346 got = htab->elf.sgot;
5347 got_offset = h->got.offset;
5348
5349 if (got_offset == (bfd_vma) -1
5350 || h->type == STT_GNU_IFUNC
5351 || plt == NULL
5352 || got == NULL)
5353 abort ();
5354
5355 /* Use the second PLT entry template for the GOT PLT since they
5356 are identical. */
5357 plt_got_insn_size = elf_x86_64_bnd_arch_bed.plt_got_insn_size;
5358 plt_got_offset = elf_x86_64_bnd_arch_bed.plt_got_offset;
5359 if (eh->has_bnd_reloc)
5360 got_plt_entry = elf_x86_64_bnd_plt2_entry;
5361 else
5362 {
5363 got_plt_entry = elf_x86_64_legacy_plt2_entry;
5364
5365 /* Subtract 1 since there is no BND prefix. */
5366 plt_got_insn_size -= 1;
5367 plt_got_offset -= 1;
5368 }
5369
5370 /* Fill in the entry in the GOT procedure linkage table. */
5371 plt_offset = eh->plt_got.offset;
5372 memcpy (plt->contents + plt_offset,
5373 got_plt_entry, sizeof (elf_x86_64_legacy_plt2_entry));
5374
5375 /* Store the offset of the GOT entry relative to the end of the
5376 PC-relative instruction that references it. */
5377 got_pcrel_offset = (got->output_section->vma
5378 + got->output_offset
5379 + got_offset
5380 - plt->output_section->vma
5381 - plt->output_offset
5382 - plt_offset
5383 - plt_got_insn_size);
5384
5385 /* Check PC-relative offset overflow in GOT PLT entry. */
5386 got_after_plt = got->output_section->vma > plt->output_section->vma;
5387 if ((got_after_plt && got_pcrel_offset < 0)
5388 || (!got_after_plt && got_pcrel_offset > 0))
5389 info->callbacks->einfo (_("%F%B: PC-relative offset overflow in GOT PLT entry for `%s'\n"),
5390 output_bfd, h->root.root.string);
5391
5392 bfd_put_32 (output_bfd, got_pcrel_offset,
5393 plt->contents + plt_offset + plt_got_offset);
5394 }
5395
5396 if (!h->def_regular
5397 && (h->plt.offset != (bfd_vma) -1
5398 || eh->plt_got.offset != (bfd_vma) -1))
5399 {
5400 /* Mark the symbol as undefined, rather than as defined in
5401 the .plt section. Leave the value if there were any
5402 relocations where pointer equality matters (this is a clue
5403 for the dynamic linker, to make function pointer
5404 comparisons work between an application and shared
5405 library), otherwise set it to zero. If a function is only
5406 called from a binary, there is no need to slow down
5407 shared libraries because of that. */
5408 sym->st_shndx = SHN_UNDEF;
5409 if (!h->pointer_equality_needed)
5410 sym->st_value = 0;
5411 }
5412
5413 if (h->got.offset != (bfd_vma) -1
5414 && ! GOT_TLS_GD_ANY_P (elf_x86_64_hash_entry (h)->tls_type)
5415 && elf_x86_64_hash_entry (h)->tls_type != GOT_TLS_IE)
5416 {
5417 Elf_Internal_Rela rela;
5418
5419 /* This symbol has an entry in the global offset table. Set it
5420 up. */
5421 if (htab->elf.sgot == NULL || htab->elf.srelgot == NULL)
5422 abort ();
5423
5424 rela.r_offset = (htab->elf.sgot->output_section->vma
5425 + htab->elf.sgot->output_offset
5426 + (h->got.offset &~ (bfd_vma) 1));
5427
5428 /* If this is a static link, or it is a -Bsymbolic link and the
5429 symbol is defined locally or was forced to be local because
5430 of a version file, we just want to emit a RELATIVE reloc.
5431 The entry in the global offset table will already have been
5432 initialized in the relocate_section function. */
5433 if (h->def_regular
5434 && h->type == STT_GNU_IFUNC)
5435 {
5436 if (bfd_link_pic (info))
5437 {
5438 /* Generate R_X86_64_GLOB_DAT. */
5439 goto do_glob_dat;
5440 }
5441 else
5442 {
5443 asection *plt;
5444
5445 if (!h->pointer_equality_needed)
5446 abort ();
5447
5448 /* For a non-shared object we can't use .got.plt, which
5449 contains the real function address, if we need pointer
5450 equality. We load the GOT entry with the PLT entry instead. */
5451 plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt;
5452 bfd_put_64 (output_bfd, (plt->output_section->vma
5453 + plt->output_offset
5454 + h->plt.offset),
5455 htab->elf.sgot->contents + h->got.offset);
5456 return TRUE;
5457 }
5458 }
5459 else if (bfd_link_pic (info)
5460 && SYMBOL_REFERENCES_LOCAL (info, h))
5461 {
5462 if (!h->def_regular)
5463 return FALSE;
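/* Bit 0 set means relocate_section has already initialized this
   GOT entry; only the RELATIVE reloc needs to be emitted here.  */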
5464 BFD_ASSERT((h->got.offset & 1) != 0);
5465 rela.r_info = htab->r_info (0, R_X86_64_RELATIVE);
5466 rela.r_addend = (h->root.u.def.value
5467 + h->root.u.def.section->output_section->vma
5468 + h->root.u.def.section->output_offset);
5469 }
5470 else
5471 {
5472 BFD_ASSERT((h->got.offset & 1) == 0);
5473 do_glob_dat:
5474 bfd_put_64 (output_bfd, (bfd_vma) 0,
5475 htab->elf.sgot->contents + h->got.offset);
5476 rela.r_info = htab->r_info (h->dynindx, R_X86_64_GLOB_DAT);
5477 rela.r_addend = 0;
5478 }
5479
5480 elf_append_rela (output_bfd, htab->elf.srelgot, &rela);
5481 }
5482
5483 if (h->needs_copy)
5484 {
5485 Elf_Internal_Rela rela;
5486
5487 /* This symbol needs a copy reloc. Set it up. */
5488
5489 if (h->dynindx == -1
5490 || (h->root.type != bfd_link_hash_defined
5491 && h->root.type != bfd_link_hash_defweak)
5492 || htab->srelbss == NULL)
5493 abort ();
5494
5495 rela.r_offset = (h->root.u.def.value
5496 + h->root.u.def.section->output_section->vma
5497 + h->root.u.def.section->output_offset);
5498 rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY);
5499 rela.r_addend = 0;
5500 elf_append_rela (output_bfd, htab->srelbss, &rela);
5501 }
5502
5503 return TRUE;
5504 }
5505
5506 /* Finish up local dynamic symbol handling. We set the contents of
5507 various dynamic sections here. */
5508
5509 static bfd_boolean
5510 elf_x86_64_finish_local_dynamic_symbol (void **slot, void *inf)
5511 {
5512 struct elf_link_hash_entry *h
5513 = (struct elf_link_hash_entry *) *slot;
5514 struct bfd_link_info *info
5515 = (struct bfd_link_info *) inf;
5516
5517 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
5518 info, h, NULL);
5519 }
5520
5521 /* Used to decide how to sort relocs in an optimal manner for the
5522 dynamic linker, before writing them out. */
5523
5524 static enum elf_reloc_type_class
5525 elf_x86_64_reloc_type_class (const struct bfd_link_info *info,
5526 const asection *rel_sec ATTRIBUTE_UNUSED,
5527 const Elf_Internal_Rela *rela)
5528 {
5529 bfd *abfd = info->output_bfd;
5530 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
5531 struct elf_x86_64_link_hash_table *htab = elf_x86_64_hash_table (info);
5532 unsigned long r_symndx = htab->r_sym (rela->r_info);
5533 Elf_Internal_Sym sym;
5534
5535 if (htab->elf.dynsym == NULL
5536 || !bed->s->swap_symbol_in (abfd,
5537 (htab->elf.dynsym->contents
5538 + r_symndx * bed->s->sizeof_sym),
5539 0, &sym))
5540 abort ();
5541
5542 /* Check relocation against STT_GNU_IFUNC symbol. */
5543 if (ELF_ST_TYPE (sym.st_info) == STT_GNU_IFUNC)
5544 return reloc_class_ifunc;
5545
5546 switch ((int) ELF32_R_TYPE (rela->r_info))
5547 {
5548 case R_X86_64_RELATIVE:
5549 case R_X86_64_RELATIVE64:
5550 return reloc_class_relative;
5551 case R_X86_64_JUMP_SLOT:
5552 return reloc_class_plt;
5553 case R_X86_64_COPY:
5554 return reloc_class_copy;
5555 default:
5556 return reloc_class_normal;
5557 }
5558 }
5559
5560 /* Finish up the dynamic sections. */
5561
5562 static bfd_boolean
5563 elf_x86_64_finish_dynamic_sections (bfd *output_bfd,
5564 struct bfd_link_info *info)
5565 {
5566 struct elf_x86_64_link_hash_table *htab;
5567 bfd *dynobj;
5568 asection *sdyn;
5569 const struct elf_x86_64_backend_data *abed;
5570
5571 htab = elf_x86_64_hash_table (info);
5572 if (htab == NULL)
5573 return FALSE;
5574
5575 /* Use MPX backend data in case of BND relocations. Use the
5576 .plt_bnd section only if there is a .plt section. */
5577 abed = (htab->elf.splt != NULL && htab->plt_bnd != NULL
5578 ? &elf_x86_64_bnd_arch_bed
5579 : get_elf_x86_64_backend_data (output_bfd));
5580
5581 dynobj = htab->elf.dynobj;
5582 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
5583
5584 if (htab->elf.dynamic_sections_created)
5585 {
5586 bfd_byte *dyncon, *dynconend;
5587 const struct elf_backend_data *bed;
5588 bfd_size_type sizeof_dyn;
5589
5590 if (sdyn == NULL || htab->elf.sgot == NULL)
5591 abort ();
5592
5593 bed = get_elf_backend_data (dynobj);
5594 sizeof_dyn = bed->s->sizeof_dyn;
5595 dyncon = sdyn->contents;
5596 dynconend = sdyn->contents + sdyn->size;
5597 for (; dyncon < dynconend; dyncon += sizeof_dyn)
5598 {
5599 Elf_Internal_Dyn dyn;
5600 asection *s;
5601
5602 (*bed->s->swap_dyn_in) (dynobj, dyncon, &dyn);
5603
5604 switch (dyn.d_tag)
5605 {
5606 default:
5607 continue;
5608
5609 case DT_PLTGOT:
5610 s = htab->elf.sgotplt;
5611 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
5612 break;
5613
5614 case DT_JMPREL:
5615 dyn.d_un.d_ptr = htab->elf.srelplt->output_section->vma;
5616 break;
5617
5618 case DT_PLTRELSZ:
5619 s = htab->elf.srelplt->output_section;
5620 dyn.d_un.d_val = s->size;
5621 break;
5622
5623 case DT_RELASZ:
5624 /* The procedure linkage table relocs (DT_JMPREL) should
5625 not be included in the overall relocs (DT_RELA).
5626 Therefore, we override the DT_RELASZ entry here to
5627 make it not include the JMPREL relocs. Since the
5628 linker script arranges for .rela.plt to follow all
5629 other relocation sections, we don't have to worry
5630 about changing the DT_RELA entry. */
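	      /* For example (sizes purely illustrative): with 0x600
		 bytes of .rela.dyn and 0x180 bytes of .rela.plt,
		 DT_RELASZ drops from 0x780 to 0x600.  */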
5631 if (htab->elf.srelplt != NULL)
5632 {
5633 s = htab->elf.srelplt->output_section;
5634 dyn.d_un.d_val -= s->size;
5635 }
5636 break;
5637
5638 case DT_TLSDESC_PLT:
5639 s = htab->elf.splt;
5640 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
5641 + htab->tlsdesc_plt;
5642 break;
5643
5644 case DT_TLSDESC_GOT:
5645 s = htab->elf.sgot;
5646 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
5647 + htab->tlsdesc_got;
5648 break;
5649 }
5650
5651 (*bed->s->swap_dyn_out) (output_bfd, &dyn, dyncon);
5652 }
5653
5654 /* Fill in the special first entry in the procedure linkage table. */
5655 if (htab->elf.splt && htab->elf.splt->size > 0)
5656 {
5657 /* Fill in the first entry in the procedure linkage table. */
5658 memcpy (htab->elf.splt->contents,
5659 abed->plt0_entry, abed->plt_entry_size);
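	  /* For the standard (non-BND, non-NaCl) layout, plt0_entry
	     consists of two 6-byte instructions followed by padding:
	       ff 35 <disp32>   pushq GOT+8(%rip)
	       ff 25 <disp32>   jmpq  *GOT+16(%rip)
	     so plt0_got1_offset and plt0_got2_offset locate the two
	     displacement fields and plt0_got2_insn_end is the offset
	     just past the jmpq.  */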
5660 	  /* Add the offset for pushq GOT+8(%rip); since the instruction
5661 	     uses 6 bytes, subtract this value.  */
5662 bfd_put_32 (output_bfd,
5663 (htab->elf.sgotplt->output_section->vma
5664 + htab->elf.sgotplt->output_offset
5665 + 8
5666 - htab->elf.splt->output_section->vma
5667 - htab->elf.splt->output_offset
5668 - 6),
5669 htab->elf.splt->contents + abed->plt0_got1_offset);
5670 /* Add offset for the PC-relative instruction accessing GOT+16,
5671 subtracting the offset to the end of that instruction. */
5672 bfd_put_32 (output_bfd,
5673 (htab->elf.sgotplt->output_section->vma
5674 + htab->elf.sgotplt->output_offset
5675 + 16
5676 - htab->elf.splt->output_section->vma
5677 - htab->elf.splt->output_offset
5678 - abed->plt0_got2_insn_end),
5679 htab->elf.splt->contents + abed->plt0_got2_offset);
5680
5681 elf_section_data (htab->elf.splt->output_section)
5682 ->this_hdr.sh_entsize = abed->plt_entry_size;
5683
5684 if (htab->tlsdesc_plt)
5685 {
5686 bfd_put_64 (output_bfd, (bfd_vma) 0,
5687 htab->elf.sgot->contents + htab->tlsdesc_got);
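	      /* This is the GOT slot that DT_TLSDESC_GOT refers to.  It
		 is cleared here; at run time the dynamic linker normally
		 stores the address of its lazy TLS descriptor resolver
		 in it, and the DT_TLSDESC_PLT trampoline written below
		 jumps through it.  */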
5688
5689 memcpy (htab->elf.splt->contents + htab->tlsdesc_plt,
5690 abed->plt0_entry, abed->plt_entry_size);
5691
5692 	      /* Add the offset for pushq GOT+8(%rip); since the
5693 		 instruction uses 6 bytes, subtract this value.  */
5694 bfd_put_32 (output_bfd,
5695 (htab->elf.sgotplt->output_section->vma
5696 + htab->elf.sgotplt->output_offset
5697 + 8
5698 - htab->elf.splt->output_section->vma
5699 - htab->elf.splt->output_offset
5700 - htab->tlsdesc_plt
5701 - 6),
5702 htab->elf.splt->contents
5703 + htab->tlsdesc_plt + abed->plt0_got1_offset);
5704 	      /* Add the offset for the PC-relative instruction accessing GOT+TDG,
5705 		 where TDG stands for htab->tlsdesc_got, subtracting the offset
5706 		 to the end of that instruction.  */
5707 bfd_put_32 (output_bfd,
5708 (htab->elf.sgot->output_section->vma
5709 + htab->elf.sgot->output_offset
5710 + htab->tlsdesc_got
5711 - htab->elf.splt->output_section->vma
5712 - htab->elf.splt->output_offset
5713 - htab->tlsdesc_plt
5714 - abed->plt0_got2_insn_end),
5715 htab->elf.splt->contents
5716 + htab->tlsdesc_plt + abed->plt0_got2_offset);
5717 }
5718 }
5719 }
5720
5721 if (htab->plt_bnd != NULL)
5722 elf_section_data (htab->plt_bnd->output_section)
5723 ->this_hdr.sh_entsize = sizeof (elf_x86_64_bnd_plt2_entry);
5724
5725 if (htab->elf.sgotplt)
5726 {
5727 if (bfd_is_abs_section (htab->elf.sgotplt->output_section))
5728 {
5729 (*_bfd_error_handler)
5730 (_("discarded output section: `%A'"), htab->elf.sgotplt);
5731 return FALSE;
5732 }
5733
5734 /* Fill in the first three entries in the global offset table. */
5735 if (htab->elf.sgotplt->size > 0)
5736 {
5737 /* Set the first entry in the global offset table to the address of
5738 the dynamic section. */
5739 if (sdyn == NULL)
5740 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents);
5741 else
5742 bfd_put_64 (output_bfd,
5743 sdyn->output_section->vma + sdyn->output_offset,
5744 htab->elf.sgotplt->contents);
5745 /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
5746 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents + GOT_ENTRY_SIZE);
5747 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents + GOT_ENTRY_SIZE*2);
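	  /* GOT[1] and GOT[2] are cleared here; at run time the dynamic
	     linker (e.g. glibc's ld.so) normally stores its link map
	     pointer in GOT[1] and the address of its lazy resolver in
	     GOT[2].  */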
5748 }
5749
5750 elf_section_data (htab->elf.sgotplt->output_section)->this_hdr.sh_entsize =
5751 GOT_ENTRY_SIZE;
5752 }
5753
5754 /* Adjust .eh_frame for .plt section. */
5755 if (htab->plt_eh_frame != NULL
5756 && htab->plt_eh_frame->contents != NULL)
5757 {
5758 if (htab->elf.splt != NULL
5759 && htab->elf.splt->size != 0
5760 && (htab->elf.splt->flags & SEC_EXCLUDE) == 0
5761 && htab->elf.splt->output_section != NULL
5762 && htab->plt_eh_frame->output_section != NULL)
5763 {
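	  /* Patch the FDE's initial location field, a PC-relative
	     sdata4 value at PLT_FDE_START_OFFSET, so that the FDE's
	     code range starts at the beginning of the .plt output
	     section.  */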
5764 bfd_vma plt_start = htab->elf.splt->output_section->vma;
5765 bfd_vma eh_frame_start = htab->plt_eh_frame->output_section->vma
5766 + htab->plt_eh_frame->output_offset
5767 + PLT_FDE_START_OFFSET;
5768 bfd_put_signed_32 (dynobj, plt_start - eh_frame_start,
5769 htab->plt_eh_frame->contents
5770 + PLT_FDE_START_OFFSET);
5771 }
5772 if (htab->plt_eh_frame->sec_info_type == SEC_INFO_TYPE_EH_FRAME)
5773 {
5774 if (! _bfd_elf_write_section_eh_frame (output_bfd, info,
5775 htab->plt_eh_frame,
5776 htab->plt_eh_frame->contents))
5777 return FALSE;
5778 }
5779 }
5780
5781 if (htab->elf.sgot && htab->elf.sgot->size > 0)
5782 elf_section_data (htab->elf.sgot->output_section)->this_hdr.sh_entsize
5783 = GOT_ENTRY_SIZE;
5784
5785 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
5786 htab_traverse (htab->loc_hash_table,
5787 elf_x86_64_finish_local_dynamic_symbol,
5788 info);
5789
5790 return TRUE;
5791 }
5792
5793 /* Return an array of PLT entry symbol values. */
5794
5795 static bfd_vma *
5796 elf_x86_64_get_plt_sym_val (bfd *abfd, asymbol **dynsyms, asection *plt,
5797 asection *relplt)
5798 {
5799 bfd_boolean (*slurp_relocs) (bfd *, asection *, asymbol **, bfd_boolean);
5800 arelent *p;
5801 long count, i;
5802 bfd_vma *plt_sym_val;
5803 bfd_vma plt_offset;
5804 bfd_byte *plt_contents;
5805 const struct elf_x86_64_backend_data *bed;
5806 Elf_Internal_Shdr *hdr;
5807 asection *plt_bnd;
5808
5809 /* Get the .plt section contents. PLT passed down may point to the
5810 .plt.bnd section. Make sure that PLT always points to the .plt
5811 section. */
5812 plt_bnd = bfd_get_section_by_name (abfd, ".plt.bnd");
5813 if (plt_bnd)
5814 {
5815 if (plt != plt_bnd)
5816 abort ();
5817 plt = bfd_get_section_by_name (abfd, ".plt");
5818 if (plt == NULL)
5819 abort ();
5820 bed = &elf_x86_64_bnd_arch_bed;
5821 }
5822 else
5823 bed = get_elf_x86_64_backend_data (abfd);
5824
5825 plt_contents = (bfd_byte *) bfd_malloc (plt->size);
5826 if (plt_contents == NULL)
5827 return NULL;
5828 if (!bfd_get_section_contents (abfd, (asection *) plt,
5829 plt_contents, 0, plt->size))
5830 {
5831 bad_return:
5832 free (plt_contents);
5833 return NULL;
5834 }
5835
5836 slurp_relocs = get_elf_backend_data (abfd)->s->slurp_reloc_table;
5837 if (! (*slurp_relocs) (abfd, relplt, dynsyms, TRUE))
5838 goto bad_return;
5839
5840 hdr = &elf_section_data (relplt)->this_hdr;
5841 count = relplt->size / hdr->sh_entsize;
5842
5843 plt_sym_val = (bfd_vma *) bfd_malloc (sizeof (bfd_vma) * count);
5844 if (plt_sym_val == NULL)
5845 goto bad_return;
5846
5847 for (i = 0; i < count; i++)
5848 plt_sym_val[i] = -1;
5849
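  /* Start at plt_entry_size: the first PLT entry is the reserved PLT0
     header, and the entries that correspond to dynamic relocations
     follow it.  */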
5850 plt_offset = bed->plt_entry_size;
5851 p = relplt->relocation;
5852 for (i = 0; i < count; i++, p++)
5853 {
5854 long reloc_index;
5855
5856       /* Skip unknown relocations.  */
5857 if (p->howto == NULL)
5858 continue;
5859
5860 if (p->howto->type != R_X86_64_JUMP_SLOT
5861 && p->howto->type != R_X86_64_IRELATIVE)
5862 continue;
5863
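      /* The pushq immediate in this PLT entry holds the index of the
	 entry's relocation in .rela.plt.  */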
5864 reloc_index = H_GET_32 (abfd, (plt_contents + plt_offset
5865 + bed->plt_reloc_offset));
5866 if (reloc_index >= count)
5867 abort ();
5868 if (plt_bnd)
5869 {
5870 	  /* This is the index into the .plt section.  */
5871 long plt_index = plt_offset / bed->plt_entry_size;
5872 	  /* Store the .plt.bnd VMA plus the offset of the matching entry.  */
5873 plt_sym_val[reloc_index] =
5874 (plt_bnd->vma
5875 + (plt_index - 1) * sizeof (elf_x86_64_legacy_plt2_entry));
5876 }
5877 else
5878 plt_sym_val[reloc_index] = plt->vma + plt_offset;
5879 plt_offset += bed->plt_entry_size;
5880
5881 /* PR binutils/18437: Skip extra relocations in the .rela.plt
5882 section. */
5883 if (plt_offset >= plt->size)
5884 break;
5885 }
5886
5887 free (plt_contents);
5888
5889 return plt_sym_val;
5890 }
5891
5892 /* Similar to _bfd_elf_get_synthetic_symtab, with .plt.bnd section
5893 support. */
5894
5895 static long
5896 elf_x86_64_get_synthetic_symtab (bfd *abfd,
5897 long symcount,
5898 asymbol **syms,
5899 long dynsymcount,
5900 asymbol **dynsyms,
5901 asymbol **ret)
5902 {
5903 /* Pass the .plt.bnd section to _bfd_elf_ifunc_get_synthetic_symtab
5904 as PLT if it exists. */
5905 asection *plt = bfd_get_section_by_name (abfd, ".plt.bnd");
5906 if (plt == NULL)
5907 plt = bfd_get_section_by_name (abfd, ".plt");
5908 return _bfd_elf_ifunc_get_synthetic_symtab (abfd, symcount, syms,
5909 dynsymcount, dynsyms, ret,
5910 plt,
5911 elf_x86_64_get_plt_sym_val);
5912 }
5913
5914 /* Handle an x86-64 specific section when reading an object file. This
5915 is called when elfcode.h finds a section with an unknown type. */
5916
5917 static bfd_boolean
5918 elf_x86_64_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr,
5919 const char *name, int shindex)
5920 {
5921 if (hdr->sh_type != SHT_X86_64_UNWIND)
5922 return FALSE;
5923
5924 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
5925 return FALSE;
5926
5927 return TRUE;
5928 }
5929
5930 /* Hook called by the linker routine which adds symbols from an object
5931 file. We use it to put SHN_X86_64_LCOMMON items in .lbss, instead
5932 of .bss. */
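/* Large common symbols are typically produced for objects compiled
   with the medium or large x86-64 code model (e.g. GCC's
   -mcmodel=medium places data above -mlarge-data-threshold in the
   large sections).  */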
5933
5934 static bfd_boolean
5935 elf_x86_64_add_symbol_hook (bfd *abfd,
5936 struct bfd_link_info *info,
5937 Elf_Internal_Sym *sym,
5938 const char **namep ATTRIBUTE_UNUSED,
5939 flagword *flagsp ATTRIBUTE_UNUSED,
5940 asection **secp,
5941 bfd_vma *valp)
5942 {
5943 asection *lcomm;
5944
5945 switch (sym->st_shndx)
5946 {
5947 case SHN_X86_64_LCOMMON:
5948 lcomm = bfd_get_section_by_name (abfd, "LARGE_COMMON");
5949 if (lcomm == NULL)
5950 {
5951 lcomm = bfd_make_section_with_flags (abfd,
5952 "LARGE_COMMON",
5953 (SEC_ALLOC
5954 | SEC_IS_COMMON
5955 | SEC_LINKER_CREATED));
5956 if (lcomm == NULL)
5957 return FALSE;
5958 elf_section_flags (lcomm) |= SHF_X86_64_LARGE;
5959 }
5960 *secp = lcomm;
5961 *valp = sym->st_size;
5962 return TRUE;
5963 }
5964
5965 if (ELF_ST_BIND (sym->st_info) == STB_GNU_UNIQUE
5966 && (abfd->flags & DYNAMIC) == 0
5967 && bfd_get_flavour (info->output_bfd) == bfd_target_elf_flavour)
5968 elf_tdata (info->output_bfd)->has_gnu_symbols
5969 |= elf_gnu_symbol_unique;
5970
5971 return TRUE;
5972 }
5973
5974
5975 /* Given a BFD section, try to locate the corresponding ELF section
5976 index. */
5977
5978 static bfd_boolean
5979 elf_x86_64_elf_section_from_bfd_section (bfd *abfd ATTRIBUTE_UNUSED,
5980 asection *sec, int *index_return)
5981 {
5982 if (sec == &_bfd_elf_large_com_section)
5983 {
5984 *index_return = SHN_X86_64_LCOMMON;
5985 return TRUE;
5986 }
5987 return FALSE;
5988 }
5989
5990 /* Process a symbol. */
5991
5992 static void
5993 elf_x86_64_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
5994 asymbol *asym)
5995 {
5996 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
5997
5998 switch (elfsym->internal_elf_sym.st_shndx)
5999 {
6000 case SHN_X86_64_LCOMMON:
6001 asym->section = &_bfd_elf_large_com_section;
6002 asym->value = elfsym->internal_elf_sym.st_size;
6003       /* Common symbols don't set BSF_GLOBAL.  */
6004 asym->flags &= ~BSF_GLOBAL;
6005 break;
6006 }
6007 }
6008
6009 static bfd_boolean
6010 elf_x86_64_common_definition (Elf_Internal_Sym *sym)
6011 {
6012 return (sym->st_shndx == SHN_COMMON
6013 || sym->st_shndx == SHN_X86_64_LCOMMON);
6014 }
6015
6016 static unsigned int
6017 elf_x86_64_common_section_index (asection *sec)
6018 {
6019 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
6020 return SHN_COMMON;
6021 else
6022 return SHN_X86_64_LCOMMON;
6023 }
6024
6025 static asection *
6026 elf_x86_64_common_section (asection *sec)
6027 {
6028 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
6029 return bfd_com_section_ptr;
6030 else
6031 return &_bfd_elf_large_com_section;
6032 }
6033
6034 static bfd_boolean
6035 elf_x86_64_merge_symbol (struct elf_link_hash_entry *h,
6036 const Elf_Internal_Sym *sym,
6037 asection **psec,
6038 bfd_boolean newdef,
6039 bfd_boolean olddef,
6040 bfd *oldbfd,
6041 const asection *oldsec)
6042 {
6043 /* A normal common symbol and a large common symbol result in a
6044 normal common symbol. We turn the large common symbol into a
6045 normal one. */
6046 if (!olddef
6047 && h->root.type == bfd_link_hash_common
6048 && !newdef
6049 && bfd_is_com_section (*psec)
6050 && oldsec != *psec)
6051 {
6052 if (sym->st_shndx == SHN_COMMON
6053 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) != 0)
6054 {
6055 h->root.u.c.p->section
6056 = bfd_make_section_old_way (oldbfd, "COMMON");
6057 h->root.u.c.p->section->flags = SEC_ALLOC;
6058 }
6059 else if (sym->st_shndx == SHN_X86_64_LCOMMON
6060 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) == 0)
6061 *psec = bfd_com_section_ptr;
6062 }
6063
6064 return TRUE;
6065 }
6066
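/* Return the number of additional program headers needed for the
   large data sections: one loadable segment for .lrodata and one for
   .ldata, when present.  */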
6067 static int
6068 elf_x86_64_additional_program_headers (bfd *abfd,
6069 struct bfd_link_info *info ATTRIBUTE_UNUSED)
6070 {
6071 asection *s;
6072 int count = 0;
6073
6074 /* Check to see if we need a large readonly segment. */
6075 s = bfd_get_section_by_name (abfd, ".lrodata");
6076 if (s && (s->flags & SEC_LOAD))
6077 count++;
6078
6079   /* Check to see if we need a large data segment.  Since the .lbss
6080      section is placed right after the .bss section, there should be no
6081      need for a large data segment just because of .lbss.  */
6082 s = bfd_get_section_by_name (abfd, ".ldata");
6083 if (s && (s->flags & SEC_LOAD))
6084 count++;
6085
6086 return count;
6087 }
6088
6089 /* Return TRUE if the symbol should be hashed in the `.gnu.hash' section.  */
6090
6091 static bfd_boolean
6092 elf_x86_64_hash_symbol (struct elf_link_hash_entry *h)
6093 {
6094 if (h->plt.offset != (bfd_vma) -1
6095 && !h->def_regular
6096 && !h->pointer_equality_needed)
6097 return FALSE;
6098
6099 return _bfd_elf_hash_symbol (h);
6100 }
6101
6102 /* Return TRUE iff relocations for INPUT are compatible with OUTPUT. */
6103
6104 static bfd_boolean
6105 elf_x86_64_relocs_compatible (const bfd_target *input,
6106 const bfd_target *output)
6107 {
6108 return ((xvec_get_elf_backend_data (input)->s->elfclass
6109 == xvec_get_elf_backend_data (output)->s->elfclass)
6110 && _bfd_elf_relocs_compatible (input, output));
6111 }
6112
6113 static const struct bfd_elf_special_section
6114 elf_x86_64_special_sections[]=
6115 {
6116 { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
6117 { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
6118 { STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE},
6119 { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
6120 { STRING_COMMA_LEN (".ldata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
6121 { STRING_COMMA_LEN (".lrodata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
6122 { NULL, 0, 0, 0, 0 }
6123 };
6124
6125 #define TARGET_LITTLE_SYM x86_64_elf64_vec
6126 #define TARGET_LITTLE_NAME "elf64-x86-64"
6127 #define ELF_ARCH bfd_arch_i386
6128 #define ELF_TARGET_ID X86_64_ELF_DATA
6129 #define ELF_MACHINE_CODE EM_X86_64
6130 #define ELF_MAXPAGESIZE 0x200000
6131 #define ELF_MINPAGESIZE 0x1000
6132 #define ELF_COMMONPAGESIZE 0x1000
6133
6134 #define elf_backend_can_gc_sections 1
6135 #define elf_backend_can_refcount 1
6136 #define elf_backend_want_got_plt 1
6137 #define elf_backend_plt_readonly 1
6138 #define elf_backend_want_plt_sym 0
6139 #define elf_backend_got_header_size (GOT_ENTRY_SIZE*3)
6140 #define elf_backend_rela_normal 1
6141 #define elf_backend_plt_alignment 4
6142 #define elf_backend_extern_protected_data 1
6143
6144 #define elf_info_to_howto elf_x86_64_info_to_howto
6145
6146 #define bfd_elf64_bfd_link_hash_table_create \
6147 elf_x86_64_link_hash_table_create
6148 #define bfd_elf64_bfd_reloc_type_lookup elf_x86_64_reloc_type_lookup
6149 #define bfd_elf64_bfd_reloc_name_lookup \
6150 elf_x86_64_reloc_name_lookup
6151
6152 #define elf_backend_adjust_dynamic_symbol elf_x86_64_adjust_dynamic_symbol
6153 #define elf_backend_relocs_compatible elf_x86_64_relocs_compatible
6154 #define elf_backend_check_relocs elf_x86_64_check_relocs
6155 #define elf_backend_copy_indirect_symbol elf_x86_64_copy_indirect_symbol
6156 #define elf_backend_create_dynamic_sections elf_x86_64_create_dynamic_sections
6157 #define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections
6158 #define elf_backend_finish_dynamic_symbol elf_x86_64_finish_dynamic_symbol
6159 #define elf_backend_gc_mark_hook elf_x86_64_gc_mark_hook
6160 #define elf_backend_gc_sweep_hook elf_x86_64_gc_sweep_hook
6161 #define elf_backend_grok_prstatus elf_x86_64_grok_prstatus
6162 #define elf_backend_grok_psinfo elf_x86_64_grok_psinfo
6163 #ifdef CORE_HEADER
6164 #define elf_backend_write_core_note elf_x86_64_write_core_note
6165 #endif
6166 #define elf_backend_reloc_type_class elf_x86_64_reloc_type_class
6167 #define elf_backend_relocate_section elf_x86_64_relocate_section
6168 #define elf_backend_size_dynamic_sections elf_x86_64_size_dynamic_sections
6169 #define elf_backend_always_size_sections elf_x86_64_always_size_sections
6170 #define elf_backend_init_index_section _bfd_elf_init_1_index_section
6171 #define elf_backend_object_p elf64_x86_64_elf_object_p
6172 #define bfd_elf64_mkobject elf_x86_64_mkobject
6173 #define bfd_elf64_get_synthetic_symtab elf_x86_64_get_synthetic_symtab
6174
6175 #define elf_backend_section_from_shdr \
6176 elf_x86_64_section_from_shdr
6177
6178 #define elf_backend_section_from_bfd_section \
6179 elf_x86_64_elf_section_from_bfd_section
6180 #define elf_backend_add_symbol_hook \
6181 elf_x86_64_add_symbol_hook
6182 #define elf_backend_symbol_processing \
6183 elf_x86_64_symbol_processing
6184 #define elf_backend_common_section_index \
6185 elf_x86_64_common_section_index
6186 #define elf_backend_common_section \
6187 elf_x86_64_common_section
6188 #define elf_backend_common_definition \
6189 elf_x86_64_common_definition
6190 #define elf_backend_merge_symbol \
6191 elf_x86_64_merge_symbol
6192 #define elf_backend_special_sections \
6193 elf_x86_64_special_sections
6194 #define elf_backend_additional_program_headers \
6195 elf_x86_64_additional_program_headers
6196 #define elf_backend_hash_symbol \
6197 elf_x86_64_hash_symbol
6198
6199 #include "elf64-target.h"
6200
6201 /* CloudABI support. */
6202
6203 #undef TARGET_LITTLE_SYM
6204 #define TARGET_LITTLE_SYM x86_64_elf64_cloudabi_vec
6205 #undef TARGET_LITTLE_NAME
6206 #define TARGET_LITTLE_NAME "elf64-x86-64-cloudabi"
6207
6208 #undef ELF_OSABI
6209 #define ELF_OSABI ELFOSABI_CLOUDABI
6210
6211 #undef elf64_bed
6212 #define elf64_bed elf64_x86_64_cloudabi_bed
6213
6214 #include "elf64-target.h"
6215
6216 /* FreeBSD support. */
6217
6218 #undef TARGET_LITTLE_SYM
6219 #define TARGET_LITTLE_SYM x86_64_elf64_fbsd_vec
6220 #undef TARGET_LITTLE_NAME
6221 #define TARGET_LITTLE_NAME "elf64-x86-64-freebsd"
6222
6223 #undef ELF_OSABI
6224 #define ELF_OSABI ELFOSABI_FREEBSD
6225
6226 #undef elf64_bed
6227 #define elf64_bed elf64_x86_64_fbsd_bed
6228
6229 #include "elf64-target.h"
6230
6231 /* Solaris 2 support. */
6232
6233 #undef TARGET_LITTLE_SYM
6234 #define TARGET_LITTLE_SYM x86_64_elf64_sol2_vec
6235 #undef TARGET_LITTLE_NAME
6236 #define TARGET_LITTLE_NAME "elf64-x86-64-sol2"
6237
6238 /* Restore default: we cannot use ELFOSABI_SOLARIS, otherwise ELFOSABI_NONE
6239 objects won't be recognized. */
6240 #undef ELF_OSABI
6241
6242 #undef elf64_bed
6243 #define elf64_bed elf64_x86_64_sol2_bed
6244
6245 /* The 64-bit static TLS arena size is rounded to the nearest 16-byte
6246 boundary. */
6247 #undef elf_backend_static_tls_alignment
6248 #define elf_backend_static_tls_alignment 16
6249
6250 /* The Solaris 2 ABI requires a plt symbol on all platforms.
6251
6252 Cf. Linker and Libraries Guide, Ch. 2, Link-Editor, Generating the Output
6253 File, p.63. */
6254 #undef elf_backend_want_plt_sym
6255 #define elf_backend_want_plt_sym 1
6256
6257 #include "elf64-target.h"
6258
6259 /* Native Client support. */
6260
6261 static bfd_boolean
6262 elf64_x86_64_nacl_elf_object_p (bfd *abfd)
6263 {
6264 /* Set the right machine number for a NaCl x86-64 ELF64 file. */
6265 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64_nacl);
6266 return TRUE;
6267 }
6268
6269 #undef TARGET_LITTLE_SYM
6270 #define TARGET_LITTLE_SYM x86_64_elf64_nacl_vec
6271 #undef TARGET_LITTLE_NAME
6272 #define TARGET_LITTLE_NAME "elf64-x86-64-nacl"
6273 #undef elf64_bed
6274 #define elf64_bed elf64_x86_64_nacl_bed
6275
6276 #undef ELF_MAXPAGESIZE
6277 #undef ELF_MINPAGESIZE
6278 #undef ELF_COMMONPAGESIZE
6279 #define ELF_MAXPAGESIZE 0x10000
6280 #define ELF_MINPAGESIZE 0x10000
6281 #define ELF_COMMONPAGESIZE 0x10000
6282
6283 /* Restore defaults. */
6284 #undef ELF_OSABI
6285 #undef elf_backend_static_tls_alignment
6286 #undef elf_backend_want_plt_sym
6287 #define elf_backend_want_plt_sym 0
6288
6289 /* NaCl uses substantially different PLT entries for the same effects. */
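/* Under NaCl's software fault isolation, %r15 holds the sandbox base
   and indirect branch targets must be 32-byte aligned, so each PLT
   entry masks the low bits of the 32-bit target in %r11d and adds
   %r15 before the indirect jump.  */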
6290
6291 #undef elf_backend_plt_alignment
6292 #define elf_backend_plt_alignment 5
6293 #define NACL_PLT_ENTRY_SIZE 64
6294 #define NACLMASK 0xe0 /* 32-byte alignment mask. */
6295
6296 static const bfd_byte elf_x86_64_nacl_plt0_entry[NACL_PLT_ENTRY_SIZE] =
6297 {
6298 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
6299 0x4c, 0x8b, 0x1d, 16, 0, 0, 0, /* mov GOT+16(%rip), %r11 */
6300 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
6301 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
6302 0x41, 0xff, 0xe3, /* jmpq *%r11 */
6303
6304 /* 9-byte nop sequence to pad out to the next 32-byte boundary. */
6305 0x66, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw 0x0(%rax,%rax,1) */
6306
6307 /* 32 bytes of nop to pad out to the standard size. */
6308 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
6309 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6310 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
6311 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6312 0x66, /* excess data32 prefix */
6313 0x90 /* nop */
6314 };
6315
6316 static const bfd_byte elf_x86_64_nacl_plt_entry[NACL_PLT_ENTRY_SIZE] =
6317 {
6318 0x4c, 0x8b, 0x1d, 0, 0, 0, 0, /* mov name@GOTPCREL(%rip),%r11 */
6319 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
6320 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
6321 0x41, 0xff, 0xe3, /* jmpq *%r11 */
6322
6323 /* 15-byte nop sequence to pad out to the next 32-byte boundary. */
6324 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
6325 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6326
6327 /* Lazy GOT entries point here (32-byte aligned). */
6328 0x68, /* pushq immediate */
6329 0, 0, 0, 0, /* replaced with index into relocation table. */
6330 0xe9, /* jmp relative */
6331 0, 0, 0, 0, /* replaced with offset to start of .plt0. */
6332
6333 /* 22 bytes of nop to pad out to the standard size. */
6334 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
6335 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6336 0x0f, 0x1f, 0x80, 0, 0, 0, 0, /* nopl 0x0(%rax) */
6337 };
6338
6339 /* .eh_frame covering the .plt section. */
6340
6341 static const bfd_byte elf_x86_64_nacl_eh_frame_plt[] =
6342 {
6343 #if (PLT_CIE_LENGTH != 20 \
6344 || PLT_FDE_LENGTH != 36 \
6345 || PLT_FDE_START_OFFSET != 4 + PLT_CIE_LENGTH + 8 \
6346 || PLT_FDE_LEN_OFFSET != 4 + PLT_CIE_LENGTH + 12)
6347 # error "Need elf_x86_64_backend_data parameters for eh_frame_plt offsets!"
6348 #endif
6349 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
6350 0, 0, 0, 0, /* CIE ID */
6351 1, /* CIE version */
6352 'z', 'R', 0, /* Augmentation string */
6353 1, /* Code alignment factor */
6354 0x78, /* Data alignment factor */
6355 16, /* Return address column */
6356 1, /* Augmentation size */
6357 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
6358 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
6359 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
6360 DW_CFA_nop, DW_CFA_nop,
6361
6362 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
6363 PLT_CIE_LENGTH + 8, 0, 0, 0,/* CIE pointer */
6364 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
6365 0, 0, 0, 0, /* .plt size goes here */
6366 0, /* Augmentation size */
6367 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
6368 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
6369 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
6370 DW_CFA_advance_loc + 58, /* DW_CFA_advance_loc: 58 to __PLT__+64 */
6371 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
6372 13, /* Block length */
6373 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
6374 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
6375 DW_OP_const1u, 63, DW_OP_and, DW_OP_const1u, 37, DW_OP_ge,
6376 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
6377 DW_CFA_nop, DW_CFA_nop
6378 };
6379
6380 static const struct elf_x86_64_backend_data elf_x86_64_nacl_arch_bed =
6381 {
6382 elf_x86_64_nacl_plt0_entry, /* plt0_entry */
6383 elf_x86_64_nacl_plt_entry, /* plt_entry */
6384 NACL_PLT_ENTRY_SIZE, /* plt_entry_size */
6385 2, /* plt0_got1_offset */
6386 9, /* plt0_got2_offset */
6387 13, /* plt0_got2_insn_end */
6388 3, /* plt_got_offset */
6389 33, /* plt_reloc_offset */
6390 38, /* plt_plt_offset */
6391 7, /* plt_got_insn_size */
6392 42, /* plt_plt_insn_end */
6393 32, /* plt_lazy_offset */
6394 elf_x86_64_nacl_eh_frame_plt, /* eh_frame_plt */
6395 sizeof (elf_x86_64_nacl_eh_frame_plt), /* eh_frame_plt_size */
6396 };
6397
6398 #undef elf_backend_arch_data
6399 #define elf_backend_arch_data &elf_x86_64_nacl_arch_bed
6400
6401 #undef elf_backend_object_p
6402 #define elf_backend_object_p elf64_x86_64_nacl_elf_object_p
6403 #undef elf_backend_modify_segment_map
6404 #define elf_backend_modify_segment_map nacl_modify_segment_map
6405 #undef elf_backend_modify_program_headers
6406 #define elf_backend_modify_program_headers nacl_modify_program_headers
6407 #undef elf_backend_final_write_processing
6408 #define elf_backend_final_write_processing nacl_final_write_processing
6409
6410 #include "elf64-target.h"
6411
6412 /* Native Client x32 support. */
6413
6414 static bfd_boolean
6415 elf32_x86_64_nacl_elf_object_p (bfd *abfd)
6416 {
6417 /* Set the right machine number for a NaCl x86-64 ELF32 file. */
6418 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32_nacl);
6419 return TRUE;
6420 }
6421
6422 #undef TARGET_LITTLE_SYM
6423 #define TARGET_LITTLE_SYM x86_64_elf32_nacl_vec
6424 #undef TARGET_LITTLE_NAME
6425 #define TARGET_LITTLE_NAME "elf32-x86-64-nacl"
6426 #undef elf32_bed
6427 #define elf32_bed elf32_x86_64_nacl_bed
6428
6429 #define bfd_elf32_bfd_link_hash_table_create \
6430 elf_x86_64_link_hash_table_create
6431 #define bfd_elf32_bfd_reloc_type_lookup \
6432 elf_x86_64_reloc_type_lookup
6433 #define bfd_elf32_bfd_reloc_name_lookup \
6434 elf_x86_64_reloc_name_lookup
6435 #define bfd_elf32_mkobject \
6436 elf_x86_64_mkobject
6437 #define bfd_elf32_get_synthetic_symtab \
6438 elf_x86_64_get_synthetic_symtab
6439
6440 #undef elf_backend_object_p
6441 #define elf_backend_object_p \
6442 elf32_x86_64_nacl_elf_object_p
6443
6444 #undef elf_backend_bfd_from_remote_memory
6445 #define elf_backend_bfd_from_remote_memory \
6446 _bfd_elf32_bfd_from_remote_memory
6447
6448 #undef elf_backend_size_info
6449 #define elf_backend_size_info \
6450 _bfd_elf32_size_info
6451
6452 #include "elf32-target.h"
6453
6454 /* Restore defaults. */
6455 #undef elf_backend_object_p
6456 #define elf_backend_object_p elf64_x86_64_elf_object_p
6457 #undef elf_backend_bfd_from_remote_memory
6458 #undef elf_backend_size_info
6459 #undef elf_backend_modify_segment_map
6460 #undef elf_backend_modify_program_headers
6461 #undef elf_backend_final_write_processing
6462
6463 /* Intel L1OM support. */
6464
6465 static bfd_boolean
6466 elf64_l1om_elf_object_p (bfd *abfd)
6467 {
6468 /* Set the right machine number for an L1OM elf64 file. */
6469 bfd_default_set_arch_mach (abfd, bfd_arch_l1om, bfd_mach_l1om);
6470 return TRUE;
6471 }
6472
6473 #undef TARGET_LITTLE_SYM
6474 #define TARGET_LITTLE_SYM l1om_elf64_vec
6475 #undef TARGET_LITTLE_NAME
6476 #define TARGET_LITTLE_NAME "elf64-l1om"
6477 #undef ELF_ARCH
6478 #define ELF_ARCH bfd_arch_l1om
6479
6480 #undef ELF_MACHINE_CODE
6481 #define ELF_MACHINE_CODE EM_L1OM
6482
6483 #undef ELF_OSABI
6484
6485 #undef elf64_bed
6486 #define elf64_bed elf64_l1om_bed
6487
6488 #undef elf_backend_object_p
6489 #define elf_backend_object_p elf64_l1om_elf_object_p
6490
6491 /* Restore defaults. */
6492 #undef ELF_MAXPAGESIZE
6493 #undef ELF_MINPAGESIZE
6494 #undef ELF_COMMONPAGESIZE
6495 #define ELF_MAXPAGESIZE 0x200000
6496 #define ELF_MINPAGESIZE 0x1000
6497 #define ELF_COMMONPAGESIZE 0x1000
6498 #undef elf_backend_plt_alignment
6499 #define elf_backend_plt_alignment 4
6500 #undef elf_backend_arch_data
6501 #define elf_backend_arch_data &elf_x86_64_arch_bed
6502
6503 #include "elf64-target.h"
6504
6505 /* FreeBSD L1OM support. */
6506
6507 #undef TARGET_LITTLE_SYM
6508 #define TARGET_LITTLE_SYM l1om_elf64_fbsd_vec
6509 #undef TARGET_LITTLE_NAME
6510 #define TARGET_LITTLE_NAME "elf64-l1om-freebsd"
6511
6512 #undef ELF_OSABI
6513 #define ELF_OSABI ELFOSABI_FREEBSD
6514
6515 #undef elf64_bed
6516 #define elf64_bed elf64_l1om_fbsd_bed
6517
6518 #include "elf64-target.h"
6519
6520 /* Intel K1OM support. */
6521
6522 static bfd_boolean
6523 elf64_k1om_elf_object_p (bfd *abfd)
6524 {
6525   /* Set the right machine number for a K1OM elf64 file.  */
6526 bfd_default_set_arch_mach (abfd, bfd_arch_k1om, bfd_mach_k1om);
6527 return TRUE;
6528 }
6529
6530 #undef TARGET_LITTLE_SYM
6531 #define TARGET_LITTLE_SYM k1om_elf64_vec
6532 #undef TARGET_LITTLE_NAME
6533 #define TARGET_LITTLE_NAME "elf64-k1om"
6534 #undef ELF_ARCH
6535 #define ELF_ARCH bfd_arch_k1om
6536
6537 #undef ELF_MACHINE_CODE
6538 #define ELF_MACHINE_CODE EM_K1OM
6539
6540 #undef ELF_OSABI
6541
6542 #undef elf64_bed
6543 #define elf64_bed elf64_k1om_bed
6544
6545 #undef elf_backend_object_p
6546 #define elf_backend_object_p elf64_k1om_elf_object_p
6547
6548 #undef elf_backend_static_tls_alignment
6549
6550 #undef elf_backend_want_plt_sym
6551 #define elf_backend_want_plt_sym 0
6552
6553 #include "elf64-target.h"
6554
6555 /* FreeBSD K1OM support. */
6556
6557 #undef TARGET_LITTLE_SYM
6558 #define TARGET_LITTLE_SYM k1om_elf64_fbsd_vec
6559 #undef TARGET_LITTLE_NAME
6560 #define TARGET_LITTLE_NAME "elf64-k1om-freebsd"
6561
6562 #undef ELF_OSABI
6563 #define ELF_OSABI ELFOSABI_FREEBSD
6564
6565 #undef elf64_bed
6566 #define elf64_bed elf64_k1om_fbsd_bed
6567
6568 #include "elf64-target.h"
6569
6570 /* 32bit x86-64 support. */
6571
6572 #undef TARGET_LITTLE_SYM
6573 #define TARGET_LITTLE_SYM x86_64_elf32_vec
6574 #undef TARGET_LITTLE_NAME
6575 #define TARGET_LITTLE_NAME "elf32-x86-64"
6576 #undef elf32_bed
6577
6578 #undef ELF_ARCH
6579 #define ELF_ARCH bfd_arch_i386
6580
6581 #undef ELF_MACHINE_CODE
6582 #define ELF_MACHINE_CODE EM_X86_64
6583
6584 #undef ELF_OSABI
6585
6586 #undef elf_backend_object_p
6587 #define elf_backend_object_p \
6588 elf32_x86_64_elf_object_p
6589
6590 #undef elf_backend_bfd_from_remote_memory
6591 #define elf_backend_bfd_from_remote_memory \
6592 _bfd_elf32_bfd_from_remote_memory
6593
6594 #undef elf_backend_size_info
6595 #define elf_backend_size_info \
6596 _bfd_elf32_size_info
6597
6598 #include "elf32-target.h"