1 /* X86-64 specific support for ELF
2 Copyright (C) 2000-2016 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka <jh@suse.cz>.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "sysdep.h"
23 #include "bfd.h"
24 #include "bfdlink.h"
25 #include "libbfd.h"
26 #include "elf-bfd.h"
27 #include "elf-nacl.h"
28 #include "bfd_stdint.h"
29 #include "objalloc.h"
30 #include "hashtab.h"
31 #include "dwarf2.h"
32 #include "libiberty.h"
33
34 #include "opcode/i386.h"
35 #include "elf/x86-64.h"
36
37 #ifdef CORE_HEADER
38 #include <stdarg.h>
39 #include CORE_HEADER
40 #endif
41
42 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
43 #define MINUS_ONE (~ (bfd_vma) 0)
44
 45 /* Since both 32-bit and 64-bit x86-64 encode the relocation type in an
 46 identical manner, we use ELF32_R_TYPE instead of ELF64_R_TYPE to get
 47 the relocation type.  We also use ELF_ST_TYPE instead of ELF64_ST_TYPE
 48 since they are the same. */
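/* Roughly speaking, r_info is (sym << 32) | type for ELFCLASS64 and
   (sym << 8) | type for ELFCLASS32, and every R_X86_64_* value fits in
   8 bits, so the low-byte mask applied by the generic ELF32_R_TYPE is
   enough to recover the type from either form.  */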
49
50 #define ABI_64_P(abfd) \
51 (get_elf_backend_data (abfd)->s->elfclass == ELFCLASS64)
52
53 /* The relocation "howto" table. Order of fields:
54 type, rightshift, size, bitsize, pc_relative, bitpos, complain_on_overflow,
55 special_function, name, partial_inplace, src_mask, dst_mask, pcrel_offset. */
56 static reloc_howto_type x86_64_elf_howto_table[] =
57 {
58 HOWTO(R_X86_64_NONE, 0, 3, 0, FALSE, 0, complain_overflow_dont,
59 bfd_elf_generic_reloc, "R_X86_64_NONE", FALSE, 0x00000000, 0x00000000,
60 FALSE),
61 HOWTO(R_X86_64_64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
62 bfd_elf_generic_reloc, "R_X86_64_64", FALSE, MINUS_ONE, MINUS_ONE,
63 FALSE),
64 HOWTO(R_X86_64_PC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
65 bfd_elf_generic_reloc, "R_X86_64_PC32", FALSE, 0xffffffff, 0xffffffff,
66 TRUE),
67 HOWTO(R_X86_64_GOT32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
68 bfd_elf_generic_reloc, "R_X86_64_GOT32", FALSE, 0xffffffff, 0xffffffff,
69 FALSE),
70 HOWTO(R_X86_64_PLT32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
71 bfd_elf_generic_reloc, "R_X86_64_PLT32", FALSE, 0xffffffff, 0xffffffff,
72 TRUE),
73 HOWTO(R_X86_64_COPY, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
74 bfd_elf_generic_reloc, "R_X86_64_COPY", FALSE, 0xffffffff, 0xffffffff,
75 FALSE),
76 HOWTO(R_X86_64_GLOB_DAT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
77 bfd_elf_generic_reloc, "R_X86_64_GLOB_DAT", FALSE, MINUS_ONE,
78 MINUS_ONE, FALSE),
79 HOWTO(R_X86_64_JUMP_SLOT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
80 bfd_elf_generic_reloc, "R_X86_64_JUMP_SLOT", FALSE, MINUS_ONE,
81 MINUS_ONE, FALSE),
82 HOWTO(R_X86_64_RELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
83 bfd_elf_generic_reloc, "R_X86_64_RELATIVE", FALSE, MINUS_ONE,
84 MINUS_ONE, FALSE),
85 HOWTO(R_X86_64_GOTPCREL, 0, 2, 32, TRUE, 0, complain_overflow_signed,
86 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL", FALSE, 0xffffffff,
87 0xffffffff, TRUE),
88 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
89 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
90 FALSE),
91 HOWTO(R_X86_64_32S, 0, 2, 32, FALSE, 0, complain_overflow_signed,
92 bfd_elf_generic_reloc, "R_X86_64_32S", FALSE, 0xffffffff, 0xffffffff,
93 FALSE),
94 HOWTO(R_X86_64_16, 0, 1, 16, FALSE, 0, complain_overflow_bitfield,
95 bfd_elf_generic_reloc, "R_X86_64_16", FALSE, 0xffff, 0xffff, FALSE),
96 HOWTO(R_X86_64_PC16,0, 1, 16, TRUE, 0, complain_overflow_bitfield,
97 bfd_elf_generic_reloc, "R_X86_64_PC16", FALSE, 0xffff, 0xffff, TRUE),
98 HOWTO(R_X86_64_8, 0, 0, 8, FALSE, 0, complain_overflow_bitfield,
99 bfd_elf_generic_reloc, "R_X86_64_8", FALSE, 0xff, 0xff, FALSE),
100 HOWTO(R_X86_64_PC8, 0, 0, 8, TRUE, 0, complain_overflow_signed,
101 bfd_elf_generic_reloc, "R_X86_64_PC8", FALSE, 0xff, 0xff, TRUE),
102 HOWTO(R_X86_64_DTPMOD64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
103 bfd_elf_generic_reloc, "R_X86_64_DTPMOD64", FALSE, MINUS_ONE,
104 MINUS_ONE, FALSE),
105 HOWTO(R_X86_64_DTPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
106 bfd_elf_generic_reloc, "R_X86_64_DTPOFF64", FALSE, MINUS_ONE,
107 MINUS_ONE, FALSE),
108 HOWTO(R_X86_64_TPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
109 bfd_elf_generic_reloc, "R_X86_64_TPOFF64", FALSE, MINUS_ONE,
110 MINUS_ONE, FALSE),
111 HOWTO(R_X86_64_TLSGD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
112 bfd_elf_generic_reloc, "R_X86_64_TLSGD", FALSE, 0xffffffff,
113 0xffffffff, TRUE),
114 HOWTO(R_X86_64_TLSLD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
115 bfd_elf_generic_reloc, "R_X86_64_TLSLD", FALSE, 0xffffffff,
116 0xffffffff, TRUE),
117 HOWTO(R_X86_64_DTPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
118 bfd_elf_generic_reloc, "R_X86_64_DTPOFF32", FALSE, 0xffffffff,
119 0xffffffff, FALSE),
120 HOWTO(R_X86_64_GOTTPOFF, 0, 2, 32, TRUE, 0, complain_overflow_signed,
121 bfd_elf_generic_reloc, "R_X86_64_GOTTPOFF", FALSE, 0xffffffff,
122 0xffffffff, TRUE),
123 HOWTO(R_X86_64_TPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
124 bfd_elf_generic_reloc, "R_X86_64_TPOFF32", FALSE, 0xffffffff,
125 0xffffffff, FALSE),
126 HOWTO(R_X86_64_PC64, 0, 4, 64, TRUE, 0, complain_overflow_bitfield,
127 bfd_elf_generic_reloc, "R_X86_64_PC64", FALSE, MINUS_ONE, MINUS_ONE,
128 TRUE),
129 HOWTO(R_X86_64_GOTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
130 bfd_elf_generic_reloc, "R_X86_64_GOTOFF64",
131 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
132 HOWTO(R_X86_64_GOTPC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
133 bfd_elf_generic_reloc, "R_X86_64_GOTPC32",
134 FALSE, 0xffffffff, 0xffffffff, TRUE),
135 HOWTO(R_X86_64_GOT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
136 bfd_elf_generic_reloc, "R_X86_64_GOT64", FALSE, MINUS_ONE, MINUS_ONE,
137 FALSE),
138 HOWTO(R_X86_64_GOTPCREL64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
139 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL64", FALSE, MINUS_ONE,
140 MINUS_ONE, TRUE),
141 HOWTO(R_X86_64_GOTPC64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
142 bfd_elf_generic_reloc, "R_X86_64_GOTPC64",
143 FALSE, MINUS_ONE, MINUS_ONE, TRUE),
144 HOWTO(R_X86_64_GOTPLT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
145 bfd_elf_generic_reloc, "R_X86_64_GOTPLT64", FALSE, MINUS_ONE,
146 MINUS_ONE, FALSE),
147 HOWTO(R_X86_64_PLTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
148 bfd_elf_generic_reloc, "R_X86_64_PLTOFF64", FALSE, MINUS_ONE,
149 MINUS_ONE, FALSE),
150 HOWTO(R_X86_64_SIZE32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
151 bfd_elf_generic_reloc, "R_X86_64_SIZE32", FALSE, 0xffffffff, 0xffffffff,
152 FALSE),
153 HOWTO(R_X86_64_SIZE64, 0, 4, 64, FALSE, 0, complain_overflow_unsigned,
154 bfd_elf_generic_reloc, "R_X86_64_SIZE64", FALSE, MINUS_ONE, MINUS_ONE,
155 FALSE),
156 HOWTO(R_X86_64_GOTPC32_TLSDESC, 0, 2, 32, TRUE, 0,
157 complain_overflow_bitfield, bfd_elf_generic_reloc,
158 "R_X86_64_GOTPC32_TLSDESC",
159 FALSE, 0xffffffff, 0xffffffff, TRUE),
160 HOWTO(R_X86_64_TLSDESC_CALL, 0, 0, 0, FALSE, 0,
161 complain_overflow_dont, bfd_elf_generic_reloc,
162 "R_X86_64_TLSDESC_CALL",
163 FALSE, 0, 0, FALSE),
164 HOWTO(R_X86_64_TLSDESC, 0, 4, 64, FALSE, 0,
165 complain_overflow_bitfield, bfd_elf_generic_reloc,
166 "R_X86_64_TLSDESC",
167 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
168 HOWTO(R_X86_64_IRELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
169 bfd_elf_generic_reloc, "R_X86_64_IRELATIVE", FALSE, MINUS_ONE,
170 MINUS_ONE, FALSE),
171 HOWTO(R_X86_64_RELATIVE64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
172 bfd_elf_generic_reloc, "R_X86_64_RELATIVE64", FALSE, MINUS_ONE,
173 MINUS_ONE, FALSE),
174 HOWTO(R_X86_64_PC32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
175 bfd_elf_generic_reloc, "R_X86_64_PC32_BND", FALSE, 0xffffffff, 0xffffffff,
176 TRUE),
177 HOWTO(R_X86_64_PLT32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
178 bfd_elf_generic_reloc, "R_X86_64_PLT32_BND", FALSE, 0xffffffff, 0xffffffff,
179 TRUE),
180 HOWTO(R_X86_64_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed,
181 bfd_elf_generic_reloc, "R_X86_64_GOTPCRELX", FALSE, 0xffffffff,
182 0xffffffff, TRUE),
183 HOWTO(R_X86_64_REX_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed,
184 bfd_elf_generic_reloc, "R_X86_64_REX_GOTPCRELX", FALSE, 0xffffffff,
185 0xffffffff, TRUE),
186
187 /* We have a gap in the reloc numbers here.
188 R_X86_64_standard counts the number up to this point, and
189 R_X86_64_vt_offset is the value to subtract from a reloc type of
190 R_X86_64_GNU_VT* to form an index into this table. */
191 #define R_X86_64_standard (R_X86_64_REX_GOTPCRELX + 1)
192 #define R_X86_64_vt_offset (R_X86_64_GNU_VTINHERIT - R_X86_64_standard)
193
194 /* GNU extension to record C++ vtable hierarchy. */
195 HOWTO (R_X86_64_GNU_VTINHERIT, 0, 4, 0, FALSE, 0, complain_overflow_dont,
196 NULL, "R_X86_64_GNU_VTINHERIT", FALSE, 0, 0, FALSE),
197
198 /* GNU extension to record C++ vtable member usage. */
199 HOWTO (R_X86_64_GNU_VTENTRY, 0, 4, 0, FALSE, 0, complain_overflow_dont,
200 _bfd_elf_rel_vtable_reloc_fn, "R_X86_64_GNU_VTENTRY", FALSE, 0, 0,
201 FALSE),
202
203 /* Use complain_overflow_bitfield on R_X86_64_32 for x32. */
204 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
205 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
206 FALSE)
207 };
208
209 #define IS_X86_64_PCREL_TYPE(TYPE) \
210 ( ((TYPE) == R_X86_64_PC8) \
211 || ((TYPE) == R_X86_64_PC16) \
212 || ((TYPE) == R_X86_64_PC32) \
213 || ((TYPE) == R_X86_64_PC32_BND) \
214 || ((TYPE) == R_X86_64_PC64))
215
216 /* Map BFD relocs to the x86_64 elf relocs. */
217 struct elf_reloc_map
218 {
219 bfd_reloc_code_real_type bfd_reloc_val;
220 unsigned char elf_reloc_val;
221 };
222
223 static const struct elf_reloc_map x86_64_reloc_map[] =
224 {
225 { BFD_RELOC_NONE, R_X86_64_NONE, },
226 { BFD_RELOC_64, R_X86_64_64, },
227 { BFD_RELOC_32_PCREL, R_X86_64_PC32, },
228 { BFD_RELOC_X86_64_GOT32, R_X86_64_GOT32,},
229 { BFD_RELOC_X86_64_PLT32, R_X86_64_PLT32,},
230 { BFD_RELOC_X86_64_COPY, R_X86_64_COPY, },
231 { BFD_RELOC_X86_64_GLOB_DAT, R_X86_64_GLOB_DAT, },
232 { BFD_RELOC_X86_64_JUMP_SLOT, R_X86_64_JUMP_SLOT, },
233 { BFD_RELOC_X86_64_RELATIVE, R_X86_64_RELATIVE, },
234 { BFD_RELOC_X86_64_GOTPCREL, R_X86_64_GOTPCREL, },
235 { BFD_RELOC_32, R_X86_64_32, },
236 { BFD_RELOC_X86_64_32S, R_X86_64_32S, },
237 { BFD_RELOC_16, R_X86_64_16, },
238 { BFD_RELOC_16_PCREL, R_X86_64_PC16, },
239 { BFD_RELOC_8, R_X86_64_8, },
240 { BFD_RELOC_8_PCREL, R_X86_64_PC8, },
241 { BFD_RELOC_X86_64_DTPMOD64, R_X86_64_DTPMOD64, },
242 { BFD_RELOC_X86_64_DTPOFF64, R_X86_64_DTPOFF64, },
243 { BFD_RELOC_X86_64_TPOFF64, R_X86_64_TPOFF64, },
244 { BFD_RELOC_X86_64_TLSGD, R_X86_64_TLSGD, },
245 { BFD_RELOC_X86_64_TLSLD, R_X86_64_TLSLD, },
246 { BFD_RELOC_X86_64_DTPOFF32, R_X86_64_DTPOFF32, },
247 { BFD_RELOC_X86_64_GOTTPOFF, R_X86_64_GOTTPOFF, },
248 { BFD_RELOC_X86_64_TPOFF32, R_X86_64_TPOFF32, },
249 { BFD_RELOC_64_PCREL, R_X86_64_PC64, },
250 { BFD_RELOC_X86_64_GOTOFF64, R_X86_64_GOTOFF64, },
251 { BFD_RELOC_X86_64_GOTPC32, R_X86_64_GOTPC32, },
252 { BFD_RELOC_X86_64_GOT64, R_X86_64_GOT64, },
253 { BFD_RELOC_X86_64_GOTPCREL64,R_X86_64_GOTPCREL64, },
254 { BFD_RELOC_X86_64_GOTPC64, R_X86_64_GOTPC64, },
255 { BFD_RELOC_X86_64_GOTPLT64, R_X86_64_GOTPLT64, },
256 { BFD_RELOC_X86_64_PLTOFF64, R_X86_64_PLTOFF64, },
257 { BFD_RELOC_SIZE32, R_X86_64_SIZE32, },
258 { BFD_RELOC_SIZE64, R_X86_64_SIZE64, },
259 { BFD_RELOC_X86_64_GOTPC32_TLSDESC, R_X86_64_GOTPC32_TLSDESC, },
260 { BFD_RELOC_X86_64_TLSDESC_CALL, R_X86_64_TLSDESC_CALL, },
261 { BFD_RELOC_X86_64_TLSDESC, R_X86_64_TLSDESC, },
262 { BFD_RELOC_X86_64_IRELATIVE, R_X86_64_IRELATIVE, },
263 { BFD_RELOC_X86_64_PC32_BND, R_X86_64_PC32_BND, },
264 { BFD_RELOC_X86_64_PLT32_BND, R_X86_64_PLT32_BND, },
265 { BFD_RELOC_X86_64_GOTPCRELX, R_X86_64_GOTPCRELX, },
266 { BFD_RELOC_X86_64_REX_GOTPCRELX, R_X86_64_REX_GOTPCRELX, },
267 { BFD_RELOC_VTABLE_INHERIT, R_X86_64_GNU_VTINHERIT, },
268 { BFD_RELOC_VTABLE_ENTRY, R_X86_64_GNU_VTENTRY, },
269 };
270
271 static reloc_howto_type *
272 elf_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type)
273 {
274 unsigned i;
275
276 if (r_type == (unsigned int) R_X86_64_32)
277 {
278 if (ABI_64_P (abfd))
279 i = r_type;
280 else
281 i = ARRAY_SIZE (x86_64_elf_howto_table) - 1;
282 }
283 else if (r_type < (unsigned int) R_X86_64_GNU_VTINHERIT
284 || r_type >= (unsigned int) R_X86_64_max)
285 {
286 if (r_type >= (unsigned int) R_X86_64_standard)
287 {
288 (*_bfd_error_handler) (_("%B: invalid relocation type %d"),
289 abfd, (int) r_type);
290 r_type = R_X86_64_NONE;
291 }
292 i = r_type;
293 }
294 else
295 i = r_type - (unsigned int) R_X86_64_vt_offset;
296 BFD_ASSERT (x86_64_elf_howto_table[i].type == r_type);
297 return &x86_64_elf_howto_table[i];
298 }
299
300 /* Given a BFD reloc type, return a HOWTO structure. */
301 static reloc_howto_type *
302 elf_x86_64_reloc_type_lookup (bfd *abfd,
303 bfd_reloc_code_real_type code)
304 {
305 unsigned int i;
306
307 for (i = 0; i < sizeof (x86_64_reloc_map) / sizeof (struct elf_reloc_map);
308 i++)
309 {
310 if (x86_64_reloc_map[i].bfd_reloc_val == code)
311 return elf_x86_64_rtype_to_howto (abfd,
312 x86_64_reloc_map[i].elf_reloc_val);
313 }
314 return NULL;
315 }
316
317 static reloc_howto_type *
318 elf_x86_64_reloc_name_lookup (bfd *abfd,
319 const char *r_name)
320 {
321 unsigned int i;
322
323 if (!ABI_64_P (abfd) && strcasecmp (r_name, "R_X86_64_32") == 0)
324 {
325 /* Get x32 R_X86_64_32. */
326 reloc_howto_type *reloc
327 = &x86_64_elf_howto_table[ARRAY_SIZE (x86_64_elf_howto_table) - 1];
328 BFD_ASSERT (reloc->type == (unsigned int) R_X86_64_32);
329 return reloc;
330 }
331
332 for (i = 0; i < ARRAY_SIZE (x86_64_elf_howto_table); i++)
333 if (x86_64_elf_howto_table[i].name != NULL
334 && strcasecmp (x86_64_elf_howto_table[i].name, r_name) == 0)
335 return &x86_64_elf_howto_table[i];
336
337 return NULL;
338 }
339
340 /* Given an x86_64 ELF reloc type, fill in an arelent structure. */
341
342 static void
343 elf_x86_64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *cache_ptr,
344 Elf_Internal_Rela *dst)
345 {
346 unsigned r_type;
347
348 r_type = ELF32_R_TYPE (dst->r_info);
349 cache_ptr->howto = elf_x86_64_rtype_to_howto (abfd, r_type);
350 BFD_ASSERT (r_type == cache_ptr->howto->type);
351 }
352 \f
353 /* Support for core dump NOTE sections. */
354 static bfd_boolean
355 elf_x86_64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
356 {
357 int offset;
358 size_t size;
359
360 switch (note->descsz)
361 {
362 default:
363 return FALSE;
364
 365 case 296: /* sizeof (struct elf_prstatus) on Linux/x32 */
366 /* pr_cursig */
367 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
368
369 /* pr_pid */
370 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
371
372 /* pr_reg */
373 offset = 72;
374 size = 216;
375
376 break;
377
 378 case 336: /* sizeof (struct elf_prstatus) on Linux/x86_64 */
379 /* pr_cursig */
380 elf_tdata (abfd)->core->signal
381 = bfd_get_16 (abfd, note->descdata + 12);
382
383 /* pr_pid */
384 elf_tdata (abfd)->core->lwpid
385 = bfd_get_32 (abfd, note->descdata + 32);
386
387 /* pr_reg */
388 offset = 112;
389 size = 216;
390
391 break;
392 }
393
394 /* Make a ".reg/999" section. */
395 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
396 size, note->descpos + offset);
397 }
398
399 static bfd_boolean
400 elf_x86_64_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
401 {
402 switch (note->descsz)
403 {
404 default:
405 return FALSE;
406
407 case 124: /* sizeof(struct elf_prpsinfo) on Linux/x32 */
408 elf_tdata (abfd)->core->pid
409 = bfd_get_32 (abfd, note->descdata + 12);
410 elf_tdata (abfd)->core->program
411 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
412 elf_tdata (abfd)->core->command
413 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
414 break;
415
416 case 136: /* sizeof(struct elf_prpsinfo) on Linux/x86_64 */
417 elf_tdata (abfd)->core->pid
418 = bfd_get_32 (abfd, note->descdata + 24);
419 elf_tdata (abfd)->core->program
420 = _bfd_elfcore_strndup (abfd, note->descdata + 40, 16);
421 elf_tdata (abfd)->core->command
422 = _bfd_elfcore_strndup (abfd, note->descdata + 56, 80);
423 }
424
 425 /* Note that for some reason, a spurious space is tacked
 426 onto the end of the args in some implementations (at least
 427 one, anyway), so strip it off if it exists. */
428
429 {
430 char *command = elf_tdata (abfd)->core->command;
431 int n = strlen (command);
432
433 if (0 < n && command[n - 1] == ' ')
434 command[n - 1] = '\0';
435 }
436
437 return TRUE;
438 }
439
440 #ifdef CORE_HEADER
441 static char *
442 elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz,
443 int note_type, ...)
444 {
445 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
446 va_list ap;
447 const char *fname, *psargs;
448 long pid;
449 int cursig;
450 const void *gregs;
451
452 switch (note_type)
453 {
454 default:
455 return NULL;
456
457 case NT_PRPSINFO:
458 va_start (ap, note_type);
459 fname = va_arg (ap, const char *);
460 psargs = va_arg (ap, const char *);
461 va_end (ap);
462
463 if (bed->s->elfclass == ELFCLASS32)
464 {
465 prpsinfo32_t data;
466 memset (&data, 0, sizeof (data));
467 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
468 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
469 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
470 &data, sizeof (data));
471 }
472 else
473 {
474 prpsinfo64_t data;
475 memset (&data, 0, sizeof (data));
476 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
477 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
478 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
479 &data, sizeof (data));
480 }
481 /* NOTREACHED */
482
483 case NT_PRSTATUS:
484 va_start (ap, note_type);
485 pid = va_arg (ap, long);
486 cursig = va_arg (ap, int);
487 gregs = va_arg (ap, const void *);
488 va_end (ap);
489
490 if (bed->s->elfclass == ELFCLASS32)
491 {
492 if (bed->elf_machine_code == EM_X86_64)
493 {
494 prstatusx32_t prstat;
495 memset (&prstat, 0, sizeof (prstat));
496 prstat.pr_pid = pid;
497 prstat.pr_cursig = cursig;
498 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
499 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
500 &prstat, sizeof (prstat));
501 }
502 else
503 {
504 prstatus32_t prstat;
505 memset (&prstat, 0, sizeof (prstat));
506 prstat.pr_pid = pid;
507 prstat.pr_cursig = cursig;
508 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
509 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
510 &prstat, sizeof (prstat));
511 }
512 }
513 else
514 {
515 prstatus64_t prstat;
516 memset (&prstat, 0, sizeof (prstat));
517 prstat.pr_pid = pid;
518 prstat.pr_cursig = cursig;
519 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
520 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
521 &prstat, sizeof (prstat));
522 }
523 }
524 /* NOTREACHED */
525 }
526 #endif
527 \f
528 /* Functions for the x86-64 ELF linker. */
529
530 /* The name of the dynamic interpreter. This is put in the .interp
531 section. */
532
533 #define ELF64_DYNAMIC_INTERPRETER "/lib/ld64.so.1"
534 #define ELF32_DYNAMIC_INTERPRETER "/lib/ldx32.so.1"
535
536 /* If ELIMINATE_COPY_RELOCS is non-zero, the linker will try to avoid
537 copying dynamic variables from a shared lib into an app's dynbss
538 section, and instead use a dynamic relocation to point into the
539 shared lib. */
540 #define ELIMINATE_COPY_RELOCS 1
541
542 /* The size in bytes of an entry in the global offset table. */
543
544 #define GOT_ENTRY_SIZE 8
545
546 /* The size in bytes of an entry in the procedure linkage table. */
547
548 #define PLT_ENTRY_SIZE 16
549
550 /* The first entry in a procedure linkage table looks like this. See the
551 SVR4 ABI i386 supplement and the x86-64 ABI to see how this works. */
552
553 static const bfd_byte elf_x86_64_plt0_entry[PLT_ENTRY_SIZE] =
554 {
555 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
556 0xff, 0x25, 16, 0, 0, 0, /* jmpq *GOT+16(%rip) */
557 0x0f, 0x1f, 0x40, 0x00 /* nopl 0(%rax) */
558 };
559
560 /* Subsequent entries in a procedure linkage table look like this. */
561
562 static const bfd_byte elf_x86_64_plt_entry[PLT_ENTRY_SIZE] =
563 {
564 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
565 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
566 0x68, /* pushq immediate */
567 0, 0, 0, 0, /* replaced with index into relocation table. */
568 0xe9, /* jmp relative */
569 0, 0, 0, 0 /* replaced with offset to start of .plt0. */
570 };
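/* For the Nth symbol the placeholders above are filled in roughly as
   follows: the first 32-bit field gets the PC-relative distance to the
   symbol's .got.plt slot, the pushq immediate gets the index of the
   corresponding R_X86_64_JUMP_SLOT entry in .rela.plt, and the final
   field gets the (negative) displacement back to the first PLT entry so
   that the lazy resolution path in the dynamic linker can be reached.  */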
571
 572 /* The first entry in a procedure linkage table with BND relocations
 573 looks like this. */
574
575 static const bfd_byte elf_x86_64_bnd_plt0_entry[PLT_ENTRY_SIZE] =
576 {
577 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
578 0xf2, 0xff, 0x25, 16, 0, 0, 0, /* bnd jmpq *GOT+16(%rip) */
579 0x0f, 0x1f, 0 /* nopl (%rax) */
580 };
581
582 /* Subsequent entries for legacy branches in a procedure linkage table
583 with BND relocations look like this. */
584
585 static const bfd_byte elf_x86_64_legacy_plt_entry[PLT_ENTRY_SIZE] =
586 {
587 0x68, 0, 0, 0, 0, /* pushq immediate */
588 0xe9, 0, 0, 0, 0, /* jmpq relative */
589 0x66, 0x0f, 0x1f, 0x44, 0, 0 /* nopw (%rax,%rax,1) */
590 };
591
 592 /* Subsequent entries for branches with BND prefix in a procedure linkage
593 table with BND relocations look like this. */
594
595 static const bfd_byte elf_x86_64_bnd_plt_entry[PLT_ENTRY_SIZE] =
596 {
597 0x68, 0, 0, 0, 0, /* pushq immediate */
598 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
599 0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */
600 };
601
602 /* Entries for legacy branches in the second procedure linkage table
603 look like this. */
604
605 static const bfd_byte elf_x86_64_legacy_plt2_entry[8] =
606 {
607 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
608 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
609 0x66, 0x90 /* xchg %ax,%ax */
610 };
611
612 /* Entries for branches with BND prefix in the second procedure linkage
613 table look like this. */
614
615 static const bfd_byte elf_x86_64_bnd_plt2_entry[8] =
616 {
617 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
618 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
619 0x90 /* nop */
620 };
621
622 /* .eh_frame covering the .plt section. */
623
624 static const bfd_byte elf_x86_64_eh_frame_plt[] =
625 {
626 #define PLT_CIE_LENGTH 20
627 #define PLT_FDE_LENGTH 36
628 #define PLT_FDE_START_OFFSET 4 + PLT_CIE_LENGTH + 8
629 #define PLT_FDE_LEN_OFFSET 4 + PLT_CIE_LENGTH + 12
630 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
631 0, 0, 0, 0, /* CIE ID */
632 1, /* CIE version */
633 'z', 'R', 0, /* Augmentation string */
634 1, /* Code alignment factor */
635 0x78, /* Data alignment factor */
636 16, /* Return address column */
637 1, /* Augmentation size */
638 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
639 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
640 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
641 DW_CFA_nop, DW_CFA_nop,
642
643 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
644 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
645 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
646 0, 0, 0, 0, /* .plt size goes here */
647 0, /* Augmentation size */
648 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
649 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
650 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
651 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
652 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
653 11, /* Block length */
654 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
655 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
656 DW_OP_lit15, DW_OP_and, DW_OP_lit11, DW_OP_ge,
657 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
658 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
659 };
660
661 /* Architecture-specific backend data for x86-64. */
662
663 struct elf_x86_64_backend_data
664 {
665 /* Templates for the initial PLT entry and for subsequent entries. */
666 const bfd_byte *plt0_entry;
667 const bfd_byte *plt_entry;
668 unsigned int plt_entry_size; /* Size of each PLT entry. */
669
670 /* Offsets into plt0_entry that are to be replaced with GOT[1] and GOT[2]. */
671 unsigned int plt0_got1_offset;
672 unsigned int plt0_got2_offset;
673
674 /* Offset of the end of the PC-relative instruction containing
675 plt0_got2_offset. */
676 unsigned int plt0_got2_insn_end;
677
678 /* Offsets into plt_entry that are to be replaced with... */
679 unsigned int plt_got_offset; /* ... address of this symbol in .got. */
680 unsigned int plt_reloc_offset; /* ... offset into relocation table. */
681 unsigned int plt_plt_offset; /* ... offset to start of .plt. */
682
683 /* Length of the PC-relative instruction containing plt_got_offset. */
684 unsigned int plt_got_insn_size;
685
686 /* Offset of the end of the PC-relative jump to plt0_entry. */
687 unsigned int plt_plt_insn_end;
688
689 /* Offset into plt_entry where the initial value of the GOT entry points. */
690 unsigned int plt_lazy_offset;
691
692 /* .eh_frame covering the .plt section. */
693 const bfd_byte *eh_frame_plt;
694 unsigned int eh_frame_plt_size;
695 };
696
697 #define get_elf_x86_64_arch_data(bed) \
698 ((const struct elf_x86_64_backend_data *) (bed)->arch_data)
699
700 #define get_elf_x86_64_backend_data(abfd) \
701 get_elf_x86_64_arch_data (get_elf_backend_data (abfd))
702
703 #define GET_PLT_ENTRY_SIZE(abfd) \
704 get_elf_x86_64_backend_data (abfd)->plt_entry_size
705
706 /* These are the standard parameters. */
707 static const struct elf_x86_64_backend_data elf_x86_64_arch_bed =
708 {
709 elf_x86_64_plt0_entry, /* plt0_entry */
710 elf_x86_64_plt_entry, /* plt_entry */
711 sizeof (elf_x86_64_plt_entry), /* plt_entry_size */
712 2, /* plt0_got1_offset */
713 8, /* plt0_got2_offset */
714 12, /* plt0_got2_insn_end */
715 2, /* plt_got_offset */
716 7, /* plt_reloc_offset */
717 12, /* plt_plt_offset */
718 6, /* plt_got_insn_size */
719 PLT_ENTRY_SIZE, /* plt_plt_insn_end */
720 6, /* plt_lazy_offset */
721 elf_x86_64_eh_frame_plt, /* eh_frame_plt */
722 sizeof (elf_x86_64_eh_frame_plt), /* eh_frame_plt_size */
723 };
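/* The offsets above index into the templates defined earlier in this
   file: for instance, 2 and 8 are the displacement fields of the
   pushq/jmpq pair in elf_x86_64_plt0_entry, while 2, 7 and 12 are the
   displacement, pushq immediate and jump displacement fields in
   elf_x86_64_plt_entry.  */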
724
725 static const struct elf_x86_64_backend_data elf_x86_64_bnd_arch_bed =
726 {
727 elf_x86_64_bnd_plt0_entry, /* plt0_entry */
728 elf_x86_64_bnd_plt_entry, /* plt_entry */
729 sizeof (elf_x86_64_bnd_plt_entry), /* plt_entry_size */
730 2, /* plt0_got1_offset */
731 1+8, /* plt0_got2_offset */
732 1+12, /* plt0_got2_insn_end */
733 1+2, /* plt_got_offset */
734 1, /* plt_reloc_offset */
735 7, /* plt_plt_offset */
736 1+6, /* plt_got_insn_size */
737 11, /* plt_plt_insn_end */
738 0, /* plt_lazy_offset */
739 elf_x86_64_eh_frame_plt, /* eh_frame_plt */
740 sizeof (elf_x86_64_eh_frame_plt), /* eh_frame_plt_size */
741 };
742
743 #define elf_backend_arch_data &elf_x86_64_arch_bed
744
 745 /* Is this an undefined weak symbol which is resolved to 0?  A
 746 reference to an undefined weak symbol is resolved to 0 when building
 747 an executable if the symbol isn't dynamic and it either
 748 1. has non-GOT/non-PLT relocations in the text section, or
 749 2. has no GOT/PLT relocation.
 750 */
751 #define UNDEFINED_WEAK_RESOLVED_TO_ZERO(INFO, GOT_RELOC, EH) \
752 ((EH)->elf.root.type == bfd_link_hash_undefweak \
753 && bfd_link_executable (INFO) \
754 && (elf_x86_64_hash_table (INFO)->interp == NULL \
755 || !(GOT_RELOC) \
756 || (EH)->has_non_got_reloc \
757 || !(INFO)->dynamic_undefined_weak))
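/* In effect the condition reads: the symbol is an undefined weak, we
   are linking an executable, and either there is no dynamic
   interpreter, the symbol has no GOT/PLT relocation, it has a
   non-GOT/non-PLT relocation, or dynamic undefined weaks have been
   disabled.  */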
758
759 /* x86-64 ELF linker hash entry. */
760
761 struct elf_x86_64_link_hash_entry
762 {
763 struct elf_link_hash_entry elf;
764
765 /* Track dynamic relocs copied for this symbol. */
766 struct elf_dyn_relocs *dyn_relocs;
767
768 #define GOT_UNKNOWN 0
769 #define GOT_NORMAL 1
770 #define GOT_TLS_GD 2
771 #define GOT_TLS_IE 3
772 #define GOT_TLS_GDESC 4
773 #define GOT_TLS_GD_BOTH_P(type) \
774 ((type) == (GOT_TLS_GD | GOT_TLS_GDESC))
775 #define GOT_TLS_GD_P(type) \
776 ((type) == GOT_TLS_GD || GOT_TLS_GD_BOTH_P (type))
777 #define GOT_TLS_GDESC_P(type) \
778 ((type) == GOT_TLS_GDESC || GOT_TLS_GD_BOTH_P (type))
779 #define GOT_TLS_GD_ANY_P(type) \
780 (GOT_TLS_GD_P (type) || GOT_TLS_GDESC_P (type))
781 unsigned char tls_type;
782
783 /* TRUE if a weak symbol with a real definition needs a copy reloc.
784 When there is a weak symbol with a real definition, the processor
785 independent code will have arranged for us to see the real
786 definition first. We need to copy the needs_copy bit from the
787 real definition and check it when allowing copy reloc in PIE. */
788 unsigned int needs_copy : 1;
789
790 /* TRUE if symbol has at least one BND relocation. */
791 unsigned int has_bnd_reloc : 1;
792
793 /* TRUE if symbol has GOT or PLT relocations. */
794 unsigned int has_got_reloc : 1;
795
796 /* TRUE if symbol has non-GOT/non-PLT relocations in text sections. */
797 unsigned int has_non_got_reloc : 1;
798
799 /* 0: symbol isn't __tls_get_addr.
800 1: symbol is __tls_get_addr.
801 2: symbol is unknown. */
802 unsigned int tls_get_addr : 2;
803
804 /* Reference count of C/C++ function pointer relocations in read-write
805 section which can be resolved at run-time. */
806 bfd_signed_vma func_pointer_refcount;
807
808 /* Information about the GOT PLT entry. Filled when there are both
809 GOT and PLT relocations against the same function. */
810 union gotplt_union plt_got;
811
812 /* Information about the second PLT entry. Filled when has_bnd_reloc is
813 set. */
814 union gotplt_union plt_bnd;
815
816 /* Offset of the GOTPLT entry reserved for the TLS descriptor,
817 starting at the end of the jump table. */
818 bfd_vma tlsdesc_got;
819 };
820
821 #define elf_x86_64_hash_entry(ent) \
822 ((struct elf_x86_64_link_hash_entry *)(ent))
823
824 struct elf_x86_64_obj_tdata
825 {
826 struct elf_obj_tdata root;
827
828 /* tls_type for each local got entry. */
829 char *local_got_tls_type;
830
831 /* GOTPLT entries for TLS descriptors. */
832 bfd_vma *local_tlsdesc_gotent;
833 };
834
835 #define elf_x86_64_tdata(abfd) \
836 ((struct elf_x86_64_obj_tdata *) (abfd)->tdata.any)
837
838 #define elf_x86_64_local_got_tls_type(abfd) \
839 (elf_x86_64_tdata (abfd)->local_got_tls_type)
840
841 #define elf_x86_64_local_tlsdesc_gotent(abfd) \
842 (elf_x86_64_tdata (abfd)->local_tlsdesc_gotent)
843
844 #define is_x86_64_elf(bfd) \
845 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
846 && elf_tdata (bfd) != NULL \
847 && elf_object_id (bfd) == X86_64_ELF_DATA)
848
849 static bfd_boolean
850 elf_x86_64_mkobject (bfd *abfd)
851 {
852 return bfd_elf_allocate_object (abfd, sizeof (struct elf_x86_64_obj_tdata),
853 X86_64_ELF_DATA);
854 }
855
856 /* x86-64 ELF linker hash table. */
857
858 struct elf_x86_64_link_hash_table
859 {
860 struct elf_link_hash_table elf;
861
862 /* Short-cuts to get to dynamic linker sections. */
863 asection *interp;
864 asection *sdynbss;
865 asection *srelbss;
866 asection *plt_eh_frame;
867 asection *plt_bnd;
868 asection *plt_got;
869
870 union
871 {
872 bfd_signed_vma refcount;
873 bfd_vma offset;
874 } tls_ld_got;
875
876 /* The amount of space used by the jump slots in the GOT. */
877 bfd_vma sgotplt_jump_table_size;
878
879 /* Small local sym cache. */
880 struct sym_cache sym_cache;
881
882 bfd_vma (*r_info) (bfd_vma, bfd_vma);
883 bfd_vma (*r_sym) (bfd_vma);
884 unsigned int pointer_r_type;
885 const char *dynamic_interpreter;
886 int dynamic_interpreter_size;
887
888 /* _TLS_MODULE_BASE_ symbol. */
889 struct bfd_link_hash_entry *tls_module_base;
890
891 /* Used by local STT_GNU_IFUNC symbols. */
892 htab_t loc_hash_table;
893 void * loc_hash_memory;
894
895 /* The offset into splt of the PLT entry for the TLS descriptor
896 resolver. Special values are 0, if not necessary (or not found
897 to be necessary yet), and -1 if needed but not determined
898 yet. */
899 bfd_vma tlsdesc_plt;
900 /* The offset into sgot of the GOT entry used by the PLT entry
901 above. */
902 bfd_vma tlsdesc_got;
903
904 /* The index of the next R_X86_64_JUMP_SLOT entry in .rela.plt. */
905 bfd_vma next_jump_slot_index;
906 /* The index of the next R_X86_64_IRELATIVE entry in .rela.plt. */
907 bfd_vma next_irelative_index;
908
909 /* TRUE if there are dynamic relocs against IFUNC symbols that apply
910 to read-only sections. */
911 bfd_boolean readonly_dynrelocs_against_ifunc;
912 };
913
914 /* Get the x86-64 ELF linker hash table from a link_info structure. */
915
916 #define elf_x86_64_hash_table(p) \
917 (elf_hash_table_id ((struct elf_link_hash_table *) ((p)->hash)) \
918 == X86_64_ELF_DATA ? ((struct elf_x86_64_link_hash_table *) ((p)->hash)) : NULL)
919
920 #define elf_x86_64_compute_jump_table_size(htab) \
921 ((htab)->elf.srelplt->reloc_count * GOT_ENTRY_SIZE)
922
923 /* Create an entry in an x86-64 ELF linker hash table. */
924
925 static struct bfd_hash_entry *
926 elf_x86_64_link_hash_newfunc (struct bfd_hash_entry *entry,
927 struct bfd_hash_table *table,
928 const char *string)
929 {
930 /* Allocate the structure if it has not already been allocated by a
931 subclass. */
932 if (entry == NULL)
933 {
934 entry = (struct bfd_hash_entry *)
935 bfd_hash_allocate (table,
936 sizeof (struct elf_x86_64_link_hash_entry));
937 if (entry == NULL)
938 return entry;
939 }
940
941 /* Call the allocation method of the superclass. */
942 entry = _bfd_elf_link_hash_newfunc (entry, table, string);
943 if (entry != NULL)
944 {
945 struct elf_x86_64_link_hash_entry *eh;
946
947 eh = (struct elf_x86_64_link_hash_entry *) entry;
948 eh->dyn_relocs = NULL;
949 eh->tls_type = GOT_UNKNOWN;
950 eh->needs_copy = 0;
951 eh->has_bnd_reloc = 0;
952 eh->has_got_reloc = 0;
953 eh->has_non_got_reloc = 0;
954 eh->tls_get_addr = 2;
955 eh->func_pointer_refcount = 0;
956 eh->plt_bnd.offset = (bfd_vma) -1;
957 eh->plt_got.offset = (bfd_vma) -1;
958 eh->tlsdesc_got = (bfd_vma) -1;
959 }
960
961 return entry;
962 }
963
 964 /* Compute a hash of a local hash entry.  We use elf_link_hash_entry
 965 for local symbols so that we can handle local STT_GNU_IFUNC symbols
 966 as global symbols.  We reuse indx and dynstr_index for the local
 967 symbol hash since they aren't used by global symbols in this backend. */
968
969 static hashval_t
970 elf_x86_64_local_htab_hash (const void *ptr)
971 {
972 struct elf_link_hash_entry *h
973 = (struct elf_link_hash_entry *) ptr;
974 return ELF_LOCAL_SYMBOL_HASH (h->indx, h->dynstr_index);
975 }
976
977 /* Compare local hash entries. */
978
979 static int
980 elf_x86_64_local_htab_eq (const void *ptr1, const void *ptr2)
981 {
982 struct elf_link_hash_entry *h1
983 = (struct elf_link_hash_entry *) ptr1;
984 struct elf_link_hash_entry *h2
985 = (struct elf_link_hash_entry *) ptr2;
986
987 return h1->indx == h2->indx && h1->dynstr_index == h2->dynstr_index;
988 }
989
 990 /* Find and/or create a hash entry for a local symbol. */
991
992 static struct elf_link_hash_entry *
993 elf_x86_64_get_local_sym_hash (struct elf_x86_64_link_hash_table *htab,
994 bfd *abfd, const Elf_Internal_Rela *rel,
995 bfd_boolean create)
996 {
997 struct elf_x86_64_link_hash_entry e, *ret;
998 asection *sec = abfd->sections;
999 hashval_t h = ELF_LOCAL_SYMBOL_HASH (sec->id,
1000 htab->r_sym (rel->r_info));
1001 void **slot;
1002
1003 e.elf.indx = sec->id;
1004 e.elf.dynstr_index = htab->r_sym (rel->r_info);
1005 slot = htab_find_slot_with_hash (htab->loc_hash_table, &e, h,
1006 create ? INSERT : NO_INSERT);
1007
1008 if (!slot)
1009 return NULL;
1010
1011 if (*slot)
1012 {
1013 ret = (struct elf_x86_64_link_hash_entry *) *slot;
1014 return &ret->elf;
1015 }
1016
1017 ret = (struct elf_x86_64_link_hash_entry *)
1018 objalloc_alloc ((struct objalloc *) htab->loc_hash_memory,
1019 sizeof (struct elf_x86_64_link_hash_entry));
1020 if (ret)
1021 {
1022 memset (ret, 0, sizeof (*ret));
1023 ret->elf.indx = sec->id;
1024 ret->elf.dynstr_index = htab->r_sym (rel->r_info);
1025 ret->elf.dynindx = -1;
1026 ret->func_pointer_refcount = 0;
1027 ret->plt_got.offset = (bfd_vma) -1;
1028 *slot = ret;
1029 }
1030 return &ret->elf;
1031 }
1032
1033 /* Destroy an X86-64 ELF linker hash table. */
1034
1035 static void
1036 elf_x86_64_link_hash_table_free (bfd *obfd)
1037 {
1038 struct elf_x86_64_link_hash_table *htab
1039 = (struct elf_x86_64_link_hash_table *) obfd->link.hash;
1040
1041 if (htab->loc_hash_table)
1042 htab_delete (htab->loc_hash_table);
1043 if (htab->loc_hash_memory)
1044 objalloc_free ((struct objalloc *) htab->loc_hash_memory);
1045 _bfd_elf_link_hash_table_free (obfd);
1046 }
1047
1048 /* Create an X86-64 ELF linker hash table. */
1049
1050 static struct bfd_link_hash_table *
1051 elf_x86_64_link_hash_table_create (bfd *abfd)
1052 {
1053 struct elf_x86_64_link_hash_table *ret;
1054 bfd_size_type amt = sizeof (struct elf_x86_64_link_hash_table);
1055
1056 ret = (struct elf_x86_64_link_hash_table *) bfd_zmalloc (amt);
1057 if (ret == NULL)
1058 return NULL;
1059
1060 if (!_bfd_elf_link_hash_table_init (&ret->elf, abfd,
1061 elf_x86_64_link_hash_newfunc,
1062 sizeof (struct elf_x86_64_link_hash_entry),
1063 X86_64_ELF_DATA))
1064 {
1065 free (ret);
1066 return NULL;
1067 }
1068
1069 if (ABI_64_P (abfd))
1070 {
1071 ret->r_info = elf64_r_info;
1072 ret->r_sym = elf64_r_sym;
1073 ret->pointer_r_type = R_X86_64_64;
1074 ret->dynamic_interpreter = ELF64_DYNAMIC_INTERPRETER;
1075 ret->dynamic_interpreter_size = sizeof ELF64_DYNAMIC_INTERPRETER;
1076 }
1077 else
1078 {
1079 ret->r_info = elf32_r_info;
1080 ret->r_sym = elf32_r_sym;
1081 ret->pointer_r_type = R_X86_64_32;
1082 ret->dynamic_interpreter = ELF32_DYNAMIC_INTERPRETER;
1083 ret->dynamic_interpreter_size = sizeof ELF32_DYNAMIC_INTERPRETER;
1084 }
1085
1086 ret->loc_hash_table = htab_try_create (1024,
1087 elf_x86_64_local_htab_hash,
1088 elf_x86_64_local_htab_eq,
1089 NULL);
1090 ret->loc_hash_memory = objalloc_create ();
1091 if (!ret->loc_hash_table || !ret->loc_hash_memory)
1092 {
1093 elf_x86_64_link_hash_table_free (abfd);
1094 return NULL;
1095 }
1096 ret->elf.root.hash_table_free = elf_x86_64_link_hash_table_free;
1097
1098 return &ret->elf.root;
1099 }
1100
1101 /* Create .plt, .rela.plt, .got, .got.plt, .rela.got, .dynbss, and
1102 .rela.bss sections in DYNOBJ, and set up shortcuts to them in our
1103 hash table. */
1104
1105 static bfd_boolean
1106 elf_x86_64_create_dynamic_sections (bfd *dynobj,
1107 struct bfd_link_info *info)
1108 {
1109 struct elf_x86_64_link_hash_table *htab;
1110
1111 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
1112 return FALSE;
1113
1114 htab = elf_x86_64_hash_table (info);
1115 if (htab == NULL)
1116 return FALSE;
1117
1118 /* Set the contents of the .interp section to the interpreter. */
1119 if (bfd_link_executable (info) && !info->nointerp)
1120 {
1121 asection *s = bfd_get_linker_section (dynobj, ".interp");
1122 if (s == NULL)
1123 abort ();
1124 s->size = htab->dynamic_interpreter_size;
1125 s->contents = (unsigned char *) htab->dynamic_interpreter;
1126 htab->interp = s;
1127 }
1128
1129 htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
1130 if (!htab->sdynbss)
1131 abort ();
1132
1133 if (bfd_link_executable (info))
1134 {
1135 /* Always allow copy relocs for building executables. */
1136 asection *s = bfd_get_linker_section (dynobj, ".rela.bss");
1137 if (s == NULL)
1138 {
1139 const struct elf_backend_data *bed = get_elf_backend_data (dynobj);
1140 s = bfd_make_section_anyway_with_flags (dynobj,
1141 ".rela.bss",
1142 (bed->dynamic_sec_flags
1143 | SEC_READONLY));
1144 if (s == NULL
1145 || ! bfd_set_section_alignment (dynobj, s,
1146 bed->s->log_file_align))
1147 return FALSE;
1148 }
1149 htab->srelbss = s;
1150 }
1151
1152 if (!info->no_ld_generated_unwind_info
1153 && htab->plt_eh_frame == NULL
1154 && htab->elf.splt != NULL)
1155 {
1156 flagword flags = (SEC_ALLOC | SEC_LOAD | SEC_READONLY
1157 | SEC_HAS_CONTENTS | SEC_IN_MEMORY
1158 | SEC_LINKER_CREATED);
1159 htab->plt_eh_frame
1160 = bfd_make_section_anyway_with_flags (dynobj, ".eh_frame", flags);
1161 if (htab->plt_eh_frame == NULL
1162 || !bfd_set_section_alignment (dynobj, htab->plt_eh_frame, 3))
1163 return FALSE;
1164 }
1165 return TRUE;
1166 }
1167
1168 /* Copy the extra info we tack onto an elf_link_hash_entry. */
1169
1170 static void
1171 elf_x86_64_copy_indirect_symbol (struct bfd_link_info *info,
1172 struct elf_link_hash_entry *dir,
1173 struct elf_link_hash_entry *ind)
1174 {
1175 struct elf_x86_64_link_hash_entry *edir, *eind;
1176
1177 edir = (struct elf_x86_64_link_hash_entry *) dir;
1178 eind = (struct elf_x86_64_link_hash_entry *) ind;
1179
1180 if (!edir->has_bnd_reloc)
1181 edir->has_bnd_reloc = eind->has_bnd_reloc;
1182
1183 if (!edir->has_got_reloc)
1184 edir->has_got_reloc = eind->has_got_reloc;
1185
1186 if (!edir->has_non_got_reloc)
1187 edir->has_non_got_reloc = eind->has_non_got_reloc;
1188
1189 if (eind->dyn_relocs != NULL)
1190 {
1191 if (edir->dyn_relocs != NULL)
1192 {
1193 struct elf_dyn_relocs **pp;
1194 struct elf_dyn_relocs *p;
1195
1196 /* Add reloc counts against the indirect sym to the direct sym
1197 list. Merge any entries against the same section. */
1198 for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
1199 {
1200 struct elf_dyn_relocs *q;
1201
1202 for (q = edir->dyn_relocs; q != NULL; q = q->next)
1203 if (q->sec == p->sec)
1204 {
1205 q->pc_count += p->pc_count;
1206 q->count += p->count;
1207 *pp = p->next;
1208 break;
1209 }
1210 if (q == NULL)
1211 pp = &p->next;
1212 }
1213 *pp = edir->dyn_relocs;
1214 }
1215
1216 edir->dyn_relocs = eind->dyn_relocs;
1217 eind->dyn_relocs = NULL;
1218 }
1219
1220 if (ind->root.type == bfd_link_hash_indirect
1221 && dir->got.refcount <= 0)
1222 {
1223 edir->tls_type = eind->tls_type;
1224 eind->tls_type = GOT_UNKNOWN;
1225 }
1226
1227 if (ELIMINATE_COPY_RELOCS
1228 && ind->root.type != bfd_link_hash_indirect
1229 && dir->dynamic_adjusted)
1230 {
1231 /* If called to transfer flags for a weakdef during processing
1232 of elf_adjust_dynamic_symbol, don't copy non_got_ref.
1233 We clear it ourselves for ELIMINATE_COPY_RELOCS. */
1234 dir->ref_dynamic |= ind->ref_dynamic;
1235 dir->ref_regular |= ind->ref_regular;
1236 dir->ref_regular_nonweak |= ind->ref_regular_nonweak;
1237 dir->needs_plt |= ind->needs_plt;
1238 dir->pointer_equality_needed |= ind->pointer_equality_needed;
1239 }
1240 else
1241 {
1242 if (eind->func_pointer_refcount > 0)
1243 {
1244 edir->func_pointer_refcount += eind->func_pointer_refcount;
1245 eind->func_pointer_refcount = 0;
1246 }
1247
1248 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
1249 }
1250 }
1251
1252 static bfd_boolean
1253 elf64_x86_64_elf_object_p (bfd *abfd)
1254 {
1255 /* Set the right machine number for an x86-64 elf64 file. */
1256 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64);
1257 return TRUE;
1258 }
1259
1260 static bfd_boolean
1261 elf32_x86_64_elf_object_p (bfd *abfd)
1262 {
1263 /* Set the right machine number for an x86-64 elf32 file. */
1264 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32);
1265 return TRUE;
1266 }
1267
 1268 /* Return TRUE if the TLS access code sequence supports the transition
 1269 from R_TYPE. */
1270
1271 static bfd_boolean
1272 elf_x86_64_check_tls_transition (bfd *abfd,
1273 struct bfd_link_info *info,
1274 asection *sec,
1275 bfd_byte *contents,
1276 Elf_Internal_Shdr *symtab_hdr,
1277 struct elf_link_hash_entry **sym_hashes,
1278 unsigned int r_type,
1279 const Elf_Internal_Rela *rel,
1280 const Elf_Internal_Rela *relend)
1281 {
1282 unsigned int val;
1283 unsigned long r_symndx;
1284 bfd_boolean largepic = FALSE;
1285 struct elf_link_hash_entry *h;
1286 bfd_vma offset;
1287 struct elf_x86_64_link_hash_table *htab;
1288 bfd_byte *call;
1289 bfd_boolean indirect_call, tls_get_addr;
1290
1291 htab = elf_x86_64_hash_table (info);
1292 offset = rel->r_offset;
1293 switch (r_type)
1294 {
1295 case R_X86_64_TLSGD:
1296 case R_X86_64_TLSLD:
1297 if ((rel + 1) >= relend)
1298 return FALSE;
1299
1300 if (r_type == R_X86_64_TLSGD)
1301 {
1302 /* Check transition from GD access model. For 64bit, only
1303 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1304 .word 0x6666; rex64; call __tls_get_addr@PLT
1305 or
1306 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1307 .byte 0x66; rex64
1308 call *__tls_get_addr@GOTPCREL(%rip)
1309 which may be converted to
1310 addr32 call __tls_get_addr
 1311 can transit to a different access model.  For 32bit, only
1312 leaq foo@tlsgd(%rip), %rdi
1313 .word 0x6666; rex64; call __tls_get_addr@PLT
1314 or
1315 leaq foo@tlsgd(%rip), %rdi
1316 .byte 0x66; rex64
1317 call *__tls_get_addr@GOTPCREL(%rip)
1318 which may be converted to
1319 addr32 call __tls_get_addr
 1320 can transit to a different access model.  For largepic,
1321 we also support:
1322 leaq foo@tlsgd(%rip), %rdi
1323 movabsq $__tls_get_addr@pltoff, %rax
1324 addq $r15, %rax
1325 call *%rax
1326 or
1327 leaq foo@tlsgd(%rip), %rdi
1328 movabsq $__tls_get_addr@pltoff, %rax
1329 addq $rbx, %rax
1330 call *%rax */
1331
1332 static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d };
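/* These are the opcode bytes of ".byte 0x66; leaq foo@tlsgd(%rip), %rdi"
   up to the 32-bit displacement: 0x66 is the padding prefix, 0x48 the
   REX.W prefix, 0x8d the lea opcode and 0x3d the ModRM byte selecting
   %rdi with RIP-relative addressing.  */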
1333
1334 if ((offset + 12) > sec->size)
1335 return FALSE;
1336
1337 call = contents + offset + 4;
1338 if (call[0] != 0x66
1339 || !((call[1] == 0x48
1340 && call[2] == 0xff
1341 && call[3] == 0x15)
1342 || (call[1] == 0x48
1343 && call[2] == 0x67
1344 && call[3] == 0xe8)
1345 || (call[1] == 0x66
1346 && call[2] == 0x48
1347 && call[3] == 0xe8)))
1348 {
1349 if (!ABI_64_P (abfd)
1350 || (offset + 19) > sec->size
1351 || offset < 3
1352 || memcmp (call - 7, leaq + 1, 3) != 0
1353 || memcmp (call, "\x48\xb8", 2) != 0
1354 || call[11] != 0x01
1355 || call[13] != 0xff
1356 || call[14] != 0xd0
1357 || !((call[10] == 0x48 && call[12] == 0xd8)
1358 || (call[10] == 0x4c && call[12] == 0xf8)))
1359 return FALSE;
1360 largepic = TRUE;
1361 }
1362 else if (ABI_64_P (abfd))
1363 {
1364 if (offset < 4
1365 || memcmp (contents + offset - 4, leaq, 4) != 0)
1366 return FALSE;
1367 }
1368 else
1369 {
1370 if (offset < 3
1371 || memcmp (contents + offset - 3, leaq + 1, 3) != 0)
1372 return FALSE;
1373 }
1374 indirect_call = call[2] == 0xff;
1375 }
1376 else
1377 {
1378 /* Check transition from LD access model. Only
1379 leaq foo@tlsld(%rip), %rdi;
1380 call __tls_get_addr@PLT
1381 or
1382 leaq foo@tlsld(%rip), %rdi;
1383 call *__tls_get_addr@GOTPCREL(%rip)
1384 which may be converted to
1385 addr32 call __tls_get_addr
 1386 can transit to a different access model.  For largepic
1387 we also support:
1388 leaq foo@tlsld(%rip), %rdi
1389 movabsq $__tls_get_addr@pltoff, %rax
1390 addq $r15, %rax
1391 call *%rax
1392 or
1393 leaq foo@tlsld(%rip), %rdi
1394 movabsq $__tls_get_addr@pltoff, %rax
1395 addq $rbx, %rax
1396 call *%rax */
1397
1398 static const unsigned char lea[] = { 0x48, 0x8d, 0x3d };
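/* As above, but without the 0x66 padding byte: REX.W, the lea opcode
   and a ModRM byte selecting %rdi with RIP-relative addressing, i.e.
   the start of "leaq foo@tlsld(%rip), %rdi".  */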
1399
1400 if (offset < 3 || (offset + 9) > sec->size)
1401 return FALSE;
1402
1403 if (memcmp (contents + offset - 3, lea, 3) != 0)
1404 return FALSE;
1405
1406 call = contents + offset + 4;
1407 if (!(call[0] == 0xe8
1408 || (call[0] == 0xff && call[1] == 0x15)
1409 || (call[0] == 0x67 && call[1] == 0xe8)))
1410 {
1411 if (!ABI_64_P (abfd)
1412 || (offset + 19) > sec->size
1413 || memcmp (call, "\x48\xb8", 2) != 0
1414 || call[11] != 0x01
1415 || call[13] != 0xff
1416 || call[14] != 0xd0
1417 || !((call[10] == 0x48 && call[12] == 0xd8)
1418 || (call[10] == 0x4c && call[12] == 0xf8)))
1419 return FALSE;
1420 largepic = TRUE;
1421 }
1422 indirect_call = call[0] == 0xff;
1423 }
1424
1425 r_symndx = htab->r_sym (rel[1].r_info);
1426 if (r_symndx < symtab_hdr->sh_info)
1427 return FALSE;
1428
1429 tls_get_addr = FALSE;
1430 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1431 if (h != NULL && h->root.root.string != NULL)
1432 {
1433 struct elf_x86_64_link_hash_entry *eh
1434 = (struct elf_x86_64_link_hash_entry *) h;
1435 tls_get_addr = eh->tls_get_addr == 1;
1436 if (eh->tls_get_addr > 1)
1437 {
1438 /* Use strncmp to check __tls_get_addr since
1439 __tls_get_addr may be versioned. */
1440 if (strncmp (h->root.root.string, "__tls_get_addr", 14)
1441 == 0)
1442 {
1443 eh->tls_get_addr = 1;
1444 tls_get_addr = TRUE;
1445 }
1446 else
1447 eh->tls_get_addr = 0;
1448 }
1449 }
1450
1451 if (!tls_get_addr)
1452 return FALSE;
1453 else if (largepic)
1454 return ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PLTOFF64;
1455 else if (indirect_call)
1456 return ELF32_R_TYPE (rel[1].r_info) == R_X86_64_GOTPCRELX;
1457 else
1458 return (ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PC32
1459 || ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PLT32);
1460
1461 case R_X86_64_GOTTPOFF:
1462 /* Check transition from IE access model:
1463 mov foo@gottpoff(%rip), %reg
1464 add foo@gottpoff(%rip), %reg
1465 */
1466
1467 /* Check REX prefix first. */
1468 if (offset >= 3 && (offset + 4) <= sec->size)
1469 {
1470 val = bfd_get_8 (abfd, contents + offset - 3);
1471 if (val != 0x48 && val != 0x4c)
1472 {
1473 /* X32 may have 0x44 REX prefix or no REX prefix. */
1474 if (ABI_64_P (abfd))
1475 return FALSE;
1476 }
1477 }
1478 else
1479 {
1480 /* X32 may not have any REX prefix. */
1481 if (ABI_64_P (abfd))
1482 return FALSE;
1483 if (offset < 2 || (offset + 3) > sec->size)
1484 return FALSE;
1485 }
1486
1487 val = bfd_get_8 (abfd, contents + offset - 2);
1488 if (val != 0x8b && val != 0x03)
1489 return FALSE;
1490
1491 val = bfd_get_8 (abfd, contents + offset - 1);
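/* The ModRM byte must select RIP-relative addressing: mod == 00 and
   r/m == 101, i.e. (val & 0xc7) == 5, with any destination register
   allowed in the reg field.  */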
1492 return (val & 0xc7) == 5;
1493
1494 case R_X86_64_GOTPC32_TLSDESC:
1495 /* Check transition from GDesc access model:
1496 leaq x@tlsdesc(%rip), %rax
1497
1498 Make sure it's a leaq adding rip to a 32-bit offset
1499 into any register, although it's probably almost always
1500 going to be rax. */
1501
1502 if (offset < 3 || (offset + 4) > sec->size)
1503 return FALSE;
1504
1505 val = bfd_get_8 (abfd, contents + offset - 3);
1506 if ((val & 0xfb) != 0x48)
1507 return FALSE;
1508
1509 if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d)
1510 return FALSE;
1511
1512 val = bfd_get_8 (abfd, contents + offset - 1);
1513 return (val & 0xc7) == 0x05;
1514
1515 case R_X86_64_TLSDESC_CALL:
1516 /* Check transition from GDesc access model:
1517 call *x@tlsdesc(%rax)
1518 */
1519 if (offset + 2 <= sec->size)
1520 {
1521 /* Make sure that it's a call *x@tlsdesc(%rax). */
1522 call = contents + offset;
1523 return call[0] == 0xff && call[1] == 0x10;
1524 }
1525
1526 return FALSE;
1527
1528 default:
1529 abort ();
1530 }
1531 }
1532
1533 /* Return TRUE if the TLS access transition is OK or no transition
1534 will be performed. Update R_TYPE if there is a transition. */
1535
1536 static bfd_boolean
1537 elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd,
1538 asection *sec, bfd_byte *contents,
1539 Elf_Internal_Shdr *symtab_hdr,
1540 struct elf_link_hash_entry **sym_hashes,
1541 unsigned int *r_type, int tls_type,
1542 const Elf_Internal_Rela *rel,
1543 const Elf_Internal_Rela *relend,
1544 struct elf_link_hash_entry *h,
1545 unsigned long r_symndx,
1546 bfd_boolean from_relocate_section)
1547 {
1548 unsigned int from_type = *r_type;
1549 unsigned int to_type = from_type;
1550 bfd_boolean check = TRUE;
1551
1552 /* Skip TLS transition for functions. */
1553 if (h != NULL
1554 && (h->type == STT_FUNC
1555 || h->type == STT_GNU_IFUNC))
1556 return TRUE;
1557
1558 switch (from_type)
1559 {
1560 case R_X86_64_TLSGD:
1561 case R_X86_64_GOTPC32_TLSDESC:
1562 case R_X86_64_TLSDESC_CALL:
1563 case R_X86_64_GOTTPOFF:
1564 if (bfd_link_executable (info))
1565 {
1566 if (h == NULL)
1567 to_type = R_X86_64_TPOFF32;
1568 else
1569 to_type = R_X86_64_GOTTPOFF;
1570 }
1571
1572 /* When we are called from elf_x86_64_relocate_section, there may
1573 be additional transitions based on TLS_TYPE. */
1574 if (from_relocate_section)
1575 {
1576 unsigned int new_to_type = to_type;
1577
1578 if (bfd_link_executable (info)
1579 && h != NULL
1580 && h->dynindx == -1
1581 && tls_type == GOT_TLS_IE)
1582 new_to_type = R_X86_64_TPOFF32;
1583
1584 if (to_type == R_X86_64_TLSGD
1585 || to_type == R_X86_64_GOTPC32_TLSDESC
1586 || to_type == R_X86_64_TLSDESC_CALL)
1587 {
1588 if (tls_type == GOT_TLS_IE)
1589 new_to_type = R_X86_64_GOTTPOFF;
1590 }
1591
1592 /* We checked the transition before when we were called from
1593 elf_x86_64_check_relocs. We only want to check the new
1594 transition which hasn't been checked before. */
1595 check = new_to_type != to_type && from_type == to_type;
1596 to_type = new_to_type;
1597 }
1598
1599 break;
1600
1601 case R_X86_64_TLSLD:
1602 if (bfd_link_executable (info))
1603 to_type = R_X86_64_TPOFF32;
1604 break;
1605
1606 default:
1607 return TRUE;
1608 }
1609
1610 /* Return TRUE if there is no transition. */
1611 if (from_type == to_type)
1612 return TRUE;
1613
1614 /* Check if the transition can be performed. */
1615 if (check
1616 && ! elf_x86_64_check_tls_transition (abfd, info, sec, contents,
1617 symtab_hdr, sym_hashes,
1618 from_type, rel, relend))
1619 {
1620 reloc_howto_type *from, *to;
1621 const char *name;
1622
1623 from = elf_x86_64_rtype_to_howto (abfd, from_type);
1624 to = elf_x86_64_rtype_to_howto (abfd, to_type);
1625
1626 if (h)
1627 name = h->root.root.string;
1628 else
1629 {
1630 struct elf_x86_64_link_hash_table *htab;
1631
1632 htab = elf_x86_64_hash_table (info);
1633 if (htab == NULL)
1634 name = "*unknown*";
1635 else
1636 {
1637 Elf_Internal_Sym *isym;
1638
1639 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1640 abfd, r_symndx);
1641 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1642 }
1643 }
1644
1645 (*_bfd_error_handler)
1646 (_("%B: TLS transition from %s to %s against `%s' at 0x%lx "
1647 "in section `%A' failed"),
1648 abfd, sec, from->name, to->name, name,
1649 (unsigned long) rel->r_offset);
1650 bfd_set_error (bfd_error_bad_value);
1651 return FALSE;
1652 }
1653
1654 *r_type = to_type;
1655 return TRUE;
1656 }
1657
1658 /* Rename some of the generic section flags to better document how they
1659 are used here. */
1660 #define need_convert_load sec_flg0
1661 #define check_relocs_failed sec_flg1
1662
1663 static bfd_boolean
1664 elf_x86_64_need_pic (bfd *input_bfd, asection *sec,
1665 struct elf_link_hash_entry *h,
1666 Elf_Internal_Shdr *symtab_hdr,
1667 Elf_Internal_Sym *isym,
1668 reloc_howto_type *howto)
1669 {
1670 const char *v = "";
1671 const char *und = "";
1672 const char *pic = "";
1673
1674 const char *name;
1675 if (h)
1676 {
1677 name = h->root.root.string;
1678 switch (ELF_ST_VISIBILITY (h->other))
1679 {
1680 case STV_HIDDEN:
1681 v = _("hidden symbol ");
1682 break;
1683 case STV_INTERNAL:
1684 v = _("internal symbol ");
1685 break;
1686 case STV_PROTECTED:
1687 v = _("protected symbol ");
1688 break;
1689 default:
1690 v = _("symbol ");
1691 pic = _("; recompile with -fPIC");
1692 break;
1693 }
1694
1695 if (!h->def_regular && !h->def_dynamic)
1696 und = _("undefined ");
1697 }
1698 else
1699 {
1700 name = bfd_elf_sym_name (input_bfd, symtab_hdr, isym, NULL);
1701 pic = _("; recompile with -fPIC");
1702 }
1703
 1704 (*_bfd_error_handler) (_("%B: relocation %s against %s%s`%s' "
 1705 "cannot be used when making a shared object%s"),
1706 input_bfd, howto->name, und, v, name, pic);
1707 bfd_set_error (bfd_error_bad_value);
1708 sec->check_relocs_failed = 1;
1709 return FALSE;
1710 }
1711
1712 /* With the local symbol, foo, we convert
1713 mov foo@GOTPCREL(%rip), %reg
1714 to
1715 lea foo(%rip), %reg
1716 and convert
1717 call/jmp *foo@GOTPCREL(%rip)
1718 to
1719 nop call foo/jmp foo nop
1720 When PIC is false, convert
1721 test %reg, foo@GOTPCREL(%rip)
1722 to
1723 test $foo, %reg
1724 and convert
1725 binop foo@GOTPCREL(%rip), %reg
1726 to
1727 binop $foo, %reg
1728 where binop is one of adc, add, and, cmp, or, sbb, sub, xor
1729 instructions. */
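/* A sketch of the simplest case: with a REX.W prefix,
     48 8b 05 <rel32>    mov foo@GOTPCREL(%rip), %rax
   becomes
     48 8d 05 <rel32>    lea foo(%rip), %rax
   i.e. only the opcode byte 0x8b is rewritten to 0x8d; the ModRM byte
   and the displacement field keep their positions, so the section size
   is unchanged.  */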
1730
1731 static bfd_boolean
1732 elf_x86_64_convert_load_reloc (bfd *abfd, asection *sec,
1733 bfd_byte *contents,
1734 Elf_Internal_Rela *irel,
1735 struct elf_link_hash_entry *h,
1736 bfd_boolean *converted,
1737 struct bfd_link_info *link_info)
1738 {
1739 struct elf_x86_64_link_hash_table *htab;
1740 bfd_boolean is_pic;
1741 bfd_boolean require_reloc_pc32;
1742 bfd_boolean relocx;
1743 bfd_boolean to_reloc_pc32;
1744 asection *tsec;
1745 char symtype;
1746 bfd_signed_vma raddend;
1747 unsigned int opcode;
1748 unsigned int modrm;
1749 unsigned int r_type = ELF32_R_TYPE (irel->r_info);
1750 unsigned int r_symndx;
1751 bfd_vma toff;
1752 bfd_vma roff = irel->r_offset;
1753
1754 if (roff < (r_type == R_X86_64_REX_GOTPCRELX ? 3 : 2))
1755 return TRUE;
1756
1757 raddend = irel->r_addend;
1758 /* Addend for 32-bit PC-relative relocation must be -4. */
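/* The displacement is the last 4 bytes of the instruction and the CPU
   forms RIP-relative addresses from the end of the instruction, so a
   plain @GOTPCREL memory operand carries an addend of -4.  Any other
   addend (e.g. when an immediate follows the displacement) is left
   alone.  */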
1759 if (raddend != -4)
1760 return TRUE;
1761
1762 htab = elf_x86_64_hash_table (link_info);
1763 is_pic = bfd_link_pic (link_info);
1764
1765 relocx = (r_type == R_X86_64_GOTPCRELX
1766 || r_type == R_X86_64_REX_GOTPCRELX);
1767
1768 /* TRUE if we can convert only to R_X86_64_PC32. Enable it for
1769 --no-relax. */
1770 require_reloc_pc32
1771 = link_info->disable_target_specific_optimizations > 1;
1772
1773 r_symndx = htab->r_sym (irel->r_info);
1774
1775 opcode = bfd_get_8 (abfd, contents + roff - 2);
1776
1777 /* Convert mov to lea even for R_X86_64_GOTPCREL since this conversion has been done for a while. */
1778 if (opcode != 0x8b)
1779 {
1780 /* Only convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX
1781 for call, jmp or one of adc, add, and, cmp, or, sbb, sub,
1782 test, xor instructions. */
1783 if (!relocx)
1784 return TRUE;
1785 }
1786
1787 /* We convert only to R_X86_64_PC32:
1788 1. Branch.
1789 2. R_X86_64_GOTPCREL since we can't modify REX byte.
1790 3. require_reloc_pc32 is true.
1791 4. PIC.
1792 */
1793 to_reloc_pc32 = (opcode == 0xff
1794 || !relocx
1795 || require_reloc_pc32
1796 || is_pic);
1797
1798 /* Get the symbol referred to by the reloc. */
1799 if (h == NULL)
1800 {
1801 Elf_Internal_Sym *isym
1802 = bfd_sym_from_r_symndx (&htab->sym_cache, abfd, r_symndx);
1803
1804 /* Skip relocation against undefined symbols. */
1805 if (isym->st_shndx == SHN_UNDEF)
1806 return TRUE;
1807
1808 symtype = ELF_ST_TYPE (isym->st_info);
1809
1810 if (isym->st_shndx == SHN_ABS)
1811 tsec = bfd_abs_section_ptr;
1812 else if (isym->st_shndx == SHN_COMMON)
1813 tsec = bfd_com_section_ptr;
1814 else if (isym->st_shndx == SHN_X86_64_LCOMMON)
1815 tsec = &_bfd_elf_large_com_section;
1816 else
1817 tsec = bfd_section_from_elf_index (abfd, isym->st_shndx);
1818
1819 toff = isym->st_value;
1820 }
1821 else
1822 {
1823 /* An undefined weak symbol is only bound locally in an executable
1824 and its reference is resolved as 0 without relocation
1825 overflow. We can only perform this optimization for
1826 GOTPCRELX relocations since we need to modify the REX byte.
1827 It is OK to convert mov with R_X86_64_GOTPCREL to
1828 R_X86_64_PC32. */
1829 if ((relocx || opcode == 0x8b)
1830 && UNDEFINED_WEAK_RESOLVED_TO_ZERO (link_info,
1831 TRUE,
1832 elf_x86_64_hash_entry (h)))
1833 {
1834 if (opcode == 0xff)
1835 {
1836 /* Skip for branch instructions since R_X86_64_PC32
1837 may overflow. */
1838 if (require_reloc_pc32)
1839 return TRUE;
1840 }
1841 else if (relocx)
1842 {
1843 /* For non-branch instructions, we can convert to
1844 R_X86_64_32/R_X86_64_32S since we know if there
1845 is a REX byte. */
1846 to_reloc_pc32 = FALSE;
1847 }
1848
1849 /* Since we don't know the current PC when PIC is true,
1850 we can't convert to R_X86_64_PC32. */
1851 if (to_reloc_pc32 && is_pic)
1852 return TRUE;
1853
1854 goto convert;
1855 }
1856 /* Avoid optimizing GOTPCREL relocations against _DYNAMIC since
1857 ld.so may use its link-time address. */
1858 else if ((h->def_regular
1859 || h->root.type == bfd_link_hash_defined
1860 || h->root.type == bfd_link_hash_defweak)
1861 && h != htab->elf.hdynamic
1862 && SYMBOL_REFERENCES_LOCAL (link_info, h))
1863 {
1864 /* bfd_link_hash_new or bfd_link_hash_undefined is
1865 set by an assignment in a linker script in
1866 bfd_elf_record_link_assignment. */
1867 if (h->def_regular
1868 && (h->root.type == bfd_link_hash_new
1869 || h->root.type == bfd_link_hash_undefined))
1870 {
1871 /* Skip since R_X86_64_32/R_X86_64_32S may overflow. */
1872 if (require_reloc_pc32)
1873 return TRUE;
1874 goto convert;
1875 }
1876 tsec = h->root.u.def.section;
1877 toff = h->root.u.def.value;
1878 symtype = h->type;
1879 }
1880 else
1881 return TRUE;
1882 }
1883
1884 /* Don't convert GOTPCREL relocation against large section. */
1885 if (elf_section_data (tsec) != NULL
1886 && (elf_section_flags (tsec) & SHF_X86_64_LARGE) != 0)
1887 return TRUE;
1888
1889 /* We can only estimate relocation overflow for R_X86_64_PC32. */
1890 if (!to_reloc_pc32)
1891 goto convert;
1892
1893 if (tsec->sec_info_type == SEC_INFO_TYPE_MERGE)
1894 {
1895 /* At this stage in linking, no SEC_MERGE symbol has been
1896 adjusted, so all references to such symbols need to be
1897 passed through _bfd_merged_section_offset. (Later, in
1898 relocate_section, all SEC_MERGE symbols *except* for
1899 section symbols have been adjusted.)
1900
1901 gas may reduce relocations against symbols in SEC_MERGE
1902 sections to a relocation against the section symbol when
1903 the original addend was zero. When the reloc is against
1904 a section symbol we should include the addend in the
1905 offset passed to _bfd_merged_section_offset, since the
1906 location of interest is the original symbol. On the
1907 other hand, an access to "sym+addend" where "sym" is not
1908 a section symbol should not include the addend; such an
1909 access is presumed to be an offset from "sym"; the
1910 location of interest is just "sym". */
1911 if (symtype == STT_SECTION)
1912 toff += raddend;
1913
1914 toff = _bfd_merged_section_offset (abfd, &tsec,
1915 elf_section_data (tsec)->sec_info,
1916 toff);
1917
1918 if (symtype != STT_SECTION)
1919 toff += raddend;
1920 }
1921 else
1922 toff += raddend;
1923
1924 /* Don't convert if R_X86_64_PC32 relocation overflows. */
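/* Both checks below use the usual unsigned trick: X + 0x80000000 fits
   in 32 bits exactly when X, viewed as a signed value, lies in
   [-0x80000000, 0x7fffffff], i.e. when it fits a signed 32-bit
   displacement.  */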
1925 if (tsec->output_section == sec->output_section)
1926 {
1927 if ((toff - roff + 0x80000000) > 0xffffffff)
1928 return TRUE;
1929 }
1930 else
1931 {
1932 bfd_signed_vma distance;
1933
1934 /* At this point, we don't know the load addresses of either the
1935 TSEC section or the SEC section. We estimate the distance
1936 between SEC and TSEC. We store the estimated distances in the
1937 compressed_size field of the output section, which is otherwise
1938 only used to decompress compressed input sections. */
1939 if (sec->output_section->compressed_size == 0)
1940 {
1941 asection *asect;
1942 bfd_size_type size = 0;
1943 for (asect = link_info->output_bfd->sections;
1944 asect != NULL;
1945 asect = asect->next)
1946 /* Skip debug sections since compressed_size is used to
1947 compress debug sections. */
1948 if ((asect->flags & SEC_DEBUGGING) == 0)
1949 {
1950 asection *i;
1951 for (i = asect->map_head.s;
1952 i != NULL;
1953 i = i->map_head.s)
1954 {
1955 size = align_power (size, i->alignment_power);
1956 size += i->size;
1957 }
1958 asect->compressed_size = size;
1959 }
1960 }
1961
1962 /* Don't convert GOTPCREL relocations if TSEC isn't placed
1963 after SEC. */
1964 distance = (tsec->output_section->compressed_size
1965 - sec->output_section->compressed_size);
1966 if (distance < 0)
1967 return TRUE;
1968
1969 /* Take PT_GNU_RELRO segment into account by adding
1970 maxpagesize. */
1971 if ((toff + distance + get_elf_backend_data (abfd)->maxpagesize
1972 - roff + 0x80000000) > 0xffffffff)
1973 return TRUE;
1974 }
1975
1976 convert:
1977 if (opcode == 0xff)
1978 {
1979 /* We have "call/jmp *foo@GOTPCREL(%rip)". */
1980 unsigned int nop;
1981 unsigned int disp;
1982 bfd_vma nop_offset;
1983
1984 /* Convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX to
1985 R_X86_64_PC32. */
1986 modrm = bfd_get_8 (abfd, contents + roff - 1);
1987 if (modrm == 0x25)
1988 {
1989 /* Convert to "jmp foo nop". */
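/* Roughly: ff 25 <disp32>, i.e. "jmp *foo@GOTPCREL(%rip)", becomes
   e9 <disp32> 90, i.e. "jmp foo; nop", so the 6-byte length is kept
   and the relocation moves back one byte to cover the new
   displacement field.  */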
1990 modrm = 0xe9;
1991 nop = NOP_OPCODE;
1992 nop_offset = irel->r_offset + 3;
1993 disp = bfd_get_32 (abfd, contents + irel->r_offset);
1994 irel->r_offset -= 1;
1995 bfd_put_32 (abfd, disp, contents + irel->r_offset);
1996 }
1997 else
1998 {
1999 struct elf_x86_64_link_hash_entry *eh
2000 = (struct elf_x86_64_link_hash_entry *) h;
2001
2002 /* Convert to "nop call foo". ADDR_PREFIX_OPCODE
2003 is a nop prefix. */
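/* Roughly: ff 15 <disp32>, i.e. "call *foo@GOTPCREL(%rip)", becomes
   67 e8 <disp32> for the __tls_get_addr case below, and otherwise a
   5-byte "call foo" padded to 6 bytes with the configured
   call_nop_byte, placed as either a prefix or a suffix.  */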
2004 modrm = 0xe8;
2005 /* To support TLS optimization, always use addr32 prefix for
2006 "call *__tls_get_addr@GOTPCREL(%rip)". */
2007 if (eh && eh->tls_get_addr == 1)
2008 {
2009 nop = 0x67;
2010 nop_offset = irel->r_offset - 2;
2011 }
2012 else
2013 {
2014 nop = link_info->call_nop_byte;
2015 if (link_info->call_nop_as_suffix)
2016 {
2017 nop_offset = irel->r_offset + 3;
2018 disp = bfd_get_32 (abfd, contents + irel->r_offset);
2019 irel->r_offset -= 1;
2020 bfd_put_32 (abfd, disp, contents + irel->r_offset);
2021 }
2022 else
2023 nop_offset = irel->r_offset - 2;
2024 }
2025 }
2026 bfd_put_8 (abfd, nop, contents + nop_offset);
2027 bfd_put_8 (abfd, modrm, contents + irel->r_offset - 1);
2028 r_type = R_X86_64_PC32;
2029 }
2030 else
2031 {
2032 unsigned int rex;
2033 unsigned int rex_mask = REX_R;
2034
2035 if (r_type == R_X86_64_REX_GOTPCRELX)
2036 rex = bfd_get_8 (abfd, contents + roff - 3);
2037 else
2038 rex = 0;
2039
2040 if (opcode == 0x8b)
2041 {
2042 if (to_reloc_pc32)
2043 {
2044 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
2045 "lea foo(%rip), %reg". */
2046 opcode = 0x8d;
2047 r_type = R_X86_64_PC32;
2048 }
2049 else
2050 {
2051 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
2052 "mov $foo, %reg". */
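/* The destination register moves from the ModRM reg field into the
   rm field (mod == 3), since c7 /0 takes its destination in rm and
   an imm32 in place of the memory operand.  */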
2053 opcode = 0xc7;
2054 modrm = bfd_get_8 (abfd, contents + roff - 1);
2055 modrm = 0xc0 | (modrm & 0x38) >> 3;
2056 if ((rex & REX_W) != 0
2057 && ABI_64_P (link_info->output_bfd))
2058 {
2059 /* Keep the REX_W bit in REX byte for LP64. */
2060 r_type = R_X86_64_32S;
2061 goto rewrite_modrm_rex;
2062 }
2063 else
2064 {
2065 /* If the REX_W bit in REX byte isn't needed,
2066 use R_X86_64_32 and clear the W bit to avoid
2067 sign-extend imm32 to imm64. */
2068 r_type = R_X86_64_32;
2069 /* Clear the W bit in REX byte. */
2070 rex_mask |= REX_W;
2071 goto rewrite_modrm_rex;
2072 }
2073 }
2074 }
2075 else
2076 {
2077 /* R_X86_64_PC32 isn't supported. */
2078 if (to_reloc_pc32)
2079 return TRUE;
2080
2081 modrm = bfd_get_8 (abfd, contents + roff - 1);
2082 if (opcode == 0x85)
2083 {
2084 /* Convert "test %reg, foo@GOTPCREL(%rip)" to
2085 "test $foo, %reg". */
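/* 85 /r with a RIP-relative memory operand becomes f7 /0 with an
   imm32; as above, the register moves from the reg field into rm.  */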
2086 modrm = 0xc0 | (modrm & 0x38) >> 3;
2087 opcode = 0xf7;
2088 }
2089 else
2090 {
2091 /* Convert "binop foo@GOTPCREL(%rip), %reg" to
2092 "binop $foo, %reg". */
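/* The 81 /digit opcode extension is recovered from bits 3..5 of the
   original load-form opcode (e.g. 2b, "sub mem, %reg", maps to
   81 /5), which is what the (opcode & 0x3c) term supplies.  */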
2093 modrm = 0xc0 | (modrm & 0x38) >> 3 | (opcode & 0x3c);
2094 opcode = 0x81;
2095 }
2096
2097 /* Use R_X86_64_32 with 32-bit operand to avoid relocation
2098 overflow when sign-extending imm32 to imm64. */
2099 r_type = (rex & REX_W) != 0 ? R_X86_64_32S : R_X86_64_32;
2100
2101 rewrite_modrm_rex:
2102 bfd_put_8 (abfd, modrm, contents + roff - 1);
2103
2104 if (rex)
2105 {
2106 /* Move the R bit to the B bit in REX byte. */
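/* The register moved from the ModRM reg field into the rm field, so
   its REX extension must move from REX.R (bit 2) to REX.B (bit 0);
   REX.W is also cleared when rex_mask includes it.  */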
2107 rex = (rex & ~rex_mask) | (rex & REX_R) >> 2;
2108 bfd_put_8 (abfd, rex, contents + roff - 3);
2109 }
2110
2111 /* No addend for R_X86_64_32/R_X86_64_32S relocations. */
2112 irel->r_addend = 0;
2113 }
2114
2115 bfd_put_8 (abfd, opcode, contents + roff - 2);
2116 }
2117
2118 irel->r_info = htab->r_info (r_symndx, r_type);
2119
2120 *converted = TRUE;
2121
2122 return TRUE;
2123 }
2124
2125 /* Look through the relocs for a section during the first phase, and
2126 calculate needed space in the global offset table, procedure
2127 linkage table, and dynamic reloc sections. */
2128
2129 static bfd_boolean
2130 elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info,
2131 asection *sec,
2132 const Elf_Internal_Rela *relocs)
2133 {
2134 struct elf_x86_64_link_hash_table *htab;
2135 Elf_Internal_Shdr *symtab_hdr;
2136 struct elf_link_hash_entry **sym_hashes;
2137 const Elf_Internal_Rela *rel;
2138 const Elf_Internal_Rela *rel_end;
2139 asection *sreloc;
2140 bfd_byte *contents;
2141 bfd_boolean use_plt_got;
2142
2143 if (bfd_link_relocatable (info))
2144 return TRUE;
2145
2146 /* Don't do anything special with non-loaded, non-alloced sections.
2147 In particular, any relocs in such sections should not affect GOT
2148 and PLT reference counting (i.e. we don't allow them to create GOT
2149 or PLT entries), there's no possibility or desire to optimize TLS
2150 relocs, and there's not much point in propagating relocs to shared
2151 libs that the dynamic linker won't relocate. */
2152 if ((sec->flags & SEC_ALLOC) == 0)
2153 return TRUE;
2154
2155 BFD_ASSERT (is_x86_64_elf (abfd));
2156
2157 htab = elf_x86_64_hash_table (info);
2158 if (htab == NULL)
2159 {
2160 sec->check_relocs_failed = 1;
2161 return FALSE;
2162 }
2163
2164 /* Get the section contents. */
2165 if (elf_section_data (sec)->this_hdr.contents != NULL)
2166 contents = elf_section_data (sec)->this_hdr.contents;
2167 else if (!bfd_malloc_and_get_section (abfd, sec, &contents))
2168 {
2169 sec->check_relocs_failed = 1;
2170 return FALSE;
2171 }
2172
2173 use_plt_got = get_elf_x86_64_backend_data (abfd) == &elf_x86_64_arch_bed;
2174
2175 symtab_hdr = &elf_symtab_hdr (abfd);
2176 sym_hashes = elf_sym_hashes (abfd);
2177
2178 sreloc = NULL;
2179
2180 rel_end = relocs + sec->reloc_count;
2181 for (rel = relocs; rel < rel_end; rel++)
2182 {
2183 unsigned int r_type;
2184 unsigned long r_symndx;
2185 struct elf_link_hash_entry *h;
2186 struct elf_x86_64_link_hash_entry *eh;
2187 Elf_Internal_Sym *isym;
2188 const char *name;
2189 bfd_boolean size_reloc;
2190
2191 r_symndx = htab->r_sym (rel->r_info);
2192 r_type = ELF32_R_TYPE (rel->r_info);
2193
2194 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
2195 {
2196 (*_bfd_error_handler) (_("%B: bad symbol index: %d"),
2197 abfd, r_symndx);
2198 goto error_return;
2199 }
2200
2201 if (r_symndx < symtab_hdr->sh_info)
2202 {
2203 /* A local symbol. */
2204 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2205 abfd, r_symndx);
2206 if (isym == NULL)
2207 goto error_return;
2208
2209 /* Check relocation against local STT_GNU_IFUNC symbol. */
2210 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
2211 {
2212 h = elf_x86_64_get_local_sym_hash (htab, abfd, rel,
2213 TRUE);
2214 if (h == NULL)
2215 goto error_return;
2216
2217 /* Fake a STT_GNU_IFUNC symbol. */
2218 h->type = STT_GNU_IFUNC;
2219 h->def_regular = 1;
2220 h->ref_regular = 1;
2221 h->forced_local = 1;
2222 h->root.type = bfd_link_hash_defined;
2223 }
2224 else
2225 h = NULL;
2226 }
2227 else
2228 {
2229 isym = NULL;
2230 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
2231 while (h->root.type == bfd_link_hash_indirect
2232 || h->root.type == bfd_link_hash_warning)
2233 h = (struct elf_link_hash_entry *) h->root.u.i.link;
2234 }
2235
2236 /* Check invalid x32 relocations. */
2237 if (!ABI_64_P (abfd))
2238 switch (r_type)
2239 {
2240 default:
2241 break;
2242
2243 case R_X86_64_DTPOFF64:
2244 case R_X86_64_TPOFF64:
2245 case R_X86_64_PC64:
2246 case R_X86_64_GOTOFF64:
2247 case R_X86_64_GOT64:
2248 case R_X86_64_GOTPCREL64:
2249 case R_X86_64_GOTPC64:
2250 case R_X86_64_GOTPLT64:
2251 case R_X86_64_PLTOFF64:
2252 {
2253 if (h)
2254 name = h->root.root.string;
2255 else
2256 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
2257 NULL);
2258 (*_bfd_error_handler)
2259 (_("%B: relocation %s against symbol `%s' isn't "
2260 "supported in x32 mode"), abfd,
2261 x86_64_elf_howto_table[r_type].name, name);
2262 bfd_set_error (bfd_error_bad_value);
2263 goto error_return;
2264 }
2265 break;
2266 }
2267
2268 if (h != NULL)
2269 {
2270 switch (r_type)
2271 {
2272 default:
2273 break;
2274
2275 case R_X86_64_PC32_BND:
2276 case R_X86_64_PLT32_BND:
2277 case R_X86_64_PC32:
2278 case R_X86_64_PLT32:
2279 case R_X86_64_32:
2280 case R_X86_64_64:
2281 /* MPX PLT is supported only if elf_x86_64_arch_bed
2282 is used in 64-bit mode. */
2283 if (ABI_64_P (abfd)
2284 && info->bndplt
2285 && (get_elf_x86_64_backend_data (abfd)
2286 == &elf_x86_64_arch_bed))
2287 {
2288 elf_x86_64_hash_entry (h)->has_bnd_reloc = 1;
2289
2290 /* Create the second PLT for Intel MPX support. */
2291 if (htab->plt_bnd == NULL)
2292 {
2293 unsigned int plt_bnd_align;
2294 const struct elf_backend_data *bed;
2295
2296 bed = get_elf_backend_data (info->output_bfd);
2297 BFD_ASSERT (sizeof (elf_x86_64_bnd_plt2_entry) == 8
2298 && (sizeof (elf_x86_64_bnd_plt2_entry)
2299 == sizeof (elf_x86_64_legacy_plt2_entry)));
2300 plt_bnd_align = 3;
2301
2302 if (htab->elf.dynobj == NULL)
2303 htab->elf.dynobj = abfd;
2304 htab->plt_bnd
2305 = bfd_make_section_anyway_with_flags (htab->elf.dynobj,
2306 ".plt.bnd",
2307 (bed->dynamic_sec_flags
2308 | SEC_ALLOC
2309 | SEC_CODE
2310 | SEC_LOAD
2311 | SEC_READONLY));
2312 if (htab->plt_bnd == NULL
2313 || !bfd_set_section_alignment (htab->elf.dynobj,
2314 htab->plt_bnd,
2315 plt_bnd_align))
2316 goto error_return;
2317 }
2318 }
2319
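/* Fall through.  */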
2320 case R_X86_64_32S:
2321 case R_X86_64_PC64:
2322 case R_X86_64_GOTPCREL:
2323 case R_X86_64_GOTPCRELX:
2324 case R_X86_64_REX_GOTPCRELX:
2325 case R_X86_64_GOTPCREL64:
2326 if (htab->elf.dynobj == NULL)
2327 htab->elf.dynobj = abfd;
2328 /* Create the ifunc sections for static executables. */
2329 if (h->type == STT_GNU_IFUNC
2330 && !_bfd_elf_create_ifunc_sections (htab->elf.dynobj,
2331 info))
2332 goto error_return;
2333 break;
2334 }
2335
2336 /* It is referenced by a non-shared object. */
2337 h->ref_regular = 1;
2338 h->root.non_ir_ref = 1;
2339
2340 if (h->type == STT_GNU_IFUNC)
2341 elf_tdata (info->output_bfd)->has_gnu_symbols
2342 |= elf_gnu_symbol_ifunc;
2343 }
2344
2345 if (! elf_x86_64_tls_transition (info, abfd, sec, contents,
2346 symtab_hdr, sym_hashes,
2347 &r_type, GOT_UNKNOWN,
2348 rel, rel_end, h, r_symndx, FALSE))
2349 goto error_return;
2350
2351 eh = (struct elf_x86_64_link_hash_entry *) h;
2352 switch (r_type)
2353 {
2354 case R_X86_64_TLSLD:
2355 htab->tls_ld_got.refcount += 1;
2356 goto create_got;
2357
2358 case R_X86_64_TPOFF32:
2359 if (!bfd_link_executable (info) && ABI_64_P (abfd))
2360 return elf_x86_64_need_pic (abfd, sec, h, symtab_hdr, isym,
2361 &x86_64_elf_howto_table[r_type]);
2362 if (eh != NULL)
2363 eh->has_got_reloc = 1;
2364 break;
2365
2366 case R_X86_64_GOTTPOFF:
2367 if (!bfd_link_executable (info))
2368 info->flags |= DF_STATIC_TLS;
2369 /* Fall through */
2370
2371 case R_X86_64_GOT32:
2372 case R_X86_64_GOTPCREL:
2373 case R_X86_64_GOTPCRELX:
2374 case R_X86_64_REX_GOTPCRELX:
2375 case R_X86_64_TLSGD:
2376 case R_X86_64_GOT64:
2377 case R_X86_64_GOTPCREL64:
2378 case R_X86_64_GOTPLT64:
2379 case R_X86_64_GOTPC32_TLSDESC:
2380 case R_X86_64_TLSDESC_CALL:
2381 /* This symbol requires a global offset table entry. */
2382 {
2383 int tls_type, old_tls_type;
2384
2385 switch (r_type)
2386 {
2387 default: tls_type = GOT_NORMAL; break;
2388 case R_X86_64_TLSGD: tls_type = GOT_TLS_GD; break;
2389 case R_X86_64_GOTTPOFF: tls_type = GOT_TLS_IE; break;
2390 case R_X86_64_GOTPC32_TLSDESC:
2391 case R_X86_64_TLSDESC_CALL:
2392 tls_type = GOT_TLS_GDESC; break;
2393 }
2394
2395 if (h != NULL)
2396 {
2397 h->got.refcount += 1;
2398 old_tls_type = eh->tls_type;
2399 }
2400 else
2401 {
2402 bfd_signed_vma *local_got_refcounts;
2403
2404 /* This is a global offset table entry for a local symbol. */
2405 local_got_refcounts = elf_local_got_refcounts (abfd);
2406 if (local_got_refcounts == NULL)
2407 {
2408 bfd_size_type size;
2409
2410 size = symtab_hdr->sh_info;
2411 size *= sizeof (bfd_signed_vma)
2412 + sizeof (bfd_vma) + sizeof (char);
2413 local_got_refcounts = ((bfd_signed_vma *)
2414 bfd_zalloc (abfd, size));
2415 if (local_got_refcounts == NULL)
2416 goto error_return;
2417 elf_local_got_refcounts (abfd) = local_got_refcounts;
2418 elf_x86_64_local_tlsdesc_gotent (abfd)
2419 = (bfd_vma *) (local_got_refcounts + symtab_hdr->sh_info);
2420 elf_x86_64_local_got_tls_type (abfd)
2421 = (char *) (local_got_refcounts + 2 * symtab_hdr->sh_info);
2422 }
2423 local_got_refcounts[r_symndx] += 1;
2424 old_tls_type
2425 = elf_x86_64_local_got_tls_type (abfd) [r_symndx];
2426 }
2427
2428 /* If a TLS symbol is accessed using IE at least once,
2429 there is no point in using a dynamic model for it. */
2430 if (old_tls_type != tls_type && old_tls_type != GOT_UNKNOWN
2431 && (! GOT_TLS_GD_ANY_P (old_tls_type)
2432 || tls_type != GOT_TLS_IE))
2433 {
2434 if (old_tls_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (tls_type))
2435 tls_type = old_tls_type;
2436 else if (GOT_TLS_GD_ANY_P (old_tls_type)
2437 && GOT_TLS_GD_ANY_P (tls_type))
2438 tls_type |= old_tls_type;
2439 else
2440 {
2441 if (h)
2442 name = h->root.root.string;
2443 else
2444 name = bfd_elf_sym_name (abfd, symtab_hdr,
2445 isym, NULL);
2446 (*_bfd_error_handler)
2447 (_("%B: '%s' accessed both as a normal and a thread local symbol"),
2448 abfd, name);
2449 bfd_set_error (bfd_error_bad_value);
2450 goto error_return;
2451 }
2452 }
2453
2454 if (old_tls_type != tls_type)
2455 {
2456 if (eh != NULL)
2457 eh->tls_type = tls_type;
2458 else
2459 elf_x86_64_local_got_tls_type (abfd) [r_symndx] = tls_type;
2460 }
2461 }
2462 /* Fall through */
2463
2464 case R_X86_64_GOTOFF64:
2465 case R_X86_64_GOTPC32:
2466 case R_X86_64_GOTPC64:
2467 create_got:
2468 if (eh != NULL)
2469 eh->has_got_reloc = 1;
2470 if (htab->elf.sgot == NULL)
2471 {
2472 if (htab->elf.dynobj == NULL)
2473 htab->elf.dynobj = abfd;
2474 if (!_bfd_elf_create_got_section (htab->elf.dynobj,
2475 info))
2476 goto error_return;
2477 }
2478 break;
2479
2480 case R_X86_64_PLT32:
2481 case R_X86_64_PLT32_BND:
2482 /* This symbol requires a procedure linkage table entry. We
2483 actually build the entry in adjust_dynamic_symbol,
2484 because this might be a case of linking PIC code which is
2485 never referenced by a dynamic object, in which case we
2486 don't need to generate a procedure linkage table entry
2487 after all. */
2488
2489 /* If this is a local symbol, we resolve it directly without
2490 creating a procedure linkage table entry. */
2491 if (h == NULL)
2492 continue;
2493
2494 eh->has_got_reloc = 1;
2495 h->needs_plt = 1;
2496 h->plt.refcount += 1;
2497 break;
2498
2499 case R_X86_64_PLTOFF64:
2500 /* This tries to form the 'address' of a function relative
2501 to GOT. For global symbols we need a PLT entry. */
2502 if (h != NULL)
2503 {
2504 h->needs_plt = 1;
2505 h->plt.refcount += 1;
2506 }
2507 goto create_got;
2508
2509 case R_X86_64_SIZE32:
2510 case R_X86_64_SIZE64:
2511 size_reloc = TRUE;
2512 goto do_size;
2513
2514 case R_X86_64_32:
2515 if (!ABI_64_P (abfd))
2516 goto pointer;
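/* Fall through.  */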
2517 case R_X86_64_8:
2518 case R_X86_64_16:
2519 case R_X86_64_32S:
2520 /* Check relocation overflow as these relocs may lead to
2521 run-time relocation overflow. Don't error out for
2522 sections we don't care about, such as debug sections or
2523 when relocation overflow check is disabled. */
2524 if (!info->no_reloc_overflow_check
2525 && (bfd_link_pic (info)
2526 || (bfd_link_executable (info)
2527 && h != NULL
2528 && !h->def_regular
2529 && h->def_dynamic
2530 && (sec->flags & SEC_READONLY) == 0)))
2531 return elf_x86_64_need_pic (abfd, sec, h, symtab_hdr, isym,
2532 &x86_64_elf_howto_table[r_type]);
2533 /* Fall through. */
2534
2535 case R_X86_64_PC8:
2536 case R_X86_64_PC16:
2537 case R_X86_64_PC32:
2538 case R_X86_64_PC32_BND:
2539 case R_X86_64_PC64:
2540 case R_X86_64_64:
2541 pointer:
2542 if (eh != NULL && (sec->flags & SEC_CODE) != 0)
2543 eh->has_non_got_reloc = 1;
2544 /* We are called after all symbols have been resolved. Only
2545 relocations against STT_GNU_IFUNC symbols must go through
2546 the PLT. */
2547 if (h != NULL
2548 && (bfd_link_executable (info)
2549 || h->type == STT_GNU_IFUNC))
2550 {
2551 /* If this reloc is in a read-only section, we might
2552 need a copy reloc. We can't check reliably at this
2553 stage whether the section is read-only, as input
2554 sections have not yet been mapped to output sections.
2555 Tentatively set the flag for now, and correct in
2556 adjust_dynamic_symbol. */
2557 h->non_got_ref = 1;
2558
2559 /* We may need a .plt entry if the symbol is a function
2560 defined in a shared lib or is a STT_GNU_IFUNC function
2561 referenced from the code or read-only section. */
2562 if (!h->def_regular
2563 || (sec->flags & (SEC_CODE | SEC_READONLY)) != 0)
2564 h->plt.refcount += 1;
2565
2566 if (r_type == R_X86_64_PC32)
2567 {
2568 /* Since something like ".long foo - ." may be used
2569 as pointer, make sure that PLT is used if foo is
2570 a function defined in a shared library. */
2571 if ((sec->flags & SEC_CODE) == 0)
2572 h->pointer_equality_needed = 1;
2573 }
2574 else if (r_type != R_X86_64_PC32_BND
2575 && r_type != R_X86_64_PC64)
2576 {
2577 h->pointer_equality_needed = 1;
2578 /* At run-time, R_X86_64_64 can be resolved for both
2579 x86-64 and x32. But R_X86_64_32 and R_X86_64_32S
2580 can only be resolved for x32. */
2581 if ((sec->flags & SEC_READONLY) == 0
2582 && (r_type == R_X86_64_64
2583 || (!ABI_64_P (abfd)
2584 && (r_type == R_X86_64_32
2585 || r_type == R_X86_64_32S))))
2586 eh->func_pointer_refcount += 1;
2587 }
2588 }
2589
2590 size_reloc = FALSE;
2591 do_size:
2592 /* If we are creating a shared library, and this is a reloc
2593 against a global symbol, or a non PC relative reloc
2594 against a local symbol, then we need to copy the reloc
2595 into the shared library. However, if we are linking with
2596 -Bsymbolic, we do not need to copy a reloc against a
2597 global symbol which is defined in an object we are
2598 including in the link (i.e., DEF_REGULAR is set). At
2599 this point we have not seen all the input files, so it is
2600 possible that DEF_REGULAR is not set now but will be set
2601 later (it is never cleared). In case of a weak definition,
2602 DEF_REGULAR may be cleared later by a strong definition in
2603 a shared library. We account for that possibility below by
2604 storing information in the dyn_relocs field of the hash
2605 table entry. A similar situation occurs when creating
2606 shared libraries and symbol visibility changes render the
2607 symbol local.
2608
2609 If on the other hand, we are creating an executable, we
2610 may need to keep relocations for symbols satisfied by a
2611 dynamic library if we manage to avoid copy relocs for the
2612 symbol.
2613
2614 Generate dynamic pointer relocation against STT_GNU_IFUNC
2615 symbol in the non-code section. */
2616 if ((bfd_link_pic (info)
2617 && (! IS_X86_64_PCREL_TYPE (r_type)
2618 || (h != NULL
2619 && (! (bfd_link_pie (info)
2620 || SYMBOLIC_BIND (info, h))
2621 || h->root.type == bfd_link_hash_defweak
2622 || !h->def_regular))))
2623 || (h != NULL
2624 && h->type == STT_GNU_IFUNC
2625 && r_type == htab->pointer_r_type
2626 && (sec->flags & SEC_CODE) == 0)
2627 || (ELIMINATE_COPY_RELOCS
2628 && !bfd_link_pic (info)
2629 && h != NULL
2630 && (h->root.type == bfd_link_hash_defweak
2631 || !h->def_regular)))
2632 {
2633 struct elf_dyn_relocs *p;
2634 struct elf_dyn_relocs **head;
2635
2636 /* We must copy these reloc types into the output file.
2637 Create a reloc section in dynobj and make room for
2638 this reloc. */
2639 if (sreloc == NULL)
2640 {
2641 if (htab->elf.dynobj == NULL)
2642 htab->elf.dynobj = abfd;
2643
2644 sreloc = _bfd_elf_make_dynamic_reloc_section
2645 (sec, htab->elf.dynobj, ABI_64_P (abfd) ? 3 : 2,
2646 abfd, /*rela?*/ TRUE);
2647
2648 if (sreloc == NULL)
2649 goto error_return;
2650 }
2651
2652 /* If this is a global symbol, we count the number of
2653 relocations we need for this symbol. */
2654 if (h != NULL)
2655 head = &eh->dyn_relocs;
2656 else
2657 {
2658 /* Track dynamic relocs needed for local syms too.
2659 We really need local syms available to do this
2660 easily. Oh well. */
2661 asection *s;
2662 void **vpp;
2663
2664 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2665 abfd, r_symndx);
2666 if (isym == NULL)
2667 goto error_return;
2668
2669 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
2670 if (s == NULL)
2671 s = sec;
2672
2673 /* Beware of type punned pointers vs strict aliasing
2674 rules. */
2675 vpp = &(elf_section_data (s)->local_dynrel);
2676 head = (struct elf_dyn_relocs **)vpp;
2677 }
2678
2679 p = *head;
2680 if (p == NULL || p->sec != sec)
2681 {
2682 bfd_size_type amt = sizeof *p;
2683
2684 p = ((struct elf_dyn_relocs *)
2685 bfd_alloc (htab->elf.dynobj, amt));
2686 if (p == NULL)
2687 goto error_return;
2688 p->next = *head;
2689 *head = p;
2690 p->sec = sec;
2691 p->count = 0;
2692 p->pc_count = 0;
2693 }
2694
2695 p->count += 1;
2696 /* Count size relocation as PC-relative relocation. */
2697 if (IS_X86_64_PCREL_TYPE (r_type) || size_reloc)
2698 p->pc_count += 1;
2699 }
2700 break;
2701
2702 /* This relocation describes the C++ object vtable hierarchy.
2703 Reconstruct it for later use during GC. */
2704 case R_X86_64_GNU_VTINHERIT:
2705 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
2706 goto error_return;
2707 break;
2708
2709 /* This relocation describes which C++ vtable entries are actually
2710 used. Record for later use during GC. */
2711 case R_X86_64_GNU_VTENTRY:
2712 BFD_ASSERT (h != NULL);
2713 if (h != NULL
2714 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
2715 goto error_return;
2716 break;
2717
2718 default:
2719 break;
2720 }
2721
2722 if (use_plt_got
2723 && h != NULL
2724 && h->plt.refcount > 0
2725 && (((info->flags & DF_BIND_NOW) && !h->pointer_equality_needed)
2726 || h->got.refcount > 0)
2727 && htab->plt_got == NULL)
2728 {
2729 /* Create the GOT procedure linkage table. */
2730 unsigned int plt_got_align;
2731 const struct elf_backend_data *bed;
2732
2733 bed = get_elf_backend_data (info->output_bfd);
2734 BFD_ASSERT (sizeof (elf_x86_64_legacy_plt2_entry) == 8
2735 && (sizeof (elf_x86_64_bnd_plt2_entry)
2736 == sizeof (elf_x86_64_legacy_plt2_entry)));
2737 plt_got_align = 3;
2738
2739 if (htab->elf.dynobj == NULL)
2740 htab->elf.dynobj = abfd;
2741 htab->plt_got
2742 = bfd_make_section_anyway_with_flags (htab->elf.dynobj,
2743 ".plt.got",
2744 (bed->dynamic_sec_flags
2745 | SEC_ALLOC
2746 | SEC_CODE
2747 | SEC_LOAD
2748 | SEC_READONLY));
2749 if (htab->plt_got == NULL
2750 || !bfd_set_section_alignment (htab->elf.dynobj,
2751 htab->plt_got,
2752 plt_got_align))
2753 goto error_return;
2754 }
2755
2756 if ((r_type == R_X86_64_GOTPCREL
2757 || r_type == R_X86_64_GOTPCRELX
2758 || r_type == R_X86_64_REX_GOTPCRELX)
2759 && (h == NULL || h->type != STT_GNU_IFUNC))
2760 sec->need_convert_load = 1;
2761 }
2762
2763 if (elf_section_data (sec)->this_hdr.contents != contents)
2764 {
2765 if (!info->keep_memory)
2766 free (contents);
2767 else
2768 {
2769 /* Cache the section contents for elf_link_input_bfd. */
2770 elf_section_data (sec)->this_hdr.contents = contents;
2771 }
2772 }
2773
2774 return TRUE;
2775
2776 error_return:
2777 if (elf_section_data (sec)->this_hdr.contents != contents)
2778 free (contents);
2779 sec->check_relocs_failed = 1;
2780 return FALSE;
2781 }
2782
2783 /* Return the section that should be marked against GC for a given
2784 relocation. */
2785
2786 static asection *
2787 elf_x86_64_gc_mark_hook (asection *sec,
2788 struct bfd_link_info *info,
2789 Elf_Internal_Rela *rel,
2790 struct elf_link_hash_entry *h,
2791 Elf_Internal_Sym *sym)
2792 {
2793 if (h != NULL)
2794 switch (ELF32_R_TYPE (rel->r_info))
2795 {
2796 case R_X86_64_GNU_VTINHERIT:
2797 case R_X86_64_GNU_VTENTRY:
2798 return NULL;
2799 }
2800
2801 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
2802 }
2803
2804 /* Remove undefined weak symbol from the dynamic symbol table if it
2805 is resolved to 0. */
2806
2807 static bfd_boolean
2808 elf_x86_64_fixup_symbol (struct bfd_link_info *info,
2809 struct elf_link_hash_entry *h)
2810 {
2811 if (h->dynindx != -1
2812 && UNDEFINED_WEAK_RESOLVED_TO_ZERO (info,
2813 elf_x86_64_hash_entry (h)->has_got_reloc,
2814 elf_x86_64_hash_entry (h)))
2815 {
2816 h->dynindx = -1;
2817 _bfd_elf_strtab_delref (elf_hash_table (info)->dynstr,
2818 h->dynstr_index);
2819 }
2820 return TRUE;
2821 }
2822
2823 /* Adjust a symbol defined by a dynamic object and referenced by a
2824 regular object. The current definition is in some section of the
2825 dynamic object, but we're not including those sections. We have to
2826 change the definition to something the rest of the link can
2827 understand. */
2828
2829 static bfd_boolean
2830 elf_x86_64_adjust_dynamic_symbol (struct bfd_link_info *info,
2831 struct elf_link_hash_entry *h)
2832 {
2833 struct elf_x86_64_link_hash_table *htab;
2834 asection *s;
2835 struct elf_x86_64_link_hash_entry *eh;
2836 struct elf_dyn_relocs *p;
2837
2838 /* STT_GNU_IFUNC symbol must go through PLT. */
2839 if (h->type == STT_GNU_IFUNC)
2840 {
2841 /* All local STT_GNU_IFUNC references must be treated as local
2842 calls via the local PLT. */
2843 if (h->ref_regular
2844 && SYMBOL_CALLS_LOCAL (info, h))
2845 {
2846 bfd_size_type pc_count = 0, count = 0;
2847 struct elf_dyn_relocs **pp;
2848
2849 eh = (struct elf_x86_64_link_hash_entry *) h;
2850 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
2851 {
2852 pc_count += p->pc_count;
2853 p->count -= p->pc_count;
2854 p->pc_count = 0;
2855 count += p->count;
2856 if (p->count == 0)
2857 *pp = p->next;
2858 else
2859 pp = &p->next;
2860 }
2861
2862 if (pc_count || count)
2863 {
2864 h->non_got_ref = 1;
2865 if (pc_count)
2866 {
2867 /* Increment PLT reference count only for PC-relative
2868 references. */
2869 h->needs_plt = 1;
2870 if (h->plt.refcount <= 0)
2871 h->plt.refcount = 1;
2872 else
2873 h->plt.refcount += 1;
2874 }
2875 }
2876 }
2877
2878 if (h->plt.refcount <= 0)
2879 {
2880 h->plt.offset = (bfd_vma) -1;
2881 h->needs_plt = 0;
2882 }
2883 return TRUE;
2884 }
2885
2886 /* If this is a function, put it in the procedure linkage table. We
2887 will fill in the contents of the procedure linkage table later,
2888 when we know the address of the .got section. */
2889 if (h->type == STT_FUNC
2890 || h->needs_plt)
2891 {
2892 if (h->plt.refcount <= 0
2893 || SYMBOL_CALLS_LOCAL (info, h)
2894 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
2895 && h->root.type == bfd_link_hash_undefweak))
2896 {
2897 /* This case can occur if we saw a PLT32 reloc in an input
2898 file, but the symbol was never referred to by a dynamic
2899 object, or if all references were garbage collected. In
2900 such a case, we don't actually need to build a procedure
2901 linkage table, and we can just do a PC32 reloc instead. */
2902 h->plt.offset = (bfd_vma) -1;
2903 h->needs_plt = 0;
2904 }
2905
2906 return TRUE;
2907 }
2908 else
2909 /* It's possible that we incorrectly decided a .plt reloc was
2910 needed for an R_X86_64_PC32 reloc to a non-function sym in
2911 check_relocs. We can't decide accurately between function and
2912 non-function syms in check_relocs; objects loaded later in
2913 the link may change h->type. So fix it now. */
2914 h->plt.offset = (bfd_vma) -1;
2915
2916 /* If this is a weak symbol, and there is a real definition, the
2917 processor independent code will have arranged for us to see the
2918 real definition first, and we can just use the same value. */
2919 if (h->u.weakdef != NULL)
2920 {
2921 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
2922 || h->u.weakdef->root.type == bfd_link_hash_defweak);
2923 h->root.u.def.section = h->u.weakdef->root.u.def.section;
2924 h->root.u.def.value = h->u.weakdef->root.u.def.value;
2925 if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
2926 {
2927 eh = (struct elf_x86_64_link_hash_entry *) h;
2928 h->non_got_ref = h->u.weakdef->non_got_ref;
2929 eh->needs_copy = h->u.weakdef->needs_copy;
2930 }
2931 return TRUE;
2932 }
2933
2934 /* This is a reference to a symbol defined by a dynamic object which
2935 is not a function. */
2936
2937 /* If we are creating a shared library, we must presume that the
2938 only references to the symbol are via the global offset table.
2939 For such cases we need not do anything here; the relocations will
2940 be handled correctly by relocate_section. */
2941 if (!bfd_link_executable (info))
2942 return TRUE;
2943
2944 /* If there are no references to this symbol that do not use the
2945 GOT, we don't need to generate a copy reloc. */
2946 if (!h->non_got_ref)
2947 return TRUE;
2948
2949 /* If -z nocopyreloc was given, we won't generate them either. */
2950 if (info->nocopyreloc)
2951 {
2952 h->non_got_ref = 0;
2953 return TRUE;
2954 }
2955
2956 if (ELIMINATE_COPY_RELOCS)
2957 {
2958 eh = (struct elf_x86_64_link_hash_entry *) h;
2959 for (p = eh->dyn_relocs; p != NULL; p = p->next)
2960 {
2961 s = p->sec->output_section;
2962 if (s != NULL && (s->flags & SEC_READONLY) != 0)
2963 break;
2964 }
2965
2966 /* If we didn't find any dynamic relocs in read-only sections, then
2967 we'll be keeping the dynamic relocs and avoiding the copy reloc. */
2968 if (p == NULL)
2969 {
2970 h->non_got_ref = 0;
2971 return TRUE;
2972 }
2973 }
2974
2975 /* We must allocate the symbol in our .dynbss section, which will
2976 become part of the .bss section of the executable. There will be
2977 an entry for this symbol in the .dynsym section. The dynamic
2978 object will contain position independent code, so all references
2979 from the dynamic object to this symbol will go through the global
2980 offset table. The dynamic linker will use the .dynsym entry to
2981 determine the address it must put in the global offset table, so
2982 both the dynamic object and the regular object will refer to the
2983 same memory location for the variable. */
2984
2985 htab = elf_x86_64_hash_table (info);
2986 if (htab == NULL)
2987 return FALSE;
2988
2989 /* We must generate a R_X86_64_COPY reloc to tell the dynamic linker
2990 to copy the initial value out of the dynamic object and into the
2991 runtime process image. */
2992 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
2993 {
2994 const struct elf_backend_data *bed;
2995 bed = get_elf_backend_data (info->output_bfd);
2996 htab->srelbss->size += bed->s->sizeof_rela;
2997 h->needs_copy = 1;
2998 }
2999
3000 s = htab->sdynbss;
3001
3002 return _bfd_elf_adjust_dynamic_copy (info, h, s);
3003 }
3004
3005 /* Allocate space in .plt, .got and associated reloc sections for
3006 dynamic relocs. */
3007
3008 static bfd_boolean
3009 elf_x86_64_allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf)
3010 {
3011 struct bfd_link_info *info;
3012 struct elf_x86_64_link_hash_table *htab;
3013 struct elf_x86_64_link_hash_entry *eh;
3014 struct elf_dyn_relocs *p;
3015 const struct elf_backend_data *bed;
3016 unsigned int plt_entry_size;
3017 bfd_boolean resolved_to_zero;
3018
3019 if (h->root.type == bfd_link_hash_indirect)
3020 return TRUE;
3021
3022 eh = (struct elf_x86_64_link_hash_entry *) h;
3023
3024 info = (struct bfd_link_info *) inf;
3025 htab = elf_x86_64_hash_table (info);
3026 if (htab == NULL)
3027 return FALSE;
3028 bed = get_elf_backend_data (info->output_bfd);
3029 plt_entry_size = GET_PLT_ENTRY_SIZE (info->output_bfd);
3030
3031 resolved_to_zero = UNDEFINED_WEAK_RESOLVED_TO_ZERO (info,
3032 eh->has_got_reloc,
3033 eh);
3034
3035 /* We can't use the GOT PLT if pointer equality is needed since
3036 finish_dynamic_symbol won't clear symbol value and the dynamic
3037 linker won't update the GOT slot. We will get into an infinite
3038 loop at run-time. */
3039 if (htab->plt_got != NULL
3040 && h->type != STT_GNU_IFUNC
3041 && !h->pointer_equality_needed
3042 && h->plt.refcount > 0
3043 && h->got.refcount > 0)
3044 {
3045 /* Don't use the regular PLT if there are both GOT and GOTPLT
3046 relocations. */
3047 h->plt.offset = (bfd_vma) -1;
3048
3049 /* Use the GOT PLT. */
3050 eh->plt_got.refcount = 1;
3051 }
3052
3053 /* Clear the reference count of function pointer relocations if
3054 the symbol isn't a normal function. */
3055 if (h->type != STT_FUNC)
3056 eh->func_pointer_refcount = 0;
3057
3058 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle it
3059 here if it is defined and referenced in a non-shared object. */
3060 if (h->type == STT_GNU_IFUNC
3061 && h->def_regular)
3062 {
3063 if (_bfd_elf_allocate_ifunc_dyn_relocs (info, h,
3064 &eh->dyn_relocs,
3065 &htab->readonly_dynrelocs_against_ifunc,
3066 plt_entry_size,
3067 plt_entry_size,
3068 GOT_ENTRY_SIZE, TRUE))
3069 {
3070 asection *s = htab->plt_bnd;
3071 if (h->plt.offset != (bfd_vma) -1 && s != NULL)
3072 {
3073 /* Use the .plt.bnd section if it is created. */
3074 eh->plt_bnd.offset = s->size;
3075
3076 /* Make room for this entry in the .plt.bnd section. */
3077 s->size += sizeof (elf_x86_64_legacy_plt2_entry);
3078 }
3079
3080 return TRUE;
3081 }
3082 else
3083 return FALSE;
3084 }
3085 /* Don't create the PLT entry if there are only function pointer
3086 relocations which can be resolved at run-time. */
3087 else if (htab->elf.dynamic_sections_created
3088 && (h->plt.refcount > eh->func_pointer_refcount
3089 || eh->plt_got.refcount > 0))
3090 {
3091 bfd_boolean use_plt_got;
3092
3093 /* Clear the reference count of function pointer relocations
3094 if PLT is used. */
3095 eh->func_pointer_refcount = 0;
3096
3097 if ((info->flags & DF_BIND_NOW) && !h->pointer_equality_needed)
3098 {
3099 /* Don't use the regular PLT for DF_BIND_NOW. */
3100 h->plt.offset = (bfd_vma) -1;
3101
3102 /* Use the GOT PLT. */
3103 h->got.refcount = 1;
3104 eh->plt_got.refcount = 1;
3105 }
3106
3107 use_plt_got = eh->plt_got.refcount > 0;
3108
3109 /* Make sure this symbol is output as a dynamic symbol.
3110 Undefined weak syms won't yet be marked as dynamic. */
3111 if (h->dynindx == -1
3112 && !h->forced_local
3113 && !resolved_to_zero)
3114 {
3115 if (! bfd_elf_link_record_dynamic_symbol (info, h))
3116 return FALSE;
3117 }
3118
3119 if (bfd_link_pic (info)
3120 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
3121 {
3122 asection *s = htab->elf.splt;
3123 asection *bnd_s = htab->plt_bnd;
3124 asection *got_s = htab->plt_got;
3125
3126 /* If this is the first .plt entry, make room for the special
3127 first entry. The .plt section is used by prelink to undo
3128 prelinking for dynamic relocations. */
3129 if (s->size == 0)
3130 s->size = plt_entry_size;
3131
3132 if (use_plt_got)
3133 eh->plt_got.offset = got_s->size;
3134 else
3135 {
3136 h->plt.offset = s->size;
3137 if (bnd_s)
3138 eh->plt_bnd.offset = bnd_s->size;
3139 }
3140
3141 /* If this symbol is not defined in a regular file, and we are
3142 not generating a shared library, then set the symbol to this
3143 location in the .plt. This is required to make function
3144 pointers compare as equal between the normal executable and
3145 the shared library. */
3146 if (! bfd_link_pic (info)
3147 && !h->def_regular)
3148 {
3149 if (use_plt_got)
3150 {
3151 /* We need to make a call to the entry of the GOT PLT
3152 instead of regular PLT entry. */
3153 h->root.u.def.section = got_s;
3154 h->root.u.def.value = eh->plt_got.offset;
3155 }
3156 else
3157 {
3158 if (bnd_s)
3159 {
3160 /* We need to make a call to the entry of the second
3161 PLT instead of regular PLT entry. */
3162 h->root.u.def.section = bnd_s;
3163 h->root.u.def.value = eh->plt_bnd.offset;
3164 }
3165 else
3166 {
3167 h->root.u.def.section = s;
3168 h->root.u.def.value = h->plt.offset;
3169 }
3170 }
3171 }
3172
3173 /* Make room for this entry. */
3174 if (use_plt_got)
3175 got_s->size += sizeof (elf_x86_64_legacy_plt2_entry);
3176 else
3177 {
3178 s->size += plt_entry_size;
3179 if (bnd_s)
3180 bnd_s->size += sizeof (elf_x86_64_legacy_plt2_entry);
3181
3182 /* We also need to make an entry in the .got.plt section,
3183 which will be placed in the .got section by the linker
3184 script. */
3185 htab->elf.sgotplt->size += GOT_ENTRY_SIZE;
3186
3187 /* There should be no PLT relocation against resolved
3188 undefined weak symbol in executable. */
3189 if (!resolved_to_zero)
3190 {
3191 /* We also need to make an entry in the .rela.plt
3192 section. */
3193 htab->elf.srelplt->size += bed->s->sizeof_rela;
3194 htab->elf.srelplt->reloc_count++;
3195 }
3196 }
3197 }
3198 else
3199 {
3200 eh->plt_got.offset = (bfd_vma) -1;
3201 h->plt.offset = (bfd_vma) -1;
3202 h->needs_plt = 0;
3203 }
3204 }
3205 else
3206 {
3207 eh->plt_got.offset = (bfd_vma) -1;
3208 h->plt.offset = (bfd_vma) -1;
3209 h->needs_plt = 0;
3210 }
3211
3212 eh->tlsdesc_got = (bfd_vma) -1;
3213
3214 /* If R_X86_64_GOTTPOFF symbol is now local to the binary,
3215 make it a R_X86_64_TPOFF32 requiring no GOT entry. */
3216 if (h->got.refcount > 0
3217 && bfd_link_executable (info)
3218 && h->dynindx == -1
3219 && elf_x86_64_hash_entry (h)->tls_type == GOT_TLS_IE)
3220 {
3221 h->got.offset = (bfd_vma) -1;
3222 }
3223 else if (h->got.refcount > 0)
3224 {
3225 asection *s;
3226 bfd_boolean dyn;
3227 int tls_type = elf_x86_64_hash_entry (h)->tls_type;
3228
3229 /* Make sure this symbol is output as a dynamic symbol.
3230 Undefined weak syms won't yet be marked as dynamic. */
3231 if (h->dynindx == -1
3232 && !h->forced_local
3233 && !resolved_to_zero)
3234 {
3235 if (! bfd_elf_link_record_dynamic_symbol (info, h))
3236 return FALSE;
3237 }
3238
3239 if (GOT_TLS_GDESC_P (tls_type))
3240 {
3241 eh->tlsdesc_got = htab->elf.sgotplt->size
3242 - elf_x86_64_compute_jump_table_size (htab);
3243 htab->elf.sgotplt->size += 2 * GOT_ENTRY_SIZE;
3244 h->got.offset = (bfd_vma) -2;
3245 }
3246 if (! GOT_TLS_GDESC_P (tls_type)
3247 || GOT_TLS_GD_P (tls_type))
3248 {
3249 s = htab->elf.sgot;
3250 h->got.offset = s->size;
3251 s->size += GOT_ENTRY_SIZE;
3252 if (GOT_TLS_GD_P (tls_type))
3253 s->size += GOT_ENTRY_SIZE;
3254 }
3255 dyn = htab->elf.dynamic_sections_created;
3256 /* R_X86_64_TLSGD needs one dynamic relocation if local symbol
3257 and two if global. R_X86_64_GOTTPOFF needs one dynamic
3258 relocation. No dynamic relocation against resolved undefined
3259 weak symbol in executable. */
3260 if ((GOT_TLS_GD_P (tls_type) && h->dynindx == -1)
3261 || tls_type == GOT_TLS_IE)
3262 htab->elf.srelgot->size += bed->s->sizeof_rela;
3263 else if (GOT_TLS_GD_P (tls_type))
3264 htab->elf.srelgot->size += 2 * bed->s->sizeof_rela;
3265 else if (! GOT_TLS_GDESC_P (tls_type)
3266 && ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
3267 && !resolved_to_zero)
3268 || h->root.type != bfd_link_hash_undefweak)
3269 && (bfd_link_pic (info)
3270 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
3271 htab->elf.srelgot->size += bed->s->sizeof_rela;
3272 if (GOT_TLS_GDESC_P (tls_type))
3273 {
3274 htab->elf.srelplt->size += bed->s->sizeof_rela;
3275 htab->tlsdesc_plt = (bfd_vma) -1;
3276 }
3277 }
3278 else
3279 h->got.offset = (bfd_vma) -1;
3280
3281 if (eh->dyn_relocs == NULL)
3282 return TRUE;
3283
3284 /* In the shared -Bsymbolic case, discard space allocated for
3285 dynamic pc-relative relocs against symbols which turn out to be
3286 defined in regular objects. For the normal shared case, discard
3287 space for pc-relative relocs that have become local due to symbol
3288 visibility changes. */
3289
3290 if (bfd_link_pic (info))
3291 {
3292 /* Relocs that use pc_count are those that appear on a call
3293 insn, or certain REL relocs that can be generated via assembly.
3294 We want calls to protected symbols to resolve directly to the
3295 function rather than going via the plt. If people want
3296 function pointer comparisons to work as expected then they
3297 should avoid writing weird assembly. */
3298 if (SYMBOL_CALLS_LOCAL (info, h))
3299 {
3300 struct elf_dyn_relocs **pp;
3301
3302 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
3303 {
3304 p->count -= p->pc_count;
3305 p->pc_count = 0;
3306 if (p->count == 0)
3307 *pp = p->next;
3308 else
3309 pp = &p->next;
3310 }
3311 }
3312
3313 /* Also discard relocs on undefined weak syms with non-default
3314 visibility or in PIE. */
3315 if (eh->dyn_relocs != NULL)
3316 {
3317 if (h->root.type == bfd_link_hash_undefweak)
3318 {
3319 /* Undefined weak symbol is never bound locally in shared
3320 library. */
3321 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
3322 || resolved_to_zero)
3323 eh->dyn_relocs = NULL;
3324 else if (h->dynindx == -1
3325 && ! h->forced_local
3326 && ! bfd_elf_link_record_dynamic_symbol (info, h))
3327 return FALSE;
3328 }
3329 /* For PIE, discard space for pc-relative relocs against
3330 symbols which turn out to need copy relocs. */
3331 else if (bfd_link_executable (info)
3332 && (h->needs_copy || eh->needs_copy)
3333 && h->def_dynamic
3334 && !h->def_regular)
3335 {
3336 struct elf_dyn_relocs **pp;
3337
3338 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
3339 {
3340 if (p->pc_count != 0)
3341 *pp = p->next;
3342 else
3343 pp = &p->next;
3344 }
3345 }
3346 }
3347 }
3348 else if (ELIMINATE_COPY_RELOCS)
3349 {
3350 /* For the non-shared case, discard space for relocs against
3351 symbols which turn out to need copy relocs or are not
3352 dynamic. Keep dynamic relocations for run-time function
3353 pointer initialization. */
3354
3355 if ((!h->non_got_ref
3356 || eh->func_pointer_refcount > 0
3357 || (h->root.type == bfd_link_hash_undefweak
3358 && !resolved_to_zero))
3359 && ((h->def_dynamic
3360 && !h->def_regular)
3361 || (htab->elf.dynamic_sections_created
3362 && (h->root.type == bfd_link_hash_undefweak
3363 || h->root.type == bfd_link_hash_undefined))))
3364 {
3365 /* Make sure this symbol is output as a dynamic symbol.
3366 Undefined weak syms won't yet be marked as dynamic. */
3367 if (h->dynindx == -1
3368 && ! h->forced_local
3369 && ! resolved_to_zero
3370 && ! bfd_elf_link_record_dynamic_symbol (info, h))
3371 return FALSE;
3372
3373 /* If that succeeded, we know we'll be keeping all the
3374 relocs. */
3375 if (h->dynindx != -1)
3376 goto keep;
3377 }
3378
3379 eh->dyn_relocs = NULL;
3380 eh->func_pointer_refcount = 0;
3381
3382 keep: ;
3383 }
3384
3385 /* Finally, allocate space. */
3386 for (p = eh->dyn_relocs; p != NULL; p = p->next)
3387 {
3388 asection * sreloc;
3389
3390 sreloc = elf_section_data (p->sec)->sreloc;
3391
3392 BFD_ASSERT (sreloc != NULL);
3393
3394 sreloc->size += p->count * bed->s->sizeof_rela;
3395 }
3396
3397 return TRUE;
3398 }
3399
3400 /* Allocate space in .plt, .got and associated reloc sections for
3401 local dynamic relocs. */
3402
3403 static bfd_boolean
3404 elf_x86_64_allocate_local_dynrelocs (void **slot, void *inf)
3405 {
3406 struct elf_link_hash_entry *h
3407 = (struct elf_link_hash_entry *) *slot;
3408
3409 if (h->type != STT_GNU_IFUNC
3410 || !h->def_regular
3411 || !h->ref_regular
3412 || !h->forced_local
3413 || h->root.type != bfd_link_hash_defined)
3414 abort ();
3415
3416 return elf_x86_64_allocate_dynrelocs (h, inf);
3417 }
3418
3419 /* Find any dynamic relocs that apply to read-only sections. */
3420
3421 static bfd_boolean
3422 elf_x86_64_readonly_dynrelocs (struct elf_link_hash_entry *h,
3423 void * inf)
3424 {
3425 struct elf_x86_64_link_hash_entry *eh;
3426 struct elf_dyn_relocs *p;
3427
3428 /* Skip local IFUNC symbols. */
3429 if (h->forced_local && h->type == STT_GNU_IFUNC)
3430 return TRUE;
3431
3432 eh = (struct elf_x86_64_link_hash_entry *) h;
3433 for (p = eh->dyn_relocs; p != NULL; p = p->next)
3434 {
3435 asection *s = p->sec->output_section;
3436
3437 if (s != NULL && (s->flags & SEC_READONLY) != 0)
3438 {
3439 struct bfd_link_info *info = (struct bfd_link_info *) inf;
3440
3441 info->flags |= DF_TEXTREL;
3442
3443 if ((info->warn_shared_textrel && bfd_link_pic (info))
3444 || info->error_textrel)
3445 info->callbacks->einfo (_("%P: %B: warning: relocation against `%s' in readonly section `%A'\n"),
3446 p->sec->owner, h->root.root.string,
3447 p->sec);
3448
3449 /* Not an error, just cut short the traversal. */
3450 return FALSE;
3451 }
3452 }
3453 return TRUE;
3454 }
3455
3456 /* Convert load via the GOT slot to load immediate. */
3457
3458 static bfd_boolean
3459 elf_x86_64_convert_load (bfd *abfd, asection *sec,
3460 struct bfd_link_info *link_info)
3461 {
3462 Elf_Internal_Shdr *symtab_hdr;
3463 Elf_Internal_Rela *internal_relocs;
3464 Elf_Internal_Rela *irel, *irelend;
3465 bfd_byte *contents;
3466 struct elf_x86_64_link_hash_table *htab;
3467 bfd_boolean changed;
3468 bfd_signed_vma *local_got_refcounts;
3469
3470 /* Don't even try to convert non-ELF outputs. */
3471 if (!is_elf_hash_table (link_info->hash))
3472 return FALSE;
3473
3474 /* Nothing to do if there is no need or no output. */
3475 if ((sec->flags & (SEC_CODE | SEC_RELOC)) != (SEC_CODE | SEC_RELOC)
3476 || sec->need_convert_load == 0
3477 || bfd_is_abs_section (sec->output_section))
3478 return TRUE;
3479
3480 symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
3481
3482 /* Load the relocations for this section. */
3483 internal_relocs = (_bfd_elf_link_read_relocs
3484 (abfd, sec, NULL, (Elf_Internal_Rela *) NULL,
3485 link_info->keep_memory));
3486 if (internal_relocs == NULL)
3487 return FALSE;
3488
3489 changed = FALSE;
3490 htab = elf_x86_64_hash_table (link_info);
3491 local_got_refcounts = elf_local_got_refcounts (abfd);
3492
3493 /* Get the section contents. */
3494 if (elf_section_data (sec)->this_hdr.contents != NULL)
3495 contents = elf_section_data (sec)->this_hdr.contents;
3496 else
3497 {
3498 if (!bfd_malloc_and_get_section (abfd, sec, &contents))
3499 goto error_return;
3500 }
3501
3502 irelend = internal_relocs + sec->reloc_count;
3503 for (irel = internal_relocs; irel < irelend; irel++)
3504 {
3505 unsigned int r_type = ELF32_R_TYPE (irel->r_info);
3506 unsigned int r_symndx;
3507 struct elf_link_hash_entry *h;
3508 bfd_boolean converted;
3509
3510 if (r_type != R_X86_64_GOTPCRELX
3511 && r_type != R_X86_64_REX_GOTPCRELX
3512 && r_type != R_X86_64_GOTPCREL)
3513 continue;
3514
3515 r_symndx = htab->r_sym (irel->r_info);
3516 if (r_symndx < symtab_hdr->sh_info)
3517 h = elf_x86_64_get_local_sym_hash (htab, sec->owner,
3518 (const Elf_Internal_Rela *) irel,
3519 FALSE);
3520 else
3521 {
3522 h = elf_sym_hashes (abfd)[r_symndx - symtab_hdr->sh_info];
3523 while (h->root.type == bfd_link_hash_indirect
3524 || h->root.type == bfd_link_hash_warning)
3525 h = (struct elf_link_hash_entry *) h->root.u.i.link;
3526 }
3527
3528 /* STT_GNU_IFUNC must keep GOTPCREL relocations. */
3529 if (h != NULL && h->type == STT_GNU_IFUNC)
3530 continue;
3531
3532 converted = FALSE;
3533 if (!elf_x86_64_convert_load_reloc (abfd, sec, contents, irel, h,
3534 &converted, link_info))
3535 goto error_return;
3536
3537 if (converted)
3538 {
3539 changed = converted;
3540 if (h)
3541 {
3542 if (h->got.refcount > 0)
3543 h->got.refcount -= 1;
3544 }
3545 else
3546 {
3547 if (local_got_refcounts != NULL
3548 && local_got_refcounts[r_symndx] > 0)
3549 local_got_refcounts[r_symndx] -= 1;
3550 }
3551 }
3552 }
3553
3554 if (contents != NULL
3555 && elf_section_data (sec)->this_hdr.contents != contents)
3556 {
3557 if (!changed && !link_info->keep_memory)
3558 free (contents);
3559 else
3560 {
3561 /* Cache the section contents for elf_link_input_bfd. */
3562 elf_section_data (sec)->this_hdr.contents = contents;
3563 }
3564 }
3565
3566 if (elf_section_data (sec)->relocs != internal_relocs)
3567 {
3568 if (!changed)
3569 free (internal_relocs);
3570 else
3571 elf_section_data (sec)->relocs = internal_relocs;
3572 }
3573
3574 return TRUE;
3575
3576 error_return:
3577 if (contents != NULL
3578 && elf_section_data (sec)->this_hdr.contents != contents)
3579 free (contents);
3580 if (internal_relocs != NULL
3581 && elf_section_data (sec)->relocs != internal_relocs)
3582 free (internal_relocs);
3583 return FALSE;
3584 }
3585
3586 /* Set the sizes of the dynamic sections. */
3587
3588 static bfd_boolean
3589 elf_x86_64_size_dynamic_sections (bfd *output_bfd,
3590 struct bfd_link_info *info)
3591 {
3592 struct elf_x86_64_link_hash_table *htab;
3593 bfd *dynobj;
3594 asection *s;
3595 bfd_boolean relocs;
3596 bfd *ibfd;
3597 const struct elf_backend_data *bed;
3598
3599 htab = elf_x86_64_hash_table (info);
3600 if (htab == NULL)
3601 return FALSE;
3602 bed = get_elf_backend_data (output_bfd);
3603
3604 dynobj = htab->elf.dynobj;
3605 if (dynobj == NULL)
3606 abort ();
3607
3608 /* Set up .got offsets for local syms, and space for local dynamic
3609 relocs. */
3610 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
3611 {
3612 bfd_signed_vma *local_got;
3613 bfd_signed_vma *end_local_got;
3614 char *local_tls_type;
3615 bfd_vma *local_tlsdesc_gotent;
3616 bfd_size_type locsymcount;
3617 Elf_Internal_Shdr *symtab_hdr;
3618 asection *srel;
3619
3620 if (! is_x86_64_elf (ibfd))
3621 continue;
3622
3623 for (s = ibfd->sections; s != NULL; s = s->next)
3624 {
3625 struct elf_dyn_relocs *p;
3626
3627 if (!elf_x86_64_convert_load (ibfd, s, info))
3628 return FALSE;
3629
3630 for (p = (struct elf_dyn_relocs *)
3631 (elf_section_data (s)->local_dynrel);
3632 p != NULL;
3633 p = p->next)
3634 {
3635 if (!bfd_is_abs_section (p->sec)
3636 && bfd_is_abs_section (p->sec->output_section))
3637 {
3638 /* Input section has been discarded, either because
3639 it is a copy of a linkonce section or due to
3640 linker script /DISCARD/, so we'll be discarding
3641 the relocs too. */
3642 }
3643 else if (p->count != 0)
3644 {
3645 srel = elf_section_data (p->sec)->sreloc;
3646 srel->size += p->count * bed->s->sizeof_rela;
3647 if ((p->sec->output_section->flags & SEC_READONLY) != 0
3648 && (info->flags & DF_TEXTREL) == 0)
3649 {
3650 info->flags |= DF_TEXTREL;
3651 if ((info->warn_shared_textrel && bfd_link_pic (info))
3652 || info->error_textrel)
3653 info->callbacks->einfo (_("%P: %B: warning: relocation in readonly section `%A'\n"),
3654 p->sec->owner, p->sec);
3655 }
3656 }
3657 }
3658 }
3659
3660 local_got = elf_local_got_refcounts (ibfd);
3661 if (!local_got)
3662 continue;
3663
3664 symtab_hdr = &elf_symtab_hdr (ibfd);
3665 locsymcount = symtab_hdr->sh_info;
3666 end_local_got = local_got + locsymcount;
3667 local_tls_type = elf_x86_64_local_got_tls_type (ibfd);
3668 local_tlsdesc_gotent = elf_x86_64_local_tlsdesc_gotent (ibfd);
3669 s = htab->elf.sgot;
3670 srel = htab->elf.srelgot;
3671 for (; local_got < end_local_got;
3672 ++local_got, ++local_tls_type, ++local_tlsdesc_gotent)
3673 {
3674 *local_tlsdesc_gotent = (bfd_vma) -1;
3675 if (*local_got > 0)
3676 {
3677 if (GOT_TLS_GDESC_P (*local_tls_type))
3678 {
3679 *local_tlsdesc_gotent = htab->elf.sgotplt->size
3680 - elf_x86_64_compute_jump_table_size (htab);
3681 htab->elf.sgotplt->size += 2 * GOT_ENTRY_SIZE;
3682 *local_got = (bfd_vma) -2;
3683 }
3684 if (! GOT_TLS_GDESC_P (*local_tls_type)
3685 || GOT_TLS_GD_P (*local_tls_type))
3686 {
3687 *local_got = s->size;
3688 s->size += GOT_ENTRY_SIZE;
3689 if (GOT_TLS_GD_P (*local_tls_type))
3690 s->size += GOT_ENTRY_SIZE;
3691 }
3692 if (bfd_link_pic (info)
3693 || GOT_TLS_GD_ANY_P (*local_tls_type)
3694 || *local_tls_type == GOT_TLS_IE)
3695 {
3696 if (GOT_TLS_GDESC_P (*local_tls_type))
3697 {
3698 htab->elf.srelplt->size
3699 += bed->s->sizeof_rela;
3700 htab->tlsdesc_plt = (bfd_vma) -1;
3701 }
3702 if (! GOT_TLS_GDESC_P (*local_tls_type)
3703 || GOT_TLS_GD_P (*local_tls_type))
3704 srel->size += bed->s->sizeof_rela;
3705 }
3706 }
3707 else
3708 *local_got = (bfd_vma) -1;
3709 }
3710 }
3711
3712 if (htab->tls_ld_got.refcount > 0)
3713 {
3714 /* Allocate 2 got entries and 1 dynamic reloc for R_X86_64_TLSLD
3715 relocs. */
3716 htab->tls_ld_got.offset = htab->elf.sgot->size;
3717 htab->elf.sgot->size += 2 * GOT_ENTRY_SIZE;
3718 htab->elf.srelgot->size += bed->s->sizeof_rela;
3719 }
3720 else
3721 htab->tls_ld_got.offset = -1;
3722
3723 /* Allocate global sym .plt and .got entries, and space for global
3724 sym dynamic relocs. */
3725 elf_link_hash_traverse (&htab->elf, elf_x86_64_allocate_dynrelocs,
3726 info);
3727
3728 /* Allocate .plt and .got entries, and space for local symbols. */
3729 htab_traverse (htab->loc_hash_table,
3730 elf_x86_64_allocate_local_dynrelocs,
3731 info);
3732
3733 /* For every jump slot reserved in the sgotplt, reloc_count is
3734 incremented. However, when we reserve space for TLS descriptors,
3735 it's not incremented, so in order to compute the space reserved
3736 for them, it suffices to multiply the reloc count by the jump
3737 slot size.
3738
3739 PR ld/13302: We start next_irelative_index at the end of .rela.plt
3740 so that R_X86_64_IRELATIVE entries come last. */
3741 if (htab->elf.srelplt)
3742 {
3743 htab->sgotplt_jump_table_size
3744 = elf_x86_64_compute_jump_table_size (htab);
3745 htab->next_irelative_index = htab->elf.srelplt->reloc_count - 1;
3746 }
3747 else if (htab->elf.irelplt)
3748 htab->next_irelative_index = htab->elf.irelplt->reloc_count - 1;
3749
3750 if (htab->tlsdesc_plt)
3751 {
3752 /* If we're not using lazy TLS relocations, don't generate the
3753 PLT and GOT entries they require. */
3754 if ((info->flags & DF_BIND_NOW))
3755 htab->tlsdesc_plt = 0;
3756 else
3757 {
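/* Reserve one GOT slot for the TLS descriptor resolver and one PLT
   slot for the lazy TLSDESC trampoline; their locations are exported
   later via the DT_TLSDESC_GOT and DT_TLSDESC_PLT dynamic entries
   added below. */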
3758 htab->tlsdesc_got = htab->elf.sgot->size;
3759 htab->elf.sgot->size += GOT_ENTRY_SIZE;
3760 /* Reserve room for the initial entry.
3761 FIXME: we could probably do away with it in this case. */
3762 if (htab->elf.splt->size == 0)
3763 htab->elf.splt->size += GET_PLT_ENTRY_SIZE (output_bfd);
3764 htab->tlsdesc_plt = htab->elf.splt->size;
3765 htab->elf.splt->size += GET_PLT_ENTRY_SIZE (output_bfd);
3766 }
3767 }
3768
3769 if (htab->elf.sgotplt)
3770 {
3771 /* Don't allocate the .got.plt section if there are no GOT or PLT
3772 entries and there is no reference to _GLOBAL_OFFSET_TABLE_. */
3773 if ((htab->elf.hgot == NULL
3774 || !htab->elf.hgot->ref_regular_nonweak)
3775 && (htab->elf.sgotplt->size
3776 == get_elf_backend_data (output_bfd)->got_header_size)
3777 && (htab->elf.splt == NULL
3778 || htab->elf.splt->size == 0)
3779 && (htab->elf.sgot == NULL
3780 || htab->elf.sgot->size == 0)
3781 && (htab->elf.iplt == NULL
3782 || htab->elf.iplt->size == 0)
3783 && (htab->elf.igotplt == NULL
3784 || htab->elf.igotplt->size == 0))
3785 htab->elf.sgotplt->size = 0;
3786 }
3787
3788 if (htab->plt_eh_frame != NULL
3789 && htab->elf.splt != NULL
3790 && htab->elf.splt->size != 0
3791 && !bfd_is_abs_section (htab->elf.splt->output_section)
3792 && _bfd_elf_eh_frame_present (info))
3793 {
3794 const struct elf_x86_64_backend_data *arch_data
3795 = get_elf_x86_64_arch_data (bed);
3796 htab->plt_eh_frame->size = arch_data->eh_frame_plt_size;
3797 }
3798
3799 /* We now have determined the sizes of the various dynamic sections.
3800 Allocate memory for them. */
3801 relocs = FALSE;
3802 for (s = dynobj->sections; s != NULL; s = s->next)
3803 {
3804 if ((s->flags & SEC_LINKER_CREATED) == 0)
3805 continue;
3806
3807 if (s == htab->elf.splt
3808 || s == htab->elf.sgot
3809 || s == htab->elf.sgotplt
3810 || s == htab->elf.iplt
3811 || s == htab->elf.igotplt
3812 || s == htab->plt_bnd
3813 || s == htab->plt_got
3814 || s == htab->plt_eh_frame
3815 || s == htab->sdynbss)
3816 {
3817 /* Strip this section if we don't need it; see the
3818 comment below. */
3819 }
3820 else if (CONST_STRNEQ (bfd_get_section_name (dynobj, s), ".rela"))
3821 {
3822 if (s->size != 0 && s != htab->elf.srelplt)
3823 relocs = TRUE;
3824
3825 /* We use the reloc_count field as a counter if we need
3826 to copy relocs into the output file. */
3827 if (s != htab->elf.srelplt)
3828 s->reloc_count = 0;
3829 }
3830 else
3831 {
3832 /* It's not one of our sections, so don't allocate space. */
3833 continue;
3834 }
3835
3836 if (s->size == 0)
3837 {
3838 /* If we don't need this section, strip it from the
3839 output file. This is mostly to handle .rela.bss and
3840 .rela.plt. We must create both sections in
3841 create_dynamic_sections, because they must be created
3842 before the linker maps input sections to output
3843 sections. The linker does that before
3844 adjust_dynamic_symbol is called, and it is that
3845 function which decides whether anything needs to go
3846 into these sections. */
3847
3848 s->flags |= SEC_EXCLUDE;
3849 continue;
3850 }
3851
3852 if ((s->flags & SEC_HAS_CONTENTS) == 0)
3853 continue;
3854
3855 /* Allocate memory for the section contents. We use bfd_zalloc
3856 here in case unused entries are not reclaimed before the
3857 section's contents are written out. This should not happen,
3858 but this way if it does, we get an R_X86_64_NONE reloc instead
3859 of garbage. */
3860 s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
3861 if (s->contents == NULL)
3862 return FALSE;
3863 }
3864
3865 if (htab->plt_eh_frame != NULL
3866 && htab->plt_eh_frame->contents != NULL)
3867 {
3868 const struct elf_x86_64_backend_data *arch_data
3869 = get_elf_x86_64_arch_data (bed);
3870
3871 memcpy (htab->plt_eh_frame->contents,
3872 arch_data->eh_frame_plt, htab->plt_eh_frame->size);
3873 bfd_put_32 (dynobj, htab->elf.splt->size,
3874 htab->plt_eh_frame->contents + PLT_FDE_LEN_OFFSET);
3875 }
3876
3877 if (htab->elf.dynamic_sections_created)
3878 {
3879 /* Add some entries to the .dynamic section. We fill in the
3880 values later, in elf_x86_64_finish_dynamic_sections, but we
3881 must add the entries now so that we get the correct size for
3882 the .dynamic section. The DT_DEBUG entry is filled in by the
3883 dynamic linker and used by the debugger. */
3884 #define add_dynamic_entry(TAG, VAL) \
3885 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
3886
3887 if (bfd_link_executable (info))
3888 {
3889 if (!add_dynamic_entry (DT_DEBUG, 0))
3890 return FALSE;
3891 }
3892
3893 if (htab->elf.splt->size != 0)
3894 {
3895 /* DT_PLTGOT is used by prelink even if there is no PLT
3896 relocation. */
3897 if (!add_dynamic_entry (DT_PLTGOT, 0))
3898 return FALSE;
3899
3900 if (htab->elf.srelplt->size != 0)
3901 {
3902 if (!add_dynamic_entry (DT_PLTRELSZ, 0)
3903 || !add_dynamic_entry (DT_PLTREL, DT_RELA)
3904 || !add_dynamic_entry (DT_JMPREL, 0))
3905 return FALSE;
3906 }
3907
3908 if (htab->tlsdesc_plt
3909 && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
3910 || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
3911 return FALSE;
3912 }
3913
3914 if (relocs)
3915 {
3916 if (!add_dynamic_entry (DT_RELA, 0)
3917 || !add_dynamic_entry (DT_RELASZ, 0)
3918 || !add_dynamic_entry (DT_RELAENT, bed->s->sizeof_rela))
3919 return FALSE;
3920
3921 /* If any dynamic relocs apply to a read-only section,
3922 then we need a DT_TEXTREL entry. */
3923 if ((info->flags & DF_TEXTREL) == 0)
3924 elf_link_hash_traverse (&htab->elf,
3925 elf_x86_64_readonly_dynrelocs,
3926 info);
3927
3928 if ((info->flags & DF_TEXTREL) != 0)
3929 {
3930 if (htab->readonly_dynrelocs_against_ifunc)
3931 {
3932 info->callbacks->einfo
3933 (_("%P%X: read-only segment has dynamic IFUNC relocations; recompile with -fPIC\n"));
3934 bfd_set_error (bfd_error_bad_value);
3935 return FALSE;
3936 }
3937
3938 if (!add_dynamic_entry (DT_TEXTREL, 0))
3939 return FALSE;
3940 }
3941 }
3942 }
3943 #undef add_dynamic_entry
3944
3945 return TRUE;
3946 }
3947
3948 static bfd_boolean
3949 elf_x86_64_always_size_sections (bfd *output_bfd,
3950 struct bfd_link_info *info)
3951 {
3952 asection *tls_sec = elf_hash_table (info)->tls_sec;
3953
3954 if (tls_sec)
3955 {
3956 struct elf_link_hash_entry *tlsbase;
3957
3958 tlsbase = elf_link_hash_lookup (elf_hash_table (info),
3959 "_TLS_MODULE_BASE_",
3960 FALSE, FALSE, FALSE);
3961
3962 if (tlsbase && tlsbase->type == STT_TLS)
3963 {
3964 struct elf_x86_64_link_hash_table *htab;
3965 struct bfd_link_hash_entry *bh = NULL;
3966 const struct elf_backend_data *bed
3967 = get_elf_backend_data (output_bfd);
3968
3969 htab = elf_x86_64_hash_table (info);
3970 if (htab == NULL)
3971 return FALSE;
3972
3973 if (!(_bfd_generic_link_add_one_symbol
3974 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
3975 tls_sec, 0, NULL, FALSE,
3976 bed->collect, &bh)))
3977 return FALSE;
3978
3979 htab->tls_module_base = bh;
3980
3981 tlsbase = (struct elf_link_hash_entry *)bh;
3982 tlsbase->def_regular = 1;
3983 tlsbase->other = STV_HIDDEN;
3984 tlsbase->root.linker_def = 1;
3985 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
3986 }
3987 }
3988
3989 return TRUE;
3990 }
3991
3992 /* _TLS_MODULE_BASE_ needs to be treated specially when linking
3993 executables. Rather than setting it to the beginning of the TLS
3994 section, we have to set it to the end. This function may be called
3995 multiple times; it is idempotent. */
3996
3997 static void
3998 elf_x86_64_set_tls_module_base (struct bfd_link_info *info)
3999 {
4000 struct elf_x86_64_link_hash_table *htab;
4001 struct bfd_link_hash_entry *base;
4002
4003 if (!bfd_link_executable (info))
4004 return;
4005
4006 htab = elf_x86_64_hash_table (info);
4007 if (htab == NULL)
4008 return;
4009
4010 base = htab->tls_module_base;
4011 if (base == NULL)
4012 return;
4013
4014 base->u.def.value = htab->elf.tls_size;
4015 }
4016
4017 /* Return the base VMA address which should be subtracted from real addresses
4018 when resolving @dtpoff relocation.
4019 This is PT_TLS segment p_vaddr. */
4020
4021 static bfd_vma
4022 elf_x86_64_dtpoff_base (struct bfd_link_info *info)
4023 {
4024 /* If tls_sec is NULL, we should have signalled an error already. */
4025 if (elf_hash_table (info)->tls_sec == NULL)
4026 return 0;
4027 return elf_hash_table (info)->tls_sec->vma;
4028 }
4029
4030 /* Return the relocation value for @tpoff relocation
4031 if STT_TLS virtual address is ADDRESS. */
4032
4033 static bfd_vma
4034 elf_x86_64_tpoff (struct bfd_link_info *info, bfd_vma address)
4035 {
4036 struct elf_link_hash_table *htab = elf_hash_table (info);
4037 const struct elf_backend_data *bed = get_elf_backend_data (info->output_bfd);
4038 bfd_vma static_tls_size;
4039
4040 /* If tls_segment is NULL, we should have signalled an error already. */
4041 if (htab->tls_sec == NULL)
4042 return 0;
4043
4044 /* Consider special static TLS alignment requirements. */
4045 static_tls_size = BFD_ALIGN (htab->tls_size, bed->static_tls_alignment);
4046 return address - static_tls_size - htab->tls_sec->vma;
4047 }
4048
4049 /* Is the instruction before OFFSET in CONTENTS a 32bit relative
4050 branch? */
4051
4052 static bfd_boolean
4053 is_32bit_relative_branch (bfd_byte *contents, bfd_vma offset)
4054 {
4055 /* Opcode Instruction
4056 0xe8 call
4057 0xe9 jump
4058 0x0f 0x8x conditional jump */
4059 return ((offset > 0
4060 && (contents [offset - 1] == 0xe8
4061 || contents [offset - 1] == 0xe9))
4062 || (offset > 1
4063 && contents [offset - 2] == 0x0f
4064 && (contents [offset - 1] & 0xf0) == 0x80));
4065 }
4066
4067 /* Relocate an x86_64 ELF section. */
4068
4069 static bfd_boolean
4070 elf_x86_64_relocate_section (bfd *output_bfd,
4071 struct bfd_link_info *info,
4072 bfd *input_bfd,
4073 asection *input_section,
4074 bfd_byte *contents,
4075 Elf_Internal_Rela *relocs,
4076 Elf_Internal_Sym *local_syms,
4077 asection **local_sections)
4078 {
4079 struct elf_x86_64_link_hash_table *htab;
4080 Elf_Internal_Shdr *symtab_hdr;
4081 struct elf_link_hash_entry **sym_hashes;
4082 bfd_vma *local_got_offsets;
4083 bfd_vma *local_tlsdesc_gotents;
4084 Elf_Internal_Rela *rel;
4085 Elf_Internal_Rela *wrel;
4086 Elf_Internal_Rela *relend;
4087 const unsigned int plt_entry_size = GET_PLT_ENTRY_SIZE (info->output_bfd);
4088
4089 BFD_ASSERT (is_x86_64_elf (input_bfd));
4090
4091 /* Skip if check_relocs failed. */
4092 if (input_section->check_relocs_failed)
4093 return FALSE;
4094
4095 htab = elf_x86_64_hash_table (info);
4096 if (htab == NULL)
4097 return FALSE;
4098 symtab_hdr = &elf_symtab_hdr (input_bfd);
4099 sym_hashes = elf_sym_hashes (input_bfd);
4100 local_got_offsets = elf_local_got_offsets (input_bfd);
4101 local_tlsdesc_gotents = elf_x86_64_local_tlsdesc_gotent (input_bfd);
4102
4103 elf_x86_64_set_tls_module_base (info);
4104
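/* REL is the read pointer and WREL the write pointer; when a
   relocation is dropped below, WREL lags behind REL and the array is
   compacted in place, with the counts adjusted after the loop. */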
4105 rel = wrel = relocs;
4106 relend = relocs + input_section->reloc_count;
4107 for (; rel < relend; wrel++, rel++)
4108 {
4109 unsigned int r_type;
4110 reloc_howto_type *howto;
4111 unsigned long r_symndx;
4112 struct elf_link_hash_entry *h;
4113 struct elf_x86_64_link_hash_entry *eh;
4114 Elf_Internal_Sym *sym;
4115 asection *sec;
4116 bfd_vma off, offplt, plt_offset;
4117 bfd_vma relocation;
4118 bfd_boolean unresolved_reloc;
4119 bfd_reloc_status_type r;
4120 int tls_type;
4121 asection *base_got, *resolved_plt;
4122 bfd_vma st_size;
4123 bfd_boolean resolved_to_zero;
4124
4125 r_type = ELF32_R_TYPE (rel->r_info);
4126 if (r_type == (int) R_X86_64_GNU_VTINHERIT
4127 || r_type == (int) R_X86_64_GNU_VTENTRY)
4128 {
4129 if (wrel != rel)
4130 *wrel = *rel;
4131 continue;
4132 }
4133
4134 if (r_type >= (int) R_X86_64_standard)
4135 {
4136 (*_bfd_error_handler)
4137 (_("%B: unrecognized relocation (0x%x) in section `%A'"),
4138 input_bfd, input_section, r_type);
4139 bfd_set_error (bfd_error_bad_value);
4140 return FALSE;
4141 }
4142
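/* R_X86_64_32 on x32 is handled with the alternate howto kept at the
   end of the table; every other relocation indexes the table directly
   by its type. */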
4143 if (r_type != (int) R_X86_64_32
4144 || ABI_64_P (output_bfd))
4145 howto = x86_64_elf_howto_table + r_type;
4146 else
4147 howto = (x86_64_elf_howto_table
4148 + ARRAY_SIZE (x86_64_elf_howto_table) - 1);
4149 r_symndx = htab->r_sym (rel->r_info);
4150 h = NULL;
4151 sym = NULL;
4152 sec = NULL;
4153 unresolved_reloc = FALSE;
4154 if (r_symndx < symtab_hdr->sh_info)
4155 {
4156 sym = local_syms + r_symndx;
4157 sec = local_sections[r_symndx];
4158
4159 relocation = _bfd_elf_rela_local_sym (output_bfd, sym,
4160 &sec, rel);
4161 st_size = sym->st_size;
4162
4163 /* Relocate against local STT_GNU_IFUNC symbol. */
4164 if (!bfd_link_relocatable (info)
4165 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
4166 {
4167 h = elf_x86_64_get_local_sym_hash (htab, input_bfd,
4168 rel, FALSE);
4169 if (h == NULL)
4170 abort ();
4171
4172 /* Set STT_GNU_IFUNC symbol value. */
4173 h->root.u.def.value = sym->st_value;
4174 h->root.u.def.section = sec;
4175 }
4176 }
4177 else
4178 {
4179 bfd_boolean warned ATTRIBUTE_UNUSED;
4180 bfd_boolean ignored ATTRIBUTE_UNUSED;
4181
4182 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
4183 r_symndx, symtab_hdr, sym_hashes,
4184 h, sec, relocation,
4185 unresolved_reloc, warned, ignored);
4186 st_size = h->size;
4187 }
4188
4189 if (sec != NULL && discarded_section (sec))
4190 {
4191 _bfd_clear_contents (howto, input_bfd, input_section,
4192 contents + rel->r_offset);
4193 wrel->r_offset = rel->r_offset;
4194 wrel->r_info = 0;
4195 wrel->r_addend = 0;
4196
4197 /* For ld -r, remove relocations in debug sections against
4198 sections defined in discarded sections. Not done for .eh_frame,
4199 whose editing code expects the relocations to be present. */
4200 if (bfd_link_relocatable (info)
4201 && (input_section->flags & SEC_DEBUGGING))
4202 wrel--;
4203
4204 continue;
4205 }
4206
4207 if (bfd_link_relocatable (info))
4208 {
4209 if (wrel != rel)
4210 *wrel = *rel;
4211 continue;
4212 }
4213
4214 if (rel->r_addend == 0 && !ABI_64_P (output_bfd))
4215 {
4216 if (r_type == R_X86_64_64)
4217 {
4218 /* For x32, treat R_X86_64_64 like R_X86_64_32 and
4219 zero-extend it to 64bit if addend is zero. */
4220 r_type = R_X86_64_32;
4221 memset (contents + rel->r_offset + 4, 0, 4);
4222 }
4223 else if (r_type == R_X86_64_SIZE64)
4224 {
4225 /* For x32, treat R_X86_64_SIZE64 like R_X86_64_SIZE32 and
4226 zero-extend it to 64bit if addend is zero. */
4227 r_type = R_X86_64_SIZE32;
4228 memset (contents + rel->r_offset + 4, 0, 4);
4229 }
4230 }
4231
4232 eh = (struct elf_x86_64_link_hash_entry *) h;
4233
4234 /* Since an STT_GNU_IFUNC symbol must go through the PLT, we
4235 handle it here if it is defined in a non-shared object. */
4236 if (h != NULL
4237 && h->type == STT_GNU_IFUNC
4238 && h->def_regular)
4239 {
4240 bfd_vma plt_index;
4241 const char *name;
4242
4243 if ((input_section->flags & SEC_ALLOC) == 0)
4244 {
4245 /* Dynamic relocs are not propagated for SEC_DEBUGGING
4246 sections because such sections are not SEC_ALLOC and
4247 thus ld.so will not process them. */
4248 if ((input_section->flags & SEC_DEBUGGING) != 0)
4249 continue;
4250 abort ();
4251 }
4252
4253 switch (r_type)
4254 {
4255 default:
4256 break;
4257
4258 case R_X86_64_GOTPCREL:
4259 case R_X86_64_GOTPCRELX:
4260 case R_X86_64_REX_GOTPCRELX:
4261 case R_X86_64_GOTPCREL64:
4262 base_got = htab->elf.sgot;
4263 off = h->got.offset;
4264
4265 if (base_got == NULL)
4266 abort ();
4267
4268 if (off == (bfd_vma) -1)
4269 {
4270 /* We can't use h->got.offset here to save state, or
4271 even just remember the offset, as finish_dynamic_symbol
4272 would use that as offset into .got. */
4273
4274 if (h->plt.offset == (bfd_vma) -1)
4275 abort ();
4276
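/* Compute the .got.plt (or .igot.plt) slot used by the PLT entry.
   In the regular .plt the first entry is reserved and the first three
   .got.plt slots belong to the dynamic linker, hence the -1 and +3
   adjustments; the static IFUNC tables reserve nothing. */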
4277 if (htab->elf.splt != NULL)
4278 {
4279 plt_index = h->plt.offset / plt_entry_size - 1;
4280 off = (plt_index + 3) * GOT_ENTRY_SIZE;
4281 base_got = htab->elf.sgotplt;
4282 }
4283 else
4284 {
4285 plt_index = h->plt.offset / plt_entry_size;
4286 off = plt_index * GOT_ENTRY_SIZE;
4287 base_got = htab->elf.igotplt;
4288 }
4289
4290 if (h->dynindx == -1
4291 || h->forced_local
4292 || info->symbolic)
4293 {
4294 /* This references the local definition. We must
4295 initialize this entry in the global offset table.
4296 Since the offset must always be a multiple of 8,
4297 we use the least significant bit to record
4298 whether we have initialized it already.
4299
4300 When doing a dynamic link, we create a .rela.got
4301 relocation entry to initialize the value. This
4302 is done in the finish_dynamic_symbol routine. */
4303 if ((off & 1) != 0)
4304 off &= ~1;
4305 else
4306 {
4307 bfd_put_64 (output_bfd, relocation,
4308 base_got->contents + off);
4309 /* Note that this is harmless for the GOTPLT64
4310 case, as -1 | 1 still is -1. */
4311 h->got.offset |= 1;
4312 }
4313 }
4314 }
4315
4316 relocation = (base_got->output_section->vma
4317 + base_got->output_offset + off);
4318
4319 goto do_relocation;
4320 }
4321
4322 if (h->plt.offset == (bfd_vma) -1)
4323 {
4324 /* Handle static pointers of STT_GNU_IFUNC symbols. */
4325 if (r_type == htab->pointer_r_type
4326 && (input_section->flags & SEC_CODE) == 0)
4327 goto do_ifunc_pointer;
4328 goto bad_ifunc_reloc;
4329 }
4330
4331 /* STT_GNU_IFUNC symbol must go through PLT. */
4332 if (htab->elf.splt != NULL)
4333 {
4334 if (htab->plt_bnd != NULL)
4335 {
4336 resolved_plt = htab->plt_bnd;
4337 plt_offset = eh->plt_bnd.offset;
4338 }
4339 else
4340 {
4341 resolved_plt = htab->elf.splt;
4342 plt_offset = h->plt.offset;
4343 }
4344 }
4345 else
4346 {
4347 resolved_plt = htab->elf.iplt;
4348 plt_offset = h->plt.offset;
4349 }
4350
4351 relocation = (resolved_plt->output_section->vma
4352 + resolved_plt->output_offset + plt_offset);
4353
4354 switch (r_type)
4355 {
4356 default:
4357 bad_ifunc_reloc:
4358 if (h->root.root.string)
4359 name = h->root.root.string;
4360 else
4361 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
4362 NULL);
4363 (*_bfd_error_handler)
4364 (_("%B: relocation %s against STT_GNU_IFUNC "
4365 "symbol `%s' isn't supported"), input_bfd,
4366 howto->name, name);
4367 bfd_set_error (bfd_error_bad_value);
4368 return FALSE;
4369
4370 case R_X86_64_32S:
4371 if (bfd_link_pic (info))
4372 abort ();
4373 goto do_relocation;
4374
4375 case R_X86_64_32:
4376 if (ABI_64_P (output_bfd))
4377 goto do_relocation;
4378 /* FALLTHROUGH */
4379 case R_X86_64_64:
4380 do_ifunc_pointer:
4381 if (rel->r_addend != 0)
4382 {
4383 if (h->root.root.string)
4384 name = h->root.root.string;
4385 else
4386 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
4387 sym, NULL);
4388 (*_bfd_error_handler)
4389 (_("%B: relocation %s against STT_GNU_IFUNC "
4390 "symbol `%s' has non-zero addend: %d"),
4391 input_bfd, howto->name, name, rel->r_addend);
4392 bfd_set_error (bfd_error_bad_value);
4393 return FALSE;
4394 }
4395
4396 /* Generate a dynamic relocation only when there is a
4397 non-GOT reference in a shared object or there is no
4398 PLT. */
4399 if ((bfd_link_pic (info) && h->non_got_ref)
4400 || h->plt.offset == (bfd_vma) -1)
4401 {
4402 Elf_Internal_Rela outrel;
4403 asection *sreloc;
4404
4405 /* Need a dynamic relocation to get the real function
4406 address. */
4407 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
4408 info,
4409 input_section,
4410 rel->r_offset);
4411 if (outrel.r_offset == (bfd_vma) -1
4412 || outrel.r_offset == (bfd_vma) -2)
4413 abort ();
4414
4415 outrel.r_offset += (input_section->output_section->vma
4416 + input_section->output_offset);
4417
4418 if (h->dynindx == -1
4419 || h->forced_local
4420 || bfd_link_executable (info))
4421 {
4422 /* This symbol is resolved locally. */
4423 outrel.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
4424 outrel.r_addend = (h->root.u.def.value
4425 + h->root.u.def.section->output_section->vma
4426 + h->root.u.def.section->output_offset);
4427 }
4428 else
4429 {
4430 outrel.r_info = htab->r_info (h->dynindx, r_type);
4431 outrel.r_addend = 0;
4432 }
4433
4434 /* Dynamic relocations are stored in
4435 1. .rela.ifunc section in PIC object.
4436 2. .rela.got section in dynamic executable.
4437 3. .rela.iplt section in static executable. */
4438 if (bfd_link_pic (info))
4439 sreloc = htab->elf.irelifunc;
4440 else if (htab->elf.splt != NULL)
4441 sreloc = htab->elf.srelgot;
4442 else
4443 sreloc = htab->elf.irelplt;
4444 elf_append_rela (output_bfd, sreloc, &outrel);
4445
4446 /* If this reloc is against an external symbol, we
4447 do not want to fiddle with the addend. Otherwise,
4448 we need to include the symbol value so that it
4449 becomes an addend for the dynamic reloc. For an
4450 internal symbol, we have already updated the addend. */
4451 continue;
4452 }
4453 /* FALLTHROUGH */
4454 case R_X86_64_PC32:
4455 case R_X86_64_PC32_BND:
4456 case R_X86_64_PC64:
4457 case R_X86_64_PLT32:
4458 case R_X86_64_PLT32_BND:
4459 goto do_relocation;
4460 }
4461 }
4462
4463 resolved_to_zero = (eh != NULL
4464 && UNDEFINED_WEAK_RESOLVED_TO_ZERO (info,
4465 eh->has_got_reloc,
4466 eh));
4467
4468 /* When generating a shared object, the relocations handled here are
4469 copied into the output file to be resolved at run time. */
4470 switch (r_type)
4471 {
4472 case R_X86_64_GOT32:
4473 case R_X86_64_GOT64:
4474 /* Relocation is to the entry for this symbol in the global
4475 offset table. */
4476 case R_X86_64_GOTPCREL:
4477 case R_X86_64_GOTPCRELX:
4478 case R_X86_64_REX_GOTPCRELX:
4479 case R_X86_64_GOTPCREL64:
4480 /* Use global offset table entry as symbol value. */
4481 case R_X86_64_GOTPLT64:
4482 /* This is obsolete and treated the same as GOT64. */
4483 base_got = htab->elf.sgot;
4484
4485 if (htab->elf.sgot == NULL)
4486 abort ();
4487
4488 if (h != NULL)
4489 {
4490 bfd_boolean dyn;
4491
4492 off = h->got.offset;
4493 if (h->needs_plt
4494 && h->plt.offset != (bfd_vma)-1
4495 && off == (bfd_vma)-1)
4496 {
4497 /* We can't use h->got.offset here to save
4498 state, or even just remember the offset, as
4499 finish_dynamic_symbol would use that as offset into
4500 .got. */
4501 bfd_vma plt_index = h->plt.offset / plt_entry_size - 1;
4502 off = (plt_index + 3) * GOT_ENTRY_SIZE;
4503 base_got = htab->elf.sgotplt;
4504 }
4505
4506 dyn = htab->elf.dynamic_sections_created;
4507
4508 if (! WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, bfd_link_pic (info), h)
4509 || (bfd_link_pic (info)
4510 && SYMBOL_REFERENCES_LOCAL (info, h))
4511 || (ELF_ST_VISIBILITY (h->other)
4512 && h->root.type == bfd_link_hash_undefweak))
4513 {
4514 /* This is actually a static link, or it is a -Bsymbolic
4515 link and the symbol is defined locally, or the symbol
4516 was forced to be local because of a version file. We
4517 must initialize this entry in the global offset table.
4518 Since the offset must always be a multiple of 8, we
4519 use the least significant bit to record whether we
4520 have initialized it already.
4521
4522 When doing a dynamic link, we create a .rela.got
4523 relocation entry to initialize the value. This is
4524 done in the finish_dynamic_symbol routine. */
4525 if ((off & 1) != 0)
4526 off &= ~1;
4527 else
4528 {
4529 bfd_put_64 (output_bfd, relocation,
4530 base_got->contents + off);
4531 /* Note that this is harmless for the GOTPLT64 case,
4532 as -1 | 1 still is -1. */
4533 h->got.offset |= 1;
4534 }
4535 }
4536 else
4537 unresolved_reloc = FALSE;
4538 }
4539 else
4540 {
4541 if (local_got_offsets == NULL)
4542 abort ();
4543
4544 off = local_got_offsets[r_symndx];
4545
4546 /* The offset must always be a multiple of 8. We use
4547 the least significant bit to record whether we have
4548 already generated the necessary reloc. */
4549 if ((off & 1) != 0)
4550 off &= ~1;
4551 else
4552 {
4553 bfd_put_64 (output_bfd, relocation,
4554 base_got->contents + off);
4555
4556 if (bfd_link_pic (info))
4557 {
4558 asection *s;
4559 Elf_Internal_Rela outrel;
4560
4561 /* We need to generate a R_X86_64_RELATIVE reloc
4562 for the dynamic linker. */
4563 s = htab->elf.srelgot;
4564 if (s == NULL)
4565 abort ();
4566
4567 outrel.r_offset = (base_got->output_section->vma
4568 + base_got->output_offset
4569 + off);
4570 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4571 outrel.r_addend = relocation;
4572 elf_append_rela (output_bfd, s, &outrel);
4573 }
4574
4575 local_got_offsets[r_symndx] |= 1;
4576 }
4577 }
4578
4579 if (off >= (bfd_vma) -2)
4580 abort ();
4581
4582 relocation = base_got->output_section->vma
4583 + base_got->output_offset + off;
4584 if (r_type != R_X86_64_GOTPCREL
4585 && r_type != R_X86_64_GOTPCRELX
4586 && r_type != R_X86_64_REX_GOTPCRELX
4587 && r_type != R_X86_64_GOTPCREL64)
4588 relocation -= htab->elf.sgotplt->output_section->vma
4589 - htab->elf.sgotplt->output_offset;
4590
4591 break;
4592
4593 case R_X86_64_GOTOFF64:
4594 /* Relocation is relative to the start of the global offset
4595 table. */
4596
4597 /* Check to make sure it isn't a protected function or data
4598 symbol for a shared library, since it may not be local when
4599 used as a function address or with copy relocation. We also
4600 need to make sure that the symbol is referenced locally. */
4601 if (bfd_link_pic (info) && h)
4602 {
4603 if (!h->def_regular)
4604 {
4605 const char *v;
4606
4607 switch (ELF_ST_VISIBILITY (h->other))
4608 {
4609 case STV_HIDDEN:
4610 v = _("hidden symbol");
4611 break;
4612 case STV_INTERNAL:
4613 v = _("internal symbol");
4614 break;
4615 case STV_PROTECTED:
4616 v = _("protected symbol");
4617 break;
4618 default:
4619 v = _("symbol");
4620 break;
4621 }
4622
4623 (*_bfd_error_handler)
4624 (_("%B: relocation R_X86_64_GOTOFF64 against undefined %s `%s' can not be used when making a shared object"),
4625 input_bfd, v, h->root.root.string);
4626 bfd_set_error (bfd_error_bad_value);
4627 return FALSE;
4628 }
4629 else if (!bfd_link_executable (info)
4630 && !SYMBOL_REFERENCES_LOCAL (info, h)
4631 && (h->type == STT_FUNC
4632 || h->type == STT_OBJECT)
4633 && ELF_ST_VISIBILITY (h->other) == STV_PROTECTED)
4634 {
4635 (*_bfd_error_handler)
4636 (_("%B: relocation R_X86_64_GOTOFF64 against protected %s `%s' can not be used when making a shared object"),
4637 input_bfd,
4638 h->type == STT_FUNC ? "function" : "data",
4639 h->root.root.string);
4640 bfd_set_error (bfd_error_bad_value);
4641 return FALSE;
4642 }
4643 }
4644
4645 /* Note that sgot is not involved in this
4646 calculation. We always want the start of .got.plt. If we
4647 defined _GLOBAL_OFFSET_TABLE_ in a different way, as is
4648 permitted by the ABI, we might have to change this
4649 calculation. */
4650 relocation -= htab->elf.sgotplt->output_section->vma
4651 + htab->elf.sgotplt->output_offset;
4652 break;
4653
4654 case R_X86_64_GOTPC32:
4655 case R_X86_64_GOTPC64:
4656 /* Use global offset table as symbol value. */
4657 relocation = htab->elf.sgotplt->output_section->vma
4658 + htab->elf.sgotplt->output_offset;
4659 unresolved_reloc = FALSE;
4660 break;
4661
4662 case R_X86_64_PLTOFF64:
4663 /* Relocation is PLT entry relative to GOT. For local
4664 symbols it's the symbol itself relative to GOT. */
4665 if (h != NULL
4666 /* See PLT32 handling. */
4667 && h->plt.offset != (bfd_vma) -1
4668 && htab->elf.splt != NULL)
4669 {
4670 if (htab->plt_bnd != NULL)
4671 {
4672 resolved_plt = htab->plt_bnd;
4673 plt_offset = eh->plt_bnd.offset;
4674 }
4675 else
4676 {
4677 resolved_plt = htab->elf.splt;
4678 plt_offset = h->plt.offset;
4679 }
4680
4681 relocation = (resolved_plt->output_section->vma
4682 + resolved_plt->output_offset
4683 + plt_offset);
4684 unresolved_reloc = FALSE;
4685 }
4686
4687 relocation -= htab->elf.sgotplt->output_section->vma
4688 + htab->elf.sgotplt->output_offset;
4689 break;
4690
4691 case R_X86_64_PLT32:
4692 case R_X86_64_PLT32_BND:
4693 /* Relocation is to the entry for this symbol in the
4694 procedure linkage table. */
4695
4696 /* Resolve a PLT32 reloc against a local symbol directly,
4697 without using the procedure linkage table. */
4698 if (h == NULL)
4699 break;
4700
4701 if ((h->plt.offset == (bfd_vma) -1
4702 && eh->plt_got.offset == (bfd_vma) -1)
4703 || htab->elf.splt == NULL)
4704 {
4705 /* We didn't make a PLT entry for this symbol. This
4706 happens when statically linking PIC code, or when
4707 using -Bsymbolic. */
4708 break;
4709 }
4710
4711 if (h->plt.offset != (bfd_vma) -1)
4712 {
4713 if (htab->plt_bnd != NULL)
4714 {
4715 resolved_plt = htab->plt_bnd;
4716 plt_offset = eh->plt_bnd.offset;
4717 }
4718 else
4719 {
4720 resolved_plt = htab->elf.splt;
4721 plt_offset = h->plt.offset;
4722 }
4723 }
4724 else
4725 {
4726 /* Use the GOT PLT. */
4727 resolved_plt = htab->plt_got;
4728 plt_offset = eh->plt_got.offset;
4729 }
4730
4731 relocation = (resolved_plt->output_section->vma
4732 + resolved_plt->output_offset
4733 + plt_offset);
4734 unresolved_reloc = FALSE;
4735 break;
4736
4737 case R_X86_64_SIZE32:
4738 case R_X86_64_SIZE64:
4739 /* Set to symbol size. */
4740 relocation = st_size;
4741 goto direct;
4742
4743 case R_X86_64_PC8:
4744 case R_X86_64_PC16:
4745 case R_X86_64_PC32:
4746 case R_X86_64_PC32_BND:
4747 /* Don't complain about -fPIC if the symbol is undefined when
4748 building an executable, unless it is an unresolved weak symbol. */
4749 if ((input_section->flags & SEC_ALLOC) != 0
4750 && (input_section->flags & SEC_READONLY) != 0
4751 && h != NULL
4752 && ((bfd_link_executable (info)
4753 && h->root.type == bfd_link_hash_undefweak
4754 && !resolved_to_zero)
4755 || (bfd_link_pic (info)
4756 && !(bfd_link_pie (info)
4757 && h->root.type == bfd_link_hash_undefined))))
4758 {
4759 bfd_boolean fail = FALSE;
4760 bfd_boolean branch
4761 = ((r_type == R_X86_64_PC32
4762 || r_type == R_X86_64_PC32_BND)
4763 && is_32bit_relative_branch (contents, rel->r_offset));
4764
4765 if (SYMBOL_REFERENCES_LOCAL (info, h))
4766 {
4767 /* Symbol is referenced locally. Make sure it is
4768 defined locally or for a branch. */
4769 fail = !h->def_regular && !branch;
4770 }
4771 else if (!(bfd_link_pie (info)
4772 && (h->needs_copy || eh->needs_copy)))
4773 {
4774 /* Symbol doesn't need copy reloc and isn't referenced
4775 locally. We only allow branch to symbol with
4776 non-default visibility. */
4777 fail = (!branch
4778 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT);
4779 }
4780
4781 if (fail)
4782 return elf_x86_64_need_pic (input_bfd, input_section,
4783 h, NULL, NULL, howto);
4784 }
4785 /* Fall through. */
4786
4787 case R_X86_64_8:
4788 case R_X86_64_16:
4789 case R_X86_64_32:
4790 case R_X86_64_PC64:
4791 case R_X86_64_64:
4792 /* FIXME: The ABI says the linker should make sure the value is
4793 the same when it's zero-extended to 64 bits. */
4794
4795 direct:
4796 if ((input_section->flags & SEC_ALLOC) == 0)
4797 break;
4798
4799 /* Don't copy a pc-relative relocation into the output file
4800 if the symbol needs copy reloc or the symbol is undefined
4801 when building executable. Copy dynamic function pointer
4802 relocations. Don't generate dynamic relocations against
4803 resolved undefined weak symbols in PIE. */
4804 if ((bfd_link_pic (info)
4805 && !(bfd_link_pie (info)
4806 && h != NULL
4807 && (h->needs_copy
4808 || eh->needs_copy
4809 || h->root.type == bfd_link_hash_undefined)
4810 && IS_X86_64_PCREL_TYPE (r_type))
4811 && (h == NULL
4812 || ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4813 && !resolved_to_zero)
4814 || h->root.type != bfd_link_hash_undefweak))
4815 && ((! IS_X86_64_PCREL_TYPE (r_type)
4816 && r_type != R_X86_64_SIZE32
4817 && r_type != R_X86_64_SIZE64)
4818 || ! SYMBOL_CALLS_LOCAL (info, h)))
4819 || (ELIMINATE_COPY_RELOCS
4820 && !bfd_link_pic (info)
4821 && h != NULL
4822 && h->dynindx != -1
4823 && (!h->non_got_ref
4824 || eh->func_pointer_refcount > 0
4825 || (h->root.type == bfd_link_hash_undefweak
4826 && !resolved_to_zero))
4827 && ((h->def_dynamic && !h->def_regular)
4828 /* Undefined weak symbol is bound locally when
4829 PIC is false. */
4830 || h->root.type == bfd_link_hash_undefined)))
4831 {
4832 Elf_Internal_Rela outrel;
4833 bfd_boolean skip, relocate;
4834 asection *sreloc;
4835
4836 /* When generating a shared object, these relocations
4837 are copied into the output file to be resolved at run
4838 time. */
4839 skip = FALSE;
4840 relocate = FALSE;
4841
4842 outrel.r_offset =
4843 _bfd_elf_section_offset (output_bfd, info, input_section,
4844 rel->r_offset);
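/* An offset of -1 means the relocation target has been discarded;
   -2 means the dynamic relocation must be dropped but the section
   contents still relocated, hence RELOCATE is set as well. */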
4845 if (outrel.r_offset == (bfd_vma) -1)
4846 skip = TRUE;
4847 else if (outrel.r_offset == (bfd_vma) -2)
4848 skip = TRUE, relocate = TRUE;
4849
4850 outrel.r_offset += (input_section->output_section->vma
4851 + input_section->output_offset);
4852
4853 if (skip)
4854 memset (&outrel, 0, sizeof outrel);
4855
4856 /* h->dynindx may be -1 if this symbol was marked to
4857 become local. */
4858 else if (h != NULL
4859 && h->dynindx != -1
4860 && (IS_X86_64_PCREL_TYPE (r_type)
4861 || !(bfd_link_executable (info)
4862 || SYMBOLIC_BIND (info, h))
4863 || ! h->def_regular))
4864 {
4865 outrel.r_info = htab->r_info (h->dynindx, r_type);
4866 outrel.r_addend = rel->r_addend;
4867 }
4868 else
4869 {
4870 /* This symbol is local, or marked to become local.
4871 When relocation overflow check is disabled, we
4872 convert R_X86_64_32 to dynamic R_X86_64_RELATIVE. */
4873 if (r_type == htab->pointer_r_type
4874 || (r_type == R_X86_64_32
4875 && info->no_reloc_overflow_check))
4876 {
4877 relocate = TRUE;
4878 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4879 outrel.r_addend = relocation + rel->r_addend;
4880 }
4881 else if (r_type == R_X86_64_64
4882 && !ABI_64_P (output_bfd))
4883 {
4884 relocate = TRUE;
4885 outrel.r_info = htab->r_info (0,
4886 R_X86_64_RELATIVE64);
4887 outrel.r_addend = relocation + rel->r_addend;
4888 /* Check addend overflow. */
4889 if ((outrel.r_addend & 0x80000000)
4890 != (rel->r_addend & 0x80000000))
4891 {
4892 const char *name;
4893 int addend = rel->r_addend;
4894 if (h && h->root.root.string)
4895 name = h->root.root.string;
4896 else
4897 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
4898 sym, NULL);
4899 if (addend < 0)
4900 (*_bfd_error_handler)
4901 (_("%B: addend -0x%x in relocation %s against "
4902 "symbol `%s' at 0x%lx in section `%A' is "
4903 "out of range"),
4904 input_bfd, input_section, addend,
4905 howto->name, name,
4906 (unsigned long) rel->r_offset);
4907 else
4908 (*_bfd_error_handler)
4909 (_("%B: addend 0x%x in relocation %s against "
4910 "symbol `%s' at 0x%lx in section `%A' is "
4911 "out of range"),
4912 input_bfd, input_section, addend,
4913 howto->name, name,
4914 (unsigned long) rel->r_offset);
4915 bfd_set_error (bfd_error_bad_value);
4916 return FALSE;
4917 }
4918 }
4919 else
4920 {
4921 long sindx;
4922
4923 if (bfd_is_abs_section (sec))
4924 sindx = 0;
4925 else if (sec == NULL || sec->owner == NULL)
4926 {
4927 bfd_set_error (bfd_error_bad_value);
4928 return FALSE;
4929 }
4930 else
4931 {
4932 asection *osec;
4933
4934 /* We are turning this relocation into one
4935 against a section symbol. It would be
4936 proper to subtract the symbol's value,
4937 osec->vma, from the emitted reloc addend,
4938 but ld.so expects buggy relocs. */
4939 osec = sec->output_section;
4940 sindx = elf_section_data (osec)->dynindx;
4941 if (sindx == 0)
4942 {
4943 asection *oi = htab->elf.text_index_section;
4944 sindx = elf_section_data (oi)->dynindx;
4945 }
4946 BFD_ASSERT (sindx != 0);
4947 }
4948
4949 outrel.r_info = htab->r_info (sindx, r_type);
4950 outrel.r_addend = relocation + rel->r_addend;
4951 }
4952 }
4953
4954 sreloc = elf_section_data (input_section)->sreloc;
4955
4956 if (sreloc == NULL || sreloc->contents == NULL)
4957 {
4958 r = bfd_reloc_notsupported;
4959 goto check_relocation_error;
4960 }
4961
4962 elf_append_rela (output_bfd, sreloc, &outrel);
4963
4964 /* If this reloc is against an external symbol, we do
4965 not want to fiddle with the addend. Otherwise, we
4966 need to include the symbol value so that it becomes
4967 an addend for the dynamic reloc. */
4968 if (! relocate)
4969 continue;
4970 }
4971
4972 break;
4973
4974 case R_X86_64_TLSGD:
4975 case R_X86_64_GOTPC32_TLSDESC:
4976 case R_X86_64_TLSDESC_CALL:
4977 case R_X86_64_GOTTPOFF:
4978 tls_type = GOT_UNKNOWN;
4979 if (h == NULL && local_got_offsets)
4980 tls_type = elf_x86_64_local_got_tls_type (input_bfd) [r_symndx];
4981 else if (h != NULL)
4982 tls_type = elf_x86_64_hash_entry (h)->tls_type;
4983
4984 if (! elf_x86_64_tls_transition (info, input_bfd,
4985 input_section, contents,
4986 symtab_hdr, sym_hashes,
4987 &r_type, tls_type, rel,
4988 relend, h, r_symndx, TRUE))
4989 return FALSE;
4990
4991 if (r_type == R_X86_64_TPOFF32)
4992 {
4993 bfd_vma roff = rel->r_offset;
4994
4995 BFD_ASSERT (! unresolved_reloc);
4996
4997 if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSGD)
4998 {
4999 /* GD->LE transition. For 64bit, change
5000 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
5001 .word 0x6666; rex64; call __tls_get_addr@PLT
5002 or
5003 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
5004 .byte 0x66; rex64
5005 call *__tls_get_addr@GOTPCREL(%rip)
5006 which may be converted to
5007 addr32 call __tls_get_addr
5008 into:
5009 movq %fs:0, %rax
5010 leaq foo@tpoff(%rax), %rax
5011 For 32bit, change
5012 leaq foo@tlsgd(%rip), %rdi
5013 .word 0x6666; rex64; call __tls_get_addr@PLT
5014 or
5015 leaq foo@tlsgd(%rip), %rdi
5016 .byte 0x66; rex64
5017 call *__tls_get_addr@GOTPCREL(%rip)
5018 which may be converted to
5019 addr32 call __tls_get_addr
5020 into:
5021 movl %fs:0, %eax
5022 leaq foo@tpoff(%rax), %rax
5023 For largepic, change:
5024 leaq foo@tlsgd(%rip), %rdi
5025 movabsq $__tls_get_addr@pltoff, %rax
5026 addq %r15, %rax
5027 call *%rax
5028 into:
5029 movq %fs:0, %rax
5030 leaq foo@tpoff(%rax), %rax
5031 nopw 0x0(%rax,%rax,1) */
5032 int largepic = 0;
5033 if (ABI_64_P (output_bfd))
5034 {
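/* A 0xb8 byte at ROFF + 5 is the second opcode byte of the
   movabsq $__tls_get_addr@pltoff, %rax (48 b8 ...) that follows
   the leaq in the large-model sequence shown above. */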
5035 if (contents[roff + 5] == 0xb8)
5036 {
5037 memcpy (contents + roff - 3,
5038 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80"
5039 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
5040 largepic = 1;
5041 }
5042 else
5043 memcpy (contents + roff - 4,
5044 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
5045 16);
5046 }
5047 else
5048 memcpy (contents + roff - 3,
5049 "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
5050 15);
5051 bfd_put_32 (output_bfd,
5052 elf_x86_64_tpoff (info, relocation),
5053 contents + roff + 8 + largepic);
5054 /* Skip R_X86_64_PC32, R_X86_64_PLT32,
5055 R_X86_64_GOTPCRELX and R_X86_64_PLTOFF64. */
5056 rel++;
5057 wrel++;
5058 continue;
5059 }
5060 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTPC32_TLSDESC)
5061 {
5062 /* GDesc -> LE transition.
5063 It's originally something like:
5064 leaq x@tlsdesc(%rip), %rax
5065
5066 Change it to:
5067 movl $x@tpoff, %rax. */
5068
5069 unsigned int val, type;
5070
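/* TYPE is the REX byte of the original leaq and VAL its ModRM byte.
   The register moves from the ModRM reg field to the rm field, so the
   REX.R bit of the leaq becomes the REX.B bit of the new mov. */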
5071 type = bfd_get_8 (input_bfd, contents + roff - 3);
5072 val = bfd_get_8 (input_bfd, contents + roff - 1);
5073 bfd_put_8 (output_bfd, 0x48 | ((type >> 2) & 1),
5074 contents + roff - 3);
5075 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
5076 bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7),
5077 contents + roff - 1);
5078 bfd_put_32 (output_bfd,
5079 elf_x86_64_tpoff (info, relocation),
5080 contents + roff);
5081 continue;
5082 }
5083 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSDESC_CALL)
5084 {
5085 /* GDesc -> LE transition.
5086 It's originally:
5087 call *(%rax)
5088 Turn it into:
5089 xchg %ax,%ax. */
5090 bfd_put_8 (output_bfd, 0x66, contents + roff);
5091 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
5092 continue;
5093 }
5094 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTTPOFF)
5095 {
5096 /* IE->LE transition:
5097 For 64bit, originally it can be one of:
5098 movq foo@gottpoff(%rip), %reg
5099 addq foo@gottpoff(%rip), %reg
5100 We change it into:
5101 movq $foo, %reg
5102 leaq foo(%reg), %reg
5103 addq $foo, %reg.
5104 For 32bit, originally it can be one of:
5105 movq foo@gottpoff(%rip), %reg
5106 addl foo@gottpoff(%rip), %reg
5107 We change it into:
5108 movq $foo, %reg
5109 leal foo(%reg), %reg
5110 addl $foo, %reg. */
5111
5112 unsigned int val, type, reg;
5113
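/* VAL is the REX prefix (if any), TYPE the opcode (0x8b for movq,
   otherwise an add) and REG the ModRM byte; shifting by 3 leaves the
   register field, as mod is zero for RIP-relative addressing. */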
5114 if (roff >= 3)
5115 val = bfd_get_8 (input_bfd, contents + roff - 3);
5116 else
5117 val = 0;
5118 type = bfd_get_8 (input_bfd, contents + roff - 2);
5119 reg = bfd_get_8 (input_bfd, contents + roff - 1);
5120 reg >>= 3;
5121 if (type == 0x8b)
5122 {
5123 /* movq */
5124 if (val == 0x4c)
5125 bfd_put_8 (output_bfd, 0x49,
5126 contents + roff - 3);
5127 else if (!ABI_64_P (output_bfd) && val == 0x44)
5128 bfd_put_8 (output_bfd, 0x41,
5129 contents + roff - 3);
5130 bfd_put_8 (output_bfd, 0xc7,
5131 contents + roff - 2);
5132 bfd_put_8 (output_bfd, 0xc0 | reg,
5133 contents + roff - 1);
5134 }
5135 else if (reg == 4)
5136 {
5137 /* addq/addl -> addq/addl - addressing with %rsp/%r12
5138 is special */
5139 if (val == 0x4c)
5140 bfd_put_8 (output_bfd, 0x49,
5141 contents + roff - 3);
5142 else if (!ABI_64_P (output_bfd) && val == 0x44)
5143 bfd_put_8 (output_bfd, 0x41,
5144 contents + roff - 3);
5145 bfd_put_8 (output_bfd, 0x81,
5146 contents + roff - 2);
5147 bfd_put_8 (output_bfd, 0xc0 | reg,
5148 contents + roff - 1);
5149 }
5150 else
5151 {
5152 /* addq/addl -> leaq/leal */
5153 if (val == 0x4c)
5154 bfd_put_8 (output_bfd, 0x4d,
5155 contents + roff - 3);
5156 else if (!ABI_64_P (output_bfd) && val == 0x44)
5157 bfd_put_8 (output_bfd, 0x45,
5158 contents + roff - 3);
5159 bfd_put_8 (output_bfd, 0x8d,
5160 contents + roff - 2);
5161 bfd_put_8 (output_bfd, 0x80 | reg | (reg << 3),
5162 contents + roff - 1);
5163 }
5164 bfd_put_32 (output_bfd,
5165 elf_x86_64_tpoff (info, relocation),
5166 contents + roff);
5167 continue;
5168 }
5169 else
5170 BFD_ASSERT (FALSE);
5171 }
5172
5173 if (htab->elf.sgot == NULL)
5174 abort ();
5175
5176 if (h != NULL)
5177 {
5178 off = h->got.offset;
5179 offplt = elf_x86_64_hash_entry (h)->tlsdesc_got;
5180 }
5181 else
5182 {
5183 if (local_got_offsets == NULL)
5184 abort ();
5185
5186 off = local_got_offsets[r_symndx];
5187 offplt = local_tlsdesc_gotents[r_symndx];
5188 }
5189
5190 if ((off & 1) != 0)
5191 off &= ~1;
5192 else
5193 {
5194 Elf_Internal_Rela outrel;
5195 int dr_type, indx;
5196 asection *sreloc;
5197
5198 if (htab->elf.srelgot == NULL)
5199 abort ();
5200
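/* Relocations against symbols resolved locally use dynamic symbol
   index 0. */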
5201 indx = h && h->dynindx != -1 ? h->dynindx : 0;
5202
5203 if (GOT_TLS_GDESC_P (tls_type))
5204 {
5205 outrel.r_info = htab->r_info (indx, R_X86_64_TLSDESC);
5206 BFD_ASSERT (htab->sgotplt_jump_table_size + offplt
5207 + 2 * GOT_ENTRY_SIZE <= htab->elf.sgotplt->size);
5208 outrel.r_offset = (htab->elf.sgotplt->output_section->vma
5209 + htab->elf.sgotplt->output_offset
5210 + offplt
5211 + htab->sgotplt_jump_table_size);
5212 sreloc = htab->elf.srelplt;
5213 if (indx == 0)
5214 outrel.r_addend = relocation - elf_x86_64_dtpoff_base (info);
5215 else
5216 outrel.r_addend = 0;
5217 elf_append_rela (output_bfd, sreloc, &outrel);
5218 }
5219
5220 sreloc = htab->elf.srelgot;
5221
5222 outrel.r_offset = (htab->elf.sgot->output_section->vma
5223 + htab->elf.sgot->output_offset + off);
5224
5225 if (GOT_TLS_GD_P (tls_type))
5226 dr_type = R_X86_64_DTPMOD64;
5227 else if (GOT_TLS_GDESC_P (tls_type))
5228 goto dr_done;
5229 else
5230 dr_type = R_X86_64_TPOFF64;
5231
5232 bfd_put_64 (output_bfd, 0, htab->elf.sgot->contents + off);
5233 outrel.r_addend = 0;
5234 if ((dr_type == R_X86_64_TPOFF64
5235 || dr_type == R_X86_64_TLSDESC) && indx == 0)
5236 outrel.r_addend = relocation - elf_x86_64_dtpoff_base (info);
5237 outrel.r_info = htab->r_info (indx, dr_type);
5238
5239 elf_append_rela (output_bfd, sreloc, &outrel);
5240
5241 if (GOT_TLS_GD_P (tls_type))
5242 {
5243 if (indx == 0)
5244 {
5245 BFD_ASSERT (! unresolved_reloc);
5246 bfd_put_64 (output_bfd,
5247 relocation - elf_x86_64_dtpoff_base (info),
5248 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
5249 }
5250 else
5251 {
5252 bfd_put_64 (output_bfd, 0,
5253 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
5254 outrel.r_info = htab->r_info (indx,
5255 R_X86_64_DTPOFF64);
5256 outrel.r_offset += GOT_ENTRY_SIZE;
5257 elf_append_rela (output_bfd, sreloc,
5258 &outrel);
5259 }
5260 }
5261
5262 dr_done:
5263 if (h != NULL)
5264 h->got.offset |= 1;
5265 else
5266 local_got_offsets[r_symndx] |= 1;
5267 }
5268
5269 if (off >= (bfd_vma) -2
5270 && ! GOT_TLS_GDESC_P (tls_type))
5271 abort ();
5272 if (r_type == ELF32_R_TYPE (rel->r_info))
5273 {
5274 if (r_type == R_X86_64_GOTPC32_TLSDESC
5275 || r_type == R_X86_64_TLSDESC_CALL)
5276 relocation = htab->elf.sgotplt->output_section->vma
5277 + htab->elf.sgotplt->output_offset
5278 + offplt + htab->sgotplt_jump_table_size;
5279 else
5280 relocation = htab->elf.sgot->output_section->vma
5281 + htab->elf.sgot->output_offset + off;
5282 unresolved_reloc = FALSE;
5283 }
5284 else
5285 {
5286 bfd_vma roff = rel->r_offset;
5287
5288 if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSGD)
5289 {
5290 /* GD->IE transition. For 64bit, change
5291 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
5292 .word 0x6666; rex64; call __tls_get_addr@PLT
5293 or
5294 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
5295 .byte 0x66; rex64
5296 call *__tls_get_addr@GOTPCREL(%rip)
5297 which may be converted to
5298 addr32 call __tls_get_addr
5299 into:
5300 movq %fs:0, %rax
5301 addq foo@gottpoff(%rip), %rax
5302 For 32bit, change
5303 leaq foo@tlsgd(%rip), %rdi
5304 .word 0x6666; rex64; call __tls_get_addr@PLT
5305 or
5306 leaq foo@tlsgd(%rip), %rdi
5307 .byte 0x66; rex64;
5308 call *__tls_get_addr@GOTPCREL(%rip)
5309 which may be converted to
5310 addr32 call __tls_get_addr
5311 into:
5312 movl %fs:0, %eax
5313 addq foo@gottpoff(%rip), %rax
5314 For largepic, change:
5315 leaq foo@tlsgd(%rip), %rdi
5316 movabsq $__tls_get_addr@pltoff, %rax
5317 addq %r15, %rax
5318 call *%rax
5319 into:
5320 movq %fs:0, %rax
5321 addq foo@gottpoff(%rax), %rax
5322 nopw 0x0(%rax,%rax,1) */
5323 int largepic = 0;
5324 if (ABI_64_P (output_bfd))
5325 {
5326 if (contents[roff + 5] == 0xb8)
5327 {
5328 memcpy (contents + roff - 3,
5329 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05"
5330 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
5331 largepic = 1;
5332 }
5333 else
5334 memcpy (contents + roff - 4,
5335 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
5336 16);
5337 }
5338 else
5339 memcpy (contents + roff - 3,
5340 "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
5341 15);
5342
5343 relocation = (htab->elf.sgot->output_section->vma
5344 + htab->elf.sgot->output_offset + off
5345 - roff
5346 - largepic
5347 - input_section->output_section->vma
5348 - input_section->output_offset
5349 - 12);
5350 bfd_put_32 (output_bfd, relocation,
5351 contents + roff + 8 + largepic);
5352 /* Skip R_X86_64_PLT32/R_X86_64_PLTOFF64. */
5353 rel++;
5354 wrel++;
5355 continue;
5356 }
5357 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTPC32_TLSDESC)
5358 {
5359 /* GDesc -> IE transition.
5360 It's originally something like:
5361 leaq x@tlsdesc(%rip), %rax
5362
5363 Change it to:
5364 movq x@gottpoff(%rip), %rax # before xchg %ax,%ax. */
5365
5366 /* Now modify the instruction as appropriate. To
5367 turn a leaq into a movq in the form we use it, it
5368 suffices to change the second byte from 0x8d to
5369 0x8b. */
5370 bfd_put_8 (output_bfd, 0x8b, contents + roff - 2);
5371
5372 bfd_put_32 (output_bfd,
5373 htab->elf.sgot->output_section->vma
5374 + htab->elf.sgot->output_offset + off
5375 - rel->r_offset
5376 - input_section->output_section->vma
5377 - input_section->output_offset
5378 - 4,
5379 contents + roff);
5380 continue;
5381 }
5382 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSDESC_CALL)
5383 {
5384 /* GDesc -> IE transition.
5385 It's originally:
5386 call *(%rax)
5387
5388 Change it to:
5389 xchg %ax, %ax. */
5390
5391 bfd_put_8 (output_bfd, 0x66, contents + roff);
5392 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
5393 continue;
5394 }
5395 else
5396 BFD_ASSERT (FALSE);
5397 }
5398 break;
5399
5400 case R_X86_64_TLSLD:
5401 if (! elf_x86_64_tls_transition (info, input_bfd,
5402 input_section, contents,
5403 symtab_hdr, sym_hashes,
5404 &r_type, GOT_UNKNOWN, rel,
5405 relend, h, r_symndx, TRUE))
5406 return FALSE;
5407
5408 if (r_type != R_X86_64_TLSLD)
5409 {
5410 /* LD->LE transition:
5411 leaq foo@tlsld(%rip), %rdi
5412 call __tls_get_addr@PLT
5413 For 64bit, we change it into:
5414 .word 0x6666; .byte 0x66; movq %fs:0, %rax
5415 For 32bit, we change it into:
5416 nopl 0x0(%rax); movl %fs:0, %eax
5417 Or
5418 leaq foo@tlsld(%rip), %rdi;
5419 call *__tls_get_addr@GOTPCREL(%rip)
5420 which may be converted to
5421 addr32 call __tls_get_addr
5422 For 64bit, we change it into:
5423 .word 0x6666; .word 0x6666; movq %fs:0, %rax
5424 For 32bit, we change it into:
5425 nopw 0x0(%rax); movl %fs:0, %eax
5426 For largepic, change:
5427 leaq foo@tlsgd(%rip), %rdi
5428 movabsq $__tls_get_addr@pltoff, %rax
5429 addq %rbx, %rax
5430 call *%rax
5431 into
5432 data16 data16 data16 nopw %cs:0x0(%rax,%rax,1)
5433 movq %fs:0, %rax */
5434
5435 BFD_ASSERT (r_type == R_X86_64_TPOFF32);
5436 if (ABI_64_P (output_bfd))
5437 {
5438 if (contents[rel->r_offset + 5] == 0xb8)
5439 memcpy (contents + rel->r_offset - 3,
5440 "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0"
5441 "\x64\x48\x8b\x04\x25\0\0\0", 22);
5442 else if (contents[rel->r_offset + 4] == 0xff
5443 || contents[rel->r_offset + 4] == 0x67)
5444 memcpy (contents + rel->r_offset - 3,
5445 "\x66\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0",
5446 13);
5447 else
5448 memcpy (contents + rel->r_offset - 3,
5449 "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12);
5450 }
5451 else
5452 {
5453 if (contents[rel->r_offset + 4] == 0xff)
5454 memcpy (contents + rel->r_offset - 3,
5455 "\x66\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0",
5456 13);
5457 else
5458 memcpy (contents + rel->r_offset - 3,
5459 "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12);
5460 }
5461 /* Skip R_X86_64_PC32, R_X86_64_PLT32, R_X86_64_GOTPCRELX
5462 and R_X86_64_PLTOFF64. */
5463 rel++;
5464 wrel++;
5465 continue;
5466 }
5467
5468 if (htab->elf.sgot == NULL)
5469 abort ();
5470
5471 off = htab->tls_ld_got.offset;
5472 if (off & 1)
5473 off &= ~1;
5474 else
5475 {
5476 Elf_Internal_Rela outrel;
5477
5478 if (htab->elf.srelgot == NULL)
5479 abort ();
5480
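/* Both halves of the LD GOT entry start out as zero; the dynamic
   linker fills in the module id via R_X86_64_DTPMOD64, and the offset
   half stays zero for the local-dynamic model. */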
5481 outrel.r_offset = (htab->elf.sgot->output_section->vma
5482 + htab->elf.sgot->output_offset + off);
5483
5484 bfd_put_64 (output_bfd, 0,
5485 htab->elf.sgot->contents + off);
5486 bfd_put_64 (output_bfd, 0,
5487 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
5488 outrel.r_info = htab->r_info (0, R_X86_64_DTPMOD64);
5489 outrel.r_addend = 0;
5490 elf_append_rela (output_bfd, htab->elf.srelgot,
5491 &outrel);
5492 htab->tls_ld_got.offset |= 1;
5493 }
5494 relocation = htab->elf.sgot->output_section->vma
5495 + htab->elf.sgot->output_offset + off;
5496 unresolved_reloc = FALSE;
5497 break;
5498
5499 case R_X86_64_DTPOFF32:
5500 if (!bfd_link_executable (info)
5501 || (input_section->flags & SEC_CODE) == 0)
5502 relocation -= elf_x86_64_dtpoff_base (info);
5503 else
5504 relocation = elf_x86_64_tpoff (info, relocation);
5505 break;
5506
5507 case R_X86_64_TPOFF32:
5508 case R_X86_64_TPOFF64:
5509 BFD_ASSERT (bfd_link_executable (info));
5510 relocation = elf_x86_64_tpoff (info, relocation);
5511 break;
5512
5513 case R_X86_64_DTPOFF64:
5514 BFD_ASSERT ((input_section->flags & SEC_CODE) == 0);
5515 relocation -= elf_x86_64_dtpoff_base (info);
5516 break;
5517
5518 default:
5519 break;
5520 }
5521
5522 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
5523 because such sections are not SEC_ALLOC and thus ld.so will
5524 not process them. */
5525 if (unresolved_reloc
5526 && !((input_section->flags & SEC_DEBUGGING) != 0
5527 && h->def_dynamic)
5528 && _bfd_elf_section_offset (output_bfd, info, input_section,
5529 rel->r_offset) != (bfd_vma) -1)
5530 {
5531 (*_bfd_error_handler)
5532 (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
5533 input_bfd,
5534 input_section,
5535 (long) rel->r_offset,
5536 howto->name,
5537 h->root.root.string);
5538 return FALSE;
5539 }
5540
5541 do_relocation:
5542 r = _bfd_final_link_relocate (howto, input_bfd, input_section,
5543 contents, rel->r_offset,
5544 relocation, rel->r_addend);
5545
5546 check_relocation_error:
5547 if (r != bfd_reloc_ok)
5548 {
5549 const char *name;
5550
5551 if (h != NULL)
5552 name = h->root.root.string;
5553 else
5554 {
5555 name = bfd_elf_string_from_elf_section (input_bfd,
5556 symtab_hdr->sh_link,
5557 sym->st_name);
5558 if (name == NULL)
5559 return FALSE;
5560 if (*name == '\0')
5561 name = bfd_section_name (input_bfd, sec);
5562 }
5563
5564 if (r == bfd_reloc_overflow)
5565 (*info->callbacks->reloc_overflow)
5566 (info, (h ? &h->root : NULL), name, howto->name,
5567 (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
5568 else
5569 {
5570 (*_bfd_error_handler)
5571 (_("%B(%A+0x%lx): reloc against `%s': error %d"),
5572 input_bfd, input_section,
5573 (long) rel->r_offset, name, (int) r);
5574 return FALSE;
5575 }
5576 }
5577
5578 if (wrel != rel)
5579 *wrel = *rel;
5580 }
5581
5582 if (wrel != rel)
5583 {
5584 Elf_Internal_Shdr *rel_hdr;
5585 size_t deleted = rel - wrel;
5586
5587 rel_hdr = _bfd_elf_single_rel_hdr (input_section->output_section);
5588 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
5589 if (rel_hdr->sh_size == 0)
5590 {
5591 /* It is too late to remove an empty reloc section. Leave
5592 one NONE reloc.
5593 ??? What is wrong with an empty section??? */
5594 rel_hdr->sh_size = rel_hdr->sh_entsize;
5595 deleted -= 1;
5596 }
5597 rel_hdr = _bfd_elf_single_rel_hdr (input_section);
5598 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
5599 input_section->reloc_count -= deleted;
5600 }
5601
5602 return TRUE;
5603 }
5604
5605 /* Finish up dynamic symbol handling. We set the contents of various
5606 dynamic sections here. */
5607
5608 static bfd_boolean
5609 elf_x86_64_finish_dynamic_symbol (bfd *output_bfd,
5610 struct bfd_link_info *info,
5611 struct elf_link_hash_entry *h,
5612 Elf_Internal_Sym *sym)
5613 {
5614 struct elf_x86_64_link_hash_table *htab;
5615 const struct elf_x86_64_backend_data *abed;
5616 bfd_boolean use_plt_bnd;
5617 struct elf_x86_64_link_hash_entry *eh;
5618 bfd_boolean local_undefweak;
5619
5620 htab = elf_x86_64_hash_table (info);
5621 if (htab == NULL)
5622 return FALSE;
5623
5624 /* Use MPX backend data in case of BND relocations. Use the .plt_bnd
5625 section only if there is a .plt section. */
5626 use_plt_bnd = htab->elf.splt != NULL && htab->plt_bnd != NULL;
5627 abed = (use_plt_bnd
5628 ? &elf_x86_64_bnd_arch_bed
5629 : get_elf_x86_64_backend_data (output_bfd));
5630
5631 eh = (struct elf_x86_64_link_hash_entry *) h;
5632
5633 /* We keep PLT/GOT entries without dynamic PLT/GOT relocations for
5634 resolved undefined weak symbols in executables so that their
5635 references have value 0 at run-time. */
5636 local_undefweak = UNDEFINED_WEAK_RESOLVED_TO_ZERO (info,
5637 eh->has_got_reloc,
5638 eh);
5639
5640 if (h->plt.offset != (bfd_vma) -1)
5641 {
5642 bfd_vma plt_index;
5643 bfd_vma got_offset, plt_offset, plt_plt_offset, plt_got_offset;
5644 bfd_vma plt_plt_insn_end, plt_got_insn_size;
5645 Elf_Internal_Rela rela;
5646 bfd_byte *loc;
5647 asection *plt, *gotplt, *relplt, *resolved_plt;
5648 const struct elf_backend_data *bed;
5649 bfd_vma plt_got_pcrel_offset;
5650
5651 /* When building a static executable, use .iplt, .igot.plt and
5652 .rela.iplt sections for STT_GNU_IFUNC symbols. */
5653 if (htab->elf.splt != NULL)
5654 {
5655 plt = htab->elf.splt;
5656 gotplt = htab->elf.sgotplt;
5657 relplt = htab->elf.srelplt;
5658 }
5659 else
5660 {
5661 plt = htab->elf.iplt;
5662 gotplt = htab->elf.igotplt;
5663 relplt = htab->elf.irelplt;
5664 }
5665
5666 /* This symbol has an entry in the procedure linkage table. Set
5667 it up. */
5668 if ((h->dynindx == -1
5669 && !local_undefweak
5670 && !((h->forced_local || bfd_link_executable (info))
5671 && h->def_regular
5672 && h->type == STT_GNU_IFUNC))
5673 || plt == NULL
5674 || gotplt == NULL
5675 || relplt == NULL)
5676 abort ();
5677
5678 /* Get the index in the procedure linkage table which
5679 corresponds to this symbol. This is the index of this symbol
5680 in all the symbols for which we are making plt entries. The
5681 first entry in the procedure linkage table is reserved.
5682
5683 Get the offset into the .got table of the entry that
5684 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
5685 bytes. The first three are reserved for the dynamic linker.
5686
5687 For static executables, we don't reserve anything. */
5688
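/* Concretely: in the normal .plt the first entry is reserved for PLT0,
   so this symbol's entry index is h->plt.offset / plt_entry_size - 1,
   and skipping the three reserved .got.plt slots gives a GOT offset of
   (index + 3) * GOT_ENTRY_SIZE.  In the .iplt used for static
   executables nothing is reserved, so index and offset are computed
   directly.  */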
5689 if (plt == htab->elf.splt)
5690 {
5691 got_offset = h->plt.offset / abed->plt_entry_size - 1;
5692 got_offset = (got_offset + 3) * GOT_ENTRY_SIZE;
5693 }
5694 else
5695 {
5696 got_offset = h->plt.offset / abed->plt_entry_size;
5697 got_offset = got_offset * GOT_ENTRY_SIZE;
5698 }
5699
5700 plt_plt_insn_end = abed->plt_plt_insn_end;
5701 plt_plt_offset = abed->plt_plt_offset;
5702 plt_got_insn_size = abed->plt_got_insn_size;
5703 plt_got_offset = abed->plt_got_offset;
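/* These backend fields locate pieces of a lazy PLT entry: the 32-bit
   displacement of the branch back to PLT0 (plt_plt_offset) and the end
   of that branch instruction (plt_plt_insn_end), plus the displacement
   of the GOT-referencing jump (plt_got_offset) and that instruction's
   length (plt_got_insn_size).  With the legacy (non-BND) templates each
   value is one byte smaller because there is no BND prefix.  */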
5704 if (use_plt_bnd)
5705 {
5706 /* Use the second PLT with BND relocations. */
5707 const bfd_byte *plt_entry, *plt2_entry;
5708
5709 if (eh->has_bnd_reloc)
5710 {
5711 plt_entry = elf_x86_64_bnd_plt_entry;
5712 plt2_entry = elf_x86_64_bnd_plt2_entry;
5713 }
5714 else
5715 {
5716 plt_entry = elf_x86_64_legacy_plt_entry;
5717 plt2_entry = elf_x86_64_legacy_plt2_entry;
5718
5719 /* Subtract 1 since there is no BND prefix. */
5720 plt_plt_insn_end -= 1;
5721 plt_plt_offset -= 1;
5722 plt_got_insn_size -= 1;
5723 plt_got_offset -= 1;
5724 }
5725
5726 BFD_ASSERT (sizeof (elf_x86_64_bnd_plt_entry)
5727 == sizeof (elf_x86_64_legacy_plt_entry));
5728
5729 /* Fill in the entry in the procedure linkage table. */
5730 memcpy (plt->contents + h->plt.offset,
5731 plt_entry, sizeof (elf_x86_64_legacy_plt_entry));
5732 /* Fill in the entry in the second PLT. */
5733 memcpy (htab->plt_bnd->contents + eh->plt_bnd.offset,
5734 plt2_entry, sizeof (elf_x86_64_legacy_plt2_entry));
5735
5736 resolved_plt = htab->plt_bnd;
5737 plt_offset = eh->plt_bnd.offset;
5738 }
5739 else
5740 {
5741 /* Fill in the entry in the procedure linkage table. */
5742 memcpy (plt->contents + h->plt.offset, abed->plt_entry,
5743 abed->plt_entry_size);
5744
5745 resolved_plt = plt;
5746 plt_offset = h->plt.offset;
5747 }
5748
5749 /* Insert the relocation positions of the plt section. */
5750
5751 /* Put in the PC-relative offset to the GOT entry, subtracting the size
5752 of the referring instruction since the displacement is relative to its end. */
5753 plt_got_pcrel_offset = (gotplt->output_section->vma
5754 + gotplt->output_offset
5755 + got_offset
5756 - resolved_plt->output_section->vma
5757 - resolved_plt->output_offset
5758 - plt_offset
5759 - plt_got_insn_size);
5760
5761 /* Check PC-relative offset overflow in PLT entry. */
5762 if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff)
5763 info->callbacks->einfo (_("%F%B: PC-relative offset overflow in PLT entry for `%s'\n"),
5764 output_bfd, h->root.root.string);
5765
5766 bfd_put_32 (output_bfd, plt_got_pcrel_offset,
5767 resolved_plt->contents + plt_offset + plt_got_offset);
5768
5769 /* Fill in the entry in the global offset table; initially this
5770 points to the second part of the PLT entry. Leave the entry
5771 as zero for an undefined weak symbol in PIE, since no PLT
5772 relocation is emitted against an undefined weak symbol in PIE. */
5773 if (!local_undefweak)
5774 {
5775 bfd_put_64 (output_bfd, (plt->output_section->vma
5776 + plt->output_offset
5777 + h->plt.offset
5778 + abed->plt_lazy_offset),
5779 gotplt->contents + got_offset);
5780
5781 /* Fill in the entry in the .rela.plt section. */
5782 rela.r_offset = (gotplt->output_section->vma
5783 + gotplt->output_offset
5784 + got_offset);
5785 if (h->dynindx == -1
5786 || ((bfd_link_executable (info)
5787 || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
5788 && h->def_regular
5789 && h->type == STT_GNU_IFUNC))
5790 {
5791 /* If an STT_GNU_IFUNC symbol is locally defined, generate
5792 R_X86_64_IRELATIVE instead of R_X86_64_JUMP_SLOT. */
5793 rela.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
5794 rela.r_addend = (h->root.u.def.value
5795 + h->root.u.def.section->output_section->vma
5796 + h->root.u.def.section->output_offset);
5797 /* R_X86_64_IRELATIVE comes last. */
5798 plt_index = htab->next_irelative_index--;
5799 }
5800 else
5801 {
5802 rela.r_info = htab->r_info (h->dynindx, R_X86_64_JUMP_SLOT);
5803 rela.r_addend = 0;
5804 plt_index = htab->next_jump_slot_index++;
5805 }
5806
5807 /* Don't fill in the lazy-binding fields of the PLT entry for static executables. */
5808 if (plt == htab->elf.splt)
5809 {
5810 bfd_vma plt0_offset = h->plt.offset + plt_plt_insn_end;
5811
5812 /* Put relocation index. */
5813 bfd_put_32 (output_bfd, plt_index,
5814 (plt->contents + h->plt.offset
5815 + abed->plt_reloc_offset));
5816
5817 /* Put in the offset for jmp .PLT0 and check for overflow. We don't
5818 check the relocation index for overflow since the branch displacement
5819 will overflow first. */
5820 if (plt0_offset > 0x80000000)
5821 info->callbacks->einfo (_("%F%B: branch displacement overflow in PLT entry for `%s'\n"),
5822 output_bfd, h->root.root.string);
5823 bfd_put_32 (output_bfd, - plt0_offset,
5824 plt->contents + h->plt.offset + plt_plt_offset);
5825 }
5826
5827 bed = get_elf_backend_data (output_bfd);
5828 loc = relplt->contents + plt_index * bed->s->sizeof_rela;
5829 bed->s->swap_reloca_out (output_bfd, &rela, loc);
5830 }
5831 }
5832 else if (eh->plt_got.offset != (bfd_vma) -1)
5833 {
5834 bfd_vma got_offset, plt_offset, plt_got_offset, plt_got_insn_size;
5835 asection *plt, *got;
5836 bfd_boolean got_after_plt;
5837 int32_t got_pcrel_offset;
5838 const bfd_byte *got_plt_entry;
5839
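/* An entry in the .plt.got section is a single indirect jump through
   the symbol's regular GOT entry; unlike a lazy PLT entry it carries no
   relocation index and no branch back to PLT0, so only the 32-bit
   PC-relative GOT displacement is patched in below.  */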
5840 /* Set the entry in the GOT procedure linkage table. */
5841 plt = htab->plt_got;
5842 got = htab->elf.sgot;
5843 got_offset = h->got.offset;
5844
5845 if (got_offset == (bfd_vma) -1
5846 || h->type == STT_GNU_IFUNC
5847 || plt == NULL
5848 || got == NULL)
5849 abort ();
5850
5851 /* Use the second PLT entry template for the GOT PLT since they
5852 are identical. */
5853 plt_got_insn_size = elf_x86_64_bnd_arch_bed.plt_got_insn_size;
5854 plt_got_offset = elf_x86_64_bnd_arch_bed.plt_got_offset;
5855 if (eh->has_bnd_reloc)
5856 got_plt_entry = elf_x86_64_bnd_plt2_entry;
5857 else
5858 {
5859 got_plt_entry = elf_x86_64_legacy_plt2_entry;
5860
5861 /* Subtract 1 since there is no BND prefix. */
5862 plt_got_insn_size -= 1;
5863 plt_got_offset -= 1;
5864 }
5865
5866 /* Fill in the entry in the GOT procedure linkage table. */
5867 plt_offset = eh->plt_got.offset;
5868 memcpy (plt->contents + plt_offset,
5869 got_plt_entry, sizeof (elf_x86_64_legacy_plt2_entry));
5870
5871 /* Put in the PC-relative offset to the GOT entry, subtracting the
5872 size of the referring instruction since the displacement is relative to its end. */
5873 got_pcrel_offset = (got->output_section->vma
5874 + got->output_offset
5875 + got_offset
5876 - plt->output_section->vma
5877 - plt->output_offset
5878 - plt_offset
5879 - plt_got_insn_size);
5880
5881 /* Check PC-relative offset overflow in GOT PLT entry. */
5882 got_after_plt = got->output_section->vma > plt->output_section->vma;
5883 if ((got_after_plt && got_pcrel_offset < 0)
5884 || (!got_after_plt && got_pcrel_offset > 0))
5885 info->callbacks->einfo (_("%F%B: PC-relative offset overflow in GOT PLT entry for `%s'\n"),
5886 output_bfd, h->root.root.string);
5887
5888 bfd_put_32 (output_bfd, got_pcrel_offset,
5889 plt->contents + plt_offset + plt_got_offset);
5890 }
5891
5892 if (!local_undefweak
5893 && !h->def_regular
5894 && (h->plt.offset != (bfd_vma) -1
5895 || eh->plt_got.offset != (bfd_vma) -1))
5896 {
5897 /* Mark the symbol as undefined, rather than as defined in
5898 the .plt section. Leave the value if there were any
5899 relocations where pointer equality matters (this is a clue
5900 for the dynamic linker, to make function pointer
5901 comparisons work between an application and shared
5902 library), otherwise set it to zero. If a function is only
5903 called from a binary, there is no need to slow down
5904 shared libraries because of that. */
5905 sym->st_shndx = SHN_UNDEF;
5906 if (!h->pointer_equality_needed)
5907 sym->st_value = 0;
5908 }
5909
5910 /* Don't generate a dynamic GOT relocation against an undefined weak
5911 symbol in an executable. */
5912 if (h->got.offset != (bfd_vma) -1
5913 && ! GOT_TLS_GD_ANY_P (elf_x86_64_hash_entry (h)->tls_type)
5914 && elf_x86_64_hash_entry (h)->tls_type != GOT_TLS_IE
5915 && !local_undefweak)
5916 {
5917 Elf_Internal_Rela rela;
5918 asection *relgot = htab->elf.srelgot;
5919
5920 /* This symbol has an entry in the global offset table. Set it
5921 up. */
5922 if (htab->elf.sgot == NULL || htab->elf.srelgot == NULL)
5923 abort ();
5924
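/* The low bit of h->got.offset is used as a flag recording that the
   GOT entry has already been initialized in relocate_section, so mask
   it off to get the real GOT offset.  */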
5925 rela.r_offset = (htab->elf.sgot->output_section->vma
5926 + htab->elf.sgot->output_offset
5927 + (h->got.offset &~ (bfd_vma) 1));
5928
5929 /* If this is a static link, or it is a -Bsymbolic link and the
5930 symbol is defined locally or was forced to be local because
5931 of a version file, we just want to emit a RELATIVE reloc.
5932 The entry in the global offset table will already have been
5933 initialized in the relocate_section function. */
5934 if (h->def_regular
5935 && h->type == STT_GNU_IFUNC)
5936 {
5937 if (h->plt.offset == (bfd_vma) -1)
5938 {
5939 /* STT_GNU_IFUNC is referenced without PLT. */
5940 if (htab->elf.splt == NULL)
5941 {
5942 /* Use the .rel[a].iplt section to store .got relocations
5943 in a static executable. */
5944 relgot = htab->elf.irelplt;
5945 }
5946 if (SYMBOL_REFERENCES_LOCAL (info, h))
5947 {
5948 rela.r_info = htab->r_info (0,
5949 R_X86_64_IRELATIVE);
5950 rela.r_addend = (h->root.u.def.value
5951 + h->root.u.def.section->output_section->vma
5952 + h->root.u.def.section->output_offset);
5953 }
5954 else
5955 goto do_glob_dat;
5956 }
5957 else if (bfd_link_pic (info))
5958 {
5959 /* Generate R_X86_64_GLOB_DAT. */
5960 goto do_glob_dat;
5961 }
5962 else
5963 {
5964 asection *plt;
5965
5966 if (!h->pointer_equality_needed)
5967 abort ();
5968
5969 /* For a non-shared object, we can't use .got.plt, which
5970 contains the real function address, if we need pointer
5971 equality. Load the GOT entry with the PLT entry address instead. */
5972 plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt;
5973 bfd_put_64 (output_bfd, (plt->output_section->vma
5974 + plt->output_offset
5975 + h->plt.offset),
5976 htab->elf.sgot->contents + h->got.offset);
5977 return TRUE;
5978 }
5979 }
5980 else if (bfd_link_pic (info)
5981 && SYMBOL_REFERENCES_LOCAL (info, h))
5982 {
5983 if (!h->def_regular)
5984 return FALSE;
5985 BFD_ASSERT((h->got.offset & 1) != 0);
5986 rela.r_info = htab->r_info (0, R_X86_64_RELATIVE);
5987 rela.r_addend = (h->root.u.def.value
5988 + h->root.u.def.section->output_section->vma
5989 + h->root.u.def.section->output_offset);
5990 }
5991 else
5992 {
5993 BFD_ASSERT((h->got.offset & 1) == 0);
5994 do_glob_dat:
5995 bfd_put_64 (output_bfd, (bfd_vma) 0,
5996 htab->elf.sgot->contents + h->got.offset);
5997 rela.r_info = htab->r_info (h->dynindx, R_X86_64_GLOB_DAT);
5998 rela.r_addend = 0;
5999 }
6000
6001 elf_append_rela (output_bfd, relgot, &rela);
6002 }
6003
6004 if (h->needs_copy)
6005 {
6006 Elf_Internal_Rela rela;
6007
6008 /* This symbol needs a copy reloc. Set it up. */
6009
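/* An R_X86_64_COPY reloc directs the dynamic linker to copy the
   symbol's initial contents from the defining shared object into the
   space reserved for it in the executable; the reloc itself goes into
   htab->srelbss.  */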
6010 if (h->dynindx == -1
6011 || (h->root.type != bfd_link_hash_defined
6012 && h->root.type != bfd_link_hash_defweak)
6013 || htab->srelbss == NULL)
6014 abort ();
6015
6016 rela.r_offset = (h->root.u.def.value
6017 + h->root.u.def.section->output_section->vma
6018 + h->root.u.def.section->output_offset);
6019 rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY);
6020 rela.r_addend = 0;
6021 elf_append_rela (output_bfd, htab->srelbss, &rela);
6022 }
6023
6024 return TRUE;
6025 }
6026
6027 /* Finish up local dynamic symbol handling. We set the contents of
6028 various dynamic sections here. */
6029
6030 static bfd_boolean
6031 elf_x86_64_finish_local_dynamic_symbol (void **slot, void *inf)
6032 {
6033 struct elf_link_hash_entry *h
6034 = (struct elf_link_hash_entry *) *slot;
6035 struct bfd_link_info *info
6036 = (struct bfd_link_info *) inf;
6037
6038 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
6039 info, h, NULL);
6040 }
6041
6042 /* Finish up undefined weak symbol handling in PIE. Fill in its PLT
6043 entry here, since an undefined weak symbol may not be dynamic and
6044 elf_x86_64_finish_dynamic_symbol may not be called for it. */
6045
6046 static bfd_boolean
6047 elf_x86_64_pie_finish_undefweak_symbol (struct bfd_hash_entry *bh,
6048 void *inf)
6049 {
6050 struct elf_link_hash_entry *h = (struct elf_link_hash_entry *) bh;
6051 struct bfd_link_info *info = (struct bfd_link_info *) inf;
6052
6053 if (h->root.type != bfd_link_hash_undefweak
6054 || h->dynindx != -1)
6055 return TRUE;
6056
6057 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
6058 info, h, NULL);
6059 }
6060
6061 /* Used to decide how to sort relocs in an optimal manner for the
6062 dynamic linker, before writing them out. */
6063
6064 static enum elf_reloc_type_class
6065 elf_x86_64_reloc_type_class (const struct bfd_link_info *info,
6066 const asection *rel_sec ATTRIBUTE_UNUSED,
6067 const Elf_Internal_Rela *rela)
6068 {
6069 bfd *abfd = info->output_bfd;
6070 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
6071 struct elf_x86_64_link_hash_table *htab = elf_x86_64_hash_table (info);
6072
6073 if (htab->elf.dynsym != NULL
6074 && htab->elf.dynsym->contents != NULL)
6075 {
6076 /* Check relocation against STT_GNU_IFUNC symbol if there are
6077 dynamic symbols. */
6078 unsigned long r_symndx = htab->r_sym (rela->r_info);
6079 if (r_symndx != STN_UNDEF)
6080 {
6081 Elf_Internal_Sym sym;
6082 if (!bed->s->swap_symbol_in (abfd,
6083 (htab->elf.dynsym->contents
6084 + r_symndx * bed->s->sizeof_sym),
6085 0, &sym))
6086 abort ();
6087
6088 if (ELF_ST_TYPE (sym.st_info) == STT_GNU_IFUNC)
6089 return reloc_class_ifunc;
6090 }
6091 }
6092
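/* The class determines how the relocations are sorted for the dynamic
   linker: RELATIVE relocations are grouped together so they can be
   processed cheaply, and IRELATIVE relocations are classed as ifunc so
   they can be placed after the others, letting IFUNC resolvers run
   once other relocations have been applied.  */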
6093 switch ((int) ELF32_R_TYPE (rela->r_info))
6094 {
6095 case R_X86_64_IRELATIVE:
6096 return reloc_class_ifunc;
6097 case R_X86_64_RELATIVE:
6098 case R_X86_64_RELATIVE64:
6099 return reloc_class_relative;
6100 case R_X86_64_JUMP_SLOT:
6101 return reloc_class_plt;
6102 case R_X86_64_COPY:
6103 return reloc_class_copy;
6104 default:
6105 return reloc_class_normal;
6106 }
6107 }
6108
6109 /* Finish up the dynamic sections. */
6110
6111 static bfd_boolean
6112 elf_x86_64_finish_dynamic_sections (bfd *output_bfd,
6113 struct bfd_link_info *info)
6114 {
6115 struct elf_x86_64_link_hash_table *htab;
6116 bfd *dynobj;
6117 asection *sdyn;
6118 const struct elf_x86_64_backend_data *abed;
6119
6120 htab = elf_x86_64_hash_table (info);
6121 if (htab == NULL)
6122 return FALSE;
6123
6124 /* Use MPX backend data in case of BND relocations. Use the .plt_bnd
6125 section only if there is a .plt section. */
6126 abed = (htab->elf.splt != NULL && htab->plt_bnd != NULL
6127 ? &elf_x86_64_bnd_arch_bed
6128 : get_elf_x86_64_backend_data (output_bfd));
6129
6130 dynobj = htab->elf.dynobj;
6131 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
6132
6133 if (htab->elf.dynamic_sections_created)
6134 {
6135 bfd_byte *dyncon, *dynconend;
6136 const struct elf_backend_data *bed;
6137 bfd_size_type sizeof_dyn;
6138
6139 if (sdyn == NULL || htab->elf.sgot == NULL)
6140 abort ();
6141
6142 bed = get_elf_backend_data (dynobj);
6143 sizeof_dyn = bed->s->sizeof_dyn;
6144 dyncon = sdyn->contents;
6145 dynconend = sdyn->contents + sdyn->size;
6146 for (; dyncon < dynconend; dyncon += sizeof_dyn)
6147 {
6148 Elf_Internal_Dyn dyn;
6149 asection *s;
6150
6151 (*bed->s->swap_dyn_in) (dynobj, dyncon, &dyn);
6152
6153 switch (dyn.d_tag)
6154 {
6155 default:
6156 continue;
6157
6158 case DT_PLTGOT:
6159 s = htab->elf.sgotplt;
6160 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
6161 break;
6162
6163 case DT_JMPREL:
6164 dyn.d_un.d_ptr = htab->elf.srelplt->output_section->vma;
6165 break;
6166
6167 case DT_PLTRELSZ:
6168 s = htab->elf.srelplt->output_section;
6169 dyn.d_un.d_val = s->size;
6170 break;
6171
6172 case DT_RELASZ:
6173 /* The procedure linkage table relocs (DT_JMPREL) should
6174 not be included in the overall relocs (DT_RELA).
6175 Therefore, we override the DT_RELASZ entry here to
6176 make it not include the JMPREL relocs. Since the
6177 linker script arranges for .rela.plt to follow all
6178 other relocation sections, we don't have to worry
6179 about changing the DT_RELA entry. */
6180 if (htab->elf.srelplt != NULL)
6181 {
6182 s = htab->elf.srelplt->output_section;
6183 dyn.d_un.d_val -= s->size;
6184 }
6185 break;
6186
6187 case DT_TLSDESC_PLT:
6188 s = htab->elf.splt;
6189 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
6190 + htab->tlsdesc_plt;
6191 break;
6192
6193 case DT_TLSDESC_GOT:
6194 s = htab->elf.sgot;
6195 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
6196 + htab->tlsdesc_got;
6197 break;
6198 }
6199
6200 (*bed->s->swap_dyn_out) (output_bfd, &dyn, dyncon);
6201 }
6202
6203 /* Fill in the special first entry in the procedure linkage table. */
6204 if (htab->elf.splt && htab->elf.splt->size > 0)
6205 {
6206 /* Fill in the first entry in the procedure linkage table. */
6207 memcpy (htab->elf.splt->contents,
6208 abed->plt0_entry, abed->plt_entry_size);
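/* PLT0 pushes GOT+8 and jumps indirectly through GOT+16; the two
   bfd_put_32 calls below patch those PC-relative displacements into
   the template that was just copied in.  */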
6209 /* Add the offset for pushq GOT+8(%rip); since the instruction
6210 uses 6 bytes, subtract this value. */
6211 bfd_put_32 (output_bfd,
6212 (htab->elf.sgotplt->output_section->vma
6213 + htab->elf.sgotplt->output_offset
6214 + 8
6215 - htab->elf.splt->output_section->vma
6216 - htab->elf.splt->output_offset
6217 - 6),
6218 htab->elf.splt->contents + abed->plt0_got1_offset);
6219 /* Add offset for the PC-relative instruction accessing GOT+16,
6220 subtracting the offset to the end of that instruction. */
6221 bfd_put_32 (output_bfd,
6222 (htab->elf.sgotplt->output_section->vma
6223 + htab->elf.sgotplt->output_offset
6224 + 16
6225 - htab->elf.splt->output_section->vma
6226 - htab->elf.splt->output_offset
6227 - abed->plt0_got2_insn_end),
6228 htab->elf.splt->contents + abed->plt0_got2_offset);
6229
6230 elf_section_data (htab->elf.splt->output_section)
6231 ->this_hdr.sh_entsize = abed->plt_entry_size;
6232
6233 if (htab->tlsdesc_plt)
6234 {
6235 bfd_put_64 (output_bfd, (bfd_vma) 0,
6236 htab->elf.sgot->contents + htab->tlsdesc_got);
6237
6238 memcpy (htab->elf.splt->contents + htab->tlsdesc_plt,
6239 abed->plt0_entry, abed->plt_entry_size);
6240
6241 /* Add the offset for pushq GOT+8(%rip); since the
6242 instruction uses 6 bytes, subtract this value. */
6243 bfd_put_32 (output_bfd,
6244 (htab->elf.sgotplt->output_section->vma
6245 + htab->elf.sgotplt->output_offset
6246 + 8
6247 - htab->elf.splt->output_section->vma
6248 - htab->elf.splt->output_offset
6249 - htab->tlsdesc_plt
6250 - 6),
6251 htab->elf.splt->contents
6252 + htab->tlsdesc_plt + abed->plt0_got1_offset);
6253 /* Add the offset for the PC-relative instruction accessing GOT+TDG,
6254 where TDG stands for htab->tlsdesc_got, subtracting the offset
6255 to the end of that instruction. */
6256 bfd_put_32 (output_bfd,
6257 (htab->elf.sgot->output_section->vma
6258 + htab->elf.sgot->output_offset
6259 + htab->tlsdesc_got
6260 - htab->elf.splt->output_section->vma
6261 - htab->elf.splt->output_offset
6262 - htab->tlsdesc_plt
6263 - abed->plt0_got2_insn_end),
6264 htab->elf.splt->contents
6265 + htab->tlsdesc_plt + abed->plt0_got2_offset);
6266 }
6267 }
6268 }
6269
6270 if (htab->plt_bnd != NULL)
6271 elf_section_data (htab->plt_bnd->output_section)
6272 ->this_hdr.sh_entsize = sizeof (elf_x86_64_bnd_plt2_entry);
6273
6274 if (htab->elf.sgotplt)
6275 {
6276 if (bfd_is_abs_section (htab->elf.sgotplt->output_section))
6277 {
6278 (*_bfd_error_handler)
6279 (_("discarded output section: `%A'"), htab->elf.sgotplt);
6280 return FALSE;
6281 }
6282
6283 /* Fill in the first three entries in the global offset table. */
6284 if (htab->elf.sgotplt->size > 0)
6285 {
6286 /* Set the first entry in the global offset table to the address of
6287 the dynamic section. */
6288 if (sdyn == NULL)
6289 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents);
6290 else
6291 bfd_put_64 (output_bfd,
6292 sdyn->output_section->vma + sdyn->output_offset,
6293 htab->elf.sgotplt->contents);
6294 /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
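/* GOT[1] and GOT[2] are written as zero here; at run time the dynamic
   linker replaces them with its link map pointer and the address of
   its lazy resolution routine.  */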
6295 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents + GOT_ENTRY_SIZE);
6296 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents + GOT_ENTRY_SIZE*2);
6297 }
6298
6299 elf_section_data (htab->elf.sgotplt->output_section)->this_hdr.sh_entsize =
6300 GOT_ENTRY_SIZE;
6301 }
6302
6303 /* Adjust .eh_frame for .plt section. */
6304 if (htab->plt_eh_frame != NULL
6305 && htab->plt_eh_frame->contents != NULL)
6306 {
6307 if (htab->elf.splt != NULL
6308 && htab->elf.splt->size != 0
6309 && (htab->elf.splt->flags & SEC_EXCLUDE) == 0
6310 && htab->elf.splt->output_section != NULL
6311 && htab->plt_eh_frame->output_section != NULL)
6312 {
6313 bfd_vma plt_start = htab->elf.splt->output_section->vma;
6314 bfd_vma eh_frame_start = htab->plt_eh_frame->output_section->vma
6315 + htab->plt_eh_frame->output_offset
6316 + PLT_FDE_START_OFFSET;
6317 bfd_put_signed_32 (dynobj, plt_start - eh_frame_start,
6318 htab->plt_eh_frame->contents
6319 + PLT_FDE_START_OFFSET);
6320 }
6321 if (htab->plt_eh_frame->sec_info_type == SEC_INFO_TYPE_EH_FRAME)
6322 {
6323 if (! _bfd_elf_write_section_eh_frame (output_bfd, info,
6324 htab->plt_eh_frame,
6325 htab->plt_eh_frame->contents))
6326 return FALSE;
6327 }
6328 }
6329
6330 if (htab->elf.sgot && htab->elf.sgot->size > 0)
6331 elf_section_data (htab->elf.sgot->output_section)->this_hdr.sh_entsize
6332 = GOT_ENTRY_SIZE;
6333
6334 /* Fill PLT entries for undefined weak symbols in PIE. */
6335 if (bfd_link_pie (info))
6336 bfd_hash_traverse (&info->hash->table,
6337 elf_x86_64_pie_finish_undefweak_symbol,
6338 info);
6339
6340 return TRUE;
6341 }
6342
6343 /* Fill PLT/GOT entries and allocate dynamic relocations for local
6344 STT_GNU_IFUNC symbols, which aren't in the ELF linker hash table.
6345 It has to be done before elf_link_sort_relocs is called so that
6346 dynamic relocations are properly sorted. */
6347
6348 static bfd_boolean
6349 elf_x86_64_output_arch_local_syms
6350 (bfd *output_bfd ATTRIBUTE_UNUSED,
6351 struct bfd_link_info *info,
6352 void *flaginfo ATTRIBUTE_UNUSED,
6353 int (*func) (void *, const char *,
6354 Elf_Internal_Sym *,
6355 asection *,
6356 struct elf_link_hash_entry *) ATTRIBUTE_UNUSED)
6357 {
6358 struct elf_x86_64_link_hash_table *htab = elf_x86_64_hash_table (info);
6359 if (htab == NULL)
6360 return FALSE;
6361
6362 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
6363 htab_traverse (htab->loc_hash_table,
6364 elf_x86_64_finish_local_dynamic_symbol,
6365 info);
6366
6367 return TRUE;
6368 }
6369
6370 /* Return an array of PLT entry symbol values. */
6371
6372 static bfd_vma *
6373 elf_x86_64_get_plt_sym_val (bfd *abfd, asymbol **dynsyms, asection *plt,
6374 asection *relplt)
6375 {
6376 bfd_boolean (*slurp_relocs) (bfd *, asection *, asymbol **, bfd_boolean);
6377 arelent *p;
6378 long count, i;
6379 bfd_vma *plt_sym_val;
6380 bfd_vma plt_offset;
6381 bfd_byte *plt_contents;
6382 const struct elf_x86_64_backend_data *bed;
6383 Elf_Internal_Shdr *hdr;
6384 asection *plt_bnd;
6385
6386 /* Get the .plt section contents. The PLT passed down may point to
6387 the .plt.bnd section; make sure that PLT always points to the .plt
6388 section. */
6389 plt_bnd = bfd_get_section_by_name (abfd, ".plt.bnd");
6390 if (plt_bnd)
6391 {
6392 if (plt != plt_bnd)
6393 abort ();
6394 plt = bfd_get_section_by_name (abfd, ".plt");
6395 if (plt == NULL)
6396 abort ();
6397 bed = &elf_x86_64_bnd_arch_bed;
6398 }
6399 else
6400 bed = get_elf_x86_64_backend_data (abfd);
6401
6402 plt_contents = (bfd_byte *) bfd_malloc (plt->size);
6403 if (plt_contents == NULL)
6404 return NULL;
6405 if (!bfd_get_section_contents (abfd, (asection *) plt,
6406 plt_contents, 0, plt->size))
6407 {
6408 bad_return:
6409 free (plt_contents);
6410 return NULL;
6411 }
6412
6413 slurp_relocs = get_elf_backend_data (abfd)->s->slurp_reloc_table;
6414 if (! (*slurp_relocs) (abfd, relplt, dynsyms, TRUE))
6415 goto bad_return;
6416
6417 hdr = &elf_section_data (relplt)->this_hdr;
6418 count = relplt->size / hdr->sh_entsize;
6419
6420 plt_sym_val = (bfd_vma *) bfd_malloc (sizeof (bfd_vma) * count);
6421 if (plt_sym_val == NULL)
6422 goto bad_return;
6423
6424 for (i = 0; i < count; i++)
6425 plt_sym_val[i] = -1;
6426
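/* Walk the PLT starting after the reserved PLT0 entry.  Each lazy
   entry stores its .rela.plt index at plt_reloc_offset; read it back
   to map each relocation to the address of its PLT (or .plt.bnd)
   stub.  */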
6427 plt_offset = bed->plt_entry_size;
6428 p = relplt->relocation;
6429 for (i = 0; i < count; i++, p++)
6430 {
6431 long reloc_index;
6432
6433 /* Skip unknown relocation. */
6434 if (p->howto == NULL)
6435 continue;
6436
6437 if (p->howto->type != R_X86_64_JUMP_SLOT
6438 && p->howto->type != R_X86_64_IRELATIVE)
6439 continue;
6440
6441 reloc_index = H_GET_32 (abfd, (plt_contents + plt_offset
6442 + bed->plt_reloc_offset));
6443 if (reloc_index < count)
6444 {
6445 if (plt_bnd)
6446 {
6447 /* This is the index in .plt section. */
6448 long plt_index = plt_offset / bed->plt_entry_size;
6449 /* Store VMA + the offset in .plt.bnd section. */
6450 plt_sym_val[reloc_index] =
6451 (plt_bnd->vma
6452 + (plt_index - 1) * sizeof (elf_x86_64_legacy_plt2_entry));
6453 }
6454 else
6455 plt_sym_val[reloc_index] = plt->vma + plt_offset;
6456 }
6457 plt_offset += bed->plt_entry_size;
6458
6459 /* PR binutils/18437: Skip extra relocations in the .rela.plt
6460 section. */
6461 if (plt_offset >= plt->size)
6462 break;
6463 }
6464
6465 free (plt_contents);
6466
6467 return plt_sym_val;
6468 }
6469
6470 /* Similar to _bfd_elf_get_synthetic_symtab, with .plt.bnd section
6471 support. */
6472
6473 static long
6474 elf_x86_64_get_synthetic_symtab (bfd *abfd,
6475 long symcount,
6476 asymbol **syms,
6477 long dynsymcount,
6478 asymbol **dynsyms,
6479 asymbol **ret)
6480 {
6481 /* Pass the .plt.bnd section to _bfd_elf_ifunc_get_synthetic_symtab
6482 as PLT if it exists. */
6483 asection *plt = bfd_get_section_by_name (abfd, ".plt.bnd");
6484 if (plt == NULL)
6485 plt = bfd_get_section_by_name (abfd, ".plt");
6486 return _bfd_elf_ifunc_get_synthetic_symtab (abfd, symcount, syms,
6487 dynsymcount, dynsyms, ret,
6488 plt,
6489 elf_x86_64_get_plt_sym_val);
6490 }
6491
6492 /* Handle an x86-64 specific section when reading an object file. This
6493 is called when elfcode.h finds a section with an unknown type. */
6494
6495 static bfd_boolean
6496 elf_x86_64_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr,
6497 const char *name, int shindex)
6498 {
6499 if (hdr->sh_type != SHT_X86_64_UNWIND)
6500 return FALSE;
6501
6502 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
6503 return FALSE;
6504
6505 return TRUE;
6506 }
6507
6508 /* Hook called by the linker routine which adds symbols from an object
6509 file. We use it to put SHN_X86_64_LCOMMON items in .lbss, instead
6510 of .bss. */
6511
6512 static bfd_boolean
6513 elf_x86_64_add_symbol_hook (bfd *abfd,
6514 struct bfd_link_info *info ATTRIBUTE_UNUSED,
6515 Elf_Internal_Sym *sym,
6516 const char **namep ATTRIBUTE_UNUSED,
6517 flagword *flagsp ATTRIBUTE_UNUSED,
6518 asection **secp,
6519 bfd_vma *valp)
6520 {
6521 asection *lcomm;
6522
6523 switch (sym->st_shndx)
6524 {
6525 case SHN_X86_64_LCOMMON:
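/* Large common symbols carry the special section index
   SHN_X86_64_LCOMMON; park them in a linker-created LARGE_COMMON
   section flagged SHF_X86_64_LARGE so they end up allocated in .lbss
   rather than .bss.  */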
6526 lcomm = bfd_get_section_by_name (abfd, "LARGE_COMMON");
6527 if (lcomm == NULL)
6528 {
6529 lcomm = bfd_make_section_with_flags (abfd,
6530 "LARGE_COMMON",
6531 (SEC_ALLOC
6532 | SEC_IS_COMMON
6533 | SEC_LINKER_CREATED));
6534 if (lcomm == NULL)
6535 return FALSE;
6536 elf_section_flags (lcomm) |= SHF_X86_64_LARGE;
6537 }
6538 *secp = lcomm;
6539 *valp = sym->st_size;
6540 return TRUE;
6541 }
6542
6543 return TRUE;
6544 }
6545
6546
6547 /* Given a BFD section, try to locate the corresponding ELF section
6548 index. */
6549
6550 static bfd_boolean
6551 elf_x86_64_elf_section_from_bfd_section (bfd *abfd ATTRIBUTE_UNUSED,
6552 asection *sec, int *index_return)
6553 {
6554 if (sec == &_bfd_elf_large_com_section)
6555 {
6556 *index_return = SHN_X86_64_LCOMMON;
6557 return TRUE;
6558 }
6559 return FALSE;
6560 }
6561
6562 /* Process a symbol. */
6563
6564 static void
6565 elf_x86_64_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
6566 asymbol *asym)
6567 {
6568 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
6569
6570 switch (elfsym->internal_elf_sym.st_shndx)
6571 {
6572 case SHN_X86_64_LCOMMON:
6573 asym->section = &_bfd_elf_large_com_section;
6574 asym->value = elfsym->internal_elf_sym.st_size;
6575 /* A common symbol doesn't set BSF_GLOBAL. */
6576 asym->flags &= ~BSF_GLOBAL;
6577 break;
6578 }
6579 }
6580
6581 static bfd_boolean
6582 elf_x86_64_common_definition (Elf_Internal_Sym *sym)
6583 {
6584 return (sym->st_shndx == SHN_COMMON
6585 || sym->st_shndx == SHN_X86_64_LCOMMON);
6586 }
6587
6588 static unsigned int
6589 elf_x86_64_common_section_index (asection *sec)
6590 {
6591 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
6592 return SHN_COMMON;
6593 else
6594 return SHN_X86_64_LCOMMON;
6595 }
6596
6597 static asection *
6598 elf_x86_64_common_section (asection *sec)
6599 {
6600 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
6601 return bfd_com_section_ptr;
6602 else
6603 return &_bfd_elf_large_com_section;
6604 }
6605
6606 static bfd_boolean
6607 elf_x86_64_merge_symbol (struct elf_link_hash_entry *h,
6608 const Elf_Internal_Sym *sym,
6609 asection **psec,
6610 bfd_boolean newdef,
6611 bfd_boolean olddef,
6612 bfd *oldbfd,
6613 const asection *oldsec)
6614 {
6615 /* A normal common symbol and a large common symbol result in a
6616 normal common symbol. We turn the large common symbol into a
6617 normal one. */
6618 if (!olddef
6619 && h->root.type == bfd_link_hash_common
6620 && !newdef
6621 && bfd_is_com_section (*psec)
6622 && oldsec != *psec)
6623 {
6624 if (sym->st_shndx == SHN_COMMON
6625 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) != 0)
6626 {
6627 h->root.u.c.p->section
6628 = bfd_make_section_old_way (oldbfd, "COMMON");
6629 h->root.u.c.p->section->flags = SEC_ALLOC;
6630 }
6631 else if (sym->st_shndx == SHN_X86_64_LCOMMON
6632 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) == 0)
6633 *psec = bfd_com_section_ptr;
6634 }
6635
6636 return TRUE;
6637 }
6638
6639 static int
6640 elf_x86_64_additional_program_headers (bfd *abfd,
6641 struct bfd_link_info *info ATTRIBUTE_UNUSED)
6642 {
6643 asection *s;
6644 int count = 0;
6645
6646 /* Check to see if we need a large readonly segment. */
6647 s = bfd_get_section_by_name (abfd, ".lrodata");
6648 if (s && (s->flags & SEC_LOAD))
6649 count++;
6650
6651 /* Check to see if we need a large data segment. Since the .lbss
6652 section is placed right after the .bss section, there should be no
6653 need for a large data segment just because of .lbss. */
6654 s = bfd_get_section_by_name (abfd, ".ldata");
6655 if (s && (s->flags & SEC_LOAD))
6656 count++;
6657
6658 return count;
6659 }
6660
6661 /* Return TRUE if symbol should be hashed in the `.gnu.hash' section. */
6662
6663 static bfd_boolean
6664 elf_x86_64_hash_symbol (struct elf_link_hash_entry *h)
6665 {
6666 if (h->plt.offset != (bfd_vma) -1
6667 && !h->def_regular
6668 && !h->pointer_equality_needed)
6669 return FALSE;
6670
6671 return _bfd_elf_hash_symbol (h);
6672 }
6673
6674 /* Return TRUE iff relocations for INPUT are compatible with OUTPUT. */
6675
6676 static bfd_boolean
6677 elf_x86_64_relocs_compatible (const bfd_target *input,
6678 const bfd_target *output)
6679 {
6680 return ((xvec_get_elf_backend_data (input)->s->elfclass
6681 == xvec_get_elf_backend_data (output)->s->elfclass)
6682 && _bfd_elf_relocs_compatible (input, output));
6683 }
6684
6685 static const struct bfd_elf_special_section
6686 elf_x86_64_special_sections[]=
6687 {
6688 { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
6689 { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
6690 { STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE},
6691 { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
6692 { STRING_COMMA_LEN (".ldata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
6693 { STRING_COMMA_LEN (".lrodata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
6694 { NULL, 0, 0, 0, 0 }
6695 };
6696
6697 #define TARGET_LITTLE_SYM x86_64_elf64_vec
6698 #define TARGET_LITTLE_NAME "elf64-x86-64"
6699 #define ELF_ARCH bfd_arch_i386
6700 #define ELF_TARGET_ID X86_64_ELF_DATA
6701 #define ELF_MACHINE_CODE EM_X86_64
6702 #define ELF_MAXPAGESIZE 0x200000
6703 #define ELF_MINPAGESIZE 0x1000
6704 #define ELF_COMMONPAGESIZE 0x1000
6705
6706 #define elf_backend_can_gc_sections 1
6707 #define elf_backend_can_refcount 1
6708 #define elf_backend_want_got_plt 1
6709 #define elf_backend_plt_readonly 1
6710 #define elf_backend_want_plt_sym 0
6711 #define elf_backend_got_header_size (GOT_ENTRY_SIZE*3)
6712 #define elf_backend_rela_normal 1
6713 #define elf_backend_plt_alignment 4
6714 #define elf_backend_extern_protected_data 1
6715 #define elf_backend_caches_rawsize 1
6716
6717 #define elf_info_to_howto elf_x86_64_info_to_howto
6718
6719 #define bfd_elf64_bfd_link_hash_table_create \
6720 elf_x86_64_link_hash_table_create
6721 #define bfd_elf64_bfd_reloc_type_lookup elf_x86_64_reloc_type_lookup
6722 #define bfd_elf64_bfd_reloc_name_lookup \
6723 elf_x86_64_reloc_name_lookup
6724
6725 #define elf_backend_adjust_dynamic_symbol elf_x86_64_adjust_dynamic_symbol
6726 #define elf_backend_relocs_compatible elf_x86_64_relocs_compatible
6727 #define elf_backend_check_relocs elf_x86_64_check_relocs
6728 #define elf_backend_copy_indirect_symbol elf_x86_64_copy_indirect_symbol
6729 #define elf_backend_create_dynamic_sections elf_x86_64_create_dynamic_sections
6730 #define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections
6731 #define elf_backend_finish_dynamic_symbol elf_x86_64_finish_dynamic_symbol
6732 #define elf_backend_output_arch_local_syms elf_x86_64_output_arch_local_syms
6733 #define elf_backend_gc_mark_hook elf_x86_64_gc_mark_hook
6734 #define elf_backend_grok_prstatus elf_x86_64_grok_prstatus
6735 #define elf_backend_grok_psinfo elf_x86_64_grok_psinfo
6736 #ifdef CORE_HEADER
6737 #define elf_backend_write_core_note elf_x86_64_write_core_note
6738 #endif
6739 #define elf_backend_reloc_type_class elf_x86_64_reloc_type_class
6740 #define elf_backend_relocate_section elf_x86_64_relocate_section
6741 #define elf_backend_size_dynamic_sections elf_x86_64_size_dynamic_sections
6742 #define elf_backend_always_size_sections elf_x86_64_always_size_sections
6743 #define elf_backend_init_index_section _bfd_elf_init_1_index_section
6744 #define elf_backend_object_p elf64_x86_64_elf_object_p
6745 #define bfd_elf64_mkobject elf_x86_64_mkobject
6746 #define bfd_elf64_get_synthetic_symtab elf_x86_64_get_synthetic_symtab
6747
6748 #define elf_backend_section_from_shdr \
6749 elf_x86_64_section_from_shdr
6750
6751 #define elf_backend_section_from_bfd_section \
6752 elf_x86_64_elf_section_from_bfd_section
6753 #define elf_backend_add_symbol_hook \
6754 elf_x86_64_add_symbol_hook
6755 #define elf_backend_symbol_processing \
6756 elf_x86_64_symbol_processing
6757 #define elf_backend_common_section_index \
6758 elf_x86_64_common_section_index
6759 #define elf_backend_common_section \
6760 elf_x86_64_common_section
6761 #define elf_backend_common_definition \
6762 elf_x86_64_common_definition
6763 #define elf_backend_merge_symbol \
6764 elf_x86_64_merge_symbol
6765 #define elf_backend_special_sections \
6766 elf_x86_64_special_sections
6767 #define elf_backend_additional_program_headers \
6768 elf_x86_64_additional_program_headers
6769 #define elf_backend_hash_symbol \
6770 elf_x86_64_hash_symbol
6771 #define elf_backend_omit_section_dynsym \
6772 ((bfd_boolean (*) (bfd *, struct bfd_link_info *, asection *)) bfd_true)
6773 #define elf_backend_fixup_symbol \
6774 elf_x86_64_fixup_symbol
6775
6776 #include "elf64-target.h"
6777
6778 /* CloudABI support. */
6779
6780 #undef TARGET_LITTLE_SYM
6781 #define TARGET_LITTLE_SYM x86_64_elf64_cloudabi_vec
6782 #undef TARGET_LITTLE_NAME
6783 #define TARGET_LITTLE_NAME "elf64-x86-64-cloudabi"
6784
6785 #undef ELF_OSABI
6786 #define ELF_OSABI ELFOSABI_CLOUDABI
6787
6788 #undef elf64_bed
6789 #define elf64_bed elf64_x86_64_cloudabi_bed
6790
6791 #include "elf64-target.h"
6792
6793 /* FreeBSD support. */
6794
6795 #undef TARGET_LITTLE_SYM
6796 #define TARGET_LITTLE_SYM x86_64_elf64_fbsd_vec
6797 #undef TARGET_LITTLE_NAME
6798 #define TARGET_LITTLE_NAME "elf64-x86-64-freebsd"
6799
6800 #undef ELF_OSABI
6801 #define ELF_OSABI ELFOSABI_FREEBSD
6802
6803 #undef elf64_bed
6804 #define elf64_bed elf64_x86_64_fbsd_bed
6805
6806 #include "elf64-target.h"
6807
6808 /* Solaris 2 support. */
6809
6810 #undef TARGET_LITTLE_SYM
6811 #define TARGET_LITTLE_SYM x86_64_elf64_sol2_vec
6812 #undef TARGET_LITTLE_NAME
6813 #define TARGET_LITTLE_NAME "elf64-x86-64-sol2"
6814
6815 /* Restore default: we cannot use ELFOSABI_SOLARIS, otherwise ELFOSABI_NONE
6816 objects won't be recognized. */
6817 #undef ELF_OSABI
6818
6819 #undef elf64_bed
6820 #define elf64_bed elf64_x86_64_sol2_bed
6821
6822 /* The 64-bit static TLS arena size is rounded to the nearest 16-byte
6823 boundary. */
6824 #undef elf_backend_static_tls_alignment
6825 #define elf_backend_static_tls_alignment 16
6826
6827 /* The Solaris 2 ABI requires a plt symbol on all platforms.
6828
6829 Cf. Linker and Libraries Guide, Ch. 2, Link-Editor, Generating the Output
6830 File, p.63. */
6831 #undef elf_backend_want_plt_sym
6832 #define elf_backend_want_plt_sym 1
6833
6834 #undef elf_backend_strtab_flags
6835 #define elf_backend_strtab_flags SHF_STRINGS
6836
6837 static bfd_boolean
6838 elf64_x86_64_copy_solaris_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
6839 bfd *obfd ATTRIBUTE_UNUSED,
6840 const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
6841 Elf_Internal_Shdr *osection ATTRIBUTE_UNUSED)
6842 {
6843 /* PR 19938: FIXME: Need to add code for setting the sh_info
6844 and sh_link fields of Solaris specific section types. */
6845 return FALSE;
6846 }
6847
6848 #undef elf_backend_copy_special_section_fields
6849 #define elf_backend_copy_special_section_fields elf64_x86_64_copy_solaris_special_section_fields
6850
6851 #include "elf64-target.h"
6852
6853 /* Native Client support. */
6854
6855 static bfd_boolean
6856 elf64_x86_64_nacl_elf_object_p (bfd *abfd)
6857 {
6858 /* Set the right machine number for a NaCl x86-64 ELF64 file. */
6859 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64_nacl);
6860 return TRUE;
6861 }
6862
6863 #undef TARGET_LITTLE_SYM
6864 #define TARGET_LITTLE_SYM x86_64_elf64_nacl_vec
6865 #undef TARGET_LITTLE_NAME
6866 #define TARGET_LITTLE_NAME "elf64-x86-64-nacl"
6867 #undef elf64_bed
6868 #define elf64_bed elf64_x86_64_nacl_bed
6869
6870 #undef ELF_MAXPAGESIZE
6871 #undef ELF_MINPAGESIZE
6872 #undef ELF_COMMONPAGESIZE
6873 #define ELF_MAXPAGESIZE 0x10000
6874 #define ELF_MINPAGESIZE 0x10000
6875 #define ELF_COMMONPAGESIZE 0x10000
6876
6877 /* Restore defaults. */
6878 #undef ELF_OSABI
6879 #undef elf_backend_static_tls_alignment
6880 #undef elf_backend_want_plt_sym
6881 #define elf_backend_want_plt_sym 0
6882 #undef elf_backend_strtab_flags
6883 #undef elf_backend_copy_special_section_fields
6884
6885 /* NaCl uses substantially different PLT entries for the same effects. */
6886
6887 #undef elf_backend_plt_alignment
6888 #define elf_backend_plt_alignment 5
6889 #define NACL_PLT_ENTRY_SIZE 64
6890 #define NACLMASK 0xe0 /* 32-byte alignment mask. */
6891
6892 static const bfd_byte elf_x86_64_nacl_plt0_entry[NACL_PLT_ENTRY_SIZE] =
6893 {
6894 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
6895 0x4c, 0x8b, 0x1d, 16, 0, 0, 0, /* mov GOT+16(%rip), %r11 */
6896 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
6897 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
6898 0x41, 0xff, 0xe3, /* jmpq *%r11 */
6899
6900 /* 9-byte nop sequence to pad out to the next 32-byte boundary. */
6901 0x66, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw 0x0(%rax,%rax,1) */
6902
6903 /* 32 bytes of nop to pad out to the standard size. */
6904 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
6905 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6906 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
6907 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6908 0x66, /* excess data16 prefix */
6909 0x90 /* nop */
6910 };
6911
6912 static const bfd_byte elf_x86_64_nacl_plt_entry[NACL_PLT_ENTRY_SIZE] =
6913 {
6914 0x4c, 0x8b, 0x1d, 0, 0, 0, 0, /* mov name@GOTPCREL(%rip),%r11 */
6915 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
6916 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
6917 0x41, 0xff, 0xe3, /* jmpq *%r11 */
6918
6919 /* 15-byte nop sequence to pad out to the next 32-byte boundary. */
6920 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
6921 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6922
6923 /* Lazy GOT entries point here (32-byte aligned). */
6924 0x68, /* pushq immediate */
6925 0, 0, 0, 0, /* replaced with index into relocation table. */
6926 0xe9, /* jmp relative */
6927 0, 0, 0, 0, /* replaced with offset to start of .plt0. */
6928
6929 /* 22 bytes of nop to pad out to the standard size. */
6930 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
6931 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6932 0x0f, 0x1f, 0x80, 0, 0, 0, 0, /* nopl 0x0(%rax) */
6933 };
6934
6935 /* .eh_frame covering the .plt section. */
6936
6937 static const bfd_byte elf_x86_64_nacl_eh_frame_plt[] =
6938 {
6939 #if (PLT_CIE_LENGTH != 20 \
6940 || PLT_FDE_LENGTH != 36 \
6941 || PLT_FDE_START_OFFSET != 4 + PLT_CIE_LENGTH + 8 \
6942 || PLT_FDE_LEN_OFFSET != 4 + PLT_CIE_LENGTH + 12)
6943 # error "Need elf_x86_64_backend_data parameters for eh_frame_plt offsets!"
6944 #endif
6945 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
6946 0, 0, 0, 0, /* CIE ID */
6947 1, /* CIE version */
6948 'z', 'R', 0, /* Augmentation string */
6949 1, /* Code alignment factor */
6950 0x78, /* Data alignment factor */
6951 16, /* Return address column */
6952 1, /* Augmentation size */
6953 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
6954 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
6955 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
6956 DW_CFA_nop, DW_CFA_nop,
6957
6958 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
6959 PLT_CIE_LENGTH + 8, 0, 0, 0,/* CIE pointer */
6960 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
6961 0, 0, 0, 0, /* .plt size goes here */
6962 0, /* Augmentation size */
6963 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
6964 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
6965 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
6966 DW_CFA_advance_loc + 58, /* DW_CFA_advance_loc: 58 to __PLT__+64 */
6967 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
6968 13, /* Block length */
6969 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
6970 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
6971 DW_OP_const1u, 63, DW_OP_and, DW_OP_const1u, 37, DW_OP_ge,
6972 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
6973 DW_CFA_nop, DW_CFA_nop
6974 };
6975
6976 static const struct elf_x86_64_backend_data elf_x86_64_nacl_arch_bed =
6977 {
6978 elf_x86_64_nacl_plt0_entry, /* plt0_entry */
6979 elf_x86_64_nacl_plt_entry, /* plt_entry */
6980 NACL_PLT_ENTRY_SIZE, /* plt_entry_size */
6981 2, /* plt0_got1_offset */
6982 9, /* plt0_got2_offset */
6983 13, /* plt0_got2_insn_end */
6984 3, /* plt_got_offset */
6985 33, /* plt_reloc_offset */
6986 38, /* plt_plt_offset */
6987 7, /* plt_got_insn_size */
6988 42, /* plt_plt_insn_end */
6989 32, /* plt_lazy_offset */
6990 elf_x86_64_nacl_eh_frame_plt, /* eh_frame_plt */
6991 sizeof (elf_x86_64_nacl_eh_frame_plt), /* eh_frame_plt_size */
6992 };
6993
6994 #undef elf_backend_arch_data
6995 #define elf_backend_arch_data &elf_x86_64_nacl_arch_bed
6996
6997 #undef elf_backend_object_p
6998 #define elf_backend_object_p elf64_x86_64_nacl_elf_object_p
6999 #undef elf_backend_modify_segment_map
7000 #define elf_backend_modify_segment_map nacl_modify_segment_map
7001 #undef elf_backend_modify_program_headers
7002 #define elf_backend_modify_program_headers nacl_modify_program_headers
7003 #undef elf_backend_final_write_processing
7004 #define elf_backend_final_write_processing nacl_final_write_processing
7005
7006 #include "elf64-target.h"
7007
7008 /* Native Client x32 support. */
7009
7010 static bfd_boolean
7011 elf32_x86_64_nacl_elf_object_p (bfd *abfd)
7012 {
7013 /* Set the right machine number for a NaCl x86-64 ELF32 file. */
7014 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32_nacl);
7015 return TRUE;
7016 }
7017
7018 #undef TARGET_LITTLE_SYM
7019 #define TARGET_LITTLE_SYM x86_64_elf32_nacl_vec
7020 #undef TARGET_LITTLE_NAME
7021 #define TARGET_LITTLE_NAME "elf32-x86-64-nacl"
7022 #undef elf32_bed
7023 #define elf32_bed elf32_x86_64_nacl_bed
7024
7025 #define bfd_elf32_bfd_link_hash_table_create \
7026 elf_x86_64_link_hash_table_create
7027 #define bfd_elf32_bfd_reloc_type_lookup \
7028 elf_x86_64_reloc_type_lookup
7029 #define bfd_elf32_bfd_reloc_name_lookup \
7030 elf_x86_64_reloc_name_lookup
7031 #define bfd_elf32_mkobject \
7032 elf_x86_64_mkobject
7033 #define bfd_elf32_get_synthetic_symtab \
7034 elf_x86_64_get_synthetic_symtab
7035
7036 #undef elf_backend_object_p
7037 #define elf_backend_object_p \
7038 elf32_x86_64_nacl_elf_object_p
7039
7040 #undef elf_backend_bfd_from_remote_memory
7041 #define elf_backend_bfd_from_remote_memory \
7042 _bfd_elf32_bfd_from_remote_memory
7043
7044 #undef elf_backend_size_info
7045 #define elf_backend_size_info \
7046 _bfd_elf32_size_info
7047
7048 #include "elf32-target.h"
7049
7050 /* Restore defaults. */
7051 #undef elf_backend_object_p
7052 #define elf_backend_object_p elf64_x86_64_elf_object_p
7053 #undef elf_backend_bfd_from_remote_memory
7054 #undef elf_backend_size_info
7055 #undef elf_backend_modify_segment_map
7056 #undef elf_backend_modify_program_headers
7057 #undef elf_backend_final_write_processing
7058
7059 /* Intel L1OM support. */
7060
7061 static bfd_boolean
7062 elf64_l1om_elf_object_p (bfd *abfd)
7063 {
7064 /* Set the right machine number for an L1OM elf64 file. */
7065 bfd_default_set_arch_mach (abfd, bfd_arch_l1om, bfd_mach_l1om);
7066 return TRUE;
7067 }
7068
7069 #undef TARGET_LITTLE_SYM
7070 #define TARGET_LITTLE_SYM l1om_elf64_vec
7071 #undef TARGET_LITTLE_NAME
7072 #define TARGET_LITTLE_NAME "elf64-l1om"
7073 #undef ELF_ARCH
7074 #define ELF_ARCH bfd_arch_l1om
7075
7076 #undef ELF_MACHINE_CODE
7077 #define ELF_MACHINE_CODE EM_L1OM
7078
7079 #undef ELF_OSABI
7080
7081 #undef elf64_bed
7082 #define elf64_bed elf64_l1om_bed
7083
7084 #undef elf_backend_object_p
7085 #define elf_backend_object_p elf64_l1om_elf_object_p
7086
7087 /* Restore defaults. */
7088 #undef ELF_MAXPAGESIZE
7089 #undef ELF_MINPAGESIZE
7090 #undef ELF_COMMONPAGESIZE
7091 #define ELF_MAXPAGESIZE 0x200000
7092 #define ELF_MINPAGESIZE 0x1000
7093 #define ELF_COMMONPAGESIZE 0x1000
7094 #undef elf_backend_plt_alignment
7095 #define elf_backend_plt_alignment 4
7096 #undef elf_backend_arch_data
7097 #define elf_backend_arch_data &elf_x86_64_arch_bed
7098
7099 #include "elf64-target.h"
7100
7101 /* FreeBSD L1OM support. */
7102
7103 #undef TARGET_LITTLE_SYM
7104 #define TARGET_LITTLE_SYM l1om_elf64_fbsd_vec
7105 #undef TARGET_LITTLE_NAME
7106 #define TARGET_LITTLE_NAME "elf64-l1om-freebsd"
7107
7108 #undef ELF_OSABI
7109 #define ELF_OSABI ELFOSABI_FREEBSD
7110
7111 #undef elf64_bed
7112 #define elf64_bed elf64_l1om_fbsd_bed
7113
7114 #include "elf64-target.h"
7115
7116 /* Intel K1OM support. */
7117
7118 static bfd_boolean
7119 elf64_k1om_elf_object_p (bfd *abfd)
7120 {
7121 /* Set the right machine number for a K1OM elf64 file. */
7122 bfd_default_set_arch_mach (abfd, bfd_arch_k1om, bfd_mach_k1om);
7123 return TRUE;
7124 }
7125
7126 #undef TARGET_LITTLE_SYM
7127 #define TARGET_LITTLE_SYM k1om_elf64_vec
7128 #undef TARGET_LITTLE_NAME
7129 #define TARGET_LITTLE_NAME "elf64-k1om"
7130 #undef ELF_ARCH
7131 #define ELF_ARCH bfd_arch_k1om
7132
7133 #undef ELF_MACHINE_CODE
7134 #define ELF_MACHINE_CODE EM_K1OM
7135
7136 #undef ELF_OSABI
7137
7138 #undef elf64_bed
7139 #define elf64_bed elf64_k1om_bed
7140
7141 #undef elf_backend_object_p
7142 #define elf_backend_object_p elf64_k1om_elf_object_p
7143
7144 #undef elf_backend_static_tls_alignment
7145
7146 #undef elf_backend_want_plt_sym
7147 #define elf_backend_want_plt_sym 0
7148
7149 #include "elf64-target.h"
7150
7151 /* FreeBSD K1OM support. */
7152
7153 #undef TARGET_LITTLE_SYM
7154 #define TARGET_LITTLE_SYM k1om_elf64_fbsd_vec
7155 #undef TARGET_LITTLE_NAME
7156 #define TARGET_LITTLE_NAME "elf64-k1om-freebsd"
7157
7158 #undef ELF_OSABI
7159 #define ELF_OSABI ELFOSABI_FREEBSD
7160
7161 #undef elf64_bed
7162 #define elf64_bed elf64_k1om_fbsd_bed
7163
7164 #include "elf64-target.h"
7165
7166 /* 32bit x86-64 support. */
7167
7168 #undef TARGET_LITTLE_SYM
7169 #define TARGET_LITTLE_SYM x86_64_elf32_vec
7170 #undef TARGET_LITTLE_NAME
7171 #define TARGET_LITTLE_NAME "elf32-x86-64"
7172 #undef elf32_bed
7173
7174 #undef ELF_ARCH
7175 #define ELF_ARCH bfd_arch_i386
7176
7177 #undef ELF_MACHINE_CODE
7178 #define ELF_MACHINE_CODE EM_X86_64
7179
7180 #undef ELF_OSABI
7181
7182 #undef elf_backend_object_p
7183 #define elf_backend_object_p \
7184 elf32_x86_64_elf_object_p
7185
7186 #undef elf_backend_bfd_from_remote_memory
7187 #define elf_backend_bfd_from_remote_memory \
7188 _bfd_elf32_bfd_from_remote_memory
7189
7190 #undef elf_backend_size_info
7191 #define elf_backend_size_info \
7192 _bfd_elf32_size_info
7193
7194 #include "elf32-target.h"