1 /* X86-64 specific support for ELF
2 Copyright (C) 2000-2016 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka <jh@suse.cz>.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "sysdep.h"
23 #include "bfd.h"
24 #include "bfdlink.h"
25 #include "libbfd.h"
26 #include "elf-bfd.h"
27 #include "elf-nacl.h"
28 #include "bfd_stdint.h"
29 #include "objalloc.h"
30 #include "hashtab.h"
31 #include "dwarf2.h"
32 #include "libiberty.h"
33
34 #include "opcode/i386.h"
35 #include "elf/x86-64.h"
36
37 #ifdef CORE_HEADER
38 #include <stdarg.h>
39 #include CORE_HEADER
40 #endif
41
42 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
43 #define MINUS_ONE (~ (bfd_vma) 0)
44
45 /* Since both 32-bit and 64-bit x86-64 encode the relocation type in
46 an identical manner, we use ELF32_R_TYPE instead of ELF64_R_TYPE to
47 get the relocation type.  We also use ELF_ST_TYPE instead of
48 ELF64_ST_TYPE since they are the same. */
49
50 #define ABI_64_P(abfd) \
51 (get_elf_backend_data (abfd)->s->elfclass == ELFCLASS64)
52
53 /* The relocation "howto" table. Order of fields:
54 type, rightshift, size, bitsize, pc_relative, bitpos, complain_on_overflow,
55 special_function, name, partial_inplace, src_mask, dst_mask, pcrel_offset. */
56 static reloc_howto_type x86_64_elf_howto_table[] =
57 {
58 HOWTO(R_X86_64_NONE, 0, 3, 0, FALSE, 0, complain_overflow_dont,
59 bfd_elf_generic_reloc, "R_X86_64_NONE", FALSE, 0x00000000, 0x00000000,
60 FALSE),
61 HOWTO(R_X86_64_64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
62 bfd_elf_generic_reloc, "R_X86_64_64", FALSE, MINUS_ONE, MINUS_ONE,
63 FALSE),
64 HOWTO(R_X86_64_PC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
65 bfd_elf_generic_reloc, "R_X86_64_PC32", FALSE, 0xffffffff, 0xffffffff,
66 TRUE),
67 HOWTO(R_X86_64_GOT32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
68 bfd_elf_generic_reloc, "R_X86_64_GOT32", FALSE, 0xffffffff, 0xffffffff,
69 FALSE),
70 HOWTO(R_X86_64_PLT32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
71 bfd_elf_generic_reloc, "R_X86_64_PLT32", FALSE, 0xffffffff, 0xffffffff,
72 TRUE),
73 HOWTO(R_X86_64_COPY, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
74 bfd_elf_generic_reloc, "R_X86_64_COPY", FALSE, 0xffffffff, 0xffffffff,
75 FALSE),
76 HOWTO(R_X86_64_GLOB_DAT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
77 bfd_elf_generic_reloc, "R_X86_64_GLOB_DAT", FALSE, MINUS_ONE,
78 MINUS_ONE, FALSE),
79 HOWTO(R_X86_64_JUMP_SLOT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
80 bfd_elf_generic_reloc, "R_X86_64_JUMP_SLOT", FALSE, MINUS_ONE,
81 MINUS_ONE, FALSE),
82 HOWTO(R_X86_64_RELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
83 bfd_elf_generic_reloc, "R_X86_64_RELATIVE", FALSE, MINUS_ONE,
84 MINUS_ONE, FALSE),
85 HOWTO(R_X86_64_GOTPCREL, 0, 2, 32, TRUE, 0, complain_overflow_signed,
86 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL", FALSE, 0xffffffff,
87 0xffffffff, TRUE),
88 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
89 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
90 FALSE),
91 HOWTO(R_X86_64_32S, 0, 2, 32, FALSE, 0, complain_overflow_signed,
92 bfd_elf_generic_reloc, "R_X86_64_32S", FALSE, 0xffffffff, 0xffffffff,
93 FALSE),
94 HOWTO(R_X86_64_16, 0, 1, 16, FALSE, 0, complain_overflow_bitfield,
95 bfd_elf_generic_reloc, "R_X86_64_16", FALSE, 0xffff, 0xffff, FALSE),
96 HOWTO(R_X86_64_PC16,0, 1, 16, TRUE, 0, complain_overflow_bitfield,
97 bfd_elf_generic_reloc, "R_X86_64_PC16", FALSE, 0xffff, 0xffff, TRUE),
98 HOWTO(R_X86_64_8, 0, 0, 8, FALSE, 0, complain_overflow_bitfield,
99 bfd_elf_generic_reloc, "R_X86_64_8", FALSE, 0xff, 0xff, FALSE),
100 HOWTO(R_X86_64_PC8, 0, 0, 8, TRUE, 0, complain_overflow_signed,
101 bfd_elf_generic_reloc, "R_X86_64_PC8", FALSE, 0xff, 0xff, TRUE),
102 HOWTO(R_X86_64_DTPMOD64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
103 bfd_elf_generic_reloc, "R_X86_64_DTPMOD64", FALSE, MINUS_ONE,
104 MINUS_ONE, FALSE),
105 HOWTO(R_X86_64_DTPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
106 bfd_elf_generic_reloc, "R_X86_64_DTPOFF64", FALSE, MINUS_ONE,
107 MINUS_ONE, FALSE),
108 HOWTO(R_X86_64_TPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
109 bfd_elf_generic_reloc, "R_X86_64_TPOFF64", FALSE, MINUS_ONE,
110 MINUS_ONE, FALSE),
111 HOWTO(R_X86_64_TLSGD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
112 bfd_elf_generic_reloc, "R_X86_64_TLSGD", FALSE, 0xffffffff,
113 0xffffffff, TRUE),
114 HOWTO(R_X86_64_TLSLD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
115 bfd_elf_generic_reloc, "R_X86_64_TLSLD", FALSE, 0xffffffff,
116 0xffffffff, TRUE),
117 HOWTO(R_X86_64_DTPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
118 bfd_elf_generic_reloc, "R_X86_64_DTPOFF32", FALSE, 0xffffffff,
119 0xffffffff, FALSE),
120 HOWTO(R_X86_64_GOTTPOFF, 0, 2, 32, TRUE, 0, complain_overflow_signed,
121 bfd_elf_generic_reloc, "R_X86_64_GOTTPOFF", FALSE, 0xffffffff,
122 0xffffffff, TRUE),
123 HOWTO(R_X86_64_TPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
124 bfd_elf_generic_reloc, "R_X86_64_TPOFF32", FALSE, 0xffffffff,
125 0xffffffff, FALSE),
126 HOWTO(R_X86_64_PC64, 0, 4, 64, TRUE, 0, complain_overflow_bitfield,
127 bfd_elf_generic_reloc, "R_X86_64_PC64", FALSE, MINUS_ONE, MINUS_ONE,
128 TRUE),
129 HOWTO(R_X86_64_GOTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
130 bfd_elf_generic_reloc, "R_X86_64_GOTOFF64",
131 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
132 HOWTO(R_X86_64_GOTPC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
133 bfd_elf_generic_reloc, "R_X86_64_GOTPC32",
134 FALSE, 0xffffffff, 0xffffffff, TRUE),
135 HOWTO(R_X86_64_GOT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
136 bfd_elf_generic_reloc, "R_X86_64_GOT64", FALSE, MINUS_ONE, MINUS_ONE,
137 FALSE),
138 HOWTO(R_X86_64_GOTPCREL64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
139 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL64", FALSE, MINUS_ONE,
140 MINUS_ONE, TRUE),
141 HOWTO(R_X86_64_GOTPC64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
142 bfd_elf_generic_reloc, "R_X86_64_GOTPC64",
143 FALSE, MINUS_ONE, MINUS_ONE, TRUE),
144 HOWTO(R_X86_64_GOTPLT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
145 bfd_elf_generic_reloc, "R_X86_64_GOTPLT64", FALSE, MINUS_ONE,
146 MINUS_ONE, FALSE),
147 HOWTO(R_X86_64_PLTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
148 bfd_elf_generic_reloc, "R_X86_64_PLTOFF64", FALSE, MINUS_ONE,
149 MINUS_ONE, FALSE),
150 HOWTO(R_X86_64_SIZE32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
151 bfd_elf_generic_reloc, "R_X86_64_SIZE32", FALSE, 0xffffffff, 0xffffffff,
152 FALSE),
153 HOWTO(R_X86_64_SIZE64, 0, 4, 64, FALSE, 0, complain_overflow_unsigned,
154 bfd_elf_generic_reloc, "R_X86_64_SIZE64", FALSE, MINUS_ONE, MINUS_ONE,
155 FALSE),
156 HOWTO(R_X86_64_GOTPC32_TLSDESC, 0, 2, 32, TRUE, 0,
157 complain_overflow_bitfield, bfd_elf_generic_reloc,
158 "R_X86_64_GOTPC32_TLSDESC",
159 FALSE, 0xffffffff, 0xffffffff, TRUE),
160 HOWTO(R_X86_64_TLSDESC_CALL, 0, 0, 0, FALSE, 0,
161 complain_overflow_dont, bfd_elf_generic_reloc,
162 "R_X86_64_TLSDESC_CALL",
163 FALSE, 0, 0, FALSE),
164 HOWTO(R_X86_64_TLSDESC, 0, 4, 64, FALSE, 0,
165 complain_overflow_bitfield, bfd_elf_generic_reloc,
166 "R_X86_64_TLSDESC",
167 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
168 HOWTO(R_X86_64_IRELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
169 bfd_elf_generic_reloc, "R_X86_64_IRELATIVE", FALSE, MINUS_ONE,
170 MINUS_ONE, FALSE),
171 HOWTO(R_X86_64_RELATIVE64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
172 bfd_elf_generic_reloc, "R_X86_64_RELATIVE64", FALSE, MINUS_ONE,
173 MINUS_ONE, FALSE),
174 HOWTO(R_X86_64_PC32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
175 bfd_elf_generic_reloc, "R_X86_64_PC32_BND", FALSE, 0xffffffff, 0xffffffff,
176 TRUE),
177 HOWTO(R_X86_64_PLT32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
178 bfd_elf_generic_reloc, "R_X86_64_PLT32_BND", FALSE, 0xffffffff, 0xffffffff,
179 TRUE),
180 HOWTO(R_X86_64_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed,
181 bfd_elf_generic_reloc, "R_X86_64_GOTPCRELX", FALSE, 0xffffffff,
182 0xffffffff, TRUE),
183 HOWTO(R_X86_64_REX_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed,
184 bfd_elf_generic_reloc, "R_X86_64_REX_GOTPCRELX", FALSE, 0xffffffff,
185 0xffffffff, TRUE),
186
187 /* We have a gap in the reloc numbers here.
188 R_X86_64_standard counts the number up to this point, and
189 R_X86_64_vt_offset is the value to subtract from a reloc type of
190 R_X86_64_GNU_VT* to form an index into this table. */
191 #define R_X86_64_standard (R_X86_64_REX_GOTPCRELX + 1)
192 #define R_X86_64_vt_offset (R_X86_64_GNU_VTINHERIT - R_X86_64_standard)
193
194 /* GNU extension to record C++ vtable hierarchy. */
195 HOWTO (R_X86_64_GNU_VTINHERIT, 0, 4, 0, FALSE, 0, complain_overflow_dont,
196 NULL, "R_X86_64_GNU_VTINHERIT", FALSE, 0, 0, FALSE),
197
198 /* GNU extension to record C++ vtable member usage. */
199 HOWTO (R_X86_64_GNU_VTENTRY, 0, 4, 0, FALSE, 0, complain_overflow_dont,
200 _bfd_elf_rel_vtable_reloc_fn, "R_X86_64_GNU_VTENTRY", FALSE, 0, 0,
201 FALSE),
202
203 /* Use complain_overflow_bitfield on R_X86_64_32 for x32. */
204 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
205 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
206 FALSE)
207 };
208
209 #define IS_X86_64_PCREL_TYPE(TYPE) \
210 ( ((TYPE) == R_X86_64_PC8) \
211 || ((TYPE) == R_X86_64_PC16) \
212 || ((TYPE) == R_X86_64_PC32) \
213 || ((TYPE) == R_X86_64_PC32_BND) \
214 || ((TYPE) == R_X86_64_PC64))
215
216 /* Map BFD relocs to the x86_64 elf relocs. */
217 struct elf_reloc_map
218 {
219 bfd_reloc_code_real_type bfd_reloc_val;
220 unsigned char elf_reloc_val;
221 };
222
223 static const struct elf_reloc_map x86_64_reloc_map[] =
224 {
225 { BFD_RELOC_NONE, R_X86_64_NONE, },
226 { BFD_RELOC_64, R_X86_64_64, },
227 { BFD_RELOC_32_PCREL, R_X86_64_PC32, },
228 { BFD_RELOC_X86_64_GOT32, R_X86_64_GOT32,},
229 { BFD_RELOC_X86_64_PLT32, R_X86_64_PLT32,},
230 { BFD_RELOC_X86_64_COPY, R_X86_64_COPY, },
231 { BFD_RELOC_X86_64_GLOB_DAT, R_X86_64_GLOB_DAT, },
232 { BFD_RELOC_X86_64_JUMP_SLOT, R_X86_64_JUMP_SLOT, },
233 { BFD_RELOC_X86_64_RELATIVE, R_X86_64_RELATIVE, },
234 { BFD_RELOC_X86_64_GOTPCREL, R_X86_64_GOTPCREL, },
235 { BFD_RELOC_32, R_X86_64_32, },
236 { BFD_RELOC_X86_64_32S, R_X86_64_32S, },
237 { BFD_RELOC_16, R_X86_64_16, },
238 { BFD_RELOC_16_PCREL, R_X86_64_PC16, },
239 { BFD_RELOC_8, R_X86_64_8, },
240 { BFD_RELOC_8_PCREL, R_X86_64_PC8, },
241 { BFD_RELOC_X86_64_DTPMOD64, R_X86_64_DTPMOD64, },
242 { BFD_RELOC_X86_64_DTPOFF64, R_X86_64_DTPOFF64, },
243 { BFD_RELOC_X86_64_TPOFF64, R_X86_64_TPOFF64, },
244 { BFD_RELOC_X86_64_TLSGD, R_X86_64_TLSGD, },
245 { BFD_RELOC_X86_64_TLSLD, R_X86_64_TLSLD, },
246 { BFD_RELOC_X86_64_DTPOFF32, R_X86_64_DTPOFF32, },
247 { BFD_RELOC_X86_64_GOTTPOFF, R_X86_64_GOTTPOFF, },
248 { BFD_RELOC_X86_64_TPOFF32, R_X86_64_TPOFF32, },
249 { BFD_RELOC_64_PCREL, R_X86_64_PC64, },
250 { BFD_RELOC_X86_64_GOTOFF64, R_X86_64_GOTOFF64, },
251 { BFD_RELOC_X86_64_GOTPC32, R_X86_64_GOTPC32, },
252 { BFD_RELOC_X86_64_GOT64, R_X86_64_GOT64, },
253 { BFD_RELOC_X86_64_GOTPCREL64,R_X86_64_GOTPCREL64, },
254 { BFD_RELOC_X86_64_GOTPC64, R_X86_64_GOTPC64, },
255 { BFD_RELOC_X86_64_GOTPLT64, R_X86_64_GOTPLT64, },
256 { BFD_RELOC_X86_64_PLTOFF64, R_X86_64_PLTOFF64, },
257 { BFD_RELOC_SIZE32, R_X86_64_SIZE32, },
258 { BFD_RELOC_SIZE64, R_X86_64_SIZE64, },
259 { BFD_RELOC_X86_64_GOTPC32_TLSDESC, R_X86_64_GOTPC32_TLSDESC, },
260 { BFD_RELOC_X86_64_TLSDESC_CALL, R_X86_64_TLSDESC_CALL, },
261 { BFD_RELOC_X86_64_TLSDESC, R_X86_64_TLSDESC, },
262 { BFD_RELOC_X86_64_IRELATIVE, R_X86_64_IRELATIVE, },
263 { BFD_RELOC_X86_64_PC32_BND, R_X86_64_PC32_BND, },
264 { BFD_RELOC_X86_64_PLT32_BND, R_X86_64_PLT32_BND, },
265 { BFD_RELOC_X86_64_GOTPCRELX, R_X86_64_GOTPCRELX, },
266 { BFD_RELOC_X86_64_REX_GOTPCRELX, R_X86_64_REX_GOTPCRELX, },
267 { BFD_RELOC_VTABLE_INHERIT, R_X86_64_GNU_VTINHERIT, },
268 { BFD_RELOC_VTABLE_ENTRY, R_X86_64_GNU_VTENTRY, },
269 };
270
271 static reloc_howto_type *
272 elf_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type)
273 {
274 unsigned i;
275
276 if (r_type == (unsigned int) R_X86_64_32)
277 {
278 if (ABI_64_P (abfd))
279 i = r_type;
280 else
281 i = ARRAY_SIZE (x86_64_elf_howto_table) - 1;
282 }
283 else if (r_type < (unsigned int) R_X86_64_GNU_VTINHERIT
284 || r_type >= (unsigned int) R_X86_64_max)
285 {
286 if (r_type >= (unsigned int) R_X86_64_standard)
287 {
288 (*_bfd_error_handler) (_("%B: invalid relocation type %d"),
289 abfd, (int) r_type);
290 r_type = R_X86_64_NONE;
291 }
292 i = r_type;
293 }
294 else
295 i = r_type - (unsigned int) R_X86_64_vt_offset;
296 BFD_ASSERT (x86_64_elf_howto_table[i].type == r_type);
297 return &x86_64_elf_howto_table[i];
298 }
299
300 /* Given a BFD reloc type, return a HOWTO structure. */
301 static reloc_howto_type *
302 elf_x86_64_reloc_type_lookup (bfd *abfd,
303 bfd_reloc_code_real_type code)
304 {
305 unsigned int i;
306
307 for (i = 0; i < sizeof (x86_64_reloc_map) / sizeof (struct elf_reloc_map);
308 i++)
309 {
310 if (x86_64_reloc_map[i].bfd_reloc_val == code)
311 return elf_x86_64_rtype_to_howto (abfd,
312 x86_64_reloc_map[i].elf_reloc_val);
313 }
314 return NULL;
315 }
316
317 static reloc_howto_type *
318 elf_x86_64_reloc_name_lookup (bfd *abfd,
319 const char *r_name)
320 {
321 unsigned int i;
322
323 if (!ABI_64_P (abfd) && strcasecmp (r_name, "R_X86_64_32") == 0)
324 {
325 /* Get x32 R_X86_64_32. */
326 reloc_howto_type *reloc
327 = &x86_64_elf_howto_table[ARRAY_SIZE (x86_64_elf_howto_table) - 1];
328 BFD_ASSERT (reloc->type == (unsigned int) R_X86_64_32);
329 return reloc;
330 }
331
332 for (i = 0; i < ARRAY_SIZE (x86_64_elf_howto_table); i++)
333 if (x86_64_elf_howto_table[i].name != NULL
334 && strcasecmp (x86_64_elf_howto_table[i].name, r_name) == 0)
335 return &x86_64_elf_howto_table[i];
336
337 return NULL;
338 }
339
340 /* Given an x86_64 ELF reloc type, fill in an arelent structure. */
341
342 static void
343 elf_x86_64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *cache_ptr,
344 Elf_Internal_Rela *dst)
345 {
346 unsigned r_type;
347
348 r_type = ELF32_R_TYPE (dst->r_info);
349 cache_ptr->howto = elf_x86_64_rtype_to_howto (abfd, r_type);
350 BFD_ASSERT (r_type == cache_ptr->howto->type);
351 }
352 \f
353 /* Support for core dump NOTE sections. */
354 static bfd_boolean
355 elf_x86_64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
356 {
357 int offset;
358 size_t size;
359
360 switch (note->descsz)
361 {
362 default:
363 return FALSE;
364
365 case 296: /* sizeof(struct elf_prstatus) on Linux/x32 */
366 /* pr_cursig */
367 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
368
369 /* pr_pid */
370 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
371
372 /* pr_reg */
373 offset = 72;
374 size = 216;
375
376 break;
377
378 case 336: /* sizeof(struct elf_prstatus) on Linux/x86_64 */
379 /* pr_cursig */
380 elf_tdata (abfd)->core->signal
381 = bfd_get_16 (abfd, note->descdata + 12);
382
383 /* pr_pid */
384 elf_tdata (abfd)->core->lwpid
385 = bfd_get_32 (abfd, note->descdata + 32);
386
387 /* pr_reg */
388 offset = 112;
389 size = 216;
390
391 break;
392 }
393
394 /* Make a ".reg/999" section. */
395 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
396 size, note->descpos + offset);
397 }
398
399 static bfd_boolean
400 elf_x86_64_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
401 {
402 switch (note->descsz)
403 {
404 default:
405 return FALSE;
406
407 case 124: /* sizeof(struct elf_prpsinfo) on Linux/x32 */
408 elf_tdata (abfd)->core->pid
409 = bfd_get_32 (abfd, note->descdata + 12);
410 elf_tdata (abfd)->core->program
411 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
412 elf_tdata (abfd)->core->command
413 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
414 break;
415
416 case 136: /* sizeof(struct elf_prpsinfo) on Linux/x86_64 */
417 elf_tdata (abfd)->core->pid
418 = bfd_get_32 (abfd, note->descdata + 24);
419 elf_tdata (abfd)->core->program
420 = _bfd_elfcore_strndup (abfd, note->descdata + 40, 16);
421 elf_tdata (abfd)->core->command
422 = _bfd_elfcore_strndup (abfd, note->descdata + 56, 80);
423 }
424
425 /* Note that for some reason a spurious space is tacked
426 onto the end of the args in some implementations (at least
427 one, anyway), so strip it off if it exists. */
428
429 {
430 char *command = elf_tdata (abfd)->core->command;
431 int n = strlen (command);
432
433 if (0 < n && command[n - 1] == ' ')
434 command[n - 1] = '\0';
435 }
436
437 return TRUE;
438 }
439
440 #ifdef CORE_HEADER
441 static char *
442 elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz,
443 int note_type, ...)
444 {
445 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
446 va_list ap;
447 const char *fname, *psargs;
448 long pid;
449 int cursig;
450 const void *gregs;
451
452 switch (note_type)
453 {
454 default:
455 return NULL;
456
457 case NT_PRPSINFO:
458 va_start (ap, note_type);
459 fname = va_arg (ap, const char *);
460 psargs = va_arg (ap, const char *);
461 va_end (ap);
462
463 if (bed->s->elfclass == ELFCLASS32)
464 {
465 prpsinfo32_t data;
466 memset (&data, 0, sizeof (data));
467 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
468 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
469 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
470 &data, sizeof (data));
471 }
472 else
473 {
474 prpsinfo64_t data;
475 memset (&data, 0, sizeof (data));
476 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
477 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
478 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
479 &data, sizeof (data));
480 }
481 /* NOTREACHED */
482
483 case NT_PRSTATUS:
484 va_start (ap, note_type);
485 pid = va_arg (ap, long);
486 cursig = va_arg (ap, int);
487 gregs = va_arg (ap, const void *);
488 va_end (ap);
489
490 if (bed->s->elfclass == ELFCLASS32)
491 {
492 if (bed->elf_machine_code == EM_X86_64)
493 {
494 prstatusx32_t prstat;
495 memset (&prstat, 0, sizeof (prstat));
496 prstat.pr_pid = pid;
497 prstat.pr_cursig = cursig;
498 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
499 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
500 &prstat, sizeof (prstat));
501 }
502 else
503 {
504 prstatus32_t prstat;
505 memset (&prstat, 0, sizeof (prstat));
506 prstat.pr_pid = pid;
507 prstat.pr_cursig = cursig;
508 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
509 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
510 &prstat, sizeof (prstat));
511 }
512 }
513 else
514 {
515 prstatus64_t prstat;
516 memset (&prstat, 0, sizeof (prstat));
517 prstat.pr_pid = pid;
518 prstat.pr_cursig = cursig;
519 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
520 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
521 &prstat, sizeof (prstat));
522 }
523 }
524 /* NOTREACHED */
525 }
526 #endif
527 \f
528 /* Functions for the x86-64 ELF linker. */
529
530 /* The name of the dynamic interpreter. This is put in the .interp
531 section. */
532
533 #define ELF64_DYNAMIC_INTERPRETER "/lib/ld64.so.1"
534 #define ELF32_DYNAMIC_INTERPRETER "/lib/ldx32.so.1"
535
536 /* If ELIMINATE_COPY_RELOCS is non-zero, the linker will try to avoid
537 copying dynamic variables from a shared lib into an app's dynbss
538 section, and instead use a dynamic relocation to point into the
539 shared lib. */
540 #define ELIMINATE_COPY_RELOCS 1
541
542 /* The size in bytes of an entry in the global offset table. */
543
544 #define GOT_ENTRY_SIZE 8
545
546 /* The size in bytes of an entry in the procedure linkage table. */
547
548 #define PLT_ENTRY_SIZE 16
549
550 /* The first entry in a procedure linkage table looks like this. See the
551 SVR4 ABI i386 supplement and the x86-64 ABI to see how this works. */
552
553 static const bfd_byte elf_x86_64_plt0_entry[PLT_ENTRY_SIZE] =
554 {
555 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
556 0xff, 0x25, 16, 0, 0, 0, /* jmpq *GOT+16(%rip) */
557 0x0f, 0x1f, 0x40, 0x00 /* nopl 0(%rax) */
558 };
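/* For reference (not part of the template): the 8 and 16 in the
   displacement fields above are placeholders that the linker replaces
   with PC-relative offsets to GOT+8 and GOT+16 (see plt0_got1_offset
   and plt0_got2_offset below).  By convention the dynamic linker stores
   its link-map pointer in GOT+8 and the address of its lazy resolver in
   GOT+16, so PLT0 pushes the former and jumps to the latter.  */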
559
560 /* Subsequent entries in a procedure linkage table look like this. */
561
562 static const bfd_byte elf_x86_64_plt_entry[PLT_ENTRY_SIZE] =
563 {
564 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
565 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
566 0x68, /* pushq immediate */
567 0, 0, 0, 0, /* replaced with index into relocation table. */
568 0xe9, /* jmp relative */
569 0, 0, 0, 0 /* replaced with offset to start of .plt0. */
570 };
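/* Note on lazy binding with the entry above: until a symbol is resolved,
   its .got.plt slot points back at the pushq in its own PLT entry
   (offset 6, the plt_lazy_offset value below), so the initial indirect
   jmpq falls through to the pushq/jmp pair, entering PLT0 with the
   relocation index on the stack.  */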
571
572 /* The first entry in a procedure linkage table with BND relocations
573 looks like this. */
574
575 static const bfd_byte elf_x86_64_bnd_plt0_entry[PLT_ENTRY_SIZE] =
576 {
577 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
578 0xf2, 0xff, 0x25, 16, 0, 0, 0, /* bnd jmpq *GOT+16(%rip) */
579 0x0f, 0x1f, 0 /* nopl (%rax) */
580 };
581
582 /* Subsequent entries for legacy branches in a procedure linkage table
583 with BND relocations look like this. */
584
585 static const bfd_byte elf_x86_64_legacy_plt_entry[PLT_ENTRY_SIZE] =
586 {
587 0x68, 0, 0, 0, 0, /* pushq immediate */
588 0xe9, 0, 0, 0, 0, /* jmpq relative */
589 0x66, 0x0f, 0x1f, 0x44, 0, 0 /* nopw (%rax,%rax,1) */
590 };
591
592 /* Subsequent entries for branches with BND prefix in a procedure linkage
593 table with BND relocations look like this. */
594
595 static const bfd_byte elf_x86_64_bnd_plt_entry[PLT_ENTRY_SIZE] =
596 {
597 0x68, 0, 0, 0, 0, /* pushq immediate */
598 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
599 0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */
600 };
601
602 /* Entries for legacy branches in the second procedure linkage table
603 look like this. */
604
605 static const bfd_byte elf_x86_64_legacy_plt2_entry[8] =
606 {
607 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
608 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
609 0x66, 0x90 /* xchg %ax,%ax */
610 };
611
612 /* Entries for branches with BND prefix in the second procedure linkage
613 table look like this. */
614
615 static const bfd_byte elf_x86_64_bnd_plt2_entry[8] =
616 {
617 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
618 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
619 0x90 /* nop */
620 };
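/* Rough picture of the split PLT assumed by the two 8-byte templates
   above: branches to the function target an 8-byte entry in the second
   PLT (the plt_bnd section shortcut below), which performs the
   GOT-indirect jump; for lazy binding the .got.plt slot initially points
   back at the pushq in the corresponding 16-byte .plt entry, which
   pushes the relocation index and jumps to PLT0.  */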
621
622 /* .eh_frame covering the .plt section. */
623
624 static const bfd_byte elf_x86_64_eh_frame_plt[] =
625 {
626 #define PLT_CIE_LENGTH 20
627 #define PLT_FDE_LENGTH 36
628 #define PLT_FDE_START_OFFSET 4 + PLT_CIE_LENGTH + 8
629 #define PLT_FDE_LEN_OFFSET 4 + PLT_CIE_LENGTH + 12
630 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
631 0, 0, 0, 0, /* CIE ID */
632 1, /* CIE version */
633 'z', 'R', 0, /* Augmentation string */
634 1, /* Code alignment factor */
635 0x78, /* Data alignment factor */
636 16, /* Return address column */
637 1, /* Augmentation size */
638 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
639 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
640 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
641 DW_CFA_nop, DW_CFA_nop,
642
643 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
644 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
645 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
646 0, 0, 0, 0, /* .plt size goes here */
647 0, /* Augmentation size */
648 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
649 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
650 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
651 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
652 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
653 11, /* Block length */
654 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
655 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
656 DW_OP_lit15, DW_OP_and, DW_OP_lit11, DW_OP_ge,
657 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
658 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
659 };
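/* The DW_CFA_def_cfa_expression above computes
     CFA = %rsp + 8 + (((%rip & 15) >= 11) ? 8 : 0)
   i.e. within a 16-byte PLT entry the pushq of the relocation index ends
   at offset 11, so once %rip has passed it an extra 8 bytes are on the
   stack.  */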
660
661 /* Architecture-specific backend data for x86-64. */
662
663 struct elf_x86_64_backend_data
664 {
665 /* Templates for the initial PLT entry and for subsequent entries. */
666 const bfd_byte *plt0_entry;
667 const bfd_byte *plt_entry;
668 unsigned int plt_entry_size; /* Size of each PLT entry. */
669
670 /* Offsets into plt0_entry that are to be replaced with GOT[1] and GOT[2]. */
671 unsigned int plt0_got1_offset;
672 unsigned int plt0_got2_offset;
673
674 /* Offset of the end of the PC-relative instruction containing
675 plt0_got2_offset. */
676 unsigned int plt0_got2_insn_end;
677
678 /* Offsets into plt_entry that are to be replaced with... */
679 unsigned int plt_got_offset; /* ... address of this symbol in .got. */
680 unsigned int plt_reloc_offset; /* ... offset into relocation table. */
681 unsigned int plt_plt_offset; /* ... offset to start of .plt. */
682
683 /* Length of the PC-relative instruction containing plt_got_offset. */
684 unsigned int plt_got_insn_size;
685
686 /* Offset of the end of the PC-relative jump to plt0_entry. */
687 unsigned int plt_plt_insn_end;
688
689 /* Offset into plt_entry where the initial value of the GOT entry points. */
690 unsigned int plt_lazy_offset;
691
692 /* .eh_frame covering the .plt section. */
693 const bfd_byte *eh_frame_plt;
694 unsigned int eh_frame_plt_size;
695 };
696
697 #define get_elf_x86_64_arch_data(bed) \
698 ((const struct elf_x86_64_backend_data *) (bed)->arch_data)
699
700 #define get_elf_x86_64_backend_data(abfd) \
701 get_elf_x86_64_arch_data (get_elf_backend_data (abfd))
702
703 #define GET_PLT_ENTRY_SIZE(abfd) \
704 get_elf_x86_64_backend_data (abfd)->plt_entry_size
705
706 /* These are the standard parameters. */
707 static const struct elf_x86_64_backend_data elf_x86_64_arch_bed =
708 {
709 elf_x86_64_plt0_entry, /* plt0_entry */
710 elf_x86_64_plt_entry, /* plt_entry */
711 sizeof (elf_x86_64_plt_entry), /* plt_entry_size */
712 2, /* plt0_got1_offset */
713 8, /* plt0_got2_offset */
714 12, /* plt0_got2_insn_end */
715 2, /* plt_got_offset */
716 7, /* plt_reloc_offset */
717 12, /* plt_plt_offset */
718 6, /* plt_got_insn_size */
719 PLT_ENTRY_SIZE, /* plt_plt_insn_end */
720 6, /* plt_lazy_offset */
721 elf_x86_64_eh_frame_plt, /* eh_frame_plt */
722 sizeof (elf_x86_64_eh_frame_plt), /* eh_frame_plt_size */
723 };
724
725 static const struct elf_x86_64_backend_data elf_x86_64_bnd_arch_bed =
726 {
727 elf_x86_64_bnd_plt0_entry, /* plt0_entry */
728 elf_x86_64_bnd_plt_entry, /* plt_entry */
729 sizeof (elf_x86_64_bnd_plt_entry), /* plt_entry_size */
730 2, /* plt0_got1_offset */
731 1+8, /* plt0_got2_offset */
732 1+12, /* plt0_got2_insn_end */
733 1+2, /* plt_got_offset */
734 1, /* plt_reloc_offset */
735 7, /* plt_plt_offset */
736 1+6, /* plt_got_insn_size */
737 11, /* plt_plt_insn_end */
738 0, /* plt_lazy_offset */
739 elf_x86_64_eh_frame_plt, /* eh_frame_plt */
740 sizeof (elf_x86_64_eh_frame_plt), /* eh_frame_plt_size */
741 };
742
743 #define elf_backend_arch_data &elf_x86_64_arch_bed
744
745 /* Is this an undefined weak symbol which is resolved to 0?  A reference
746 to an undefined weak symbol is resolved to 0 when building an
747 executable if it isn't dynamic and
748 1. has non-GOT/non-PLT relocations in a text section, or
749 2. has no GOT/PLT relocation.
750 */
751 #define UNDEFINED_WEAK_RESOLVED_TO_ZERO(INFO, GOT_RELOC, EH) \
752 ((EH)->elf.root.type == bfd_link_hash_undefweak \
753 && bfd_link_executable (INFO) \
754 && (elf_x86_64_hash_table (INFO)->interp == NULL \
755 || !(GOT_RELOC) \
756 || (EH)->has_non_got_reloc \
757 || !(INFO)->dynamic_undefined_weak))
758
759 /* x86-64 ELF linker hash entry. */
760
761 struct elf_x86_64_link_hash_entry
762 {
763 struct elf_link_hash_entry elf;
764
765 /* Track dynamic relocs copied for this symbol. */
766 struct elf_dyn_relocs *dyn_relocs;
767
768 #define GOT_UNKNOWN 0
769 #define GOT_NORMAL 1
770 #define GOT_TLS_GD 2
771 #define GOT_TLS_IE 3
772 #define GOT_TLS_GDESC 4
773 #define GOT_TLS_GD_BOTH_P(type) \
774 ((type) == (GOT_TLS_GD | GOT_TLS_GDESC))
775 #define GOT_TLS_GD_P(type) \
776 ((type) == GOT_TLS_GD || GOT_TLS_GD_BOTH_P (type))
777 #define GOT_TLS_GDESC_P(type) \
778 ((type) == GOT_TLS_GDESC || GOT_TLS_GD_BOTH_P (type))
779 #define GOT_TLS_GD_ANY_P(type) \
780 (GOT_TLS_GD_P (type) || GOT_TLS_GDESC_P (type))
781 unsigned char tls_type;
782
783 /* TRUE if a weak symbol with a real definition needs a copy reloc.
784 When there is a weak symbol with a real definition, the processor
785 independent code will have arranged for us to see the real
786 definition first. We need to copy the needs_copy bit from the
787 real definition and check it when allowing copy reloc in PIE. */
788 unsigned int needs_copy : 1;
789
790 /* TRUE if symbol has at least one BND relocation. */
791 unsigned int has_bnd_reloc : 1;
792
793 /* TRUE if symbol has GOT or PLT relocations. */
794 unsigned int has_got_reloc : 1;
795
796 /* TRUE if symbol has non-GOT/non-PLT relocations in text sections. */
797 unsigned int has_non_got_reloc : 1;
798
799 /* 0: symbol isn't __tls_get_addr.
800 1: symbol is __tls_get_addr.
801 2: symbol is unknown. */
802 unsigned int tls_get_addr : 2;
803
804 /* Reference count of C/C++ function pointer relocations in read-write
805 section which can be resolved at run-time. */
806 bfd_signed_vma func_pointer_refcount;
807
808 /* Information about the GOT PLT entry. Filled when there are both
809 GOT and PLT relocations against the same function. */
810 union gotplt_union plt_got;
811
812 /* Information about the second PLT entry. Filled when has_bnd_reloc is
813 set. */
814 union gotplt_union plt_bnd;
815
816 /* Offset of the GOTPLT entry reserved for the TLS descriptor,
817 starting at the end of the jump table. */
818 bfd_vma tlsdesc_got;
819 };
820
821 #define elf_x86_64_hash_entry(ent) \
822 ((struct elf_x86_64_link_hash_entry *)(ent))
823
824 struct elf_x86_64_obj_tdata
825 {
826 struct elf_obj_tdata root;
827
828 /* tls_type for each local got entry. */
829 char *local_got_tls_type;
830
831 /* GOTPLT entries for TLS descriptors. */
832 bfd_vma *local_tlsdesc_gotent;
833 };
834
835 #define elf_x86_64_tdata(abfd) \
836 ((struct elf_x86_64_obj_tdata *) (abfd)->tdata.any)
837
838 #define elf_x86_64_local_got_tls_type(abfd) \
839 (elf_x86_64_tdata (abfd)->local_got_tls_type)
840
841 #define elf_x86_64_local_tlsdesc_gotent(abfd) \
842 (elf_x86_64_tdata (abfd)->local_tlsdesc_gotent)
843
844 #define is_x86_64_elf(bfd) \
845 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
846 && elf_tdata (bfd) != NULL \
847 && elf_object_id (bfd) == X86_64_ELF_DATA)
848
849 static bfd_boolean
850 elf_x86_64_mkobject (bfd *abfd)
851 {
852 return bfd_elf_allocate_object (abfd, sizeof (struct elf_x86_64_obj_tdata),
853 X86_64_ELF_DATA);
854 }
855
856 /* x86-64 ELF linker hash table. */
857
858 struct elf_x86_64_link_hash_table
859 {
860 struct elf_link_hash_table elf;
861
862 /* Short-cuts to get to dynamic linker sections. */
863 asection *interp;
864 asection *sdynbss;
865 asection *srelbss;
866 asection *plt_eh_frame;
867 asection *plt_bnd;
868 asection *plt_got;
869
870 union
871 {
872 bfd_signed_vma refcount;
873 bfd_vma offset;
874 } tls_ld_got;
875
876 /* The amount of space used by the jump slots in the GOT. */
877 bfd_vma sgotplt_jump_table_size;
878
879 /* Small local sym cache. */
880 struct sym_cache sym_cache;
881
882 bfd_vma (*r_info) (bfd_vma, bfd_vma);
883 bfd_vma (*r_sym) (bfd_vma);
884 unsigned int pointer_r_type;
885 const char *dynamic_interpreter;
886 int dynamic_interpreter_size;
887
888 /* _TLS_MODULE_BASE_ symbol. */
889 struct bfd_link_hash_entry *tls_module_base;
890
891 /* Used by local STT_GNU_IFUNC symbols. */
892 htab_t loc_hash_table;
893 void * loc_hash_memory;
894
895 /* The offset into splt of the PLT entry for the TLS descriptor
896 resolver. Special values are 0, if not necessary (or not found
897 to be necessary yet), and -1 if needed but not determined
898 yet. */
899 bfd_vma tlsdesc_plt;
900 /* The offset into sgot of the GOT entry used by the PLT entry
901 above. */
902 bfd_vma tlsdesc_got;
903
904 /* The index of the next R_X86_64_JUMP_SLOT entry in .rela.plt. */
905 bfd_vma next_jump_slot_index;
906 /* The index of the next R_X86_64_IRELATIVE entry in .rela.plt. */
907 bfd_vma next_irelative_index;
908
909 /* TRUE if there are dynamic relocs against IFUNC symbols that apply
910 to read-only sections. */
911 bfd_boolean readonly_dynrelocs_against_ifunc;
912 };
913
914 /* Get the x86-64 ELF linker hash table from a link_info structure. */
915
916 #define elf_x86_64_hash_table(p) \
917 (elf_hash_table_id ((struct elf_link_hash_table *) ((p)->hash)) \
918 == X86_64_ELF_DATA ? ((struct elf_x86_64_link_hash_table *) ((p)->hash)) : NULL)
919
920 #define elf_x86_64_compute_jump_table_size(htab) \
921 ((htab)->elf.srelplt->reloc_count * GOT_ENTRY_SIZE)
922
923 /* Create an entry in an x86-64 ELF linker hash table. */
924
925 static struct bfd_hash_entry *
926 elf_x86_64_link_hash_newfunc (struct bfd_hash_entry *entry,
927 struct bfd_hash_table *table,
928 const char *string)
929 {
930 /* Allocate the structure if it has not already been allocated by a
931 subclass. */
932 if (entry == NULL)
933 {
934 entry = (struct bfd_hash_entry *)
935 bfd_hash_allocate (table,
936 sizeof (struct elf_x86_64_link_hash_entry));
937 if (entry == NULL)
938 return entry;
939 }
940
941 /* Call the allocation method of the superclass. */
942 entry = _bfd_elf_link_hash_newfunc (entry, table, string);
943 if (entry != NULL)
944 {
945 struct elf_x86_64_link_hash_entry *eh;
946
947 eh = (struct elf_x86_64_link_hash_entry *) entry;
948 eh->dyn_relocs = NULL;
949 eh->tls_type = GOT_UNKNOWN;
950 eh->needs_copy = 0;
951 eh->has_bnd_reloc = 0;
952 eh->has_got_reloc = 0;
953 eh->has_non_got_reloc = 0;
954 eh->tls_get_addr = 2;
955 eh->func_pointer_refcount = 0;
956 eh->plt_bnd.offset = (bfd_vma) -1;
957 eh->plt_got.offset = (bfd_vma) -1;
958 eh->tlsdesc_got = (bfd_vma) -1;
959 }
960
961 return entry;
962 }
963
964 /* Compute a hash of a local hash entry.  We use elf_link_hash_entry
965 for local symbols so that we can handle local STT_GNU_IFUNC symbols
966 as global symbols.  We reuse indx and dynstr_index for the local
967 symbol hash since they aren't used by global symbols in this backend. */
968
969 static hashval_t
970 elf_x86_64_local_htab_hash (const void *ptr)
971 {
972 struct elf_link_hash_entry *h
973 = (struct elf_link_hash_entry *) ptr;
974 return ELF_LOCAL_SYMBOL_HASH (h->indx, h->dynstr_index);
975 }
976
977 /* Compare local hash entries. */
978
979 static int
980 elf_x86_64_local_htab_eq (const void *ptr1, const void *ptr2)
981 {
982 struct elf_link_hash_entry *h1
983 = (struct elf_link_hash_entry *) ptr1;
984 struct elf_link_hash_entry *h2
985 = (struct elf_link_hash_entry *) ptr2;
986
987 return h1->indx == h2->indx && h1->dynstr_index == h2->dynstr_index;
988 }
989
990 /* Find and/or create a hash entry for a local symbol. */
991
992 static struct elf_link_hash_entry *
993 elf_x86_64_get_local_sym_hash (struct elf_x86_64_link_hash_table *htab,
994 bfd *abfd, const Elf_Internal_Rela *rel,
995 bfd_boolean create)
996 {
997 struct elf_x86_64_link_hash_entry e, *ret;
998 asection *sec = abfd->sections;
999 hashval_t h = ELF_LOCAL_SYMBOL_HASH (sec->id,
1000 htab->r_sym (rel->r_info));
1001 void **slot;
1002
1003 e.elf.indx = sec->id;
1004 e.elf.dynstr_index = htab->r_sym (rel->r_info);
1005 slot = htab_find_slot_with_hash (htab->loc_hash_table, &e, h,
1006 create ? INSERT : NO_INSERT);
1007
1008 if (!slot)
1009 return NULL;
1010
1011 if (*slot)
1012 {
1013 ret = (struct elf_x86_64_link_hash_entry *) *slot;
1014 return &ret->elf;
1015 }
1016
1017 ret = (struct elf_x86_64_link_hash_entry *)
1018 objalloc_alloc ((struct objalloc *) htab->loc_hash_memory,
1019 sizeof (struct elf_x86_64_link_hash_entry));
1020 if (ret)
1021 {
1022 memset (ret, 0, sizeof (*ret));
1023 ret->elf.indx = sec->id;
1024 ret->elf.dynstr_index = htab->r_sym (rel->r_info);
1025 ret->elf.dynindx = -1;
1026 ret->func_pointer_refcount = 0;
1027 ret->plt_got.offset = (bfd_vma) -1;
1028 *slot = ret;
1029 }
1030 return &ret->elf;
1031 }
1032
1033 /* Destroy an X86-64 ELF linker hash table. */
1034
1035 static void
1036 elf_x86_64_link_hash_table_free (bfd *obfd)
1037 {
1038 struct elf_x86_64_link_hash_table *htab
1039 = (struct elf_x86_64_link_hash_table *) obfd->link.hash;
1040
1041 if (htab->loc_hash_table)
1042 htab_delete (htab->loc_hash_table);
1043 if (htab->loc_hash_memory)
1044 objalloc_free ((struct objalloc *) htab->loc_hash_memory);
1045 _bfd_elf_link_hash_table_free (obfd);
1046 }
1047
1048 /* Create an X86-64 ELF linker hash table. */
1049
1050 static struct bfd_link_hash_table *
1051 elf_x86_64_link_hash_table_create (bfd *abfd)
1052 {
1053 struct elf_x86_64_link_hash_table *ret;
1054 bfd_size_type amt = sizeof (struct elf_x86_64_link_hash_table);
1055
1056 ret = (struct elf_x86_64_link_hash_table *) bfd_zmalloc (amt);
1057 if (ret == NULL)
1058 return NULL;
1059
1060 if (!_bfd_elf_link_hash_table_init (&ret->elf, abfd,
1061 elf_x86_64_link_hash_newfunc,
1062 sizeof (struct elf_x86_64_link_hash_entry),
1063 X86_64_ELF_DATA))
1064 {
1065 free (ret);
1066 return NULL;
1067 }
1068
1069 if (ABI_64_P (abfd))
1070 {
1071 ret->r_info = elf64_r_info;
1072 ret->r_sym = elf64_r_sym;
1073 ret->pointer_r_type = R_X86_64_64;
1074 ret->dynamic_interpreter = ELF64_DYNAMIC_INTERPRETER;
1075 ret->dynamic_interpreter_size = sizeof ELF64_DYNAMIC_INTERPRETER;
1076 }
1077 else
1078 {
1079 ret->r_info = elf32_r_info;
1080 ret->r_sym = elf32_r_sym;
1081 ret->pointer_r_type = R_X86_64_32;
1082 ret->dynamic_interpreter = ELF32_DYNAMIC_INTERPRETER;
1083 ret->dynamic_interpreter_size = sizeof ELF32_DYNAMIC_INTERPRETER;
1084 }
1085
1086 ret->loc_hash_table = htab_try_create (1024,
1087 elf_x86_64_local_htab_hash,
1088 elf_x86_64_local_htab_eq,
1089 NULL);
1090 ret->loc_hash_memory = objalloc_create ();
1091 if (!ret->loc_hash_table || !ret->loc_hash_memory)
1092 {
1093 elf_x86_64_link_hash_table_free (abfd);
1094 return NULL;
1095 }
1096 ret->elf.root.hash_table_free = elf_x86_64_link_hash_table_free;
1097
1098 return &ret->elf.root;
1099 }
1100
1101 /* Create .plt, .rela.plt, .got, .got.plt, .rela.got, .dynbss, and
1102 .rela.bss sections in DYNOBJ, and set up shortcuts to them in our
1103 hash table. */
1104
1105 static bfd_boolean
1106 elf_x86_64_create_dynamic_sections (bfd *dynobj,
1107 struct bfd_link_info *info)
1108 {
1109 struct elf_x86_64_link_hash_table *htab;
1110
1111 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
1112 return FALSE;
1113
1114 htab = elf_x86_64_hash_table (info);
1115 if (htab == NULL)
1116 return FALSE;
1117
1118 /* Set the contents of the .interp section to the interpreter. */
1119 if (bfd_link_executable (info) && !info->nointerp)
1120 {
1121 asection *s = bfd_get_linker_section (dynobj, ".interp");
1122 if (s == NULL)
1123 abort ();
1124 s->size = htab->dynamic_interpreter_size;
1125 s->contents = (unsigned char *) htab->dynamic_interpreter;
1126 htab->interp = s;
1127 }
1128
1129 htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
1130 if (!htab->sdynbss)
1131 abort ();
1132
1133 if (bfd_link_executable (info))
1134 {
1135 /* Always allow copy relocs for building executables. */
1136 asection *s = bfd_get_linker_section (dynobj, ".rela.bss");
1137 if (s == NULL)
1138 {
1139 const struct elf_backend_data *bed = get_elf_backend_data (dynobj);
1140 s = bfd_make_section_anyway_with_flags (dynobj,
1141 ".rela.bss",
1142 (bed->dynamic_sec_flags
1143 | SEC_READONLY));
1144 if (s == NULL
1145 || ! bfd_set_section_alignment (dynobj, s,
1146 bed->s->log_file_align))
1147 return FALSE;
1148 }
1149 htab->srelbss = s;
1150 }
1151
1152 if (!info->no_ld_generated_unwind_info
1153 && htab->plt_eh_frame == NULL
1154 && htab->elf.splt != NULL)
1155 {
1156 flagword flags = (SEC_ALLOC | SEC_LOAD | SEC_READONLY
1157 | SEC_HAS_CONTENTS | SEC_IN_MEMORY
1158 | SEC_LINKER_CREATED);
1159 htab->plt_eh_frame
1160 = bfd_make_section_anyway_with_flags (dynobj, ".eh_frame", flags);
1161 if (htab->plt_eh_frame == NULL
1162 || !bfd_set_section_alignment (dynobj, htab->plt_eh_frame, 3))
1163 return FALSE;
1164 }
1165
1166 /* Align .got section to its entry size. */
1167 if (htab->elf.sgot != NULL
1168 && !bfd_set_section_alignment (dynobj, htab->elf.sgot, 3))
1169 return FALSE;
1170
1171 /* Align .got.plt section to its entry size. */
1172 if (htab->elf.sgotplt != NULL
1173 && !bfd_set_section_alignment (dynobj, htab->elf.sgotplt, 3))
1174 return FALSE;
1175
1176 return TRUE;
1177 }
1178
1179 /* Copy the extra info we tack onto an elf_link_hash_entry. */
1180
1181 static void
1182 elf_x86_64_copy_indirect_symbol (struct bfd_link_info *info,
1183 struct elf_link_hash_entry *dir,
1184 struct elf_link_hash_entry *ind)
1185 {
1186 struct elf_x86_64_link_hash_entry *edir, *eind;
1187
1188 edir = (struct elf_x86_64_link_hash_entry *) dir;
1189 eind = (struct elf_x86_64_link_hash_entry *) ind;
1190
1191 if (!edir->has_bnd_reloc)
1192 edir->has_bnd_reloc = eind->has_bnd_reloc;
1193
1194 if (!edir->has_got_reloc)
1195 edir->has_got_reloc = eind->has_got_reloc;
1196
1197 if (!edir->has_non_got_reloc)
1198 edir->has_non_got_reloc = eind->has_non_got_reloc;
1199
1200 if (eind->dyn_relocs != NULL)
1201 {
1202 if (edir->dyn_relocs != NULL)
1203 {
1204 struct elf_dyn_relocs **pp;
1205 struct elf_dyn_relocs *p;
1206
1207 /* Add reloc counts against the indirect sym to the direct sym
1208 list. Merge any entries against the same section. */
1209 for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
1210 {
1211 struct elf_dyn_relocs *q;
1212
1213 for (q = edir->dyn_relocs; q != NULL; q = q->next)
1214 if (q->sec == p->sec)
1215 {
1216 q->pc_count += p->pc_count;
1217 q->count += p->count;
1218 *pp = p->next;
1219 break;
1220 }
1221 if (q == NULL)
1222 pp = &p->next;
1223 }
1224 *pp = edir->dyn_relocs;
1225 }
1226
1227 edir->dyn_relocs = eind->dyn_relocs;
1228 eind->dyn_relocs = NULL;
1229 }
1230
1231 if (ind->root.type == bfd_link_hash_indirect
1232 && dir->got.refcount <= 0)
1233 {
1234 edir->tls_type = eind->tls_type;
1235 eind->tls_type = GOT_UNKNOWN;
1236 }
1237
1238 if (ELIMINATE_COPY_RELOCS
1239 && ind->root.type != bfd_link_hash_indirect
1240 && dir->dynamic_adjusted)
1241 {
1242 /* If called to transfer flags for a weakdef during processing
1243 of elf_adjust_dynamic_symbol, don't copy non_got_ref.
1244 We clear it ourselves for ELIMINATE_COPY_RELOCS. */
1245 dir->ref_dynamic |= ind->ref_dynamic;
1246 dir->ref_regular |= ind->ref_regular;
1247 dir->ref_regular_nonweak |= ind->ref_regular_nonweak;
1248 dir->needs_plt |= ind->needs_plt;
1249 dir->pointer_equality_needed |= ind->pointer_equality_needed;
1250 }
1251 else
1252 {
1253 if (eind->func_pointer_refcount > 0)
1254 {
1255 edir->func_pointer_refcount += eind->func_pointer_refcount;
1256 eind->func_pointer_refcount = 0;
1257 }
1258
1259 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
1260 }
1261 }
1262
1263 static bfd_boolean
1264 elf64_x86_64_elf_object_p (bfd *abfd)
1265 {
1266 /* Set the right machine number for an x86-64 elf64 file. */
1267 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64);
1268 return TRUE;
1269 }
1270
1271 static bfd_boolean
1272 elf32_x86_64_elf_object_p (bfd *abfd)
1273 {
1274 /* Set the right machine number for an x86-64 elf32 file. */
1275 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32);
1276 return TRUE;
1277 }
1278
1279 /* Return TRUE if the TLS access code sequence supports a transition
1280 from R_TYPE. */
1281
1282 static bfd_boolean
1283 elf_x86_64_check_tls_transition (bfd *abfd,
1284 struct bfd_link_info *info,
1285 asection *sec,
1286 bfd_byte *contents,
1287 Elf_Internal_Shdr *symtab_hdr,
1288 struct elf_link_hash_entry **sym_hashes,
1289 unsigned int r_type,
1290 const Elf_Internal_Rela *rel,
1291 const Elf_Internal_Rela *relend)
1292 {
1293 unsigned int val;
1294 unsigned long r_symndx;
1295 bfd_boolean largepic = FALSE;
1296 struct elf_link_hash_entry *h;
1297 bfd_vma offset;
1298 struct elf_x86_64_link_hash_table *htab;
1299 bfd_byte *call;
1300 bfd_boolean indirect_call, tls_get_addr;
1301
1302 htab = elf_x86_64_hash_table (info);
1303 offset = rel->r_offset;
1304 switch (r_type)
1305 {
1306 case R_X86_64_TLSGD:
1307 case R_X86_64_TLSLD:
1308 if ((rel + 1) >= relend)
1309 return FALSE;
1310
1311 if (r_type == R_X86_64_TLSGD)
1312 {
1313 /* Check transition from GD access model. For 64bit, only
1314 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1315 .word 0x6666; rex64; call __tls_get_addr@PLT
1316 or
1317 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1318 .byte 0x66; rex64
1319 call *__tls_get_addr@GOTPCREL(%rip)
1320 which may be converted to
1321 addr32 call __tls_get_addr
1322 can transition to a different access model.  For 32bit, only
1323 leaq foo@tlsgd(%rip), %rdi
1324 .word 0x6666; rex64; call __tls_get_addr@PLT
1325 or
1326 leaq foo@tlsgd(%rip), %rdi
1327 .byte 0x66; rex64
1328 call *__tls_get_addr@GOTPCREL(%rip)
1329 which may be converted to
1330 addr32 call __tls_get_addr
1331 can transition to a different access model.  For largepic,
1332 we also support:
1333 leaq foo@tlsgd(%rip), %rdi
1334 movabsq $__tls_get_addr@pltoff, %rax
1335 addq $r15, %rax
1336 call *%rax
1337 or
1338 leaq foo@tlsgd(%rip), %rdi
1339 movabsq $__tls_get_addr@pltoff, %rax
1340 addq $rbx, %rax
1341 call *%rax */
1342
1343 static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d };
1344
1345 if ((offset + 12) > sec->size)
1346 return FALSE;
1347
1348 call = contents + offset + 4;
1349 if (call[0] != 0x66
1350 || !((call[1] == 0x48
1351 && call[2] == 0xff
1352 && call[3] == 0x15)
1353 || (call[1] == 0x48
1354 && call[2] == 0x67
1355 && call[3] == 0xe8)
1356 || (call[1] == 0x66
1357 && call[2] == 0x48
1358 && call[3] == 0xe8)))
1359 {
1360 if (!ABI_64_P (abfd)
1361 || (offset + 19) > sec->size
1362 || offset < 3
1363 || memcmp (call - 7, leaq + 1, 3) != 0
1364 || memcmp (call, "\x48\xb8", 2) != 0
1365 || call[11] != 0x01
1366 || call[13] != 0xff
1367 || call[14] != 0xd0
1368 || !((call[10] == 0x48 && call[12] == 0xd8)
1369 || (call[10] == 0x4c && call[12] == 0xf8)))
1370 return FALSE;
1371 largepic = TRUE;
1372 }
1373 else if (ABI_64_P (abfd))
1374 {
1375 if (offset < 4
1376 || memcmp (contents + offset - 4, leaq, 4) != 0)
1377 return FALSE;
1378 }
1379 else
1380 {
1381 if (offset < 3
1382 || memcmp (contents + offset - 3, leaq + 1, 3) != 0)
1383 return FALSE;
1384 }
1385 indirect_call = call[2] == 0xff;
1386 }
1387 else
1388 {
1389 /* Check transition from LD access model. Only
1390 leaq foo@tlsld(%rip), %rdi;
1391 call __tls_get_addr@PLT
1392 or
1393 leaq foo@tlsld(%rip), %rdi;
1394 call *__tls_get_addr@GOTPCREL(%rip)
1395 which may be converted to
1396 addr32 call __tls_get_addr
1397 can transition to a different access model.  For largepic
1398 we also support:
1399 leaq foo@tlsld(%rip), %rdi
1400 movabsq $__tls_get_addr@pltoff, %rax
1401 addq $r15, %rax
1402 call *%rax
1403 or
1404 leaq foo@tlsld(%rip), %rdi
1405 movabsq $__tls_get_addr@pltoff, %rax
1406 addq $rbx, %rax
1407 call *%rax */
1408
1409 static const unsigned char lea[] = { 0x48, 0x8d, 0x3d };
1410
1411 if (offset < 3 || (offset + 9) > sec->size)
1412 return FALSE;
1413
1414 if (memcmp (contents + offset - 3, lea, 3) != 0)
1415 return FALSE;
1416
1417 call = contents + offset + 4;
1418 if (!(call[0] == 0xe8
1419 || (call[0] == 0xff && call[1] == 0x15)
1420 || (call[0] == 0x67 && call[1] == 0xe8)))
1421 {
1422 if (!ABI_64_P (abfd)
1423 || (offset + 19) > sec->size
1424 || memcmp (call, "\x48\xb8", 2) != 0
1425 || call[11] != 0x01
1426 || call[13] != 0xff
1427 || call[14] != 0xd0
1428 || !((call[10] == 0x48 && call[12] == 0xd8)
1429 || (call[10] == 0x4c && call[12] == 0xf8)))
1430 return FALSE;
1431 largepic = TRUE;
1432 }
1433 indirect_call = call[0] == 0xff;
1434 }
1435
1436 r_symndx = htab->r_sym (rel[1].r_info);
1437 if (r_symndx < symtab_hdr->sh_info)
1438 return FALSE;
1439
1440 tls_get_addr = FALSE;
1441 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1442 if (h != NULL && h->root.root.string != NULL)
1443 {
1444 struct elf_x86_64_link_hash_entry *eh
1445 = (struct elf_x86_64_link_hash_entry *) h;
1446 tls_get_addr = eh->tls_get_addr == 1;
1447 if (eh->tls_get_addr > 1)
1448 {
1449 /* Use strncmp to check __tls_get_addr since
1450 __tls_get_addr may be versioned. */
1451 if (strncmp (h->root.root.string, "__tls_get_addr", 14)
1452 == 0)
1453 {
1454 eh->tls_get_addr = 1;
1455 tls_get_addr = TRUE;
1456 }
1457 else
1458 eh->tls_get_addr = 0;
1459 }
1460 }
1461
1462 if (!tls_get_addr)
1463 return FALSE;
1464 else if (largepic)
1465 return ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PLTOFF64;
1466 else if (indirect_call)
1467 return ELF32_R_TYPE (rel[1].r_info) == R_X86_64_GOTPCRELX;
1468 else
1469 return (ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PC32
1470 || ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PLT32);
1471
1472 case R_X86_64_GOTTPOFF:
1473 /* Check transition from IE access model:
1474 mov foo@gottpoff(%rip), %reg
1475 add foo@gottpoff(%rip), %reg
1476 */
1477
1478 /* Check REX prefix first. */
1479 if (offset >= 3 && (offset + 4) <= sec->size)
1480 {
1481 val = bfd_get_8 (abfd, contents + offset - 3);
1482 if (val != 0x48 && val != 0x4c)
1483 {
1484 /* X32 may have 0x44 REX prefix or no REX prefix. */
1485 if (ABI_64_P (abfd))
1486 return FALSE;
1487 }
1488 }
1489 else
1490 {
1491 /* X32 may not have any REX prefix. */
1492 if (ABI_64_P (abfd))
1493 return FALSE;
1494 if (offset < 2 || (offset + 3) > sec->size)
1495 return FALSE;
1496 }
1497
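/* The opcode byte must be 0x8b (mov) or 0x03 (add), and the ModRM byte
   must have mod == 00 and r/m == 101, i.e. RIP-relative addressing,
   which is what (val & 0xc7) == 5 checks.  */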
1498 val = bfd_get_8 (abfd, contents + offset - 2);
1499 if (val != 0x8b && val != 0x03)
1500 return FALSE;
1501
1502 val = bfd_get_8 (abfd, contents + offset - 1);
1503 return (val & 0xc7) == 5;
1504
1505 case R_X86_64_GOTPC32_TLSDESC:
1506 /* Check transition from GDesc access model:
1507 leaq x@tlsdesc(%rip), %rax
1508
1509 Make sure it's a leaq adding rip to a 32-bit offset
1510 into any register, although it's probably almost always
1511 going to be rax. */
1512
1513 if (offset < 3 || (offset + 4) > sec->size)
1514 return FALSE;
1515
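/* The byte at offset - 3 must be a REX.W prefix (0x48, possibly with
   REX.R set, hence the 0xfb mask), the byte at offset - 2 the lea
   opcode 0x8d, and the ModRM byte must again select RIP-relative
   addressing.  */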
1516 val = bfd_get_8 (abfd, contents + offset - 3);
1517 if ((val & 0xfb) != 0x48)
1518 return FALSE;
1519
1520 if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d)
1521 return FALSE;
1522
1523 val = bfd_get_8 (abfd, contents + offset - 1);
1524 return (val & 0xc7) == 0x05;
1525
1526 case R_X86_64_TLSDESC_CALL:
1527 /* Check transition from GDesc access model:
1528 call *x@tlsdesc(%rax)
1529 */
1530 if (offset + 2 <= sec->size)
1531 {
1532 /* Make sure that it's a call *x@tlsdesc(%rax). */
1533 call = contents + offset;
1534 return call[0] == 0xff && call[1] == 0x10;
1535 }
1536
1537 return FALSE;
1538
1539 default:
1540 abort ();
1541 }
1542 }
1543
1544 /* Return TRUE if the TLS access transition is OK or no transition
1545 will be performed. Update R_TYPE if there is a transition. */
1546
1547 static bfd_boolean
1548 elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd,
1549 asection *sec, bfd_byte *contents,
1550 Elf_Internal_Shdr *symtab_hdr,
1551 struct elf_link_hash_entry **sym_hashes,
1552 unsigned int *r_type, int tls_type,
1553 const Elf_Internal_Rela *rel,
1554 const Elf_Internal_Rela *relend,
1555 struct elf_link_hash_entry *h,
1556 unsigned long r_symndx,
1557 bfd_boolean from_relocate_section)
1558 {
1559 unsigned int from_type = *r_type;
1560 unsigned int to_type = from_type;
1561 bfd_boolean check = TRUE;
1562
1563 /* Skip TLS transition for functions. */
1564 if (h != NULL
1565 && (h->type == STT_FUNC
1566 || h->type == STT_GNU_IFUNC))
1567 return TRUE;
1568
1569 switch (from_type)
1570 {
1571 case R_X86_64_TLSGD:
1572 case R_X86_64_GOTPC32_TLSDESC:
1573 case R_X86_64_TLSDESC_CALL:
1574 case R_X86_64_GOTTPOFF:
1575 if (bfd_link_executable (info))
1576 {
1577 if (h == NULL)
1578 to_type = R_X86_64_TPOFF32;
1579 else
1580 to_type = R_X86_64_GOTTPOFF;
1581 }
1582
1583 /* When we are called from elf_x86_64_relocate_section, there may
1584 be additional transitions based on TLS_TYPE. */
1585 if (from_relocate_section)
1586 {
1587 unsigned int new_to_type = to_type;
1588
1589 if (bfd_link_executable (info)
1590 && h != NULL
1591 && h->dynindx == -1
1592 && tls_type == GOT_TLS_IE)
1593 new_to_type = R_X86_64_TPOFF32;
1594
1595 if (to_type == R_X86_64_TLSGD
1596 || to_type == R_X86_64_GOTPC32_TLSDESC
1597 || to_type == R_X86_64_TLSDESC_CALL)
1598 {
1599 if (tls_type == GOT_TLS_IE)
1600 new_to_type = R_X86_64_GOTTPOFF;
1601 }
1602
1603 /* We checked the transition before when we were called from
1604 elf_x86_64_check_relocs. We only want to check the new
1605 transition which hasn't been checked before. */
1606 check = new_to_type != to_type && from_type == to_type;
1607 to_type = new_to_type;
1608 }
1609
1610 break;
1611
1612 case R_X86_64_TLSLD:
1613 if (bfd_link_executable (info))
1614 to_type = R_X86_64_TPOFF32;
1615 break;
1616
1617 default:
1618 return TRUE;
1619 }
1620
1621 /* Return TRUE if there is no transition. */
1622 if (from_type == to_type)
1623 return TRUE;
1624
1625 /* Check if the transition can be performed. */
1626 if (check
1627 && ! elf_x86_64_check_tls_transition (abfd, info, sec, contents,
1628 symtab_hdr, sym_hashes,
1629 from_type, rel, relend))
1630 {
1631 reloc_howto_type *from, *to;
1632 const char *name;
1633
1634 from = elf_x86_64_rtype_to_howto (abfd, from_type);
1635 to = elf_x86_64_rtype_to_howto (abfd, to_type);
1636
1637 if (h)
1638 name = h->root.root.string;
1639 else
1640 {
1641 struct elf_x86_64_link_hash_table *htab;
1642
1643 htab = elf_x86_64_hash_table (info);
1644 if (htab == NULL)
1645 name = "*unknown*";
1646 else
1647 {
1648 Elf_Internal_Sym *isym;
1649
1650 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1651 abfd, r_symndx);
1652 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1653 }
1654 }
1655
1656 (*_bfd_error_handler)
1657 (_("%B: TLS transition from %s to %s against `%s' at 0x%lx "
1658 "in section `%A' failed"),
1659 abfd, sec, from->name, to->name, name,
1660 (unsigned long) rel->r_offset);
1661 bfd_set_error (bfd_error_bad_value);
1662 return FALSE;
1663 }
1664
1665 *r_type = to_type;
1666 return TRUE;
1667 }
1668
1669 /* Rename some of the generic section flags to better document how they
1670 are used here. */
1671 #define need_convert_load sec_flg0
1672 #define check_relocs_failed sec_flg1
1673
1674 static bfd_boolean
1675 elf_x86_64_need_pic (bfd *input_bfd, asection *sec,
1676 struct elf_link_hash_entry *h,
1677 Elf_Internal_Shdr *symtab_hdr,
1678 Elf_Internal_Sym *isym,
1679 reloc_howto_type *howto)
1680 {
1681 const char *v = "";
1682 const char *und = "";
1683 const char *pic = "";
1684
1685 const char *name;
1686 if (h)
1687 {
1688 name = h->root.root.string;
1689 switch (ELF_ST_VISIBILITY (h->other))
1690 {
1691 case STV_HIDDEN:
1692 v = _("hidden symbol ");
1693 break;
1694 case STV_INTERNAL:
1695 v = _("internal symbol ");
1696 break;
1697 case STV_PROTECTED:
1698 v = _("protected symbol ");
1699 break;
1700 default:
1701 v = _("symbol ");
1702 pic = _("; recompile with -fPIC");
1703 break;
1704 }
1705
1706 if (!h->def_regular && !h->def_dynamic)
1707 und = _("undefined ");
1708 }
1709 else
1710 {
1711 name = bfd_elf_sym_name (input_bfd, symtab_hdr, isym, NULL);
1712 pic = _("; recompile with -fPIC");
1713 }
1714
1715 (*_bfd_error_handler) (_("%B: relocation %s against %s%s`%s' can "
1716 "not be used when making a shared object%s"),
1717 input_bfd, howto->name, und, v, name, pic);
1718 bfd_set_error (bfd_error_bad_value);
1719 sec->check_relocs_failed = 1;
1720 return FALSE;
1721 }
1722
1723 /* With the local symbol, foo, we convert
1724 mov foo@GOTPCREL(%rip), %reg
1725 to
1726 lea foo(%rip), %reg
1727 and convert
1728 call/jmp *foo@GOTPCREL(%rip)
1729 to
1730 nop call foo/jmp foo nop
1731 When PIC is false, convert
1732 test %reg, foo@GOTPCREL(%rip)
1733 to
1734 test $foo, %reg
1735 and convert
1736 binop foo@GOTPCREL(%rip), %reg
1737 to
1738 binop $foo, %reg
1739 where binop is one of adc, add, and, cmp, or, sbb, sub, xor
1740 instructions. */
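/* As an illustration (encoding shown for a 64-bit destination register
   such as %rax), the mov -> lea rewrite only changes the opcode byte:
     48 8b 05 <offset32>   mov  foo@GOTPCREL(%rip), %rax
   becomes
     48 8d 05 <offset32>   lea  foo(%rip), %rax
   with the relocation on <offset32> becoming an ordinary R_X86_64_PC32
   against foo.  */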
1741
1742 static bfd_boolean
1743 elf_x86_64_convert_load_reloc (bfd *abfd, asection *sec,
1744 bfd_byte *contents,
1745 Elf_Internal_Rela *irel,
1746 struct elf_link_hash_entry *h,
1747 bfd_boolean *converted,
1748 struct bfd_link_info *link_info)
1749 {
1750 struct elf_x86_64_link_hash_table *htab;
1751 bfd_boolean is_pic;
1752 bfd_boolean require_reloc_pc32;
1753 bfd_boolean relocx;
1754 bfd_boolean to_reloc_pc32;
1755 asection *tsec;
1756 char symtype;
1757 bfd_signed_vma raddend;
1758 unsigned int opcode;
1759 unsigned int modrm;
1760 unsigned int r_type = ELF32_R_TYPE (irel->r_info);
1761 unsigned int r_symndx;
1762 bfd_vma toff;
1763 bfd_vma roff = irel->r_offset;
1764
1765 if (roff < (r_type == R_X86_64_REX_GOTPCRELX ? 3 : 2))
1766 return TRUE;
1767
1768 raddend = irel->r_addend;
1769 /* Addend for 32-bit PC-relative relocation must be -4. */
1770 if (raddend != -4)
1771 return TRUE;
1772
1773 htab = elf_x86_64_hash_table (link_info);
1774 is_pic = bfd_link_pic (link_info);
1775
1776 relocx = (r_type == R_X86_64_GOTPCRELX
1777 || r_type == R_X86_64_REX_GOTPCRELX);
1778
1779 /* TRUE if we can convert only to R_X86_64_PC32. Enable it for
1780 --no-relax. */
1781 require_reloc_pc32
1782 = link_info->disable_target_specific_optimizations > 1;
1783
1784 r_symndx = htab->r_sym (irel->r_info);
1785
1786 opcode = bfd_get_8 (abfd, contents + roff - 2);
1787
1788 /* The mov to lea conversion has been supported for a long time, so do it even for plain R_X86_64_GOTPCREL. */
1789 if (opcode != 0x8b)
1790 {
1791 /* Only convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX
1792 for call, jmp or one of adc, add, and, cmp, or, sbb, sub,
1793 test, xor instructions. */
1794 if (!relocx)
1795 return TRUE;
1796 }
1797
1798 /* We convert only to R_X86_64_PC32 if any of the following holds:
1799 1. The instruction is a branch.
1800 2. The relocation is R_X86_64_GOTPCREL, since we can't modify
1801 the REX byte.
1802 3. require_reloc_pc32 is true.
1803 4. We are generating PIC. */
1804 to_reloc_pc32 = (opcode == 0xff
1805 || !relocx
1806 || require_reloc_pc32
1807 || is_pic);
1808
1809 /* Get the symbol referred to by the reloc. */
1810 if (h == NULL)
1811 {
1812 Elf_Internal_Sym *isym
1813 = bfd_sym_from_r_symndx (&htab->sym_cache, abfd, r_symndx);
1814
1815 /* Skip relocation against undefined symbols. */
1816 if (isym->st_shndx == SHN_UNDEF)
1817 return TRUE;
1818
1819 symtype = ELF_ST_TYPE (isym->st_info);
1820
1821 if (isym->st_shndx == SHN_ABS)
1822 tsec = bfd_abs_section_ptr;
1823 else if (isym->st_shndx == SHN_COMMON)
1824 tsec = bfd_com_section_ptr;
1825 else if (isym->st_shndx == SHN_X86_64_LCOMMON)
1826 tsec = &_bfd_elf_large_com_section;
1827 else
1828 tsec = bfd_section_from_elf_index (abfd, isym->st_shndx);
1829
1830 toff = isym->st_value;
1831 }
1832 else
1833 {
1834 /* An undefined weak symbol is only bound locally in an executable,
1835 and its reference is resolved to 0 without relocation
1836 overflow. We can only perform this optimization for
1837 GOTPCRELX relocations since we need to modify the REX byte.
1838 It is OK to convert a mov with R_X86_64_GOTPCREL to
1839 R_X86_64_PC32. */
1840 if ((relocx || opcode == 0x8b)
1841 && UNDEFINED_WEAK_RESOLVED_TO_ZERO (link_info,
1842 TRUE,
1843 elf_x86_64_hash_entry (h)))
1844 {
1845 if (opcode == 0xff)
1846 {
1847 /* Skip for branch instructions since R_X86_64_PC32
1848 may overflow. */
1849 if (require_reloc_pc32)
1850 return TRUE;
1851 }
1852 else if (relocx)
1853 {
1854 /* For non-branch instructions, we can convert to
1855 R_X86_64_32/R_X86_64_32S since we know if there
1856 is a REX byte. */
1857 to_reloc_pc32 = FALSE;
1858 }
1859
1860 /* Since we don't know the current PC when PIC is true,
1861 we can't convert to R_X86_64_PC32. */
1862 if (to_reloc_pc32 && is_pic)
1863 return TRUE;
1864
1865 goto convert;
1866 }
1867 /* Avoid optimizing GOTPCREL relocations against _DYNAMIC since
1868 ld.so may use its link-time address. */
1869 else if ((h->def_regular
1870 || h->root.type == bfd_link_hash_defined
1871 || h->root.type == bfd_link_hash_defweak)
1872 && h != htab->elf.hdynamic
1873 && SYMBOL_REFERENCES_LOCAL (link_info, h))
1874 {
1875 /* bfd_link_hash_new or bfd_link_hash_undefined is
1876 set by an assignment in a linker script in
1877 bfd_elf_record_link_assignment. */
1878 if (h->def_regular
1879 && (h->root.type == bfd_link_hash_new
1880 || h->root.type == bfd_link_hash_undefined))
1881 {
1882 /* Skip since R_X86_64_32/R_X86_64_32S may overflow. */
1883 if (require_reloc_pc32)
1884 return TRUE;
1885 goto convert;
1886 }
1887 tsec = h->root.u.def.section;
1888 toff = h->root.u.def.value;
1889 symtype = h->type;
1890 }
1891 else
1892 return TRUE;
1893 }
1894
1895 /* Don't convert GOTPCREL relocations against large sections. */
1896 if (elf_section_data (tsec) != NULL
1897 && (elf_section_flags (tsec) & SHF_X86_64_LARGE) != 0)
1898 return TRUE;
1899
1900 /* We can only estimate relocation overflow for R_X86_64_PC32. */
1901 if (!to_reloc_pc32)
1902 goto convert;
1903
1904 if (tsec->sec_info_type == SEC_INFO_TYPE_MERGE)
1905 {
1906 /* At this stage in linking, no SEC_MERGE symbol has been
1907 adjusted, so all references to such symbols need to be
1908 passed through _bfd_merged_section_offset. (Later, in
1909 relocate_section, all SEC_MERGE symbols *except* for
1910 section symbols have been adjusted.)
1911
1912 gas may reduce relocations against symbols in SEC_MERGE
1913 sections to a relocation against the section symbol when
1914 the original addend was zero. When the reloc is against
1915 a section symbol we should include the addend in the
1916 offset passed to _bfd_merged_section_offset, since the
1917 location of interest is the original symbol. On the
1918 other hand, an access to "sym+addend" where "sym" is not
1919 a section symbol should not include the addend; such an
1920 access is presumed to be an offset from "sym", and the
1921 location of interest is just "sym". */
1922 if (symtype == STT_SECTION)
1923 toff += raddend;
1924
1925 toff = _bfd_merged_section_offset (abfd, &tsec,
1926 elf_section_data (tsec)->sec_info,
1927 toff);
1928
1929 if (symtype != STT_SECTION)
1930 toff += raddend;
1931 }
1932 else
1933 toff += raddend;
1934
1935 /* Don't convert if R_X86_64_PC32 relocation overflows. */
1936 if (tsec->output_section == sec->output_section)
1937 {
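/* Biasing by 0x80000000 maps the signed 32-bit range onto
   [0, 0xffffffff], so this test rejects any displacement outside
   [-2^31, 2^31 - 1].  */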
1938 if ((toff - roff + 0x80000000) > 0xffffffff)
1939 return TRUE;
1940 }
1941 else
1942 {
1943 bfd_signed_vma distance;
1944
1945 /* At this point, we know the load address of neither the TSEC
1946 section nor the SEC section. We estimate the distance between
1947 SEC and TSEC. We store the estimated distances in the
1948 compressed_size field of the output section, which is only
1949 used to decompress the compressed input section. */
1950 if (sec->output_section->compressed_size == 0)
1951 {
1952 asection *asect;
1953 bfd_size_type size = 0;
1954 for (asect = link_info->output_bfd->sections;
1955 asect != NULL;
1956 asect = asect->next)
1957 /* Skip debug sections since compressed_size is used to
1958 compress debug sections. */
1959 if ((asect->flags & SEC_DEBUGGING) == 0)
1960 {
1961 asection *i;
1962 for (i = asect->map_head.s;
1963 i != NULL;
1964 i = i->map_head.s)
1965 {
1966 size = align_power (size, i->alignment_power);
1967 size += i->size;
1968 }
1969 asect->compressed_size = size;
1970 }
1971 }
1972
1973 /* Don't convert GOTPCREL relocations if TSEC isn't placed
1974 after SEC. */
1975 distance = (tsec->output_section->compressed_size
1976 - sec->output_section->compressed_size);
1977 if (distance < 0)
1978 return TRUE;
1979
1980 /* Take PT_GNU_RELRO segment into account by adding
1981 maxpagesize. */
1982 if ((toff + distance + get_elf_backend_data (abfd)->maxpagesize
1983 - roff + 0x80000000) > 0xffffffff)
1984 return TRUE;
1985 }
1986
1987 convert:
1988 if (opcode == 0xff)
1989 {
1990 /* We have "call/jmp *foo@GOTPCREL(%rip)". */
1991 unsigned int nop;
1992 unsigned int disp;
1993 bfd_vma nop_offset;
1994
1995 /* Convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX to
1996 R_X86_64_PC32. */
1997 modrm = bfd_get_8 (abfd, contents + roff - 1);
1998 if (modrm == 0x25)
1999 {
2000 /* Convert to "jmp foo nop". */
2001 modrm = 0xe9;
2002 nop = NOP_OPCODE;
2003 nop_offset = irel->r_offset + 3;
2004 disp = bfd_get_32 (abfd, contents + irel->r_offset);
2005 irel->r_offset -= 1;
2006 bfd_put_32 (abfd, disp, contents + irel->r_offset);
2007 }
2008 else
2009 {
2010 struct elf_x86_64_link_hash_entry *eh
2011 = (struct elf_x86_64_link_hash_entry *) h;
2012
2013 /* Convert to "nop call foo". ADDR_PREFIX_OPCODE
2014 is a nop prefix. */
2015 modrm = 0xe8;
2016 /* To support TLS optimization, always use addr32 prefix for
2017 "call *__tls_get_addr@GOTPCREL(%rip)". */
2018 if (eh && eh->tls_get_addr == 1)
2019 {
2020 nop = 0x67;
2021 nop_offset = irel->r_offset - 2;
2022 }
2023 else
2024 {
2025 nop = link_info->call_nop_byte;
2026 if (link_info->call_nop_as_suffix)
2027 {
2028 nop_offset = irel->r_offset + 3;
2029 disp = bfd_get_32 (abfd, contents + irel->r_offset);
2030 irel->r_offset -= 1;
2031 bfd_put_32 (abfd, disp, contents + irel->r_offset);
2032 }
2033 else
2034 nop_offset = irel->r_offset - 2;
2035 }
2036 }
2037 bfd_put_8 (abfd, nop, contents + nop_offset);
2038 bfd_put_8 (abfd, modrm, contents + irel->r_offset - 1);
2039 r_type = R_X86_64_PC32;
2040 }
2041 else
2042 {
2043 unsigned int rex;
2044 unsigned int rex_mask = REX_R;
2045
2046 if (r_type == R_X86_64_REX_GOTPCRELX)
2047 rex = bfd_get_8 (abfd, contents + roff - 3);
2048 else
2049 rex = 0;
2050
2051 if (opcode == 0x8b)
2052 {
2053 if (to_reloc_pc32)
2054 {
2055 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
2056 "lea foo(%rip), %reg". */
2057 opcode = 0x8d;
2058 r_type = R_X86_64_PC32;
2059 }
2060 else
2061 {
2062 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
2063 "mov $foo, %reg". */
2064 opcode = 0xc7;
2065 modrm = bfd_get_8 (abfd, contents + roff - 1);
2066 modrm = 0xc0 | (modrm & 0x38) >> 3;
2067 if ((rex & REX_W) != 0
2068 && ABI_64_P (link_info->output_bfd))
2069 {
2070 /* Keep the REX_W bit in REX byte for LP64. */
2071 r_type = R_X86_64_32S;
2072 goto rewrite_modrm_rex;
2073 }
2074 else
2075 {
2076 /* If the REX_W bit in REX byte isn't needed,
2077 use R_X86_64_32 and clear the W bit to avoid
2078 sign-extend imm32 to imm64. */
2079 r_type = R_X86_64_32;
2080 /* Clear the W bit in REX byte. */
2081 rex_mask |= REX_W;
2082 goto rewrite_modrm_rex;
2083 }
2084 }
2085 }
2086 else
2087 {
2088 /* R_X86_64_PC32 isn't supported. */
2089 if (to_reloc_pc32)
2090 return TRUE;
2091
2092 modrm = bfd_get_8 (abfd, contents + roff - 1);
2093 if (opcode == 0x85)
2094 {
2095 /* Convert "test %reg, foo@GOTPCREL(%rip)" to
2096 "test $foo, %reg". */
2097 modrm = 0xc0 | (modrm & 0x38) >> 3;
2098 opcode = 0xf7;
2099 }
2100 else
2101 {
2102 /* Convert "binop foo@GOTPCREL(%rip), %reg" to
2103 "binop $foo, %reg". */
2104 modrm = 0xc0 | (modrm & 0x38) >> 3 | (opcode & 0x3c);
2105 opcode = 0x81;
2106 }
2107
2108 /* With REX.W, imm32 is sign-extended to imm64 and R_X86_64_32S is
2109 needed; otherwise R_X86_64_32 avoids that sign-extension overflow. */
2110 r_type = (rex & REX_W) != 0 ? R_X86_64_32S : R_X86_64_32;
2111
2112 rewrite_modrm_rex:
2113 bfd_put_8 (abfd, modrm, contents + roff - 1);
2114
2115 if (rex)
2116 {
2117 /* Move the R bit to the B bit in REX byte. */
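/* For example, REX.WR (0x4c) for "mov foo@GOTPCREL(%rip), %r9"
   becomes REX.WB (0x49) for "mov $foo, %r9".  */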
2118 rex = (rex & ~rex_mask) | (rex & REX_R) >> 2;
2119 bfd_put_8 (abfd, rex, contents + roff - 3);
2120 }
2121
2122 /* No addend for R_X86_64_32/R_X86_64_32S relocations. */
2123 irel->r_addend = 0;
2124 }
2125
2126 bfd_put_8 (abfd, opcode, contents + roff - 2);
2127 }
2128
2129 irel->r_info = htab->r_info (r_symndx, r_type);
2130
2131 *converted = TRUE;
2132
2133 return TRUE;
2134 }
2135
2136 /* Look through the relocs for a section during the first phase, and
2137 calculate needed space in the global offset table, procedure
2138 linkage table, and dynamic reloc sections. */
2139
2140 static bfd_boolean
2141 elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info,
2142 asection *sec,
2143 const Elf_Internal_Rela *relocs)
2144 {
2145 struct elf_x86_64_link_hash_table *htab;
2146 Elf_Internal_Shdr *symtab_hdr;
2147 struct elf_link_hash_entry **sym_hashes;
2148 const Elf_Internal_Rela *rel;
2149 const Elf_Internal_Rela *rel_end;
2150 asection *sreloc;
2151 bfd_byte *contents;
2152 bfd_boolean use_plt_got;
2153
2154 if (bfd_link_relocatable (info))
2155 return TRUE;
2156
2157 /* Don't do anything special with non-loaded, non-alloced sections.
2158 In particular, any relocs in such sections should not affect GOT
2159 and PLT reference counting (ie. we don't allow them to create GOT
2160 or PLT entries), there's no possibility or desire to optimize TLS
2161 relocs, and there's not much point in propagating relocs to shared
2162 libs that the dynamic linker won't relocate. */
2163 if ((sec->flags & SEC_ALLOC) == 0)
2164 return TRUE;
2165
2166 BFD_ASSERT (is_x86_64_elf (abfd));
2167
2168 htab = elf_x86_64_hash_table (info);
2169 if (htab == NULL)
2170 {
2171 sec->check_relocs_failed = 1;
2172 return FALSE;
2173 }
2174
2175 /* Get the section contents. */
2176 if (elf_section_data (sec)->this_hdr.contents != NULL)
2177 contents = elf_section_data (sec)->this_hdr.contents;
2178 else if (!bfd_malloc_and_get_section (abfd, sec, &contents))
2179 {
2180 sec->check_relocs_failed = 1;
2181 return FALSE;
2182 }
2183
2184 use_plt_got = get_elf_x86_64_backend_data (abfd) == &elf_x86_64_arch_bed;
2185
2186 symtab_hdr = &elf_symtab_hdr (abfd);
2187 sym_hashes = elf_sym_hashes (abfd);
2188
2189 sreloc = NULL;
2190
2191 rel_end = relocs + sec->reloc_count;
2192 for (rel = relocs; rel < rel_end; rel++)
2193 {
2194 unsigned int r_type;
2195 unsigned long r_symndx;
2196 struct elf_link_hash_entry *h;
2197 struct elf_x86_64_link_hash_entry *eh;
2198 Elf_Internal_Sym *isym;
2199 const char *name;
2200 bfd_boolean size_reloc;
2201
2202 r_symndx = htab->r_sym (rel->r_info);
2203 r_type = ELF32_R_TYPE (rel->r_info);
2204
2205 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
2206 {
2207 (*_bfd_error_handler) (_("%B: bad symbol index: %d"),
2208 abfd, r_symndx);
2209 goto error_return;
2210 }
2211
2212 if (r_symndx < symtab_hdr->sh_info)
2213 {
2214 /* A local symbol. */
2215 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2216 abfd, r_symndx);
2217 if (isym == NULL)
2218 goto error_return;
2219
2220 /* Check relocation against local STT_GNU_IFUNC symbol. */
2221 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
2222 {
2223 h = elf_x86_64_get_local_sym_hash (htab, abfd, rel,
2224 TRUE);
2225 if (h == NULL)
2226 goto error_return;
2227
2228 /* Fake a STT_GNU_IFUNC symbol. */
2229 h->type = STT_GNU_IFUNC;
2230 h->def_regular = 1;
2231 h->ref_regular = 1;
2232 h->forced_local = 1;
2233 h->root.type = bfd_link_hash_defined;
2234 }
2235 else
2236 h = NULL;
2237 }
2238 else
2239 {
2240 isym = NULL;
2241 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
2242 while (h->root.type == bfd_link_hash_indirect
2243 || h->root.type == bfd_link_hash_warning)
2244 h = (struct elf_link_hash_entry *) h->root.u.i.link;
2245 }
2246
2247 /* Check invalid x32 relocations. */
2248 if (!ABI_64_P (abfd))
2249 switch (r_type)
2250 {
2251 default:
2252 break;
2253
2254 case R_X86_64_DTPOFF64:
2255 case R_X86_64_TPOFF64:
2256 case R_X86_64_PC64:
2257 case R_X86_64_GOTOFF64:
2258 case R_X86_64_GOT64:
2259 case R_X86_64_GOTPCREL64:
2260 case R_X86_64_GOTPC64:
2261 case R_X86_64_GOTPLT64:
2262 case R_X86_64_PLTOFF64:
2263 {
2264 if (h)
2265 name = h->root.root.string;
2266 else
2267 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
2268 NULL);
2269 (*_bfd_error_handler)
2270 (_("%B: relocation %s against symbol `%s' isn't "
2271 "supported in x32 mode"), abfd,
2272 x86_64_elf_howto_table[r_type].name, name);
2273 bfd_set_error (bfd_error_bad_value);
2274 goto error_return;
2275 }
2276 break;
2277 }
2278
2279 if (h != NULL)
2280 {
2281 switch (r_type)
2282 {
2283 default:
2284 break;
2285
2286 case R_X86_64_PC32_BND:
2287 case R_X86_64_PLT32_BND:
2288 case R_X86_64_PC32:
2289 case R_X86_64_PLT32:
2290 case R_X86_64_32:
2291 case R_X86_64_64:
2292 /* MPX PLT is supported only if elf_x86_64_arch_bed
2293 is used in 64-bit mode. */
2294 if (ABI_64_P (abfd)
2295 && info->bndplt
2296 && (get_elf_x86_64_backend_data (abfd)
2297 == &elf_x86_64_arch_bed))
2298 {
2299 elf_x86_64_hash_entry (h)->has_bnd_reloc = 1;
2300
2301 /* Create the second PLT for Intel MPX support. */
2302 if (htab->plt_bnd == NULL)
2303 {
2304 unsigned int plt_bnd_align;
2305 const struct elf_backend_data *bed;
2306
2307 bed = get_elf_backend_data (info->output_bfd);
2308 BFD_ASSERT (sizeof (elf_x86_64_bnd_plt2_entry) == 8
2309 && (sizeof (elf_x86_64_bnd_plt2_entry)
2310 == sizeof (elf_x86_64_legacy_plt2_entry)));
2311 plt_bnd_align = 3;
2312
2313 if (htab->elf.dynobj == NULL)
2314 htab->elf.dynobj = abfd;
2315 htab->plt_bnd
2316 = bfd_make_section_anyway_with_flags (htab->elf.dynobj,
2317 ".plt.bnd",
2318 (bed->dynamic_sec_flags
2319 | SEC_ALLOC
2320 | SEC_CODE
2321 | SEC_LOAD
2322 | SEC_READONLY));
2323 if (htab->plt_bnd == NULL
2324 || !bfd_set_section_alignment (htab->elf.dynobj,
2325 htab->plt_bnd,
2326 plt_bnd_align))
2327 goto error_return;
2328 }
2329 }
2330
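/* Fall through.  */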
2331 case R_X86_64_32S:
2332 case R_X86_64_PC64:
2333 case R_X86_64_GOTPCREL:
2334 case R_X86_64_GOTPCRELX:
2335 case R_X86_64_REX_GOTPCRELX:
2336 case R_X86_64_GOTPCREL64:
2337 if (htab->elf.dynobj == NULL)
2338 htab->elf.dynobj = abfd;
2339 /* Create the ifunc sections for static executables. */
2340 if (h->type == STT_GNU_IFUNC
2341 && !_bfd_elf_create_ifunc_sections (htab->elf.dynobj,
2342 info))
2343 goto error_return;
2344 break;
2345 }
2346
2347 /* It is referenced by a non-shared object. */
2348 h->ref_regular = 1;
2349 h->root.non_ir_ref = 1;
2350
2351 if (h->type == STT_GNU_IFUNC)
2352 elf_tdata (info->output_bfd)->has_gnu_symbols
2353 |= elf_gnu_symbol_ifunc;
2354 }
2355
2356 if (! elf_x86_64_tls_transition (info, abfd, sec, contents,
2357 symtab_hdr, sym_hashes,
2358 &r_type, GOT_UNKNOWN,
2359 rel, rel_end, h, r_symndx, FALSE))
2360 goto error_return;
2361
2362 eh = (struct elf_x86_64_link_hash_entry *) h;
2363 switch (r_type)
2364 {
2365 case R_X86_64_TLSLD:
2366 htab->tls_ld_got.refcount += 1;
2367 goto create_got;
2368
2369 case R_X86_64_TPOFF32:
2370 if (!bfd_link_executable (info) && ABI_64_P (abfd))
2371 return elf_x86_64_need_pic (abfd, sec, h, symtab_hdr, isym,
2372 &x86_64_elf_howto_table[r_type]);
2373 if (eh != NULL)
2374 eh->has_got_reloc = 1;
2375 break;
2376
2377 case R_X86_64_GOTTPOFF:
2378 if (!bfd_link_executable (info))
2379 info->flags |= DF_STATIC_TLS;
2380 /* Fall through */
2381
2382 case R_X86_64_GOT32:
2383 case R_X86_64_GOTPCREL:
2384 case R_X86_64_GOTPCRELX:
2385 case R_X86_64_REX_GOTPCRELX:
2386 case R_X86_64_TLSGD:
2387 case R_X86_64_GOT64:
2388 case R_X86_64_GOTPCREL64:
2389 case R_X86_64_GOTPLT64:
2390 case R_X86_64_GOTPC32_TLSDESC:
2391 case R_X86_64_TLSDESC_CALL:
2392 /* This symbol requires a global offset table entry. */
2393 {
2394 int tls_type, old_tls_type;
2395
2396 switch (r_type)
2397 {
2398 default: tls_type = GOT_NORMAL; break;
2399 case R_X86_64_TLSGD: tls_type = GOT_TLS_GD; break;
2400 case R_X86_64_GOTTPOFF: tls_type = GOT_TLS_IE; break;
2401 case R_X86_64_GOTPC32_TLSDESC:
2402 case R_X86_64_TLSDESC_CALL:
2403 tls_type = GOT_TLS_GDESC; break;
2404 }
2405
2406 if (h != NULL)
2407 {
2408 h->got.refcount += 1;
2409 old_tls_type = eh->tls_type;
2410 }
2411 else
2412 {
2413 bfd_signed_vma *local_got_refcounts;
2414
2415 /* This is a global offset table entry for a local symbol. */
2416 local_got_refcounts = elf_local_got_refcounts (abfd);
2417 if (local_got_refcounts == NULL)
2418 {
2419 bfd_size_type size;
2420
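/* A single allocation holds three parallel per-local-symbol
   arrays: sh_info GOT reference counts (bfd_signed_vma), then
   sh_info TLSDESC GOT offsets (bfd_vma), then sh_info TLS type
   bytes (char); see the pointer arithmetic below.  */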
2421 size = symtab_hdr->sh_info;
2422 size *= sizeof (bfd_signed_vma)
2423 + sizeof (bfd_vma) + sizeof (char);
2424 local_got_refcounts = ((bfd_signed_vma *)
2425 bfd_zalloc (abfd, size));
2426 if (local_got_refcounts == NULL)
2427 goto error_return;
2428 elf_local_got_refcounts (abfd) = local_got_refcounts;
2429 elf_x86_64_local_tlsdesc_gotent (abfd)
2430 = (bfd_vma *) (local_got_refcounts + symtab_hdr->sh_info);
2431 elf_x86_64_local_got_tls_type (abfd)
2432 = (char *) (local_got_refcounts + 2 * symtab_hdr->sh_info);
2433 }
2434 local_got_refcounts[r_symndx] += 1;
2435 old_tls_type
2436 = elf_x86_64_local_got_tls_type (abfd) [r_symndx];
2437 }
2438
2439 /* If a TLS symbol is accessed using IE at least once,
2440 there is no point in using a dynamic model for it. */
2441 if (old_tls_type != tls_type && old_tls_type != GOT_UNKNOWN
2442 && (! GOT_TLS_GD_ANY_P (old_tls_type)
2443 || tls_type != GOT_TLS_IE))
2444 {
2445 if (old_tls_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (tls_type))
2446 tls_type = old_tls_type;
2447 else if (GOT_TLS_GD_ANY_P (old_tls_type)
2448 && GOT_TLS_GD_ANY_P (tls_type))
2449 tls_type |= old_tls_type;
2450 else
2451 {
2452 if (h)
2453 name = h->root.root.string;
2454 else
2455 name = bfd_elf_sym_name (abfd, symtab_hdr,
2456 isym, NULL);
2457 (*_bfd_error_handler)
2458 (_("%B: '%s' accessed both as normal and thread local symbol"),
2459 abfd, name);
2460 bfd_set_error (bfd_error_bad_value);
2461 goto error_return;
2462 }
2463 }
2464
2465 if (old_tls_type != tls_type)
2466 {
2467 if (eh != NULL)
2468 eh->tls_type = tls_type;
2469 else
2470 elf_x86_64_local_got_tls_type (abfd) [r_symndx] = tls_type;
2471 }
2472 }
2473 /* Fall through */
2474
2475 case R_X86_64_GOTOFF64:
2476 case R_X86_64_GOTPC32:
2477 case R_X86_64_GOTPC64:
2478 create_got:
2479 if (eh != NULL)
2480 eh->has_got_reloc = 1;
2481 if (htab->elf.sgot == NULL)
2482 {
2483 if (htab->elf.dynobj == NULL)
2484 htab->elf.dynobj = abfd;
2485 if (!_bfd_elf_create_got_section (htab->elf.dynobj,
2486 info))
2487 goto error_return;
2488 }
2489 break;
2490
2491 case R_X86_64_PLT32:
2492 case R_X86_64_PLT32_BND:
2493 /* This symbol requires a procedure linkage table entry. We
2494 actually build the entry in adjust_dynamic_symbol,
2495 because this might be a case of linking PIC code which is
2496 never referenced by a dynamic object, in which case we
2497 don't need to generate a procedure linkage table entry
2498 after all. */
2499
2500 /* If this is a local symbol, we resolve it directly without
2501 creating a procedure linkage table entry. */
2502 if (h == NULL)
2503 continue;
2504
2505 eh->has_got_reloc = 1;
2506 h->needs_plt = 1;
2507 h->plt.refcount += 1;
2508 break;
2509
2510 case R_X86_64_PLTOFF64:
2511 /* This tries to form the 'address' of a function relative
2512 to GOT. For global symbols we need a PLT entry. */
2513 if (h != NULL)
2514 {
2515 h->needs_plt = 1;
2516 h->plt.refcount += 1;
2517 }
2518 goto create_got;
2519
2520 case R_X86_64_SIZE32:
2521 case R_X86_64_SIZE64:
2522 size_reloc = TRUE;
2523 goto do_size;
2524
2525 case R_X86_64_32:
2526 if (!ABI_64_P (abfd))
2527 goto pointer;
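/* Fall through.  */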
2528 case R_X86_64_8:
2529 case R_X86_64_16:
2530 case R_X86_64_32S:
2531 /* Check relocation overflow as these relocs may lead to
2532 run-time relocation overflow. Don't error out for
2533 sections we don't care about, such as debug sections or
2534 when relocation overflow check is disabled. */
2535 if (!info->no_reloc_overflow_check
2536 && (bfd_link_pic (info)
2537 || (bfd_link_executable (info)
2538 && h != NULL
2539 && !h->def_regular
2540 && h->def_dynamic
2541 && (sec->flags & SEC_READONLY) == 0)))
2542 return elf_x86_64_need_pic (abfd, sec, h, symtab_hdr, isym,
2543 &x86_64_elf_howto_table[r_type]);
2544 /* Fall through. */
2545
2546 case R_X86_64_PC8:
2547 case R_X86_64_PC16:
2548 case R_X86_64_PC32:
2549 case R_X86_64_PC32_BND:
2550 case R_X86_64_PC64:
2551 case R_X86_64_64:
2552 pointer:
2553 if (eh != NULL && (sec->flags & SEC_CODE) != 0)
2554 eh->has_non_got_reloc = 1;
2555 /* We are called after all symbols have been resolved. Only
2556 relocations against STT_GNU_IFUNC symbols must go through
2557 the PLT. */
2558 if (h != NULL
2559 && (bfd_link_executable (info)
2560 || h->type == STT_GNU_IFUNC))
2561 {
2562 /* If this reloc is in a read-only section, we might
2563 need a copy reloc. We can't check reliably at this
2564 stage whether the section is read-only, as input
2565 sections have not yet been mapped to output sections.
2566 Tentatively set the flag for now, and correct in
2567 adjust_dynamic_symbol. */
2568 h->non_got_ref = 1;
2569
2570 /* We may need a .plt entry if the symbol is a function
2571 defined in a shared lib or is a STT_GNU_IFUNC function
2572 referenced from the code or read-only section. */
2573 if (!h->def_regular
2574 || (sec->flags & (SEC_CODE | SEC_READONLY)) != 0)
2575 h->plt.refcount += 1;
2576
2577 if (r_type == R_X86_64_PC32)
2578 {
2579 /* Since something like ".long foo - ." may be used
2580 as pointer, make sure that PLT is used if foo is
2581 a function defined in a shared library. */
2582 if ((sec->flags & SEC_CODE) == 0)
2583 h->pointer_equality_needed = 1;
2584 }
2585 else if (r_type != R_X86_64_PC32_BND
2586 && r_type != R_X86_64_PC64)
2587 {
2588 h->pointer_equality_needed = 1;
2589 /* At run-time, R_X86_64_64 can be resolved for both
2590 x86-64 and x32. But R_X86_64_32 and R_X86_64_32S
2591 can only be resolved for x32. */
2592 if ((sec->flags & SEC_READONLY) == 0
2593 && (r_type == R_X86_64_64
2594 || (!ABI_64_P (abfd)
2595 && (r_type == R_X86_64_32
2596 || r_type == R_X86_64_32S))))
2597 eh->func_pointer_refcount += 1;
2598 }
2599 }
2600
2601 size_reloc = FALSE;
2602 do_size:
2603 /* If we are creating a shared library, and this is a reloc
2604 against a global symbol, or a non PC relative reloc
2605 against a local symbol, then we need to copy the reloc
2606 into the shared library. However, if we are linking with
2607 -Bsymbolic, we do not need to copy a reloc against a
2608 global symbol which is defined in an object we are
2609 including in the link (i.e., DEF_REGULAR is set). At
2610 this point we have not seen all the input files, so it is
2611 possible that DEF_REGULAR is not set now but will be set
2612 later (it is never cleared). In case of a weak definition,
2613 DEF_REGULAR may be cleared later by a strong definition in
2614 a shared library. We account for that possibility below by
2615 storing information in the relocs_copied field of the hash
2616 table entry. A similar situation occurs when creating
2617 shared libraries and symbol visibility changes render the
2618 symbol local.
2619
2620 If on the other hand, we are creating an executable, we
2621 may need to keep relocations for symbols satisfied by a
2622 dynamic library if we manage to avoid copy relocs for the
2623 symbol.
2624
2625 Generate dynamic pointer relocation against STT_GNU_IFUNC
2626 symbol in the non-code section. */
2627 if ((bfd_link_pic (info)
2628 && (! IS_X86_64_PCREL_TYPE (r_type)
2629 || (h != NULL
2630 && (! (bfd_link_pie (info)
2631 || SYMBOLIC_BIND (info, h))
2632 || h->root.type == bfd_link_hash_defweak
2633 || !h->def_regular))))
2634 || (h != NULL
2635 && h->type == STT_GNU_IFUNC
2636 && r_type == htab->pointer_r_type
2637 && (sec->flags & SEC_CODE) == 0)
2638 || (ELIMINATE_COPY_RELOCS
2639 && !bfd_link_pic (info)
2640 && h != NULL
2641 && (h->root.type == bfd_link_hash_defweak
2642 || !h->def_regular)))
2643 {
2644 struct elf_dyn_relocs *p;
2645 struct elf_dyn_relocs **head;
2646
2647 /* We must copy these reloc types into the output file.
2648 Create a reloc section in dynobj and make room for
2649 this reloc. */
2650 if (sreloc == NULL)
2651 {
2652 if (htab->elf.dynobj == NULL)
2653 htab->elf.dynobj = abfd;
2654
2655 sreloc = _bfd_elf_make_dynamic_reloc_section
2656 (sec, htab->elf.dynobj, ABI_64_P (abfd) ? 3 : 2,
2657 abfd, /*rela?*/ TRUE);
2658
2659 if (sreloc == NULL)
2660 goto error_return;
2661 }
2662
2663 /* If this is a global symbol, we count the number of
2664 relocations we need for this symbol. */
2665 if (h != NULL)
2666 head = &eh->dyn_relocs;
2667 else
2668 {
2669 /* Track dynamic relocs needed for local syms too.
2670 We really need local syms available to do this
2671 easily. Oh well. */
2672 asection *s;
2673 void **vpp;
2674
2675 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2676 abfd, r_symndx);
2677 if (isym == NULL)
2678 goto error_return;
2679
2680 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
2681 if (s == NULL)
2682 s = sec;
2683
2684 /* Beware of type punned pointers vs strict aliasing
2685 rules. */
2686 vpp = &(elf_section_data (s)->local_dynrel);
2687 head = (struct elf_dyn_relocs **)vpp;
2688 }
2689
2690 p = *head;
2691 if (p == NULL || p->sec != sec)
2692 {
2693 bfd_size_type amt = sizeof *p;
2694
2695 p = ((struct elf_dyn_relocs *)
2696 bfd_alloc (htab->elf.dynobj, amt));
2697 if (p == NULL)
2698 goto error_return;
2699 p->next = *head;
2700 *head = p;
2701 p->sec = sec;
2702 p->count = 0;
2703 p->pc_count = 0;
2704 }
2705
2706 p->count += 1;
2707 /* Count size relocation as PC-relative relocation. */
2708 if (IS_X86_64_PCREL_TYPE (r_type) || size_reloc)
2709 p->pc_count += 1;
2710 }
2711 break;
2712
2713 /* This relocation describes the C++ object vtable hierarchy.
2714 Reconstruct it for later use during GC. */
2715 case R_X86_64_GNU_VTINHERIT:
2716 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
2717 goto error_return;
2718 break;
2719
2720 /* This relocation describes which C++ vtable entries are actually
2721 used. Record for later use during GC. */
2722 case R_X86_64_GNU_VTENTRY:
2723 BFD_ASSERT (h != NULL);
2724 if (h != NULL
2725 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
2726 goto error_return;
2727 break;
2728
2729 default:
2730 break;
2731 }
2732
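/* A .plt.got entry is essentially a single indirect jump through the
   symbol's regular GOT slot, so a symbol that has (or, with -z now,
   will have) a GOT entry and needs no pointer equality can avoid a
   .got.plt slot and the lazy JUMP_SLOT relocation.  */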
2733 if (use_plt_got
2734 && h != NULL
2735 && h->plt.refcount > 0
2736 && (((info->flags & DF_BIND_NOW) && !h->pointer_equality_needed)
2737 || h->got.refcount > 0)
2738 && htab->plt_got == NULL)
2739 {
2740 /* Create the GOT procedure linkage table. */
2741 unsigned int plt_got_align;
2742 const struct elf_backend_data *bed;
2743
2744 bed = get_elf_backend_data (info->output_bfd);
2745 BFD_ASSERT (sizeof (elf_x86_64_legacy_plt2_entry) == 8
2746 && (sizeof (elf_x86_64_bnd_plt2_entry)
2747 == sizeof (elf_x86_64_legacy_plt2_entry)));
2748 plt_got_align = 3;
2749
2750 if (htab->elf.dynobj == NULL)
2751 htab->elf.dynobj = abfd;
2752 htab->plt_got
2753 = bfd_make_section_anyway_with_flags (htab->elf.dynobj,
2754 ".plt.got",
2755 (bed->dynamic_sec_flags
2756 | SEC_ALLOC
2757 | SEC_CODE
2758 | SEC_LOAD
2759 | SEC_READONLY));
2760 if (htab->plt_got == NULL
2761 || !bfd_set_section_alignment (htab->elf.dynobj,
2762 htab->plt_got,
2763 plt_got_align))
2764 goto error_return;
2765 }
2766
2767 if ((r_type == R_X86_64_GOTPCREL
2768 || r_type == R_X86_64_GOTPCRELX
2769 || r_type == R_X86_64_REX_GOTPCRELX)
2770 && (h == NULL || h->type != STT_GNU_IFUNC))
2771 sec->need_convert_load = 1;
2772 }
2773
2774 if (elf_section_data (sec)->this_hdr.contents != contents)
2775 {
2776 if (!info->keep_memory)
2777 free (contents);
2778 else
2779 {
2780 /* Cache the section contents for elf_link_input_bfd. */
2781 elf_section_data (sec)->this_hdr.contents = contents;
2782 }
2783 }
2784
2785 return TRUE;
2786
2787 error_return:
2788 if (elf_section_data (sec)->this_hdr.contents != contents)
2789 free (contents);
2790 sec->check_relocs_failed = 1;
2791 return FALSE;
2792 }
2793
2794 /* Return the section that should be marked against GC for a given
2795 relocation. */
2796
2797 static asection *
2798 elf_x86_64_gc_mark_hook (asection *sec,
2799 struct bfd_link_info *info,
2800 Elf_Internal_Rela *rel,
2801 struct elf_link_hash_entry *h,
2802 Elf_Internal_Sym *sym)
2803 {
2804 if (h != NULL)
2805 switch (ELF32_R_TYPE (rel->r_info))
2806 {
2807 case R_X86_64_GNU_VTINHERIT:
2808 case R_X86_64_GNU_VTENTRY:
2809 return NULL;
2810 }
2811
2812 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
2813 }
2814
2815 /* Remove undefined weak symbol from the dynamic symbol table if it
2816 is resolved to 0. */
2817
2818 static bfd_boolean
2819 elf_x86_64_fixup_symbol (struct bfd_link_info *info,
2820 struct elf_link_hash_entry *h)
2821 {
2822 if (h->dynindx != -1
2823 && UNDEFINED_WEAK_RESOLVED_TO_ZERO (info,
2824 elf_x86_64_hash_entry (h)->has_got_reloc,
2825 elf_x86_64_hash_entry (h)))
2826 {
2827 h->dynindx = -1;
2828 _bfd_elf_strtab_delref (elf_hash_table (info)->dynstr,
2829 h->dynstr_index);
2830 }
2831 return TRUE;
2832 }
2833
2834 /* Adjust a symbol defined by a dynamic object and referenced by a
2835 regular object. The current definition is in some section of the
2836 dynamic object, but we're not including those sections. We have to
2837 change the definition to something the rest of the link can
2838 understand. */
2839
2840 static bfd_boolean
2841 elf_x86_64_adjust_dynamic_symbol (struct bfd_link_info *info,
2842 struct elf_link_hash_entry *h)
2843 {
2844 struct elf_x86_64_link_hash_table *htab;
2845 asection *s;
2846 struct elf_x86_64_link_hash_entry *eh;
2847 struct elf_dyn_relocs *p;
2848
2849 /* STT_GNU_IFUNC symbol must go through PLT. */
2850 if (h->type == STT_GNU_IFUNC)
2851 {
2852 /* All local STT_GNU_IFUNC references must be treated as local
2853 calls via local PLT. */
2854 if (h->ref_regular
2855 && SYMBOL_CALLS_LOCAL (info, h))
2856 {
2857 bfd_size_type pc_count = 0, count = 0;
2858 struct elf_dyn_relocs **pp;
2859
2860 eh = (struct elf_x86_64_link_hash_entry *) h;
2861 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
2862 {
2863 pc_count += p->pc_count;
2864 p->count -= p->pc_count;
2865 p->pc_count = 0;
2866 count += p->count;
2867 if (p->count == 0)
2868 *pp = p->next;
2869 else
2870 pp = &p->next;
2871 }
2872
2873 if (pc_count || count)
2874 {
2875 h->non_got_ref = 1;
2876 if (pc_count)
2877 {
2878 /* Increment PLT reference count only for PC-relative
2879 references. */
2880 h->needs_plt = 1;
2881 if (h->plt.refcount <= 0)
2882 h->plt.refcount = 1;
2883 else
2884 h->plt.refcount += 1;
2885 }
2886 }
2887 }
2888
2889 if (h->plt.refcount <= 0)
2890 {
2891 h->plt.offset = (bfd_vma) -1;
2892 h->needs_plt = 0;
2893 }
2894 return TRUE;
2895 }
2896
2897 /* If this is a function, put it in the procedure linkage table. We
2898 will fill in the contents of the procedure linkage table later,
2899 when we know the address of the .got section. */
2900 if (h->type == STT_FUNC
2901 || h->needs_plt)
2902 {
2903 if (h->plt.refcount <= 0
2904 || SYMBOL_CALLS_LOCAL (info, h)
2905 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
2906 && h->root.type == bfd_link_hash_undefweak))
2907 {
2908 /* This case can occur if we saw a PLT32 reloc in an input
2909 file, but the symbol was never referred to by a dynamic
2910 object, or if all references were garbage collected. In
2911 such a case, we don't actually need to build a procedure
2912 linkage table, and we can just do a PC32 reloc instead. */
2913 h->plt.offset = (bfd_vma) -1;
2914 h->needs_plt = 0;
2915 }
2916
2917 return TRUE;
2918 }
2919 else
2920 /* It's possible that we incorrectly decided a .plt reloc was
2921 needed for an R_X86_64_PC32 reloc to a non-function sym in
2922 check_relocs. We can't decide accurately between function and
2923 non-function syms in check_relocs; objects loaded later in
2924 the link may change h->type. So fix it now. */
2925 h->plt.offset = (bfd_vma) -1;
2926
2927 /* If this is a weak symbol, and there is a real definition, the
2928 processor independent code will have arranged for us to see the
2929 real definition first, and we can just use the same value. */
2930 if (h->u.weakdef != NULL)
2931 {
2932 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
2933 || h->u.weakdef->root.type == bfd_link_hash_defweak);
2934 h->root.u.def.section = h->u.weakdef->root.u.def.section;
2935 h->root.u.def.value = h->u.weakdef->root.u.def.value;
2936 if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
2937 {
2938 eh = (struct elf_x86_64_link_hash_entry *) h;
2939 h->non_got_ref = h->u.weakdef->non_got_ref;
2940 eh->needs_copy = h->u.weakdef->needs_copy;
2941 }
2942 return TRUE;
2943 }
2944
2945 /* This is a reference to a symbol defined by a dynamic object which
2946 is not a function. */
2947
2948 /* If we are creating a shared library, we must presume that the
2949 only references to the symbol are via the global offset table.
2950 For such cases we need not do anything here; the relocations will
2951 be handled correctly by relocate_section. */
2952 if (!bfd_link_executable (info))
2953 return TRUE;
2954
2955 /* If there are no references to this symbol that do not use the
2956 GOT, we don't need to generate a copy reloc. */
2957 if (!h->non_got_ref)
2958 return TRUE;
2959
2960 /* If -z nocopyreloc was given, we won't generate them either. */
2961 if (info->nocopyreloc)
2962 {
2963 h->non_got_ref = 0;
2964 return TRUE;
2965 }
2966
2967 if (ELIMINATE_COPY_RELOCS)
2968 {
2969 eh = (struct elf_x86_64_link_hash_entry *) h;
2970 for (p = eh->dyn_relocs; p != NULL; p = p->next)
2971 {
2972 s = p->sec->output_section;
2973 if (s != NULL && (s->flags & SEC_READONLY) != 0)
2974 break;
2975 }
2976
2977 /* If we didn't find any dynamic relocs in read-only sections, then
2978 we'll be keeping the dynamic relocs and avoiding the copy reloc. */
2979 if (p == NULL)
2980 {
2981 h->non_got_ref = 0;
2982 return TRUE;
2983 }
2984 }
2985
2986 /* We must allocate the symbol in our .dynbss section, which will
2987 become part of the .bss section of the executable. There will be
2988 an entry for this symbol in the .dynsym section. The dynamic
2989 object will contain position independent code, so all references
2990 from the dynamic object to this symbol will go through the global
2991 offset table. The dynamic linker will use the .dynsym entry to
2992 determine the address it must put in the global offset table, so
2993 both the dynamic object and the regular object will refer to the
2994 same memory location for the variable. */
2995
2996 htab = elf_x86_64_hash_table (info);
2997 if (htab == NULL)
2998 return FALSE;
2999
3000 /* We must generate a R_X86_64_COPY reloc to tell the dynamic linker
3001 to copy the initial value out of the dynamic object and into the
3002 runtime process image. */
3003 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
3004 {
3005 const struct elf_backend_data *bed;
3006 bed = get_elf_backend_data (info->output_bfd);
3007 htab->srelbss->size += bed->s->sizeof_rela;
3008 h->needs_copy = 1;
3009 }
3010
3011 s = htab->sdynbss;
3012
3013 return _bfd_elf_adjust_dynamic_copy (info, h, s);
3014 }
3015
3016 /* Allocate space in .plt, .got and associated reloc sections for
3017 dynamic relocs. */
3018
3019 static bfd_boolean
3020 elf_x86_64_allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf)
3021 {
3022 struct bfd_link_info *info;
3023 struct elf_x86_64_link_hash_table *htab;
3024 struct elf_x86_64_link_hash_entry *eh;
3025 struct elf_dyn_relocs *p;
3026 const struct elf_backend_data *bed;
3027 unsigned int plt_entry_size;
3028 bfd_boolean resolved_to_zero;
3029
3030 if (h->root.type == bfd_link_hash_indirect)
3031 return TRUE;
3032
3033 eh = (struct elf_x86_64_link_hash_entry *) h;
3034
3035 info = (struct bfd_link_info *) inf;
3036 htab = elf_x86_64_hash_table (info);
3037 if (htab == NULL)
3038 return FALSE;
3039 bed = get_elf_backend_data (info->output_bfd);
3040 plt_entry_size = GET_PLT_ENTRY_SIZE (info->output_bfd);
3041
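/* TRUE if an undefined weak H is known to resolve to zero at run
   time, in which case it needs no dynamic symbol and no dynamic
   relocations.  */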
3042 resolved_to_zero = UNDEFINED_WEAK_RESOLVED_TO_ZERO (info,
3043 eh->has_got_reloc,
3044 eh);
3045
3046 /* We can't use the GOT PLT if pointer equality is needed since
3047 finish_dynamic_symbol won't clear symbol value and the dynamic
3048 linker won't update the GOT slot. We will get into an infinite
3049 loop at run-time. */
3050 if (htab->plt_got != NULL
3051 && h->type != STT_GNU_IFUNC
3052 && !h->pointer_equality_needed
3053 && h->plt.refcount > 0
3054 && h->got.refcount > 0)
3055 {
3056 /* Don't use the regular PLT if there are both GOT and GOTPLT
3057 relocations. */
3058 h->plt.offset = (bfd_vma) -1;
3059
3060 /* Use the GOT PLT. */
3061 eh->plt_got.refcount = 1;
3062 }
3063
3064 /* Clear the reference count of function pointer relocations if
3065 symbol isn't a normal function. */
3066 if (h->type != STT_FUNC)
3067 eh->func_pointer_refcount = 0;
3068
3069 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle it
3070 here if it is defined and referenced in a non-shared object. */
3071 if (h->type == STT_GNU_IFUNC
3072 && h->def_regular)
3073 {
3074 if (_bfd_elf_allocate_ifunc_dyn_relocs (info, h,
3075 &eh->dyn_relocs,
3076 &htab->readonly_dynrelocs_against_ifunc,
3077 plt_entry_size,
3078 plt_entry_size,
3079 GOT_ENTRY_SIZE, TRUE))
3080 {
3081 asection *s = htab->plt_bnd;
3082 if (h->plt.offset != (bfd_vma) -1 && s != NULL)
3083 {
3084 /* Use the .plt.bnd section if it is created. */
3085 eh->plt_bnd.offset = s->size;
3086
3087 /* Make room for this entry in the .plt.bnd section. */
3088 s->size += sizeof (elf_x86_64_legacy_plt2_entry);
3089 }
3090
3091 return TRUE;
3092 }
3093 else
3094 return FALSE;
3095 }
3096 /* Don't create the PLT entry if there are only function pointer
3097 relocations which can be resolved at run-time. */
3098 else if (htab->elf.dynamic_sections_created
3099 && (h->plt.refcount > eh->func_pointer_refcount
3100 || eh->plt_got.refcount > 0))
3101 {
3102 bfd_boolean use_plt_got;
3103
3104 /* Clear the reference count of function pointer relocations
3105 if PLT is used. */
3106 eh->func_pointer_refcount = 0;
3107
3108 if ((info->flags & DF_BIND_NOW) && !h->pointer_equality_needed)
3109 {
3110 /* Don't use the regular PLT for DF_BIND_NOW. */
3111 h->plt.offset = (bfd_vma) -1;
3112
3113 /* Use the GOT PLT. */
3114 h->got.refcount = 1;
3115 eh->plt_got.refcount = 1;
3116 }
3117
3118 use_plt_got = eh->plt_got.refcount > 0;
3119
3120 /* Make sure this symbol is output as a dynamic symbol.
3121 Undefined weak syms won't yet be marked as dynamic. */
3122 if (h->dynindx == -1
3123 && !h->forced_local
3124 && !resolved_to_zero)
3125 {
3126 if (! bfd_elf_link_record_dynamic_symbol (info, h))
3127 return FALSE;
3128 }
3129
3130 if (bfd_link_pic (info)
3131 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
3132 {
3133 asection *s = htab->elf.splt;
3134 asection *bnd_s = htab->plt_bnd;
3135 asection *got_s = htab->plt_got;
3136
3137 /* If this is the first .plt entry, make room for the special
3138 first entry. The .plt section is used by prelink to undo
3139 prelinking for dynamic relocations. */
3140 if (s->size == 0)
3141 s->size = plt_entry_size;
3142
3143 if (use_plt_got)
3144 eh->plt_got.offset = got_s->size;
3145 else
3146 {
3147 h->plt.offset = s->size;
3148 if (bnd_s)
3149 eh->plt_bnd.offset = bnd_s->size;
3150 }
3151
3152 /* If this symbol is not defined in a regular file, and we are
3153 not generating a shared library, then set the symbol to this
3154 location in the .plt. This is required to make function
3155 pointers compare as equal between the normal executable and
3156 the shared library. */
3157 if (! bfd_link_pic (info)
3158 && !h->def_regular)
3159 {
3160 if (use_plt_got)
3161 {
3162 /* We need to make a call to the entry of the GOT PLT
3163 instead of regular PLT entry. */
3164 h->root.u.def.section = got_s;
3165 h->root.u.def.value = eh->plt_got.offset;
3166 }
3167 else
3168 {
3169 if (bnd_s)
3170 {
3171 /* We need to make a call to the entry of the second
3172 PLT instead of regular PLT entry. */
3173 h->root.u.def.section = bnd_s;
3174 h->root.u.def.value = eh->plt_bnd.offset;
3175 }
3176 else
3177 {
3178 h->root.u.def.section = s;
3179 h->root.u.def.value = h->plt.offset;
3180 }
3181 }
3182 }
3183
3184 /* Make room for this entry. */
3185 if (use_plt_got)
3186 got_s->size += sizeof (elf_x86_64_legacy_plt2_entry);
3187 else
3188 {
3189 s->size += plt_entry_size;
3190 if (bnd_s)
3191 bnd_s->size += sizeof (elf_x86_64_legacy_plt2_entry);
3192
3193 /* We also need to make an entry in the .got.plt section,
3194 which will be placed in the .got section by the linker
3195 script. */
3196 htab->elf.sgotplt->size += GOT_ENTRY_SIZE;
3197
3198 /* There should be no PLT relocation against resolved
3199 undefined weak symbol in executable. */
3200 if (!resolved_to_zero)
3201 {
3202 /* We also need to make an entry in the .rela.plt
3203 section. */
3204 htab->elf.srelplt->size += bed->s->sizeof_rela;
3205 htab->elf.srelplt->reloc_count++;
3206 }
3207 }
3208 }
3209 else
3210 {
3211 eh->plt_got.offset = (bfd_vma) -1;
3212 h->plt.offset = (bfd_vma) -1;
3213 h->needs_plt = 0;
3214 }
3215 }
3216 else
3217 {
3218 eh->plt_got.offset = (bfd_vma) -1;
3219 h->plt.offset = (bfd_vma) -1;
3220 h->needs_plt = 0;
3221 }
3222
3223 eh->tlsdesc_got = (bfd_vma) -1;
3224
3225 /* If R_X86_64_GOTTPOFF symbol is now local to the binary,
3226 make it an R_X86_64_TPOFF32 requiring no GOT entry. */
3227 if (h->got.refcount > 0
3228 && bfd_link_executable (info)
3229 && h->dynindx == -1
3230 && elf_x86_64_hash_entry (h)->tls_type == GOT_TLS_IE)
3231 {
3232 h->got.offset = (bfd_vma) -1;
3233 }
3234 else if (h->got.refcount > 0)
3235 {
3236 asection *s;
3237 bfd_boolean dyn;
3238 int tls_type = elf_x86_64_hash_entry (h)->tls_type;
3239
3240 /* Make sure this symbol is output as a dynamic symbol.
3241 Undefined weak syms won't yet be marked as dynamic. */
3242 if (h->dynindx == -1
3243 && !h->forced_local
3244 && !resolved_to_zero)
3245 {
3246 if (! bfd_elf_link_record_dynamic_symbol (info, h))
3247 return FALSE;
3248 }
3249
3250 if (GOT_TLS_GDESC_P (tls_type))
3251 {
3252 eh->tlsdesc_got = htab->elf.sgotplt->size
3253 - elf_x86_64_compute_jump_table_size (htab);
3254 htab->elf.sgotplt->size += 2 * GOT_ENTRY_SIZE;
3255 h->got.offset = (bfd_vma) -2;
3256 }
3257 if (! GOT_TLS_GDESC_P (tls_type)
3258 || GOT_TLS_GD_P (tls_type))
3259 {
3260 s = htab->elf.sgot;
3261 h->got.offset = s->size;
3262 s->size += GOT_ENTRY_SIZE;
3263 if (GOT_TLS_GD_P (tls_type))
3264 s->size += GOT_ENTRY_SIZE;
3265 }
3266 dyn = htab->elf.dynamic_sections_created;
3267 /* R_X86_64_TLSGD needs one dynamic relocation if local symbol
3268 and two if global. R_X86_64_GOTTPOFF needs one dynamic
3269 relocation. No dynamic relocation against resolved undefined
3270 weak symbol in executable. */
3271 if ((GOT_TLS_GD_P (tls_type) && h->dynindx == -1)
3272 || tls_type == GOT_TLS_IE)
3273 htab->elf.srelgot->size += bed->s->sizeof_rela;
3274 else if (GOT_TLS_GD_P (tls_type))
3275 htab->elf.srelgot->size += 2 * bed->s->sizeof_rela;
3276 else if (! GOT_TLS_GDESC_P (tls_type)
3277 && ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
3278 && !resolved_to_zero)
3279 || h->root.type != bfd_link_hash_undefweak)
3280 && (bfd_link_pic (info)
3281 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
3282 htab->elf.srelgot->size += bed->s->sizeof_rela;
3283 if (GOT_TLS_GDESC_P (tls_type))
3284 {
3285 htab->elf.srelplt->size += bed->s->sizeof_rela;
3286 htab->tlsdesc_plt = (bfd_vma) -1;
3287 }
3288 }
3289 else
3290 h->got.offset = (bfd_vma) -1;
3291
3292 if (eh->dyn_relocs == NULL)
3293 return TRUE;
3294
3295 /* In the shared -Bsymbolic case, discard space allocated for
3296 dynamic pc-relative relocs against symbols which turn out to be
3297 defined in regular objects. For the normal shared case, discard
3298 space for pc-relative relocs that have become local due to symbol
3299 visibility changes. */
3300
3301 if (bfd_link_pic (info))
3302 {
3303 /* Relocs that use pc_count are those that appear on a call
3304 insn, or certain REL relocs that can be generated via assembly.
3305 We want calls to protected symbols to resolve directly to the
3306 function rather than going via the plt. If people want
3307 function pointer comparisons to work as expected then they
3308 should avoid writing weird assembly. */
3309 if (SYMBOL_CALLS_LOCAL (info, h))
3310 {
3311 struct elf_dyn_relocs **pp;
3312
3313 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
3314 {
3315 p->count -= p->pc_count;
3316 p->pc_count = 0;
3317 if (p->count == 0)
3318 *pp = p->next;
3319 else
3320 pp = &p->next;
3321 }
3322 }
3323
3324 /* Also discard relocs on undefined weak syms with non-default
3325 visibility or in PIE. */
3326 if (eh->dyn_relocs != NULL)
3327 {
3328 if (h->root.type == bfd_link_hash_undefweak)
3329 {
3330 /* Undefined weak symbol is never bound locally in shared
3331 library. */
3332 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
3333 || resolved_to_zero)
3334 eh->dyn_relocs = NULL;
3335 else if (h->dynindx == -1
3336 && ! h->forced_local
3337 && ! bfd_elf_link_record_dynamic_symbol (info, h))
3338 return FALSE;
3339 }
3340 /* For PIE, discard space for pc-relative relocs against
3341 symbols which turn out to need copy relocs. */
3342 else if (bfd_link_executable (info)
3343 && (h->needs_copy || eh->needs_copy)
3344 && h->def_dynamic
3345 && !h->def_regular)
3346 {
3347 struct elf_dyn_relocs **pp;
3348
3349 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
3350 {
3351 if (p->pc_count != 0)
3352 *pp = p->next;
3353 else
3354 pp = &p->next;
3355 }
3356 }
3357 }
3358 }
3359 else if (ELIMINATE_COPY_RELOCS)
3360 {
3361 /* For the non-shared case, discard space for relocs against
3362 symbols which turn out to need copy relocs or are not
3363 dynamic. Keep dynamic relocations for run-time function
3364 pointer initialization. */
3365
3366 if ((!h->non_got_ref
3367 || eh->func_pointer_refcount > 0
3368 || (h->root.type == bfd_link_hash_undefweak
3369 && !resolved_to_zero))
3370 && ((h->def_dynamic
3371 && !h->def_regular)
3372 || (htab->elf.dynamic_sections_created
3373 && (h->root.type == bfd_link_hash_undefweak
3374 || h->root.type == bfd_link_hash_undefined))))
3375 {
3376 /* Make sure this symbol is output as a dynamic symbol.
3377 Undefined weak syms won't yet be marked as dynamic. */
3378 if (h->dynindx == -1
3379 && ! h->forced_local
3380 && ! resolved_to_zero
3381 && ! bfd_elf_link_record_dynamic_symbol (info, h))
3382 return FALSE;
3383
3384 /* If that succeeded, we know we'll be keeping all the
3385 relocs. */
3386 if (h->dynindx != -1)
3387 goto keep;
3388 }
3389
3390 eh->dyn_relocs = NULL;
3391 eh->func_pointer_refcount = 0;
3392
3393 keep: ;
3394 }
3395
3396 /* Finally, allocate space. */
3397 for (p = eh->dyn_relocs; p != NULL; p = p->next)
3398 {
3399 asection * sreloc;
3400
3401 sreloc = elf_section_data (p->sec)->sreloc;
3402
3403 BFD_ASSERT (sreloc != NULL);
3404
3405 sreloc->size += p->count * bed->s->sizeof_rela;
3406 }
3407
3408 return TRUE;
3409 }
3410
3411 /* Allocate space in .plt, .got and associated reloc sections for
3412 local dynamic relocs. */
3413
3414 static bfd_boolean
3415 elf_x86_64_allocate_local_dynrelocs (void **slot, void *inf)
3416 {
3417 struct elf_link_hash_entry *h
3418 = (struct elf_link_hash_entry *) *slot;
3419
3420 if (h->type != STT_GNU_IFUNC
3421 || !h->def_regular
3422 || !h->ref_regular
3423 || !h->forced_local
3424 || h->root.type != bfd_link_hash_defined)
3425 abort ();
3426
3427 return elf_x86_64_allocate_dynrelocs (h, inf);
3428 }
3429
3430 /* Find any dynamic relocs that apply to read-only sections. */
3431
3432 static bfd_boolean
3433 elf_x86_64_readonly_dynrelocs (struct elf_link_hash_entry *h,
3434 void * inf)
3435 {
3436 struct elf_x86_64_link_hash_entry *eh;
3437 struct elf_dyn_relocs *p;
3438
3439 /* Skip local IFUNC symbols. */
3440 if (h->forced_local && h->type == STT_GNU_IFUNC)
3441 return TRUE;
3442
3443 eh = (struct elf_x86_64_link_hash_entry *) h;
3444 for (p = eh->dyn_relocs; p != NULL; p = p->next)
3445 {
3446 asection *s = p->sec->output_section;
3447
3448 if (s != NULL && (s->flags & SEC_READONLY) != 0)
3449 {
3450 struct bfd_link_info *info = (struct bfd_link_info *) inf;
3451
3452 info->flags |= DF_TEXTREL;
3453
3454 if ((info->warn_shared_textrel && bfd_link_pic (info))
3455 || info->error_textrel)
3456 info->callbacks->einfo (_("%P: %B: warning: relocation against `%s' in readonly section `%A'\n"),
3457 p->sec->owner, h->root.root.string,
3458 p->sec);
3459
3460 /* Not an error, just cut short the traversal. */
3461 return FALSE;
3462 }
3463 }
3464 return TRUE;
3465 }
3466
3467 /* Convert GOT-indirect loads, calls and jumps in SEC to direct references where possible. */
3468
3469 static bfd_boolean
3470 elf_x86_64_convert_load (bfd *abfd, asection *sec,
3471 struct bfd_link_info *link_info)
3472 {
3473 Elf_Internal_Shdr *symtab_hdr;
3474 Elf_Internal_Rela *internal_relocs;
3475 Elf_Internal_Rela *irel, *irelend;
3476 bfd_byte *contents;
3477 struct elf_x86_64_link_hash_table *htab;
3478 bfd_boolean changed;
3479 bfd_signed_vma *local_got_refcounts;
3480
3481 /* Don't even try to convert non-ELF outputs. */
3482 if (!is_elf_hash_table (link_info->hash))
3483 return FALSE;
3484
3485 /* Nothing to do if there is no need or no output. */
3486 if ((sec->flags & (SEC_CODE | SEC_RELOC)) != (SEC_CODE | SEC_RELOC)
3487 || sec->need_convert_load == 0
3488 || bfd_is_abs_section (sec->output_section))
3489 return TRUE;
3490
3491 symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
3492
3493 /* Load the relocations for this section. */
3494 internal_relocs = (_bfd_elf_link_read_relocs
3495 (abfd, sec, NULL, (Elf_Internal_Rela *) NULL,
3496 link_info->keep_memory));
3497 if (internal_relocs == NULL)
3498 return FALSE;
3499
3500 changed = FALSE;
3501 htab = elf_x86_64_hash_table (link_info);
3502 local_got_refcounts = elf_local_got_refcounts (abfd);
3503
3504 /* Get the section contents. */
3505 if (elf_section_data (sec)->this_hdr.contents != NULL)
3506 contents = elf_section_data (sec)->this_hdr.contents;
3507 else
3508 {
3509 if (!bfd_malloc_and_get_section (abfd, sec, &contents))
3510 goto error_return;
3511 }
3512
3513 irelend = internal_relocs + sec->reloc_count;
3514 for (irel = internal_relocs; irel < irelend; irel++)
3515 {
3516 unsigned int r_type = ELF32_R_TYPE (irel->r_info);
3517 unsigned int r_symndx;
3518 struct elf_link_hash_entry *h;
3519 bfd_boolean converted;
3520
3521 if (r_type != R_X86_64_GOTPCRELX
3522 && r_type != R_X86_64_REX_GOTPCRELX
3523 && r_type != R_X86_64_GOTPCREL)
3524 continue;
3525
3526 r_symndx = htab->r_sym (irel->r_info);
3527 if (r_symndx < symtab_hdr->sh_info)
3528 h = elf_x86_64_get_local_sym_hash (htab, sec->owner,
3529 (const Elf_Internal_Rela *) irel,
3530 FALSE);
3531 else
3532 {
3533 h = elf_sym_hashes (abfd)[r_symndx - symtab_hdr->sh_info];
3534 while (h->root.type == bfd_link_hash_indirect
3535 || h->root.type == bfd_link_hash_warning)
3536 h = (struct elf_link_hash_entry *) h->root.u.i.link;
3537 }
3538
3539 /* STT_GNU_IFUNC must keep GOTPCREL relocations. */
3540 if (h != NULL && h->type == STT_GNU_IFUNC)
3541 continue;
3542
3543 converted = FALSE;
3544 if (!elf_x86_64_convert_load_reloc (abfd, sec, contents, irel, h,
3545 &converted, link_info))
3546 goto error_return;
3547
3548 if (converted)
3549 {
3550 changed = converted;
3551 if (h)
3552 {
3553 if (h->got.refcount > 0)
3554 h->got.refcount -= 1;
3555 }
3556 else
3557 {
3558 if (local_got_refcounts != NULL
3559 && local_got_refcounts[r_symndx] > 0)
3560 local_got_refcounts[r_symndx] -= 1;
3561 }
3562 }
3563 }
3564
3565 if (contents != NULL
3566 && elf_section_data (sec)->this_hdr.contents != contents)
3567 {
3568 if (!changed && !link_info->keep_memory)
3569 free (contents);
3570 else
3571 {
3572 /* Cache the section contents for elf_link_input_bfd. */
3573 elf_section_data (sec)->this_hdr.contents = contents;
3574 }
3575 }
3576
3577 if (elf_section_data (sec)->relocs != internal_relocs)
3578 {
3579 if (!changed)
3580 free (internal_relocs);
3581 else
3582 elf_section_data (sec)->relocs = internal_relocs;
3583 }
3584
3585 return TRUE;
3586
3587 error_return:
3588 if (contents != NULL
3589 && elf_section_data (sec)->this_hdr.contents != contents)
3590 free (contents);
3591 if (internal_relocs != NULL
3592 && elf_section_data (sec)->relocs != internal_relocs)
3593 free (internal_relocs);
3594 return FALSE;
3595 }
3596
3597 /* Set the sizes of the dynamic sections. */
3598
3599 static bfd_boolean
3600 elf_x86_64_size_dynamic_sections (bfd *output_bfd,
3601 struct bfd_link_info *info)
3602 {
3603 struct elf_x86_64_link_hash_table *htab;
3604 bfd *dynobj;
3605 asection *s;
3606 bfd_boolean relocs;
3607 bfd *ibfd;
3608 const struct elf_backend_data *bed;
3609
3610 htab = elf_x86_64_hash_table (info);
3611 if (htab == NULL)
3612 return FALSE;
3613 bed = get_elf_backend_data (output_bfd);
3614
3615 dynobj = htab->elf.dynobj;
3616 if (dynobj == NULL)
3617 abort ();
3618
3619 /* Set up .got offsets for local syms, and space for local dynamic
3620 relocs. */
3621 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
3622 {
3623 bfd_signed_vma *local_got;
3624 bfd_signed_vma *end_local_got;
3625 char *local_tls_type;
3626 bfd_vma *local_tlsdesc_gotent;
3627 bfd_size_type locsymcount;
3628 Elf_Internal_Shdr *symtab_hdr;
3629 asection *srel;
3630
3631 if (! is_x86_64_elf (ibfd))
3632 continue;
3633
3634 for (s = ibfd->sections; s != NULL; s = s->next)
3635 {
3636 struct elf_dyn_relocs *p;
3637
3638 if (!elf_x86_64_convert_load (ibfd, s, info))
3639 return FALSE;
3640
3641 for (p = (struct elf_dyn_relocs *)
3642 (elf_section_data (s)->local_dynrel);
3643 p != NULL;
3644 p = p->next)
3645 {
3646 if (!bfd_is_abs_section (p->sec)
3647 && bfd_is_abs_section (p->sec->output_section))
3648 {
3649 /* Input section has been discarded, either because
3650 it is a copy of a linkonce section or due to
3651 linker script /DISCARD/, so we'll be discarding
3652 the relocs too. */
3653 }
3654 else if (p->count != 0)
3655 {
3656 srel = elf_section_data (p->sec)->sreloc;
3657 srel->size += p->count * bed->s->sizeof_rela;
3658 if ((p->sec->output_section->flags & SEC_READONLY) != 0
3659 && (info->flags & DF_TEXTREL) == 0)
3660 {
3661 info->flags |= DF_TEXTREL;
3662 if ((info->warn_shared_textrel && bfd_link_pic (info))
3663 || info->error_textrel)
3664 info->callbacks->einfo (_("%P: %B: warning: relocation in readonly section `%A'\n"),
3665 p->sec->owner, p->sec);
3666 }
3667 }
3668 }
3669 }
3670
3671 local_got = elf_local_got_refcounts (ibfd);
3672 if (!local_got)
3673 continue;
3674
3675 symtab_hdr = &elf_symtab_hdr (ibfd);
3676 locsymcount = symtab_hdr->sh_info;
3677 end_local_got = local_got + locsymcount;
3678 local_tls_type = elf_x86_64_local_got_tls_type (ibfd);
3679 local_tlsdesc_gotent = elf_x86_64_local_tlsdesc_gotent (ibfd);
3680 s = htab->elf.sgot;
3681 srel = htab->elf.srelgot;
3682 for (; local_got < end_local_got;
3683 ++local_got, ++local_tls_type, ++local_tlsdesc_gotent)
3684 {
3685 *local_tlsdesc_gotent = (bfd_vma) -1;
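/* Rough layout implied by the code below: a GD symbol takes two .got
   slots (module ID and DTP offset), an IE symbol one slot (TP
   offset), while a TLS descriptor takes two .got.plt slots placed
   after the jump-table part of .got.plt, recorded via
   local_tlsdesc_gotent, with its relocation going to .rela.plt
   rather than .rela.got.  */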
3686 if (*local_got > 0)
3687 {
3688 if (GOT_TLS_GDESC_P (*local_tls_type))
3689 {
3690 *local_tlsdesc_gotent = htab->elf.sgotplt->size
3691 - elf_x86_64_compute_jump_table_size (htab);
3692 htab->elf.sgotplt->size += 2 * GOT_ENTRY_SIZE;
3693 *local_got = (bfd_vma) -2;
3694 }
3695 if (! GOT_TLS_GDESC_P (*local_tls_type)
3696 || GOT_TLS_GD_P (*local_tls_type))
3697 {
3698 *local_got = s->size;
3699 s->size += GOT_ENTRY_SIZE;
3700 if (GOT_TLS_GD_P (*local_tls_type))
3701 s->size += GOT_ENTRY_SIZE;
3702 }
3703 if (bfd_link_pic (info)
3704 || GOT_TLS_GD_ANY_P (*local_tls_type)
3705 || *local_tls_type == GOT_TLS_IE)
3706 {
3707 if (GOT_TLS_GDESC_P (*local_tls_type))
3708 {
3709 htab->elf.srelplt->size
3710 += bed->s->sizeof_rela;
3711 htab->tlsdesc_plt = (bfd_vma) -1;
3712 }
3713 if (! GOT_TLS_GDESC_P (*local_tls_type)
3714 || GOT_TLS_GD_P (*local_tls_type))
3715 srel->size += bed->s->sizeof_rela;
3716 }
3717 }
3718 else
3719 *local_got = (bfd_vma) -1;
3720 }
3721 }
3722
3723 if (htab->tls_ld_got.refcount > 0)
3724 {
3725 /* Allocate 2 got entries and 1 dynamic reloc for R_X86_64_TLSLD
3726 relocs. */
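/* The two slots are shared by every R_X86_64_TLSLD use: they hold the
   R_X86_64_DTPMOD64 module ID and a zero DTP offset, filled in by
   relocate_section.  */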
3727 htab->tls_ld_got.offset = htab->elf.sgot->size;
3728 htab->elf.sgot->size += 2 * GOT_ENTRY_SIZE;
3729 htab->elf.srelgot->size += bed->s->sizeof_rela;
3730 }
3731 else
3732 htab->tls_ld_got.offset = -1;
3733
3734 /* Allocate global sym .plt and .got entries, and space for global
3735 sym dynamic relocs. */
3736 elf_link_hash_traverse (&htab->elf, elf_x86_64_allocate_dynrelocs,
3737 info);
3738
3739 /* Allocate .plt and .got entries, and space for local symbols. */
3740 htab_traverse (htab->loc_hash_table,
3741 elf_x86_64_allocate_local_dynrelocs,
3742 info);
3743
3744 /* For every jump slot reserved in the sgotplt, reloc_count is
3745 incremented. However, when we reserve space for TLS descriptors,
3746 it's not incremented, so in order to compute the space reserved
3747 for them, it suffices to multiply the reloc count by the jump
3748 slot size.
3749
3750 PR ld/13302: We start next_irelative_index at the end of .rela.plt
3751 so that R_X86_64_IRELATIVE entries come last. */
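/* For example, assuming (it is not shown here) that
   elf_x86_64_compute_jump_table_size is simply
   srelplt->reloc_count * GOT_ENTRY_SIZE, four entries in .rela.plt
   give a 32-byte jump table in .got.plt, and any TLS descriptor
   slots reserved above are laid out right after it.  */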
3752 if (htab->elf.srelplt)
3753 {
3754 htab->sgotplt_jump_table_size
3755 = elf_x86_64_compute_jump_table_size (htab);
3756 htab->next_irelative_index = htab->elf.srelplt->reloc_count - 1;
3757 }
3758 else if (htab->elf.irelplt)
3759 htab->next_irelative_index = htab->elf.irelplt->reloc_count - 1;
3760
3761 if (htab->tlsdesc_plt)
3762 {
3763 /* If we're not using lazy TLS relocations, don't generate the
3764 PLT and GOT entries they require. */
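/* With lazy binding, one extra GOT slot (tlsdesc_got) and one extra
   PLT slot (tlsdesc_plt) are reserved below for the lazy TLS
   descriptor resolution stub; the DT_TLSDESC_PLT and DT_TLSDESC_GOT
   entries added further down tell the dynamic linker where they
   are.  */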
3765 if ((info->flags & DF_BIND_NOW))
3766 htab->tlsdesc_plt = 0;
3767 else
3768 {
3769 htab->tlsdesc_got = htab->elf.sgot->size;
3770 htab->elf.sgot->size += GOT_ENTRY_SIZE;
3771 /* Reserve room for the initial entry.
3772 FIXME: we could probably do away with it in this case. */
3773 if (htab->elf.splt->size == 0)
3774 htab->elf.splt->size += GET_PLT_ENTRY_SIZE (output_bfd);
3775 htab->tlsdesc_plt = htab->elf.splt->size;
3776 htab->elf.splt->size += GET_PLT_ENTRY_SIZE (output_bfd);
3777 }
3778 }
3779
3780 if (htab->elf.sgotplt)
3781 {
3782 /* Don't allocate the .got.plt section if there are no GOT or PLT
3783 entries and there is no reference to _GLOBAL_OFFSET_TABLE_. */
3784 if ((htab->elf.hgot == NULL
3785 || !htab->elf.hgot->ref_regular_nonweak)
3786 && (htab->elf.sgotplt->size
3787 == get_elf_backend_data (output_bfd)->got_header_size)
3788 && (htab->elf.splt == NULL
3789 || htab->elf.splt->size == 0)
3790 && (htab->elf.sgot == NULL
3791 || htab->elf.sgot->size == 0)
3792 && (htab->elf.iplt == NULL
3793 || htab->elf.iplt->size == 0)
3794 && (htab->elf.igotplt == NULL
3795 || htab->elf.igotplt->size == 0))
3796 htab->elf.sgotplt->size = 0;
3797 }
3798
3799 if (htab->plt_eh_frame != NULL
3800 && htab->elf.splt != NULL
3801 && htab->elf.splt->size != 0
3802 && !bfd_is_abs_section (htab->elf.splt->output_section)
3803 && _bfd_elf_eh_frame_present (info))
3804 {
3805 const struct elf_x86_64_backend_data *arch_data
3806 = get_elf_x86_64_arch_data (bed);
3807 htab->plt_eh_frame->size = arch_data->eh_frame_plt_size;
3808 }
3809
3810 /* We now have determined the sizes of the various dynamic sections.
3811 Allocate memory for them. */
3812 relocs = FALSE;
3813 for (s = dynobj->sections; s != NULL; s = s->next)
3814 {
3815 if ((s->flags & SEC_LINKER_CREATED) == 0)
3816 continue;
3817
3818 if (s == htab->elf.splt
3819 || s == htab->elf.sgot
3820 || s == htab->elf.sgotplt
3821 || s == htab->elf.iplt
3822 || s == htab->elf.igotplt
3823 || s == htab->plt_bnd
3824 || s == htab->plt_got
3825 || s == htab->plt_eh_frame
3826 || s == htab->sdynbss)
3827 {
3828 /* Strip this section if we don't need it; see the
3829 comment below. */
3830 }
3831 else if (CONST_STRNEQ (bfd_get_section_name (dynobj, s), ".rela"))
3832 {
3833 if (s->size != 0 && s != htab->elf.srelplt)
3834 relocs = TRUE;
3835
3836 /* We use the reloc_count field as a counter if we need
3837 to copy relocs into the output file. */
3838 if (s != htab->elf.srelplt)
3839 s->reloc_count = 0;
3840 }
3841 else
3842 {
3843 /* It's not one of our sections, so don't allocate space. */
3844 continue;
3845 }
3846
3847 if (s->size == 0)
3848 {
3849 /* If we don't need this section, strip it from the
3850 output file. This is mostly to handle .rela.bss and
3851 .rela.plt. We must create both sections in
3852 create_dynamic_sections, because they must be created
3853 before the linker maps input sections to output
3854 sections. The linker does that before
3855 adjust_dynamic_symbol is called, and it is that
3856 function which decides whether anything needs to go
3857 into these sections. */
3858
3859 s->flags |= SEC_EXCLUDE;
3860 continue;
3861 }
3862
3863 if ((s->flags & SEC_HAS_CONTENTS) == 0)
3864 continue;
3865
3866 /* Allocate memory for the section contents. We use bfd_zalloc
3867 here in case unused entries are not reclaimed before the
3868 section's contents are written out. This should not happen,
3869 but this way if it does, we get a R_X86_64_NONE reloc instead
3870 of garbage. */
3871 s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
3872 if (s->contents == NULL)
3873 return FALSE;
3874 }
3875
3876 if (htab->plt_eh_frame != NULL
3877 && htab->plt_eh_frame->contents != NULL)
3878 {
3879 const struct elf_x86_64_backend_data *arch_data
3880 = get_elf_x86_64_arch_data (bed);
3881
3882 memcpy (htab->plt_eh_frame->contents,
3883 arch_data->eh_frame_plt, htab->plt_eh_frame->size);
3884 bfd_put_32 (dynobj, htab->elf.splt->size,
3885 htab->plt_eh_frame->contents + PLT_FDE_LEN_OFFSET);
3886 }
3887
3888 if (htab->elf.dynamic_sections_created)
3889 {
3890 /* Add some entries to the .dynamic section. We fill in the
3891 values later, in elf_x86_64_finish_dynamic_sections, but we
3892 must add the entries now so that we get the correct size for
3893 the .dynamic section. The DT_DEBUG entry is filled in by the
3894 dynamic linker and used by the debugger. */
3895 #define add_dynamic_entry(TAG, VAL) \
3896 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
3897
3898 if (bfd_link_executable (info))
3899 {
3900 if (!add_dynamic_entry (DT_DEBUG, 0))
3901 return FALSE;
3902 }
3903
3904 if (htab->elf.splt->size != 0)
3905 {
3906 /* DT_PLTGOT is used by prelink even if there is no PLT
3907 relocation. */
3908 if (!add_dynamic_entry (DT_PLTGOT, 0))
3909 return FALSE;
3910
3911 if (htab->elf.srelplt->size != 0)
3912 {
3913 if (!add_dynamic_entry (DT_PLTRELSZ, 0)
3914 || !add_dynamic_entry (DT_PLTREL, DT_RELA)
3915 || !add_dynamic_entry (DT_JMPREL, 0))
3916 return FALSE;
3917 }
3918
3919 if (htab->tlsdesc_plt
3920 && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
3921 || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
3922 return FALSE;
3923 }
3924
3925 if (relocs)
3926 {
3927 if (!add_dynamic_entry (DT_RELA, 0)
3928 || !add_dynamic_entry (DT_RELASZ, 0)
3929 || !add_dynamic_entry (DT_RELAENT, bed->s->sizeof_rela))
3930 return FALSE;
3931
3932 /* If any dynamic relocs apply to a read-only section,
3933 then we need a DT_TEXTREL entry. */
3934 if ((info->flags & DF_TEXTREL) == 0)
3935 elf_link_hash_traverse (&htab->elf,
3936 elf_x86_64_readonly_dynrelocs,
3937 info);
3938
3939 if ((info->flags & DF_TEXTREL) != 0)
3940 {
3941 if (htab->readonly_dynrelocs_against_ifunc)
3942 {
3943 info->callbacks->einfo
3944 (_("%P%X: read-only segment has dynamic IFUNC relocations; recompile with -fPIC\n"));
3945 bfd_set_error (bfd_error_bad_value);
3946 return FALSE;
3947 }
3948
3949 if (!add_dynamic_entry (DT_TEXTREL, 0))
3950 return FALSE;
3951 }
3952 }
3953 }
3954 #undef add_dynamic_entry
3955
3956 return TRUE;
3957 }
3958
3959 static bfd_boolean
3960 elf_x86_64_always_size_sections (bfd *output_bfd,
3961 struct bfd_link_info *info)
3962 {
3963 asection *tls_sec = elf_hash_table (info)->tls_sec;
3964
3965 if (tls_sec)
3966 {
3967 struct elf_link_hash_entry *tlsbase;
3968
3969 tlsbase = elf_link_hash_lookup (elf_hash_table (info),
3970 "_TLS_MODULE_BASE_",
3971 FALSE, FALSE, FALSE);
3972
3973 if (tlsbase && tlsbase->type == STT_TLS)
3974 {
3975 struct elf_x86_64_link_hash_table *htab;
3976 struct bfd_link_hash_entry *bh = NULL;
3977 const struct elf_backend_data *bed
3978 = get_elf_backend_data (output_bfd);
3979
3980 htab = elf_x86_64_hash_table (info);
3981 if (htab == NULL)
3982 return FALSE;
3983
3984 if (!(_bfd_generic_link_add_one_symbol
3985 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
3986 tls_sec, 0, NULL, FALSE,
3987 bed->collect, &bh)))
3988 return FALSE;
3989
3990 htab->tls_module_base = bh;
3991
3992 tlsbase = (struct elf_link_hash_entry *)bh;
3993 tlsbase->def_regular = 1;
3994 tlsbase->other = STV_HIDDEN;
3995 tlsbase->root.linker_def = 1;
3996 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
3997 }
3998 }
3999
4000 return TRUE;
4001 }
4002
4003 /* _TLS_MODULE_BASE_ needs to be treated especially when linking
4004 executables. Rather than setting it to the beginning of the TLS
4005 section, we have to set it to the end. This function may be called
4006 multiple times, it is idempotent. */
4007
4008 static void
4009 elf_x86_64_set_tls_module_base (struct bfd_link_info *info)
4010 {
4011 struct elf_x86_64_link_hash_table *htab;
4012 struct bfd_link_hash_entry *base;
4013
4014 if (!bfd_link_executable (info))
4015 return;
4016
4017 htab = elf_x86_64_hash_table (info);
4018 if (htab == NULL)
4019 return;
4020
4021 base = htab->tls_module_base;
4022 if (base == NULL)
4023 return;
4024
4025 base->u.def.value = htab->elf.tls_size;
4026 }
4027
4028 /* Return the base VMA address which should be subtracted from real addresses
4029 when resolving @dtpoff relocation.
4030 This is PT_TLS segment p_vaddr. */
4031
4032 static bfd_vma
4033 elf_x86_64_dtpoff_base (struct bfd_link_info *info)
4034 {
4035 /* If tls_sec is NULL, we should have signalled an error already. */
4036 if (elf_hash_table (info)->tls_sec == NULL)
4037 return 0;
4038 return elf_hash_table (info)->tls_sec->vma;
4039 }
4040
4041 /* Return the relocation value for @tpoff relocation
4042 if STT_TLS virtual address is ADDRESS. */
4043
4044 static bfd_vma
4045 elf_x86_64_tpoff (struct bfd_link_info *info, bfd_vma address)
4046 {
4047 struct elf_link_hash_table *htab = elf_hash_table (info);
4048 const struct elf_backend_data *bed = get_elf_backend_data (info->output_bfd);
4049 bfd_vma static_tls_size;
4050
4051 /* If tls_sec is NULL, we should have signalled an error already. */
4052 if (htab->tls_sec == NULL)
4053 return 0;
4054
4055 /* Consider special static TLS alignment requirements. */
4056 static_tls_size = BFD_ALIGN (htab->tls_size, bed->static_tls_alignment);
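/* The result is negative for addresses inside the TLS segment: for
   example, a symbol at the very start of the segment (ADDRESS equal
   to tls_sec->vma) yields -static_tls_size, matching the x86-64 TLS
   layout in which the static TLS block ends at the thread
   pointer.  */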
4057 return address - static_tls_size - htab->tls_sec->vma;
4058 }
4059
4060 /* Is the instruction before OFFSET in CONTENTS a 32bit relative
4061 branch? */
4062
4063 static bfd_boolean
4064 is_32bit_relative_branch (bfd_byte *contents, bfd_vma offset)
4065 {
4066 /* Opcode Instruction
4067 0xe8 call
4068 0xe9 jump
4069 0x0f 0x8x conditional jump */
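/* OFFSET is the reloc offset of the rel32 operand, so the e8/e9
   opcode of a call/jmp is the byte just before it, while the
   two-byte jcc opcode (0f 8x) occupies the two bytes before it.  */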
4070 return ((offset > 0
4071 && (contents [offset - 1] == 0xe8
4072 || contents [offset - 1] == 0xe9))
4073 || (offset > 1
4074 && contents [offset - 2] == 0x0f
4075 && (contents [offset - 1] & 0xf0) == 0x80));
4076 }
4077
4078 /* Relocate an x86_64 ELF section. */
4079
4080 static bfd_boolean
4081 elf_x86_64_relocate_section (bfd *output_bfd,
4082 struct bfd_link_info *info,
4083 bfd *input_bfd,
4084 asection *input_section,
4085 bfd_byte *contents,
4086 Elf_Internal_Rela *relocs,
4087 Elf_Internal_Sym *local_syms,
4088 asection **local_sections)
4089 {
4090 struct elf_x86_64_link_hash_table *htab;
4091 Elf_Internal_Shdr *symtab_hdr;
4092 struct elf_link_hash_entry **sym_hashes;
4093 bfd_vma *local_got_offsets;
4094 bfd_vma *local_tlsdesc_gotents;
4095 Elf_Internal_Rela *rel;
4096 Elf_Internal_Rela *wrel;
4097 Elf_Internal_Rela *relend;
4098 const unsigned int plt_entry_size = GET_PLT_ENTRY_SIZE (info->output_bfd);
4099
4100 BFD_ASSERT (is_x86_64_elf (input_bfd));
4101
4102 /* Skip if check_relocs failed. */
4103 if (input_section->check_relocs_failed)
4104 return FALSE;
4105
4106 htab = elf_x86_64_hash_table (info);
4107 if (htab == NULL)
4108 return FALSE;
4109 symtab_hdr = &elf_symtab_hdr (input_bfd);
4110 sym_hashes = elf_sym_hashes (input_bfd);
4111 local_got_offsets = elf_local_got_offsets (input_bfd);
4112 local_tlsdesc_gotents = elf_x86_64_local_tlsdesc_gotent (input_bfd);
4113
4114 elf_x86_64_set_tls_module_base (info);
4115
4116 rel = wrel = relocs;
4117 relend = relocs + input_section->reloc_count;
4118 for (; rel < relend; wrel++, rel++)
4119 {
4120 unsigned int r_type;
4121 reloc_howto_type *howto;
4122 unsigned long r_symndx;
4123 struct elf_link_hash_entry *h;
4124 struct elf_x86_64_link_hash_entry *eh;
4125 Elf_Internal_Sym *sym;
4126 asection *sec;
4127 bfd_vma off, offplt, plt_offset;
4128 bfd_vma relocation;
4129 bfd_boolean unresolved_reloc;
4130 bfd_reloc_status_type r;
4131 int tls_type;
4132 asection *base_got, *resolved_plt;
4133 bfd_vma st_size;
4134 bfd_boolean resolved_to_zero;
4135
4136 r_type = ELF32_R_TYPE (rel->r_info);
4137 if (r_type == (int) R_X86_64_GNU_VTINHERIT
4138 || r_type == (int) R_X86_64_GNU_VTENTRY)
4139 {
4140 if (wrel != rel)
4141 *wrel = *rel;
4142 continue;
4143 }
4144
4145 if (r_type >= (int) R_X86_64_standard)
4146 {
4147 (*_bfd_error_handler)
4148 (_("%B: unrecognized relocation (0x%x) in section `%A'"),
4149 input_bfd, input_section, r_type);
4150 bfd_set_error (bfd_error_bad_value);
4151 return FALSE;
4152 }
4153
4154 if (r_type != (int) R_X86_64_32
4155 || ABI_64_P (output_bfd))
4156 howto = x86_64_elf_howto_table + r_type;
4157 else
4158 howto = (x86_64_elf_howto_table
4159 + ARRAY_SIZE (x86_64_elf_howto_table) - 1);
4160 r_symndx = htab->r_sym (rel->r_info);
4161 h = NULL;
4162 sym = NULL;
4163 sec = NULL;
4164 unresolved_reloc = FALSE;
4165 if (r_symndx < symtab_hdr->sh_info)
4166 {
4167 sym = local_syms + r_symndx;
4168 sec = local_sections[r_symndx];
4169
4170 relocation = _bfd_elf_rela_local_sym (output_bfd, sym,
4171 &sec, rel);
4172 st_size = sym->st_size;
4173
4174 /* Relocate against local STT_GNU_IFUNC symbol. */
4175 if (!bfd_link_relocatable (info)
4176 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
4177 {
4178 h = elf_x86_64_get_local_sym_hash (htab, input_bfd,
4179 rel, FALSE);
4180 if (h == NULL)
4181 abort ();
4182
4183 /* Set STT_GNU_IFUNC symbol value. */
4184 h->root.u.def.value = sym->st_value;
4185 h->root.u.def.section = sec;
4186 }
4187 }
4188 else
4189 {
4190 bfd_boolean warned ATTRIBUTE_UNUSED;
4191 bfd_boolean ignored ATTRIBUTE_UNUSED;
4192
4193 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
4194 r_symndx, symtab_hdr, sym_hashes,
4195 h, sec, relocation,
4196 unresolved_reloc, warned, ignored);
4197 st_size = h->size;
4198 }
4199
4200 if (sec != NULL && discarded_section (sec))
4201 {
4202 _bfd_clear_contents (howto, input_bfd, input_section,
4203 contents + rel->r_offset);
4204 wrel->r_offset = rel->r_offset;
4205 wrel->r_info = 0;
4206 wrel->r_addend = 0;
4207
4208 /* For ld -r, remove relocations in debug sections against
4209 sections defined in discarded sections. This is not done for
4210 .eh_frame, whose editing code expects the relocations to be present. */
4211 if (bfd_link_relocatable (info)
4212 && (input_section->flags & SEC_DEBUGGING))
4213 wrel--;
4214
4215 continue;
4216 }
4217
4218 if (bfd_link_relocatable (info))
4219 {
4220 if (wrel != rel)
4221 *wrel = *rel;
4222 continue;
4223 }
4224
4225 if (rel->r_addend == 0 && !ABI_64_P (output_bfd))
4226 {
4227 if (r_type == R_X86_64_64)
4228 {
4229 /* For x32, treat R_X86_64_64 like R_X86_64_32 and
4230 zero-extend it to 64bit if addend is zero. */
4231 r_type = R_X86_64_32;
4232 memset (contents + rel->r_offset + 4, 0, 4);
4233 }
4234 else if (r_type == R_X86_64_SIZE64)
4235 {
4236 /* For x32, treat R_X86_64_SIZE64 like R_X86_64_SIZE32 and
4237 zero-extend it to 64bit if addend is zero. */
4238 r_type = R_X86_64_SIZE32;
4239 memset (contents + rel->r_offset + 4, 0, 4);
4240 }
4241 }
4242
4243 eh = (struct elf_x86_64_link_hash_entry *) h;
4244
4245 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
4246 it here if it is defined in a non-shared object. */
4247 if (h != NULL
4248 && h->type == STT_GNU_IFUNC
4249 && h->def_regular)
4250 {
4251 bfd_vma plt_index;
4252 const char *name;
4253
4254 if ((input_section->flags & SEC_ALLOC) == 0)
4255 {
4256 /* Dynamic relocs are not propagated for SEC_DEBUGGING
4257 sections because such sections are not SEC_ALLOC and
4258 thus ld.so will not process them. */
4259 if ((input_section->flags & SEC_DEBUGGING) != 0)
4260 continue;
4261 abort ();
4262 }
4263
4264 switch (r_type)
4265 {
4266 default:
4267 break;
4268
4269 case R_X86_64_GOTPCREL:
4270 case R_X86_64_GOTPCRELX:
4271 case R_X86_64_REX_GOTPCRELX:
4272 case R_X86_64_GOTPCREL64:
4273 base_got = htab->elf.sgot;
4274 off = h->got.offset;
4275
4276 if (base_got == NULL)
4277 abort ();
4278
4279 if (off == (bfd_vma) -1)
4280 {
4281 /* We can't use h->got.offset here to save state, or
4282 even just remember the offset, as finish_dynamic_symbol
4283 would use that as offset into .got. */
4284
4285 if (h->plt.offset == (bfd_vma) -1)
4286 abort ();
4287
4288 if (htab->elf.splt != NULL)
4289 {
4290 plt_index = h->plt.offset / plt_entry_size - 1;
4291 off = (plt_index + 3) * GOT_ENTRY_SIZE;
4292 base_got = htab->elf.sgotplt;
4293 }
4294 else
4295 {
4296 plt_index = h->plt.offset / plt_entry_size;
4297 off = plt_index * GOT_ENTRY_SIZE;
4298 base_got = htab->elf.igotplt;
4299 }
4300
4301 if (h->dynindx == -1
4302 || h->forced_local
4303 || info->symbolic)
4304 {
4305 /* This references the local definition. We must
4306 initialize this entry in the global offset table.
4307 Since the offset must always be a multiple of 8,
4308 we use the least significant bit to record
4309 whether we have initialized it already.
4310
4311 When doing a dynamic link, we create a .rela.got
4312 relocation entry to initialize the value. This
4313 is done in the finish_dynamic_symbol routine. */
4314 if ((off & 1) != 0)
4315 off &= ~1;
4316 else
4317 {
4318 bfd_put_64 (output_bfd, relocation,
4319 base_got->contents + off);
4320 /* Note that this is harmless for the GOTPLT64
4321 case, as -1 | 1 still is -1. */
4322 h->got.offset |= 1;
4323 }
4324 }
4325 }
4326
4327 relocation = (base_got->output_section->vma
4328 + base_got->output_offset + off);
4329
4330 goto do_relocation;
4331 }
4332
4333 if (h->plt.offset == (bfd_vma) -1)
4334 {
4335 /* Handle static pointers of STT_GNU_IFUNC symbols. */
4336 if (r_type == htab->pointer_r_type
4337 && (input_section->flags & SEC_CODE) == 0)
4338 goto do_ifunc_pointer;
4339 goto bad_ifunc_reloc;
4340 }
4341
4342 /* STT_GNU_IFUNC symbol must go through PLT. */
4343 if (htab->elf.splt != NULL)
4344 {
4345 if (htab->plt_bnd != NULL)
4346 {
4347 resolved_plt = htab->plt_bnd;
4348 plt_offset = eh->plt_bnd.offset;
4349 }
4350 else
4351 {
4352 resolved_plt = htab->elf.splt;
4353 plt_offset = h->plt.offset;
4354 }
4355 }
4356 else
4357 {
4358 resolved_plt = htab->elf.iplt;
4359 plt_offset = h->plt.offset;
4360 }
4361
4362 relocation = (resolved_plt->output_section->vma
4363 + resolved_plt->output_offset + plt_offset);
4364
4365 switch (r_type)
4366 {
4367 default:
4368 bad_ifunc_reloc:
4369 if (h->root.root.string)
4370 name = h->root.root.string;
4371 else
4372 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
4373 NULL);
4374 (*_bfd_error_handler)
4375 (_("%B: relocation %s against STT_GNU_IFUNC "
4376 "symbol `%s' isn't supported"), input_bfd,
4377 howto->name, name);
4378 bfd_set_error (bfd_error_bad_value);
4379 return FALSE;
4380
4381 case R_X86_64_32S:
4382 if (bfd_link_pic (info))
4383 abort ();
4384 goto do_relocation;
4385
4386 case R_X86_64_32:
4387 if (ABI_64_P (output_bfd))
4388 goto do_relocation;
4389 /* FALLTHROUGH */
4390 case R_X86_64_64:
4391 do_ifunc_pointer:
4392 if (rel->r_addend != 0)
4393 {
4394 if (h->root.root.string)
4395 name = h->root.root.string;
4396 else
4397 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
4398 sym, NULL);
4399 (*_bfd_error_handler)
4400 (_("%B: relocation %s against STT_GNU_IFUNC "
4401 "symbol `%s' has non-zero addend: %d"),
4402 input_bfd, howto->name, name, rel->r_addend);
4403 bfd_set_error (bfd_error_bad_value);
4404 return FALSE;
4405 }
4406
4407 /* Generate a dynamic relocation only when there is a
4408 non-GOT reference in a shared object or there is no
4409 PLT. */
4410 if ((bfd_link_pic (info) && h->non_got_ref)
4411 || h->plt.offset == (bfd_vma) -1)
4412 {
4413 Elf_Internal_Rela outrel;
4414 asection *sreloc;
4415
4416 /* Need a dynamic relocation to get the real function
4417 address. */
4418 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
4419 info,
4420 input_section,
4421 rel->r_offset);
4422 if (outrel.r_offset == (bfd_vma) -1
4423 || outrel.r_offset == (bfd_vma) -2)
4424 abort ();
4425
4426 outrel.r_offset += (input_section->output_section->vma
4427 + input_section->output_offset);
4428
4429 if (h->dynindx == -1
4430 || h->forced_local
4431 || bfd_link_executable (info))
4432 {
4433 /* This symbol is resolved locally. */
4434 outrel.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
4435 outrel.r_addend = (h->root.u.def.value
4436 + h->root.u.def.section->output_section->vma
4437 + h->root.u.def.section->output_offset);
4438 }
4439 else
4440 {
4441 outrel.r_info = htab->r_info (h->dynindx, r_type);
4442 outrel.r_addend = 0;
4443 }
4444
4445 /* Dynamic relocations are stored in
4446 1. .rela.ifunc section in PIC object.
4447 2. .rela.got section in dynamic executable.
4448 3. .rela.iplt section in static executable. */
4449 if (bfd_link_pic (info))
4450 sreloc = htab->elf.irelifunc;
4451 else if (htab->elf.splt != NULL)
4452 sreloc = htab->elf.srelgot;
4453 else
4454 sreloc = htab->elf.irelplt;
4455 elf_append_rela (output_bfd, sreloc, &outrel);
4456
4457 /* If this reloc is against an external symbol, we
4458 do not want to fiddle with the addend. Otherwise,
4459 we need to include the symbol value so that it
4460 becomes an addend for the dynamic reloc. For an
4461 internal symbol, we have updated addend. */
4462 continue;
4463 }
4464 /* FALLTHROUGH */
4465 case R_X86_64_PC32:
4466 case R_X86_64_PC32_BND:
4467 case R_X86_64_PC64:
4468 case R_X86_64_PLT32:
4469 case R_X86_64_PLT32_BND:
4470 goto do_relocation;
4471 }
4472 }
4473
4474 resolved_to_zero = (eh != NULL
4475 && UNDEFINED_WEAK_RESOLVED_TO_ZERO (info,
4476 eh->has_got_reloc,
4477 eh));
4478
4479 /* When generating a shared object, the relocations handled here are
4480 copied into the output file to be resolved at run time. */
4481 switch (r_type)
4482 {
4483 case R_X86_64_GOT32:
4484 case R_X86_64_GOT64:
4485 /* Relocation is to the entry for this symbol in the global
4486 offset table. */
4487 case R_X86_64_GOTPCREL:
4488 case R_X86_64_GOTPCRELX:
4489 case R_X86_64_REX_GOTPCRELX:
4490 case R_X86_64_GOTPCREL64:
4491 /* Use global offset table entry as symbol value. */
4492 case R_X86_64_GOTPLT64:
4493 /* This is obsolete and treated the same as GOT64. */
4494 base_got = htab->elf.sgot;
4495
4496 if (htab->elf.sgot == NULL)
4497 abort ();
4498
4499 if (h != NULL)
4500 {
4501 bfd_boolean dyn;
4502
4503 off = h->got.offset;
4504 if (h->needs_plt
4505 && h->plt.offset != (bfd_vma)-1
4506 && off == (bfd_vma)-1)
4507 {
4508 /* We can't use h->got.offset here to save
4509 state, or even just remember the offset, as
4510 finish_dynamic_symbol would use that as offset into
4511 .got. */
4512 bfd_vma plt_index = h->plt.offset / plt_entry_size - 1;
4513 off = (plt_index + 3) * GOT_ENTRY_SIZE;
4514 base_got = htab->elf.sgotplt;
4515 }
4516
4517 dyn = htab->elf.dynamic_sections_created;
4518
4519 if (! WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, bfd_link_pic (info), h)
4520 || (bfd_link_pic (info)
4521 && SYMBOL_REFERENCES_LOCAL (info, h))
4522 || (ELF_ST_VISIBILITY (h->other)
4523 && h->root.type == bfd_link_hash_undefweak))
4524 {
4525 /* This is actually a static link, or it is a -Bsymbolic
4526 link and the symbol is defined locally, or the symbol
4527 was forced to be local because of a version file. We
4528 must initialize this entry in the global offset table.
4529 Since the offset must always be a multiple of 8, we
4530 use the least significant bit to record whether we
4531 have initialized it already.
4532
4533 When doing a dynamic link, we create a .rela.got
4534 relocation entry to initialize the value. This is
4535 done in the finish_dynamic_symbol routine. */
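/* GOT offsets are multiples of GOT_ENTRY_SIZE, so bit 0 is free: a
   slot at offset 0x18, say, is recorded as 0x19 once it has been
   filled in, and masking the bit off below recovers the real
   offset.  */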
4536 if ((off & 1) != 0)
4537 off &= ~1;
4538 else
4539 {
4540 bfd_put_64 (output_bfd, relocation,
4541 base_got->contents + off);
4542 /* Note that this is harmless for the GOTPLT64 case,
4543 as -1 | 1 still is -1. */
4544 h->got.offset |= 1;
4545 }
4546 }
4547 else
4548 unresolved_reloc = FALSE;
4549 }
4550 else
4551 {
4552 if (local_got_offsets == NULL)
4553 abort ();
4554
4555 off = local_got_offsets[r_symndx];
4556
4557 /* The offset must always be a multiple of 8. We use
4558 the least significant bit to record whether we have
4559 already generated the necessary reloc. */
4560 if ((off & 1) != 0)
4561 off &= ~1;
4562 else
4563 {
4564 bfd_put_64 (output_bfd, relocation,
4565 base_got->contents + off);
4566
4567 if (bfd_link_pic (info))
4568 {
4569 asection *s;
4570 Elf_Internal_Rela outrel;
4571
4572 /* We need to generate a R_X86_64_RELATIVE reloc
4573 for the dynamic linker. */
4574 s = htab->elf.srelgot;
4575 if (s == NULL)
4576 abort ();
4577
4578 outrel.r_offset = (base_got->output_section->vma
4579 + base_got->output_offset
4580 + off);
4581 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4582 outrel.r_addend = relocation;
4583 elf_append_rela (output_bfd, s, &outrel);
4584 }
4585
4586 local_got_offsets[r_symndx] |= 1;
4587 }
4588 }
4589
4590 if (off >= (bfd_vma) -2)
4591 abort ();
4592
4593 relocation = base_got->output_section->vma
4594 + base_got->output_offset + off;
4595 if (r_type != R_X86_64_GOTPCREL
4596 && r_type != R_X86_64_GOTPCRELX
4597 && r_type != R_X86_64_REX_GOTPCRELX
4598 && r_type != R_X86_64_GOTPCREL64)
4599 relocation -= htab->elf.sgotplt->output_section->vma
4600 - htab->elf.sgotplt->output_offset;
4601
4602 break;
4603
4604 case R_X86_64_GOTOFF64:
4605 /* Relocation is relative to the start of the global offset
4606 table. */
4607
4608 /* Check to make sure it isn't a protected function or data
4609 symbol for shared library since it may not be local when
4610 used as function address or with copy relocation. We also
4611 need to make sure that a symbol is referenced locally. */
4612 if (bfd_link_pic (info) && h)
4613 {
4614 if (!h->def_regular)
4615 {
4616 const char *v;
4617
4618 switch (ELF_ST_VISIBILITY (h->other))
4619 {
4620 case STV_HIDDEN:
4621 v = _("hidden symbol");
4622 break;
4623 case STV_INTERNAL:
4624 v = _("internal symbol");
4625 break;
4626 case STV_PROTECTED:
4627 v = _("protected symbol");
4628 break;
4629 default:
4630 v = _("symbol");
4631 break;
4632 }
4633
4634 (*_bfd_error_handler)
4635 (_("%B: relocation R_X86_64_GOTOFF64 against undefined %s `%s' can not be used when making a shared object"),
4636 input_bfd, v, h->root.root.string);
4637 bfd_set_error (bfd_error_bad_value);
4638 return FALSE;
4639 }
4640 else if (!bfd_link_executable (info)
4641 && !SYMBOL_REFERENCES_LOCAL (info, h)
4642 && (h->type == STT_FUNC
4643 || h->type == STT_OBJECT)
4644 && ELF_ST_VISIBILITY (h->other) == STV_PROTECTED)
4645 {
4646 (*_bfd_error_handler)
4647 (_("%B: relocation R_X86_64_GOTOFF64 against protected %s `%s' can not be used when making a shared object"),
4648 input_bfd,
4649 h->type == STT_FUNC ? "function" : "data",
4650 h->root.root.string);
4651 bfd_set_error (bfd_error_bad_value);
4652 return FALSE;
4653 }
4654 }
4655
4656 /* Note that sgot is not involved in this
4657 calculation. We always want the start of .got.plt. If we
4658 defined _GLOBAL_OFFSET_TABLE_ in a different way, as is
4659 permitted by the ABI, we might have to change this
4660 calculation. */
4661 relocation -= htab->elf.sgotplt->output_section->vma
4662 + htab->elf.sgotplt->output_offset;
4663 break;
4664
4665 case R_X86_64_GOTPC32:
4666 case R_X86_64_GOTPC64:
4667 /* Use global offset table as symbol value. */
4668 relocation = htab->elf.sgotplt->output_section->vma
4669 + htab->elf.sgotplt->output_offset;
4670 unresolved_reloc = FALSE;
4671 break;
4672
4673 case R_X86_64_PLTOFF64:
4674 /* Relocation is PLT entry relative to GOT. For local
4675 symbols it's the symbol itself relative to GOT. */
4676 if (h != NULL
4677 /* See PLT32 handling. */
4678 && h->plt.offset != (bfd_vma) -1
4679 && htab->elf.splt != NULL)
4680 {
4681 if (htab->plt_bnd != NULL)
4682 {
4683 resolved_plt = htab->plt_bnd;
4684 plt_offset = eh->plt_bnd.offset;
4685 }
4686 else
4687 {
4688 resolved_plt = htab->elf.splt;
4689 plt_offset = h->plt.offset;
4690 }
4691
4692 relocation = (resolved_plt->output_section->vma
4693 + resolved_plt->output_offset
4694 + plt_offset);
4695 unresolved_reloc = FALSE;
4696 }
4697
4698 relocation -= htab->elf.sgotplt->output_section->vma
4699 + htab->elf.sgotplt->output_offset;
4700 break;
4701
4702 case R_X86_64_PLT32:
4703 case R_X86_64_PLT32_BND:
4704 /* Relocation is to the entry for this symbol in the
4705 procedure linkage table. */
4706
4707 /* Resolve a PLT32 reloc against a local symbol directly,
4708 without using the procedure linkage table. */
4709 if (h == NULL)
4710 break;
4711
4712 if ((h->plt.offset == (bfd_vma) -1
4713 && eh->plt_got.offset == (bfd_vma) -1)
4714 || htab->elf.splt == NULL)
4715 {
4716 /* We didn't make a PLT entry for this symbol. This
4717 happens when statically linking PIC code, or when
4718 using -Bsymbolic. */
4719 break;
4720 }
4721
4722 if (h->plt.offset != (bfd_vma) -1)
4723 {
4724 if (htab->plt_bnd != NULL)
4725 {
4726 resolved_plt = htab->plt_bnd;
4727 plt_offset = eh->plt_bnd.offset;
4728 }
4729 else
4730 {
4731 resolved_plt = htab->elf.splt;
4732 plt_offset = h->plt.offset;
4733 }
4734 }
4735 else
4736 {
4737 /* Use the GOT PLT. */
4738 resolved_plt = htab->plt_got;
4739 plt_offset = eh->plt_got.offset;
4740 }
4741
4742 relocation = (resolved_plt->output_section->vma
4743 + resolved_plt->output_offset
4744 + plt_offset);
4745 unresolved_reloc = FALSE;
4746 break;
4747
4748 case R_X86_64_SIZE32:
4749 case R_X86_64_SIZE64:
4750 /* Set to symbol size. */
4751 relocation = st_size;
4752 goto direct;
4753
4754 case R_X86_64_PC8:
4755 case R_X86_64_PC16:
4756 case R_X86_64_PC32:
4757 case R_X86_64_PC32_BND:
4758 /* Don't complain about -fPIC if the symbol is undefined when
4759 building executable unless it is unresolved weak symbol. */
4760 if ((input_section->flags & SEC_ALLOC) != 0
4761 && (input_section->flags & SEC_READONLY) != 0
4762 && h != NULL
4763 && ((bfd_link_executable (info)
4764 && h->root.type == bfd_link_hash_undefweak
4765 && !resolved_to_zero)
4766 || (bfd_link_pic (info)
4767 && !(bfd_link_pie (info)
4768 && h->root.type == bfd_link_hash_undefined))))
4769 {
4770 bfd_boolean fail = FALSE;
4771 bfd_boolean branch
4772 = ((r_type == R_X86_64_PC32
4773 || r_type == R_X86_64_PC32_BND)
4774 && is_32bit_relative_branch (contents, rel->r_offset));
4775
4776 if (SYMBOL_REFERENCES_LOCAL (info, h))
4777 {
4778 /* Symbol is referenced locally. Make sure it is
4779 defined locally or for a branch. */
4780 fail = !h->def_regular && !branch;
4781 }
4782 else if (!(bfd_link_pie (info)
4783 && (h->needs_copy || eh->needs_copy)))
4784 {
4785 /* Symbol doesn't need copy reloc and isn't referenced
4786 locally. We only allow branch to symbol with
4787 non-default visibility. */
4788 fail = (!branch
4789 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT);
4790 }
4791
4792 if (fail)
4793 return elf_x86_64_need_pic (input_bfd, input_section,
4794 h, NULL, NULL, howto);
4795 }
4796 /* Fall through. */
4797
4798 case R_X86_64_8:
4799 case R_X86_64_16:
4800 case R_X86_64_32:
4801 case R_X86_64_PC64:
4802 case R_X86_64_64:
4803 /* FIXME: The ABI says the linker should make sure the value is
4804 the same when it's zero-extended to 64 bits. */
4805
4806 direct:
4807 if ((input_section->flags & SEC_ALLOC) == 0)
4808 break;
4809
4810 /* Don't copy a pc-relative relocation into the output file
4811 if the symbol needs copy reloc or the symbol is undefined
4812 when building executable. Copy dynamic function pointer
4813 relocations. Don't generate dynamic relocations against
4814 resolved undefined weak symbols in PIE. */
4815 if ((bfd_link_pic (info)
4816 && !(bfd_link_pie (info)
4817 && h != NULL
4818 && (h->needs_copy
4819 || eh->needs_copy
4820 || h->root.type == bfd_link_hash_undefined)
4821 && IS_X86_64_PCREL_TYPE (r_type))
4822 && (h == NULL
4823 || ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4824 && !resolved_to_zero)
4825 || h->root.type != bfd_link_hash_undefweak))
4826 && ((! IS_X86_64_PCREL_TYPE (r_type)
4827 && r_type != R_X86_64_SIZE32
4828 && r_type != R_X86_64_SIZE64)
4829 || ! SYMBOL_CALLS_LOCAL (info, h)))
4830 || (ELIMINATE_COPY_RELOCS
4831 && !bfd_link_pic (info)
4832 && h != NULL
4833 && h->dynindx != -1
4834 && (!h->non_got_ref
4835 || eh->func_pointer_refcount > 0
4836 || (h->root.type == bfd_link_hash_undefweak
4837 && !resolved_to_zero))
4838 && ((h->def_dynamic && !h->def_regular)
4839 /* Undefined weak symbol is bound locally when
4840 PIC is false. */
4841 || h->root.type == bfd_link_hash_undefined)))
4842 {
4843 Elf_Internal_Rela outrel;
4844 bfd_boolean skip, relocate;
4845 asection *sreloc;
4846
4847 /* When generating a shared object, these relocations
4848 are copied into the output file to be resolved at run
4849 time. */
4850 skip = FALSE;
4851 relocate = FALSE;
4852
4853 outrel.r_offset =
4854 _bfd_elf_section_offset (output_bfd, info, input_section,
4855 rel->r_offset);
4856 if (outrel.r_offset == (bfd_vma) -1)
4857 skip = TRUE;
4858 else if (outrel.r_offset == (bfd_vma) -2)
4859 skip = TRUE, relocate = TRUE;
4860
4861 outrel.r_offset += (input_section->output_section->vma
4862 + input_section->output_offset);
4863
4864 if (skip)
4865 memset (&outrel, 0, sizeof outrel);
4866
4867 /* h->dynindx may be -1 if this symbol was marked to
4868 become local. */
4869 else if (h != NULL
4870 && h->dynindx != -1
4871 && (IS_X86_64_PCREL_TYPE (r_type)
4872 || !(bfd_link_executable (info)
4873 || SYMBOLIC_BIND (info, h))
4874 || ! h->def_regular))
4875 {
4876 outrel.r_info = htab->r_info (h->dynindx, r_type);
4877 outrel.r_addend = rel->r_addend;
4878 }
4879 else
4880 {
4881 /* This symbol is local, or marked to become local.
4882 When relocation overflow check is disabled, we
4883 convert R_X86_64_32 to dynamic R_X86_64_RELATIVE. */
4884 if (r_type == htab->pointer_r_type
4885 || (r_type == R_X86_64_32
4886 && info->no_reloc_overflow_check))
4887 {
4888 relocate = TRUE;
4889 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4890 outrel.r_addend = relocation + rel->r_addend;
4891 }
4892 else if (r_type == R_X86_64_64
4893 && !ABI_64_P (output_bfd))
4894 {
4895 relocate = TRUE;
4896 outrel.r_info = htab->r_info (0,
4897 R_X86_64_RELATIVE64);
4898 outrel.r_addend = relocation + rel->r_addend;
4899 /* Check addend overflow. */
4900 if ((outrel.r_addend & 0x80000000)
4901 != (rel->r_addend & 0x80000000))
4902 {
4903 const char *name;
4904 int addend = rel->r_addend;
4905 if (h && h->root.root.string)
4906 name = h->root.root.string;
4907 else
4908 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
4909 sym, NULL);
4910 if (addend < 0)
4911 (*_bfd_error_handler)
4912 (_("%B: addend -0x%x in relocation %s against "
4913 "symbol `%s' at 0x%lx in section `%A' is "
4914 "out of range"),
4915 input_bfd, input_section, addend,
4916 howto->name, name,
4917 (unsigned long) rel->r_offset);
4918 else
4919 (*_bfd_error_handler)
4920 (_("%B: addend 0x%x in relocation %s against "
4921 "symbol `%s' at 0x%lx in section `%A' is "
4922 "out of range"),
4923 input_bfd, input_section, addend,
4924 howto->name, name,
4925 (unsigned long) rel->r_offset);
4926 bfd_set_error (bfd_error_bad_value);
4927 return FALSE;
4928 }
4929 }
4930 else
4931 {
4932 long sindx;
4933
4934 if (bfd_is_abs_section (sec))
4935 sindx = 0;
4936 else if (sec == NULL || sec->owner == NULL)
4937 {
4938 bfd_set_error (bfd_error_bad_value);
4939 return FALSE;
4940 }
4941 else
4942 {
4943 asection *osec;
4944
4945 /* We are turning this relocation into one
4946 against a section symbol. It would be
4947 proper to subtract the symbol's value,
4948 osec->vma, from the emitted reloc addend,
4949 but ld.so expects buggy relocs. */
4950 osec = sec->output_section;
4951 sindx = elf_section_data (osec)->dynindx;
4952 if (sindx == 0)
4953 {
4954 asection *oi = htab->elf.text_index_section;
4955 sindx = elf_section_data (oi)->dynindx;
4956 }
4957 BFD_ASSERT (sindx != 0);
4958 }
4959
4960 outrel.r_info = htab->r_info (sindx, r_type);
4961 outrel.r_addend = relocation + rel->r_addend;
4962 }
4963 }
4964
4965 sreloc = elf_section_data (input_section)->sreloc;
4966
4967 if (sreloc == NULL || sreloc->contents == NULL)
4968 {
4969 r = bfd_reloc_notsupported;
4970 goto check_relocation_error;
4971 }
4972
4973 elf_append_rela (output_bfd, sreloc, &outrel);
4974
4975 /* If this reloc is against an external symbol, we do
4976 not want to fiddle with the addend. Otherwise, we
4977 need to include the symbol value so that it becomes
4978 an addend for the dynamic reloc. */
4979 if (! relocate)
4980 continue;
4981 }
4982
4983 break;
4984
4985 case R_X86_64_TLSGD:
4986 case R_X86_64_GOTPC32_TLSDESC:
4987 case R_X86_64_TLSDESC_CALL:
4988 case R_X86_64_GOTTPOFF:
4989 tls_type = GOT_UNKNOWN;
4990 if (h == NULL && local_got_offsets)
4991 tls_type = elf_x86_64_local_got_tls_type (input_bfd) [r_symndx];
4992 else if (h != NULL)
4993 tls_type = elf_x86_64_hash_entry (h)->tls_type;
4994
4995 if (! elf_x86_64_tls_transition (info, input_bfd,
4996 input_section, contents,
4997 symtab_hdr, sym_hashes,
4998 &r_type, tls_type, rel,
4999 relend, h, r_symndx, TRUE))
5000 return FALSE;
5001
5002 if (r_type == R_X86_64_TPOFF32)
5003 {
5004 bfd_vma roff = rel->r_offset;
5005
5006 BFD_ASSERT (! unresolved_reloc);
5007
5008 if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSGD)
5009 {
5010 /* GD->LE transition. For 64bit, change
5011 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
5012 .word 0x6666; rex64; call __tls_get_addr@PLT
5013 or
5014 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
5015 .byte 0x66; rex64
5016 call *__tls_get_addr@GOTPCREL(%rip)
5017 which may be converted to
5018 addr32 call __tls_get_addr
5019 into:
5020 movq %fs:0, %rax
5021 leaq foo@tpoff(%rax), %rax
5022 For 32bit, change
5023 leaq foo@tlsgd(%rip), %rdi
5024 .word 0x6666; rex64; call __tls_get_addr@PLT
5025 or
5026 leaq foo@tlsgd(%rip), %rdi
5027 .byte 0x66; rex64
5028 call *__tls_get_addr@GOTPCREL(%rip)
5029 which may be converted to
5030 addr32 call __tls_get_addr
5031 into:
5032 movl %fs:0, %eax
5033 leaq foo@tpoff(%rax), %rax
5034 For largepic, change:
5035 leaq foo@tlsgd(%rip), %rdi
5036 movabsq $__tls_get_addr@pltoff, %rax
5037 addq %r15, %rax
5038 call *%rax
5039 into:
5040 movq %fs:0, %rax
5041 leaq foo@tpoff(%rax), %rax
5042 nopw 0x0(%rax,%rax,1) */
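/* The replacement bytes used below decode as (sketch):
   64 48 8b 04 25 00 00 00 00   movq %fs:0, %rax
   48 8d 80 xx xx xx xx         leaq tpoff(%rax), %rax
   plus 66 0f 1f 44 00 00 (a nopw) as padding in the largepic case;
   the 32-bit tpoff itself is stored by the bfd_put_32 call that
   follows.  */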
5043 int largepic = 0;
5044 if (ABI_64_P (output_bfd))
5045 {
5046 if (contents[roff + 5] == 0xb8)
5047 {
5048 memcpy (contents + roff - 3,
5049 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80"
5050 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
5051 largepic = 1;
5052 }
5053 else
5054 memcpy (contents + roff - 4,
5055 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
5056 16);
5057 }
5058 else
5059 memcpy (contents + roff - 3,
5060 "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
5061 15);
5062 bfd_put_32 (output_bfd,
5063 elf_x86_64_tpoff (info, relocation),
5064 contents + roff + 8 + largepic);
5065 /* Skip R_X86_64_PC32, R_X86_64_PLT32,
5066 R_X86_64_GOTPCRELX and R_X86_64_PLTOFF64. */
5067 rel++;
5068 wrel++;
5069 continue;
5070 }
5071 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTPC32_TLSDESC)
5072 {
5073 /* GDesc -> LE transition.
5074 It's originally something like:
5075 leaq x@tlsdesc(%rip), %rax
5076
5077 Change it to:
5078 movl $x@tpoff, %rax. */
5079
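/* TYPE below is the original REX prefix and VAL the ModRM byte of
   the leaq; the rewrite keeps the destination register by copying
   the old REX.R bit into the new REX.B bit and the old ModRM reg
   field into the new rm field, then emits opcode 0xc7 /0, i.e. a
   mov of a 32-bit immediate into that register.  */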
5080 unsigned int val, type;
5081
5082 type = bfd_get_8 (input_bfd, contents + roff - 3);
5083 val = bfd_get_8 (input_bfd, contents + roff - 1);
5084 bfd_put_8 (output_bfd, 0x48 | ((type >> 2) & 1),
5085 contents + roff - 3);
5086 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
5087 bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7),
5088 contents + roff - 1);
5089 bfd_put_32 (output_bfd,
5090 elf_x86_64_tpoff (info, relocation),
5091 contents + roff);
5092 continue;
5093 }
5094 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSDESC_CALL)
5095 {
5096 /* GDesc -> LE transition.
5097 It's originally:
5098 call *(%rax)
5099 Turn it into:
5100 xchg %ax,%ax. */
5101 bfd_put_8 (output_bfd, 0x66, contents + roff);
5102 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
5103 continue;
5104 }
5105 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTTPOFF)
5106 {
5107 /* IE->LE transition:
5108 For 64bit, originally it can be one of:
5109 movq foo@gottpoff(%rip), %reg
5110 addq foo@gottpoff(%rip), %reg
5111 We change it into:
5112 movq $foo, %reg
5113 leaq foo(%reg), %reg
5114 addq $foo, %reg.
5115 For 32bit, originally it can be one of:
5116 movq foo@gottpoff(%rip), %reg
5117 addl foo@gottpoff(%rip), %reg
5118 We change it into:
5119 movq $foo, %reg
5120 leal foo(%reg), %reg
5121 addl $foo, %reg. */
5122
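/* Sketch of the rewrite below: VAL is the REX prefix (if any), TYPE
   the opcode (0x8b for mov, otherwise assumed to be add) and REG the
   destination taken from the ModRM reg field.  mov becomes c7 /0
   (mov $tpoff, %reg); add with %rsp or %r12 becomes 81 /0
   (add $tpoff, %reg); any other add becomes 8d with a mod=10 ModRM,
   i.e. lea tpoff(%reg), %reg.  */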
5123 unsigned int val, type, reg;
5124
5125 if (roff >= 3)
5126 val = bfd_get_8 (input_bfd, contents + roff - 3);
5127 else
5128 val = 0;
5129 type = bfd_get_8 (input_bfd, contents + roff - 2);
5130 reg = bfd_get_8 (input_bfd, contents + roff - 1);
5131 reg >>= 3;
5132 if (type == 0x8b)
5133 {
5134 /* movq */
5135 if (val == 0x4c)
5136 bfd_put_8 (output_bfd, 0x49,
5137 contents + roff - 3);
5138 else if (!ABI_64_P (output_bfd) && val == 0x44)
5139 bfd_put_8 (output_bfd, 0x41,
5140 contents + roff - 3);
5141 bfd_put_8 (output_bfd, 0xc7,
5142 contents + roff - 2);
5143 bfd_put_8 (output_bfd, 0xc0 | reg,
5144 contents + roff - 1);
5145 }
5146 else if (reg == 4)
5147 {
5148 /* addq/addl -> addq/addl - addressing with %rsp/%r12
5149 is special */
5150 if (val == 0x4c)
5151 bfd_put_8 (output_bfd, 0x49,
5152 contents + roff - 3);
5153 else if (!ABI_64_P (output_bfd) && val == 0x44)
5154 bfd_put_8 (output_bfd, 0x41,
5155 contents + roff - 3);
5156 bfd_put_8 (output_bfd, 0x81,
5157 contents + roff - 2);
5158 bfd_put_8 (output_bfd, 0xc0 | reg,
5159 contents + roff - 1);
5160 }
5161 else
5162 {
5163 /* addq/addl -> leaq/leal */
5164 if (val == 0x4c)
5165 bfd_put_8 (output_bfd, 0x4d,
5166 contents + roff - 3);
5167 else if (!ABI_64_P (output_bfd) && val == 0x44)
5168 bfd_put_8 (output_bfd, 0x45,
5169 contents + roff - 3);
5170 bfd_put_8 (output_bfd, 0x8d,
5171 contents + roff - 2);
5172 bfd_put_8 (output_bfd, 0x80 | reg | (reg << 3),
5173 contents + roff - 1);
5174 }
5175 bfd_put_32 (output_bfd,
5176 elf_x86_64_tpoff (info, relocation),
5177 contents + roff);
5178 continue;
5179 }
5180 else
5181 BFD_ASSERT (FALSE);
5182 }
5183
5184 if (htab->elf.sgot == NULL)
5185 abort ();
5186
5187 if (h != NULL)
5188 {
5189 off = h->got.offset;
5190 offplt = elf_x86_64_hash_entry (h)->tlsdesc_got;
5191 }
5192 else
5193 {
5194 if (local_got_offsets == NULL)
5195 abort ();
5196
5197 off = local_got_offsets[r_symndx];
5198 offplt = local_tlsdesc_gotents[r_symndx];
5199 }
5200
5201 if ((off & 1) != 0)
5202 off &= ~1;
5203 else
5204 {
5205 Elf_Internal_Rela outrel;
5206 int dr_type, indx;
5207 asection *sreloc;
5208
5209 if (htab->elf.srelgot == NULL)
5210 abort ();
5211
5212 indx = h && h->dynindx != -1 ? h->dynindx : 0;
5213
5214 if (GOT_TLS_GDESC_P (tls_type))
5215 {
5216 outrel.r_info = htab->r_info (indx, R_X86_64_TLSDESC);
5217 BFD_ASSERT (htab->sgotplt_jump_table_size + offplt
5218 + 2 * GOT_ENTRY_SIZE <= htab->elf.sgotplt->size);
5219 outrel.r_offset = (htab->elf.sgotplt->output_section->vma
5220 + htab->elf.sgotplt->output_offset
5221 + offplt
5222 + htab->sgotplt_jump_table_size);
5223 sreloc = htab->elf.srelplt;
5224 if (indx == 0)
5225 outrel.r_addend = relocation - elf_x86_64_dtpoff_base (info);
5226 else
5227 outrel.r_addend = 0;
5228 elf_append_rela (output_bfd, sreloc, &outrel);
5229 }
5230
5231 sreloc = htab->elf.srelgot;
5232
5233 outrel.r_offset = (htab->elf.sgot->output_section->vma
5234 + htab->elf.sgot->output_offset + off);
5235
5236 if (GOT_TLS_GD_P (tls_type))
5237 dr_type = R_X86_64_DTPMOD64;
5238 else if (GOT_TLS_GDESC_P (tls_type))
5239 goto dr_done;
5240 else
5241 dr_type = R_X86_64_TPOFF64;
5242
5243 bfd_put_64 (output_bfd, 0, htab->elf.sgot->contents + off);
5244 outrel.r_addend = 0;
5245 if ((dr_type == R_X86_64_TPOFF64
5246 || dr_type == R_X86_64_TLSDESC) && indx == 0)
5247 outrel.r_addend = relocation - elf_x86_64_dtpoff_base (info);
5248 outrel.r_info = htab->r_info (indx, dr_type);
5249
5250 elf_append_rela (output_bfd, sreloc, &outrel);
5251
5252 if (GOT_TLS_GD_P (tls_type))
5253 {
5254 if (indx == 0)
5255 {
5256 BFD_ASSERT (! unresolved_reloc);
5257 bfd_put_64 (output_bfd,
5258 relocation - elf_x86_64_dtpoff_base (info),
5259 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
5260 }
5261 else
5262 {
5263 bfd_put_64 (output_bfd, 0,
5264 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
5265 outrel.r_info = htab->r_info (indx,
5266 R_X86_64_DTPOFF64);
5267 outrel.r_offset += GOT_ENTRY_SIZE;
5268 elf_append_rela (output_bfd, sreloc,
5269 &outrel);
5270 }
5271 }
5272
5273 dr_done:
5274 if (h != NULL)
5275 h->got.offset |= 1;
5276 else
5277 local_got_offsets[r_symndx] |= 1;
5278 }
5279
5280 if (off >= (bfd_vma) -2
5281 && ! GOT_TLS_GDESC_P (tls_type))
5282 abort ();
5283 if (r_type == ELF32_R_TYPE (rel->r_info))
5284 {
5285 if (r_type == R_X86_64_GOTPC32_TLSDESC
5286 || r_type == R_X86_64_TLSDESC_CALL)
5287 relocation = htab->elf.sgotplt->output_section->vma
5288 + htab->elf.sgotplt->output_offset
5289 + offplt + htab->sgotplt_jump_table_size;
5290 else
5291 relocation = htab->elf.sgot->output_section->vma
5292 + htab->elf.sgot->output_offset + off;
5293 unresolved_reloc = FALSE;
5294 }
5295 else
5296 {
5297 bfd_vma roff = rel->r_offset;
5298
5299 if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSGD)
5300 {
5301 /* GD->IE transition. For 64bit, change
5302 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
5303 .word 0x6666; rex64; call __tls_get_addr@PLT
5304 or
5305 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
5306 .byte 0x66; rex64
5307 call *__tls_get_addr@GOTPCREL(%rip)
5308 which may be converted to
5309 addr32 call __tls_get_addr
5310 into:
5311 movq %fs:0, %rax
5312 addq foo@gottpoff(%rip), %rax
5313 For 32bit, change
5314 leaq foo@tlsgd(%rip), %rdi
5315 .word 0x6666; rex64; call __tls_get_addr@PLT
5316 or
5317 leaq foo@tlsgd(%rip), %rdi
5318 .byte 0x66; rex64;
5319 call *__tls_get_addr@GOTPCREL(%rip)
5320 which may be converted to
5321 addr32 call __tls_get_addr
5322 into:
5323 movl %fs:0, %eax
5324 addq foo@gottpoff(%rip), %rax
5325 For largepic, change:
5326 leaq foo@tlsgd(%rip), %rdi
5327 movabsq $__tls_get_addr@pltoff, %rax
5328 addq %r15, %rax
5329 call *%rax
5330 into:
5331 movq %fs:0, %rax
5332 addq foo@gottpoff(%rip), %rax
5333 nopw 0x0(%rax,%rax,1) */
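/* The replacement bytes used below decode as (sketch):
   64 48 8b 04 25 00 00 00 00   movq %fs:0, %rax
   48 03 05 xx xx xx xx         addq off(%rip), %rax
   where the PC-relative offset of the GOT slot is stored by the
   bfd_put_32 call that follows.  */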
5334 int largepic = 0;
5335 if (ABI_64_P (output_bfd))
5336 {
5337 if (contents[roff + 5] == 0xb8)
5338 {
5339 memcpy (contents + roff - 3,
5340 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05"
5341 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
5342 largepic = 1;
5343 }
5344 else
5345 memcpy (contents + roff - 4,
5346 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
5347 16);
5348 }
5349 else
5350 memcpy (contents + roff - 3,
5351 "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
5352 15);
5353
5354 relocation = (htab->elf.sgot->output_section->vma
5355 + htab->elf.sgot->output_offset + off
5356 - roff
5357 - largepic
5358 - input_section->output_section->vma
5359 - input_section->output_offset
5360 - 12);
5361 bfd_put_32 (output_bfd, relocation,
5362 contents + roff + 8 + largepic);
5363 /* Skip R_X86_64_PLT32/R_X86_64_PLTOFF64. */
5364 rel++;
5365 wrel++;
5366 continue;
5367 }
5368 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTPC32_TLSDESC)
5369 {
5370 /* GDesc -> IE transition.
5371 It's originally something like:
5372 leaq x@tlsdesc(%rip), %rax
5373
5374 Change it to:
5375 movq x@gottpoff(%rip), %rax # before xchg %ax,%ax. */
5376
5377 /* Now modify the instruction as appropriate. To
5378 turn a leaq into a movq in the form we use it, it
5379 suffices to change the second byte from 0x8d to
5380 0x8b. */
5381 bfd_put_8 (output_bfd, 0x8b, contents + roff - 2);
5382
5383 bfd_put_32 (output_bfd,
5384 htab->elf.sgot->output_section->vma
5385 + htab->elf.sgot->output_offset + off
5386 - rel->r_offset
5387 - input_section->output_section->vma
5388 - input_section->output_offset
5389 - 4,
5390 contents + roff);
5391 continue;
5392 }
5393 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSDESC_CALL)
5394 {
5395 /* GDesc -> IE transition.
5396 It's originally:
5397 call *(%rax)
5398
5399 Change it to:
5400 xchg %ax, %ax. */
5401
5402 bfd_put_8 (output_bfd, 0x66, contents + roff);
5403 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
5404 continue;
5405 }
5406 else
5407 BFD_ASSERT (FALSE);
5408 }
5409 break;
5410
5411 case R_X86_64_TLSLD:
5412 if (! elf_x86_64_tls_transition (info, input_bfd,
5413 input_section, contents,
5414 symtab_hdr, sym_hashes,
5415 &r_type, GOT_UNKNOWN, rel,
5416 relend, h, r_symndx, TRUE))
5417 return FALSE;
5418
5419 if (r_type != R_X86_64_TLSLD)
5420 {
5421 /* LD->LE transition:
5422 leaq foo@tlsld(%rip), %rdi
5423 call __tls_get_addr@PLT
5424 For 64bit, we change it into:
5425 .word 0x6666; .byte 0x66; movq %fs:0, %rax
5426 For 32bit, we change it into:
5427 nopl 0x0(%rax); movl %fs:0, %eax
5428 Or
5429 leaq foo@tlsld(%rip), %rdi;
5430 call *__tls_get_addr@GOTPCREL(%rip)
5431 which may be converted to
5432 addr32 call __tls_get_addr
5433 For 64bit, we change it into:
5434 .word 0x6666; .word 0x6666; movq %fs:0, %rax
5435 For 32bit, we change it into:
5436 nopw 0x0(%rax); movl %fs:0, %eax
5437 For largepic, change:
5438 leaq foo@tlsld(%rip), %rdi
5439 movabsq $__tls_get_addr@pltoff, %rax
5440 addq %rbx, %rax
5441 call *%rax
5442 into
5443 data16 data16 data16 nopw %cs:0x0(%rax,%rax,1)
5444 movq %fs:0, %rax */
5445
5446 BFD_ASSERT (r_type == R_X86_64_TPOFF32);
5447 if (ABI_64_P (output_bfd))
5448 {
5449 if (contents[rel->r_offset + 5] == 0xb8)
5450 memcpy (contents + rel->r_offset - 3,
5451 "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0"
5452 "\x64\x48\x8b\x04\x25\0\0\0", 22);
5453 else if (contents[rel->r_offset + 4] == 0xff
5454 || contents[rel->r_offset + 4] == 0x67)
5455 memcpy (contents + rel->r_offset - 3,
5456 "\x66\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0",
5457 13);
5458 else
5459 memcpy (contents + rel->r_offset - 3,
5460 "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12);
5461 }
5462 else
5463 {
5464 if (contents[rel->r_offset + 4] == 0xff)
5465 memcpy (contents + rel->r_offset - 3,
5466 "\x66\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0",
5467 13);
5468 else
5469 memcpy (contents + rel->r_offset - 3,
5470 "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12);
5471 }
5472 /* Skip R_X86_64_PC32, R_X86_64_PLT32, R_X86_64_GOTPCRELX
5473 and R_X86_64_PLTOFF64. */
5474 rel++;
5475 wrel++;
5476 continue;
5477 }
5478
5479 if (htab->elf.sgot == NULL)
5480 abort ();
5481
5482 off = htab->tls_ld_got.offset;
5483 if (off & 1)
5484 off &= ~1;
5485 else
5486 {
5487 Elf_Internal_Rela outrel;
5488
5489 if (htab->elf.srelgot == NULL)
5490 abort ();
5491
5492 outrel.r_offset = (htab->elf.sgot->output_section->vma
5493 + htab->elf.sgot->output_offset + off);
5494
5495 bfd_put_64 (output_bfd, 0,
5496 htab->elf.sgot->contents + off);
5497 bfd_put_64 (output_bfd, 0,
5498 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
5499 outrel.r_info = htab->r_info (0, R_X86_64_DTPMOD64);
5500 outrel.r_addend = 0;
5501 elf_append_rela (output_bfd, htab->elf.srelgot,
5502 &outrel);
5503 htab->tls_ld_got.offset |= 1;
5504 }
5505 relocation = htab->elf.sgot->output_section->vma
5506 + htab->elf.sgot->output_offset + off;
5507 unresolved_reloc = FALSE;
5508 break;
5509
5510 case R_X86_64_DTPOFF32:
5511 if (!bfd_link_executable (info)
5512 || (input_section->flags & SEC_CODE) == 0)
5513 relocation -= elf_x86_64_dtpoff_base (info);
5514 else
5515 relocation = elf_x86_64_tpoff (info, relocation);
5516 break;
5517
5518 case R_X86_64_TPOFF32:
5519 case R_X86_64_TPOFF64:
5520 BFD_ASSERT (bfd_link_executable (info));
5521 relocation = elf_x86_64_tpoff (info, relocation);
5522 break;
5523
5524 case R_X86_64_DTPOFF64:
5525 BFD_ASSERT ((input_section->flags & SEC_CODE) == 0);
5526 relocation -= elf_x86_64_dtpoff_base (info);
5527 break;
5528
5529 default:
5530 break;
5531 }
5532
5533 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
5534 because such sections are not SEC_ALLOC and thus ld.so will
5535 not process them. */
5536 if (unresolved_reloc
5537 && !((input_section->flags & SEC_DEBUGGING) != 0
5538 && h->def_dynamic)
5539 && _bfd_elf_section_offset (output_bfd, info, input_section,
5540 rel->r_offset) != (bfd_vma) -1)
5541 {
5542 (*_bfd_error_handler)
5543 (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
5544 input_bfd,
5545 input_section,
5546 (long) rel->r_offset,
5547 howto->name,
5548 h->root.root.string);
5549 return FALSE;
5550 }
5551
5552 do_relocation:
5553 r = _bfd_final_link_relocate (howto, input_bfd, input_section,
5554 contents, rel->r_offset,
5555 relocation, rel->r_addend);
5556
5557 check_relocation_error:
5558 if (r != bfd_reloc_ok)
5559 {
5560 const char *name;
5561
5562 if (h != NULL)
5563 name = h->root.root.string;
5564 else
5565 {
5566 name = bfd_elf_string_from_elf_section (input_bfd,
5567 symtab_hdr->sh_link,
5568 sym->st_name);
5569 if (name == NULL)
5570 return FALSE;
5571 if (*name == '\0')
5572 name = bfd_section_name (input_bfd, sec);
5573 }
5574
5575 if (r == bfd_reloc_overflow)
5576 (*info->callbacks->reloc_overflow)
5577 (info, (h ? &h->root : NULL), name, howto->name,
5578 (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
5579 else
5580 {
5581 (*_bfd_error_handler)
5582 (_("%B(%A+0x%lx): reloc against `%s': error %d"),
5583 input_bfd, input_section,
5584 (long) rel->r_offset, name, (int) r);
5585 return FALSE;
5586 }
5587 }
5588
5589 if (wrel != rel)
5590 *wrel = *rel;
5591 }
5592
5593 if (wrel != rel)
5594 {
5595 Elf_Internal_Shdr *rel_hdr;
5596 size_t deleted = rel - wrel;
5597
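/* WREL only falls behind REL when relocations were dropped above
   (relocations in debug sections against discarded sections under
   ld -r), so REL - WREL is the number of entries removed; shrink
   both the output and the input reloc section headers to match.  */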
5598 rel_hdr = _bfd_elf_single_rel_hdr (input_section->output_section);
5599 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
5600 if (rel_hdr->sh_size == 0)
5601 {
5602 /* It is too late to remove an empty reloc section. Leave
5603 one NONE reloc.
5604 ??? What is wrong with an empty section??? */
5605 rel_hdr->sh_size = rel_hdr->sh_entsize;
5606 deleted -= 1;
5607 }
5608 rel_hdr = _bfd_elf_single_rel_hdr (input_section);
5609 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
5610 input_section->reloc_count -= deleted;
5611 }
5612
5613 return TRUE;
5614 }
5615
5616 /* Finish up dynamic symbol handling. We set the contents of various
5617 dynamic sections here. */
5618
5619 static bfd_boolean
5620 elf_x86_64_finish_dynamic_symbol (bfd *output_bfd,
5621 struct bfd_link_info *info,
5622 struct elf_link_hash_entry *h,
5623 Elf_Internal_Sym *sym)
5624 {
5625 struct elf_x86_64_link_hash_table *htab;
5626 const struct elf_x86_64_backend_data *abed;
5627 bfd_boolean use_plt_bnd;
5628 struct elf_x86_64_link_hash_entry *eh;
5629 bfd_boolean local_undefweak;
5630
5631 htab = elf_x86_64_hash_table (info);
5632 if (htab == NULL)
5633 return FALSE;
5634
5635 	  /* Use MPX backend data in case of BND relocations.  Use the .plt.bnd
5636 	     section only if there is a .plt section.  */
5637 use_plt_bnd = htab->elf.splt != NULL && htab->plt_bnd != NULL;
5638 abed = (use_plt_bnd
5639 ? &elf_x86_64_bnd_arch_bed
5640 : get_elf_x86_64_backend_data (output_bfd));
5641
5642 eh = (struct elf_x86_64_link_hash_entry *) h;
5643
5644 	  /* We keep PLT/GOT entries without dynamic PLT/GOT relocations for
5645 	     resolved undefined weak symbols in an executable so that their
5646 	     references have value 0 at run-time.  */
5647 local_undefweak = UNDEFINED_WEAK_RESOLVED_TO_ZERO (info,
5648 eh->has_got_reloc,
5649 eh);
5650
5651 if (h->plt.offset != (bfd_vma) -1)
5652 {
5653 bfd_vma plt_index;
5654 bfd_vma got_offset, plt_offset, plt_plt_offset, plt_got_offset;
5655 bfd_vma plt_plt_insn_end, plt_got_insn_size;
5656 Elf_Internal_Rela rela;
5657 bfd_byte *loc;
5658 asection *plt, *gotplt, *relplt, *resolved_plt;
5659 const struct elf_backend_data *bed;
5660 bfd_vma plt_got_pcrel_offset;
5661
5662 /* When building a static executable, use .iplt, .igot.plt and
5663 .rela.iplt sections for STT_GNU_IFUNC symbols. */
5664 if (htab->elf.splt != NULL)
5665 {
5666 plt = htab->elf.splt;
5667 gotplt = htab->elf.sgotplt;
5668 relplt = htab->elf.srelplt;
5669 }
5670 else
5671 {
5672 plt = htab->elf.iplt;
5673 gotplt = htab->elf.igotplt;
5674 relplt = htab->elf.irelplt;
5675 }
5676
5677 /* This symbol has an entry in the procedure linkage table. Set
5678 it up. */
5679 if ((h->dynindx == -1
5680 && !local_undefweak
5681 && !((h->forced_local || bfd_link_executable (info))
5682 && h->def_regular
5683 && h->type == STT_GNU_IFUNC))
5684 || plt == NULL
5685 || gotplt == NULL
5686 || relplt == NULL)
5687 abort ();
5688
5689 /* Get the index in the procedure linkage table which
5690 corresponds to this symbol. This is the index of this symbol
5691 in all the symbols for which we are making plt entries. The
5692 first entry in the procedure linkage table is reserved.
5693
5694 Get the offset into the .got table of the entry that
5695 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
5696 bytes. The first three are reserved for the dynamic linker.
5697
5698 For static executables, we don't reserve anything. */
5699
5700 if (plt == htab->elf.splt)
5701 {
5702 got_offset = h->plt.offset / abed->plt_entry_size - 1;
5703 got_offset = (got_offset + 3) * GOT_ENTRY_SIZE;
5704 }
5705 else
5706 {
5707 got_offset = h->plt.offset / abed->plt_entry_size;
5708 got_offset = got_offset * GOT_ENTRY_SIZE;
5709 }
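	  /* For example, with the standard 16-byte PLT entries and 8-byte GOT
	     entries, the first real PLT entry (h->plt.offset == 16) maps to
	     got_offset == (16 / 16 - 1 + 3) * 8 == 24, i.e. the first
	     .got.plt slot after the three reserved ones.  */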
5710
5711 plt_plt_insn_end = abed->plt_plt_insn_end;
5712 plt_plt_offset = abed->plt_plt_offset;
5713 plt_got_insn_size = abed->plt_got_insn_size;
5714 plt_got_offset = abed->plt_got_offset;
5715 if (use_plt_bnd)
5716 {
5717 /* Use the second PLT with BND relocations. */
5718 const bfd_byte *plt_entry, *plt2_entry;
5719
5720 if (eh->has_bnd_reloc)
5721 {
5722 plt_entry = elf_x86_64_bnd_plt_entry;
5723 plt2_entry = elf_x86_64_bnd_plt2_entry;
5724 }
5725 else
5726 {
5727 plt_entry = elf_x86_64_legacy_plt_entry;
5728 plt2_entry = elf_x86_64_legacy_plt2_entry;
5729
5730 /* Subtract 1 since there is no BND prefix. */
5731 plt_plt_insn_end -= 1;
5732 plt_plt_offset -= 1;
5733 plt_got_insn_size -= 1;
5734 plt_got_offset -= 1;
5735 }
5736
5737 BFD_ASSERT (sizeof (elf_x86_64_bnd_plt_entry)
5738 == sizeof (elf_x86_64_legacy_plt_entry));
5739
5740 /* Fill in the entry in the procedure linkage table. */
5741 memcpy (plt->contents + h->plt.offset,
5742 plt_entry, sizeof (elf_x86_64_legacy_plt_entry));
5743 /* Fill in the entry in the second PLT. */
5744 memcpy (htab->plt_bnd->contents + eh->plt_bnd.offset,
5745 plt2_entry, sizeof (elf_x86_64_legacy_plt2_entry));
5746
5747 resolved_plt = htab->plt_bnd;
5748 plt_offset = eh->plt_bnd.offset;
5749 }
5750 else
5751 {
5752 /* Fill in the entry in the procedure linkage table. */
5753 memcpy (plt->contents + h->plt.offset, abed->plt_entry,
5754 abed->plt_entry_size);
5755
5756 resolved_plt = plt;
5757 plt_offset = h->plt.offset;
5758 }
5759
5760 	  /* Fill in the relocation-dependent parts of the PLT entry.  */
5761 
5762 	  /* Store the PC-relative offset to the GOT entry in the instruction
5763 	     that references it, subtracting the size of that instruction.  */
5764 plt_got_pcrel_offset = (gotplt->output_section->vma
5765 + gotplt->output_offset
5766 + got_offset
5767 - resolved_plt->output_section->vma
5768 - resolved_plt->output_offset
5769 - plt_offset
5770 - plt_got_insn_size);
5771
5772 /* Check PC-relative offset overflow in PLT entry. */
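	  /* Adding 0x80000000 maps every displacement representable in the
	     instruction's signed 32-bit field onto [0, 0xffffffff] in 64-bit
	     bfd_vma arithmetic, so anything larger is an overflow.  */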
5773 if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff)
5774 info->callbacks->einfo (_("%F%B: PC-relative offset overflow in PLT entry for `%s'\n"),
5775 output_bfd, h->root.root.string);
5776
5777 bfd_put_32 (output_bfd, plt_got_pcrel_offset,
5778 resolved_plt->contents + plt_offset + plt_got_offset);
5779
5780 	  /* Fill in the entry in the global offset table; initially it
5781 	     points to the second part of the PLT entry.  Leave the entry
5782 	     as zero for an undefined weak symbol in PIE; no PLT relocation
5783 	     is emitted against an undefined weak symbol in PIE.  */
5784 if (!local_undefweak)
5785 {
5786 bfd_put_64 (output_bfd, (plt->output_section->vma
5787 + plt->output_offset
5788 + h->plt.offset
5789 + abed->plt_lazy_offset),
5790 gotplt->contents + got_offset);
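	      /* abed->plt_lazy_offset is the offset of the lazy-binding stub
		 (the pushq of the relocation index) inside the PLT entry,
		 i.e. where execution resumes on the first, lazily-bound call
		 through this slot.  */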
5791
5792 /* Fill in the entry in the .rela.plt section. */
5793 rela.r_offset = (gotplt->output_section->vma
5794 + gotplt->output_offset
5795 + got_offset);
5796 if (h->dynindx == -1
5797 || ((bfd_link_executable (info)
5798 || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
5799 && h->def_regular
5800 && h->type == STT_GNU_IFUNC))
5801 {
5802 /* If an STT_GNU_IFUNC symbol is locally defined, generate
5803 R_X86_64_IRELATIVE instead of R_X86_64_JUMP_SLOT. */
5804 rela.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
5805 rela.r_addend = (h->root.u.def.value
5806 + h->root.u.def.section->output_section->vma
5807 + h->root.u.def.section->output_offset);
5808 /* R_X86_64_IRELATIVE comes last. */
5809 plt_index = htab->next_irelative_index--;
5810 }
5811 else
5812 {
5813 rela.r_info = htab->r_info (h->dynindx, R_X86_64_JUMP_SLOT);
5814 rela.r_addend = 0;
5815 plt_index = htab->next_jump_slot_index++;
5816 }
5817
5818 /* Don't fill PLT entry for static executables. */
5819 if (plt == htab->elf.splt)
5820 {
5821 bfd_vma plt0_offset = h->plt.offset + plt_plt_insn_end;
5822
5823 /* Put relocation index. */
5824 bfd_put_32 (output_bfd, plt_index,
5825 (plt->contents + h->plt.offset
5826 + abed->plt_reloc_offset));
5827
5828 	      /* Put the offset for jmp .PLT0 and check for overflow.  We don't
5829 		 check the relocation index for overflow since the branch
5830 		 displacement will overflow first.  */
5831 if (plt0_offset > 0x80000000)
5832 info->callbacks->einfo (_("%F%B: branch displacement overflow in PLT entry for `%s'\n"),
5833 output_bfd, h->root.root.string);
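	      /* PLT0 lives at offset 0 of .plt, so the displacement stored in
		 the jmp is just the negated distance from the end of that
		 instruction back to the start of the section.  */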
5834 bfd_put_32 (output_bfd, - plt0_offset,
5835 plt->contents + h->plt.offset + plt_plt_offset);
5836 }
5837
5838 bed = get_elf_backend_data (output_bfd);
5839 loc = relplt->contents + plt_index * bed->s->sizeof_rela;
5840 bed->s->swap_reloca_out (output_bfd, &rela, loc);
5841 }
5842 }
5843 else if (eh->plt_got.offset != (bfd_vma) -1)
5844 {
5845 bfd_vma got_offset, plt_offset, plt_got_offset, plt_got_insn_size;
5846 asection *plt, *got;
5847 bfd_boolean got_after_plt;
5848 int32_t got_pcrel_offset;
5849 const bfd_byte *got_plt_entry;
5850
5851 /* Set the entry in the GOT procedure linkage table. */
5852 plt = htab->plt_got;
5853 got = htab->elf.sgot;
5854 got_offset = h->got.offset;
5855
5856 if (got_offset == (bfd_vma) -1
5857 || h->type == STT_GNU_IFUNC
5858 || plt == NULL
5859 || got == NULL)
5860 abort ();
5861
5862 	      /* Use the second PLT entry template for the GOT PLT since they
5863 	         are identical.  */
5864 plt_got_insn_size = elf_x86_64_bnd_arch_bed.plt_got_insn_size;
5865 plt_got_offset = elf_x86_64_bnd_arch_bed.plt_got_offset;
5866 if (eh->has_bnd_reloc)
5867 got_plt_entry = elf_x86_64_bnd_plt2_entry;
5868 else
5869 {
5870 got_plt_entry = elf_x86_64_legacy_plt2_entry;
5871
5872 /* Subtract 1 since there is no BND prefix. */
5873 plt_got_insn_size -= 1;
5874 plt_got_offset -= 1;
5875 }
5876
5877 /* Fill in the entry in the GOT procedure linkage table. */
5878 plt_offset = eh->plt_got.offset;
5879 memcpy (plt->contents + plt_offset,
5880 got_plt_entry, sizeof (elf_x86_64_legacy_plt2_entry));
5881
5882 	      /* Store the PC-relative offset to the GOT entry in the instruction
5883 	         that references it, subtracting the size of that instruction.  */
5884 got_pcrel_offset = (got->output_section->vma
5885 + got->output_offset
5886 + got_offset
5887 - plt->output_section->vma
5888 - plt->output_offset
5889 - plt_offset
5890 - plt_got_insn_size);
5891
5892 /* Check PC-relative offset overflow in GOT PLT entry. */
5893 got_after_plt = got->output_section->vma > plt->output_section->vma;
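	      /* GOT_PCREL_OFFSET was truncated to 32 bits, so a sign that
		 disagrees with the relative placement of the two output
		 sections means the real displacement does not fit the signed
		 32-bit field.  */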
5894 if ((got_after_plt && got_pcrel_offset < 0)
5895 || (!got_after_plt && got_pcrel_offset > 0))
5896 info->callbacks->einfo (_("%F%B: PC-relative offset overflow in GOT PLT entry for `%s'\n"),
5897 output_bfd, h->root.root.string);
5898
5899 bfd_put_32 (output_bfd, got_pcrel_offset,
5900 plt->contents + plt_offset + plt_got_offset);
5901 }
5902
5903 if (!local_undefweak
5904 && !h->def_regular
5905 && (h->plt.offset != (bfd_vma) -1
5906 || eh->plt_got.offset != (bfd_vma) -1))
5907 {
5908 /* Mark the symbol as undefined, rather than as defined in
5909 the .plt section. Leave the value if there were any
5910 relocations where pointer equality matters (this is a clue
5911 for the dynamic linker, to make function pointer
5912 comparisons work between an application and shared
5913 library), otherwise set it to zero. If a function is only
5914 called from a binary, there is no need to slow down
5915 shared libraries because of that. */
5916 sym->st_shndx = SHN_UNDEF;
5917 if (!h->pointer_equality_needed)
5918 sym->st_value = 0;
5919 }
5920
5921 	  /* Don't generate a dynamic GOT relocation against an undefined weak
5922 	     symbol in an executable.  */
5923 if (h->got.offset != (bfd_vma) -1
5924 && ! GOT_TLS_GD_ANY_P (elf_x86_64_hash_entry (h)->tls_type)
5925 && elf_x86_64_hash_entry (h)->tls_type != GOT_TLS_IE
5926 && !local_undefweak)
5927 {
5928 Elf_Internal_Rela rela;
5929 asection *relgot = htab->elf.srelgot;
5930
5931 /* This symbol has an entry in the global offset table. Set it
5932 up. */
5933 if (htab->elf.sgot == NULL || htab->elf.srelgot == NULL)
5934 abort ();
5935
5936 rela.r_offset = (htab->elf.sgot->output_section->vma
5937 + htab->elf.sgot->output_offset
5938 + (h->got.offset &~ (bfd_vma) 1));
5939
5940 /* If this is a static link, or it is a -Bsymbolic link and the
5941 symbol is defined locally or was forced to be local because
5942 of a version file, we just want to emit a RELATIVE reloc.
5943 The entry in the global offset table will already have been
5944 initialized in the relocate_section function. */
5945 if (h->def_regular
5946 && h->type == STT_GNU_IFUNC)
5947 {
5948 if (h->plt.offset == (bfd_vma) -1)
5949 {
5950 /* STT_GNU_IFUNC is referenced without PLT. */
5951 if (htab->elf.splt == NULL)
5952 {
5953 		      /* Use the .rel[a].iplt section to store .got relocations
5954 			 in a static executable.  */
5955 relgot = htab->elf.irelplt;
5956 }
5957 if (SYMBOL_REFERENCES_LOCAL (info, h))
5958 {
5959 rela.r_info = htab->r_info (0,
5960 R_X86_64_IRELATIVE);
5961 rela.r_addend = (h->root.u.def.value
5962 + h->root.u.def.section->output_section->vma
5963 + h->root.u.def.section->output_offset);
5964 }
5965 else
5966 goto do_glob_dat;
5967 }
5968 else if (bfd_link_pic (info))
5969 {
5970 /* Generate R_X86_64_GLOB_DAT. */
5971 goto do_glob_dat;
5972 }
5973 else
5974 {
5975 asection *plt;
5976
5977 if (!h->pointer_equality_needed)
5978 abort ();
5979
5980 		      /* For a non-shared object, we can't use .got.plt, which
5981 			 contains the real function address, if we need pointer
5982 			 equality.  We load the GOT entry with the PLT entry.  */
5983 plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt;
5984 bfd_put_64 (output_bfd, (plt->output_section->vma
5985 + plt->output_offset
5986 + h->plt.offset),
5987 htab->elf.sgot->contents + h->got.offset);
5988 return TRUE;
5989 }
5990 }
5991 else if (bfd_link_pic (info)
5992 && SYMBOL_REFERENCES_LOCAL (info, h))
5993 {
5994 if (!h->def_regular)
5995 return FALSE;
5996 BFD_ASSERT((h->got.offset & 1) != 0);
5997 rela.r_info = htab->r_info (0, R_X86_64_RELATIVE);
5998 rela.r_addend = (h->root.u.def.value
5999 + h->root.u.def.section->output_section->vma
6000 + h->root.u.def.section->output_offset);
6001 }
6002 else
6003 {
6004 BFD_ASSERT((h->got.offset & 1) == 0);
6005 do_glob_dat:
6006 bfd_put_64 (output_bfd, (bfd_vma) 0,
6007 htab->elf.sgot->contents + h->got.offset);
6008 rela.r_info = htab->r_info (h->dynindx, R_X86_64_GLOB_DAT);
6009 rela.r_addend = 0;
6010 }
6011
6012 elf_append_rela (output_bfd, relgot, &rela);
6013 }
6014
6015 if (h->needs_copy)
6016 {
6017 Elf_Internal_Rela rela;
6018
6019 /* This symbol needs a copy reloc. Set it up. */
6020
6021 if (h->dynindx == -1
6022 || (h->root.type != bfd_link_hash_defined
6023 && h->root.type != bfd_link_hash_defweak)
6024 || htab->srelbss == NULL)
6025 abort ();
6026
6027 rela.r_offset = (h->root.u.def.value
6028 + h->root.u.def.section->output_section->vma
6029 + h->root.u.def.section->output_offset);
6030 rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY);
6031 rela.r_addend = 0;
6032 elf_append_rela (output_bfd, htab->srelbss, &rela);
6033 }
6034
6035 return TRUE;
6036 }
6037
6038 /* Finish up local dynamic symbol handling. We set the contents of
6039 various dynamic sections here. */
6040
6041 static bfd_boolean
6042 elf_x86_64_finish_local_dynamic_symbol (void **slot, void *inf)
6043 {
6044 struct elf_link_hash_entry *h
6045 = (struct elf_link_hash_entry *) *slot;
6046 struct bfd_link_info *info
6047 = (struct bfd_link_info *) inf;
6048
6049 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
6050 info, h, NULL);
6051 }
6052
6053 	/* Finish up undefined weak symbol handling in PIE.  Fill in its PLT
6054 	   entry here since an undefined weak symbol may not be dynamic, in
6055 	   which case elf_x86_64_finish_dynamic_symbol is not called for it.  */
6056
6057 static bfd_boolean
6058 elf_x86_64_pie_finish_undefweak_symbol (struct bfd_hash_entry *bh,
6059 void *inf)
6060 {
6061 struct elf_link_hash_entry *h = (struct elf_link_hash_entry *) bh;
6062 struct bfd_link_info *info = (struct bfd_link_info *) inf;
6063
6064 if (h->root.type != bfd_link_hash_undefweak
6065 || h->dynindx != -1)
6066 return TRUE;
6067
6068 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
6069 info, h, NULL);
6070 }
6071
6072 /* Used to decide how to sort relocs in an optimal manner for the
6073 dynamic linker, before writing them out. */
6074
6075 static enum elf_reloc_type_class
6076 elf_x86_64_reloc_type_class (const struct bfd_link_info *info,
6077 const asection *rel_sec ATTRIBUTE_UNUSED,
6078 const Elf_Internal_Rela *rela)
6079 {
6080 bfd *abfd = info->output_bfd;
6081 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
6082 struct elf_x86_64_link_hash_table *htab = elf_x86_64_hash_table (info);
6083
6084 if (htab->elf.dynsym != NULL
6085 && htab->elf.dynsym->contents != NULL)
6086 {
6087 /* Check relocation against STT_GNU_IFUNC symbol if there are
6088 dynamic symbols. */
6089 unsigned long r_symndx = htab->r_sym (rela->r_info);
6090 if (r_symndx != STN_UNDEF)
6091 {
6092 Elf_Internal_Sym sym;
6093 if (!bed->s->swap_symbol_in (abfd,
6094 (htab->elf.dynsym->contents
6095 + r_symndx * bed->s->sizeof_sym),
6096 0, &sym))
6097 abort ();
6098
6099 if (ELF_ST_TYPE (sym.st_info) == STT_GNU_IFUNC)
6100 return reloc_class_ifunc;
6101 }
6102 }
6103
6104 switch ((int) ELF32_R_TYPE (rela->r_info))
6105 {
6106 case R_X86_64_IRELATIVE:
6107 return reloc_class_ifunc;
6108 case R_X86_64_RELATIVE:
6109 case R_X86_64_RELATIVE64:
6110 return reloc_class_relative;
6111 case R_X86_64_JUMP_SLOT:
6112 return reloc_class_plt;
6113 case R_X86_64_COPY:
6114 return reloc_class_copy;
6115 default:
6116 return reloc_class_normal;
6117 }
6118 }
6119
6120 /* Finish up the dynamic sections. */
6121
6122 static bfd_boolean
6123 elf_x86_64_finish_dynamic_sections (bfd *output_bfd,
6124 struct bfd_link_info *info)
6125 {
6126 struct elf_x86_64_link_hash_table *htab;
6127 bfd *dynobj;
6128 asection *sdyn;
6129 const struct elf_x86_64_backend_data *abed;
6130
6131 htab = elf_x86_64_hash_table (info);
6132 if (htab == NULL)
6133 return FALSE;
6134
6135 	  /* Use MPX backend data in case of BND relocations.  Use the .plt.bnd
6136 	     section only if there is a .plt section.  */
6137 abed = (htab->elf.splt != NULL && htab->plt_bnd != NULL
6138 ? &elf_x86_64_bnd_arch_bed
6139 : get_elf_x86_64_backend_data (output_bfd));
6140
6141 dynobj = htab->elf.dynobj;
6142 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
6143
6144 if (htab->elf.dynamic_sections_created)
6145 {
6146 bfd_byte *dyncon, *dynconend;
6147 const struct elf_backend_data *bed;
6148 bfd_size_type sizeof_dyn;
6149
6150 if (sdyn == NULL || htab->elf.sgot == NULL)
6151 abort ();
6152
6153 bed = get_elf_backend_data (dynobj);
6154 sizeof_dyn = bed->s->sizeof_dyn;
6155 dyncon = sdyn->contents;
6156 dynconend = sdyn->contents + sdyn->size;
6157 for (; dyncon < dynconend; dyncon += sizeof_dyn)
6158 {
6159 Elf_Internal_Dyn dyn;
6160 asection *s;
6161
6162 (*bed->s->swap_dyn_in) (dynobj, dyncon, &dyn);
6163
6164 switch (dyn.d_tag)
6165 {
6166 default:
6167 continue;
6168
6169 case DT_PLTGOT:
6170 s = htab->elf.sgotplt;
6171 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
6172 break;
6173
6174 case DT_JMPREL:
6175 dyn.d_un.d_ptr = htab->elf.srelplt->output_section->vma;
6176 break;
6177
6178 case DT_PLTRELSZ:
6179 s = htab->elf.srelplt->output_section;
6180 dyn.d_un.d_val = s->size;
6181 break;
6182
6183 case DT_RELASZ:
6184 /* The procedure linkage table relocs (DT_JMPREL) should
6185 not be included in the overall relocs (DT_RELA).
6186 Therefore, we override the DT_RELASZ entry here to
6187 make it not include the JMPREL relocs. Since the
6188 linker script arranges for .rela.plt to follow all
6189 other relocation sections, we don't have to worry
6190 about changing the DT_RELA entry. */
6191 if (htab->elf.srelplt != NULL)
6192 {
6193 s = htab->elf.srelplt->output_section;
6194 dyn.d_un.d_val -= s->size;
6195 }
6196 break;
6197
6198 case DT_TLSDESC_PLT:
6199 s = htab->elf.splt;
6200 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
6201 + htab->tlsdesc_plt;
6202 break;
6203
6204 case DT_TLSDESC_GOT:
6205 s = htab->elf.sgot;
6206 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
6207 + htab->tlsdesc_got;
6208 break;
6209 }
6210
6211 (*bed->s->swap_dyn_out) (output_bfd, &dyn, dyncon);
6212 }
6213
6214 /* Fill in the special first entry in the procedure linkage table. */
6215 if (htab->elf.splt && htab->elf.splt->size > 0)
6216 {
6217 /* Fill in the first entry in the procedure linkage table. */
6218 memcpy (htab->elf.splt->contents,
6219 abed->plt0_entry, abed->plt_entry_size);
6220 	  /* Add the offset for pushq GOT+8(%rip); since the instruction
6221 	     is 6 bytes long, subtract this value.  */
6222 bfd_put_32 (output_bfd,
6223 (htab->elf.sgotplt->output_section->vma
6224 + htab->elf.sgotplt->output_offset
6225 + 8
6226 - htab->elf.splt->output_section->vma
6227 - htab->elf.splt->output_offset
6228 - 6),
6229 htab->elf.splt->contents + abed->plt0_got1_offset);
6230 /* Add offset for the PC-relative instruction accessing GOT+16,
6231 subtracting the offset to the end of that instruction. */
6232 bfd_put_32 (output_bfd,
6233 (htab->elf.sgotplt->output_section->vma
6234 + htab->elf.sgotplt->output_offset
6235 + 16
6236 - htab->elf.splt->output_section->vma
6237 - htab->elf.splt->output_offset
6238 - abed->plt0_got2_insn_end),
6239 htab->elf.splt->contents + abed->plt0_got2_offset);
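	  /* After these two fixups PLT0 reads roughly
	     "pushq GOT+8(%rip); jmpq *GOT+16(%rip)": GOT+8 and GOT+16 are the
	     slots reserved for the dynamic linker (conventionally the link
	     map pointer and the lazy resolver address).  */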
6240
6241 elf_section_data (htab->elf.splt->output_section)
6242 ->this_hdr.sh_entsize = abed->plt_entry_size;
6243
6244 if (htab->tlsdesc_plt)
6245 {
6246 bfd_put_64 (output_bfd, (bfd_vma) 0,
6247 htab->elf.sgot->contents + htab->tlsdesc_got);
6248
6249 memcpy (htab->elf.splt->contents + htab->tlsdesc_plt,
6250 abed->plt0_entry, abed->plt_entry_size);
6251
6252 	      /* Add the offset for pushq GOT+8(%rip); since the
6253 		 instruction is 6 bytes long, subtract this value.  */
6254 bfd_put_32 (output_bfd,
6255 (htab->elf.sgotplt->output_section->vma
6256 + htab->elf.sgotplt->output_offset
6257 + 8
6258 - htab->elf.splt->output_section->vma
6259 - htab->elf.splt->output_offset
6260 - htab->tlsdesc_plt
6261 - 6),
6262 htab->elf.splt->contents
6263 + htab->tlsdesc_plt + abed->plt0_got1_offset);
6264 	      /* Add the offset for the PC-relative instruction accessing GOT+TDG,
6265 		 where TDG stands for htab->tlsdesc_got, subtracting the offset
6266 		 to the end of that instruction.  */
6267 bfd_put_32 (output_bfd,
6268 (htab->elf.sgot->output_section->vma
6269 + htab->elf.sgot->output_offset
6270 + htab->tlsdesc_got
6271 - htab->elf.splt->output_section->vma
6272 - htab->elf.splt->output_offset
6273 - htab->tlsdesc_plt
6274 - abed->plt0_got2_insn_end),
6275 htab->elf.splt->contents
6276 + htab->tlsdesc_plt + abed->plt0_got2_offset);
6277 }
6278 }
6279 }
6280
6281 if (htab->plt_bnd != NULL)
6282 elf_section_data (htab->plt_bnd->output_section)
6283 ->this_hdr.sh_entsize = sizeof (elf_x86_64_bnd_plt2_entry);
6284
6285 if (htab->elf.sgotplt)
6286 {
6287 if (bfd_is_abs_section (htab->elf.sgotplt->output_section))
6288 {
6289 (*_bfd_error_handler)
6290 (_("discarded output section: `%A'"), htab->elf.sgotplt);
6291 return FALSE;
6292 }
6293
6294 /* Fill in the first three entries in the global offset table. */
6295 if (htab->elf.sgotplt->size > 0)
6296 {
6297 /* Set the first entry in the global offset table to the address of
6298 the dynamic section. */
6299 if (sdyn == NULL)
6300 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents);
6301 else
6302 bfd_put_64 (output_bfd,
6303 sdyn->output_section->vma + sdyn->output_offset,
6304 htab->elf.sgotplt->contents);
6305 /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
6306 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents + GOT_ENTRY_SIZE);
6307 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents + GOT_ENTRY_SIZE*2);
6308 }
6309
6310 elf_section_data (htab->elf.sgotplt->output_section)->this_hdr.sh_entsize =
6311 GOT_ENTRY_SIZE;
6312 }
6313
6314 /* Adjust .eh_frame for .plt section. */
6315 if (htab->plt_eh_frame != NULL
6316 && htab->plt_eh_frame->contents != NULL)
6317 {
6318 if (htab->elf.splt != NULL
6319 && htab->elf.splt->size != 0
6320 && (htab->elf.splt->flags & SEC_EXCLUDE) == 0
6321 && htab->elf.splt->output_section != NULL
6322 && htab->plt_eh_frame->output_section != NULL)
6323 {
6324 bfd_vma plt_start = htab->elf.splt->output_section->vma;
6325 bfd_vma eh_frame_start = htab->plt_eh_frame->output_section->vma
6326 + htab->plt_eh_frame->output_offset
6327 + PLT_FDE_START_OFFSET;
6328 bfd_put_signed_32 (dynobj, plt_start - eh_frame_start,
6329 htab->plt_eh_frame->contents
6330 + PLT_FDE_START_OFFSET);
6331 }
6332 if (htab->plt_eh_frame->sec_info_type == SEC_INFO_TYPE_EH_FRAME)
6333 {
6334 if (! _bfd_elf_write_section_eh_frame (output_bfd, info,
6335 htab->plt_eh_frame,
6336 htab->plt_eh_frame->contents))
6337 return FALSE;
6338 }
6339 }
6340
6341 if (htab->elf.sgot && htab->elf.sgot->size > 0)
6342 elf_section_data (htab->elf.sgot->output_section)->this_hdr.sh_entsize
6343 = GOT_ENTRY_SIZE;
6344
6345 /* Fill PLT entries for undefined weak symbols in PIE. */
6346 if (bfd_link_pie (info))
6347 bfd_hash_traverse (&info->hash->table,
6348 elf_x86_64_pie_finish_undefweak_symbol,
6349 info);
6350
6351 return TRUE;
6352 }
6353
6354 /* Fill PLT/GOT entries and allocate dynamic relocations for local
6355 STT_GNU_IFUNC symbols, which aren't in the ELF linker hash table.
6356 It has to be done before elf_link_sort_relocs is called so that
6357 dynamic relocations are properly sorted. */
6358
6359 static bfd_boolean
6360 elf_x86_64_output_arch_local_syms
6361 (bfd *output_bfd ATTRIBUTE_UNUSED,
6362 struct bfd_link_info *info,
6363 void *flaginfo ATTRIBUTE_UNUSED,
6364 int (*func) (void *, const char *,
6365 Elf_Internal_Sym *,
6366 asection *,
6367 struct elf_link_hash_entry *) ATTRIBUTE_UNUSED)
6368 {
6369 struct elf_x86_64_link_hash_table *htab = elf_x86_64_hash_table (info);
6370 if (htab == NULL)
6371 return FALSE;
6372
6373 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
6374 htab_traverse (htab->loc_hash_table,
6375 elf_x86_64_finish_local_dynamic_symbol,
6376 info);
6377
6378 return TRUE;
6379 }
6380
6381 /* Return an array of PLT entry symbol values. */
6382
6383 static bfd_vma *
6384 elf_x86_64_get_plt_sym_val (bfd *abfd, asymbol **dynsyms, asection *plt,
6385 asection *relplt)
6386 {
6387 bfd_boolean (*slurp_relocs) (bfd *, asection *, asymbol **, bfd_boolean);
6388 arelent *p;
6389 long count, i;
6390 bfd_vma *plt_sym_val;
6391 bfd_vma plt_offset;
6392 bfd_byte *plt_contents;
6393 const struct elf_x86_64_backend_data *bed;
6394 Elf_Internal_Shdr *hdr;
6395 asection *plt_bnd;
6396
6397 /* Get the .plt section contents. PLT passed down may point to the
6398 .plt.bnd section. Make sure that PLT always points to the .plt
6399 section. */
6400 plt_bnd = bfd_get_section_by_name (abfd, ".plt.bnd");
6401 if (plt_bnd)
6402 {
6403 if (plt != plt_bnd)
6404 abort ();
6405 plt = bfd_get_section_by_name (abfd, ".plt");
6406 if (plt == NULL)
6407 abort ();
6408 bed = &elf_x86_64_bnd_arch_bed;
6409 }
6410 else
6411 bed = get_elf_x86_64_backend_data (abfd);
6412
6413 plt_contents = (bfd_byte *) bfd_malloc (plt->size);
6414 if (plt_contents == NULL)
6415 return NULL;
6416 if (!bfd_get_section_contents (abfd, (asection *) plt,
6417 plt_contents, 0, plt->size))
6418 {
6419 bad_return:
6420 free (plt_contents);
6421 return NULL;
6422 }
6423
6424 slurp_relocs = get_elf_backend_data (abfd)->s->slurp_reloc_table;
6425 if (! (*slurp_relocs) (abfd, relplt, dynsyms, TRUE))
6426 goto bad_return;
6427
6428 hdr = &elf_section_data (relplt)->this_hdr;
6429 count = relplt->size / hdr->sh_entsize;
6430
6431 plt_sym_val = (bfd_vma *) bfd_malloc (sizeof (bfd_vma) * count);
6432 if (plt_sym_val == NULL)
6433 goto bad_return;
6434
6435 for (i = 0; i < count; i++)
6436 plt_sym_val[i] = -1;
6437
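	  /* The first PLT entry is the reserved lazy-resolution stub (PLT0),
	     so scanning starts at the second entry.  */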
6438 plt_offset = bed->plt_entry_size;
6439 p = relplt->relocation;
6440 for (i = 0; i < count; i++, p++)
6441 {
6442 long reloc_index;
6443
6444 /* Skip unknown relocation. */
6445 if (p->howto == NULL)
6446 continue;
6447
6448 if (p->howto->type != R_X86_64_JUMP_SLOT
6449 && p->howto->type != R_X86_64_IRELATIVE)
6450 continue;
6451
6452 reloc_index = H_GET_32 (abfd, (plt_contents + plt_offset
6453 + bed->plt_reloc_offset));
6454 if (reloc_index < count)
6455 {
6456 if (plt_bnd)
6457 {
6458 /* This is the index in .plt section. */
6459 long plt_index = plt_offset / bed->plt_entry_size;
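	      /* .plt.bnd has no reserved header entry, so PLT entry N
		 corresponds to slot N - 1 of .plt.bnd.  */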
6460 /* Store VMA + the offset in .plt.bnd section. */
6461 plt_sym_val[reloc_index] =
6462 (plt_bnd->vma
6463 + (plt_index - 1) * sizeof (elf_x86_64_legacy_plt2_entry));
6464 }
6465 else
6466 plt_sym_val[reloc_index] = plt->vma + plt_offset;
6467 }
6468 plt_offset += bed->plt_entry_size;
6469
6470 /* PR binutils/18437: Skip extra relocations in the .rela.plt
6471 section. */
6472 if (plt_offset >= plt->size)
6473 break;
6474 }
6475
6476 free (plt_contents);
6477
6478 return plt_sym_val;
6479 }
6480
6481 /* Similar to _bfd_elf_get_synthetic_symtab, with .plt.bnd section
6482 support. */
6483
6484 static long
6485 elf_x86_64_get_synthetic_symtab (bfd *abfd,
6486 long symcount,
6487 asymbol **syms,
6488 long dynsymcount,
6489 asymbol **dynsyms,
6490 asymbol **ret)
6491 {
6492 /* Pass the .plt.bnd section to _bfd_elf_ifunc_get_synthetic_symtab
6493 as PLT if it exists. */
6494 asection *plt = bfd_get_section_by_name (abfd, ".plt.bnd");
6495 if (plt == NULL)
6496 plt = bfd_get_section_by_name (abfd, ".plt");
6497 return _bfd_elf_ifunc_get_synthetic_symtab (abfd, symcount, syms,
6498 dynsymcount, dynsyms, ret,
6499 plt,
6500 elf_x86_64_get_plt_sym_val);
6501 }
6502
6503 /* Handle an x86-64 specific section when reading an object file. This
6504 is called when elfcode.h finds a section with an unknown type. */
6505
6506 static bfd_boolean
6507 elf_x86_64_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr,
6508 const char *name, int shindex)
6509 {
6510 if (hdr->sh_type != SHT_X86_64_UNWIND)
6511 return FALSE;
6512
6513 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
6514 return FALSE;
6515
6516 return TRUE;
6517 }
6518
6519 /* Hook called by the linker routine which adds symbols from an object
6520 file. We use it to put SHN_X86_64_LCOMMON items in .lbss, instead
6521 of .bss. */
6522
6523 static bfd_boolean
6524 elf_x86_64_add_symbol_hook (bfd *abfd,
6525 struct bfd_link_info *info ATTRIBUTE_UNUSED,
6526 Elf_Internal_Sym *sym,
6527 const char **namep ATTRIBUTE_UNUSED,
6528 flagword *flagsp ATTRIBUTE_UNUSED,
6529 asection **secp,
6530 bfd_vma *valp)
6531 {
6532 asection *lcomm;
6533
6534 switch (sym->st_shndx)
6535 {
6536 case SHN_X86_64_LCOMMON:
6537 lcomm = bfd_get_section_by_name (abfd, "LARGE_COMMON");
6538 if (lcomm == NULL)
6539 {
6540 lcomm = bfd_make_section_with_flags (abfd,
6541 "LARGE_COMMON",
6542 (SEC_ALLOC
6543 | SEC_IS_COMMON
6544 | SEC_LINKER_CREATED));
6545 if (lcomm == NULL)
6546 return FALSE;
6547 elf_section_flags (lcomm) |= SHF_X86_64_LARGE;
6548 }
6549 *secp = lcomm;
6550 *valp = sym->st_size;
6551 return TRUE;
6552 }
6553
6554 return TRUE;
6555 }
6556
6557
6558 /* Given a BFD section, try to locate the corresponding ELF section
6559 index. */
6560
6561 static bfd_boolean
6562 elf_x86_64_elf_section_from_bfd_section (bfd *abfd ATTRIBUTE_UNUSED,
6563 asection *sec, int *index_return)
6564 {
6565 if (sec == &_bfd_elf_large_com_section)
6566 {
6567 *index_return = SHN_X86_64_LCOMMON;
6568 return TRUE;
6569 }
6570 return FALSE;
6571 }
6572
6573 /* Process a symbol. */
6574
6575 static void
6576 elf_x86_64_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
6577 asymbol *asym)
6578 {
6579 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
6580
6581 switch (elfsym->internal_elf_sym.st_shndx)
6582 {
6583 case SHN_X86_64_LCOMMON:
6584 asym->section = &_bfd_elf_large_com_section;
6585 asym->value = elfsym->internal_elf_sym.st_size;
6586 /* Common symbol doesn't set BSF_GLOBAL. */
6587 asym->flags &= ~BSF_GLOBAL;
6588 break;
6589 }
6590 }
6591
6592 static bfd_boolean
6593 elf_x86_64_common_definition (Elf_Internal_Sym *sym)
6594 {
6595 return (sym->st_shndx == SHN_COMMON
6596 || sym->st_shndx == SHN_X86_64_LCOMMON);
6597 }
6598
6599 static unsigned int
6600 elf_x86_64_common_section_index (asection *sec)
6601 {
6602 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
6603 return SHN_COMMON;
6604 else
6605 return SHN_X86_64_LCOMMON;
6606 }
6607
6608 static asection *
6609 elf_x86_64_common_section (asection *sec)
6610 {
6611 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
6612 return bfd_com_section_ptr;
6613 else
6614 return &_bfd_elf_large_com_section;
6615 }
6616
6617 static bfd_boolean
6618 elf_x86_64_merge_symbol (struct elf_link_hash_entry *h,
6619 const Elf_Internal_Sym *sym,
6620 asection **psec,
6621 bfd_boolean newdef,
6622 bfd_boolean olddef,
6623 bfd *oldbfd,
6624 const asection *oldsec)
6625 {
6626 /* A normal common symbol and a large common symbol result in a
6627 normal common symbol. We turn the large common symbol into a
6628 normal one. */
6629 if (!olddef
6630 && h->root.type == bfd_link_hash_common
6631 && !newdef
6632 && bfd_is_com_section (*psec)
6633 && oldsec != *psec)
6634 {
6635 if (sym->st_shndx == SHN_COMMON
6636 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) != 0)
6637 {
6638 h->root.u.c.p->section
6639 = bfd_make_section_old_way (oldbfd, "COMMON");
6640 h->root.u.c.p->section->flags = SEC_ALLOC;
6641 }
6642 else if (sym->st_shndx == SHN_X86_64_LCOMMON
6643 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) == 0)
6644 *psec = bfd_com_section_ptr;
6645 }
6646
6647 return TRUE;
6648 }
6649
6650 static int
6651 elf_x86_64_additional_program_headers (bfd *abfd,
6652 struct bfd_link_info *info ATTRIBUTE_UNUSED)
6653 {
6654 asection *s;
6655 int count = 0;
6656
6657 /* Check to see if we need a large readonly segment. */
6658 s = bfd_get_section_by_name (abfd, ".lrodata");
6659 if (s && (s->flags & SEC_LOAD))
6660 count++;
6661
6662 	  /* Check to see if we need a large data segment.  Since the .lbss
6663 	     section is placed right after the .bss section, there should be no
6664 	     need for a large data segment just because of .lbss.  */
6665 s = bfd_get_section_by_name (abfd, ".ldata");
6666 if (s && (s->flags & SEC_LOAD))
6667 count++;
6668
6669 return count;
6670 }
6671
6672 /* Return TRUE if symbol should be hashed in the `.gnu.hash' section. */
6673
6674 static bfd_boolean
6675 elf_x86_64_hash_symbol (struct elf_link_hash_entry *h)
6676 {
6677 if (h->plt.offset != (bfd_vma) -1
6678 && !h->def_regular
6679 && !h->pointer_equality_needed)
6680 return FALSE;
6681
6682 return _bfd_elf_hash_symbol (h);
6683 }
6684
6685 /* Return TRUE iff relocations for INPUT are compatible with OUTPUT. */
6686
6687 static bfd_boolean
6688 elf_x86_64_relocs_compatible (const bfd_target *input,
6689 const bfd_target *output)
6690 {
6691 return ((xvec_get_elf_backend_data (input)->s->elfclass
6692 == xvec_get_elf_backend_data (output)->s->elfclass)
6693 && _bfd_elf_relocs_compatible (input, output));
6694 }
6695
6696 static const struct bfd_elf_special_section
6697 elf_x86_64_special_sections[]=
6698 {
6699 { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
6700 { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
6701 { STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE},
6702 { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
6703 { STRING_COMMA_LEN (".ldata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
6704 { STRING_COMMA_LEN (".lrodata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
6705 { NULL, 0, 0, 0, 0 }
6706 };
6707
6708 #define TARGET_LITTLE_SYM x86_64_elf64_vec
6709 #define TARGET_LITTLE_NAME "elf64-x86-64"
6710 #define ELF_ARCH bfd_arch_i386
6711 #define ELF_TARGET_ID X86_64_ELF_DATA
6712 #define ELF_MACHINE_CODE EM_X86_64
6713 #define ELF_MAXPAGESIZE 0x200000
6714 #define ELF_MINPAGESIZE 0x1000
6715 #define ELF_COMMONPAGESIZE 0x1000
6716
6717 #define elf_backend_can_gc_sections 1
6718 #define elf_backend_can_refcount 1
6719 #define elf_backend_want_got_plt 1
6720 #define elf_backend_plt_readonly 1
6721 #define elf_backend_want_plt_sym 0
6722 #define elf_backend_got_header_size (GOT_ENTRY_SIZE*3)
6723 #define elf_backend_rela_normal 1
6724 #define elf_backend_plt_alignment 4
6725 #define elf_backend_extern_protected_data 1
6726 #define elf_backend_caches_rawsize 1
6727
6728 #define elf_info_to_howto elf_x86_64_info_to_howto
6729
6730 #define bfd_elf64_bfd_link_hash_table_create \
6731 elf_x86_64_link_hash_table_create
6732 #define bfd_elf64_bfd_reloc_type_lookup elf_x86_64_reloc_type_lookup
6733 #define bfd_elf64_bfd_reloc_name_lookup \
6734 elf_x86_64_reloc_name_lookup
6735
6736 #define elf_backend_adjust_dynamic_symbol elf_x86_64_adjust_dynamic_symbol
6737 #define elf_backend_relocs_compatible elf_x86_64_relocs_compatible
6738 #define elf_backend_check_relocs elf_x86_64_check_relocs
6739 #define elf_backend_copy_indirect_symbol elf_x86_64_copy_indirect_symbol
6740 #define elf_backend_create_dynamic_sections elf_x86_64_create_dynamic_sections
6741 #define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections
6742 #define elf_backend_finish_dynamic_symbol elf_x86_64_finish_dynamic_symbol
6743 #define elf_backend_output_arch_local_syms elf_x86_64_output_arch_local_syms
6744 #define elf_backend_gc_mark_hook elf_x86_64_gc_mark_hook
6745 #define elf_backend_grok_prstatus elf_x86_64_grok_prstatus
6746 #define elf_backend_grok_psinfo elf_x86_64_grok_psinfo
6747 #ifdef CORE_HEADER
6748 #define elf_backend_write_core_note elf_x86_64_write_core_note
6749 #endif
6750 #define elf_backend_reloc_type_class elf_x86_64_reloc_type_class
6751 #define elf_backend_relocate_section elf_x86_64_relocate_section
6752 #define elf_backend_size_dynamic_sections elf_x86_64_size_dynamic_sections
6753 #define elf_backend_always_size_sections elf_x86_64_always_size_sections
6754 #define elf_backend_init_index_section _bfd_elf_init_1_index_section
6755 #define elf_backend_object_p elf64_x86_64_elf_object_p
6756 #define bfd_elf64_mkobject elf_x86_64_mkobject
6757 #define bfd_elf64_get_synthetic_symtab elf_x86_64_get_synthetic_symtab
6758
6759 #define elf_backend_section_from_shdr \
6760 elf_x86_64_section_from_shdr
6761
6762 #define elf_backend_section_from_bfd_section \
6763 elf_x86_64_elf_section_from_bfd_section
6764 #define elf_backend_add_symbol_hook \
6765 elf_x86_64_add_symbol_hook
6766 #define elf_backend_symbol_processing \
6767 elf_x86_64_symbol_processing
6768 #define elf_backend_common_section_index \
6769 elf_x86_64_common_section_index
6770 #define elf_backend_common_section \
6771 elf_x86_64_common_section
6772 #define elf_backend_common_definition \
6773 elf_x86_64_common_definition
6774 #define elf_backend_merge_symbol \
6775 elf_x86_64_merge_symbol
6776 #define elf_backend_special_sections \
6777 elf_x86_64_special_sections
6778 #define elf_backend_additional_program_headers \
6779 elf_x86_64_additional_program_headers
6780 #define elf_backend_hash_symbol \
6781 elf_x86_64_hash_symbol
6782 #define elf_backend_omit_section_dynsym \
6783 ((bfd_boolean (*) (bfd *, struct bfd_link_info *, asection *)) bfd_true)
6784 #define elf_backend_fixup_symbol \
6785 elf_x86_64_fixup_symbol
6786
6787 #include "elf64-target.h"
6788
6789 /* CloudABI support. */
6790
6791 #undef TARGET_LITTLE_SYM
6792 #define TARGET_LITTLE_SYM x86_64_elf64_cloudabi_vec
6793 #undef TARGET_LITTLE_NAME
6794 #define TARGET_LITTLE_NAME "elf64-x86-64-cloudabi"
6795
6796 #undef ELF_OSABI
6797 #define ELF_OSABI ELFOSABI_CLOUDABI
6798
6799 #undef elf64_bed
6800 #define elf64_bed elf64_x86_64_cloudabi_bed
6801
6802 #include "elf64-target.h"
6803
6804 /* FreeBSD support. */
6805
6806 #undef TARGET_LITTLE_SYM
6807 #define TARGET_LITTLE_SYM x86_64_elf64_fbsd_vec
6808 #undef TARGET_LITTLE_NAME
6809 #define TARGET_LITTLE_NAME "elf64-x86-64-freebsd"
6810
6811 #undef ELF_OSABI
6812 #define ELF_OSABI ELFOSABI_FREEBSD
6813
6814 #undef elf64_bed
6815 #define elf64_bed elf64_x86_64_fbsd_bed
6816
6817 #include "elf64-target.h"
6818
6819 /* Solaris 2 support. */
6820
6821 #undef TARGET_LITTLE_SYM
6822 #define TARGET_LITTLE_SYM x86_64_elf64_sol2_vec
6823 #undef TARGET_LITTLE_NAME
6824 #define TARGET_LITTLE_NAME "elf64-x86-64-sol2"
6825
6826 /* Restore default: we cannot use ELFOSABI_SOLARIS, otherwise ELFOSABI_NONE
6827 objects won't be recognized. */
6828 #undef ELF_OSABI
6829
6830 #undef elf64_bed
6831 #define elf64_bed elf64_x86_64_sol2_bed
6832
6833 /* The 64-bit static TLS arena size is rounded to the nearest 16-byte
6834 boundary. */
6835 #undef elf_backend_static_tls_alignment
6836 #define elf_backend_static_tls_alignment 16
6837
6838 /* The Solaris 2 ABI requires a plt symbol on all platforms.
6839
6840 Cf. Linker and Libraries Guide, Ch. 2, Link-Editor, Generating the Output
6841 File, p.63. */
6842 #undef elf_backend_want_plt_sym
6843 #define elf_backend_want_plt_sym 1
6844
6845 #undef elf_backend_strtab_flags
6846 #define elf_backend_strtab_flags SHF_STRINGS
6847
6848 static bfd_boolean
6849 elf64_x86_64_copy_solaris_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
6850 bfd *obfd ATTRIBUTE_UNUSED,
6851 const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
6852 Elf_Internal_Shdr *osection ATTRIBUTE_UNUSED)
6853 {
6854 /* PR 19938: FIXME: Need to add code for setting the sh_info
6855 and sh_link fields of Solaris specific section types. */
6856 return FALSE;
6857 }
6858
6859 #undef elf_backend_copy_special_section_fields
6860 #define elf_backend_copy_special_section_fields elf64_x86_64_copy_solaris_special_section_fields
6861
6862 #include "elf64-target.h"
6863
6864 /* Native Client support. */
6865
6866 static bfd_boolean
6867 elf64_x86_64_nacl_elf_object_p (bfd *abfd)
6868 {
6869 /* Set the right machine number for a NaCl x86-64 ELF64 file. */
6870 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64_nacl);
6871 return TRUE;
6872 }
6873
6874 #undef TARGET_LITTLE_SYM
6875 #define TARGET_LITTLE_SYM x86_64_elf64_nacl_vec
6876 #undef TARGET_LITTLE_NAME
6877 #define TARGET_LITTLE_NAME "elf64-x86-64-nacl"
6878 #undef elf64_bed
6879 #define elf64_bed elf64_x86_64_nacl_bed
6880
6881 #undef ELF_MAXPAGESIZE
6882 #undef ELF_MINPAGESIZE
6883 #undef ELF_COMMONPAGESIZE
6884 #define ELF_MAXPAGESIZE 0x10000
6885 #define ELF_MINPAGESIZE 0x10000
6886 #define ELF_COMMONPAGESIZE 0x10000
6887
6888 /* Restore defaults. */
6889 #undef ELF_OSABI
6890 #undef elf_backend_static_tls_alignment
6891 #undef elf_backend_want_plt_sym
6892 #define elf_backend_want_plt_sym 0
6893 #undef elf_backend_strtab_flags
6894 #undef elf_backend_copy_special_section_fields
6895
6896 /* NaCl uses substantially different PLT entries for the same effects. */
6897
6898 #undef elf_backend_plt_alignment
6899 #define elf_backend_plt_alignment 5
6900 #define NACL_PLT_ENTRY_SIZE 64
6901 #define NACLMASK 0xe0 /* 32-byte alignment mask. */
6902
6903 static const bfd_byte elf_x86_64_nacl_plt0_entry[NACL_PLT_ENTRY_SIZE] =
6904 {
6905 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
6906 0x4c, 0x8b, 0x1d, 16, 0, 0, 0, /* mov GOT+16(%rip), %r11 */
6907 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
6908 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
6909 0x41, 0xff, 0xe3, /* jmpq *%r11 */
6910
6911 /* 9-byte nop sequence to pad out to the next 32-byte boundary. */
6912 0x66, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw 0x0(%rax,%rax,1) */
6913
6914 /* 32 bytes of nop to pad out to the standard size. */
6915 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
6916 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6917 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
6918 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6919 0x66, /* excess data16 prefix */
6920 0x90 /* nop */
6921 };
6922
6923 static const bfd_byte elf_x86_64_nacl_plt_entry[NACL_PLT_ENTRY_SIZE] =
6924 {
6925 0x4c, 0x8b, 0x1d, 0, 0, 0, 0, /* mov name@GOTPCREL(%rip),%r11 */
6926 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
6927 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
6928 0x41, 0xff, 0xe3, /* jmpq *%r11 */
6929
6930 /* 15-byte nop sequence to pad out to the next 32-byte boundary. */
6931 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
6932 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6933
6934 /* Lazy GOT entries point here (32-byte aligned). */
6935 0x68, /* pushq immediate */
6936 0, 0, 0, 0, /* replaced with index into relocation table. */
6937 0xe9, /* jmp relative */
6938 0, 0, 0, 0, /* replaced with offset to start of .plt0. */
6939
6940 /* 22 bytes of nop to pad out to the standard size. */
6941 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
6942 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6943 0x0f, 0x1f, 0x80, 0, 0, 0, 0, /* nopl 0x0(%rax) */
6944 };
6945
6946 /* .eh_frame covering the .plt section. */
6947
6948 static const bfd_byte elf_x86_64_nacl_eh_frame_plt[] =
6949 {
6950 #if (PLT_CIE_LENGTH != 20 \
6951 || PLT_FDE_LENGTH != 36 \
6952 || PLT_FDE_START_OFFSET != 4 + PLT_CIE_LENGTH + 8 \
6953 || PLT_FDE_LEN_OFFSET != 4 + PLT_CIE_LENGTH + 12)
6954 # error "Need elf_x86_64_backend_data parameters for eh_frame_plt offsets!"
6955 #endif
6956 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
6957 0, 0, 0, 0, /* CIE ID */
6958 1, /* CIE version */
6959 'z', 'R', 0, /* Augmentation string */
6960 1, /* Code alignment factor */
6961 0x78, /* Data alignment factor */
6962 16, /* Return address column */
6963 1, /* Augmentation size */
6964 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
6965 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
6966 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
6967 DW_CFA_nop, DW_CFA_nop,
6968
6969 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
6970 PLT_CIE_LENGTH + 8, 0, 0, 0,/* CIE pointer */
6971 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
6972 0, 0, 0, 0, /* .plt size goes here */
6973 0, /* Augmentation size */
6974 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
6975 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
6976 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
6977 DW_CFA_advance_loc + 58, /* DW_CFA_advance_loc: 58 to __PLT__+64 */
6978 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
6979 13, /* Block length */
6980 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
6981 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
6982 DW_OP_const1u, 63, DW_OP_and, DW_OP_const1u, 37, DW_OP_ge,
6983 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
6984 DW_CFA_nop, DW_CFA_nop
6985 };
6986
6987 static const struct elf_x86_64_backend_data elf_x86_64_nacl_arch_bed =
6988 {
6989 elf_x86_64_nacl_plt0_entry, /* plt0_entry */
6990 elf_x86_64_nacl_plt_entry, /* plt_entry */
6991 NACL_PLT_ENTRY_SIZE, /* plt_entry_size */
6992 2, /* plt0_got1_offset */
6993 9, /* plt0_got2_offset */
6994 13, /* plt0_got2_insn_end */
6995 3, /* plt_got_offset */
6996 33, /* plt_reloc_offset */
6997 38, /* plt_plt_offset */
6998 7, /* plt_got_insn_size */
6999 42, /* plt_plt_insn_end */
7000 32, /* plt_lazy_offset */
7001 elf_x86_64_nacl_eh_frame_plt, /* eh_frame_plt */
7002 sizeof (elf_x86_64_nacl_eh_frame_plt), /* eh_frame_plt_size */
7003 };
7004
7005 #undef elf_backend_arch_data
7006 #define elf_backend_arch_data &elf_x86_64_nacl_arch_bed
7007
7008 #undef elf_backend_object_p
7009 #define elf_backend_object_p elf64_x86_64_nacl_elf_object_p
7010 #undef elf_backend_modify_segment_map
7011 #define elf_backend_modify_segment_map nacl_modify_segment_map
7012 #undef elf_backend_modify_program_headers
7013 #define elf_backend_modify_program_headers nacl_modify_program_headers
7014 #undef elf_backend_final_write_processing
7015 #define elf_backend_final_write_processing nacl_final_write_processing
7016
7017 #include "elf64-target.h"
7018
7019 /* Native Client x32 support. */
7020
7021 static bfd_boolean
7022 elf32_x86_64_nacl_elf_object_p (bfd *abfd)
7023 {
7024 /* Set the right machine number for a NaCl x86-64 ELF32 file. */
7025 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32_nacl);
7026 return TRUE;
7027 }
7028
7029 #undef TARGET_LITTLE_SYM
7030 #define TARGET_LITTLE_SYM x86_64_elf32_nacl_vec
7031 #undef TARGET_LITTLE_NAME
7032 #define TARGET_LITTLE_NAME "elf32-x86-64-nacl"
7033 #undef elf32_bed
7034 #define elf32_bed elf32_x86_64_nacl_bed
7035
7036 #define bfd_elf32_bfd_link_hash_table_create \
7037 elf_x86_64_link_hash_table_create
7038 #define bfd_elf32_bfd_reloc_type_lookup \
7039 elf_x86_64_reloc_type_lookup
7040 #define bfd_elf32_bfd_reloc_name_lookup \
7041 elf_x86_64_reloc_name_lookup
7042 #define bfd_elf32_mkobject \
7043 elf_x86_64_mkobject
7044 #define bfd_elf32_get_synthetic_symtab \
7045 elf_x86_64_get_synthetic_symtab
7046
7047 #undef elf_backend_object_p
7048 #define elf_backend_object_p \
7049 elf32_x86_64_nacl_elf_object_p
7050
7051 #undef elf_backend_bfd_from_remote_memory
7052 #define elf_backend_bfd_from_remote_memory \
7053 _bfd_elf32_bfd_from_remote_memory
7054
7055 #undef elf_backend_size_info
7056 #define elf_backend_size_info \
7057 _bfd_elf32_size_info
7058
7059 #include "elf32-target.h"
7060
7061 /* Restore defaults. */
7062 #undef elf_backend_object_p
7063 #define elf_backend_object_p elf64_x86_64_elf_object_p
7064 #undef elf_backend_bfd_from_remote_memory
7065 #undef elf_backend_size_info
7066 #undef elf_backend_modify_segment_map
7067 #undef elf_backend_modify_program_headers
7068 #undef elf_backend_final_write_processing
7069
7070 /* Intel L1OM support. */
7071
7072 static bfd_boolean
7073 elf64_l1om_elf_object_p (bfd *abfd)
7074 {
7075 /* Set the right machine number for an L1OM elf64 file. */
7076 bfd_default_set_arch_mach (abfd, bfd_arch_l1om, bfd_mach_l1om);
7077 return TRUE;
7078 }
7079
7080 #undef TARGET_LITTLE_SYM
7081 #define TARGET_LITTLE_SYM l1om_elf64_vec
7082 #undef TARGET_LITTLE_NAME
7083 #define TARGET_LITTLE_NAME "elf64-l1om"
7084 #undef ELF_ARCH
7085 #define ELF_ARCH bfd_arch_l1om
7086
7087 #undef ELF_MACHINE_CODE
7088 #define ELF_MACHINE_CODE EM_L1OM
7089
7090 #undef ELF_OSABI
7091
7092 #undef elf64_bed
7093 #define elf64_bed elf64_l1om_bed
7094
7095 #undef elf_backend_object_p
7096 #define elf_backend_object_p elf64_l1om_elf_object_p
7097
7098 /* Restore defaults. */
7099 #undef ELF_MAXPAGESIZE
7100 #undef ELF_MINPAGESIZE
7101 #undef ELF_COMMONPAGESIZE
7102 #define ELF_MAXPAGESIZE 0x200000
7103 #define ELF_MINPAGESIZE 0x1000
7104 #define ELF_COMMONPAGESIZE 0x1000
7105 #undef elf_backend_plt_alignment
7106 #define elf_backend_plt_alignment 4
7107 #undef elf_backend_arch_data
7108 #define elf_backend_arch_data &elf_x86_64_arch_bed
7109
7110 #include "elf64-target.h"
7111
7112 /* FreeBSD L1OM support. */
7113
7114 #undef TARGET_LITTLE_SYM
7115 #define TARGET_LITTLE_SYM l1om_elf64_fbsd_vec
7116 #undef TARGET_LITTLE_NAME
7117 #define TARGET_LITTLE_NAME "elf64-l1om-freebsd"
7118
7119 #undef ELF_OSABI
7120 #define ELF_OSABI ELFOSABI_FREEBSD
7121
7122 #undef elf64_bed
7123 #define elf64_bed elf64_l1om_fbsd_bed
7124
7125 #include "elf64-target.h"
7126
7127 /* Intel K1OM support. */
7128
7129 static bfd_boolean
7130 elf64_k1om_elf_object_p (bfd *abfd)
7131 {
7132 	  /* Set the right machine number for a K1OM elf64 file.  */
7133 bfd_default_set_arch_mach (abfd, bfd_arch_k1om, bfd_mach_k1om);
7134 return TRUE;
7135 }
7136
7137 #undef TARGET_LITTLE_SYM
7138 #define TARGET_LITTLE_SYM k1om_elf64_vec
7139 #undef TARGET_LITTLE_NAME
7140 #define TARGET_LITTLE_NAME "elf64-k1om"
7141 #undef ELF_ARCH
7142 #define ELF_ARCH bfd_arch_k1om
7143
7144 #undef ELF_MACHINE_CODE
7145 #define ELF_MACHINE_CODE EM_K1OM
7146
7147 #undef ELF_OSABI
7148
7149 #undef elf64_bed
7150 #define elf64_bed elf64_k1om_bed
7151
7152 #undef elf_backend_object_p
7153 #define elf_backend_object_p elf64_k1om_elf_object_p
7154
7155 #undef elf_backend_static_tls_alignment
7156
7157 #undef elf_backend_want_plt_sym
7158 #define elf_backend_want_plt_sym 0
7159
7160 #include "elf64-target.h"
7161
7162 /* FreeBSD K1OM support. */
7163
7164 #undef TARGET_LITTLE_SYM
7165 #define TARGET_LITTLE_SYM k1om_elf64_fbsd_vec
7166 #undef TARGET_LITTLE_NAME
7167 #define TARGET_LITTLE_NAME "elf64-k1om-freebsd"
7168
7169 #undef ELF_OSABI
7170 #define ELF_OSABI ELFOSABI_FREEBSD
7171
7172 #undef elf64_bed
7173 #define elf64_bed elf64_k1om_fbsd_bed
7174
7175 #include "elf64-target.h"
7176
7177 /* 32bit x86-64 support. */
7178
7179 #undef TARGET_LITTLE_SYM
7180 #define TARGET_LITTLE_SYM x86_64_elf32_vec
7181 #undef TARGET_LITTLE_NAME
7182 #define TARGET_LITTLE_NAME "elf32-x86-64"
7183 #undef elf32_bed
7184
7185 #undef ELF_ARCH
7186 #define ELF_ARCH bfd_arch_i386
7187
7188 #undef ELF_MACHINE_CODE
7189 #define ELF_MACHINE_CODE EM_X86_64
7190
7191 #undef ELF_OSABI
7192
7193 #undef elf_backend_object_p
7194 #define elf_backend_object_p \
7195 elf32_x86_64_elf_object_p
7196
7197 #undef elf_backend_bfd_from_remote_memory
7198 #define elf_backend_bfd_from_remote_memory \
7199 _bfd_elf32_bfd_from_remote_memory
7200
7201 #undef elf_backend_size_info
7202 #define elf_backend_size_info \
7203 _bfd_elf32_size_info
7204
7205 #include "elf32-target.h"