Discard space for relocs against symbols with copy relocs
bfd/elf64-x86-64.c (binutils-gdb)
1 /* X86-64 specific support for ELF
2 Copyright (C) 2000-2014 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka <jh@suse.cz>.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "sysdep.h"
23 #include "bfd.h"
24 #include "bfdlink.h"
25 #include "libbfd.h"
26 #include "elf-bfd.h"
27 #include "elf-nacl.h"
28 #include "bfd_stdint.h"
29 #include "objalloc.h"
30 #include "hashtab.h"
31 #include "dwarf2.h"
32 #include "libiberty.h"
33
34 #include "elf/x86-64.h"
35
36 #ifdef CORE_HEADER
37 #include <stdarg.h>
38 #include CORE_HEADER
39 #endif
40
41 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
42 #define MINUS_ONE (~ (bfd_vma) 0)
43
44 /* Since both 32-bit and 64-bit x86-64 encode the relocation type in an
45 identical manner, we use ELF32_R_TYPE instead of ELF64_R_TYPE to get the
46 relocation type. We also use ELF_ST_TYPE instead of ELF64_ST_TYPE
47 since they are the same. */
48
49 #define ABI_64_P(abfd) \
50 (get_elf_backend_data (abfd)->s->elfclass == ELFCLASS64)
51
52 /* The relocation "howto" table. Order of fields:
53 type, rightshift, size, bitsize, pc_relative, bitpos, complain_on_overflow,
54 special_function, name, partial_inplace, src_mask, dst_mask, pcrel_offset. */
55 static reloc_howto_type x86_64_elf_howto_table[] =
56 {
57 HOWTO(R_X86_64_NONE, 0, 0, 0, FALSE, 0, complain_overflow_dont,
58 bfd_elf_generic_reloc, "R_X86_64_NONE", FALSE, 0x00000000, 0x00000000,
59 FALSE),
60 HOWTO(R_X86_64_64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
61 bfd_elf_generic_reloc, "R_X86_64_64", FALSE, MINUS_ONE, MINUS_ONE,
62 FALSE),
63 HOWTO(R_X86_64_PC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
64 bfd_elf_generic_reloc, "R_X86_64_PC32", FALSE, 0xffffffff, 0xffffffff,
65 TRUE),
66 HOWTO(R_X86_64_GOT32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
67 bfd_elf_generic_reloc, "R_X86_64_GOT32", FALSE, 0xffffffff, 0xffffffff,
68 FALSE),
69 HOWTO(R_X86_64_PLT32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
70 bfd_elf_generic_reloc, "R_X86_64_PLT32", FALSE, 0xffffffff, 0xffffffff,
71 TRUE),
72 HOWTO(R_X86_64_COPY, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
73 bfd_elf_generic_reloc, "R_X86_64_COPY", FALSE, 0xffffffff, 0xffffffff,
74 FALSE),
75 HOWTO(R_X86_64_GLOB_DAT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
76 bfd_elf_generic_reloc, "R_X86_64_GLOB_DAT", FALSE, MINUS_ONE,
77 MINUS_ONE, FALSE),
78 HOWTO(R_X86_64_JUMP_SLOT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
79 bfd_elf_generic_reloc, "R_X86_64_JUMP_SLOT", FALSE, MINUS_ONE,
80 MINUS_ONE, FALSE),
81 HOWTO(R_X86_64_RELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
82 bfd_elf_generic_reloc, "R_X86_64_RELATIVE", FALSE, MINUS_ONE,
83 MINUS_ONE, FALSE),
84 HOWTO(R_X86_64_GOTPCREL, 0, 2, 32, TRUE, 0, complain_overflow_signed,
85 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL", FALSE, 0xffffffff,
86 0xffffffff, TRUE),
87 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
88 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
89 FALSE),
90 HOWTO(R_X86_64_32S, 0, 2, 32, FALSE, 0, complain_overflow_signed,
91 bfd_elf_generic_reloc, "R_X86_64_32S", FALSE, 0xffffffff, 0xffffffff,
92 FALSE),
93 HOWTO(R_X86_64_16, 0, 1, 16, FALSE, 0, complain_overflow_bitfield,
94 bfd_elf_generic_reloc, "R_X86_64_16", FALSE, 0xffff, 0xffff, FALSE),
95 HOWTO(R_X86_64_PC16,0, 1, 16, TRUE, 0, complain_overflow_bitfield,
96 bfd_elf_generic_reloc, "R_X86_64_PC16", FALSE, 0xffff, 0xffff, TRUE),
97 HOWTO(R_X86_64_8, 0, 0, 8, FALSE, 0, complain_overflow_bitfield,
98 bfd_elf_generic_reloc, "R_X86_64_8", FALSE, 0xff, 0xff, FALSE),
99 HOWTO(R_X86_64_PC8, 0, 0, 8, TRUE, 0, complain_overflow_signed,
100 bfd_elf_generic_reloc, "R_X86_64_PC8", FALSE, 0xff, 0xff, TRUE),
101 HOWTO(R_X86_64_DTPMOD64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
102 bfd_elf_generic_reloc, "R_X86_64_DTPMOD64", FALSE, MINUS_ONE,
103 MINUS_ONE, FALSE),
104 HOWTO(R_X86_64_DTPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
105 bfd_elf_generic_reloc, "R_X86_64_DTPOFF64", FALSE, MINUS_ONE,
106 MINUS_ONE, FALSE),
107 HOWTO(R_X86_64_TPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
108 bfd_elf_generic_reloc, "R_X86_64_TPOFF64", FALSE, MINUS_ONE,
109 MINUS_ONE, FALSE),
110 HOWTO(R_X86_64_TLSGD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
111 bfd_elf_generic_reloc, "R_X86_64_TLSGD", FALSE, 0xffffffff,
112 0xffffffff, TRUE),
113 HOWTO(R_X86_64_TLSLD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
114 bfd_elf_generic_reloc, "R_X86_64_TLSLD", FALSE, 0xffffffff,
115 0xffffffff, TRUE),
116 HOWTO(R_X86_64_DTPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
117 bfd_elf_generic_reloc, "R_X86_64_DTPOFF32", FALSE, 0xffffffff,
118 0xffffffff, FALSE),
119 HOWTO(R_X86_64_GOTTPOFF, 0, 2, 32, TRUE, 0, complain_overflow_signed,
120 bfd_elf_generic_reloc, "R_X86_64_GOTTPOFF", FALSE, 0xffffffff,
121 0xffffffff, TRUE),
122 HOWTO(R_X86_64_TPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
123 bfd_elf_generic_reloc, "R_X86_64_TPOFF32", FALSE, 0xffffffff,
124 0xffffffff, FALSE),
125 HOWTO(R_X86_64_PC64, 0, 4, 64, TRUE, 0, complain_overflow_bitfield,
126 bfd_elf_generic_reloc, "R_X86_64_PC64", FALSE, MINUS_ONE, MINUS_ONE,
127 TRUE),
128 HOWTO(R_X86_64_GOTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
129 bfd_elf_generic_reloc, "R_X86_64_GOTOFF64",
130 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
131 HOWTO(R_X86_64_GOTPC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
132 bfd_elf_generic_reloc, "R_X86_64_GOTPC32",
133 FALSE, 0xffffffff, 0xffffffff, TRUE),
134 HOWTO(R_X86_64_GOT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
135 bfd_elf_generic_reloc, "R_X86_64_GOT64", FALSE, MINUS_ONE, MINUS_ONE,
136 FALSE),
137 HOWTO(R_X86_64_GOTPCREL64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
138 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL64", FALSE, MINUS_ONE,
139 MINUS_ONE, TRUE),
140 HOWTO(R_X86_64_GOTPC64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
141 bfd_elf_generic_reloc, "R_X86_64_GOTPC64",
142 FALSE, MINUS_ONE, MINUS_ONE, TRUE),
143 HOWTO(R_X86_64_GOTPLT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
144 bfd_elf_generic_reloc, "R_X86_64_GOTPLT64", FALSE, MINUS_ONE,
145 MINUS_ONE, FALSE),
146 HOWTO(R_X86_64_PLTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
147 bfd_elf_generic_reloc, "R_X86_64_PLTOFF64", FALSE, MINUS_ONE,
148 MINUS_ONE, FALSE),
149 HOWTO(R_X86_64_SIZE32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
150 bfd_elf_generic_reloc, "R_X86_64_SIZE32", FALSE, 0xffffffff, 0xffffffff,
151 FALSE),
152 HOWTO(R_X86_64_SIZE64, 0, 4, 64, FALSE, 0, complain_overflow_unsigned,
153 bfd_elf_generic_reloc, "R_X86_64_SIZE64", FALSE, MINUS_ONE, MINUS_ONE,
154 FALSE),
155 HOWTO(R_X86_64_GOTPC32_TLSDESC, 0, 2, 32, TRUE, 0,
156 complain_overflow_bitfield, bfd_elf_generic_reloc,
157 "R_X86_64_GOTPC32_TLSDESC",
158 FALSE, 0xffffffff, 0xffffffff, TRUE),
159 HOWTO(R_X86_64_TLSDESC_CALL, 0, 0, 0, FALSE, 0,
160 complain_overflow_dont, bfd_elf_generic_reloc,
161 "R_X86_64_TLSDESC_CALL",
162 FALSE, 0, 0, FALSE),
163 HOWTO(R_X86_64_TLSDESC, 0, 4, 64, FALSE, 0,
164 complain_overflow_bitfield, bfd_elf_generic_reloc,
165 "R_X86_64_TLSDESC",
166 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
167 HOWTO(R_X86_64_IRELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
168 bfd_elf_generic_reloc, "R_X86_64_IRELATIVE", FALSE, MINUS_ONE,
169 MINUS_ONE, FALSE),
170 HOWTO(R_X86_64_RELATIVE64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
171 bfd_elf_generic_reloc, "R_X86_64_RELATIVE64", FALSE, MINUS_ONE,
172 MINUS_ONE, FALSE),
173 HOWTO(R_X86_64_PC32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
174 bfd_elf_generic_reloc, "R_X86_64_PC32_BND", FALSE, 0xffffffff, 0xffffffff,
175 TRUE),
176 HOWTO(R_X86_64_PLT32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
177 bfd_elf_generic_reloc, "R_X86_64_PLT32_BND", FALSE, 0xffffffff, 0xffffffff,
178 TRUE),
179
180 /* We have a gap in the reloc numbers here.
181 R_X86_64_standard counts the number up to this point, and
182 R_X86_64_vt_offset is the value to subtract from a reloc type of
183 R_X86_64_GNU_VT* to form an index into this table. */
184 #define R_X86_64_standard (R_X86_64_PLT32_BND + 1)
185 #define R_X86_64_vt_offset (R_X86_64_GNU_VTINHERIT - R_X86_64_standard)
186
187 /* GNU extension to record C++ vtable hierarchy. */
188 HOWTO (R_X86_64_GNU_VTINHERIT, 0, 4, 0, FALSE, 0, complain_overflow_dont,
189 NULL, "R_X86_64_GNU_VTINHERIT", FALSE, 0, 0, FALSE),
190
191 /* GNU extension to record C++ vtable member usage. */
192 HOWTO (R_X86_64_GNU_VTENTRY, 0, 4, 0, FALSE, 0, complain_overflow_dont,
193 _bfd_elf_rel_vtable_reloc_fn, "R_X86_64_GNU_VTENTRY", FALSE, 0, 0,
194 FALSE),
195
196 /* Use complain_overflow_bitfield on R_X86_64_32 for x32. */
197 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
198 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
199 FALSE)
200 };
201
202 #define IS_X86_64_PCREL_TYPE(TYPE) \
203 ( ((TYPE) == R_X86_64_PC8) \
204 || ((TYPE) == R_X86_64_PC16) \
205 || ((TYPE) == R_X86_64_PC32) \
206 || ((TYPE) == R_X86_64_PC32_BND) \
207 || ((TYPE) == R_X86_64_PC64))
208
209 /* Map BFD relocs to the x86_64 elf relocs. */
210 struct elf_reloc_map
211 {
212 bfd_reloc_code_real_type bfd_reloc_val;
213 unsigned char elf_reloc_val;
214 };
215
216 static const struct elf_reloc_map x86_64_reloc_map[] =
217 {
218 { BFD_RELOC_NONE, R_X86_64_NONE, },
219 { BFD_RELOC_64, R_X86_64_64, },
220 { BFD_RELOC_32_PCREL, R_X86_64_PC32, },
221 { BFD_RELOC_X86_64_GOT32, R_X86_64_GOT32,},
222 { BFD_RELOC_X86_64_PLT32, R_X86_64_PLT32,},
223 { BFD_RELOC_X86_64_COPY, R_X86_64_COPY, },
224 { BFD_RELOC_X86_64_GLOB_DAT, R_X86_64_GLOB_DAT, },
225 { BFD_RELOC_X86_64_JUMP_SLOT, R_X86_64_JUMP_SLOT, },
226 { BFD_RELOC_X86_64_RELATIVE, R_X86_64_RELATIVE, },
227 { BFD_RELOC_X86_64_GOTPCREL, R_X86_64_GOTPCREL, },
228 { BFD_RELOC_32, R_X86_64_32, },
229 { BFD_RELOC_X86_64_32S, R_X86_64_32S, },
230 { BFD_RELOC_16, R_X86_64_16, },
231 { BFD_RELOC_16_PCREL, R_X86_64_PC16, },
232 { BFD_RELOC_8, R_X86_64_8, },
233 { BFD_RELOC_8_PCREL, R_X86_64_PC8, },
234 { BFD_RELOC_X86_64_DTPMOD64, R_X86_64_DTPMOD64, },
235 { BFD_RELOC_X86_64_DTPOFF64, R_X86_64_DTPOFF64, },
236 { BFD_RELOC_X86_64_TPOFF64, R_X86_64_TPOFF64, },
237 { BFD_RELOC_X86_64_TLSGD, R_X86_64_TLSGD, },
238 { BFD_RELOC_X86_64_TLSLD, R_X86_64_TLSLD, },
239 { BFD_RELOC_X86_64_DTPOFF32, R_X86_64_DTPOFF32, },
240 { BFD_RELOC_X86_64_GOTTPOFF, R_X86_64_GOTTPOFF, },
241 { BFD_RELOC_X86_64_TPOFF32, R_X86_64_TPOFF32, },
242 { BFD_RELOC_64_PCREL, R_X86_64_PC64, },
243 { BFD_RELOC_X86_64_GOTOFF64, R_X86_64_GOTOFF64, },
244 { BFD_RELOC_X86_64_GOTPC32, R_X86_64_GOTPC32, },
245 { BFD_RELOC_X86_64_GOT64, R_X86_64_GOT64, },
246 { BFD_RELOC_X86_64_GOTPCREL64,R_X86_64_GOTPCREL64, },
247 { BFD_RELOC_X86_64_GOTPC64, R_X86_64_GOTPC64, },
248 { BFD_RELOC_X86_64_GOTPLT64, R_X86_64_GOTPLT64, },
249 { BFD_RELOC_X86_64_PLTOFF64, R_X86_64_PLTOFF64, },
250 { BFD_RELOC_SIZE32, R_X86_64_SIZE32, },
251 { BFD_RELOC_SIZE64, R_X86_64_SIZE64, },
252 { BFD_RELOC_X86_64_GOTPC32_TLSDESC, R_X86_64_GOTPC32_TLSDESC, },
253 { BFD_RELOC_X86_64_TLSDESC_CALL, R_X86_64_TLSDESC_CALL, },
254 { BFD_RELOC_X86_64_TLSDESC, R_X86_64_TLSDESC, },
255 { BFD_RELOC_X86_64_IRELATIVE, R_X86_64_IRELATIVE, },
256 { BFD_RELOC_X86_64_PC32_BND, R_X86_64_PC32_BND,},
257 { BFD_RELOC_X86_64_PLT32_BND, R_X86_64_PLT32_BND,},
258 { BFD_RELOC_VTABLE_INHERIT, R_X86_64_GNU_VTINHERIT, },
259 { BFD_RELOC_VTABLE_ENTRY, R_X86_64_GNU_VTENTRY, },
260 };
261
262 static reloc_howto_type *
263 elf_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type)
264 {
265 unsigned i;
266
267 if (r_type == (unsigned int) R_X86_64_32)
268 {
269 if (ABI_64_P (abfd))
270 i = r_type;
271 else
272 i = ARRAY_SIZE (x86_64_elf_howto_table) - 1;
273 }
274 else if (r_type < (unsigned int) R_X86_64_GNU_VTINHERIT
275 || r_type >= (unsigned int) R_X86_64_max)
276 {
277 if (r_type >= (unsigned int) R_X86_64_standard)
278 {
279 (*_bfd_error_handler) (_("%B: invalid relocation type %d"),
280 abfd, (int) r_type);
281 r_type = R_X86_64_NONE;
282 }
283 i = r_type;
284 }
285 else
286 i = r_type - (unsigned int) R_X86_64_vt_offset;
287 BFD_ASSERT (x86_64_elf_howto_table[i].type == r_type);
288 return &x86_64_elf_howto_table[i];
289 }
290
291 /* Given a BFD reloc type, return a HOWTO structure. */
292 static reloc_howto_type *
293 elf_x86_64_reloc_type_lookup (bfd *abfd,
294 bfd_reloc_code_real_type code)
295 {
296 unsigned int i;
297
298 for (i = 0; i < sizeof (x86_64_reloc_map) / sizeof (struct elf_reloc_map);
299 i++)
300 {
301 if (x86_64_reloc_map[i].bfd_reloc_val == code)
302 return elf_x86_64_rtype_to_howto (abfd,
303 x86_64_reloc_map[i].elf_reloc_val);
304 }
305 return 0;
306 }
307
308 static reloc_howto_type *
309 elf_x86_64_reloc_name_lookup (bfd *abfd,
310 const char *r_name)
311 {
312 unsigned int i;
313
314 if (!ABI_64_P (abfd) && strcasecmp (r_name, "R_X86_64_32") == 0)
315 {
316 /* Get x32 R_X86_64_32. */
317 reloc_howto_type *reloc
318 = &x86_64_elf_howto_table[ARRAY_SIZE (x86_64_elf_howto_table) - 1];
319 BFD_ASSERT (reloc->type == (unsigned int) R_X86_64_32);
320 return reloc;
321 }
322
323 for (i = 0; i < ARRAY_SIZE (x86_64_elf_howto_table); i++)
324 if (x86_64_elf_howto_table[i].name != NULL
325 && strcasecmp (x86_64_elf_howto_table[i].name, r_name) == 0)
326 return &x86_64_elf_howto_table[i];
327
328 return NULL;
329 }
330
331 /* Given an x86_64 ELF reloc type, fill in an arelent structure. */
332
333 static void
334 elf_x86_64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *cache_ptr,
335 Elf_Internal_Rela *dst)
336 {
337 unsigned r_type;
338
339 r_type = ELF32_R_TYPE (dst->r_info);
340 cache_ptr->howto = elf_x86_64_rtype_to_howto (abfd, r_type);
341 BFD_ASSERT (r_type == cache_ptr->howto->type);
342 }
343 \f
344 /* Support for core dump NOTE sections. */
345 static bfd_boolean
346 elf_x86_64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
347 {
348 int offset;
349 size_t size;
350
351 switch (note->descsz)
352 {
353 default:
354 return FALSE;
355
356 case 296: /* sizeof(struct elf_prstatus) on Linux/x32 */
357 /* pr_cursig */
358 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
359
360 /* pr_pid */
361 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
362
363 /* pr_reg */
364 offset = 72;
365 size = 216;
366
367 break;
368
369 case 336: /* sizeof(struct elf_prstatus) on Linux/x86_64 */
370 /* pr_cursig */
371 elf_tdata (abfd)->core->signal
372 = bfd_get_16 (abfd, note->descdata + 12);
373
374 /* pr_pid */
375 elf_tdata (abfd)->core->lwpid
376 = bfd_get_32 (abfd, note->descdata + 32);
377
378 /* pr_reg */
379 offset = 112;
380 size = 216;
381
382 break;
383 }
384
385 /* Make a ".reg/999" section. */
386 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
387 size, note->descpos + offset);
388 }
389
390 static bfd_boolean
391 elf_x86_64_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
392 {
393 switch (note->descsz)
394 {
395 default:
396 return FALSE;
397
398 case 124: /* sizeof(struct elf_prpsinfo) on Linux/x32 */
399 elf_tdata (abfd)->core->pid
400 = bfd_get_32 (abfd, note->descdata + 12);
401 elf_tdata (abfd)->core->program
402 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
403 elf_tdata (abfd)->core->command
404 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
405 break;
406
407 case 136: /* sizeof(struct elf_prpsinfo) on Linux/x86_64 */
408 elf_tdata (abfd)->core->pid
409 = bfd_get_32 (abfd, note->descdata + 24);
410 elf_tdata (abfd)->core->program
411 = _bfd_elfcore_strndup (abfd, note->descdata + 40, 16);
412 elf_tdata (abfd)->core->command
413 = _bfd_elfcore_strndup (abfd, note->descdata + 56, 80);
414 }
415
416 /* Note that for some reason, a spurious space is tacked
417 onto the end of the args in some (at least one anyway)
418 implementations, so strip it off if it exists. */
419
420 {
421 char *command = elf_tdata (abfd)->core->command;
422 int n = strlen (command);
423
424 if (0 < n && command[n - 1] == ' ')
425 command[n - 1] = '\0';
426 }
427
428 return TRUE;
429 }
430
431 #ifdef CORE_HEADER
432 static char *
433 elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz,
434 int note_type, ...)
435 {
436 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
437 va_list ap;
438 const char *fname, *psargs;
439 long pid;
440 int cursig;
441 const void *gregs;
442
443 switch (note_type)
444 {
445 default:
446 return NULL;
447
448 case NT_PRPSINFO:
449 va_start (ap, note_type);
450 fname = va_arg (ap, const char *);
451 psargs = va_arg (ap, const char *);
452 va_end (ap);
453
454 if (bed->s->elfclass == ELFCLASS32)
455 {
456 prpsinfo32_t data;
457 memset (&data, 0, sizeof (data));
458 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
459 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
460 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
461 &data, sizeof (data));
462 }
463 else
464 {
465 prpsinfo64_t data;
466 memset (&data, 0, sizeof (data));
467 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
468 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
469 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
470 &data, sizeof (data));
471 }
472 /* NOTREACHED */
473
474 case NT_PRSTATUS:
475 va_start (ap, note_type);
476 pid = va_arg (ap, long);
477 cursig = va_arg (ap, int);
478 gregs = va_arg (ap, const void *);
479 va_end (ap);
480
481 if (bed->s->elfclass == ELFCLASS32)
482 {
483 if (bed->elf_machine_code == EM_X86_64)
484 {
485 prstatusx32_t prstat;
486 memset (&prstat, 0, sizeof (prstat));
487 prstat.pr_pid = pid;
488 prstat.pr_cursig = cursig;
489 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
490 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
491 &prstat, sizeof (prstat));
492 }
493 else
494 {
495 prstatus32_t prstat;
496 memset (&prstat, 0, sizeof (prstat));
497 prstat.pr_pid = pid;
498 prstat.pr_cursig = cursig;
499 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
500 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
501 &prstat, sizeof (prstat));
502 }
503 }
504 else
505 {
506 prstatus64_t prstat;
507 memset (&prstat, 0, sizeof (prstat));
508 prstat.pr_pid = pid;
509 prstat.pr_cursig = cursig;
510 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
511 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
512 &prstat, sizeof (prstat));
513 }
514 }
515 /* NOTREACHED */
516 }
517 #endif
518 \f
519 /* Functions for the x86-64 ELF linker. */
520
521 /* The name of the dynamic interpreter. This is put in the .interp
522 section. */
523
524 #define ELF64_DYNAMIC_INTERPRETER "/lib/ld64.so.1"
525 #define ELF32_DYNAMIC_INTERPRETER "/lib/ldx32.so.1"
526
527 /* If ELIMINATE_COPY_RELOCS is non-zero, the linker will try to avoid
528 copying dynamic variables from a shared lib into an app's dynbss
529 section, and instead use a dynamic relocation to point into the
530 shared lib. */
531 #define ELIMINATE_COPY_RELOCS 1
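/* For example (a sketch, not tied to any particular program): when a
   non-PIC executable directly references a variable defined in a shared
   library, e.g.

       extern int shared_var;
       int get_it (void) { return shared_var; }

   the linker would normally reserve space for shared_var in the
   executable's .dynbss and emit an R_X86_64_COPY relocation so the dynamic
   linker copies the variable's initial value there at startup.  With
   ELIMINATE_COPY_RELOCS the linker instead tries to emit a dynamic
   relocation that refers to the definition in the shared library.  */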
532
533 /* The size in bytes of an entry in the global offset table. */
534
535 #define GOT_ENTRY_SIZE 8
536
537 /* The size in bytes of an entry in the procedure linkage table. */
538
539 #define PLT_ENTRY_SIZE 16
540
541 /* The first entry in a procedure linkage table looks like this. See the
542 SVR4 ABI i386 supplement and the x86-64 ABI to see how this works. */
543
544 static const bfd_byte elf_x86_64_plt0_entry[PLT_ENTRY_SIZE] =
545 {
546 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
547 0xff, 0x25, 16, 0, 0, 0, /* jmpq *GOT+16(%rip) */
548 0x0f, 0x1f, 0x40, 0x00 /* nopl 0(%rax) */
549 };
550
551 /* Subsequent entries in a procedure linkage table look like this. */
552
553 static const bfd_byte elf_x86_64_plt_entry[PLT_ENTRY_SIZE] =
554 {
555 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
556 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
557 0x68, /* pushq immediate */
558 0, 0, 0, 0, /* replaced with index into relocation table. */
559 0xe9, /* jmp relative */
560 0, 0, 0, 0 /* replaced with offset to start of .plt0. */
561 };
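/* For instance, once the fields below have been patched in, a lazy PLT
   entry in this layout looks roughly like:

       ff 25 xx xx xx xx    jmpq   *name@GOTPCREL(%rip)
       68 xx xx xx xx       pushq  $index_into_.rela.plt
       e9 xx xx xx xx       jmpq   <PLT0>

   The three patched fields start at byte offsets 2, 7 and 12, matching
   plt_got_offset, plt_reloc_offset and plt_plt_offset in
   elf_x86_64_arch_bed below.  */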
562
563 /* The first entry in a procedure linkage table with BND relocations
564 looks like this. */
565
566 static const bfd_byte elf_x86_64_bnd_plt0_entry[PLT_ENTRY_SIZE] =
567 {
568 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
569 0xf2, 0xff, 0x25, 16, 0, 0, 0, /* bnd jmpq *GOT+16(%rip) */
570 0x0f, 0x1f, 0 /* nopl (%rax) */
571 };
572
573 /* Subsequent entries for legacy branches in a procedure linkage table
574 with BND relocations look like this. */
575
576 static const bfd_byte elf_x86_64_legacy_plt_entry[PLT_ENTRY_SIZE] =
577 {
578 0x68, 0, 0, 0, 0, /* pushq immediate */
579 0xe9, 0, 0, 0, 0, /* jmpq relative */
580 0x66, 0x0f, 0x1f, 0x44, 0, 0 /* nopw (%rax,%rax,1) */
581 };
582
583 /* Subsequent entries for branches with BND prefix in a procedure linkage
584 table with BND relocations look like this. */
585
586 static const bfd_byte elf_x86_64_bnd_plt_entry[PLT_ENTRY_SIZE] =
587 {
588 0x68, 0, 0, 0, 0, /* pushq immediate */
589 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
590 0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */
591 };
592
593 /* Entries for legacy branches in the second procedure linkage table
594 look like this. */
595
596 static const bfd_byte elf_x86_64_legacy_plt2_entry[8] =
597 {
598 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
599 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
600 0x66, 0x90 /* xchg %ax,%ax */
601 };
602
603 /* Entries for branches with BND prefix in the second procedure linkage
604 table look like this. */
605
606 static const bfd_byte elf_x86_64_bnd_plt2_entry[8] =
607 {
608 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
609 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
610 0x90 /* nop */
611 };
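/* Rough sketch of the split (MPX) PLT scheme used by the BND templates
   above: branches to the symbol are directed at its 8-byte entry in the
   second PLT (.plt.bnd), which does the GOT-indirect bnd jump.  The GOT
   slot initially points at the symbol's 16-byte entry in .plt, which only
   pushes the relocation index and jumps to PLT0 for lazy resolution; once
   the resolver has run, the GOT slot points at the real function and the
   .plt entry is no longer reached.  */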
612
613 /* .eh_frame covering the .plt section. */
614
615 static const bfd_byte elf_x86_64_eh_frame_plt[] =
616 {
617 #define PLT_CIE_LENGTH 20
618 #define PLT_FDE_LENGTH 36
619 #define PLT_FDE_START_OFFSET 4 + PLT_CIE_LENGTH + 8
620 #define PLT_FDE_LEN_OFFSET 4 + PLT_CIE_LENGTH + 12
621 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
622 0, 0, 0, 0, /* CIE ID */
623 1, /* CIE version */
624 'z', 'R', 0, /* Augmentation string */
625 1, /* Code alignment factor */
626 0x78, /* Data alignment factor */
627 16, /* Return address column */
628 1, /* Augmentation size */
629 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
630 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
631 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
632 DW_CFA_nop, DW_CFA_nop,
633
634 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
635 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
636 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
637 0, 0, 0, 0, /* .plt size goes here */
638 0, /* Augmentation size */
639 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
640 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
641 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
642 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
643 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
644 11, /* Block length */
645 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
646 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
647 DW_OP_lit15, DW_OP_and, DW_OP_lit11, DW_OP_ge,
648 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
649 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
650 };
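/* The DW_CFA_def_cfa_expression above computes, in effect,

       CFA = %rsp + 8 + ((((%rip) & 15) >= 11) << 3)

   i.e. the CFA is 8 bytes above %rsp in the first part of a 16-byte PLT
   entry and 16 bytes above it from byte 11 on, after the pushq of the
   relocation index has executed.  */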
651
652 /* Architecture-specific backend data for x86-64. */
653
654 struct elf_x86_64_backend_data
655 {
656 /* Templates for the initial PLT entry and for subsequent entries. */
657 const bfd_byte *plt0_entry;
658 const bfd_byte *plt_entry;
659 unsigned int plt_entry_size; /* Size of each PLT entry. */
660
661 /* Offsets into plt0_entry that are to be replaced with GOT[1] and GOT[2]. */
662 unsigned int plt0_got1_offset;
663 unsigned int plt0_got2_offset;
664
665 /* Offset of the end of the PC-relative instruction containing
666 plt0_got2_offset. */
667 unsigned int plt0_got2_insn_end;
668
669 /* Offsets into plt_entry that are to be replaced with... */
670 unsigned int plt_got_offset; /* ... address of this symbol in .got. */
671 unsigned int plt_reloc_offset; /* ... offset into relocation table. */
672 unsigned int plt_plt_offset; /* ... offset to start of .plt. */
673
674 /* Length of the PC-relative instruction containing plt_got_offset. */
675 unsigned int plt_got_insn_size;
676
677 /* Offset of the end of the PC-relative jump to plt0_entry. */
678 unsigned int plt_plt_insn_end;
679
680 /* Offset into plt_entry where the initial value of the GOT entry points. */
681 unsigned int plt_lazy_offset;
682
683 /* .eh_frame covering the .plt section. */
684 const bfd_byte *eh_frame_plt;
685 unsigned int eh_frame_plt_size;
686 };
687
688 #define get_elf_x86_64_arch_data(bed) \
689 ((const struct elf_x86_64_backend_data *) (bed)->arch_data)
690
691 #define get_elf_x86_64_backend_data(abfd) \
692 get_elf_x86_64_arch_data (get_elf_backend_data (abfd))
693
694 #define GET_PLT_ENTRY_SIZE(abfd) \
695 get_elf_x86_64_backend_data (abfd)->plt_entry_size
696
697 /* These are the standard parameters. */
698 static const struct elf_x86_64_backend_data elf_x86_64_arch_bed =
699 {
700 elf_x86_64_plt0_entry, /* plt0_entry */
701 elf_x86_64_plt_entry, /* plt_entry */
702 sizeof (elf_x86_64_plt_entry), /* plt_entry_size */
703 2, /* plt0_got1_offset */
704 8, /* plt0_got2_offset */
705 12, /* plt0_got2_insn_end */
706 2, /* plt_got_offset */
707 7, /* plt_reloc_offset */
708 12, /* plt_plt_offset */
709 6, /* plt_got_insn_size */
710 PLT_ENTRY_SIZE, /* plt_plt_insn_end */
711 6, /* plt_lazy_offset */
712 elf_x86_64_eh_frame_plt, /* eh_frame_plt */
713 sizeof (elf_x86_64_eh_frame_plt), /* eh_frame_plt_size */
714 };
715
716 static const struct elf_x86_64_backend_data elf_x86_64_bnd_arch_bed =
717 {
718 elf_x86_64_bnd_plt0_entry, /* plt0_entry */
719 elf_x86_64_bnd_plt_entry, /* plt_entry */
720 sizeof (elf_x86_64_bnd_plt_entry), /* plt_entry_size */
721 2, /* plt0_got1_offset */
722 1+8, /* plt0_got2_offset */
723 1+12, /* plt0_got2_insn_end */
724 1+2, /* plt_got_offset */
725 1, /* plt_reloc_offset */
726 7, /* plt_plt_offset */
727 1+6, /* plt_got_insn_size */
728 11, /* plt_plt_insn_end */
729 0, /* plt_lazy_offset */
730 elf_x86_64_eh_frame_plt, /* eh_frame_plt */
731 sizeof (elf_x86_64_eh_frame_plt), /* eh_frame_plt_size */
732 };
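/* The "1+" terms above account for the one-byte 0xf2 BND prefix that
   precedes the affected branch instructions in the BND templates; apart
   from that the fields describe the split .plt/.plt.bnd layout sketched
   with the entry templates above.  */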
733
734 #define elf_backend_arch_data &elf_x86_64_arch_bed
735
736 /* x86-64 ELF linker hash entry. */
737
738 struct elf_x86_64_link_hash_entry
739 {
740 struct elf_link_hash_entry elf;
741
742 /* Track dynamic relocs copied for this symbol. */
743 struct elf_dyn_relocs *dyn_relocs;
744
745 #define GOT_UNKNOWN 0
746 #define GOT_NORMAL 1
747 #define GOT_TLS_GD 2
748 #define GOT_TLS_IE 3
749 #define GOT_TLS_GDESC 4
750 #define GOT_TLS_GD_BOTH_P(type) \
751 ((type) == (GOT_TLS_GD | GOT_TLS_GDESC))
752 #define GOT_TLS_GD_P(type) \
753 ((type) == GOT_TLS_GD || GOT_TLS_GD_BOTH_P (type))
754 #define GOT_TLS_GDESC_P(type) \
755 ((type) == GOT_TLS_GDESC || GOT_TLS_GD_BOTH_P (type))
756 #define GOT_TLS_GD_ANY_P(type) \
757 (GOT_TLS_GD_P (type) || GOT_TLS_GDESC_P (type))
758 unsigned char tls_type;
759
760 /* TRUE if symbol has at least one BND relocation. */
761 bfd_boolean has_bnd_reloc;
762
763 /* Information about the GOT PLT entry. Filled when there are both
764 GOT and PLT relocations against the same function. */
765 union gotplt_union plt_got;
766
767 /* Information about the second PLT entry. Filled when has_bnd_reloc is
768 set. */
769 union gotplt_union plt_bnd;
770
771 /* Offset of the GOTPLT entry reserved for the TLS descriptor,
772 starting at the end of the jump table. */
773 bfd_vma tlsdesc_got;
774 };
775
776 #define elf_x86_64_hash_entry(ent) \
777 ((struct elf_x86_64_link_hash_entry *)(ent))
778
779 struct elf_x86_64_obj_tdata
780 {
781 struct elf_obj_tdata root;
782
783 /* tls_type for each local got entry. */
784 char *local_got_tls_type;
785
786 /* GOTPLT entries for TLS descriptors. */
787 bfd_vma *local_tlsdesc_gotent;
788 };
789
790 #define elf_x86_64_tdata(abfd) \
791 ((struct elf_x86_64_obj_tdata *) (abfd)->tdata.any)
792
793 #define elf_x86_64_local_got_tls_type(abfd) \
794 (elf_x86_64_tdata (abfd)->local_got_tls_type)
795
796 #define elf_x86_64_local_tlsdesc_gotent(abfd) \
797 (elf_x86_64_tdata (abfd)->local_tlsdesc_gotent)
798
799 #define is_x86_64_elf(bfd) \
800 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
801 && elf_tdata (bfd) != NULL \
802 && elf_object_id (bfd) == X86_64_ELF_DATA)
803
804 static bfd_boolean
805 elf_x86_64_mkobject (bfd *abfd)
806 {
807 return bfd_elf_allocate_object (abfd, sizeof (struct elf_x86_64_obj_tdata),
808 X86_64_ELF_DATA);
809 }
810
811 /* x86-64 ELF linker hash table. */
812
813 struct elf_x86_64_link_hash_table
814 {
815 struct elf_link_hash_table elf;
816
817 /* Short-cuts to get to dynamic linker sections. */
818 asection *sdynbss;
819 asection *srelbss;
820 asection *plt_eh_frame;
821 asection *plt_bnd;
822 asection *plt_got;
823
824 union
825 {
826 bfd_signed_vma refcount;
827 bfd_vma offset;
828 } tls_ld_got;
829
830 /* The amount of space used by the jump slots in the GOT. */
831 bfd_vma sgotplt_jump_table_size;
832
833 /* Small local sym cache. */
834 struct sym_cache sym_cache;
835
836 bfd_vma (*r_info) (bfd_vma, bfd_vma);
837 bfd_vma (*r_sym) (bfd_vma);
838 unsigned int pointer_r_type;
839 const char *dynamic_interpreter;
840 int dynamic_interpreter_size;
841
842 /* _TLS_MODULE_BASE_ symbol. */
843 struct bfd_link_hash_entry *tls_module_base;
844
845 /* Used by local STT_GNU_IFUNC symbols. */
846 htab_t loc_hash_table;
847 void * loc_hash_memory;
848
849 /* The offset into splt of the PLT entry for the TLS descriptor
850 resolver. Special values are 0, if not necessary (or not found
851 to be necessary yet), and -1 if needed but not determined
852 yet. */
853 bfd_vma tlsdesc_plt;
854 /* The offset into sgot of the GOT entry used by the PLT entry
855 above. */
856 bfd_vma tlsdesc_got;
857
858 /* The index of the next R_X86_64_JUMP_SLOT entry in .rela.plt. */
859 bfd_vma next_jump_slot_index;
860 /* The index of the next R_X86_64_IRELATIVE entry in .rela.plt. */
861 bfd_vma next_irelative_index;
862 };
863
864 /* Get the x86-64 ELF linker hash table from a link_info structure. */
865
866 #define elf_x86_64_hash_table(p) \
867 (elf_hash_table_id ((struct elf_link_hash_table *) ((p)->hash)) \
868 == X86_64_ELF_DATA ? ((struct elf_x86_64_link_hash_table *) ((p)->hash)) : NULL)
869
870 #define elf_x86_64_compute_jump_table_size(htab) \
871 ((htab)->elf.srelplt->reloc_count * GOT_ENTRY_SIZE)
872
873 /* Create an entry in an x86-64 ELF linker hash table. */
874
875 static struct bfd_hash_entry *
876 elf_x86_64_link_hash_newfunc (struct bfd_hash_entry *entry,
877 struct bfd_hash_table *table,
878 const char *string)
879 {
880 /* Allocate the structure if it has not already been allocated by a
881 subclass. */
882 if (entry == NULL)
883 {
884 entry = (struct bfd_hash_entry *)
885 bfd_hash_allocate (table,
886 sizeof (struct elf_x86_64_link_hash_entry));
887 if (entry == NULL)
888 return entry;
889 }
890
891 /* Call the allocation method of the superclass. */
892 entry = _bfd_elf_link_hash_newfunc (entry, table, string);
893 if (entry != NULL)
894 {
895 struct elf_x86_64_link_hash_entry *eh;
896
897 eh = (struct elf_x86_64_link_hash_entry *) entry;
898 eh->dyn_relocs = NULL;
899 eh->tls_type = GOT_UNKNOWN;
900 eh->has_bnd_reloc = FALSE;
901 eh->plt_bnd.offset = (bfd_vma) -1;
902 eh->plt_got.offset = (bfd_vma) -1;
903 eh->tlsdesc_got = (bfd_vma) -1;
904 }
905
906 return entry;
907 }
908
909 /* Compute a hash of a local hash entry. We use elf_link_hash_entry
910 for local symbols so that we can handle local STT_GNU_IFUNC symbols
911 as global symbols. We reuse indx and dynstr_index for the local symbol
912 hash since they aren't used by global symbols in this backend. */
913
914 static hashval_t
915 elf_x86_64_local_htab_hash (const void *ptr)
916 {
917 struct elf_link_hash_entry *h
918 = (struct elf_link_hash_entry *) ptr;
919 return ELF_LOCAL_SYMBOL_HASH (h->indx, h->dynstr_index);
920 }
921
922 /* Compare local hash entries. */
923
924 static int
925 elf_x86_64_local_htab_eq (const void *ptr1, const void *ptr2)
926 {
927 struct elf_link_hash_entry *h1
928 = (struct elf_link_hash_entry *) ptr1;
929 struct elf_link_hash_entry *h2
930 = (struct elf_link_hash_entry *) ptr2;
931
932 return h1->indx == h2->indx && h1->dynstr_index == h2->dynstr_index;
933 }
934
935 /* Find and/or create a hash entry for a local symbol. */
936
937 static struct elf_link_hash_entry *
938 elf_x86_64_get_local_sym_hash (struct elf_x86_64_link_hash_table *htab,
939 bfd *abfd, const Elf_Internal_Rela *rel,
940 bfd_boolean create)
941 {
942 struct elf_x86_64_link_hash_entry e, *ret;
943 asection *sec = abfd->sections;
944 hashval_t h = ELF_LOCAL_SYMBOL_HASH (sec->id,
945 htab->r_sym (rel->r_info));
946 void **slot;
947
948 e.elf.indx = sec->id;
949 e.elf.dynstr_index = htab->r_sym (rel->r_info);
950 slot = htab_find_slot_with_hash (htab->loc_hash_table, &e, h,
951 create ? INSERT : NO_INSERT);
952
953 if (!slot)
954 return NULL;
955
956 if (*slot)
957 {
958 ret = (struct elf_x86_64_link_hash_entry *) *slot;
959 return &ret->elf;
960 }
961
962 ret = (struct elf_x86_64_link_hash_entry *)
963 objalloc_alloc ((struct objalloc *) htab->loc_hash_memory,
964 sizeof (struct elf_x86_64_link_hash_entry));
965 if (ret)
966 {
967 memset (ret, 0, sizeof (*ret));
968 ret->elf.indx = sec->id;
969 ret->elf.dynstr_index = htab->r_sym (rel->r_info);
970 ret->elf.dynindx = -1;
971 ret->plt_got.offset = (bfd_vma) -1;
972 *slot = ret;
973 }
974 return &ret->elf;
975 }
976
977 /* Destroy an X86-64 ELF linker hash table. */
978
979 static void
980 elf_x86_64_link_hash_table_free (bfd *obfd)
981 {
982 struct elf_x86_64_link_hash_table *htab
983 = (struct elf_x86_64_link_hash_table *) obfd->link.hash;
984
985 if (htab->loc_hash_table)
986 htab_delete (htab->loc_hash_table);
987 if (htab->loc_hash_memory)
988 objalloc_free ((struct objalloc *) htab->loc_hash_memory);
989 _bfd_elf_link_hash_table_free (obfd);
990 }
991
992 /* Create an X86-64 ELF linker hash table. */
993
994 static struct bfd_link_hash_table *
995 elf_x86_64_link_hash_table_create (bfd *abfd)
996 {
997 struct elf_x86_64_link_hash_table *ret;
998 bfd_size_type amt = sizeof (struct elf_x86_64_link_hash_table);
999
1000 ret = (struct elf_x86_64_link_hash_table *) bfd_zmalloc (amt);
1001 if (ret == NULL)
1002 return NULL;
1003
1004 if (!_bfd_elf_link_hash_table_init (&ret->elf, abfd,
1005 elf_x86_64_link_hash_newfunc,
1006 sizeof (struct elf_x86_64_link_hash_entry),
1007 X86_64_ELF_DATA))
1008 {
1009 free (ret);
1010 return NULL;
1011 }
1012
1013 if (ABI_64_P (abfd))
1014 {
1015 ret->r_info = elf64_r_info;
1016 ret->r_sym = elf64_r_sym;
1017 ret->pointer_r_type = R_X86_64_64;
1018 ret->dynamic_interpreter = ELF64_DYNAMIC_INTERPRETER;
1019 ret->dynamic_interpreter_size = sizeof ELF64_DYNAMIC_INTERPRETER;
1020 }
1021 else
1022 {
1023 ret->r_info = elf32_r_info;
1024 ret->r_sym = elf32_r_sym;
1025 ret->pointer_r_type = R_X86_64_32;
1026 ret->dynamic_interpreter = ELF32_DYNAMIC_INTERPRETER;
1027 ret->dynamic_interpreter_size = sizeof ELF32_DYNAMIC_INTERPRETER;
1028 }
1029
1030 ret->loc_hash_table = htab_try_create (1024,
1031 elf_x86_64_local_htab_hash,
1032 elf_x86_64_local_htab_eq,
1033 NULL);
1034 ret->loc_hash_memory = objalloc_create ();
1035 if (!ret->loc_hash_table || !ret->loc_hash_memory)
1036 {
1037 elf_x86_64_link_hash_table_free (abfd);
1038 return NULL;
1039 }
1040 ret->elf.root.hash_table_free = elf_x86_64_link_hash_table_free;
1041
1042 return &ret->elf.root;
1043 }
1044
1045 /* Create .plt, .rela.plt, .got, .got.plt, .rela.got, .dynbss, and
1046 .rela.bss sections in DYNOBJ, and set up shortcuts to them in our
1047 hash table. */
1048
1049 static bfd_boolean
1050 elf_x86_64_create_dynamic_sections (bfd *dynobj,
1051 struct bfd_link_info *info)
1052 {
1053 struct elf_x86_64_link_hash_table *htab;
1054
1055 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
1056 return FALSE;
1057
1058 htab = elf_x86_64_hash_table (info);
1059 if (htab == NULL)
1060 return FALSE;
1061
1062 htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
1063 if (!htab->sdynbss)
1064 abort ();
1065
1066 if (info->executable)
1067 {
1068 /* Always allow copy relocs for building executables. */
1069 asection *s;
1070 s = bfd_get_linker_section (dynobj, ".rela.bss");
1071 if (s == NULL)
1072 {
1073 const struct elf_backend_data *bed = get_elf_backend_data (dynobj);
1074 s = bfd_make_section_anyway_with_flags (dynobj,
1075 ".rela.bss",
1076 (bed->dynamic_sec_flags
1077 | SEC_READONLY));
1078 if (s == NULL
1079 || ! bfd_set_section_alignment (dynobj, s,
1080 bed->s->log_file_align))
1081 return FALSE;
1082 }
1083 htab->srelbss = s;
1084 }
1085
1086 if (!info->no_ld_generated_unwind_info
1087 && htab->plt_eh_frame == NULL
1088 && htab->elf.splt != NULL)
1089 {
1090 flagword flags = (SEC_ALLOC | SEC_LOAD | SEC_READONLY
1091 | SEC_HAS_CONTENTS | SEC_IN_MEMORY
1092 | SEC_LINKER_CREATED);
1093 htab->plt_eh_frame
1094 = bfd_make_section_anyway_with_flags (dynobj, ".eh_frame", flags);
1095 if (htab->plt_eh_frame == NULL
1096 || !bfd_set_section_alignment (dynobj, htab->plt_eh_frame, 3))
1097 return FALSE;
1098 }
1099 return TRUE;
1100 }
1101
1102 /* Copy the extra info we tack onto an elf_link_hash_entry. */
1103
1104 static void
1105 elf_x86_64_copy_indirect_symbol (struct bfd_link_info *info,
1106 struct elf_link_hash_entry *dir,
1107 struct elf_link_hash_entry *ind)
1108 {
1109 struct elf_x86_64_link_hash_entry *edir, *eind;
1110
1111 edir = (struct elf_x86_64_link_hash_entry *) dir;
1112 eind = (struct elf_x86_64_link_hash_entry *) ind;
1113
1114 if (!edir->has_bnd_reloc)
1115 edir->has_bnd_reloc = eind->has_bnd_reloc;
1116
1117 if (eind->dyn_relocs != NULL)
1118 {
1119 if (edir->dyn_relocs != NULL)
1120 {
1121 struct elf_dyn_relocs **pp;
1122 struct elf_dyn_relocs *p;
1123
1124 /* Add reloc counts against the indirect sym to the direct sym
1125 list. Merge any entries against the same section. */
1126 for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
1127 {
1128 struct elf_dyn_relocs *q;
1129
1130 for (q = edir->dyn_relocs; q != NULL; q = q->next)
1131 if (q->sec == p->sec)
1132 {
1133 q->pc_count += p->pc_count;
1134 q->count += p->count;
1135 *pp = p->next;
1136 break;
1137 }
1138 if (q == NULL)
1139 pp = &p->next;
1140 }
1141 *pp = edir->dyn_relocs;
1142 }
1143
1144 edir->dyn_relocs = eind->dyn_relocs;
1145 eind->dyn_relocs = NULL;
1146 }
1147
1148 if (ind->root.type == bfd_link_hash_indirect
1149 && dir->got.refcount <= 0)
1150 {
1151 edir->tls_type = eind->tls_type;
1152 eind->tls_type = GOT_UNKNOWN;
1153 }
1154
1155 if (ELIMINATE_COPY_RELOCS
1156 && ind->root.type != bfd_link_hash_indirect
1157 && dir->dynamic_adjusted)
1158 {
1159 /* If called to transfer flags for a weakdef during processing
1160 of elf_adjust_dynamic_symbol, don't copy non_got_ref.
1161 We clear it ourselves for ELIMINATE_COPY_RELOCS. */
1162 dir->ref_dynamic |= ind->ref_dynamic;
1163 dir->ref_regular |= ind->ref_regular;
1164 dir->ref_regular_nonweak |= ind->ref_regular_nonweak;
1165 dir->needs_plt |= ind->needs_plt;
1166 dir->pointer_equality_needed |= ind->pointer_equality_needed;
1167 }
1168 else
1169 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
1170 }
1171
1172 static bfd_boolean
1173 elf64_x86_64_elf_object_p (bfd *abfd)
1174 {
1175 /* Set the right machine number for an x86-64 elf64 file. */
1176 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64);
1177 return TRUE;
1178 }
1179
1180 static bfd_boolean
1181 elf32_x86_64_elf_object_p (bfd *abfd)
1182 {
1183 /* Set the right machine number for an x86-64 elf32 file. */
1184 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32);
1185 return TRUE;
1186 }
1187
1188 /* Return TRUE if the TLS access code sequence supports the transition
1189 from R_TYPE. */
1190
1191 static bfd_boolean
1192 elf_x86_64_check_tls_transition (bfd *abfd,
1193 struct bfd_link_info *info,
1194 asection *sec,
1195 bfd_byte *contents,
1196 Elf_Internal_Shdr *symtab_hdr,
1197 struct elf_link_hash_entry **sym_hashes,
1198 unsigned int r_type,
1199 const Elf_Internal_Rela *rel,
1200 const Elf_Internal_Rela *relend)
1201 {
1202 unsigned int val;
1203 unsigned long r_symndx;
1204 bfd_boolean largepic = FALSE;
1205 struct elf_link_hash_entry *h;
1206 bfd_vma offset;
1207 struct elf_x86_64_link_hash_table *htab;
1208
1209 /* Get the section contents. */
1210 if (contents == NULL)
1211 {
1212 if (elf_section_data (sec)->this_hdr.contents != NULL)
1213 contents = elf_section_data (sec)->this_hdr.contents;
1214 else
1215 {
1216 /* FIXME: How to better handle error condition? */
1217 if (!bfd_malloc_and_get_section (abfd, sec, &contents))
1218 return FALSE;
1219
1220 /* Cache the section contents for elf_link_input_bfd. */
1221 elf_section_data (sec)->this_hdr.contents = contents;
1222 }
1223 }
1224
1225 htab = elf_x86_64_hash_table (info);
1226 offset = rel->r_offset;
1227 switch (r_type)
1228 {
1229 case R_X86_64_TLSGD:
1230 case R_X86_64_TLSLD:
1231 if ((rel + 1) >= relend)
1232 return FALSE;
1233
1234 if (r_type == R_X86_64_TLSGD)
1235 {
1236 /* Check transition from GD access model. For 64bit, only
1237 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1238 .word 0x6666; rex64; call __tls_get_addr
1239 can transit to different access model. For 32bit, only
1240 leaq foo@tlsgd(%rip), %rdi
1241 .word 0x6666; rex64; call __tls_get_addr
1242 can transit to different access model. For largepic
1243 we also support:
1244 leaq foo@tlsgd(%rip), %rdi
1245 movabsq $__tls_get_addr@pltoff, %rax
1246 addq %rbx, %rax
1247 call *%rax. */
1248
1249 static const unsigned char call[] = { 0x66, 0x66, 0x48, 0xe8 };
1250 static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d };
1251
1252 if ((offset + 12) > sec->size)
1253 return FALSE;
1254
1255 if (memcmp (contents + offset + 4, call, 4) != 0)
1256 {
1257 if (!ABI_64_P (abfd)
1258 || (offset + 19) > sec->size
1259 || offset < 3
1260 || memcmp (contents + offset - 3, leaq + 1, 3) != 0
1261 || memcmp (contents + offset + 4, "\x48\xb8", 2) != 0
1262 || memcmp (contents + offset + 14, "\x48\x01\xd8\xff\xd0", 5)
1263 != 0)
1264 return FALSE;
1265 largepic = TRUE;
1266 }
1267 else if (ABI_64_P (abfd))
1268 {
1269 if (offset < 4
1270 || memcmp (contents + offset - 4, leaq, 4) != 0)
1271 return FALSE;
1272 }
1273 else
1274 {
1275 if (offset < 3
1276 || memcmp (contents + offset - 3, leaq + 1, 3) != 0)
1277 return FALSE;
1278 }
1279 }
1280 else
1281 {
1282 /* Check transition from LD access model. Only
1283 leaq foo@tlsld(%rip), %rdi;
1284 call __tls_get_addr
1285 can transit to different access model. For largepic
1286 we also support:
1287 leaq foo@tlsld(%rip), %rdi
1288 movabsq $__tls_get_addr@pltoff, %rax
1289 addq %rbx, %rax
1290 call *%rax. */
1291
1292 static const unsigned char lea[] = { 0x48, 0x8d, 0x3d };
1293
1294 if (offset < 3 || (offset + 9) > sec->size)
1295 return FALSE;
1296
1297 if (memcmp (contents + offset - 3, lea, 3) != 0)
1298 return FALSE;
1299
1300 if (0xe8 != *(contents + offset + 4))
1301 {
1302 if (!ABI_64_P (abfd)
1303 || (offset + 19) > sec->size
1304 || memcmp (contents + offset + 4, "\x48\xb8", 2) != 0
1305 || memcmp (contents + offset + 14, "\x48\x01\xd8\xff\xd0", 5)
1306 != 0)
1307 return FALSE;
1308 largepic = TRUE;
1309 }
1310 }
1311
1312 r_symndx = htab->r_sym (rel[1].r_info);
1313 if (r_symndx < symtab_hdr->sh_info)
1314 return FALSE;
1315
1316 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1317 /* Use strncmp to check __tls_get_addr since __tls_get_addr
1318 may be versioned. */
1319 return (h != NULL
1320 && h->root.root.string != NULL
1321 && (largepic
1322 ? ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PLTOFF64
1323 : (ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PC32
1324 || ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PLT32))
1325 && (strncmp (h->root.root.string,
1326 "__tls_get_addr", 14) == 0));
1327
1328 case R_X86_64_GOTTPOFF:
1329 /* Check transition from IE access model:
1330 mov foo@gottpoff(%rip), %reg
1331 add foo@gottpoff(%rip), %reg
1332 */
1333
1334 /* Check REX prefix first. */
1335 if (offset >= 3 && (offset + 4) <= sec->size)
1336 {
1337 val = bfd_get_8 (abfd, contents + offset - 3);
1338 if (val != 0x48 && val != 0x4c)
1339 {
1340 /* X32 may have 0x44 REX prefix or no REX prefix. */
1341 if (ABI_64_P (abfd))
1342 return FALSE;
1343 }
1344 }
1345 else
1346 {
1347 /* X32 may not have any REX prefix. */
1348 if (ABI_64_P (abfd))
1349 return FALSE;
1350 if (offset < 2 || (offset + 3) > sec->size)
1351 return FALSE;
1352 }
1353
1354 val = bfd_get_8 (abfd, contents + offset - 2);
1355 if (val != 0x8b && val != 0x03)
1356 return FALSE;
1357
1358 val = bfd_get_8 (abfd, contents + offset - 1);
1359 return (val & 0xc7) == 5;
1360
1361 case R_X86_64_GOTPC32_TLSDESC:
1362 /* Check transition from GDesc access model:
1363 leaq x@tlsdesc(%rip), %rax
1364
1365 Make sure it's a leaq adding rip to a 32-bit offset
1366 into any register, although it's probably almost always
1367 going to be rax. */
1368
1369 if (offset < 3 || (offset + 4) > sec->size)
1370 return FALSE;
1371
1372 val = bfd_get_8 (abfd, contents + offset - 3);
1373 if ((val & 0xfb) != 0x48)
1374 return FALSE;
1375
1376 if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d)
1377 return FALSE;
1378
1379 val = bfd_get_8 (abfd, contents + offset - 1);
1380 return (val & 0xc7) == 0x05;
1381
1382 case R_X86_64_TLSDESC_CALL:
1383 /* Check transition from GDesc access model:
1384 call *x@tlsdesc(%rax)
1385 */
1386 if (offset + 2 <= sec->size)
1387 {
1388 /* Make sure that it's a call *x@tlsdesc(%rax). */
1389 static const unsigned char call[] = { 0xff, 0x10 };
1390 return memcmp (contents + offset, call, 2) == 0;
1391 }
1392
1393 return FALSE;
1394
1395 default:
1396 abort ();
1397 }
1398 }
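/* A concrete example of the transitions checked above (a sketch; the
   actual instruction rewriting happens later, in
   elf_x86_64_relocate_section): when building an executable, the 64-bit
   GD sequence

       .byte 0x66; leaq foo@tlsgd(%rip), %rdi
       .word 0x6666; rex64; call __tls_get_addr

   can be turned into the LE form

       movq %fs:0, %rax
       leaq foo@tpoff(%rax), %rax

   and an IE access "movq foo@gottpoff(%rip), %reg" can become
   "movq $foo@tpoff, %reg".  */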
1399
1400 /* Return TRUE if the TLS access transition is OK or no transition
1401 will be performed. Update R_TYPE if there is a transition. */
1402
1403 static bfd_boolean
1404 elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd,
1405 asection *sec, bfd_byte *contents,
1406 Elf_Internal_Shdr *symtab_hdr,
1407 struct elf_link_hash_entry **sym_hashes,
1408 unsigned int *r_type, int tls_type,
1409 const Elf_Internal_Rela *rel,
1410 const Elf_Internal_Rela *relend,
1411 struct elf_link_hash_entry *h,
1412 unsigned long r_symndx)
1413 {
1414 unsigned int from_type = *r_type;
1415 unsigned int to_type = from_type;
1416 bfd_boolean check = TRUE;
1417
1418 /* Skip TLS transition for functions. */
1419 if (h != NULL
1420 && (h->type == STT_FUNC
1421 || h->type == STT_GNU_IFUNC))
1422 return TRUE;
1423
1424 switch (from_type)
1425 {
1426 case R_X86_64_TLSGD:
1427 case R_X86_64_GOTPC32_TLSDESC:
1428 case R_X86_64_TLSDESC_CALL:
1429 case R_X86_64_GOTTPOFF:
1430 if (info->executable)
1431 {
1432 if (h == NULL)
1433 to_type = R_X86_64_TPOFF32;
1434 else
1435 to_type = R_X86_64_GOTTPOFF;
1436 }
1437
1438 /* When we are called from elf_x86_64_relocate_section,
1439 CONTENTS isn't NULL and there may be additional transitions
1440 based on TLS_TYPE. */
1441 if (contents != NULL)
1442 {
1443 unsigned int new_to_type = to_type;
1444
1445 if (info->executable
1446 && h != NULL
1447 && h->dynindx == -1
1448 && tls_type == GOT_TLS_IE)
1449 new_to_type = R_X86_64_TPOFF32;
1450
1451 if (to_type == R_X86_64_TLSGD
1452 || to_type == R_X86_64_GOTPC32_TLSDESC
1453 || to_type == R_X86_64_TLSDESC_CALL)
1454 {
1455 if (tls_type == GOT_TLS_IE)
1456 new_to_type = R_X86_64_GOTTPOFF;
1457 }
1458
1459 /* We checked the transition before when we were called from
1460 elf_x86_64_check_relocs. We only want to check the new
1461 transition which hasn't been checked before. */
1462 check = new_to_type != to_type && from_type == to_type;
1463 to_type = new_to_type;
1464 }
1465
1466 break;
1467
1468 case R_X86_64_TLSLD:
1469 if (info->executable)
1470 to_type = R_X86_64_TPOFF32;
1471 break;
1472
1473 default:
1474 return TRUE;
1475 }
1476
1477 /* Return TRUE if there is no transition. */
1478 if (from_type == to_type)
1479 return TRUE;
1480
1481 /* Check if the transition can be performed. */
1482 if (check
1483 && ! elf_x86_64_check_tls_transition (abfd, info, sec, contents,
1484 symtab_hdr, sym_hashes,
1485 from_type, rel, relend))
1486 {
1487 reloc_howto_type *from, *to;
1488 const char *name;
1489
1490 from = elf_x86_64_rtype_to_howto (abfd, from_type);
1491 to = elf_x86_64_rtype_to_howto (abfd, to_type);
1492
1493 if (h)
1494 name = h->root.root.string;
1495 else
1496 {
1497 struct elf_x86_64_link_hash_table *htab;
1498
1499 htab = elf_x86_64_hash_table (info);
1500 if (htab == NULL)
1501 name = "*unknown*";
1502 else
1503 {
1504 Elf_Internal_Sym *isym;
1505
1506 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1507 abfd, r_symndx);
1508 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1509 }
1510 }
1511
1512 (*_bfd_error_handler)
1513 (_("%B: TLS transition from %s to %s against `%s' at 0x%lx "
1514 "in section `%A' failed"),
1515 abfd, sec, from->name, to->name, name,
1516 (unsigned long) rel->r_offset);
1517 bfd_set_error (bfd_error_bad_value);
1518 return FALSE;
1519 }
1520
1521 *r_type = to_type;
1522 return TRUE;
1523 }
1524
1525 /* Look through the relocs for a section during the first phase, and
1526 calculate needed space in the global offset table, procedure
1527 linkage table, and dynamic reloc sections. */
1528
1529 static bfd_boolean
1530 elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info,
1531 asection *sec,
1532 const Elf_Internal_Rela *relocs)
1533 {
1534 struct elf_x86_64_link_hash_table *htab;
1535 Elf_Internal_Shdr *symtab_hdr;
1536 struct elf_link_hash_entry **sym_hashes;
1537 const Elf_Internal_Rela *rel;
1538 const Elf_Internal_Rela *rel_end;
1539 asection *sreloc;
1540 bfd_boolean use_plt_got;
1541
1542 if (info->relocatable)
1543 return TRUE;
1544
1545 BFD_ASSERT (is_x86_64_elf (abfd));
1546
1547 htab = elf_x86_64_hash_table (info);
1548 if (htab == NULL)
1549 return FALSE;
1550
1551 use_plt_got = get_elf_x86_64_backend_data (abfd) == &elf_x86_64_arch_bed;
1552
1553 symtab_hdr = &elf_symtab_hdr (abfd);
1554 sym_hashes = elf_sym_hashes (abfd);
1555
1556 sreloc = NULL;
1557
1558 rel_end = relocs + sec->reloc_count;
1559 for (rel = relocs; rel < rel_end; rel++)
1560 {
1561 unsigned int r_type;
1562 unsigned long r_symndx;
1563 struct elf_link_hash_entry *h;
1564 Elf_Internal_Sym *isym;
1565 const char *name;
1566 bfd_boolean size_reloc;
1567
1568 r_symndx = htab->r_sym (rel->r_info);
1569 r_type = ELF32_R_TYPE (rel->r_info);
1570
1571 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
1572 {
1573 (*_bfd_error_handler) (_("%B: bad symbol index: %d"),
1574 abfd, r_symndx);
1575 return FALSE;
1576 }
1577
1578 if (r_symndx < symtab_hdr->sh_info)
1579 {
1580 /* A local symbol. */
1581 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1582 abfd, r_symndx);
1583 if (isym == NULL)
1584 return FALSE;
1585
1586 /* Check relocation against local STT_GNU_IFUNC symbol. */
1587 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
1588 {
1589 h = elf_x86_64_get_local_sym_hash (htab, abfd, rel,
1590 TRUE);
1591 if (h == NULL)
1592 return FALSE;
1593
1594 /* Fake a STT_GNU_IFUNC symbol. */
1595 h->type = STT_GNU_IFUNC;
1596 h->def_regular = 1;
1597 h->ref_regular = 1;
1598 h->forced_local = 1;
1599 h->root.type = bfd_link_hash_defined;
1600 }
1601 else
1602 h = NULL;
1603 }
1604 else
1605 {
1606 isym = NULL;
1607 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1608 while (h->root.type == bfd_link_hash_indirect
1609 || h->root.type == bfd_link_hash_warning)
1610 h = (struct elf_link_hash_entry *) h->root.u.i.link;
1611 }
1612
1613 /* Check invalid x32 relocations. */
1614 if (!ABI_64_P (abfd))
1615 switch (r_type)
1616 {
1617 default:
1618 break;
1619
1620 case R_X86_64_DTPOFF64:
1621 case R_X86_64_TPOFF64:
1622 case R_X86_64_PC64:
1623 case R_X86_64_GOTOFF64:
1624 case R_X86_64_GOT64:
1625 case R_X86_64_GOTPCREL64:
1626 case R_X86_64_GOTPC64:
1627 case R_X86_64_GOTPLT64:
1628 case R_X86_64_PLTOFF64:
1629 {
1630 if (h)
1631 name = h->root.root.string;
1632 else
1633 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1634 NULL);
1635 (*_bfd_error_handler)
1636 (_("%B: relocation %s against symbol `%s' isn't "
1637 "supported in x32 mode"), abfd,
1638 x86_64_elf_howto_table[r_type].name, name);
1639 bfd_set_error (bfd_error_bad_value);
1640 return FALSE;
1641 }
1642 break;
1643 }
1644
1645 if (h != NULL)
1646 {
1647 /* Create the ifunc sections for static executables. If we
1648 never see an indirect function symbol and are not building
1649 a static executable, those sections will be empty and
1650 won't appear in output. */
1651 switch (r_type)
1652 {
1653 default:
1654 break;
1655
1656 case R_X86_64_PC32_BND:
1657 case R_X86_64_PLT32_BND:
1658 case R_X86_64_PC32:
1659 case R_X86_64_PLT32:
1660 case R_X86_64_32:
1661 case R_X86_64_64:
1662 /* MPX PLT is supported only if elf_x86_64_arch_bed
1663 is used in 64-bit mode. */
1664 if (ABI_64_P (abfd)
1665 && info->bndplt
1666 && (get_elf_x86_64_backend_data (abfd)
1667 == &elf_x86_64_arch_bed))
1668 {
1669 elf_x86_64_hash_entry (h)->has_bnd_reloc = TRUE;
1670
1671 /* Create the second PLT for Intel MPX support. */
1672 if (htab->plt_bnd == NULL)
1673 {
1674 unsigned int plt_bnd_align;
1675 const struct elf_backend_data *bed;
1676
1677 bed = get_elf_backend_data (info->output_bfd);
1678 BFD_ASSERT (sizeof (elf_x86_64_bnd_plt2_entry) == 8
1679 && (sizeof (elf_x86_64_bnd_plt2_entry)
1680 == sizeof (elf_x86_64_legacy_plt2_entry)));
1681 plt_bnd_align = 3;
1682
1683 if (htab->elf.dynobj == NULL)
1684 htab->elf.dynobj = abfd;
1685 htab->plt_bnd
1686 = bfd_make_section_anyway_with_flags (htab->elf.dynobj,
1687 ".plt.bnd",
1688 (bed->dynamic_sec_flags
1689 | SEC_ALLOC
1690 | SEC_CODE
1691 | SEC_LOAD
1692 | SEC_READONLY));
1693 if (htab->plt_bnd == NULL
1694 || !bfd_set_section_alignment (htab->elf.dynobj,
1695 htab->plt_bnd,
1696 plt_bnd_align))
1697 return FALSE;
1698 }
1699 }
1700
1701 case R_X86_64_32S:
1702 case R_X86_64_PC64:
1703 case R_X86_64_GOTPCREL:
1704 case R_X86_64_GOTPCREL64:
1705 if (htab->elf.dynobj == NULL)
1706 htab->elf.dynobj = abfd;
1707 if (!_bfd_elf_create_ifunc_sections (htab->elf.dynobj, info))
1708 return FALSE;
1709 break;
1710 }
1711
1712 /* It is referenced by a non-shared object. */
1713 h->ref_regular = 1;
1714 h->root.non_ir_ref = 1;
1715 }
1716
1717 if (! elf_x86_64_tls_transition (info, abfd, sec, NULL,
1718 symtab_hdr, sym_hashes,
1719 &r_type, GOT_UNKNOWN,
1720 rel, rel_end, h, r_symndx))
1721 return FALSE;
1722
1723 switch (r_type)
1724 {
1725 case R_X86_64_TLSLD:
1726 htab->tls_ld_got.refcount += 1;
1727 goto create_got;
1728
1729 case R_X86_64_TPOFF32:
1730 if (!info->executable && ABI_64_P (abfd))
1731 {
1732 if (h)
1733 name = h->root.root.string;
1734 else
1735 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1736 NULL);
1737 (*_bfd_error_handler)
1738 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
1739 abfd,
1740 x86_64_elf_howto_table[r_type].name, name);
1741 bfd_set_error (bfd_error_bad_value);
1742 return FALSE;
1743 }
1744 break;
1745
1746 case R_X86_64_GOTTPOFF:
1747 if (!info->executable)
1748 info->flags |= DF_STATIC_TLS;
1749 /* Fall through */
1750
1751 case R_X86_64_GOT32:
1752 case R_X86_64_GOTPCREL:
1753 case R_X86_64_TLSGD:
1754 case R_X86_64_GOT64:
1755 case R_X86_64_GOTPCREL64:
1756 case R_X86_64_GOTPLT64:
1757 case R_X86_64_GOTPC32_TLSDESC:
1758 case R_X86_64_TLSDESC_CALL:
1759 /* This symbol requires a global offset table entry. */
1760 {
1761 int tls_type, old_tls_type;
1762
1763 switch (r_type)
1764 {
1765 default: tls_type = GOT_NORMAL; break;
1766 case R_X86_64_TLSGD: tls_type = GOT_TLS_GD; break;
1767 case R_X86_64_GOTTPOFF: tls_type = GOT_TLS_IE; break;
1768 case R_X86_64_GOTPC32_TLSDESC:
1769 case R_X86_64_TLSDESC_CALL:
1770 tls_type = GOT_TLS_GDESC; break;
1771 }
1772
1773 if (h != NULL)
1774 {
1775 h->got.refcount += 1;
1776 old_tls_type = elf_x86_64_hash_entry (h)->tls_type;
1777 }
1778 else
1779 {
1780 bfd_signed_vma *local_got_refcounts;
1781
1782 /* This is a global offset table entry for a local symbol. */
1783 local_got_refcounts = elf_local_got_refcounts (abfd);
1784 if (local_got_refcounts == NULL)
1785 {
1786 bfd_size_type size;
1787
1788 size = symtab_hdr->sh_info;
1789 size *= sizeof (bfd_signed_vma)
1790 + sizeof (bfd_vma) + sizeof (char);
1791 local_got_refcounts = ((bfd_signed_vma *)
1792 bfd_zalloc (abfd, size));
1793 if (local_got_refcounts == NULL)
1794 return FALSE;
1795 elf_local_got_refcounts (abfd) = local_got_refcounts;
1796 elf_x86_64_local_tlsdesc_gotent (abfd)
1797 = (bfd_vma *) (local_got_refcounts + symtab_hdr->sh_info);
1798 elf_x86_64_local_got_tls_type (abfd)
1799 = (char *) (local_got_refcounts + 2 * symtab_hdr->sh_info);
1800 }
1801 local_got_refcounts[r_symndx] += 1;
1802 old_tls_type
1803 = elf_x86_64_local_got_tls_type (abfd) [r_symndx];
1804 }
1805
1806 /* If a TLS symbol is accessed using IE at least once,
1807 there is no point in using the dynamic model for it. */
1808 if (old_tls_type != tls_type && old_tls_type != GOT_UNKNOWN
1809 && (! GOT_TLS_GD_ANY_P (old_tls_type)
1810 || tls_type != GOT_TLS_IE))
1811 {
1812 if (old_tls_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (tls_type))
1813 tls_type = old_tls_type;
1814 else if (GOT_TLS_GD_ANY_P (old_tls_type)
1815 && GOT_TLS_GD_ANY_P (tls_type))
1816 tls_type |= old_tls_type;
1817 else
1818 {
1819 if (h)
1820 name = h->root.root.string;
1821 else
1822 name = bfd_elf_sym_name (abfd, symtab_hdr,
1823 isym, NULL);
1824 (*_bfd_error_handler)
1825 (_("%B: '%s' accessed both as normal and thread local symbol"),
1826 abfd, name);
1827 bfd_set_error (bfd_error_bad_value);
1828 return FALSE;
1829 }
1830 }
1831
1832 if (old_tls_type != tls_type)
1833 {
1834 if (h != NULL)
1835 elf_x86_64_hash_entry (h)->tls_type = tls_type;
1836 else
1837 elf_x86_64_local_got_tls_type (abfd) [r_symndx] = tls_type;
1838 }
1839 }
1840 /* Fall through */
1841
1842 case R_X86_64_GOTOFF64:
1843 case R_X86_64_GOTPC32:
1844 case R_X86_64_GOTPC64:
1845 create_got:
1846 if (htab->elf.sgot == NULL)
1847 {
1848 if (htab->elf.dynobj == NULL)
1849 htab->elf.dynobj = abfd;
1850 if (!_bfd_elf_create_got_section (htab->elf.dynobj,
1851 info))
1852 return FALSE;
1853 }
1854 break;
1855
1856 case R_X86_64_PLT32:
1857 case R_X86_64_PLT32_BND:
1858 /* This symbol requires a procedure linkage table entry. We
1859 actually build the entry in adjust_dynamic_symbol,
1860 because this might be a case of linking PIC code which is
1861 never referenced by a dynamic object, in which case we
1862 don't need to generate a procedure linkage table entry
1863 after all. */
1864
1865 /* If this is a local symbol, we resolve it directly without
1866 creating a procedure linkage table entry. */
1867 if (h == NULL)
1868 continue;
1869
1870 h->needs_plt = 1;
1871 h->plt.refcount += 1;
1872 break;
1873
1874 case R_X86_64_PLTOFF64:
1875 /* This tries to form the 'address' of a function relative
1876 to GOT. For global symbols we need a PLT entry. */
1877 if (h != NULL)
1878 {
1879 h->needs_plt = 1;
1880 h->plt.refcount += 1;
1881 }
1882 goto create_got;
1883
1884 case R_X86_64_SIZE32:
1885 case R_X86_64_SIZE64:
1886 size_reloc = TRUE;
1887 goto do_size;
1888
1889 case R_X86_64_32:
1890 if (!ABI_64_P (abfd))
1891 goto pointer;
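      /* Fall through.  */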
1892 case R_X86_64_8:
1893 case R_X86_64_16:
1894 case R_X86_64_32S:
1895 /* Let's help debug shared library creation. These relocs
1896 cannot be used in shared libs. Don't error out for
1897 sections we don't care about, such as debug sections or
1898 non-constant sections. */
1899 if (info->shared
1900 && (sec->flags & SEC_ALLOC) != 0
1901 && (sec->flags & SEC_READONLY) != 0)
1902 {
1903 if (h)
1904 name = h->root.root.string;
1905 else
1906 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1907 (*_bfd_error_handler)
1908 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
1909 abfd, x86_64_elf_howto_table[r_type].name, name);
1910 bfd_set_error (bfd_error_bad_value);
1911 return FALSE;
1912 }
1913 /* Fall through. */
1914
1915 case R_X86_64_PC8:
1916 case R_X86_64_PC16:
1917 case R_X86_64_PC32:
1918 case R_X86_64_PC32_BND:
1919 case R_X86_64_PC64:
1920 case R_X86_64_64:
1921 pointer:
1922 if (h != NULL && info->executable)
1923 {
1924 /* If this reloc is in a read-only section, we might
1925 need a copy reloc. We can't check reliably at this
1926 stage whether the section is read-only, as input
1927 sections have not yet been mapped to output sections.
1928 Tentatively set the flag for now, and correct in
1929 adjust_dynamic_symbol. */
1930 h->non_got_ref = 1;
1931
1932 /* We may need a .plt entry if the function this reloc
1933 refers to is in a shared lib. */
1934 h->plt.refcount += 1;
1935 if (r_type != R_X86_64_PC32
1936 && r_type != R_X86_64_PC32_BND
1937 && r_type != R_X86_64_PC64)
1938 h->pointer_equality_needed = 1;
1939 }
1940
1941 size_reloc = FALSE;
1942 do_size:
1943 /* If we are creating a shared library, and this is a reloc
1944 against a global symbol, or a non PC relative reloc
1945 against a local symbol, then we need to copy the reloc
1946 into the shared library. However, if we are linking with
1947 -Bsymbolic, we do not need to copy a reloc against a
1948 global symbol which is defined in an object we are
1949 including in the link (i.e., DEF_REGULAR is set). At
1950 this point we have not seen all the input files, so it is
1951 possible that DEF_REGULAR is not set now but will be set
1952 later (it is never cleared). In case of a weak definition,
1953 DEF_REGULAR may be cleared later by a strong definition in
1954 a shared library. We account for that possibility below by
1955 storing information in the relocs_copied field of the hash
1956 table entry. A similar situation occurs when creating
1957 shared libraries and symbol visibility changes render the
1958 symbol local.
1959
1960 If on the other hand, we are creating an executable, we
1961 may need to keep relocations for symbols satisfied by a
1962 dynamic library if we manage to avoid copy relocs for the
1963 symbol. */
1964 if ((info->shared
1965 && (sec->flags & SEC_ALLOC) != 0
1966 && (! IS_X86_64_PCREL_TYPE (r_type)
1967 || (h != NULL
1968 && (! SYMBOLIC_BIND (info, h)
1969 || h->root.type == bfd_link_hash_defweak
1970 || !h->def_regular))))
1971 || (ELIMINATE_COPY_RELOCS
1972 && !info->shared
1973 && (sec->flags & SEC_ALLOC) != 0
1974 && h != NULL
1975 && (h->root.type == bfd_link_hash_defweak
1976 || !h->def_regular)))
1977 {
1978 struct elf_dyn_relocs *p;
1979 struct elf_dyn_relocs **head;
1980
1981 /* We must copy these reloc types into the output file.
1982 Create a reloc section in dynobj and make room for
1983 this reloc. */
1984 if (sreloc == NULL)
1985 {
1986 if (htab->elf.dynobj == NULL)
1987 htab->elf.dynobj = abfd;
1988
1989 sreloc = _bfd_elf_make_dynamic_reloc_section
1990 (sec, htab->elf.dynobj, ABI_64_P (abfd) ? 3 : 2,
1991 abfd, /*rela?*/ TRUE);
1992
1993 if (sreloc == NULL)
1994 return FALSE;
1995 }
1996
1997 /* If this is a global symbol, we count the number of
1998 relocations we need for this symbol. */
1999 if (h != NULL)
2000 {
2001 head = &((struct elf_x86_64_link_hash_entry *) h)->dyn_relocs;
2002 }
2003 else
2004 {
2005 /* Track dynamic relocs needed for local syms too.
2006 We really need local syms available to do this
2007 easily. Oh well. */
2008 asection *s;
2009 void **vpp;
2010
2011 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2012 abfd, r_symndx);
2013 if (isym == NULL)
2014 return FALSE;
2015
2016 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
2017 if (s == NULL)
2018 s = sec;
2019
2020 /* Beware of type punned pointers vs strict aliasing
2021 rules. */
2022 vpp = &(elf_section_data (s)->local_dynrel);
2023 head = (struct elf_dyn_relocs **)vpp;
2024 }
2025
2026 p = *head;
2027 if (p == NULL || p->sec != sec)
2028 {
2029 bfd_size_type amt = sizeof *p;
2030
2031 p = ((struct elf_dyn_relocs *)
2032 bfd_alloc (htab->elf.dynobj, amt));
2033 if (p == NULL)
2034 return FALSE;
2035 p->next = *head;
2036 *head = p;
2037 p->sec = sec;
2038 p->count = 0;
2039 p->pc_count = 0;
2040 }
2041
2042 p->count += 1;
2043 /* Count size relocation as PC-relative relocation. */
2044 if (IS_X86_64_PCREL_TYPE (r_type) || size_reloc)
2045 p->pc_count += 1;
2046 }
2047 break;
2048
2049 /* This relocation describes the C++ object vtable hierarchy.
2050 Reconstruct it for later use during GC. */
2051 case R_X86_64_GNU_VTINHERIT:
2052 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
2053 return FALSE;
2054 break;
2055
2056 /* This relocation describes which C++ vtable entries are actually
2057 used. Record for later use during GC. */
2058 case R_X86_64_GNU_VTENTRY:
2059 BFD_ASSERT (h != NULL);
2060 if (h != NULL
2061 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
2062 return FALSE;
2063 break;
2064
2065 default:
2066 break;
2067 }
2068
2069 if (use_plt_got
2070 && h != NULL
2071 && h->plt.refcount > 0
2072 && h->got.refcount > 0
2073 && htab->plt_got == NULL)
2074 {
2075 /* Create the GOT procedure linkage table. */
2076 unsigned int plt_got_align;
2077 const struct elf_backend_data *bed;
2078
2079 bed = get_elf_backend_data (info->output_bfd);
2080 BFD_ASSERT (sizeof (elf_x86_64_legacy_plt2_entry) == 8
2081 && (sizeof (elf_x86_64_bnd_plt2_entry)
2082 == sizeof (elf_x86_64_legacy_plt2_entry)));
2083 plt_got_align = 3;
2084
2085 if (htab->elf.dynobj == NULL)
2086 htab->elf.dynobj = abfd;
2087 htab->plt_got
2088 = bfd_make_section_anyway_with_flags (htab->elf.dynobj,
2089 ".plt.got",
2090 (bed->dynamic_sec_flags
2091 | SEC_ALLOC
2092 | SEC_CODE
2093 | SEC_LOAD
2094 | SEC_READONLY));
2095 if (htab->plt_got == NULL
2096 || !bfd_set_section_alignment (htab->elf.dynobj,
2097 htab->plt_got,
2098 plt_got_align))
2099 return FALSE;
2100 }
2101 }
2102
2103 return TRUE;
2104 }
2105
2106 /* Return the section that should be marked against GC for a given
2107 relocation. */
2108
2109 static asection *
2110 elf_x86_64_gc_mark_hook (asection *sec,
2111 struct bfd_link_info *info,
2112 Elf_Internal_Rela *rel,
2113 struct elf_link_hash_entry *h,
2114 Elf_Internal_Sym *sym)
2115 {
2116 if (h != NULL)
2117 switch (ELF32_R_TYPE (rel->r_info))
2118 {
2119 case R_X86_64_GNU_VTINHERIT:
2120 case R_X86_64_GNU_VTENTRY:
2121 return NULL;
2122 }
2123
2124 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
2125 }
2126
2127 /* Update the got entry reference counts for the section being removed. */
2128
2129 static bfd_boolean
2130 elf_x86_64_gc_sweep_hook (bfd *abfd, struct bfd_link_info *info,
2131 asection *sec,
2132 const Elf_Internal_Rela *relocs)
2133 {
2134 struct elf_x86_64_link_hash_table *htab;
2135 Elf_Internal_Shdr *symtab_hdr;
2136 struct elf_link_hash_entry **sym_hashes;
2137 bfd_signed_vma *local_got_refcounts;
2138 const Elf_Internal_Rela *rel, *relend;
2139
2140 if (info->relocatable)
2141 return TRUE;
2142
2143 htab = elf_x86_64_hash_table (info);
2144 if (htab == NULL)
2145 return FALSE;
2146
2147 elf_section_data (sec)->local_dynrel = NULL;
2148
2149 symtab_hdr = &elf_symtab_hdr (abfd);
2150 sym_hashes = elf_sym_hashes (abfd);
2151 local_got_refcounts = elf_local_got_refcounts (abfd);
2152
2153 htab = elf_x86_64_hash_table (info);
2154 relend = relocs + sec->reloc_count;
2155 for (rel = relocs; rel < relend; rel++)
2156 {
2157 unsigned long r_symndx;
2158 unsigned int r_type;
2159 struct elf_link_hash_entry *h = NULL;
2160
2161 r_symndx = htab->r_sym (rel->r_info);
2162 if (r_symndx >= symtab_hdr->sh_info)
2163 {
2164 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
2165 while (h->root.type == bfd_link_hash_indirect
2166 || h->root.type == bfd_link_hash_warning)
2167 h = (struct elf_link_hash_entry *) h->root.u.i.link;
2168 }
2169 else
2170 {
2171 /* A local symbol. */
2172 Elf_Internal_Sym *isym;
2173
2174 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2175 abfd, r_symndx);
2176
2177 /* Check relocation against local STT_GNU_IFUNC symbol. */
2178 if (isym != NULL
2179 && ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
2180 {
2181 h = elf_x86_64_get_local_sym_hash (htab, abfd, rel, FALSE);
2182 if (h == NULL)
2183 abort ();
2184 }
2185 }
2186
2187 if (h)
2188 {
2189 struct elf_x86_64_link_hash_entry *eh;
2190 struct elf_dyn_relocs **pp;
2191 struct elf_dyn_relocs *p;
2192
2193 eh = (struct elf_x86_64_link_hash_entry *) h;
2194
2195 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; pp = &p->next)
2196 if (p->sec == sec)
2197 {
2198 /* Everything must go for SEC. */
2199 *pp = p->next;
2200 break;
2201 }
2202 }
2203
2204 r_type = ELF32_R_TYPE (rel->r_info);
2205 if (! elf_x86_64_tls_transition (info, abfd, sec, NULL,
2206 symtab_hdr, sym_hashes,
2207 &r_type, GOT_UNKNOWN,
2208 rel, relend, h, r_symndx))
2209 return FALSE;
2210
2211 switch (r_type)
2212 {
2213 case R_X86_64_TLSLD:
2214 if (htab->tls_ld_got.refcount > 0)
2215 htab->tls_ld_got.refcount -= 1;
2216 break;
2217
2218 case R_X86_64_TLSGD:
2219 case R_X86_64_GOTPC32_TLSDESC:
2220 case R_X86_64_TLSDESC_CALL:
2221 case R_X86_64_GOTTPOFF:
2222 case R_X86_64_GOT32:
2223 case R_X86_64_GOTPCREL:
2224 case R_X86_64_GOT64:
2225 case R_X86_64_GOTPCREL64:
2226 case R_X86_64_GOTPLT64:
2227 if (h != NULL)
2228 {
2229 if (h->got.refcount > 0)
2230 h->got.refcount -= 1;
2231 if (h->type == STT_GNU_IFUNC)
2232 {
2233 if (h->plt.refcount > 0)
2234 h->plt.refcount -= 1;
2235 }
2236 }
2237 else if (local_got_refcounts != NULL)
2238 {
2239 if (local_got_refcounts[r_symndx] > 0)
2240 local_got_refcounts[r_symndx] -= 1;
2241 }
2242 break;
2243
2244 case R_X86_64_8:
2245 case R_X86_64_16:
2246 case R_X86_64_32:
2247 case R_X86_64_64:
2248 case R_X86_64_32S:
2249 case R_X86_64_PC8:
2250 case R_X86_64_PC16:
2251 case R_X86_64_PC32:
2252 case R_X86_64_PC32_BND:
2253 case R_X86_64_PC64:
2254 case R_X86_64_SIZE32:
2255 case R_X86_64_SIZE64:
2256 if (info->shared
2257 && (h == NULL || h->type != STT_GNU_IFUNC))
2258 break;
2259 /* Fall thru */
2260
2261 case R_X86_64_PLT32:
2262 case R_X86_64_PLT32_BND:
2263 case R_X86_64_PLTOFF64:
2264 if (h != NULL)
2265 {
2266 if (h->plt.refcount > 0)
2267 h->plt.refcount -= 1;
2268 }
2269 break;
2270
2271 default:
2272 break;
2273 }
2274 }
2275
2276 return TRUE;
2277 }
2278
2279 /* Adjust a symbol defined by a dynamic object and referenced by a
2280 regular object. The current definition is in some section of the
2281 dynamic object, but we're not including those sections. We have to
2282 change the definition to something the rest of the link can
2283 understand. */
2284
2285 static bfd_boolean
2286 elf_x86_64_adjust_dynamic_symbol (struct bfd_link_info *info,
2287 struct elf_link_hash_entry *h)
2288 {
2289 struct elf_x86_64_link_hash_table *htab;
2290 asection *s;
2291 struct elf_x86_64_link_hash_entry *eh;
2292 struct elf_dyn_relocs *p;
2293
2294 /* STT_GNU_IFUNC symbol must go through PLT. */
2295 if (h->type == STT_GNU_IFUNC)
2296 {
2297 /* All local STT_GNU_IFUNC references must be treated as local
2298 calls via local PLT. */
2299 if (h->ref_regular
2300 && SYMBOL_CALLS_LOCAL (info, h))
2301 {
2302 bfd_size_type pc_count = 0, count = 0;
2303 struct elf_dyn_relocs **pp;
2304
2305 eh = (struct elf_x86_64_link_hash_entry *) h;
2306 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
2307 {
2308 pc_count += p->pc_count;
2309 p->count -= p->pc_count;
2310 p->pc_count = 0;
2311 count += p->count;
2312 if (p->count == 0)
2313 *pp = p->next;
2314 else
2315 pp = &p->next;
2316 }
2317
2318 if (pc_count || count)
2319 {
2320 h->needs_plt = 1;
2321 h->non_got_ref = 1;
2322 if (h->plt.refcount <= 0)
2323 h->plt.refcount = 1;
2324 else
2325 h->plt.refcount += 1;
2326 }
2327 }
2328
2329 if (h->plt.refcount <= 0)
2330 {
2331 h->plt.offset = (bfd_vma) -1;
2332 h->needs_plt = 0;
2333 }
2334 return TRUE;
2335 }
2336
2337 /* If this is a function, put it in the procedure linkage table. We
2338 will fill in the contents of the procedure linkage table later,
2339 when we know the address of the .got section. */
2340 if (h->type == STT_FUNC
2341 || h->needs_plt)
2342 {
2343 if (h->plt.refcount <= 0
2344 || SYMBOL_CALLS_LOCAL (info, h)
2345 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
2346 && h->root.type == bfd_link_hash_undefweak))
2347 {
2348 /* This case can occur if we saw a PLT32 reloc in an input
2349 file, but the symbol was never referred to by a dynamic
2350 object, or if all references were garbage collected. In
2351 such a case, we don't actually need to build a procedure
2352 linkage table, and we can just do a PC32 reloc instead. */
2353 h->plt.offset = (bfd_vma) -1;
2354 h->needs_plt = 0;
2355 }
2356
2357 return TRUE;
2358 }
2359 else
2360 /* It's possible that we incorrectly decided a .plt reloc was
2361 needed for an R_X86_64_PC32 reloc to a non-function sym in
2362 check_relocs. We can't decide accurately between function and
2363 non-function syms in check-relocs; Objects loaded later in
2364 the link may change h->type. So fix it now. */
2365 h->plt.offset = (bfd_vma) -1;
2366
2367 /* If this is a weak symbol, and there is a real definition, the
2368 processor independent code will have arranged for us to see the
2369 real definition first, and we can just use the same value. */
2370 if (h->u.weakdef != NULL)
2371 {
2372 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
2373 || h->u.weakdef->root.type == bfd_link_hash_defweak);
2374 h->root.u.def.section = h->u.weakdef->root.u.def.section;
2375 h->root.u.def.value = h->u.weakdef->root.u.def.value;
2376 if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
2377 h->non_got_ref = h->u.weakdef->non_got_ref;
2378 return TRUE;
2379 }
2380
2381 /* This is a reference to a symbol defined by a dynamic object which
2382 is not a function. */
2383
2384 /* If we are creating a shared library, we must presume that the
2385 only references to the symbol are via the global offset table.
2386 For such cases we need not do anything here; the relocations will
2387 be handled correctly by relocate_section. */
2388 if (!info->executable)
2389 return TRUE;
2390
2391 /* If there are no references to this symbol that do not use the
2392 GOT, we don't need to generate a copy reloc. */
2393 if (!h->non_got_ref)
2394 return TRUE;
2395
2396 /* If -z nocopyreloc was given, we won't generate them either. */
2397 if (info->nocopyreloc)
2398 {
2399 h->non_got_ref = 0;
2400 return TRUE;
2401 }
2402
2403 if (ELIMINATE_COPY_RELOCS)
2404 {
2405 eh = (struct elf_x86_64_link_hash_entry *) h;
2406 for (p = eh->dyn_relocs; p != NULL; p = p->next)
2407 {
2408 s = p->sec->output_section;
2409 if (s != NULL && (s->flags & SEC_READONLY) != 0)
2410 break;
2411 }
2412
2413 /* If we didn't find any dynamic relocs in read-only sections, then
2414 we'll be keeping the dynamic relocs and avoiding the copy reloc. */
2415 if (p == NULL)
2416 {
2417 h->non_got_ref = 0;
2418 return TRUE;
2419 }
2420 }
2421
2422 /* We must allocate the symbol in our .dynbss section, which will
2423 become part of the .bss section of the executable. There will be
2424 an entry for this symbol in the .dynsym section. The dynamic
2425 object will contain position independent code, so all references
2426 from the dynamic object to this symbol will go through the global
2427 offset table. The dynamic linker will use the .dynsym entry to
2428 determine the address it must put in the global offset table, so
2429 both the dynamic object and the regular object will refer to the
2430 same memory location for the variable. */
2431
2432 htab = elf_x86_64_hash_table (info);
2433 if (htab == NULL)
2434 return FALSE;
2435
2436 /* We must generate a R_X86_64_COPY reloc to tell the dynamic linker
2437 to copy the initial value out of the dynamic object and into the
2438 runtime process image. */
2439 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
2440 {
2441 const struct elf_backend_data *bed;
2442 bed = get_elf_backend_data (info->output_bfd);
2443 htab->srelbss->size += bed->s->sizeof_rela;
2444 h->needs_copy = 1;
2445 }
2446
2447 s = htab->sdynbss;
2448
2449 return _bfd_elf_adjust_dynamic_copy (h, s);
2450 }
2451
2452 /* Allocate space in .plt, .got and associated reloc sections for
2453 dynamic relocs. */
2454
2455 static bfd_boolean
2456 elf_x86_64_allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf)
2457 {
2458 struct bfd_link_info *info;
2459 struct elf_x86_64_link_hash_table *htab;
2460 struct elf_x86_64_link_hash_entry *eh;
2461 struct elf_dyn_relocs *p;
2462 const struct elf_backend_data *bed;
2463 unsigned int plt_entry_size;
2464
2465 if (h->root.type == bfd_link_hash_indirect)
2466 return TRUE;
2467
2468 eh = (struct elf_x86_64_link_hash_entry *) h;
2469
2470 info = (struct bfd_link_info *) inf;
2471 htab = elf_x86_64_hash_table (info);
2472 if (htab == NULL)
2473 return FALSE;
2474 bed = get_elf_backend_data (info->output_bfd);
2475 plt_entry_size = GET_PLT_ENTRY_SIZE (info->output_bfd);
2476
2477 /* We can't use the GOT PLT if pointer equality is needed since
2478 finish_dynamic_symbol won't clear symbol value and the dynamic
2479 linker won't update the GOT slot. We will get into an infinite
2480 loop at run-time. */
2481 if (htab->plt_got != NULL
2482 && h->type != STT_GNU_IFUNC
2483 && !h->pointer_equality_needed
2484 && h->plt.refcount > 0
2485 && h->got.refcount > 0)
2486 {
2487 /* Don't use the regular PLT if there are both GOT and GOTPLT
2488 relocations. */
2489 h->plt.offset = (bfd_vma) -1;
2490
2491 /* Use the GOT PLT. */
2492 eh->plt_got.refcount = 1;
2493 }
2494
2495 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle it
2496 here if it is defined and referenced in a non-shared object. */
2497 if (h->type == STT_GNU_IFUNC
2498 && h->def_regular)
2499 {
2500 if (_bfd_elf_allocate_ifunc_dyn_relocs (info, h,
2501 &eh->dyn_relocs,
2502 plt_entry_size,
2503 plt_entry_size,
2504 GOT_ENTRY_SIZE))
2505 {
2506 asection *s = htab->plt_bnd;
2507 if (h->plt.offset != (bfd_vma) -1 && s != NULL)
2508 {
2509 /* Use the .plt.bnd section if it is created. */
2510 eh->plt_bnd.offset = s->size;
2511
2512 /* Make room for this entry in the .plt.bnd section. */
2513 s->size += sizeof (elf_x86_64_legacy_plt2_entry);
2514 }
2515
2516 return TRUE;
2517 }
2518 else
2519 return FALSE;
2520 }
2521 else if (htab->elf.dynamic_sections_created
2522 && (h->plt.refcount > 0 || eh->plt_got.refcount > 0))
2523 {
2524 bfd_boolean use_plt_got = eh->plt_got.refcount > 0;
2525
2526 /* Make sure this symbol is output as a dynamic symbol.
2527 Undefined weak syms won't yet be marked as dynamic. */
2528 if (h->dynindx == -1
2529 && !h->forced_local)
2530 {
2531 if (! bfd_elf_link_record_dynamic_symbol (info, h))
2532 return FALSE;
2533 }
2534
2535 if (info->shared
2536 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
2537 {
2538 asection *s = htab->elf.splt;
2539 asection *bnd_s = htab->plt_bnd;
2540 asection *got_s = htab->plt_got;
2541
2542 /* If this is the first .plt entry, make room for the special
2543 first entry. */
2544 if (s->size == 0)
2545 s->size = plt_entry_size;
2546
2547 if (use_plt_got)
2548 eh->plt_got.offset = got_s->size;
2549 else
2550 {
2551 h->plt.offset = s->size;
2552 if (bnd_s)
2553 eh->plt_bnd.offset = bnd_s->size;
2554 }
2555
2556 /* If this symbol is not defined in a regular file, and we are
2557 not generating a shared library, then set the symbol to this
2558 location in the .plt. This is required to make function
2559 pointers compare as equal between the normal executable and
2560 the shared library. */
2561 if (! info->shared
2562 && !h->def_regular)
2563 {
2564 if (use_plt_got)
2565 {
2566 /* We need to make a call to the entry of the GOT PLT
2567 instead of regular PLT entry. */
2568 h->root.u.def.section = got_s;
2569 h->root.u.def.value = eh->plt_got.offset;
2570 }
2571 else
2572 {
2573 if (bnd_s)
2574 {
2575 /* We need to make a call to the entry of the second
2576 PLT instead of regular PLT entry. */
2577 h->root.u.def.section = bnd_s;
2578 h->root.u.def.value = eh->plt_bnd.offset;
2579 }
2580 else
2581 {
2582 h->root.u.def.section = s;
2583 h->root.u.def.value = h->plt.offset;
2584 }
2585 }
2586 }
2587
2588 /* Make room for this entry. */
2589 if (use_plt_got)
2590 got_s->size += sizeof (elf_x86_64_legacy_plt2_entry);
2591 else
2592 {
2593 s->size += plt_entry_size;
2594 if (bnd_s)
2595 bnd_s->size += sizeof (elf_x86_64_legacy_plt2_entry);
2596
2597 /* We also need to make an entry in the .got.plt section,
2598 which will be placed in the .got section by the linker
2599 script. */
2600 htab->elf.sgotplt->size += GOT_ENTRY_SIZE;
2601
2602 /* We also need to make an entry in the .rela.plt
2603 section. */
2604 htab->elf.srelplt->size += bed->s->sizeof_rela;
2605 htab->elf.srelplt->reloc_count++;
2606 }
2607 }
2608 else
2609 {
2610 h->plt.offset = (bfd_vma) -1;
2611 h->needs_plt = 0;
2612 }
2613 }
2614 else
2615 {
2616 h->plt.offset = (bfd_vma) -1;
2617 h->needs_plt = 0;
2618 }
2619
2620 eh->tlsdesc_got = (bfd_vma) -1;
2621
2622 /* If R_X86_64_GOTTPOFF symbol is now local to the binary,
2623 make it a R_X86_64_TPOFF32 requiring no GOT entry. */
2624 if (h->got.refcount > 0
2625 && info->executable
2626 && h->dynindx == -1
2627 && elf_x86_64_hash_entry (h)->tls_type == GOT_TLS_IE)
2628 {
2629 h->got.offset = (bfd_vma) -1;
2630 }
2631 else if (h->got.refcount > 0)
2632 {
2633 asection *s;
2634 bfd_boolean dyn;
2635 int tls_type = elf_x86_64_hash_entry (h)->tls_type;
2636
2637 /* Make sure this symbol is output as a dynamic symbol.
2638 Undefined weak syms won't yet be marked as dynamic. */
2639 if (h->dynindx == -1
2640 && !h->forced_local)
2641 {
2642 if (! bfd_elf_link_record_dynamic_symbol (info, h))
2643 return FALSE;
2644 }
2645
2646 if (GOT_TLS_GDESC_P (tls_type))
2647 {
2648 eh->tlsdesc_got = htab->elf.sgotplt->size
2649 - elf_x86_64_compute_jump_table_size (htab);
2650 htab->elf.sgotplt->size += 2 * GOT_ENTRY_SIZE;
2651 h->got.offset = (bfd_vma) -2;
2652 }
2653 if (! GOT_TLS_GDESC_P (tls_type)
2654 || GOT_TLS_GD_P (tls_type))
2655 {
2656 s = htab->elf.sgot;
2657 h->got.offset = s->size;
2658 s->size += GOT_ENTRY_SIZE;
2659 if (GOT_TLS_GD_P (tls_type))
2660 s->size += GOT_ENTRY_SIZE;
2661 }
2662 dyn = htab->elf.dynamic_sections_created;
2663 /* R_X86_64_TLSGD needs one dynamic relocation if local symbol
2664 and two if global.
2665 R_X86_64_GOTTPOFF needs one dynamic relocation. */
2666 if ((GOT_TLS_GD_P (tls_type) && h->dynindx == -1)
2667 || tls_type == GOT_TLS_IE)
2668 htab->elf.srelgot->size += bed->s->sizeof_rela;
2669 else if (GOT_TLS_GD_P (tls_type))
2670 htab->elf.srelgot->size += 2 * bed->s->sizeof_rela;
2671 else if (! GOT_TLS_GDESC_P (tls_type)
2672 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
2673 || h->root.type != bfd_link_hash_undefweak)
2674 && (info->shared
2675 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
2676 htab->elf.srelgot->size += bed->s->sizeof_rela;
2677 if (GOT_TLS_GDESC_P (tls_type))
2678 {
2679 htab->elf.srelplt->size += bed->s->sizeof_rela;
2680 htab->tlsdesc_plt = (bfd_vma) -1;
2681 }
2682 }
2683 else
2684 h->got.offset = (bfd_vma) -1;
2685
2686 if (eh->dyn_relocs == NULL)
2687 return TRUE;
2688
2689 /* In the shared -Bsymbolic case, discard space allocated for
2690 dynamic pc-relative relocs against symbols which turn out to be
2691 defined in regular objects. For the normal shared case, discard
2692 space for pc-relative relocs that have become local due to symbol
2693 visibility changes. */
2694
2695 if (info->shared)
2696 {
2697 /* Relocs that use pc_count are those that appear on a call
2698 insn, or certain REL relocs that can be generated via assembly.
2699 We want calls to protected symbols to resolve directly to the
2700 function rather than going via the plt. If people want
2701 function pointer comparisons to work as expected then they
2702 should avoid writing weird assembly. */
2703 if (SYMBOL_CALLS_LOCAL (info, h))
2704 {
2705 struct elf_dyn_relocs **pp;
2706
2707 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
2708 {
2709 p->count -= p->pc_count;
2710 p->pc_count = 0;
2711 if (p->count == 0)
2712 *pp = p->next;
2713 else
2714 pp = &p->next;
2715 }
2716 }
2717
2718 /* Also discard relocs on undefined weak syms with non-default
2719 visibility. */
2720 if (eh->dyn_relocs != NULL)
2721 {
2722 if (h->root.type == bfd_link_hash_undefweak)
2723 {
2724 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
2725 eh->dyn_relocs = NULL;
2726
2727 /* Make sure undefined weak symbols are output as dynamic
2728 symbols in PIEs. */
2729 else if (h->dynindx == -1
2730 && ! h->forced_local
2731 && ! bfd_elf_link_record_dynamic_symbol (info, h))
2732 return FALSE;
2733 }
2734 /* For PIE, discard space for relocs against symbols which
2735 turn out to need copy relocs. */
2736 else if (info->executable
2737 && h->needs_copy
2738 && h->def_dynamic
2739 && !h->def_regular)
2740 eh->dyn_relocs = NULL;
2741 }
2742 }
2743 else if (ELIMINATE_COPY_RELOCS)
2744 {
2745 /* For the non-shared case, discard space for relocs against
2746 symbols which turn out to need copy relocs or are not
2747 dynamic. */
2748
2749 if (!h->non_got_ref
2750 && ((h->def_dynamic
2751 && !h->def_regular)
2752 || (htab->elf.dynamic_sections_created
2753 && (h->root.type == bfd_link_hash_undefweak
2754 || h->root.type == bfd_link_hash_undefined))))
2755 {
2756 /* Make sure this symbol is output as a dynamic symbol.
2757 Undefined weak syms won't yet be marked as dynamic. */
2758 if (h->dynindx == -1
2759 && ! h->forced_local
2760 && ! bfd_elf_link_record_dynamic_symbol (info, h))
2761 return FALSE;
2762
2763 /* If that succeeded, we know we'll be keeping all the
2764 relocs. */
2765 if (h->dynindx != -1)
2766 goto keep;
2767 }
2768
2769 eh->dyn_relocs = NULL;
2770
2771 keep: ;
2772 }
2773
2774 /* Finally, allocate space. */
2775 for (p = eh->dyn_relocs; p != NULL; p = p->next)
2776 {
2777 asection * sreloc;
2778
2779 sreloc = elf_section_data (p->sec)->sreloc;
2780
2781 BFD_ASSERT (sreloc != NULL);
2782
2783 sreloc->size += p->count * bed->s->sizeof_rela;
2784 }
2785
2786 return TRUE;
2787 }
2788
2789 /* Allocate space in .plt, .got and associated reloc sections for
2790 local dynamic relocs. */
2791
2792 static bfd_boolean
2793 elf_x86_64_allocate_local_dynrelocs (void **slot, void *inf)
2794 {
2795 struct elf_link_hash_entry *h
2796 = (struct elf_link_hash_entry *) *slot;
2797
2798 if (h->type != STT_GNU_IFUNC
2799 || !h->def_regular
2800 || !h->ref_regular
2801 || !h->forced_local
2802 || h->root.type != bfd_link_hash_defined)
2803 abort ();
2804
2805 return elf_x86_64_allocate_dynrelocs (h, inf);
2806 }
2807
2808 /* Find any dynamic relocs that apply to read-only sections. */
2809
2810 static bfd_boolean
2811 elf_x86_64_readonly_dynrelocs (struct elf_link_hash_entry *h,
2812 void * inf)
2813 {
2814 struct elf_x86_64_link_hash_entry *eh;
2815 struct elf_dyn_relocs *p;
2816
2817 /* Skip local IFUNC symbols. */
2818 if (h->forced_local && h->type == STT_GNU_IFUNC)
2819 return TRUE;
2820
2821 eh = (struct elf_x86_64_link_hash_entry *) h;
2822 for (p = eh->dyn_relocs; p != NULL; p = p->next)
2823 {
2824 asection *s = p->sec->output_section;
2825
2826 if (s != NULL && (s->flags & SEC_READONLY) != 0)
2827 {
2828 struct bfd_link_info *info = (struct bfd_link_info *) inf;
2829
2830 info->flags |= DF_TEXTREL;
2831
2832 if (info->warn_shared_textrel && info->shared)
2833 info->callbacks->einfo (_("%P: %B: warning: relocation against `%s' in readonly section `%A'.\n"),
2834 p->sec->owner, h->root.root.string,
2835 p->sec);
2836
2837 /* Not an error, just cut short the traversal. */
2838 return FALSE;
2839 }
2840 }
2841 return TRUE;
2842 }
2843
2844 /* Convert
2845 mov foo@GOTPCREL(%rip), %reg
2846 to
2847 lea foo(%rip), %reg
2848 with the local symbol, foo. */
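/* For example (illustrative encoding, not taken from the sources):
     48 8b 05 00 00 00 00	mov    foo@GOTPCREL(%rip), %rax
   is rewritten to
     48 8d 05 00 00 00 00	lea    foo(%rip), %rax
   i.e. only the opcode byte 0x8b located two bytes before r_offset is
   changed to 0x8d, and the R_X86_64_GOTPCREL relocation against the
   displacement becomes R_X86_64_PC32.  */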
2849
2850 static bfd_boolean
2851 elf_x86_64_convert_mov_to_lea (bfd *abfd, asection *sec,
2852 struct bfd_link_info *link_info)
2853 {
2854 Elf_Internal_Shdr *symtab_hdr;
2855 Elf_Internal_Rela *internal_relocs;
2856 Elf_Internal_Rela *irel, *irelend;
2857 bfd_byte *contents;
2858 struct elf_x86_64_link_hash_table *htab;
2859 bfd_boolean changed_contents;
2860 bfd_boolean changed_relocs;
2861 bfd_signed_vma *local_got_refcounts;
2862
2863 /* Don't even try to convert non-ELF outputs. */
2864 if (!is_elf_hash_table (link_info->hash))
2865 return FALSE;
2866
2867 /* Nothing to do if there is no code, no relocations or no output. */
2868 if ((sec->flags & (SEC_CODE | SEC_RELOC)) != (SEC_CODE | SEC_RELOC)
2869 || sec->reloc_count == 0
2870 || bfd_is_abs_section (sec->output_section))
2871 return TRUE;
2872
2873 symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
2874
2875 /* Load the relocations for this section. */
2876 internal_relocs = (_bfd_elf_link_read_relocs
2877 (abfd, sec, NULL, (Elf_Internal_Rela *) NULL,
2878 link_info->keep_memory));
2879 if (internal_relocs == NULL)
2880 return FALSE;
2881
2882 htab = elf_x86_64_hash_table (link_info);
2883 changed_contents = FALSE;
2884 changed_relocs = FALSE;
2885 local_got_refcounts = elf_local_got_refcounts (abfd);
2886
2887 /* Get the section contents. */
2888 if (elf_section_data (sec)->this_hdr.contents != NULL)
2889 contents = elf_section_data (sec)->this_hdr.contents;
2890 else
2891 {
2892 if (!bfd_malloc_and_get_section (abfd, sec, &contents))
2893 goto error_return;
2894 }
2895
2896 irelend = internal_relocs + sec->reloc_count;
2897 for (irel = internal_relocs; irel < irelend; irel++)
2898 {
2899 unsigned int r_type = ELF32_R_TYPE (irel->r_info);
2900 unsigned int r_symndx = htab->r_sym (irel->r_info);
2901 unsigned int indx;
2902 struct elf_link_hash_entry *h;
2903
2904 if (r_type != R_X86_64_GOTPCREL)
2905 continue;
2906
2907 /* Get the symbol referred to by the reloc. */
2908 if (r_symndx < symtab_hdr->sh_info)
2909 {
2910 Elf_Internal_Sym *isym;
2911
2912 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2913 abfd, r_symndx);
2914
2915 /* STT_GNU_IFUNC must keep R_X86_64_GOTPCREL relocation. */
2916 if (ELF_ST_TYPE (isym->st_info) != STT_GNU_IFUNC
2917 && irel->r_offset >= 2
2918 && bfd_get_8 (abfd,
2919 contents + irel->r_offset - 2) == 0x8b)
2920 {
2921 bfd_put_8 (abfd, 0x8d,
2922 contents + irel->r_offset - 2);
2923 irel->r_info = htab->r_info (r_symndx, R_X86_64_PC32);
2924 if (local_got_refcounts != NULL
2925 && local_got_refcounts[r_symndx] > 0)
2926 local_got_refcounts[r_symndx] -= 1;
2927 changed_contents = TRUE;
2928 changed_relocs = TRUE;
2929 }
2930 continue;
2931 }
2932
2933 indx = r_symndx - symtab_hdr->sh_info;
2934 h = elf_sym_hashes (abfd)[indx];
2935 BFD_ASSERT (h != NULL);
2936
2937 while (h->root.type == bfd_link_hash_indirect
2938 || h->root.type == bfd_link_hash_warning)
2939 h = (struct elf_link_hash_entry *) h->root.u.i.link;
2940
2941 /* STT_GNU_IFUNC must keep R_X86_64_GOTPCREL relocation. We also
2942 avoid optimizing _DYNAMIC since ld.so may use its link-time
2943 address. */
2944 if (h->def_regular
2945 && h->type != STT_GNU_IFUNC
2946 && h != htab->elf.hdynamic
2947 && SYMBOL_REFERENCES_LOCAL (link_info, h)
2948 && irel->r_offset >= 2
2949 && bfd_get_8 (abfd,
2950 contents + irel->r_offset - 2) == 0x8b)
2951 {
2952 bfd_put_8 (abfd, 0x8d,
2953 contents + irel->r_offset - 2);
2954 irel->r_info = htab->r_info (r_symndx, R_X86_64_PC32);
2955 if (h->got.refcount > 0)
2956 h->got.refcount -= 1;
2957 changed_contents = TRUE;
2958 changed_relocs = TRUE;
2959 }
2960 }
2961
2962 if (contents != NULL
2963 && elf_section_data (sec)->this_hdr.contents != contents)
2964 {
2965 if (!changed_contents && !link_info->keep_memory)
2966 free (contents);
2967 else
2968 {
2969 /* Cache the section contents for elf_link_input_bfd. */
2970 elf_section_data (sec)->this_hdr.contents = contents;
2971 }
2972 }
2973
2974 if (elf_section_data (sec)->relocs != internal_relocs)
2975 {
2976 if (!changed_relocs)
2977 free (internal_relocs);
2978 else
2979 elf_section_data (sec)->relocs = internal_relocs;
2980 }
2981
2982 return TRUE;
2983
2984 error_return:
2985 if (contents != NULL
2986 && elf_section_data (sec)->this_hdr.contents != contents)
2987 free (contents);
2988 if (internal_relocs != NULL
2989 && elf_section_data (sec)->relocs != internal_relocs)
2990 free (internal_relocs);
2991 return FALSE;
2992 }
2993
2994 /* Set the sizes of the dynamic sections. */
2995
2996 static bfd_boolean
2997 elf_x86_64_size_dynamic_sections (bfd *output_bfd,
2998 struct bfd_link_info *info)
2999 {
3000 struct elf_x86_64_link_hash_table *htab;
3001 bfd *dynobj;
3002 asection *s;
3003 bfd_boolean relocs;
3004 bfd *ibfd;
3005 const struct elf_backend_data *bed;
3006
3007 htab = elf_x86_64_hash_table (info);
3008 if (htab == NULL)
3009 return FALSE;
3010 bed = get_elf_backend_data (output_bfd);
3011
3012 dynobj = htab->elf.dynobj;
3013 if (dynobj == NULL)
3014 abort ();
3015
3016 if (htab->elf.dynamic_sections_created)
3017 {
3018 /* Set the contents of the .interp section to the interpreter. */
3019 if (info->executable)
3020 {
3021 s = bfd_get_linker_section (dynobj, ".interp");
3022 if (s == NULL)
3023 abort ();
3024 s->size = htab->dynamic_interpreter_size;
3025 s->contents = (unsigned char *) htab->dynamic_interpreter;
3026 }
3027 }
3028
3029 /* Set up .got offsets for local syms, and space for local dynamic
3030 relocs. */
3031 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
3032 {
3033 bfd_signed_vma *local_got;
3034 bfd_signed_vma *end_local_got;
3035 char *local_tls_type;
3036 bfd_vma *local_tlsdesc_gotent;
3037 bfd_size_type locsymcount;
3038 Elf_Internal_Shdr *symtab_hdr;
3039 asection *srel;
3040
3041 if (! is_x86_64_elf (ibfd))
3042 continue;
3043
3044 for (s = ibfd->sections; s != NULL; s = s->next)
3045 {
3046 struct elf_dyn_relocs *p;
3047
3048 if (!elf_x86_64_convert_mov_to_lea (ibfd, s, info))
3049 return FALSE;
3050
3051 for (p = (struct elf_dyn_relocs *)
3052 (elf_section_data (s)->local_dynrel);
3053 p != NULL;
3054 p = p->next)
3055 {
3056 if (!bfd_is_abs_section (p->sec)
3057 && bfd_is_abs_section (p->sec->output_section))
3058 {
3059 /* Input section has been discarded, either because
3060 it is a copy of a linkonce section or due to
3061 linker script /DISCARD/, so we'll be discarding
3062 the relocs too. */
3063 }
3064 else if (p->count != 0)
3065 {
3066 srel = elf_section_data (p->sec)->sreloc;
3067 srel->size += p->count * bed->s->sizeof_rela;
3068 if ((p->sec->output_section->flags & SEC_READONLY) != 0
3069 && (info->flags & DF_TEXTREL) == 0)
3070 {
3071 info->flags |= DF_TEXTREL;
3072 if (info->warn_shared_textrel && info->shared)
3073 info->callbacks->einfo (_("%P: %B: warning: relocation in readonly section `%A'.\n"),
3074 p->sec->owner, p->sec);
3075 }
3076 }
3077 }
3078 }
3079
3080 local_got = elf_local_got_refcounts (ibfd);
3081 if (!local_got)
3082 continue;
3083
3084 symtab_hdr = &elf_symtab_hdr (ibfd);
3085 locsymcount = symtab_hdr->sh_info;
3086 end_local_got = local_got + locsymcount;
3087 local_tls_type = elf_x86_64_local_got_tls_type (ibfd);
3088 local_tlsdesc_gotent = elf_x86_64_local_tlsdesc_gotent (ibfd);
3089 s = htab->elf.sgot;
3090 srel = htab->elf.srelgot;
3091 for (; local_got < end_local_got;
3092 ++local_got, ++local_tls_type, ++local_tlsdesc_gotent)
3093 {
3094 *local_tlsdesc_gotent = (bfd_vma) -1;
3095 if (*local_got > 0)
3096 {
3097 if (GOT_TLS_GDESC_P (*local_tls_type))
3098 {
3099 *local_tlsdesc_gotent = htab->elf.sgotplt->size
3100 - elf_x86_64_compute_jump_table_size (htab);
3101 htab->elf.sgotplt->size += 2 * GOT_ENTRY_SIZE;
3102 *local_got = (bfd_vma) -2;
3103 }
3104 if (! GOT_TLS_GDESC_P (*local_tls_type)
3105 || GOT_TLS_GD_P (*local_tls_type))
3106 {
3107 *local_got = s->size;
3108 s->size += GOT_ENTRY_SIZE;
3109 if (GOT_TLS_GD_P (*local_tls_type))
3110 s->size += GOT_ENTRY_SIZE;
3111 }
3112 if (info->shared
3113 || GOT_TLS_GD_ANY_P (*local_tls_type)
3114 || *local_tls_type == GOT_TLS_IE)
3115 {
3116 if (GOT_TLS_GDESC_P (*local_tls_type))
3117 {
3118 htab->elf.srelplt->size
3119 += bed->s->sizeof_rela;
3120 htab->tlsdesc_plt = (bfd_vma) -1;
3121 }
3122 if (! GOT_TLS_GDESC_P (*local_tls_type)
3123 || GOT_TLS_GD_P (*local_tls_type))
3124 srel->size += bed->s->sizeof_rela;
3125 }
3126 }
3127 else
3128 *local_got = (bfd_vma) -1;
3129 }
3130 }
3131
3132 if (htab->tls_ld_got.refcount > 0)
3133 {
3134 /* Allocate 2 got entries and 1 dynamic reloc for R_X86_64_TLSLD
3135 relocs. */
3136 htab->tls_ld_got.offset = htab->elf.sgot->size;
3137 htab->elf.sgot->size += 2 * GOT_ENTRY_SIZE;
3138 htab->elf.srelgot->size += bed->s->sizeof_rela;
3139 }
3140 else
3141 htab->tls_ld_got.offset = -1;
3142
3143 /* Allocate global sym .plt and .got entries, and space for global
3144 sym dynamic relocs. */
3145 elf_link_hash_traverse (&htab->elf, elf_x86_64_allocate_dynrelocs,
3146 info);
3147
3148 /* Allocate .plt and .got entries, and space for local symbols. */
3149 htab_traverse (htab->loc_hash_table,
3150 elf_x86_64_allocate_local_dynrelocs,
3151 info);
3152
3153 /* For every jump slot reserved in the sgotplt, reloc_count is
3154 incremented. However, when we reserve space for TLS descriptors,
3155 it's not incremented, so in order to compute the space reserved
3156 for them, it suffices to multiply the reloc count by the jump
3157 slot size.
3158
3159 PR ld/13302: We start next_irelative_index at the end of .rela.plt
3160 so that R_X86_64_IRELATIVE entries come last. */
3161 if (htab->elf.srelplt)
3162 {
3163 htab->sgotplt_jump_table_size
3164 = elf_x86_64_compute_jump_table_size (htab);
3165 htab->next_irelative_index = htab->elf.srelplt->reloc_count - 1;
3166 }
3167 else if (htab->elf.irelplt)
3168 htab->next_irelative_index = htab->elf.irelplt->reloc_count - 1;
3169
3170 if (htab->tlsdesc_plt)
3171 {
3172 /* If we're not using lazy TLS relocations, don't generate the
3173 PLT and GOT entries they require. */
3174 if ((info->flags & DF_BIND_NOW))
3175 htab->tlsdesc_plt = 0;
3176 else
3177 {
3178 htab->tlsdesc_got = htab->elf.sgot->size;
3179 htab->elf.sgot->size += GOT_ENTRY_SIZE;
3180 /* Reserve room for the initial entry.
3181 FIXME: we could probably do away with it in this case. */
3182 if (htab->elf.splt->size == 0)
3183 htab->elf.splt->size += GET_PLT_ENTRY_SIZE (output_bfd);
3184 htab->tlsdesc_plt = htab->elf.splt->size;
3185 htab->elf.splt->size += GET_PLT_ENTRY_SIZE (output_bfd);
3186 }
3187 }
3188
3189 if (htab->elf.sgotplt)
3190 {
3191 /* Don't allocate .got.plt section if there are no GOT nor PLT
3192 entries and there is no reference to _GLOBAL_OFFSET_TABLE_. */
3193 if ((htab->elf.hgot == NULL
3194 || !htab->elf.hgot->ref_regular_nonweak)
3195 && (htab->elf.sgotplt->size
3196 == get_elf_backend_data (output_bfd)->got_header_size)
3197 && (htab->elf.splt == NULL
3198 || htab->elf.splt->size == 0)
3199 && (htab->elf.sgot == NULL
3200 || htab->elf.sgot->size == 0)
3201 && (htab->elf.iplt == NULL
3202 || htab->elf.iplt->size == 0)
3203 && (htab->elf.igotplt == NULL
3204 || htab->elf.igotplt->size == 0))
3205 htab->elf.sgotplt->size = 0;
3206 }
3207
3208 if (htab->plt_eh_frame != NULL
3209 && htab->elf.splt != NULL
3210 && htab->elf.splt->size != 0
3211 && !bfd_is_abs_section (htab->elf.splt->output_section)
3212 && _bfd_elf_eh_frame_present (info))
3213 {
3214 const struct elf_x86_64_backend_data *arch_data
3215 = get_elf_x86_64_arch_data (bed);
3216 htab->plt_eh_frame->size = arch_data->eh_frame_plt_size;
3217 }
3218
3219 /* We now have determined the sizes of the various dynamic sections.
3220 Allocate memory for them. */
3221 relocs = FALSE;
3222 for (s = dynobj->sections; s != NULL; s = s->next)
3223 {
3224 if ((s->flags & SEC_LINKER_CREATED) == 0)
3225 continue;
3226
3227 if (s == htab->elf.splt
3228 || s == htab->elf.sgot
3229 || s == htab->elf.sgotplt
3230 || s == htab->elf.iplt
3231 || s == htab->elf.igotplt
3232 || s == htab->plt_bnd
3233 || s == htab->plt_got
3234 || s == htab->plt_eh_frame
3235 || s == htab->sdynbss)
3236 {
3237 /* Strip this section if we don't need it; see the
3238 comment below. */
3239 }
3240 else if (CONST_STRNEQ (bfd_get_section_name (dynobj, s), ".rela"))
3241 {
3242 if (s->size != 0 && s != htab->elf.srelplt)
3243 relocs = TRUE;
3244
3245 /* We use the reloc_count field as a counter if we need
3246 to copy relocs into the output file. */
3247 if (s != htab->elf.srelplt)
3248 s->reloc_count = 0;
3249 }
3250 else
3251 {
3252 /* It's not one of our sections, so don't allocate space. */
3253 continue;
3254 }
3255
3256 if (s->size == 0)
3257 {
3258 /* If we don't need this section, strip it from the
3259 output file. This is mostly to handle .rela.bss and
3260 .rela.plt. We must create both sections in
3261 create_dynamic_sections, because they must be created
3262 before the linker maps input sections to output
3263 sections. The linker does that before
3264 adjust_dynamic_symbol is called, and it is that
3265 function which decides whether anything needs to go
3266 into these sections. */
3267
3268 s->flags |= SEC_EXCLUDE;
3269 continue;
3270 }
3271
3272 if ((s->flags & SEC_HAS_CONTENTS) == 0)
3273 continue;
3274
3275 /* Allocate memory for the section contents. We use bfd_zalloc
3276 here in case unused entries are not reclaimed before the
3277 section's contents are written out. This should not happen,
3278 but this way if it does, we get a R_X86_64_NONE reloc instead
3279 of garbage. */
3280 s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
3281 if (s->contents == NULL)
3282 return FALSE;
3283 }
3284
3285 if (htab->plt_eh_frame != NULL
3286 && htab->plt_eh_frame->contents != NULL)
3287 {
3288 const struct elf_x86_64_backend_data *arch_data
3289 = get_elf_x86_64_arch_data (bed);
3290
3291 memcpy (htab->plt_eh_frame->contents,
3292 arch_data->eh_frame_plt, htab->plt_eh_frame->size);
3293 bfd_put_32 (dynobj, htab->elf.splt->size,
3294 htab->plt_eh_frame->contents + PLT_FDE_LEN_OFFSET);
3295 }
3296
3297 if (htab->elf.dynamic_sections_created)
3298 {
3299 /* Add some entries to the .dynamic section. We fill in the
3300 values later, in elf_x86_64_finish_dynamic_sections, but we
3301 must add the entries now so that we get the correct size for
3302 the .dynamic section. The DT_DEBUG entry is filled in by the
3303 dynamic linker and used by the debugger. */
3304 #define add_dynamic_entry(TAG, VAL) \
3305 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
3306
3307 if (info->executable)
3308 {
3309 if (!add_dynamic_entry (DT_DEBUG, 0))
3310 return FALSE;
3311 }
3312
3313 if (htab->elf.splt->size != 0)
3314 {
3315 if (!add_dynamic_entry (DT_PLTGOT, 0)
3316 || !add_dynamic_entry (DT_PLTRELSZ, 0)
3317 || !add_dynamic_entry (DT_PLTREL, DT_RELA)
3318 || !add_dynamic_entry (DT_JMPREL, 0))
3319 return FALSE;
3320
3321 if (htab->tlsdesc_plt
3322 && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
3323 || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
3324 return FALSE;
3325 }
3326
3327 if (relocs)
3328 {
3329 if (!add_dynamic_entry (DT_RELA, 0)
3330 || !add_dynamic_entry (DT_RELASZ, 0)
3331 || !add_dynamic_entry (DT_RELAENT, bed->s->sizeof_rela))
3332 return FALSE;
3333
3334 /* If any dynamic relocs apply to a read-only section,
3335 then we need a DT_TEXTREL entry. */
3336 if ((info->flags & DF_TEXTREL) == 0)
3337 elf_link_hash_traverse (&htab->elf,
3338 elf_x86_64_readonly_dynrelocs,
3339 info);
3340
3341 if ((info->flags & DF_TEXTREL) != 0)
3342 {
3343 if (!add_dynamic_entry (DT_TEXTREL, 0))
3344 return FALSE;
3345 }
3346 }
3347 }
3348 #undef add_dynamic_entry
3349
3350 return TRUE;
3351 }
3352
3353 static bfd_boolean
3354 elf_x86_64_always_size_sections (bfd *output_bfd,
3355 struct bfd_link_info *info)
3356 {
3357 asection *tls_sec = elf_hash_table (info)->tls_sec;
3358
3359 if (tls_sec)
3360 {
3361 struct elf_link_hash_entry *tlsbase;
3362
3363 tlsbase = elf_link_hash_lookup (elf_hash_table (info),
3364 "_TLS_MODULE_BASE_",
3365 FALSE, FALSE, FALSE);
3366
3367 if (tlsbase && tlsbase->type == STT_TLS)
3368 {
3369 struct elf_x86_64_link_hash_table *htab;
3370 struct bfd_link_hash_entry *bh = NULL;
3371 const struct elf_backend_data *bed
3372 = get_elf_backend_data (output_bfd);
3373
3374 htab = elf_x86_64_hash_table (info);
3375 if (htab == NULL)
3376 return FALSE;
3377
3378 if (!(_bfd_generic_link_add_one_symbol
3379 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
3380 tls_sec, 0, NULL, FALSE,
3381 bed->collect, &bh)))
3382 return FALSE;
3383
3384 htab->tls_module_base = bh;
3385
3386 tlsbase = (struct elf_link_hash_entry *)bh;
3387 tlsbase->def_regular = 1;
3388 tlsbase->other = STV_HIDDEN;
3389 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
3390 }
3391 }
3392
3393 return TRUE;
3394 }
3395
3396 /* _TLS_MODULE_BASE_ needs to be treated specially when linking
3397 executables. Rather than setting it to the beginning of the TLS
3398 section, we have to set it to the end. This function may be called
3399 multiple times; it is idempotent. */
3400
3401 static void
3402 elf_x86_64_set_tls_module_base (struct bfd_link_info *info)
3403 {
3404 struct elf_x86_64_link_hash_table *htab;
3405 struct bfd_link_hash_entry *base;
3406
3407 if (!info->executable)
3408 return;
3409
3410 htab = elf_x86_64_hash_table (info);
3411 if (htab == NULL)
3412 return;
3413
3414 base = htab->tls_module_base;
3415 if (base == NULL)
3416 return;
3417
3418 base->u.def.value = htab->elf.tls_size;
3419 }
3420
3421 /* Return the base VMA address which should be subtracted from real addresses
3422 when resolving @dtpoff relocation.
3423 This is PT_TLS segment p_vaddr. */
3424
3425 static bfd_vma
3426 elf_x86_64_dtpoff_base (struct bfd_link_info *info)
3427 {
3428 /* If tls_sec is NULL, we should have signalled an error already. */
3429 if (elf_hash_table (info)->tls_sec == NULL)
3430 return 0;
3431 return elf_hash_table (info)->tls_sec->vma;
3432 }
3433
3434 /* Return the relocation value for @tpoff relocation
3435 if STT_TLS virtual address is ADDRESS. */
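   Note: x86-64 uses TLS variant II, where the thread pointer points
   just past the static TLS block, so the value computed below is a
   negative offset from the thread pointer.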
3436
3437 static bfd_vma
3438 elf_x86_64_tpoff (struct bfd_link_info *info, bfd_vma address)
3439 {
3440 struct elf_link_hash_table *htab = elf_hash_table (info);
3441 const struct elf_backend_data *bed = get_elf_backend_data (info->output_bfd);
3442 bfd_vma static_tls_size;
3443
3444 /* If tls_sec is NULL, we should have signalled an error already. */
3445 if (htab->tls_sec == NULL)
3446 return 0;
3447
3448 /* Consider special static TLS alignment requirements. */
3449 static_tls_size = BFD_ALIGN (htab->tls_size, bed->static_tls_alignment);
3450 return address - static_tls_size - htab->tls_sec->vma;
3451 }
3452
3453 /* Is the instruction before OFFSET in CONTENTS a 32bit relative
3454 branch? */
3455
3456 static bfd_boolean
3457 is_32bit_relative_branch (bfd_byte *contents, bfd_vma offset)
3458 {
3459 /* Opcode Instruction
3460 0xe8 call
3461 0xe9 jump
3462 0x0f 0x8x conditional jump */
3463 return ((offset > 0
3464 && (contents [offset - 1] == 0xe8
3465 || contents [offset - 1] == 0xe9))
3466 || (offset > 1
3467 && contents [offset - 2] == 0x0f
3468 && (contents [offset - 1] & 0xf0) == 0x80));
3469 }
3470
3471 /* Relocate an x86_64 ELF section. */
3472
3473 static bfd_boolean
3474 elf_x86_64_relocate_section (bfd *output_bfd,
3475 struct bfd_link_info *info,
3476 bfd *input_bfd,
3477 asection *input_section,
3478 bfd_byte *contents,
3479 Elf_Internal_Rela *relocs,
3480 Elf_Internal_Sym *local_syms,
3481 asection **local_sections)
3482 {
3483 struct elf_x86_64_link_hash_table *htab;
3484 Elf_Internal_Shdr *symtab_hdr;
3485 struct elf_link_hash_entry **sym_hashes;
3486 bfd_vma *local_got_offsets;
3487 bfd_vma *local_tlsdesc_gotents;
3488 Elf_Internal_Rela *rel;
3489 Elf_Internal_Rela *relend;
3490 const unsigned int plt_entry_size = GET_PLT_ENTRY_SIZE (info->output_bfd);
3491
3492 BFD_ASSERT (is_x86_64_elf (input_bfd));
3493
3494 htab = elf_x86_64_hash_table (info);
3495 if (htab == NULL)
3496 return FALSE;
3497 symtab_hdr = &elf_symtab_hdr (input_bfd);
3498 sym_hashes = elf_sym_hashes (input_bfd);
3499 local_got_offsets = elf_local_got_offsets (input_bfd);
3500 local_tlsdesc_gotents = elf_x86_64_local_tlsdesc_gotent (input_bfd);
3501
3502 elf_x86_64_set_tls_module_base (info);
3503
3504 rel = relocs;
3505 relend = relocs + input_section->reloc_count;
3506 for (; rel < relend; rel++)
3507 {
3508 unsigned int r_type;
3509 reloc_howto_type *howto;
3510 unsigned long r_symndx;
3511 struct elf_link_hash_entry *h;
3512 struct elf_x86_64_link_hash_entry *eh;
3513 Elf_Internal_Sym *sym;
3514 asection *sec;
3515 bfd_vma off, offplt, plt_offset;
3516 bfd_vma relocation;
3517 bfd_boolean unresolved_reloc;
3518 bfd_reloc_status_type r;
3519 int tls_type;
3520 asection *base_got, *resolved_plt;
3521 bfd_vma st_size;
3522
3523 r_type = ELF32_R_TYPE (rel->r_info);
3524 if (r_type == (int) R_X86_64_GNU_VTINHERIT
3525 || r_type == (int) R_X86_64_GNU_VTENTRY)
3526 continue;
3527
3528 if (r_type >= (int) R_X86_64_standard)
3529 {
3530 (*_bfd_error_handler)
3531 (_("%B: unrecognized relocation (0x%x) in section `%A'"),
3532 input_bfd, input_section, r_type);
3533 bfd_set_error (bfd_error_bad_value);
3534 return FALSE;
3535 }
3536
3537 if (r_type != (int) R_X86_64_32
3538 || ABI_64_P (output_bfd))
3539 howto = x86_64_elf_howto_table + r_type;
3540 else
3541 howto = (x86_64_elf_howto_table
3542 + ARRAY_SIZE (x86_64_elf_howto_table) - 1);
3543 r_symndx = htab->r_sym (rel->r_info);
3544 h = NULL;
3545 sym = NULL;
3546 sec = NULL;
3547 unresolved_reloc = FALSE;
3548 if (r_symndx < symtab_hdr->sh_info)
3549 {
3550 sym = local_syms + r_symndx;
3551 sec = local_sections[r_symndx];
3552
3553 relocation = _bfd_elf_rela_local_sym (output_bfd, sym,
3554 &sec, rel);
3555 st_size = sym->st_size;
3556
3557 /* Relocate against local STT_GNU_IFUNC symbol. */
3558 if (!info->relocatable
3559 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
3560 {
3561 h = elf_x86_64_get_local_sym_hash (htab, input_bfd,
3562 rel, FALSE);
3563 if (h == NULL)
3564 abort ();
3565
3566 /* Set STT_GNU_IFUNC symbol value. */
3567 h->root.u.def.value = sym->st_value;
3568 h->root.u.def.section = sec;
3569 }
3570 }
3571 else
3572 {
3573 bfd_boolean warned ATTRIBUTE_UNUSED;
3574 bfd_boolean ignored ATTRIBUTE_UNUSED;
3575
3576 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
3577 r_symndx, symtab_hdr, sym_hashes,
3578 h, sec, relocation,
3579 unresolved_reloc, warned, ignored);
3580 st_size = h->size;
3581 }
3582
3583 if (sec != NULL && discarded_section (sec))
3584 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
3585 rel, 1, relend, howto, 0, contents);
3586
3587 if (info->relocatable)
3588 continue;
3589
3590 if (rel->r_addend == 0 && !ABI_64_P (output_bfd))
3591 {
3592 if (r_type == R_X86_64_64)
3593 {
3594 /* For x32, treat R_X86_64_64 like R_X86_64_32 and
3595 zero-extend it to 64bit if addend is zero. */
3596 r_type = R_X86_64_32;
3597 memset (contents + rel->r_offset + 4, 0, 4);
3598 }
3599 else if (r_type == R_X86_64_SIZE64)
3600 {
3601 /* For x32, treat R_X86_64_SIZE64 like R_X86_64_SIZE32 and
3602 zero-extend it to 64bit if addend is zero. */
3603 r_type = R_X86_64_SIZE32;
3604 memset (contents + rel->r_offset + 4, 0, 4);
3605 }
3606 }
3607
3608 eh = (struct elf_x86_64_link_hash_entry *) h;
3609
3610 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
3611 it here if it is defined in a non-shared object. */
3612 if (h != NULL
3613 && h->type == STT_GNU_IFUNC
3614 && h->def_regular)
3615 {
3616 bfd_vma plt_index;
3617 const char *name;
3618
3619 if ((input_section->flags & SEC_ALLOC) == 0
3620 || h->plt.offset == (bfd_vma) -1)
3621 abort ();
3622
3623 /* STT_GNU_IFUNC symbol must go through PLT. */
3624 if (htab->elf.splt != NULL)
3625 {
3626 if (htab->plt_bnd != NULL)
3627 {
3628 resolved_plt = htab->plt_bnd;
3629 plt_offset = eh->plt_bnd.offset;
3630 }
3631 else
3632 {
3633 resolved_plt = htab->elf.splt;
3634 plt_offset = h->plt.offset;
3635 }
3636 }
3637 else
3638 {
3639 resolved_plt = htab->elf.iplt;
3640 plt_offset = h->plt.offset;
3641 }
3642
3643 relocation = (resolved_plt->output_section->vma
3644 + resolved_plt->output_offset + plt_offset);
3645
3646 switch (r_type)
3647 {
3648 default:
3649 if (h->root.root.string)
3650 name = h->root.root.string;
3651 else
3652 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
3653 NULL);
3654 (*_bfd_error_handler)
3655 (_("%B: relocation %s against STT_GNU_IFUNC "
3656 "symbol `%s' isn't handled by %s"), input_bfd,
3657 x86_64_elf_howto_table[r_type].name,
3658 name, __FUNCTION__);
3659 bfd_set_error (bfd_error_bad_value);
3660 return FALSE;
3661
3662 case R_X86_64_32S:
3663 if (info->shared)
3664 abort ();
3665 goto do_relocation;
3666
3667 case R_X86_64_32:
3668 if (ABI_64_P (output_bfd))
3669 goto do_relocation;
3670 /* FALLTHROUGH */
3671 case R_X86_64_64:
3672 if (rel->r_addend != 0)
3673 {
3674 if (h->root.root.string)
3675 name = h->root.root.string;
3676 else
3677 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
3678 sym, NULL);
3679 (*_bfd_error_handler)
3680 (_("%B: relocation %s against STT_GNU_IFUNC "
3681 "symbol `%s' has non-zero addend: %d"),
3682 input_bfd, x86_64_elf_howto_table[r_type].name,
3683 name, rel->r_addend);
3684 bfd_set_error (bfd_error_bad_value);
3685 return FALSE;
3686 }
3687
3688 /* Generate a dynamic relocation only when there is a
3689 non-GOT reference in a shared object. */
3690 if (info->shared && h->non_got_ref)
3691 {
3692 Elf_Internal_Rela outrel;
3693 asection *sreloc;
3694
3695 /* Need a dynamic relocation to get the real function
3696 address. */
3697 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
3698 info,
3699 input_section,
3700 rel->r_offset);
3701 if (outrel.r_offset == (bfd_vma) -1
3702 || outrel.r_offset == (bfd_vma) -2)
3703 abort ();
3704
3705 outrel.r_offset += (input_section->output_section->vma
3706 + input_section->output_offset);
3707
3708 if (h->dynindx == -1
3709 || h->forced_local
3710 || info->executable)
3711 {
3712 /* This symbol is resolved locally. */
3713 outrel.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
3714 outrel.r_addend = (h->root.u.def.value
3715 + h->root.u.def.section->output_section->vma
3716 + h->root.u.def.section->output_offset);
3717 }
3718 else
3719 {
3720 outrel.r_info = htab->r_info (h->dynindx, r_type);
3721 outrel.r_addend = 0;
3722 }
3723
3724 sreloc = htab->elf.irelifunc;
3725 elf_append_rela (output_bfd, sreloc, &outrel);
3726
3727 /* If this reloc is against an external symbol, we
3728 do not want to fiddle with the addend. Otherwise,
3729 we need to include the symbol value so that it
3730 becomes an addend for the dynamic reloc. For an
3731 internal symbol, we have already updated the addend. */
3732 continue;
3733 }
3734 /* FALLTHROUGH */
3735 case R_X86_64_PC32:
3736 case R_X86_64_PC32_BND:
3737 case R_X86_64_PC64:
3738 case R_X86_64_PLT32:
3739 case R_X86_64_PLT32_BND:
3740 goto do_relocation;
3741
3742 case R_X86_64_GOTPCREL:
3743 case R_X86_64_GOTPCREL64:
3744 base_got = htab->elf.sgot;
3745 off = h->got.offset;
3746
3747 if (base_got == NULL)
3748 abort ();
3749
3750 if (off == (bfd_vma) -1)
3751 {
3752 /* We can't use h->got.offset here to save state, or
3753 even just remember the offset, as finish_dynamic_symbol
3754 would use that as offset into .got. */
3755
3756 if (htab->elf.splt != NULL)
3757 {
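/* The first PLT entry is reserved, and the first three .got.plt
entries are reserved for the dynamic linker; hence the -1 and +3. */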
3758 plt_index = h->plt.offset / plt_entry_size - 1;
3759 off = (plt_index + 3) * GOT_ENTRY_SIZE;
3760 base_got = htab->elf.sgotplt;
3761 }
3762 else
3763 {
3764 plt_index = h->plt.offset / plt_entry_size;
3765 off = plt_index * GOT_ENTRY_SIZE;
3766 base_got = htab->elf.igotplt;
3767 }
3768
3769 if (h->dynindx == -1
3770 || h->forced_local
3771 || info->symbolic)
3772 {
3773 /* This references the local definition. We must
3774 initialize this entry in the global offset table.
3775 Since the offset must always be a multiple of 8,
3776 we use the least significant bit to record
3777 whether we have initialized it already.
3778
3779 When doing a dynamic link, we create a .rela.got
3780 relocation entry to initialize the value. This
3781 is done in the finish_dynamic_symbol routine. */
3782 if ((off & 1) != 0)
3783 off &= ~1;
3784 else
3785 {
3786 bfd_put_64 (output_bfd, relocation,
3787 base_got->contents + off);
3788 /* Note that this is harmless for the GOTPLT64
3789 case, as -1 | 1 still is -1. */
3790 h->got.offset |= 1;
3791 }
3792 }
3793 }
3794
3795 relocation = (base_got->output_section->vma
3796 + base_got->output_offset + off);
3797
3798 goto do_relocation;
3799 }
3800 }
3801
3802 /* When generating a shared object, the relocations handled here are
3803 copied into the output file to be resolved at run time. */
3804 switch (r_type)
3805 {
3806 case R_X86_64_GOT32:
3807 case R_X86_64_GOT64:
3808 /* Relocation is to the entry for this symbol in the global
3809 offset table. */
3810 case R_X86_64_GOTPCREL:
3811 case R_X86_64_GOTPCREL64:
3812 /* Use global offset table entry as symbol value. */
3813 case R_X86_64_GOTPLT64:
3814 /* This is obsolete and treated the same as GOT64. */
3815 base_got = htab->elf.sgot;
3816
3817 if (htab->elf.sgot == NULL)
3818 abort ();
3819
3820 if (h != NULL)
3821 {
3822 bfd_boolean dyn;
3823
3824 off = h->got.offset;
3825 if (h->needs_plt
3826 && h->plt.offset != (bfd_vma)-1
3827 && off == (bfd_vma)-1)
3828 {
3829 /* We can't use h->got.offset here to save
3830 state, or even just remember the offset, as
3831 finish_dynamic_symbol would use that as offset into
3832 .got. */
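/* Skip the reserved first PLT entry; the first three .got.plt
entries are reserved for the dynamic linker. */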
3833 bfd_vma plt_index = h->plt.offset / plt_entry_size - 1;
3834 off = (plt_index + 3) * GOT_ENTRY_SIZE;
3835 base_got = htab->elf.sgotplt;
3836 }
3837
3838 dyn = htab->elf.dynamic_sections_created;
3839
3840 if (! WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
3841 || (info->shared
3842 && SYMBOL_REFERENCES_LOCAL (info, h))
3843 || (ELF_ST_VISIBILITY (h->other)
3844 && h->root.type == bfd_link_hash_undefweak))
3845 {
3846 /* This is actually a static link, or it is a -Bsymbolic
3847 link and the symbol is defined locally, or the symbol
3848 was forced to be local because of a version file. We
3849 must initialize this entry in the global offset table.
3850 Since the offset must always be a multiple of 8, we
3851 use the least significant bit to record whether we
3852 have initialized it already.
3853
3854 When doing a dynamic link, we create a .rela.got
3855 relocation entry to initialize the value. This is
3856 done in the finish_dynamic_symbol routine. */
3857 if ((off & 1) != 0)
3858 off &= ~1;
3859 else
3860 {
3861 bfd_put_64 (output_bfd, relocation,
3862 base_got->contents + off);
3863 /* Note that this is harmless for the GOTPLT64 case,
3864 as -1 | 1 still is -1. */
3865 h->got.offset |= 1;
3866 }
3867 }
3868 else
3869 unresolved_reloc = FALSE;
3870 }
3871 else
3872 {
3873 if (local_got_offsets == NULL)
3874 abort ();
3875
3876 off = local_got_offsets[r_symndx];
3877
3878 /* The offset must always be a multiple of 8. We use
3879 the least significant bit to record whether we have
3880 already generated the necessary reloc. */
3881 if ((off & 1) != 0)
3882 off &= ~1;
3883 else
3884 {
3885 bfd_put_64 (output_bfd, relocation,
3886 base_got->contents + off);
3887
3888 if (info->shared)
3889 {
3890 asection *s;
3891 Elf_Internal_Rela outrel;
3892
3893 /* We need to generate a R_X86_64_RELATIVE reloc
3894 for the dynamic linker. */
3895 s = htab->elf.srelgot;
3896 if (s == NULL)
3897 abort ();
3898
3899 outrel.r_offset = (base_got->output_section->vma
3900 + base_got->output_offset
3901 + off);
3902 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
3903 outrel.r_addend = relocation;
3904 elf_append_rela (output_bfd, s, &outrel);
3905 }
3906
3907 local_got_offsets[r_symndx] |= 1;
3908 }
3909 }
3910
3911 if (off >= (bfd_vma) -2)
3912 abort ();
3913
3914 relocation = base_got->output_section->vma
3915 + base_got->output_offset + off;
3916 if (r_type != R_X86_64_GOTPCREL && r_type != R_X86_64_GOTPCREL64)
3917 relocation -= htab->elf.sgotplt->output_section->vma
3918 - htab->elf.sgotplt->output_offset;
3919
3920 break;
3921
3922 case R_X86_64_GOTOFF64:
3923 /* Relocation is relative to the start of the global offset
3924 table. */
3925
3926 /* Check to make sure it isn't a protected function symbol
3927 for a shared library, since it may not be local when used
3928 as a function address. */
3929 if (!info->executable
3930 && h
3931 && !SYMBOLIC_BIND (info, h)
3932 && h->def_regular
3933 && h->type == STT_FUNC
3934 && ELF_ST_VISIBILITY (h->other) == STV_PROTECTED)
3935 {
3936 (*_bfd_error_handler)
3937 (_("%B: relocation R_X86_64_GOTOFF64 against protected function `%s' can not be used when making a shared object"),
3938 input_bfd, h->root.root.string);
3939 bfd_set_error (bfd_error_bad_value);
3940 return FALSE;
3941 }
3942
3943 /* Note that sgot is not involved in this
3944 calculation. We always want the start of .got.plt. If we
3945 defined _GLOBAL_OFFSET_TABLE_ in a different way, as is
3946 permitted by the ABI, we might have to change this
3947 calculation. */
3948 relocation -= htab->elf.sgotplt->output_section->vma
3949 + htab->elf.sgotplt->output_offset;
3950 break;
3951
3952 case R_X86_64_GOTPC32:
3953 case R_X86_64_GOTPC64:
3954 /* Use global offset table as symbol value. */
3955 relocation = htab->elf.sgotplt->output_section->vma
3956 + htab->elf.sgotplt->output_offset;
3957 unresolved_reloc = FALSE;
3958 break;
3959
3960 case R_X86_64_PLTOFF64:
3961 /* Relocation is PLT entry relative to GOT. For local
3962 symbols it's the symbol itself relative to GOT. */
3963 if (h != NULL
3964 /* See PLT32 handling. */
3965 && h->plt.offset != (bfd_vma) -1
3966 && htab->elf.splt != NULL)
3967 {
3968 if (htab->plt_bnd != NULL)
3969 {
3970 resolved_plt = htab->plt_bnd;
3971 plt_offset = eh->plt_bnd.offset;
3972 }
3973 else
3974 {
3975 resolved_plt = htab->elf.splt;
3976 plt_offset = h->plt.offset;
3977 }
3978
3979 relocation = (resolved_plt->output_section->vma
3980 + resolved_plt->output_offset
3981 + plt_offset);
3982 unresolved_reloc = FALSE;
3983 }
3984
3985 relocation -= htab->elf.sgotplt->output_section->vma
3986 + htab->elf.sgotplt->output_offset;
3987 break;
3988
3989 case R_X86_64_PLT32:
3990 case R_X86_64_PLT32_BND:
3991 /* Relocation is to the entry for this symbol in the
3992 procedure linkage table. */
3993
3994 /* Resolve a PLT32 reloc against a local symbol directly,
3995 without using the procedure linkage table. */
3996 if (h == NULL)
3997 break;
3998
3999 if ((h->plt.offset == (bfd_vma) -1
4000 && eh->plt_got.offset == (bfd_vma) -1)
4001 || htab->elf.splt == NULL)
4002 {
4003 /* We didn't make a PLT entry for this symbol. This
4004 happens when statically linking PIC code, or when
4005 using -Bsymbolic. */
4006 break;
4007 }
4008
4009 if (h->plt.offset != (bfd_vma) -1)
4010 {
4011 if (htab->plt_bnd != NULL)
4012 {
4013 resolved_plt = htab->plt_bnd;
4014 plt_offset = eh->plt_bnd.offset;
4015 }
4016 else
4017 {
4018 resolved_plt = htab->elf.splt;
4019 plt_offset = h->plt.offset;
4020 }
4021 }
4022 else
4023 {
4024 /* Use the GOT PLT. */
4025 resolved_plt = htab->plt_got;
4026 plt_offset = eh->plt_got.offset;
4027 }
4028
4029 relocation = (resolved_plt->output_section->vma
4030 + resolved_plt->output_offset
4031 + plt_offset);
4032 unresolved_reloc = FALSE;
4033 break;
4034
4035 case R_X86_64_SIZE32:
4036 case R_X86_64_SIZE64:
4037 /* Set to symbol size. */
4038 relocation = st_size;
4039 goto direct;
4040
4041 case R_X86_64_PC8:
4042 case R_X86_64_PC16:
4043 case R_X86_64_PC32:
4044 case R_X86_64_PC32_BND:
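/* When building a shared object, check that a PC-relative relocation
in a read-only section can still be resolved: the symbol must be
defined locally, get a copy reloc, or be reached only through a
branch; otherwise report an error below. */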
4045 if (info->shared
4046 && (input_section->flags & SEC_ALLOC) != 0
4047 && (input_section->flags & SEC_READONLY) != 0
4048 && h != NULL)
4049 {
4050 bfd_boolean fail = FALSE;
4051 bfd_boolean branch
4052 = ((r_type == R_X86_64_PC32
4053 || r_type == R_X86_64_PC32_BND)
4054 && is_32bit_relative_branch (contents, rel->r_offset));
4055
4056 if (SYMBOL_REFERENCES_LOCAL (info, h))
4057 {
4058 /* The symbol is referenced locally. Make sure it is
4059 defined locally, or that this is a branch. */
4060 fail = !h->def_regular && !branch;
4061 }
4062 else if (!h->needs_copy)
4063 {
4064 /* The symbol doesn't need a copy reloc and isn't referenced
4065 locally. We only allow a branch to a symbol with
4066 non-default visibility. */
4067 fail = (!branch
4068 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT);
4069 }
4070
4071 if (fail)
4072 {
4073 const char *fmt;
4074 const char *v;
4075 const char *pic = "";
4076
4077 switch (ELF_ST_VISIBILITY (h->other))
4078 {
4079 case STV_HIDDEN:
4080 v = _("hidden symbol");
4081 break;
4082 case STV_INTERNAL:
4083 v = _("internal symbol");
4084 break;
4085 case STV_PROTECTED:
4086 v = _("protected symbol");
4087 break;
4088 default:
4089 v = _("symbol");
4090 pic = _("; recompile with -fPIC");
4091 break;
4092 }
4093
4094 if (h->def_regular)
4095 fmt = _("%B: relocation %s against %s `%s' can not be used when making a shared object%s");
4096 else
4097 fmt = _("%B: relocation %s against undefined %s `%s' can not be used when making a shared object%s");
4098
4099 (*_bfd_error_handler) (fmt, input_bfd,
4100 x86_64_elf_howto_table[r_type].name,
4101 v, h->root.root.string, pic);
4102 bfd_set_error (bfd_error_bad_value);
4103 return FALSE;
4104 }
4105 }
4106 /* Fall through. */
4107
4108 case R_X86_64_8:
4109 case R_X86_64_16:
4110 case R_X86_64_32:
4111 case R_X86_64_PC64:
4112 case R_X86_64_64:
4113 /* FIXME: The ABI says the linker should make sure the value is
4114 the same when it's zero-extended to 64 bits. */
4115
4116 direct:
4117 if ((input_section->flags & SEC_ALLOC) == 0)
4118 break;
4119
4120 /* Don't copy a pc-relative relocation into the output file
4121 if the symbol needs copy reloc. */
4122 if ((info->shared
4123 && !(h != NULL
4124 && h->needs_copy
4125 && IS_X86_64_PCREL_TYPE (r_type))
4126 && (h == NULL
4127 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4128 || h->root.type != bfd_link_hash_undefweak)
4129 && ((! IS_X86_64_PCREL_TYPE (r_type)
4130 && r_type != R_X86_64_SIZE32
4131 && r_type != R_X86_64_SIZE64)
4132 || ! SYMBOL_CALLS_LOCAL (info, h)))
4133 || (ELIMINATE_COPY_RELOCS
4134 && !info->shared
4135 && h != NULL
4136 && h->dynindx != -1
4137 && !h->non_got_ref
4138 && ((h->def_dynamic
4139 && !h->def_regular)
4140 || h->root.type == bfd_link_hash_undefweak
4141 || h->root.type == bfd_link_hash_undefined)))
4142 {
4143 Elf_Internal_Rela outrel;
4144 bfd_boolean skip, relocate;
4145 asection *sreloc;
4146
4147 /* When generating a shared object, these relocations
4148 are copied into the output file to be resolved at run
4149 time. */
4150 skip = FALSE;
4151 relocate = FALSE;
4152
4153 outrel.r_offset =
4154 _bfd_elf_section_offset (output_bfd, info, input_section,
4155 rel->r_offset);
4156 if (outrel.r_offset == (bfd_vma) -1)
4157 skip = TRUE;
4158 else if (outrel.r_offset == (bfd_vma) -2)
4159 skip = TRUE, relocate = TRUE;
4160
4161 outrel.r_offset += (input_section->output_section->vma
4162 + input_section->output_offset);
4163
4164 if (skip)
4165 memset (&outrel, 0, sizeof outrel);
4166
4167 /* h->dynindx may be -1 if this symbol was marked to
4168 become local. */
4169 else if (h != NULL
4170 && h->dynindx != -1
4171 && (IS_X86_64_PCREL_TYPE (r_type)
4172 || ! info->shared
4173 || ! SYMBOLIC_BIND (info, h)
4174 || ! h->def_regular))
4175 {
4176 outrel.r_info = htab->r_info (h->dynindx, r_type);
4177 outrel.r_addend = rel->r_addend;
4178 }
4179 else
4180 {
4181 /* This symbol is local, or marked to become local. */
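/* A pointer-sized absolute relocation (R_X86_64_64, or R_X86_64_32
for x32) against a local symbol becomes R_X86_64_RELATIVE. */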
4182 if (r_type == htab->pointer_r_type)
4183 {
4184 relocate = TRUE;
4185 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4186 outrel.r_addend = relocation + rel->r_addend;
4187 }
4188 else if (r_type == R_X86_64_64
4189 && !ABI_64_P (output_bfd))
4190 {
4191 relocate = TRUE;
4192 outrel.r_info = htab->r_info (0,
4193 R_X86_64_RELATIVE64);
4194 outrel.r_addend = relocation + rel->r_addend;
4195 /* Check addend overflow. */
4196 if ((outrel.r_addend & 0x80000000)
4197 != (rel->r_addend & 0x80000000))
4198 {
4199 const char *name;
4200 int addend = rel->r_addend;
4201 if (h && h->root.root.string)
4202 name = h->root.root.string;
4203 else
4204 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
4205 sym, NULL);
4206 if (addend < 0)
4207 (*_bfd_error_handler)
4208 (_("%B: addend -0x%x in relocation %s against "
4209 "symbol `%s' at 0x%lx in section `%A' is "
4210 "out of range"),
4211 input_bfd, input_section, addend,
4212 x86_64_elf_howto_table[r_type].name,
4213 name, (unsigned long) rel->r_offset);
4214 else
4215 (*_bfd_error_handler)
4216 (_("%B: addend 0x%x in relocation %s against "
4217 "symbol `%s' at 0x%lx in section `%A' is "
4218 "out of range"),
4219 input_bfd, input_section, addend,
4220 x86_64_elf_howto_table[r_type].name,
4221 name, (unsigned long) rel->r_offset);
4222 bfd_set_error (bfd_error_bad_value);
4223 return FALSE;
4224 }
4225 }
4226 else
4227 {
4228 long sindx;
4229
4230 if (bfd_is_abs_section (sec))
4231 sindx = 0;
4232 else if (sec == NULL || sec->owner == NULL)
4233 {
4234 bfd_set_error (bfd_error_bad_value);
4235 return FALSE;
4236 }
4237 else
4238 {
4239 asection *osec;
4240
4241 /* We are turning this relocation into one
4242 against a section symbol. It would be
4243 proper to subtract the symbol's value,
4244 osec->vma, from the emitted reloc addend,
4245 but ld.so expects buggy relocs. */
4246 osec = sec->output_section;
4247 sindx = elf_section_data (osec)->dynindx;
4248 if (sindx == 0)
4249 {
4250 asection *oi = htab->elf.text_index_section;
4251 sindx = elf_section_data (oi)->dynindx;
4252 }
4253 BFD_ASSERT (sindx != 0);
4254 }
4255
4256 outrel.r_info = htab->r_info (sindx, r_type);
4257 outrel.r_addend = relocation + rel->r_addend;
4258 }
4259 }
4260
4261 sreloc = elf_section_data (input_section)->sreloc;
4262
4263 if (sreloc == NULL || sreloc->contents == NULL)
4264 {
4265 r = bfd_reloc_notsupported;
4266 goto check_relocation_error;
4267 }
4268
4269 elf_append_rela (output_bfd, sreloc, &outrel);
4270
4271 /* If this reloc is against an external symbol, we do
4272 not want to fiddle with the addend. Otherwise, we
4273 need to include the symbol value so that it becomes
4274 an addend for the dynamic reloc. */
4275 if (! relocate)
4276 continue;
4277 }
4278
4279 break;
4280
4281 case R_X86_64_TLSGD:
4282 case R_X86_64_GOTPC32_TLSDESC:
4283 case R_X86_64_TLSDESC_CALL:
4284 case R_X86_64_GOTTPOFF:
4285 tls_type = GOT_UNKNOWN;
4286 if (h == NULL && local_got_offsets)
4287 tls_type = elf_x86_64_local_got_tls_type (input_bfd) [r_symndx];
4288 else if (h != NULL)
4289 tls_type = elf_x86_64_hash_entry (h)->tls_type;
4290
4291 if (! elf_x86_64_tls_transition (info, input_bfd,
4292 input_section, contents,
4293 symtab_hdr, sym_hashes,
4294 &r_type, tls_type, rel,
4295 relend, h, r_symndx))
4296 return FALSE;
4297
4298 if (r_type == R_X86_64_TPOFF32)
4299 {
4300 bfd_vma roff = rel->r_offset;
4301
4302 BFD_ASSERT (! unresolved_reloc);
4303
4304 if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSGD)
4305 {
4306 /* GD->LE transition. For 64bit, change
4307 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
4308 .word 0x6666; rex64; call __tls_get_addr
4309 into:
4310 movq %fs:0, %rax
4311 leaq foo@tpoff(%rax), %rax
4312 For 32bit, change
4313 leaq foo@tlsgd(%rip), %rdi
4314 .word 0x6666; rex64; call __tls_get_addr
4315 into:
4316 movl %fs:0, %eax
4317 leaq foo@tpoff(%rax), %rax
4318 For largepic, change:
4319 leaq foo@tlsgd(%rip), %rdi
4320 movabsq $__tls_get_addr@pltoff, %rax
4321 addq %rbx, %rax
4322 call *%rax
4323 into:
4324 movq %fs:0, %rax
4325 leaq foo@tpoff(%rax), %rax
4326 nopw 0x0(%rax,%rax,1) */
4327 int largepic = 0;
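/* In the large-PIC sequence the movabsq $__tls_get_addr@pltoff, %rax
(0x48 0xb8) immediately follows the 4-byte @tlsgd operand, so a 0xb8
byte at roff + 5 identifies it. */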
4328 if (ABI_64_P (output_bfd)
4329 && contents[roff + 5] == (bfd_byte) '\xb8')
4330 {
4331 memcpy (contents + roff - 3,
4332 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80"
4333 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
4334 largepic = 1;
4335 }
4336 else if (ABI_64_P (output_bfd))
4337 memcpy (contents + roff - 4,
4338 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
4339 16);
4340 else
4341 memcpy (contents + roff - 3,
4342 "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
4343 15);
4344 bfd_put_32 (output_bfd,
4345 elf_x86_64_tpoff (info, relocation),
4346 contents + roff + 8 + largepic);
4347 /* Skip R_X86_64_PC32/R_X86_64_PLT32/R_X86_64_PLTOFF64. */
4348 rel++;
4349 continue;
4350 }
4351 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTPC32_TLSDESC)
4352 {
4353 /* GDesc -> LE transition.
4354 It's originally something like:
4355 leaq x@tlsdesc(%rip), %rax
4356
4357 Change it to:
4358 movl $x@tpoff, %rax. */
4359
4360 unsigned int val, type;
4361
4362 type = bfd_get_8 (input_bfd, contents + roff - 3);
4363 val = bfd_get_8 (input_bfd, contents + roff - 1);
4364 bfd_put_8 (output_bfd, 0x48 | ((type >> 2) & 1),
4365 contents + roff - 3);
4366 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
4367 bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7),
4368 contents + roff - 1);
4369 bfd_put_32 (output_bfd,
4370 elf_x86_64_tpoff (info, relocation),
4371 contents + roff);
4372 continue;
4373 }
4374 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSDESC_CALL)
4375 {
4376 /* GDesc -> LE transition.
4377 It's originally:
4378 call *(%rax)
4379 Turn it into:
4380 xchg %ax,%ax. */
4381 bfd_put_8 (output_bfd, 0x66, contents + roff);
4382 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
4383 continue;
4384 }
4385 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTTPOFF)
4386 {
4387 /* IE->LE transition:
4388 For 64bit, originally it can be one of:
4389 movq foo@gottpoff(%rip), %reg
4390 addq foo@gottpoff(%rip), %reg
4391 We change it into:
4392 movq $foo, %reg
4393 leaq foo(%reg), %reg
4394 addq $foo, %reg.
4395 For 32bit, originally it can be one of:
4396 movq foo@gottpoff(%rip), %reg
4397 addl foo@gottpoff(%rip), %reg
4398 We change it into:
4399 movq $foo, %reg
4400 leal foo(%reg), %reg
4401 addl $foo, %reg. */
4402
4403 unsigned int val, type, reg;
4404
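/* ROFF is the offset of the 32-bit operand: roff - 3 is the REX
prefix (if any), roff - 2 the opcode and roff - 1 the ModRM byte.
With a %rip-relative operand (mod = 00, r/m = 101), shifting the
ModRM byte right by 3 leaves just the register field. */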
4405 if (roff >= 3)
4406 val = bfd_get_8 (input_bfd, contents + roff - 3);
4407 else
4408 val = 0;
4409 type = bfd_get_8 (input_bfd, contents + roff - 2);
4410 reg = bfd_get_8 (input_bfd, contents + roff - 1);
4411 reg >>= 3;
4412 if (type == 0x8b)
4413 {
4414 /* movq */
4415 if (val == 0x4c)
4416 bfd_put_8 (output_bfd, 0x49,
4417 contents + roff - 3);
4418 else if (!ABI_64_P (output_bfd) && val == 0x44)
4419 bfd_put_8 (output_bfd, 0x41,
4420 contents + roff - 3);
4421 bfd_put_8 (output_bfd, 0xc7,
4422 contents + roff - 2);
4423 bfd_put_8 (output_bfd, 0xc0 | reg,
4424 contents + roff - 1);
4425 }
4426 else if (reg == 4)
4427 {
4428 /* addq/addl -> addq/addl - addressing with %rsp/%r12
4429 is special */
4430 if (val == 0x4c)
4431 bfd_put_8 (output_bfd, 0x49,
4432 contents + roff - 3);
4433 else if (!ABI_64_P (output_bfd) && val == 0x44)
4434 bfd_put_8 (output_bfd, 0x41,
4435 contents + roff - 3);
4436 bfd_put_8 (output_bfd, 0x81,
4437 contents + roff - 2);
4438 bfd_put_8 (output_bfd, 0xc0 | reg,
4439 contents + roff - 1);
4440 }
4441 else
4442 {
4443 /* addq/addl -> leaq/leal */
4444 if (val == 0x4c)
4445 bfd_put_8 (output_bfd, 0x4d,
4446 contents + roff - 3);
4447 else if (!ABI_64_P (output_bfd) && val == 0x44)
4448 bfd_put_8 (output_bfd, 0x45,
4449 contents + roff - 3);
4450 bfd_put_8 (output_bfd, 0x8d,
4451 contents + roff - 2);
4452 bfd_put_8 (output_bfd, 0x80 | reg | (reg << 3),
4453 contents + roff - 1);
4454 }
4455 bfd_put_32 (output_bfd,
4456 elf_x86_64_tpoff (info, relocation),
4457 contents + roff);
4458 continue;
4459 }
4460 else
4461 BFD_ASSERT (FALSE);
4462 }
4463
4464 if (htab->elf.sgot == NULL)
4465 abort ();
4466
4467 if (h != NULL)
4468 {
4469 off = h->got.offset;
4470 offplt = elf_x86_64_hash_entry (h)->tlsdesc_got;
4471 }
4472 else
4473 {
4474 if (local_got_offsets == NULL)
4475 abort ();
4476
4477 off = local_got_offsets[r_symndx];
4478 offplt = local_tlsdesc_gotents[r_symndx];
4479 }
4480
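/* GOT offsets are always multiples of 8, so the low bit is free to
record whether the entry has already been initialized. */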
4481 if ((off & 1) != 0)
4482 off &= ~1;
4483 else
4484 {
4485 Elf_Internal_Rela outrel;
4486 int dr_type, indx;
4487 asection *sreloc;
4488
4489 if (htab->elf.srelgot == NULL)
4490 abort ();
4491
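/* INDX is the dynamic symbol index used in the relocation; zero
means the symbol resolves locally. */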
4492 indx = h && h->dynindx != -1 ? h->dynindx : 0;
4493
4494 if (GOT_TLS_GDESC_P (tls_type))
4495 {
4496 outrel.r_info = htab->r_info (indx, R_X86_64_TLSDESC);
4497 BFD_ASSERT (htab->sgotplt_jump_table_size + offplt
4498 + 2 * GOT_ENTRY_SIZE <= htab->elf.sgotplt->size);
4499 outrel.r_offset = (htab->elf.sgotplt->output_section->vma
4500 + htab->elf.sgotplt->output_offset
4501 + offplt
4502 + htab->sgotplt_jump_table_size);
4503 sreloc = htab->elf.srelplt;
4504 if (indx == 0)
4505 outrel.r_addend = relocation - elf_x86_64_dtpoff_base (info);
4506 else
4507 outrel.r_addend = 0;
4508 elf_append_rela (output_bfd, sreloc, &outrel);
4509 }
4510
4511 sreloc = htab->elf.srelgot;
4512
4513 outrel.r_offset = (htab->elf.sgot->output_section->vma
4514 + htab->elf.sgot->output_offset + off);
4515
4516 if (GOT_TLS_GD_P (tls_type))
4517 dr_type = R_X86_64_DTPMOD64;
4518 else if (GOT_TLS_GDESC_P (tls_type))
4519 goto dr_done;
4520 else
4521 dr_type = R_X86_64_TPOFF64;
4522
4523 bfd_put_64 (output_bfd, 0, htab->elf.sgot->contents + off);
4524 outrel.r_addend = 0;
4525 if ((dr_type == R_X86_64_TPOFF64
4526 || dr_type == R_X86_64_TLSDESC) && indx == 0)
4527 outrel.r_addend = relocation - elf_x86_64_dtpoff_base (info);
4528 outrel.r_info = htab->r_info (indx, dr_type);
4529
4530 elf_append_rela (output_bfd, sreloc, &outrel);
4531
4532 if (GOT_TLS_GD_P (tls_type))
4533 {
4534 if (indx == 0)
4535 {
4536 BFD_ASSERT (! unresolved_reloc);
4537 bfd_put_64 (output_bfd,
4538 relocation - elf_x86_64_dtpoff_base (info),
4539 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
4540 }
4541 else
4542 {
4543 bfd_put_64 (output_bfd, 0,
4544 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
4545 outrel.r_info = htab->r_info (indx,
4546 R_X86_64_DTPOFF64);
4547 outrel.r_offset += GOT_ENTRY_SIZE;
4548 elf_append_rela (output_bfd, sreloc,
4549 &outrel);
4550 }
4551 }
4552
4553 dr_done:
4554 if (h != NULL)
4555 h->got.offset |= 1;
4556 else
4557 local_got_offsets[r_symndx] |= 1;
4558 }
4559
4560 if (off >= (bfd_vma) -2
4561 && ! GOT_TLS_GDESC_P (tls_type))
4562 abort ();
4563 if (r_type == ELF32_R_TYPE (rel->r_info))
4564 {
4565 if (r_type == R_X86_64_GOTPC32_TLSDESC
4566 || r_type == R_X86_64_TLSDESC_CALL)
4567 relocation = htab->elf.sgotplt->output_section->vma
4568 + htab->elf.sgotplt->output_offset
4569 + offplt + htab->sgotplt_jump_table_size;
4570 else
4571 relocation = htab->elf.sgot->output_section->vma
4572 + htab->elf.sgot->output_offset + off;
4573 unresolved_reloc = FALSE;
4574 }
4575 else
4576 {
4577 bfd_vma roff = rel->r_offset;
4578
4579 if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSGD)
4580 {
4581 /* GD->IE transition. For 64bit, change
4582 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
4583 .word 0x6666; rex64; call __tls_get_addr@plt
4584 into:
4585 movq %fs:0, %rax
4586 addq foo@gottpoff(%rip), %rax
4587 For 32bit, change
4588 leaq foo@tlsgd(%rip), %rdi
4589 .word 0x6666; rex64; call __tls_get_addr@plt
4590 into:
4591 movl %fs:0, %eax
4592 addq foo@gottpoff(%rip), %rax
4593 For largepic, change:
4594 leaq foo@tlsgd(%rip), %rdi
4595 movabsq $__tls_get_addr@pltoff, %rax
4596 addq %rbx, %rax
4597 call *%rax
4598 into:
4599 movq %fs:0, %rax
4600 addq foo@gottpoff(%rip), %rax
4601 nopw 0x0(%rax,%rax,1) */
4602 int largepic = 0;
4603 if (ABI_64_P (output_bfd)
4604 && contents[roff + 5] == (bfd_byte) '\xb8')
4605 {
4606 memcpy (contents + roff - 3,
4607 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05"
4608 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
4609 largepic = 1;
4610 }
4611 else if (ABI_64_P (output_bfd))
4612 memcpy (contents + roff - 4,
4613 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
4614 16);
4615 else
4616 memcpy (contents + roff - 3,
4617 "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
4618 15);
4619
4620 relocation = (htab->elf.sgot->output_section->vma
4621 + htab->elf.sgot->output_offset + off
4622 - roff
4623 - largepic
4624 - input_section->output_section->vma
4625 - input_section->output_offset
4626 - 12);
4627 bfd_put_32 (output_bfd, relocation,
4628 contents + roff + 8 + largepic);
4629 /* Skip R_X86_64_PLT32/R_X86_64_PLTOFF64. */
4630 rel++;
4631 continue;
4632 }
4633 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTPC32_TLSDESC)
4634 {
4635 /* GDesc -> IE transition.
4636 It's originally something like:
4637 leaq x@tlsdesc(%rip), %rax
4638
4639 Change it to:
4640 movq x@gottpoff(%rip), %rax # before xchg %ax,%ax. */
4641
4642 /* Now modify the instruction as appropriate. To
4643 turn the leaq into the movq form we need, it
4644 suffices to change the second byte from 0x8d to
4645 0x8b. */
4646 bfd_put_8 (output_bfd, 0x8b, contents + roff - 2);
4647
4648 bfd_put_32 (output_bfd,
4649 htab->elf.sgot->output_section->vma
4650 + htab->elf.sgot->output_offset + off
4651 - rel->r_offset
4652 - input_section->output_section->vma
4653 - input_section->output_offset
4654 - 4,
4655 contents + roff);
4656 continue;
4657 }
4658 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSDESC_CALL)
4659 {
4660 /* GDesc -> IE transition.
4661 It's originally:
4662 call *(%rax)
4663
4664 Change it to:
4665 xchg %ax, %ax. */
4666
4667 bfd_put_8 (output_bfd, 0x66, contents + roff);
4668 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
4669 continue;
4670 }
4671 else
4672 BFD_ASSERT (FALSE);
4673 }
4674 break;
4675
4676 case R_X86_64_TLSLD:
4677 if (! elf_x86_64_tls_transition (info, input_bfd,
4678 input_section, contents,
4679 symtab_hdr, sym_hashes,
4680 &r_type, GOT_UNKNOWN,
4681 rel, relend, h, r_symndx))
4682 return FALSE;
4683
4684 if (r_type != R_X86_64_TLSLD)
4685 {
4686 /* LD->LE transition:
4687 leaq foo@tlsld(%rip), %rdi; call __tls_get_addr.
4688 For 64bit, we change it into:
4689 .word 0x6666; .byte 0x66; movq %fs:0, %rax.
4690 For 32bit, we change it into:
4691 nopl 0x0(%rax); movl %fs:0, %eax.
4692 For largepic, change:
4693 leaq foo@tlsgd(%rip), %rdi
4694 movabsq $__tls_get_addr@pltoff, %rax
4695 addq %rbx, %rax
4696 call *%rax
4697 into:
4698 data32 data32 data32 nopw %cs:0x0(%rax,%rax,1)
4699 movq %fs:0, %rax */
4700
4701 BFD_ASSERT (r_type == R_X86_64_TPOFF32);
4702 if (ABI_64_P (output_bfd)
4703 && contents[rel->r_offset + 5] == (bfd_byte) '\xb8')
4704 memcpy (contents + rel->r_offset - 3,
4705 "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0"
4706 "\x64\x48\x8b\x04\x25\0\0\0", 22);
4707 else if (ABI_64_P (output_bfd))
4708 memcpy (contents + rel->r_offset - 3,
4709 "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12);
4710 else
4711 memcpy (contents + rel->r_offset - 3,
4712 "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12);
4713 /* Skip R_X86_64_PC32/R_X86_64_PLT32/R_X86_64_PLTOFF64. */
4714 rel++;
4715 continue;
4716 }
4717
4718 if (htab->elf.sgot == NULL)
4719 abort ();
4720
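/* A single GOT slot, htab->tls_ld_got, is shared by all
local-dynamic references; it holds the module ID (via
R_X86_64_DTPMOD64) and again uses the low bit as the
initialized flag. */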
4721 off = htab->tls_ld_got.offset;
4722 if (off & 1)
4723 off &= ~1;
4724 else
4725 {
4726 Elf_Internal_Rela outrel;
4727
4728 if (htab->elf.srelgot == NULL)
4729 abort ();
4730
4731 outrel.r_offset = (htab->elf.sgot->output_section->vma
4732 + htab->elf.sgot->output_offset + off);
4733
4734 bfd_put_64 (output_bfd, 0,
4735 htab->elf.sgot->contents + off);
4736 bfd_put_64 (output_bfd, 0,
4737 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
4738 outrel.r_info = htab->r_info (0, R_X86_64_DTPMOD64);
4739 outrel.r_addend = 0;
4740 elf_append_rela (output_bfd, htab->elf.srelgot,
4741 &outrel);
4742 htab->tls_ld_got.offset |= 1;
4743 }
4744 relocation = htab->elf.sgot->output_section->vma
4745 + htab->elf.sgot->output_offset + off;
4746 unresolved_reloc = FALSE;
4747 break;
4748
4749 case R_X86_64_DTPOFF32:
4750 if (!info->executable|| (input_section->flags & SEC_CODE) == 0)
4751 relocation -= elf_x86_64_dtpoff_base (info);
4752 else
4753 relocation = elf_x86_64_tpoff (info, relocation);
4754 break;
4755
4756 case R_X86_64_TPOFF32:
4757 case R_X86_64_TPOFF64:
4758 BFD_ASSERT (info->executable);
4759 relocation = elf_x86_64_tpoff (info, relocation);
4760 break;
4761
4762 case R_X86_64_DTPOFF64:
4763 BFD_ASSERT ((input_section->flags & SEC_CODE) == 0);
4764 relocation -= elf_x86_64_dtpoff_base (info);
4765 break;
4766
4767 default:
4768 break;
4769 }
4770
4771 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
4772 because such sections are not SEC_ALLOC and thus ld.so will
4773 not process them. */
4774 if (unresolved_reloc
4775 && !((input_section->flags & SEC_DEBUGGING) != 0
4776 && h->def_dynamic)
4777 && _bfd_elf_section_offset (output_bfd, info, input_section,
4778 rel->r_offset) != (bfd_vma) -1)
4779 {
4780 (*_bfd_error_handler)
4781 (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
4782 input_bfd,
4783 input_section,
4784 (long) rel->r_offset,
4785 howto->name,
4786 h->root.root.string);
4787 return FALSE;
4788 }
4789
4790 do_relocation:
4791 r = _bfd_final_link_relocate (howto, input_bfd, input_section,
4792 contents, rel->r_offset,
4793 relocation, rel->r_addend);
4794
4795 check_relocation_error:
4796 if (r != bfd_reloc_ok)
4797 {
4798 const char *name;
4799
4800 if (h != NULL)
4801 name = h->root.root.string;
4802 else
4803 {
4804 name = bfd_elf_string_from_elf_section (input_bfd,
4805 symtab_hdr->sh_link,
4806 sym->st_name);
4807 if (name == NULL)
4808 return FALSE;
4809 if (*name == '\0')
4810 name = bfd_section_name (input_bfd, sec);
4811 }
4812
4813 if (r == bfd_reloc_overflow)
4814 {
4815 if (! ((*info->callbacks->reloc_overflow)
4816 (info, (h ? &h->root : NULL), name, howto->name,
4817 (bfd_vma) 0, input_bfd, input_section,
4818 rel->r_offset)))
4819 return FALSE;
4820 }
4821 else
4822 {
4823 (*_bfd_error_handler)
4824 (_("%B(%A+0x%lx): reloc against `%s': error %d"),
4825 input_bfd, input_section,
4826 (long) rel->r_offset, name, (int) r);
4827 return FALSE;
4828 }
4829 }
4830 }
4831
4832 return TRUE;
4833 }
4834
4835 /* Finish up dynamic symbol handling. We set the contents of various
4836 dynamic sections here. */
4837
4838 static bfd_boolean
4839 elf_x86_64_finish_dynamic_symbol (bfd *output_bfd,
4840 struct bfd_link_info *info,
4841 struct elf_link_hash_entry *h,
4842 Elf_Internal_Sym *sym ATTRIBUTE_UNUSED)
4843 {
4844 struct elf_x86_64_link_hash_table *htab;
4845 const struct elf_x86_64_backend_data *abed;
4846 bfd_boolean use_plt_bnd;
4847 struct elf_x86_64_link_hash_entry *eh;
4848
4849 htab = elf_x86_64_hash_table (info);
4850 if (htab == NULL)
4851 return FALSE;
4852
4853 /* Use MPX backend data in case of BND relocation. Use .plt_bnd
4854 section only if there is .plt section. */
4855 use_plt_bnd = htab->elf.splt != NULL && htab->plt_bnd != NULL;
4856 abed = (use_plt_bnd
4857 ? &elf_x86_64_bnd_arch_bed
4858 : get_elf_x86_64_backend_data (output_bfd));
4859
4860 eh = (struct elf_x86_64_link_hash_entry *) h;
4861
4862 if (h->plt.offset != (bfd_vma) -1)
4863 {
4864 bfd_vma plt_index;
4865 bfd_vma got_offset, plt_offset, plt_plt_offset, plt_got_offset;
4866 bfd_vma plt_plt_insn_end, plt_got_insn_size;
4867 Elf_Internal_Rela rela;
4868 bfd_byte *loc;
4869 asection *plt, *gotplt, *relplt, *resolved_plt;
4870 const struct elf_backend_data *bed;
4871 bfd_vma plt_got_pcrel_offset;
4872
4873 /* When building a static executable, use .iplt, .igot.plt and
4874 .rela.iplt sections for STT_GNU_IFUNC symbols. */
4875 if (htab->elf.splt != NULL)
4876 {
4877 plt = htab->elf.splt;
4878 gotplt = htab->elf.sgotplt;
4879 relplt = htab->elf.srelplt;
4880 }
4881 else
4882 {
4883 plt = htab->elf.iplt;
4884 gotplt = htab->elf.igotplt;
4885 relplt = htab->elf.irelplt;
4886 }
4887
4888 /* This symbol has an entry in the procedure linkage table. Set
4889 it up. */
4890 if ((h->dynindx == -1
4891 && !((h->forced_local || info->executable)
4892 && h->def_regular
4893 && h->type == STT_GNU_IFUNC))
4894 || plt == NULL
4895 || gotplt == NULL
4896 || relplt == NULL)
4897 abort ();
4898
4899 /* Get the index in the procedure linkage table which
4900 corresponds to this symbol. This is the index of this symbol
4901 in all the symbols for which we are making plt entries. The
4902 first entry in the procedure linkage table is reserved.
4903
4904 Get the offset into the .got table of the entry that
4905 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
4906 bytes. The first three are reserved for the dynamic linker.
4907
4908 For static executables, we don't reserve anything. */
4909
4910 if (plt == htab->elf.splt)
4911 {
4912 got_offset = h->plt.offset / abed->plt_entry_size - 1;
4913 got_offset = (got_offset + 3) * GOT_ENTRY_SIZE;
4914 }
4915 else
4916 {
4917 got_offset = h->plt.offset / abed->plt_entry_size;
4918 got_offset = got_offset * GOT_ENTRY_SIZE;
4919 }
4920
4921 plt_plt_insn_end = abed->plt_plt_insn_end;
4922 plt_plt_offset = abed->plt_plt_offset;
4923 plt_got_insn_size = abed->plt_got_insn_size;
4924 plt_got_offset = abed->plt_got_offset;
4925 if (use_plt_bnd)
4926 {
4927 /* Use the second PLT with BND relocations. */
4928 const bfd_byte *plt_entry, *plt2_entry;
4929
4930 if (eh->has_bnd_reloc)
4931 {
4932 plt_entry = elf_x86_64_bnd_plt_entry;
4933 plt2_entry = elf_x86_64_bnd_plt2_entry;
4934 }
4935 else
4936 {
4937 plt_entry = elf_x86_64_legacy_plt_entry;
4938 plt2_entry = elf_x86_64_legacy_plt2_entry;
4939
4940 /* Subtract 1 since there is no BND prefix. */
4941 plt_plt_insn_end -= 1;
4942 plt_plt_offset -= 1;
4943 plt_got_insn_size -= 1;
4944 plt_got_offset -= 1;
4945 }
4946
4947 BFD_ASSERT (sizeof (elf_x86_64_bnd_plt_entry)
4948 == sizeof (elf_x86_64_legacy_plt_entry));
4949
4950 /* Fill in the entry in the procedure linkage table. */
4951 memcpy (plt->contents + h->plt.offset,
4952 plt_entry, sizeof (elf_x86_64_legacy_plt_entry));
4953 /* Fill in the entry in the second PLT. */
4954 memcpy (htab->plt_bnd->contents + eh->plt_bnd.offset,
4955 plt2_entry, sizeof (elf_x86_64_legacy_plt2_entry));
4956
4957 resolved_plt = htab->plt_bnd;
4958 plt_offset = eh->plt_bnd.offset;
4959 }
4960 else
4961 {
4962 /* Fill in the entry in the procedure linkage table. */
4963 memcpy (plt->contents + h->plt.offset, abed->plt_entry,
4964 abed->plt_entry_size);
4965
4966 resolved_plt = plt;
4967 plt_offset = h->plt.offset;
4968 }
4969
4970 /* Now fill in the variable fields of this PLT entry. */
4971
4972 /* Put in the offset to the GOT entry, relative to the end of the
4973 PC-relative instruction that refers to it (hence the subtraction). */
4974 plt_got_pcrel_offset = (gotplt->output_section->vma
4975 + gotplt->output_offset
4976 + got_offset
4977 - resolved_plt->output_section->vma
4978 - resolved_plt->output_offset
4979 - plt_offset
4980 - plt_got_insn_size);
4981
4982 /* Check PC-relative offset overflow in PLT entry. */
4983 if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff)
4984 info->callbacks->einfo (_("%F%B: PC-relative offset overflow in PLT entry for `%s'\n"),
4985 output_bfd, h->root.root.string);
4986
4987 bfd_put_32 (output_bfd, plt_got_pcrel_offset,
4988 resolved_plt->contents + plt_offset + plt_got_offset);
4989
4990 /* Fill in the entry in the global offset table, initially this
4991 points to the second part of the PLT entry. */
4992 bfd_put_64 (output_bfd, (plt->output_section->vma
4993 + plt->output_offset
4994 + h->plt.offset + abed->plt_lazy_offset),
4995 gotplt->contents + got_offset);
4996
4997 /* Fill in the entry in the .rela.plt section. */
4998 rela.r_offset = (gotplt->output_section->vma
4999 + gotplt->output_offset
5000 + got_offset);
5001 if (h->dynindx == -1
5002 || ((info->executable
5003 || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
5004 && h->def_regular
5005 && h->type == STT_GNU_IFUNC))
5006 {
5007 /* If an STT_GNU_IFUNC symbol is locally defined, generate
5008 R_X86_64_IRELATIVE instead of R_X86_64_JUMP_SLOT. */
5009 rela.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
5010 rela.r_addend = (h->root.u.def.value
5011 + h->root.u.def.section->output_section->vma
5012 + h->root.u.def.section->output_offset);
5013 /* R_X86_64_IRELATIVE comes last. */
5014 plt_index = htab->next_irelative_index--;
5015 }
5016 else
5017 {
5018 rela.r_info = htab->r_info (h->dynindx, R_X86_64_JUMP_SLOT);
5019 rela.r_addend = 0;
5020 plt_index = htab->next_jump_slot_index++;
5021 }
5022
5023 /* Don't fill in the lazy-binding PLT fields for static executables. */
5024 if (plt == htab->elf.splt)
5025 {
5026 bfd_vma plt0_offset = h->plt.offset + plt_plt_insn_end;
5027
5028 /* Put relocation index. */
5029 bfd_put_32 (output_bfd, plt_index,
5030 plt->contents + h->plt.offset + abed->plt_reloc_offset);
5031
5032 /* Put offset for jmp .PLT0 and check for overflow. We don't
5033 check relocation index for overflow since branch displacement
5034 will overflow first. */
5035 if (plt0_offset > 0x80000000)
5036 info->callbacks->einfo (_("%F%B: branch displacement overflow in PLT entry for `%s'\n"),
5037 output_bfd, h->root.root.string);
5038 bfd_put_32 (output_bfd, - plt0_offset,
5039 plt->contents + h->plt.offset + plt_plt_offset);
5040 }
5041
5042 bed = get_elf_backend_data (output_bfd);
5043 loc = relplt->contents + plt_index * bed->s->sizeof_rela;
5044 bed->s->swap_reloca_out (output_bfd, &rela, loc);
5045 }
5046 else if (eh->plt_got.offset != (bfd_vma) -1)
5047 {
5048 bfd_vma got_offset, plt_offset, plt_got_offset, plt_got_insn_size;
5049 asection *plt, *got;
5050 bfd_boolean got_after_plt;
5051 int32_t got_pcrel_offset;
5052 const bfd_byte *got_plt_entry;
5053
5054 /* Set the entry in the GOT procedure linkage table. */
5055 plt = htab->plt_got;
5056 got = htab->elf.sgot;
5057 got_offset = h->got.offset;
5058
5059 if (got_offset == (bfd_vma) -1
5060 || h->type == STT_GNU_IFUNC
5061 || plt == NULL
5062 || got == NULL)
5063 abort ();
5064
5065 /* Use the second PLT entry template for the GOT PLT since they
5066 are identical. */
5067 plt_got_insn_size = elf_x86_64_bnd_arch_bed.plt_got_insn_size;
5068 plt_got_offset = elf_x86_64_bnd_arch_bed.plt_got_offset;
5069 if (eh->has_bnd_reloc)
5070 got_plt_entry = elf_x86_64_bnd_plt2_entry;
5071 else
5072 {
5073 got_plt_entry = elf_x86_64_legacy_plt2_entry;
5074
5075 /* Subtract 1 since there is no BND prefix. */
5076 plt_got_insn_size -= 1;
5077 plt_got_offset -= 1;
5078 }
5079
5080 /* Fill in the entry in the GOT procedure linkage table. */
5081 plt_offset = eh->plt_got.offset;
5082 memcpy (plt->contents + plt_offset,
5083 got_plt_entry, sizeof (elf_x86_64_legacy_plt2_entry));
5084
5085 /* Put in the offset to the GOT entry, relative to the end of the
5086 PC-relative instruction that refers to it (hence the subtraction). */
5087 got_pcrel_offset = (got->output_section->vma
5088 + got->output_offset
5089 + got_offset
5090 - plt->output_section->vma
5091 - plt->output_offset
5092 - plt_offset
5093 - plt_got_insn_size);
5094
5095 /* Check PC-relative offset overflow in GOT PLT entry. */
5096 got_after_plt = got->output_section->vma > plt->output_section->vma;
5097 if ((got_after_plt && got_pcrel_offset < 0)
5098 || (!got_after_plt && got_pcrel_offset > 0))
5099 info->callbacks->einfo (_("%F%B: PC-relative offset overflow in GOT PLT entry for `%s'\n"),
5100 output_bfd, h->root.root.string);
5101
5102 bfd_put_32 (output_bfd, got_pcrel_offset,
5103 plt->contents + plt_offset + plt_got_offset);
5104 }
5105
5106 if (!h->def_regular
5107 && (h->plt.offset != (bfd_vma) -1
5108 || eh->plt_got.offset != (bfd_vma) -1))
5109 {
5110 /* Mark the symbol as undefined, rather than as defined in
5111 the .plt section. Leave the value if there were any
5112 relocations where pointer equality matters (this is a clue
5113 for the dynamic linker, to make function pointer
5114 comparisons work between an application and shared
5115 library), otherwise set it to zero. If a function is only
5116 called from a binary, there is no need to slow down
5117 shared libraries because of that. */
5118 sym->st_shndx = SHN_UNDEF;
5119 if (!h->pointer_equality_needed)
5120 sym->st_value = 0;
5121 }
5122
5123 if (h->got.offset != (bfd_vma) -1
5124 && ! GOT_TLS_GD_ANY_P (elf_x86_64_hash_entry (h)->tls_type)
5125 && elf_x86_64_hash_entry (h)->tls_type != GOT_TLS_IE)
5126 {
5127 Elf_Internal_Rela rela;
5128
5129 /* This symbol has an entry in the global offset table. Set it
5130 up. */
5131 if (htab->elf.sgot == NULL || htab->elf.srelgot == NULL)
5132 abort ();
5133
5134 rela.r_offset = (htab->elf.sgot->output_section->vma
5135 + htab->elf.sgot->output_offset
5136 + (h->got.offset &~ (bfd_vma) 1));
5137
5138 /* If this is a static link, or it is a -Bsymbolic link and the
5139 symbol is defined locally or was forced to be local because
5140 of a version file, we just want to emit a RELATIVE reloc.
5141 The entry in the global offset table will already have been
5142 initialized in the relocate_section function. */
5143 if (h->def_regular
5144 && h->type == STT_GNU_IFUNC)
5145 {
5146 if (info->shared)
5147 {
5148 /* Generate R_X86_64_GLOB_DAT. */
5149 goto do_glob_dat;
5150 }
5151 else
5152 {
5153 asection *plt;
5154
5155 if (!h->pointer_equality_needed)
5156 abort ();
5157
5158 /* For a non-shared object, we can't use .got.plt, which
5159 contains the real function address, if we need pointer
5160 equality. We load the GOT entry with the PLT entry. */
5161 plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt;
5162 bfd_put_64 (output_bfd, (plt->output_section->vma
5163 + plt->output_offset
5164 + h->plt.offset),
5165 htab->elf.sgot->contents + h->got.offset);
5166 return TRUE;
5167 }
5168 }
5169 else if (info->shared
5170 && SYMBOL_REFERENCES_LOCAL (info, h))
5171 {
5172 if (!h->def_regular)
5173 return FALSE;
5174 BFD_ASSERT((h->got.offset & 1) != 0);
5175 rela.r_info = htab->r_info (0, R_X86_64_RELATIVE);
5176 rela.r_addend = (h->root.u.def.value
5177 + h->root.u.def.section->output_section->vma
5178 + h->root.u.def.section->output_offset);
5179 }
5180 else
5181 {
5182 BFD_ASSERT((h->got.offset & 1) == 0);
5183 do_glob_dat:
5184 bfd_put_64 (output_bfd, (bfd_vma) 0,
5185 htab->elf.sgot->contents + h->got.offset);
5186 rela.r_info = htab->r_info (h->dynindx, R_X86_64_GLOB_DAT);
5187 rela.r_addend = 0;
5188 }
5189
5190 elf_append_rela (output_bfd, htab->elf.srelgot, &rela);
5191 }
5192
5193 if (h->needs_copy)
5194 {
5195 Elf_Internal_Rela rela;
5196
5197 /* This symbol needs a copy reloc. Set it up. */
5198
5199 if (h->dynindx == -1
5200 || (h->root.type != bfd_link_hash_defined
5201 && h->root.type != bfd_link_hash_defweak)
5202 || htab->srelbss == NULL)
5203 abort ();
5204
5205 rela.r_offset = (h->root.u.def.value
5206 + h->root.u.def.section->output_section->vma
5207 + h->root.u.def.section->output_offset);
5208 rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY);
5209 rela.r_addend = 0;
5210 elf_append_rela (output_bfd, htab->srelbss, &rela);
5211 }
5212
5213 return TRUE;
5214 }
5215
5216 /* Finish up local dynamic symbol handling. We set the contents of
5217 various dynamic sections here. */
5218
5219 static bfd_boolean
5220 elf_x86_64_finish_local_dynamic_symbol (void **slot, void *inf)
5221 {
5222 struct elf_link_hash_entry *h
5223 = (struct elf_link_hash_entry *) *slot;
5224 struct bfd_link_info *info
5225 = (struct bfd_link_info *) inf;
5226
5227 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
5228 info, h, NULL);
5229 }
5230
5231 /* Used to decide how to sort relocs in an optimal manner for the
5232 dynamic linker, before writing them out. */
5233
5234 static enum elf_reloc_type_class
5235 elf_x86_64_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
5236 const asection *rel_sec ATTRIBUTE_UNUSED,
5237 const Elf_Internal_Rela *rela)
5238 {
5239 switch ((int) ELF32_R_TYPE (rela->r_info))
5240 {
5241 case R_X86_64_RELATIVE:
5242 case R_X86_64_RELATIVE64:
5243 return reloc_class_relative;
5244 case R_X86_64_JUMP_SLOT:
5245 return reloc_class_plt;
5246 case R_X86_64_COPY:
5247 return reloc_class_copy;
5248 default:
5249 return reloc_class_normal;
5250 }
5251 }
5252
5253 /* Finish up the dynamic sections. */
5254
5255 static bfd_boolean
5256 elf_x86_64_finish_dynamic_sections (bfd *output_bfd,
5257 struct bfd_link_info *info)
5258 {
5259 struct elf_x86_64_link_hash_table *htab;
5260 bfd *dynobj;
5261 asection *sdyn;
5262 const struct elf_x86_64_backend_data *abed;
5263
5264 htab = elf_x86_64_hash_table (info);
5265 if (htab == NULL)
5266 return FALSE;
5267
5268 /* Use MPX backend data in case of BND relocation. Use .plt_bnd
5269 section only if there is .plt section. */
5270 abed = (htab->elf.splt != NULL && htab->plt_bnd != NULL
5271 ? &elf_x86_64_bnd_arch_bed
5272 : get_elf_x86_64_backend_data (output_bfd));
5273
5274 dynobj = htab->elf.dynobj;
5275 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
5276
5277 if (htab->elf.dynamic_sections_created)
5278 {
5279 bfd_byte *dyncon, *dynconend;
5280 const struct elf_backend_data *bed;
5281 bfd_size_type sizeof_dyn;
5282
5283 if (sdyn == NULL || htab->elf.sgot == NULL)
5284 abort ();
5285
5286 bed = get_elf_backend_data (dynobj);
5287 sizeof_dyn = bed->s->sizeof_dyn;
5288 dyncon = sdyn->contents;
5289 dynconend = sdyn->contents + sdyn->size;
5290 for (; dyncon < dynconend; dyncon += sizeof_dyn)
5291 {
5292 Elf_Internal_Dyn dyn;
5293 asection *s;
5294
5295 (*bed->s->swap_dyn_in) (dynobj, dyncon, &dyn);
5296
5297 switch (dyn.d_tag)
5298 {
5299 default:
5300 continue;
5301
5302 case DT_PLTGOT:
5303 s = htab->elf.sgotplt;
5304 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
5305 break;
5306
5307 case DT_JMPREL:
5308 dyn.d_un.d_ptr = htab->elf.srelplt->output_section->vma;
5309 break;
5310
5311 case DT_PLTRELSZ:
5312 s = htab->elf.srelplt->output_section;
5313 dyn.d_un.d_val = s->size;
5314 break;
5315
5316 case DT_RELASZ:
5317 /* The procedure linkage table relocs (DT_JMPREL) should
5318 not be included in the overall relocs (DT_RELA).
5319 Therefore, we override the DT_RELASZ entry here to
5320 make it not include the JMPREL relocs. Since the
5321 linker script arranges for .rela.plt to follow all
5322 other relocation sections, we don't have to worry
5323 about changing the DT_RELA entry. */
5324 if (htab->elf.srelplt != NULL)
5325 {
5326 s = htab->elf.srelplt->output_section;
5327 dyn.d_un.d_val -= s->size;
5328 }
5329 break;
5330
5331 case DT_TLSDESC_PLT:
5332 s = htab->elf.splt;
5333 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
5334 + htab->tlsdesc_plt;
5335 break;
5336
5337 case DT_TLSDESC_GOT:
5338 s = htab->elf.sgot;
5339 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
5340 + htab->tlsdesc_got;
5341 break;
5342 }
5343
5344 (*bed->s->swap_dyn_out) (output_bfd, &dyn, dyncon);
5345 }
5346
5347 /* Fill in the special first entry in the procedure linkage table. */
5348 if (htab->elf.splt && htab->elf.splt->size > 0)
5349 {
5350 /* Fill in the first entry in the procedure linkage table. */
5351 memcpy (htab->elf.splt->contents,
5352 abed->plt0_entry, abed->plt_entry_size);
5353 /* Add the offset for pushq GOT+8(%rip); since the instruction
5354 uses 6 bytes, subtract this value. */
5355 bfd_put_32 (output_bfd,
5356 (htab->elf.sgotplt->output_section->vma
5357 + htab->elf.sgotplt->output_offset
5358 + 8
5359 - htab->elf.splt->output_section->vma
5360 - htab->elf.splt->output_offset
5361 - 6),
5362 htab->elf.splt->contents + abed->plt0_got1_offset);
5363 /* Add offset for the PC-relative instruction accessing GOT+16,
5364 subtracting the offset to the end of that instruction. */
5365 bfd_put_32 (output_bfd,
5366 (htab->elf.sgotplt->output_section->vma
5367 + htab->elf.sgotplt->output_offset
5368 + 16
5369 - htab->elf.splt->output_section->vma
5370 - htab->elf.splt->output_offset
5371 - abed->plt0_got2_insn_end),
5372 htab->elf.splt->contents + abed->plt0_got2_offset);
5373
5374 elf_section_data (htab->elf.splt->output_section)
5375 ->this_hdr.sh_entsize = abed->plt_entry_size;
5376
5377 if (htab->tlsdesc_plt)
5378 {
5379 bfd_put_64 (output_bfd, (bfd_vma) 0,
5380 htab->elf.sgot->contents + htab->tlsdesc_got);
5381
5382 memcpy (htab->elf.splt->contents + htab->tlsdesc_plt,
5383 abed->plt0_entry, abed->plt_entry_size);
5384
5385 /* Add the offset for pushq GOT+8(%rip); since the
5386 instruction uses 6 bytes, subtract this value. */
5387 bfd_put_32 (output_bfd,
5388 (htab->elf.sgotplt->output_section->vma
5389 + htab->elf.sgotplt->output_offset
5390 + 8
5391 - htab->elf.splt->output_section->vma
5392 - htab->elf.splt->output_offset
5393 - htab->tlsdesc_plt
5394 - 6),
5395 htab->elf.splt->contents
5396 + htab->tlsdesc_plt + abed->plt0_got1_offset);
5397 /* Add the offset for the PC-relative instruction accessing GOT+TDG,
5398 where TDG stands for htab->tlsdesc_got, subtracting the offset
5399 to the end of that instruction. */
5400 bfd_put_32 (output_bfd,
5401 (htab->elf.sgot->output_section->vma
5402 + htab->elf.sgot->output_offset
5403 + htab->tlsdesc_got
5404 - htab->elf.splt->output_section->vma
5405 - htab->elf.splt->output_offset
5406 - htab->tlsdesc_plt
5407 - abed->plt0_got2_insn_end),
5408 htab->elf.splt->contents
5409 + htab->tlsdesc_plt + abed->plt0_got2_offset);
5410 }
5411 }
5412 }
5413
5414 if (htab->plt_bnd != NULL)
5415 elf_section_data (htab->plt_bnd->output_section)
5416 ->this_hdr.sh_entsize = sizeof (elf_x86_64_bnd_plt2_entry);
5417
5418 if (htab->elf.sgotplt)
5419 {
5420 if (bfd_is_abs_section (htab->elf.sgotplt->output_section))
5421 {
5422 (*_bfd_error_handler)
5423 (_("discarded output section: `%A'"), htab->elf.sgotplt);
5424 return FALSE;
5425 }
5426
5427 /* Fill in the first three entries in the global offset table. */
5428 if (htab->elf.sgotplt->size > 0)
5429 {
5430 /* Set the first entry in the global offset table to the address of
5431 the dynamic section. */
5432 if (sdyn == NULL)
5433 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents);
5434 else
5435 bfd_put_64 (output_bfd,
5436 sdyn->output_section->vma + sdyn->output_offset,
5437 htab->elf.sgotplt->contents);
5438 /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
5439 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents + GOT_ENTRY_SIZE);
5440 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents + GOT_ENTRY_SIZE*2);
5441 }
5442
5443 elf_section_data (htab->elf.sgotplt->output_section)->this_hdr.sh_entsize =
5444 GOT_ENTRY_SIZE;
5445 }
5446
5447 /* Adjust .eh_frame for .plt section. */
5448 if (htab->plt_eh_frame != NULL
5449 && htab->plt_eh_frame->contents != NULL)
5450 {
5451 if (htab->elf.splt != NULL
5452 && htab->elf.splt->size != 0
5453 && (htab->elf.splt->flags & SEC_EXCLUDE) == 0
5454 && htab->elf.splt->output_section != NULL
5455 && htab->plt_eh_frame->output_section != NULL)
5456 {
5457 bfd_vma plt_start = htab->elf.splt->output_section->vma;
5458 bfd_vma eh_frame_start = htab->plt_eh_frame->output_section->vma
5459 + htab->plt_eh_frame->output_offset
5460 + PLT_FDE_START_OFFSET;
5461 bfd_put_signed_32 (dynobj, plt_start - eh_frame_start,
5462 htab->plt_eh_frame->contents
5463 + PLT_FDE_START_OFFSET);
5464 }
5465 if (htab->plt_eh_frame->sec_info_type == SEC_INFO_TYPE_EH_FRAME)
5466 {
5467 if (! _bfd_elf_write_section_eh_frame (output_bfd, info,
5468 htab->plt_eh_frame,
5469 htab->plt_eh_frame->contents))
5470 return FALSE;
5471 }
5472 }
5473
5474 if (htab->elf.sgot && htab->elf.sgot->size > 0)
5475 elf_section_data (htab->elf.sgot->output_section)->this_hdr.sh_entsize
5476 = GOT_ENTRY_SIZE;
5477
5478 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
5479 htab_traverse (htab->loc_hash_table,
5480 elf_x86_64_finish_local_dynamic_symbol,
5481 info);
5482
5483 return TRUE;
5484 }
5485
5486 /* Return address in section PLT for the Ith GOTPLT relocation, for
5487 relocation REL or (bfd_vma) -1 if it should not be included. */
5488
5489 static bfd_vma
5490 elf_x86_64_plt_sym_val (bfd_vma i, const asection *plt,
5491 const arelent *rel)
5492 {
5493 bfd *abfd;
5494 const struct elf_x86_64_backend_data *bed;
5495 bfd_vma plt_offset;
5496
5497 /* Only match R_X86_64_JUMP_SLOT and R_X86_64_IRELATIVE. */
5498 if (rel->howto->type != R_X86_64_JUMP_SLOT
5499 && rel->howto->type != R_X86_64_IRELATIVE)
5500 return (bfd_vma) -1;
5501
5502 abfd = plt->owner;
5503 bed = get_elf_x86_64_backend_data (abfd);
5504 plt_offset = bed->plt_entry_size;
5505
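/* Without the GNU OSABI marking, the PLT layout is fixed: relocation
I corresponds to PLT entry I + 1 (entry 0 is the reserved PLT0). */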
5506 if (elf_elfheader (abfd)->e_ident[EI_OSABI] != ELFOSABI_GNU)
5507 return plt->vma + (i + 1) * plt_offset;
5508
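/* Otherwise the PLT entries need not be in relocation order, so scan
them and match the relocation index stored in each entry against I. */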
5509 while (plt_offset < plt->size)
5510 {
5511 bfd_vma reloc_index;
5512 bfd_byte reloc_index_raw[4];
5513
5514 if (!bfd_get_section_contents (abfd, (asection *) plt,
5515 reloc_index_raw,
5516 plt_offset + bed->plt_reloc_offset,
5517 sizeof (reloc_index_raw)))
5518 return (bfd_vma) -1;
5519
5520 reloc_index = H_GET_32 (abfd, reloc_index_raw);
5521 if (reloc_index == i)
5522 return plt->vma + plt_offset;
5523 plt_offset += bed->plt_entry_size;
5524 }
5525
5526 abort ();
5527 }
5528
5529 /* Return offset in .plt.bnd section for the Ith GOTPLT relocation with
5530 PLT section, or (bfd_vma) -1 if it should not be included. */
5531
5532 static bfd_vma
5533 elf_x86_64_plt_sym_val_offset_plt_bnd (bfd_vma i, const asection *plt)
5534 {
5535 const struct elf_x86_64_backend_data *bed = &elf_x86_64_bnd_arch_bed;
5536 bfd *abfd = plt->owner;
5537 bfd_vma plt_offset = bed->plt_entry_size;
5538
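/* Without the GNU OSABI marking, relocation I maps directly to the
Ith .plt.bnd entry; unlike .plt, .plt.bnd has no reserved first
entry. */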
5539 if (elf_elfheader (abfd)->e_ident[EI_OSABI] != ELFOSABI_GNU)
5540 return i * sizeof (elf_x86_64_legacy_plt2_entry);
5541
5542 while (plt_offset < plt->size)
5543 {
5544 bfd_vma reloc_index;
5545 bfd_byte reloc_index_raw[4];
5546
5547 if (!bfd_get_section_contents (abfd, (asection *) plt,
5548 reloc_index_raw,
5549 plt_offset + bed->plt_reloc_offset,
5550 sizeof (reloc_index_raw)))
5551 return (bfd_vma) -1;
5552
5553 reloc_index = H_GET_32 (abfd, reloc_index_raw);
5554 if (reloc_index == i)
5555 {
5556 /* This is the index in .plt section. */
5557 long plt_index = plt_offset / bed->plt_entry_size;
5558 /* Return the offset in .plt.bnd section. */
5559 return (plt_index - 1) * sizeof (elf_x86_64_legacy_plt2_entry);
5560 }
5561 plt_offset += bed->plt_entry_size;
5562 }
5563
5564 abort ();
5565 }
5566
5567 /* Similar to _bfd_elf_get_synthetic_symtab, with .plt.bnd section
5568 support. */
5569
5570 static long
5571 elf_x86_64_get_synthetic_symtab (bfd *abfd,
5572 long symcount,
5573 asymbol **syms,
5574 long dynsymcount,
5575 asymbol **dynsyms,
5576 asymbol **ret)
5577 {
5578 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
5579 asection *relplt;
5580 asymbol *s;
5581 bfd_boolean (*slurp_relocs) (bfd *, asection *, asymbol **, bfd_boolean);
5582 arelent *p;
5583 long count, i, n;
5584 size_t size;
5585 Elf_Internal_Shdr *hdr;
5586 char *names;
5587 asection *plt, *plt_push;
5588
5589 plt_push = bfd_get_section_by_name (abfd, ".plt");
5590 if (plt_push == NULL)
5591 return 0;
5592
5593 plt = bfd_get_section_by_name (abfd, ".plt.bnd");
5594 /* Use the generic ELF version if there is no .plt.bnd section. */
5595 if (plt == NULL)
5596 return _bfd_elf_get_synthetic_symtab (abfd, symcount, syms,
5597 dynsymcount, dynsyms, ret);
5598
5599 *ret = NULL;
5600
5601 if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
5602 return 0;
5603
5604 if (dynsymcount <= 0)
5605 return 0;
5606
5607 relplt = bfd_get_section_by_name (abfd, ".rela.plt");
5608 if (relplt == NULL)
5609 return 0;
5610
5611 hdr = &elf_section_data (relplt)->this_hdr;
5612 if (hdr->sh_link != elf_dynsymtab (abfd)
5613 || (hdr->sh_type != SHT_REL && hdr->sh_type != SHT_RELA))
5614 return 0;
5615
5616 slurp_relocs = get_elf_backend_data (abfd)->s->slurp_reloc_table;
5617 if (! (*slurp_relocs) (abfd, relplt, dynsyms, TRUE))
5618 return -1;
5619
5620 count = relplt->size / hdr->sh_entsize;
5621 size = count * sizeof (asymbol);
5622 p = relplt->relocation;
5623 for (i = 0; i < count; i++, p += bed->s->int_rels_per_ext_rel)
5624 {
5625 size += strlen ((*p->sym_ptr_ptr)->name) + sizeof ("@plt");
5626 if (p->addend != 0)
5627 size += sizeof ("+0x") - 1 + 8 + 8;
5628 }
5629
5630 s = *ret = (asymbol *) bfd_malloc (size);
5631 if (s == NULL)
5632 return -1;
5633
5634 names = (char *) (s + count);
5635 p = relplt->relocation;
5636 n = 0;
5637 for (i = 0; i < count; i++, p++)
5638 {
5639 bfd_vma offset;
5640 size_t len;
5641
5642 if (p->howto->type != R_X86_64_JUMP_SLOT
5643 && p->howto->type != R_X86_64_IRELATIVE)
5644 continue;
5645
5646 offset = elf_x86_64_plt_sym_val_offset_plt_bnd (i, plt_push);
5647
5648 *s = **p->sym_ptr_ptr;
5649 /* Undefined syms won't have BSF_LOCAL or BSF_GLOBAL set. Since
5650 we are defining a symbol, ensure one of them is set. */
5651 if ((s->flags & BSF_LOCAL) == 0)
5652 s->flags |= BSF_GLOBAL;
5653 s->flags |= BSF_SYNTHETIC;
5654 s->section = plt;
5655 s->value = offset;
5656 s->name = names;
5657 s->udata.p = NULL;
5658 len = strlen ((*p->sym_ptr_ptr)->name);
5659 memcpy (names, (*p->sym_ptr_ptr)->name, len);
5660 names += len;
5661 if (p->addend != 0)
5662 {
5663 char buf[30], *a;
5664
5665 memcpy (names, "+0x", sizeof ("+0x") - 1);
5666 names += sizeof ("+0x") - 1;
5667 bfd_sprintf_vma (abfd, buf, p->addend);
5668 for (a = buf; *a == '0'; ++a)
5669 ;
5670 len = strlen (a);
5671 memcpy (names, a, len);
5672 names += len;
5673 }
5674 memcpy (names, "@plt", sizeof ("@plt"));
5675 names += sizeof ("@plt");
5676 ++s, ++n;
5677 }
5678
5679 return n;
5680 }
5681
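/* For illustration: the synthetic symbols built above live in .plt.bnd
   and are named after the dynamic symbol of each R_X86_64_JUMP_SLOT or
   R_X86_64_IRELATIVE relocation, e.g. "foo@plt", or "foo+0x10@plt" when
   the relocation carries a non-zero addend ("foo" is a hypothetical
   name).  Tools such as objdump use these entries to label calls that
   go through the BND PLT.  */
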
5682 /* Handle an x86-64 specific section when reading an object file. This
5683 is called when elfcode.h finds a section with an unknown type. */
5684
5685 static bfd_boolean
5686 elf_x86_64_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr,
5687 const char *name, int shindex)
5688 {
5689 if (hdr->sh_type != SHT_X86_64_UNWIND)
5690 return FALSE;
5691
5692 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
5693 return FALSE;
5694
5695 return TRUE;
5696 }
5697
5698 /* Hook called by the linker routine which adds symbols from an object
5699 file. We use it to put SHN_X86_64_LCOMMON items in .lbss, instead
5700 of .bss. */
5701
5702 static bfd_boolean
5703 elf_x86_64_add_symbol_hook (bfd *abfd,
5704 struct bfd_link_info *info,
5705 Elf_Internal_Sym *sym,
5706 const char **namep ATTRIBUTE_UNUSED,
5707 flagword *flagsp ATTRIBUTE_UNUSED,
5708 asection **secp,
5709 bfd_vma *valp)
5710 {
5711 asection *lcomm;
5712
5713 switch (sym->st_shndx)
5714 {
5715 case SHN_X86_64_LCOMMON:
5716 lcomm = bfd_get_section_by_name (abfd, "LARGE_COMMON");
5717 if (lcomm == NULL)
5718 {
5719 lcomm = bfd_make_section_with_flags (abfd,
5720 "LARGE_COMMON",
5721 (SEC_ALLOC
5722 | SEC_IS_COMMON
5723 | SEC_LINKER_CREATED));
5724 if (lcomm == NULL)
5725 return FALSE;
5726 elf_section_flags (lcomm) |= SHF_X86_64_LARGE;
5727 }
5728 *secp = lcomm;
5729 *valp = sym->st_size;
5730 return TRUE;
5731 }
5732
5733 if ((ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC
5734 || ELF_ST_BIND (sym->st_info) == STB_GNU_UNIQUE)
5735 && (abfd->flags & DYNAMIC) == 0
5736 && bfd_get_flavour (info->output_bfd) == bfd_target_elf_flavour)
5737 elf_tdata (info->output_bfd)->has_gnu_symbols = TRUE;
5738
5739 return TRUE;
5740 }
5741
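/* For illustration (hypothetical input): a large common symbol, such as
   one a compiler may emit for medium/large code model data, arrives
   with st_shndx == SHN_X86_64_LCOMMON; the hook above parks it in the
   fake "LARGE_COMMON" section carrying SHF_X86_64_LARGE so that its
   space is later allocated in .lbss rather than .bss.  */
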
5742
5743 /* Given a BFD section, try to locate the corresponding ELF section
5744 index. */
5745
5746 static bfd_boolean
5747 elf_x86_64_elf_section_from_bfd_section (bfd *abfd ATTRIBUTE_UNUSED,
5748 asection *sec, int *index_return)
5749 {
5750 if (sec == &_bfd_elf_large_com_section)
5751 {
5752 *index_return = SHN_X86_64_LCOMMON;
5753 return TRUE;
5754 }
5755 return FALSE;
5756 }
5757
5758 /* Process a symbol. */
5759
5760 static void
5761 elf_x86_64_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
5762 asymbol *asym)
5763 {
5764 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
5765
5766 switch (elfsym->internal_elf_sym.st_shndx)
5767 {
5768 case SHN_X86_64_LCOMMON:
5769 asym->section = &_bfd_elf_large_com_section;
5770 asym->value = elfsym->internal_elf_sym.st_size;
5771 /* Common symbols don't set BSF_GLOBAL. */
5772 asym->flags &= ~BSF_GLOBAL;
5773 break;
5774 }
5775 }
5776
5777 static bfd_boolean
5778 elf_x86_64_common_definition (Elf_Internal_Sym *sym)
5779 {
5780 return (sym->st_shndx == SHN_COMMON
5781 || sym->st_shndx == SHN_X86_64_LCOMMON);
5782 }
5783
5784 static unsigned int
5785 elf_x86_64_common_section_index (asection *sec)
5786 {
5787 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
5788 return SHN_COMMON;
5789 else
5790 return SHN_X86_64_LCOMMON;
5791 }
5792
5793 static asection *
5794 elf_x86_64_common_section (asection *sec)
5795 {
5796 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
5797 return bfd_com_section_ptr;
5798 else
5799 return &_bfd_elf_large_com_section;
5800 }
5801
5802 static bfd_boolean
5803 elf_x86_64_merge_symbol (struct elf_link_hash_entry *h,
5804 const Elf_Internal_Sym *sym,
5805 asection **psec,
5806 bfd_boolean newdef,
5807 bfd_boolean olddef,
5808 bfd *oldbfd,
5809 const asection *oldsec)
5810 {
5811 /* A normal common symbol and a large common symbol result in a
5812 normal common symbol. We turn the large common symbol into a
5813 normal one. */
5814 if (!olddef
5815 && h->root.type == bfd_link_hash_common
5816 && !newdef
5817 && bfd_is_com_section (*psec)
5818 && oldsec != *psec)
5819 {
5820 if (sym->st_shndx == SHN_COMMON
5821 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) != 0)
5822 {
5823 h->root.u.c.p->section
5824 = bfd_make_section_old_way (oldbfd, "COMMON");
5825 h->root.u.c.p->section->flags = SEC_ALLOC;
5826 }
5827 else if (sym->st_shndx == SHN_X86_64_LCOMMON
5828 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) == 0)
5829 *psec = bfd_com_section_ptr;
5830 }
5831
5832 return TRUE;
5833 }
5834
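/* For illustration: if one input provides "sym" as a normal common
   (SHN_COMMON) and another as a large common (SHN_X86_64_LCOMMON), the
   merged result stays a normal common ("sym" is a hypothetical name).
   Depending on which side is the large one, the code above either
   rewrites the old large-common section into a plain "COMMON" section
   or points the new symbol at bfd_com_section_ptr.  */
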
5835 static int
5836 elf_x86_64_additional_program_headers (bfd *abfd,
5837 struct bfd_link_info *info ATTRIBUTE_UNUSED)
5838 {
5839 asection *s;
5840 int count = 0;
5841
5842 /* Check to see if we need a large readonly segment. */
5843 s = bfd_get_section_by_name (abfd, ".lrodata");
5844 if (s && (s->flags & SEC_LOAD))
5845 count++;
5846
5847 /* Check to see if we need a large data segment. Since the .lbss
5848 section is placed right after the .bss section, there should be no
5849 need for a large data segment just because of .lbss. */
5850 s = bfd_get_section_by_name (abfd, ".ldata");
5851 if (s && (s->flags & SEC_LOAD))
5852 count++;
5853
5854 return count;
5855 }
5856
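/* For illustration: loadable .lrodata and .ldata sections (medium/large
   code model data) each need their own PT_LOAD segment, so the count
   returned above tells the generic linker code how many extra program
   headers to reserve beyond the default set.  */
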
5857 /* Return TRUE if symbol should be hashed in the `.gnu.hash' section. */
5858
5859 static bfd_boolean
5860 elf_x86_64_hash_symbol (struct elf_link_hash_entry *h)
5861 {
5862 if (h->plt.offset != (bfd_vma) -1
5863 && !h->def_regular
5864 && !h->pointer_equality_needed)
5865 return FALSE;
5866
5867 return _bfd_elf_hash_symbol (h);
5868 }
5869
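/* For illustration: a symbol that is only ever reached through its PLT
   entry (h->plt.offset is set), is not defined by a regular object, and
   never has its address compared for equality will never be the target
   of a run-time lookup in this object, so it is omitted from the
   .gnu.hash table above while still getting a .dynsym entry.  */
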
5870 /* Return TRUE iff relocations for INPUT are compatible with OUTPUT. */
5871
5872 static bfd_boolean
5873 elf_x86_64_relocs_compatible (const bfd_target *input,
5874 const bfd_target *output)
5875 {
5876 return ((xvec_get_elf_backend_data (input)->s->elfclass
5877 == xvec_get_elf_backend_data (output)->s->elfclass)
5878 && _bfd_elf_relocs_compatible (input, output));
5879 }
5880
5881 static const struct bfd_elf_special_section
5882 elf_x86_64_special_sections[]=
5883 {
5884 { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5885 { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
5886 { STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE},
5887 { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5888 { STRING_COMMA_LEN (".ldata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5889 { STRING_COMMA_LEN (".lrodata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
5890 { NULL, 0, 0, 0, 0 }
5891 };
5892
5893 #define TARGET_LITTLE_SYM x86_64_elf64_vec
5894 #define TARGET_LITTLE_NAME "elf64-x86-64"
5895 #define ELF_ARCH bfd_arch_i386
5896 #define ELF_TARGET_ID X86_64_ELF_DATA
5897 #define ELF_MACHINE_CODE EM_X86_64
5898 #define ELF_MAXPAGESIZE 0x200000
5899 #define ELF_MINPAGESIZE 0x1000
5900 #define ELF_COMMONPAGESIZE 0x1000
5901
5902 #define elf_backend_can_gc_sections 1
5903 #define elf_backend_can_refcount 1
5904 #define elf_backend_want_got_plt 1
5905 #define elf_backend_plt_readonly 1
5906 #define elf_backend_want_plt_sym 0
5907 #define elf_backend_got_header_size (GOT_ENTRY_SIZE*3)
5908 #define elf_backend_rela_normal 1
5909 #define elf_backend_plt_alignment 4
5910
5911 #define elf_info_to_howto elf_x86_64_info_to_howto
5912
5913 #define bfd_elf64_bfd_link_hash_table_create \
5914 elf_x86_64_link_hash_table_create
5915 #define bfd_elf64_bfd_reloc_type_lookup elf_x86_64_reloc_type_lookup
5916 #define bfd_elf64_bfd_reloc_name_lookup \
5917 elf_x86_64_reloc_name_lookup
5918
5919 #define elf_backend_adjust_dynamic_symbol elf_x86_64_adjust_dynamic_symbol
5920 #define elf_backend_relocs_compatible elf_x86_64_relocs_compatible
5921 #define elf_backend_check_relocs elf_x86_64_check_relocs
5922 #define elf_backend_copy_indirect_symbol elf_x86_64_copy_indirect_symbol
5923 #define elf_backend_create_dynamic_sections elf_x86_64_create_dynamic_sections
5924 #define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections
5925 #define elf_backend_finish_dynamic_symbol elf_x86_64_finish_dynamic_symbol
5926 #define elf_backend_gc_mark_hook elf_x86_64_gc_mark_hook
5927 #define elf_backend_gc_sweep_hook elf_x86_64_gc_sweep_hook
5928 #define elf_backend_grok_prstatus elf_x86_64_grok_prstatus
5929 #define elf_backend_grok_psinfo elf_x86_64_grok_psinfo
5930 #ifdef CORE_HEADER
5931 #define elf_backend_write_core_note elf_x86_64_write_core_note
5932 #endif
5933 #define elf_backend_reloc_type_class elf_x86_64_reloc_type_class
5934 #define elf_backend_relocate_section elf_x86_64_relocate_section
5935 #define elf_backend_size_dynamic_sections elf_x86_64_size_dynamic_sections
5936 #define elf_backend_always_size_sections elf_x86_64_always_size_sections
5937 #define elf_backend_init_index_section _bfd_elf_init_1_index_section
5938 #define elf_backend_plt_sym_val elf_x86_64_plt_sym_val
5939 #define elf_backend_object_p elf64_x86_64_elf_object_p
5940 #define bfd_elf64_mkobject elf_x86_64_mkobject
5941 #define bfd_elf64_get_synthetic_symtab elf_x86_64_get_synthetic_symtab
5942
5943 #define elf_backend_section_from_shdr \
5944 elf_x86_64_section_from_shdr
5945
5946 #define elf_backend_section_from_bfd_section \
5947 elf_x86_64_elf_section_from_bfd_section
5948 #define elf_backend_add_symbol_hook \
5949 elf_x86_64_add_symbol_hook
5950 #define elf_backend_symbol_processing \
5951 elf_x86_64_symbol_processing
5952 #define elf_backend_common_section_index \
5953 elf_x86_64_common_section_index
5954 #define elf_backend_common_section \
5955 elf_x86_64_common_section
5956 #define elf_backend_common_definition \
5957 elf_x86_64_common_definition
5958 #define elf_backend_merge_symbol \
5959 elf_x86_64_merge_symbol
5960 #define elf_backend_special_sections \
5961 elf_x86_64_special_sections
5962 #define elf_backend_additional_program_headers \
5963 elf_x86_64_additional_program_headers
5964 #define elf_backend_hash_symbol \
5965 elf_x86_64_hash_symbol
5966
5967 #include "elf64-target.h"
5968
5969 /* FreeBSD support. */
5970
5971 #undef TARGET_LITTLE_SYM
5972 #define TARGET_LITTLE_SYM x86_64_elf64_fbsd_vec
5973 #undef TARGET_LITTLE_NAME
5974 #define TARGET_LITTLE_NAME "elf64-x86-64-freebsd"
5975
5976 #undef ELF_OSABI
5977 #define ELF_OSABI ELFOSABI_FREEBSD
5978
5979 #undef elf64_bed
5980 #define elf64_bed elf64_x86_64_fbsd_bed
5981
5982 #include "elf64-target.h"
5983
5984 /* Solaris 2 support. */
5985
5986 #undef TARGET_LITTLE_SYM
5987 #define TARGET_LITTLE_SYM x86_64_elf64_sol2_vec
5988 #undef TARGET_LITTLE_NAME
5989 #define TARGET_LITTLE_NAME "elf64-x86-64-sol2"
5990
5991 /* Restore default: we cannot use ELFOSABI_SOLARIS, otherwise ELFOSABI_NONE
5992 objects won't be recognized. */
5993 #undef ELF_OSABI
5994
5995 #undef elf64_bed
5996 #define elf64_bed elf64_x86_64_sol2_bed
5997
5998 /* The 64-bit static TLS arena size is rounded to the nearest 16-byte
5999 boundary. */
6000 #undef elf_backend_static_tls_alignment
6001 #define elf_backend_static_tls_alignment 16
6002
6003 /* The Solaris 2 ABI requires a plt symbol on all platforms.
6004
6005 Cf. Linker and Libraries Guide, Ch. 2, Link-Editor, Generating the Output
6006 File, p.63. */
6007 #undef elf_backend_want_plt_sym
6008 #define elf_backend_want_plt_sym 1
6009
6010 #include "elf64-target.h"
6011
6012 #undef bfd_elf64_get_synthetic_symtab
6013
6014 /* Native Client support. */
6015
6016 static bfd_boolean
6017 elf64_x86_64_nacl_elf_object_p (bfd *abfd)
6018 {
6019 /* Set the right machine number for a NaCl x86-64 ELF64 file. */
6020 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64_nacl);
6021 return TRUE;
6022 }
6023
6024 #undef TARGET_LITTLE_SYM
6025 #define TARGET_LITTLE_SYM x86_64_elf64_nacl_vec
6026 #undef TARGET_LITTLE_NAME
6027 #define TARGET_LITTLE_NAME "elf64-x86-64-nacl"
6028 #undef elf64_bed
6029 #define elf64_bed elf64_x86_64_nacl_bed
6030
6031 #undef ELF_MAXPAGESIZE
6032 #undef ELF_MINPAGESIZE
6033 #undef ELF_COMMONPAGESIZE
6034 #define ELF_MAXPAGESIZE 0x10000
6035 #define ELF_MINPAGESIZE 0x10000
6036 #define ELF_COMMONPAGESIZE 0x10000
6037
6038 /* Restore defaults. */
6039 #undef ELF_OSABI
6040 #undef elf_backend_static_tls_alignment
6041 #undef elf_backend_want_plt_sym
6042 #define elf_backend_want_plt_sym 0
6043
6044 /* NaCl uses substantially different PLT entries for the same effects. */
6045
6046 #undef elf_backend_plt_alignment
6047 #define elf_backend_plt_alignment 5
6048 #define NACL_PLT_ENTRY_SIZE 64
6049 #define NACLMASK 0xe0 /* 32-byte alignment mask. */
6050
6051 static const bfd_byte elf_x86_64_nacl_plt0_entry[NACL_PLT_ENTRY_SIZE] =
6052 {
6053 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
6054 0x4c, 0x8b, 0x1d, 16, 0, 0, 0, /* mov GOT+16(%rip), %r11 */
6055 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
6056 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
6057 0x41, 0xff, 0xe3, /* jmpq *%r11 */
6058
6059 /* 9-byte nop sequence to pad out to the next 32-byte boundary. */
6060 0x66, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw 0x0(%rax,%rax,1) */
6061
6062 /* 32 bytes of nop to pad out to the standard size. */
6063 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
6064 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6065 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
6066 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6067 0x66, /* excess data32 prefix */
6068 0x90 /* nop */
6069 };
6070
6071 static const bfd_byte elf_x86_64_nacl_plt_entry[NACL_PLT_ENTRY_SIZE] =
6072 {
6073 0x4c, 0x8b, 0x1d, 0, 0, 0, 0, /* mov name@GOTPCREL(%rip),%r11 */
6074 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
6075 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
6076 0x41, 0xff, 0xe3, /* jmpq *%r11 */
6077
6078 /* 15-byte nop sequence to pad out to the next 32-byte boundary. */
6079 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
6080 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6081
6082 /* Lazy GOT entries point here (32-byte aligned). */
6083 0x68, /* pushq immediate */
6084 0, 0, 0, 0, /* replaced with index into relocation table. */
6085 0xe9, /* jmp relative */
6086 0, 0, 0, 0, /* replaced with offset to start of .plt0. */
6087
6088 /* 22 bytes of nop to pad out to the standard size. */
6089 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
6090 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6091 0x0f, 0x1f, 0x80, 0, 0, 0, 0, /* nopl 0x0(%rax) */
6092 };
6093
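/* For illustration: both NaCl stubs end with the same sandboxed
   indirect transfer.  The target is loaded into %r11, its low five bits
   are cleared with NACLMASK (0xe0), and the sandbox base in %r15 is
   added, so the jump always lands 32-byte aligned inside the sandbox:

     and  $-32, %r11d        # force 32-byte alignment
     add  %r15, %r11         # rebase into the sandbox
     jmpq *%r11

   This is also why the entries are padded with nops to 32-byte bundle
   boundaries and NACL_PLT_ENTRY_SIZE is 64.  */
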
6094 /* .eh_frame covering the .plt section. */
6095
6096 static const bfd_byte elf_x86_64_nacl_eh_frame_plt[] =
6097 {
6098 #if (PLT_CIE_LENGTH != 20 \
6099 || PLT_FDE_LENGTH != 36 \
6100 || PLT_FDE_START_OFFSET != 4 + PLT_CIE_LENGTH + 8 \
6101 || PLT_FDE_LEN_OFFSET != 4 + PLT_CIE_LENGTH + 12)
6102 # error "Need elf_x86_64_backend_data parameters for eh_frame_plt offsets!"
6103 #endif
6104 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
6105 0, 0, 0, 0, /* CIE ID */
6106 1, /* CIE version */
6107 'z', 'R', 0, /* Augmentation string */
6108 1, /* Code alignment factor */
6109 0x78, /* Data alignment factor */
6110 16, /* Return address column */
6111 1, /* Augmentation size */
6112 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
6113 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
6114 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
6115 DW_CFA_nop, DW_CFA_nop,
6116
6117 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
6118 PLT_CIE_LENGTH + 8, 0, 0, 0,/* CIE pointer */
6119 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
6120 0, 0, 0, 0, /* .plt size goes here */
6121 0, /* Augmentation size */
6122 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
6123 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
6124 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
6125 DW_CFA_advance_loc + 58, /* DW_CFA_advance_loc: 58 to __PLT__+64 */
6126 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
6127 13, /* Block length */
6128 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
6129 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
6130 DW_OP_const1u, 63, DW_OP_and, DW_OP_const1u, 37, DW_OP_ge,
6131 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
6132 DW_CFA_nop, DW_CFA_nop
6133 };
6134
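/* For illustration, a rough reading of the DW_CFA_def_cfa_expression
   above: it evaluates to

     %rsp + 8 + (((%rip & 63) >= 37) << 3)

   i.e. the CFA is %rsp + 8 through the first part of each 64-byte PLT
   entry and %rsp + 16 once the "pushq" of the relocation index in the
   lazy-binding tail has executed, so unwinding works from any
   instruction inside a NaCl PLT stub.  */
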
6135 static const struct elf_x86_64_backend_data elf_x86_64_nacl_arch_bed =
6136 {
6137 elf_x86_64_nacl_plt0_entry, /* plt0_entry */
6138 elf_x86_64_nacl_plt_entry, /* plt_entry */
6139 NACL_PLT_ENTRY_SIZE, /* plt_entry_size */
6140 2, /* plt0_got1_offset */
6141 9, /* plt0_got2_offset */
6142 13, /* plt0_got2_insn_end */
6143 3, /* plt_got_offset */
6144 33, /* plt_reloc_offset */
6145 38, /* plt_plt_offset */
6146 7, /* plt_got_insn_size */
6147 42, /* plt_plt_insn_end */
6148 32, /* plt_lazy_offset */
6149 elf_x86_64_nacl_eh_frame_plt, /* eh_frame_plt */
6150 sizeof (elf_x86_64_nacl_eh_frame_plt), /* eh_frame_plt_size */
6151 };
6152
6153 #undef elf_backend_arch_data
6154 #define elf_backend_arch_data &elf_x86_64_nacl_arch_bed
6155
6156 #undef elf_backend_object_p
6157 #define elf_backend_object_p elf64_x86_64_nacl_elf_object_p
6158 #undef elf_backend_modify_segment_map
6159 #define elf_backend_modify_segment_map nacl_modify_segment_map
6160 #undef elf_backend_modify_program_headers
6161 #define elf_backend_modify_program_headers nacl_modify_program_headers
6162 #undef elf_backend_final_write_processing
6163 #define elf_backend_final_write_processing nacl_final_write_processing
6164
6165 #include "elf64-target.h"
6166
6167 /* Native Client x32 support. */
6168
6169 static bfd_boolean
6170 elf32_x86_64_nacl_elf_object_p (bfd *abfd)
6171 {
6172 /* Set the right machine number for a NaCl x86-64 ELF32 file. */
6173 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32_nacl);
6174 return TRUE;
6175 }
6176
6177 #undef TARGET_LITTLE_SYM
6178 #define TARGET_LITTLE_SYM x86_64_elf32_nacl_vec
6179 #undef TARGET_LITTLE_NAME
6180 #define TARGET_LITTLE_NAME "elf32-x86-64-nacl"
6181 #undef elf32_bed
6182 #define elf32_bed elf32_x86_64_nacl_bed
6183
6184 #define bfd_elf32_bfd_link_hash_table_create \
6185 elf_x86_64_link_hash_table_create
6186 #define bfd_elf32_bfd_reloc_type_lookup \
6187 elf_x86_64_reloc_type_lookup
6188 #define bfd_elf32_bfd_reloc_name_lookup \
6189 elf_x86_64_reloc_name_lookup
6190 #define bfd_elf32_mkobject \
6191 elf_x86_64_mkobject
6192
6193 #undef elf_backend_object_p
6194 #define elf_backend_object_p \
6195 elf32_x86_64_nacl_elf_object_p
6196
6197 #undef elf_backend_bfd_from_remote_memory
6198 #define elf_backend_bfd_from_remote_memory \
6199 _bfd_elf32_bfd_from_remote_memory
6200
6201 #undef elf_backend_size_info
6202 #define elf_backend_size_info \
6203 _bfd_elf32_size_info
6204
6205 #include "elf32-target.h"
6206
6207 /* Restore defaults. */
6208 #undef elf_backend_object_p
6209 #define elf_backend_object_p elf64_x86_64_elf_object_p
6210 #undef elf_backend_bfd_from_remote_memory
6211 #undef elf_backend_size_info
6212 #undef elf_backend_modify_segment_map
6213 #undef elf_backend_modify_program_headers
6214 #undef elf_backend_final_write_processing
6215
6216 /* Intel L1OM support. */
6217
6218 static bfd_boolean
6219 elf64_l1om_elf_object_p (bfd *abfd)
6220 {
6221 /* Set the right machine number for an L1OM elf64 file. */
6222 bfd_default_set_arch_mach (abfd, bfd_arch_l1om, bfd_mach_l1om);
6223 return TRUE;
6224 }
6225
6226 #undef TARGET_LITTLE_SYM
6227 #define TARGET_LITTLE_SYM l1om_elf64_vec
6228 #undef TARGET_LITTLE_NAME
6229 #define TARGET_LITTLE_NAME "elf64-l1om"
6230 #undef ELF_ARCH
6231 #define ELF_ARCH bfd_arch_l1om
6232
6233 #undef ELF_MACHINE_CODE
6234 #define ELF_MACHINE_CODE EM_L1OM
6235
6236 #undef ELF_OSABI
6237
6238 #undef elf64_bed
6239 #define elf64_bed elf64_l1om_bed
6240
6241 #undef elf_backend_object_p
6242 #define elf_backend_object_p elf64_l1om_elf_object_p
6243
6244 /* Restore defaults. */
6245 #undef ELF_MAXPAGESIZE
6246 #undef ELF_MINPAGESIZE
6247 #undef ELF_COMMONPAGESIZE
6248 #define ELF_MAXPAGESIZE 0x200000
6249 #define ELF_MINPAGESIZE 0x1000
6250 #define ELF_COMMONPAGESIZE 0x1000
6251 #undef elf_backend_plt_alignment
6252 #define elf_backend_plt_alignment 4
6253 #undef elf_backend_arch_data
6254 #define elf_backend_arch_data &elf_x86_64_arch_bed
6255
6256 #include "elf64-target.h"
6257
6258 /* FreeBSD L1OM support. */
6259
6260 #undef TARGET_LITTLE_SYM
6261 #define TARGET_LITTLE_SYM l1om_elf64_fbsd_vec
6262 #undef TARGET_LITTLE_NAME
6263 #define TARGET_LITTLE_NAME "elf64-l1om-freebsd"
6264
6265 #undef ELF_OSABI
6266 #define ELF_OSABI ELFOSABI_FREEBSD
6267
6268 #undef elf64_bed
6269 #define elf64_bed elf64_l1om_fbsd_bed
6270
6271 #include "elf64-target.h"
6272
6273 /* Intel K1OM support. */
6274
6275 static bfd_boolean
6276 elf64_k1om_elf_object_p (bfd *abfd)
6277 {
6278 /* Set the right machine number for a K1OM elf64 file. */
6279 bfd_default_set_arch_mach (abfd, bfd_arch_k1om, bfd_mach_k1om);
6280 return TRUE;
6281 }
6282
6283 #undef TARGET_LITTLE_SYM
6284 #define TARGET_LITTLE_SYM k1om_elf64_vec
6285 #undef TARGET_LITTLE_NAME
6286 #define TARGET_LITTLE_NAME "elf64-k1om"
6287 #undef ELF_ARCH
6288 #define ELF_ARCH bfd_arch_k1om
6289
6290 #undef ELF_MACHINE_CODE
6291 #define ELF_MACHINE_CODE EM_K1OM
6292
6293 #undef ELF_OSABI
6294
6295 #undef elf64_bed
6296 #define elf64_bed elf64_k1om_bed
6297
6298 #undef elf_backend_object_p
6299 #define elf_backend_object_p elf64_k1om_elf_object_p
6300
6301 #undef elf_backend_static_tls_alignment
6302
6303 #undef elf_backend_want_plt_sym
6304 #define elf_backend_want_plt_sym 0
6305
6306 #include "elf64-target.h"
6307
6308 /* FreeBSD K1OM support. */
6309
6310 #undef TARGET_LITTLE_SYM
6311 #define TARGET_LITTLE_SYM k1om_elf64_fbsd_vec
6312 #undef TARGET_LITTLE_NAME
6313 #define TARGET_LITTLE_NAME "elf64-k1om-freebsd"
6314
6315 #undef ELF_OSABI
6316 #define ELF_OSABI ELFOSABI_FREEBSD
6317
6318 #undef elf64_bed
6319 #define elf64_bed elf64_k1om_fbsd_bed
6320
6321 #include "elf64-target.h"
6322
6323 /* 32-bit x86-64 support. */
6324
6325 #undef TARGET_LITTLE_SYM
6326 #define TARGET_LITTLE_SYM x86_64_elf32_vec
6327 #undef TARGET_LITTLE_NAME
6328 #define TARGET_LITTLE_NAME "elf32-x86-64"
6329 #undef elf32_bed
6330
6331 #undef ELF_ARCH
6332 #define ELF_ARCH bfd_arch_i386
6333
6334 #undef ELF_MACHINE_CODE
6335 #define ELF_MACHINE_CODE EM_X86_64
6336
6337 #undef ELF_OSABI
6338
6339 #undef elf_backend_object_p
6340 #define elf_backend_object_p \
6341 elf32_x86_64_elf_object_p
6342
6343 #undef elf_backend_bfd_from_remote_memory
6344 #define elf_backend_bfd_from_remote_memory \
6345 _bfd_elf32_bfd_from_remote_memory
6346
6347 #undef elf_backend_size_info
6348 #define elf_backend_size_info \
6349 _bfd_elf32_size_info
6350
6351 #include "elf32-target.h"