Check if symbol is defined when converting mov to lea
deliverable/binutils-gdb.git: bfd/elf64-x86-64.c
1 /* X86-64 specific support for ELF
2 Copyright (C) 2000-2015 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka <jh@suse.cz>.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "sysdep.h"
23 #include "bfd.h"
24 #include "bfdlink.h"
25 #include "libbfd.h"
26 #include "elf-bfd.h"
27 #include "elf-nacl.h"
28 #include "bfd_stdint.h"
29 #include "objalloc.h"
30 #include "hashtab.h"
31 #include "dwarf2.h"
32 #include "libiberty.h"
33
34 #include "elf/x86-64.h"
35
36 #ifdef CORE_HEADER
37 #include <stdarg.h>
38 #include CORE_HEADER
39 #endif
40
41 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
42 #define MINUS_ONE (~ (bfd_vma) 0)
43
 44 /* Since both 32-bit and 64-bit x86-64 encode the relocation type in an
 45 identical manner, we use ELF32_R_TYPE instead of ELF64_R_TYPE to get the
 46 relocation type. We also use ELF_ST_TYPE instead of ELF64_ST_TYPE
47 since they are the same. */
48
49 #define ABI_64_P(abfd) \
50 (get_elf_backend_data (abfd)->s->elfclass == ELFCLASS64)
51
52 /* The relocation "howto" table. Order of fields:
53 type, rightshift, size, bitsize, pc_relative, bitpos, complain_on_overflow,
54 special_function, name, partial_inplace, src_mask, dst_mask, pcrel_offset. */
55 static reloc_howto_type x86_64_elf_howto_table[] =
56 {
57 HOWTO(R_X86_64_NONE, 0, 3, 0, FALSE, 0, complain_overflow_dont,
58 bfd_elf_generic_reloc, "R_X86_64_NONE", FALSE, 0x00000000, 0x00000000,
59 FALSE),
60 HOWTO(R_X86_64_64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
61 bfd_elf_generic_reloc, "R_X86_64_64", FALSE, MINUS_ONE, MINUS_ONE,
62 FALSE),
63 HOWTO(R_X86_64_PC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
64 bfd_elf_generic_reloc, "R_X86_64_PC32", FALSE, 0xffffffff, 0xffffffff,
65 TRUE),
66 HOWTO(R_X86_64_GOT32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
67 bfd_elf_generic_reloc, "R_X86_64_GOT32", FALSE, 0xffffffff, 0xffffffff,
68 FALSE),
69 HOWTO(R_X86_64_PLT32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
70 bfd_elf_generic_reloc, "R_X86_64_PLT32", FALSE, 0xffffffff, 0xffffffff,
71 TRUE),
72 HOWTO(R_X86_64_COPY, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
73 bfd_elf_generic_reloc, "R_X86_64_COPY", FALSE, 0xffffffff, 0xffffffff,
74 FALSE),
75 HOWTO(R_X86_64_GLOB_DAT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
76 bfd_elf_generic_reloc, "R_X86_64_GLOB_DAT", FALSE, MINUS_ONE,
77 MINUS_ONE, FALSE),
78 HOWTO(R_X86_64_JUMP_SLOT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
79 bfd_elf_generic_reloc, "R_X86_64_JUMP_SLOT", FALSE, MINUS_ONE,
80 MINUS_ONE, FALSE),
81 HOWTO(R_X86_64_RELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
82 bfd_elf_generic_reloc, "R_X86_64_RELATIVE", FALSE, MINUS_ONE,
83 MINUS_ONE, FALSE),
84 HOWTO(R_X86_64_GOTPCREL, 0, 2, 32, TRUE, 0, complain_overflow_signed,
85 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL", FALSE, 0xffffffff,
86 0xffffffff, TRUE),
87 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
88 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
89 FALSE),
90 HOWTO(R_X86_64_32S, 0, 2, 32, FALSE, 0, complain_overflow_signed,
91 bfd_elf_generic_reloc, "R_X86_64_32S", FALSE, 0xffffffff, 0xffffffff,
92 FALSE),
93 HOWTO(R_X86_64_16, 0, 1, 16, FALSE, 0, complain_overflow_bitfield,
94 bfd_elf_generic_reloc, "R_X86_64_16", FALSE, 0xffff, 0xffff, FALSE),
95 HOWTO(R_X86_64_PC16,0, 1, 16, TRUE, 0, complain_overflow_bitfield,
96 bfd_elf_generic_reloc, "R_X86_64_PC16", FALSE, 0xffff, 0xffff, TRUE),
97 HOWTO(R_X86_64_8, 0, 0, 8, FALSE, 0, complain_overflow_bitfield,
98 bfd_elf_generic_reloc, "R_X86_64_8", FALSE, 0xff, 0xff, FALSE),
99 HOWTO(R_X86_64_PC8, 0, 0, 8, TRUE, 0, complain_overflow_signed,
100 bfd_elf_generic_reloc, "R_X86_64_PC8", FALSE, 0xff, 0xff, TRUE),
101 HOWTO(R_X86_64_DTPMOD64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
102 bfd_elf_generic_reloc, "R_X86_64_DTPMOD64", FALSE, MINUS_ONE,
103 MINUS_ONE, FALSE),
104 HOWTO(R_X86_64_DTPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
105 bfd_elf_generic_reloc, "R_X86_64_DTPOFF64", FALSE, MINUS_ONE,
106 MINUS_ONE, FALSE),
107 HOWTO(R_X86_64_TPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
108 bfd_elf_generic_reloc, "R_X86_64_TPOFF64", FALSE, MINUS_ONE,
109 MINUS_ONE, FALSE),
110 HOWTO(R_X86_64_TLSGD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
111 bfd_elf_generic_reloc, "R_X86_64_TLSGD", FALSE, 0xffffffff,
112 0xffffffff, TRUE),
113 HOWTO(R_X86_64_TLSLD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
114 bfd_elf_generic_reloc, "R_X86_64_TLSLD", FALSE, 0xffffffff,
115 0xffffffff, TRUE),
116 HOWTO(R_X86_64_DTPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
117 bfd_elf_generic_reloc, "R_X86_64_DTPOFF32", FALSE, 0xffffffff,
118 0xffffffff, FALSE),
119 HOWTO(R_X86_64_GOTTPOFF, 0, 2, 32, TRUE, 0, complain_overflow_signed,
120 bfd_elf_generic_reloc, "R_X86_64_GOTTPOFF", FALSE, 0xffffffff,
121 0xffffffff, TRUE),
122 HOWTO(R_X86_64_TPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
123 bfd_elf_generic_reloc, "R_X86_64_TPOFF32", FALSE, 0xffffffff,
124 0xffffffff, FALSE),
125 HOWTO(R_X86_64_PC64, 0, 4, 64, TRUE, 0, complain_overflow_bitfield,
126 bfd_elf_generic_reloc, "R_X86_64_PC64", FALSE, MINUS_ONE, MINUS_ONE,
127 TRUE),
128 HOWTO(R_X86_64_GOTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
129 bfd_elf_generic_reloc, "R_X86_64_GOTOFF64",
130 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
131 HOWTO(R_X86_64_GOTPC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
132 bfd_elf_generic_reloc, "R_X86_64_GOTPC32",
133 FALSE, 0xffffffff, 0xffffffff, TRUE),
134 HOWTO(R_X86_64_GOT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
135 bfd_elf_generic_reloc, "R_X86_64_GOT64", FALSE, MINUS_ONE, MINUS_ONE,
136 FALSE),
137 HOWTO(R_X86_64_GOTPCREL64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
138 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL64", FALSE, MINUS_ONE,
139 MINUS_ONE, TRUE),
140 HOWTO(R_X86_64_GOTPC64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
141 bfd_elf_generic_reloc, "R_X86_64_GOTPC64",
142 FALSE, MINUS_ONE, MINUS_ONE, TRUE),
143 HOWTO(R_X86_64_GOTPLT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
144 bfd_elf_generic_reloc, "R_X86_64_GOTPLT64", FALSE, MINUS_ONE,
145 MINUS_ONE, FALSE),
146 HOWTO(R_X86_64_PLTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
147 bfd_elf_generic_reloc, "R_X86_64_PLTOFF64", FALSE, MINUS_ONE,
148 MINUS_ONE, FALSE),
149 HOWTO(R_X86_64_SIZE32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
150 bfd_elf_generic_reloc, "R_X86_64_SIZE32", FALSE, 0xffffffff, 0xffffffff,
151 FALSE),
152 HOWTO(R_X86_64_SIZE64, 0, 4, 64, FALSE, 0, complain_overflow_unsigned,
153 bfd_elf_generic_reloc, "R_X86_64_SIZE64", FALSE, MINUS_ONE, MINUS_ONE,
154 FALSE),
155 HOWTO(R_X86_64_GOTPC32_TLSDESC, 0, 2, 32, TRUE, 0,
156 complain_overflow_bitfield, bfd_elf_generic_reloc,
157 "R_X86_64_GOTPC32_TLSDESC",
158 FALSE, 0xffffffff, 0xffffffff, TRUE),
159 HOWTO(R_X86_64_TLSDESC_CALL, 0, 0, 0, FALSE, 0,
160 complain_overflow_dont, bfd_elf_generic_reloc,
161 "R_X86_64_TLSDESC_CALL",
162 FALSE, 0, 0, FALSE),
163 HOWTO(R_X86_64_TLSDESC, 0, 4, 64, FALSE, 0,
164 complain_overflow_bitfield, bfd_elf_generic_reloc,
165 "R_X86_64_TLSDESC",
166 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
167 HOWTO(R_X86_64_IRELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
168 bfd_elf_generic_reloc, "R_X86_64_IRELATIVE", FALSE, MINUS_ONE,
169 MINUS_ONE, FALSE),
170 HOWTO(R_X86_64_RELATIVE64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
171 bfd_elf_generic_reloc, "R_X86_64_RELATIVE64", FALSE, MINUS_ONE,
172 MINUS_ONE, FALSE),
173 HOWTO(R_X86_64_PC32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
174 bfd_elf_generic_reloc, "R_X86_64_PC32_BND", FALSE, 0xffffffff, 0xffffffff,
175 TRUE),
176 HOWTO(R_X86_64_PLT32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
177 bfd_elf_generic_reloc, "R_X86_64_PLT32_BND", FALSE, 0xffffffff, 0xffffffff,
178 TRUE),
179
180 /* We have a gap in the reloc numbers here.
181 R_X86_64_standard counts the number up to this point, and
182 R_X86_64_vt_offset is the value to subtract from a reloc type of
183 R_X86_64_GNU_VT* to form an index into this table. */
184 #define R_X86_64_standard (R_X86_64_PLT32_BND + 1)
185 #define R_X86_64_vt_offset (R_X86_64_GNU_VTINHERIT - R_X86_64_standard)
186
187 /* GNU extension to record C++ vtable hierarchy. */
188 HOWTO (R_X86_64_GNU_VTINHERIT, 0, 4, 0, FALSE, 0, complain_overflow_dont,
189 NULL, "R_X86_64_GNU_VTINHERIT", FALSE, 0, 0, FALSE),
190
191 /* GNU extension to record C++ vtable member usage. */
192 HOWTO (R_X86_64_GNU_VTENTRY, 0, 4, 0, FALSE, 0, complain_overflow_dont,
193 _bfd_elf_rel_vtable_reloc_fn, "R_X86_64_GNU_VTENTRY", FALSE, 0, 0,
194 FALSE),
195
196 /* Use complain_overflow_bitfield on R_X86_64_32 for x32. */
197 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
198 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
199 FALSE)
200 };
201
202 #define IS_X86_64_PCREL_TYPE(TYPE) \
203 ( ((TYPE) == R_X86_64_PC8) \
204 || ((TYPE) == R_X86_64_PC16) \
205 || ((TYPE) == R_X86_64_PC32) \
206 || ((TYPE) == R_X86_64_PC32_BND) \
207 || ((TYPE) == R_X86_64_PC64))
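/* A PC-relative relocation resolves to S + A - P (symbol value plus addend
   minus the address of the relocated field), so its value depends only on
   the distance between the reference and the symbol.  */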
208
209 /* Map BFD relocs to the x86_64 elf relocs. */
210 struct elf_reloc_map
211 {
212 bfd_reloc_code_real_type bfd_reloc_val;
213 unsigned char elf_reloc_val;
214 };
215
216 static const struct elf_reloc_map x86_64_reloc_map[] =
217 {
218 { BFD_RELOC_NONE, R_X86_64_NONE, },
219 { BFD_RELOC_64, R_X86_64_64, },
220 { BFD_RELOC_32_PCREL, R_X86_64_PC32, },
221 { BFD_RELOC_X86_64_GOT32, R_X86_64_GOT32,},
222 { BFD_RELOC_X86_64_PLT32, R_X86_64_PLT32,},
223 { BFD_RELOC_X86_64_COPY, R_X86_64_COPY, },
224 { BFD_RELOC_X86_64_GLOB_DAT, R_X86_64_GLOB_DAT, },
225 { BFD_RELOC_X86_64_JUMP_SLOT, R_X86_64_JUMP_SLOT, },
226 { BFD_RELOC_X86_64_RELATIVE, R_X86_64_RELATIVE, },
227 { BFD_RELOC_X86_64_GOTPCREL, R_X86_64_GOTPCREL, },
228 { BFD_RELOC_32, R_X86_64_32, },
229 { BFD_RELOC_X86_64_32S, R_X86_64_32S, },
230 { BFD_RELOC_16, R_X86_64_16, },
231 { BFD_RELOC_16_PCREL, R_X86_64_PC16, },
232 { BFD_RELOC_8, R_X86_64_8, },
233 { BFD_RELOC_8_PCREL, R_X86_64_PC8, },
234 { BFD_RELOC_X86_64_DTPMOD64, R_X86_64_DTPMOD64, },
235 { BFD_RELOC_X86_64_DTPOFF64, R_X86_64_DTPOFF64, },
236 { BFD_RELOC_X86_64_TPOFF64, R_X86_64_TPOFF64, },
237 { BFD_RELOC_X86_64_TLSGD, R_X86_64_TLSGD, },
238 { BFD_RELOC_X86_64_TLSLD, R_X86_64_TLSLD, },
239 { BFD_RELOC_X86_64_DTPOFF32, R_X86_64_DTPOFF32, },
240 { BFD_RELOC_X86_64_GOTTPOFF, R_X86_64_GOTTPOFF, },
241 { BFD_RELOC_X86_64_TPOFF32, R_X86_64_TPOFF32, },
242 { BFD_RELOC_64_PCREL, R_X86_64_PC64, },
243 { BFD_RELOC_X86_64_GOTOFF64, R_X86_64_GOTOFF64, },
244 { BFD_RELOC_X86_64_GOTPC32, R_X86_64_GOTPC32, },
245 { BFD_RELOC_X86_64_GOT64, R_X86_64_GOT64, },
246 { BFD_RELOC_X86_64_GOTPCREL64,R_X86_64_GOTPCREL64, },
247 { BFD_RELOC_X86_64_GOTPC64, R_X86_64_GOTPC64, },
248 { BFD_RELOC_X86_64_GOTPLT64, R_X86_64_GOTPLT64, },
249 { BFD_RELOC_X86_64_PLTOFF64, R_X86_64_PLTOFF64, },
250 { BFD_RELOC_SIZE32, R_X86_64_SIZE32, },
251 { BFD_RELOC_SIZE64, R_X86_64_SIZE64, },
252 { BFD_RELOC_X86_64_GOTPC32_TLSDESC, R_X86_64_GOTPC32_TLSDESC, },
253 { BFD_RELOC_X86_64_TLSDESC_CALL, R_X86_64_TLSDESC_CALL, },
254 { BFD_RELOC_X86_64_TLSDESC, R_X86_64_TLSDESC, },
255 { BFD_RELOC_X86_64_IRELATIVE, R_X86_64_IRELATIVE, },
256 { BFD_RELOC_X86_64_PC32_BND, R_X86_64_PC32_BND,},
257 { BFD_RELOC_X86_64_PLT32_BND, R_X86_64_PLT32_BND,},
258 { BFD_RELOC_VTABLE_INHERIT, R_X86_64_GNU_VTINHERIT, },
259 { BFD_RELOC_VTABLE_ENTRY, R_X86_64_GNU_VTENTRY, },
260 };
261
262 static reloc_howto_type *
263 elf_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type)
264 {
265 unsigned i;
266
267 if (r_type == (unsigned int) R_X86_64_32)
268 {
269 if (ABI_64_P (abfd))
270 i = r_type;
271 else
272 i = ARRAY_SIZE (x86_64_elf_howto_table) - 1;
273 }
274 else if (r_type < (unsigned int) R_X86_64_GNU_VTINHERIT
275 || r_type >= (unsigned int) R_X86_64_max)
276 {
277 if (r_type >= (unsigned int) R_X86_64_standard)
278 {
279 (*_bfd_error_handler) (_("%B: invalid relocation type %d"),
280 abfd, (int) r_type);
281 r_type = R_X86_64_NONE;
282 }
283 i = r_type;
284 }
285 else
286 i = r_type - (unsigned int) R_X86_64_vt_offset;
287 BFD_ASSERT (x86_64_elf_howto_table[i].type == r_type);
288 return &x86_64_elf_howto_table[i];
289 }
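/* Worked example of the mapping above: R_X86_64_GNU_VTINHERIT is remapped
   to index R_X86_64_GNU_VTINHERIT - R_X86_64_vt_offset, which by the
   definition of R_X86_64_vt_offset is simply R_X86_64_standard, i.e. the
   first entry after the gap in the howto table.  */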
290
291 /* Given a BFD reloc type, return a HOWTO structure. */
292 static reloc_howto_type *
293 elf_x86_64_reloc_type_lookup (bfd *abfd,
294 bfd_reloc_code_real_type code)
295 {
296 unsigned int i;
297
298 for (i = 0; i < sizeof (x86_64_reloc_map) / sizeof (struct elf_reloc_map);
299 i++)
300 {
301 if (x86_64_reloc_map[i].bfd_reloc_val == code)
302 return elf_x86_64_rtype_to_howto (abfd,
303 x86_64_reloc_map[i].elf_reloc_val);
304 }
305 return NULL;
306 }
307
308 static reloc_howto_type *
309 elf_x86_64_reloc_name_lookup (bfd *abfd,
310 const char *r_name)
311 {
312 unsigned int i;
313
314 if (!ABI_64_P (abfd) && strcasecmp (r_name, "R_X86_64_32") == 0)
315 {
316 /* Get x32 R_X86_64_32. */
317 reloc_howto_type *reloc
318 = &x86_64_elf_howto_table[ARRAY_SIZE (x86_64_elf_howto_table) - 1];
319 BFD_ASSERT (reloc->type == (unsigned int) R_X86_64_32);
320 return reloc;
321 }
322
323 for (i = 0; i < ARRAY_SIZE (x86_64_elf_howto_table); i++)
324 if (x86_64_elf_howto_table[i].name != NULL
325 && strcasecmp (x86_64_elf_howto_table[i].name, r_name) == 0)
326 return &x86_64_elf_howto_table[i];
327
328 return NULL;
329 }
330
331 /* Given an x86_64 ELF reloc type, fill in an arelent structure. */
332
333 static void
334 elf_x86_64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *cache_ptr,
335 Elf_Internal_Rela *dst)
336 {
337 unsigned r_type;
338
339 r_type = ELF32_R_TYPE (dst->r_info);
340 cache_ptr->howto = elf_x86_64_rtype_to_howto (abfd, r_type);
341 BFD_ASSERT (r_type == cache_ptr->howto->type);
342 }
343 \f
344 /* Support for core dump NOTE sections. */
345 static bfd_boolean
346 elf_x86_64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
347 {
348 int offset;
349 size_t size;
350
351 switch (note->descsz)
352 {
353 default:
354 return FALSE;
355
 356 case 296: /* sizeof(struct elf_prstatus) on Linux/x32 */
357 /* pr_cursig */
358 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
359
360 /* pr_pid */
361 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
362
363 /* pr_reg */
364 offset = 72;
365 size = 216;
366
367 break;
368
 369 case 336: /* sizeof(struct elf_prstatus) on Linux/x86_64 */
370 /* pr_cursig */
371 elf_tdata (abfd)->core->signal
372 = bfd_get_16 (abfd, note->descdata + 12);
373
374 /* pr_pid */
375 elf_tdata (abfd)->core->lwpid
376 = bfd_get_32 (abfd, note->descdata + 32);
377
378 /* pr_reg */
379 offset = 112;
380 size = 216;
381
382 break;
383 }
384
385 /* Make a ".reg/999" section. */
386 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
387 size, note->descpos + offset);
388 }
389
390 static bfd_boolean
391 elf_x86_64_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
392 {
393 switch (note->descsz)
394 {
395 default:
396 return FALSE;
397
398 case 124: /* sizeof(struct elf_prpsinfo) on Linux/x32 */
399 elf_tdata (abfd)->core->pid
400 = bfd_get_32 (abfd, note->descdata + 12);
401 elf_tdata (abfd)->core->program
402 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
403 elf_tdata (abfd)->core->command
404 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
405 break;
406
407 case 136: /* sizeof(struct elf_prpsinfo) on Linux/x86_64 */
408 elf_tdata (abfd)->core->pid
409 = bfd_get_32 (abfd, note->descdata + 24);
410 elf_tdata (abfd)->core->program
411 = _bfd_elfcore_strndup (abfd, note->descdata + 40, 16);
412 elf_tdata (abfd)->core->command
413 = _bfd_elfcore_strndup (abfd, note->descdata + 56, 80);
414 }
415
 416 /* Note that for some reason a spurious space is tacked
 417 onto the end of the args in some implementations (at least
 418 one, anyway), so strip it off if it exists. */
419
420 {
421 char *command = elf_tdata (abfd)->core->command;
422 int n = strlen (command);
423
424 if (0 < n && command[n - 1] == ' ')
425 command[n - 1] = '\0';
426 }
427
428 return TRUE;
429 }
430
431 #ifdef CORE_HEADER
432 static char *
433 elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz,
434 int note_type, ...)
435 {
436 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
437 va_list ap;
438 const char *fname, *psargs;
439 long pid;
440 int cursig;
441 const void *gregs;
442
443 switch (note_type)
444 {
445 default:
446 return NULL;
447
448 case NT_PRPSINFO:
449 va_start (ap, note_type);
450 fname = va_arg (ap, const char *);
451 psargs = va_arg (ap, const char *);
452 va_end (ap);
453
454 if (bed->s->elfclass == ELFCLASS32)
455 {
456 prpsinfo32_t data;
457 memset (&data, 0, sizeof (data));
458 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
459 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
460 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
461 &data, sizeof (data));
462 }
463 else
464 {
465 prpsinfo64_t data;
466 memset (&data, 0, sizeof (data));
467 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
468 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
469 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
470 &data, sizeof (data));
471 }
472 /* NOTREACHED */
473
474 case NT_PRSTATUS:
475 va_start (ap, note_type);
476 pid = va_arg (ap, long);
477 cursig = va_arg (ap, int);
478 gregs = va_arg (ap, const void *);
479 va_end (ap);
480
481 if (bed->s->elfclass == ELFCLASS32)
482 {
483 if (bed->elf_machine_code == EM_X86_64)
484 {
485 prstatusx32_t prstat;
486 memset (&prstat, 0, sizeof (prstat));
487 prstat.pr_pid = pid;
488 prstat.pr_cursig = cursig;
489 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
490 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
491 &prstat, sizeof (prstat));
492 }
493 else
494 {
495 prstatus32_t prstat;
496 memset (&prstat, 0, sizeof (prstat));
497 prstat.pr_pid = pid;
498 prstat.pr_cursig = cursig;
499 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
500 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
501 &prstat, sizeof (prstat));
502 }
503 }
504 else
505 {
506 prstatus64_t prstat;
507 memset (&prstat, 0, sizeof (prstat));
508 prstat.pr_pid = pid;
509 prstat.pr_cursig = cursig;
510 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
511 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
512 &prstat, sizeof (prstat));
513 }
514 }
515 /* NOTREACHED */
516 }
517 #endif
518 \f
519 /* Functions for the x86-64 ELF linker. */
520
521 /* The name of the dynamic interpreter. This is put in the .interp
522 section. */
523
524 #define ELF64_DYNAMIC_INTERPRETER "/lib/ld64.so.1"
525 #define ELF32_DYNAMIC_INTERPRETER "/lib/ldx32.so.1"
526
527 /* If ELIMINATE_COPY_RELOCS is non-zero, the linker will try to avoid
528 copying dynamic variables from a shared lib into an app's dynbss
529 section, and instead use a dynamic relocation to point into the
530 shared lib. */
531 #define ELIMINATE_COPY_RELOCS 1
532
533 /* The size in bytes of an entry in the global offset table. */
534
535 #define GOT_ENTRY_SIZE 8
536
537 /* The size in bytes of an entry in the procedure linkage table. */
538
539 #define PLT_ENTRY_SIZE 16
540
541 /* The first entry in a procedure linkage table looks like this. See the
542 SVR4 ABI i386 supplement and the x86-64 ABI to see how this works. */
543
544 static const bfd_byte elf_x86_64_plt0_entry[PLT_ENTRY_SIZE] =
545 {
546 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
547 0xff, 0x25, 16, 0, 0, 0, /* jmpq *GOT+16(%rip) */
548 0x0f, 0x1f, 0x40, 0x00 /* nopl 0(%rax) */
549 };
550
551 /* Subsequent entries in a procedure linkage table look like this. */
552
553 static const bfd_byte elf_x86_64_plt_entry[PLT_ENTRY_SIZE] =
554 {
555 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
556 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
557 0x68, /* pushq immediate */
558 0, 0, 0, 0, /* replaced with index into relocation table. */
559 0xe9, /* jmp relative */
560 0, 0, 0, 0 /* replaced with offset to start of .plt0. */
561 };
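/* To illustrate how a lazy PLT entry is filled in (see the offsets in
   elf_x86_64_arch_bed below): the 32-bit GOT displacement of the indirect
   jump is written at byte offset 2 (plt_got_offset), the relocation index
   for the pushq at offset 7 (plt_reloc_offset), and the branch back to the
   first PLT entry at offset 12 (plt_plt_offset).  */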
562
563 /* The first entry in a procedure linkage table with BND relocations
 564 looks like this. */
565
566 static const bfd_byte elf_x86_64_bnd_plt0_entry[PLT_ENTRY_SIZE] =
567 {
568 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
569 0xf2, 0xff, 0x25, 16, 0, 0, 0, /* bnd jmpq *GOT+16(%rip) */
570 0x0f, 0x1f, 0 /* nopl (%rax) */
571 };
572
573 /* Subsequent entries for legacy branches in a procedure linkage table
574 with BND relocations look like this. */
575
576 static const bfd_byte elf_x86_64_legacy_plt_entry[PLT_ENTRY_SIZE] =
577 {
578 0x68, 0, 0, 0, 0, /* pushq immediate */
579 0xe9, 0, 0, 0, 0, /* jmpq relative */
580 0x66, 0x0f, 0x1f, 0x44, 0, 0 /* nopw (%rax,%rax,1) */
581 };
582
 583 /* Subsequent entries for branches with BND prefix in a procedure linkage
584 table with BND relocations look like this. */
585
586 static const bfd_byte elf_x86_64_bnd_plt_entry[PLT_ENTRY_SIZE] =
587 {
588 0x68, 0, 0, 0, 0, /* pushq immediate */
589 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
590 0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */
591 };
592
593 /* Entries for legacy branches in the second procedure linkage table
594 look like this. */
595
596 static const bfd_byte elf_x86_64_legacy_plt2_entry[8] =
597 {
598 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
599 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
600 0x66, 0x90 /* xchg %ax,%ax */
601 };
602
603 /* Entries for branches with BND prefix in the second procedure linkage
604 table look like this. */
605
606 static const bfd_byte elf_x86_64_bnd_plt2_entry[8] =
607 {
608 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
609 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
610 0x90 /* nop */
611 };
612
613 /* .eh_frame covering the .plt section. */
614
615 static const bfd_byte elf_x86_64_eh_frame_plt[] =
616 {
617 #define PLT_CIE_LENGTH 20
618 #define PLT_FDE_LENGTH 36
619 #define PLT_FDE_START_OFFSET 4 + PLT_CIE_LENGTH + 8
620 #define PLT_FDE_LEN_OFFSET 4 + PLT_CIE_LENGTH + 12
621 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
622 0, 0, 0, 0, /* CIE ID */
623 1, /* CIE version */
624 'z', 'R', 0, /* Augmentation string */
625 1, /* Code alignment factor */
626 0x78, /* Data alignment factor */
627 16, /* Return address column */
628 1, /* Augmentation size */
629 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
630 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
631 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
632 DW_CFA_nop, DW_CFA_nop,
633
634 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
635 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
636 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
637 0, 0, 0, 0, /* .plt size goes here */
638 0, /* Augmentation size */
639 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
640 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
641 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
642 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
643 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
644 11, /* Block length */
645 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
646 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
647 DW_OP_lit15, DW_OP_and, DW_OP_lit11, DW_OP_ge,
648 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
649 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
650 };
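/* The DW_CFA_def_cfa_expression above computes, roughly,
     CFA = %rsp + 8 + ((((%rip & 15) >= 11) ? 1 : 0) << 3)
   i.e. for the 16-byte PLT entries after PLT0 the CFA is 8 bytes above
   %rsp until the pushq has executed (the final jmp starts at entry offset
   11), and 16 bytes above it afterwards.  */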
651
652 /* Architecture-specific backend data for x86-64. */
653
654 struct elf_x86_64_backend_data
655 {
656 /* Templates for the initial PLT entry and for subsequent entries. */
657 const bfd_byte *plt0_entry;
658 const bfd_byte *plt_entry;
659 unsigned int plt_entry_size; /* Size of each PLT entry. */
660
661 /* Offsets into plt0_entry that are to be replaced with GOT[1] and GOT[2]. */
662 unsigned int plt0_got1_offset;
663 unsigned int plt0_got2_offset;
664
665 /* Offset of the end of the PC-relative instruction containing
666 plt0_got2_offset. */
667 unsigned int plt0_got2_insn_end;
668
669 /* Offsets into plt_entry that are to be replaced with... */
670 unsigned int plt_got_offset; /* ... address of this symbol in .got. */
671 unsigned int plt_reloc_offset; /* ... offset into relocation table. */
672 unsigned int plt_plt_offset; /* ... offset to start of .plt. */
673
674 /* Length of the PC-relative instruction containing plt_got_offset. */
675 unsigned int plt_got_insn_size;
676
677 /* Offset of the end of the PC-relative jump to plt0_entry. */
678 unsigned int plt_plt_insn_end;
679
680 /* Offset into plt_entry where the initial value of the GOT entry points. */
681 unsigned int plt_lazy_offset;
682
683 /* .eh_frame covering the .plt section. */
684 const bfd_byte *eh_frame_plt;
685 unsigned int eh_frame_plt_size;
686 };
687
688 #define get_elf_x86_64_arch_data(bed) \
689 ((const struct elf_x86_64_backend_data *) (bed)->arch_data)
690
691 #define get_elf_x86_64_backend_data(abfd) \
692 get_elf_x86_64_arch_data (get_elf_backend_data (abfd))
693
694 #define GET_PLT_ENTRY_SIZE(abfd) \
695 get_elf_x86_64_backend_data (abfd)->plt_entry_size
696
697 /* These are the standard parameters. */
698 static const struct elf_x86_64_backend_data elf_x86_64_arch_bed =
699 {
700 elf_x86_64_plt0_entry, /* plt0_entry */
701 elf_x86_64_plt_entry, /* plt_entry */
702 sizeof (elf_x86_64_plt_entry), /* plt_entry_size */
703 2, /* plt0_got1_offset */
704 8, /* plt0_got2_offset */
705 12, /* plt0_got2_insn_end */
706 2, /* plt_got_offset */
707 7, /* plt_reloc_offset */
708 12, /* plt_plt_offset */
709 6, /* plt_got_insn_size */
710 PLT_ENTRY_SIZE, /* plt_plt_insn_end */
711 6, /* plt_lazy_offset */
712 elf_x86_64_eh_frame_plt, /* eh_frame_plt */
713 sizeof (elf_x86_64_eh_frame_plt), /* eh_frame_plt_size */
714 };
715
716 static const struct elf_x86_64_backend_data elf_x86_64_bnd_arch_bed =
717 {
718 elf_x86_64_bnd_plt0_entry, /* plt0_entry */
719 elf_x86_64_bnd_plt_entry, /* plt_entry */
720 sizeof (elf_x86_64_bnd_plt_entry), /* plt_entry_size */
721 2, /* plt0_got1_offset */
722 1+8, /* plt0_got2_offset */
723 1+12, /* plt0_got2_insn_end */
724 1+2, /* plt_got_offset */
725 1, /* plt_reloc_offset */
726 7, /* plt_plt_offset */
727 1+6, /* plt_got_insn_size */
728 11, /* plt_plt_insn_end */
729 0, /* plt_lazy_offset */
730 elf_x86_64_eh_frame_plt, /* eh_frame_plt */
731 sizeof (elf_x86_64_eh_frame_plt), /* eh_frame_plt_size */
732 };
733
734 #define elf_backend_arch_data &elf_x86_64_arch_bed
735
736 /* x86-64 ELF linker hash entry. */
737
738 struct elf_x86_64_link_hash_entry
739 {
740 struct elf_link_hash_entry elf;
741
742 /* Track dynamic relocs copied for this symbol. */
743 struct elf_dyn_relocs *dyn_relocs;
744
745 #define GOT_UNKNOWN 0
746 #define GOT_NORMAL 1
747 #define GOT_TLS_GD 2
748 #define GOT_TLS_IE 3
749 #define GOT_TLS_GDESC 4
750 #define GOT_TLS_GD_BOTH_P(type) \
751 ((type) == (GOT_TLS_GD | GOT_TLS_GDESC))
752 #define GOT_TLS_GD_P(type) \
753 ((type) == GOT_TLS_GD || GOT_TLS_GD_BOTH_P (type))
754 #define GOT_TLS_GDESC_P(type) \
755 ((type) == GOT_TLS_GDESC || GOT_TLS_GD_BOTH_P (type))
756 #define GOT_TLS_GD_ANY_P(type) \
757 (GOT_TLS_GD_P (type) || GOT_TLS_GDESC_P (type))
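/* For example, a symbol referenced through both TLSGD and TLSDESC
   relocations (see the merging in check_relocs below) ends up with
   tls_type GOT_TLS_GD | GOT_TLS_GDESC (2 | 4 = 6), the value that
   GOT_TLS_GD_BOTH_P tests for.  */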
758 unsigned char tls_type;
759
760 /* TRUE if a weak symbol with a real definition needs a copy reloc.
761 When there is a weak symbol with a real definition, the processor
762 independent code will have arranged for us to see the real
763 definition first. We need to copy the needs_copy bit from the
764 real definition and check it when allowing copy reloc in PIE. */
765 unsigned int needs_copy : 1;
766
767 /* TRUE if symbol has at least one BND relocation. */
768 unsigned int has_bnd_reloc : 1;
769
770 /* Reference count of C/C++ function pointer relocations in read-write
771 section which can be resolved at run-time. */
772 bfd_signed_vma func_pointer_refcount;
773
774 /* Information about the GOT PLT entry. Filled when there are both
775 GOT and PLT relocations against the same function. */
776 union gotplt_union plt_got;
777
778 /* Information about the second PLT entry. Filled when has_bnd_reloc is
779 set. */
780 union gotplt_union plt_bnd;
781
782 /* Offset of the GOTPLT entry reserved for the TLS descriptor,
783 starting at the end of the jump table. */
784 bfd_vma tlsdesc_got;
785 };
786
787 #define elf_x86_64_hash_entry(ent) \
788 ((struct elf_x86_64_link_hash_entry *)(ent))
789
790 struct elf_x86_64_obj_tdata
791 {
792 struct elf_obj_tdata root;
793
794 /* tls_type for each local got entry. */
795 char *local_got_tls_type;
796
797 /* GOTPLT entries for TLS descriptors. */
798 bfd_vma *local_tlsdesc_gotent;
799 };
800
801 #define elf_x86_64_tdata(abfd) \
802 ((struct elf_x86_64_obj_tdata *) (abfd)->tdata.any)
803
804 #define elf_x86_64_local_got_tls_type(abfd) \
805 (elf_x86_64_tdata (abfd)->local_got_tls_type)
806
807 #define elf_x86_64_local_tlsdesc_gotent(abfd) \
808 (elf_x86_64_tdata (abfd)->local_tlsdesc_gotent)
809
810 #define is_x86_64_elf(bfd) \
811 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
812 && elf_tdata (bfd) != NULL \
813 && elf_object_id (bfd) == X86_64_ELF_DATA)
814
815 static bfd_boolean
816 elf_x86_64_mkobject (bfd *abfd)
817 {
818 return bfd_elf_allocate_object (abfd, sizeof (struct elf_x86_64_obj_tdata),
819 X86_64_ELF_DATA);
820 }
821
822 /* x86-64 ELF linker hash table. */
823
824 struct elf_x86_64_link_hash_table
825 {
826 struct elf_link_hash_table elf;
827
828 /* Short-cuts to get to dynamic linker sections. */
829 asection *sdynbss;
830 asection *srelbss;
831 asection *plt_eh_frame;
832 asection *plt_bnd;
833 asection *plt_got;
834
835 union
836 {
837 bfd_signed_vma refcount;
838 bfd_vma offset;
839 } tls_ld_got;
840
841 /* The amount of space used by the jump slots in the GOT. */
842 bfd_vma sgotplt_jump_table_size;
843
844 /* Small local sym cache. */
845 struct sym_cache sym_cache;
846
847 bfd_vma (*r_info) (bfd_vma, bfd_vma);
848 bfd_vma (*r_sym) (bfd_vma);
849 unsigned int pointer_r_type;
850 const char *dynamic_interpreter;
851 int dynamic_interpreter_size;
852
853 /* _TLS_MODULE_BASE_ symbol. */
854 struct bfd_link_hash_entry *tls_module_base;
855
856 /* Used by local STT_GNU_IFUNC symbols. */
857 htab_t loc_hash_table;
858 void * loc_hash_memory;
859
860 /* The offset into splt of the PLT entry for the TLS descriptor
861 resolver. Special values are 0, if not necessary (or not found
862 to be necessary yet), and -1 if needed but not determined
863 yet. */
864 bfd_vma tlsdesc_plt;
865 /* The offset into sgot of the GOT entry used by the PLT entry
866 above. */
867 bfd_vma tlsdesc_got;
868
869 /* The index of the next R_X86_64_JUMP_SLOT entry in .rela.plt. */
870 bfd_vma next_jump_slot_index;
871 /* The index of the next R_X86_64_IRELATIVE entry in .rela.plt. */
872 bfd_vma next_irelative_index;
873 };
874
875 /* Get the x86-64 ELF linker hash table from a link_info structure. */
876
877 #define elf_x86_64_hash_table(p) \
878 (elf_hash_table_id ((struct elf_link_hash_table *) ((p)->hash)) \
879 == X86_64_ELF_DATA ? ((struct elf_x86_64_link_hash_table *) ((p)->hash)) : NULL)
880
881 #define elf_x86_64_compute_jump_table_size(htab) \
882 ((htab)->elf.srelplt->reloc_count * GOT_ENTRY_SIZE)
883
884 /* Create an entry in an x86-64 ELF linker hash table. */
885
886 static struct bfd_hash_entry *
887 elf_x86_64_link_hash_newfunc (struct bfd_hash_entry *entry,
888 struct bfd_hash_table *table,
889 const char *string)
890 {
891 /* Allocate the structure if it has not already been allocated by a
892 subclass. */
893 if (entry == NULL)
894 {
895 entry = (struct bfd_hash_entry *)
896 bfd_hash_allocate (table,
897 sizeof (struct elf_x86_64_link_hash_entry));
898 if (entry == NULL)
899 return entry;
900 }
901
902 /* Call the allocation method of the superclass. */
903 entry = _bfd_elf_link_hash_newfunc (entry, table, string);
904 if (entry != NULL)
905 {
906 struct elf_x86_64_link_hash_entry *eh;
907
908 eh = (struct elf_x86_64_link_hash_entry *) entry;
909 eh->dyn_relocs = NULL;
910 eh->tls_type = GOT_UNKNOWN;
911 eh->needs_copy = 0;
912 eh->has_bnd_reloc = 0;
913 eh->func_pointer_refcount = 0;
914 eh->plt_bnd.offset = (bfd_vma) -1;
915 eh->plt_got.offset = (bfd_vma) -1;
916 eh->tlsdesc_got = (bfd_vma) -1;
917 }
918
919 return entry;
920 }
921
922 /* Compute a hash of a local hash entry. We use elf_link_hash_entry
 923 for local symbols so that we can handle local STT_GNU_IFUNC symbols
 924 as global symbols. We reuse indx and dynstr_index for the local symbol
 925 hash since they aren't used by global symbols in this backend. */
926
927 static hashval_t
928 elf_x86_64_local_htab_hash (const void *ptr)
929 {
930 struct elf_link_hash_entry *h
931 = (struct elf_link_hash_entry *) ptr;
932 return ELF_LOCAL_SYMBOL_HASH (h->indx, h->dynstr_index);
933 }
934
935 /* Compare local hash entries. */
936
937 static int
938 elf_x86_64_local_htab_eq (const void *ptr1, const void *ptr2)
939 {
940 struct elf_link_hash_entry *h1
941 = (struct elf_link_hash_entry *) ptr1;
942 struct elf_link_hash_entry *h2
943 = (struct elf_link_hash_entry *) ptr2;
944
945 return h1->indx == h2->indx && h1->dynstr_index == h2->dynstr_index;
946 }
947
 948 /* Find and/or create a hash entry for a local symbol. */
949
950 static struct elf_link_hash_entry *
951 elf_x86_64_get_local_sym_hash (struct elf_x86_64_link_hash_table *htab,
952 bfd *abfd, const Elf_Internal_Rela *rel,
953 bfd_boolean create)
954 {
955 struct elf_x86_64_link_hash_entry e, *ret;
956 asection *sec = abfd->sections;
957 hashval_t h = ELF_LOCAL_SYMBOL_HASH (sec->id,
958 htab->r_sym (rel->r_info));
959 void **slot;
960
961 e.elf.indx = sec->id;
962 e.elf.dynstr_index = htab->r_sym (rel->r_info);
963 slot = htab_find_slot_with_hash (htab->loc_hash_table, &e, h,
964 create ? INSERT : NO_INSERT);
965
966 if (!slot)
967 return NULL;
968
969 if (*slot)
970 {
971 ret = (struct elf_x86_64_link_hash_entry *) *slot;
972 return &ret->elf;
973 }
974
975 ret = (struct elf_x86_64_link_hash_entry *)
976 objalloc_alloc ((struct objalloc *) htab->loc_hash_memory,
977 sizeof (struct elf_x86_64_link_hash_entry));
978 if (ret)
979 {
980 memset (ret, 0, sizeof (*ret));
981 ret->elf.indx = sec->id;
982 ret->elf.dynstr_index = htab->r_sym (rel->r_info);
983 ret->elf.dynindx = -1;
984 ret->func_pointer_refcount = 0;
985 ret->plt_got.offset = (bfd_vma) -1;
986 *slot = ret;
987 }
988 return &ret->elf;
989 }
990
991 /* Destroy an X86-64 ELF linker hash table. */
992
993 static void
994 elf_x86_64_link_hash_table_free (bfd *obfd)
995 {
996 struct elf_x86_64_link_hash_table *htab
997 = (struct elf_x86_64_link_hash_table *) obfd->link.hash;
998
999 if (htab->loc_hash_table)
1000 htab_delete (htab->loc_hash_table);
1001 if (htab->loc_hash_memory)
1002 objalloc_free ((struct objalloc *) htab->loc_hash_memory);
1003 _bfd_elf_link_hash_table_free (obfd);
1004 }
1005
1006 /* Create an X86-64 ELF linker hash table. */
1007
1008 static struct bfd_link_hash_table *
1009 elf_x86_64_link_hash_table_create (bfd *abfd)
1010 {
1011 struct elf_x86_64_link_hash_table *ret;
1012 bfd_size_type amt = sizeof (struct elf_x86_64_link_hash_table);
1013
1014 ret = (struct elf_x86_64_link_hash_table *) bfd_zmalloc (amt);
1015 if (ret == NULL)
1016 return NULL;
1017
1018 if (!_bfd_elf_link_hash_table_init (&ret->elf, abfd,
1019 elf_x86_64_link_hash_newfunc,
1020 sizeof (struct elf_x86_64_link_hash_entry),
1021 X86_64_ELF_DATA))
1022 {
1023 free (ret);
1024 return NULL;
1025 }
1026
1027 if (ABI_64_P (abfd))
1028 {
1029 ret->r_info = elf64_r_info;
1030 ret->r_sym = elf64_r_sym;
1031 ret->pointer_r_type = R_X86_64_64;
1032 ret->dynamic_interpreter = ELF64_DYNAMIC_INTERPRETER;
1033 ret->dynamic_interpreter_size = sizeof ELF64_DYNAMIC_INTERPRETER;
1034 }
1035 else
1036 {
1037 ret->r_info = elf32_r_info;
1038 ret->r_sym = elf32_r_sym;
1039 ret->pointer_r_type = R_X86_64_32;
1040 ret->dynamic_interpreter = ELF32_DYNAMIC_INTERPRETER;
1041 ret->dynamic_interpreter_size = sizeof ELF32_DYNAMIC_INTERPRETER;
1042 }
1043
1044 ret->loc_hash_table = htab_try_create (1024,
1045 elf_x86_64_local_htab_hash,
1046 elf_x86_64_local_htab_eq,
1047 NULL);
1048 ret->loc_hash_memory = objalloc_create ();
1049 if (!ret->loc_hash_table || !ret->loc_hash_memory)
1050 {
1051 elf_x86_64_link_hash_table_free (abfd);
1052 return NULL;
1053 }
1054 ret->elf.root.hash_table_free = elf_x86_64_link_hash_table_free;
1055
1056 return &ret->elf.root;
1057 }
1058
1059 /* Create .plt, .rela.plt, .got, .got.plt, .rela.got, .dynbss, and
1060 .rela.bss sections in DYNOBJ, and set up shortcuts to them in our
1061 hash table. */
1062
1063 static bfd_boolean
1064 elf_x86_64_create_dynamic_sections (bfd *dynobj,
1065 struct bfd_link_info *info)
1066 {
1067 struct elf_x86_64_link_hash_table *htab;
1068
1069 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
1070 return FALSE;
1071
1072 htab = elf_x86_64_hash_table (info);
1073 if (htab == NULL)
1074 return FALSE;
1075
1076 htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
1077 if (!htab->sdynbss)
1078 abort ();
1079
1080 if (bfd_link_executable (info))
1081 {
1082 /* Always allow copy relocs for building executables. */
1083 asection *s = bfd_get_linker_section (dynobj, ".rela.bss");
1084 if (s == NULL)
1085 {
1086 const struct elf_backend_data *bed = get_elf_backend_data (dynobj);
1087 s = bfd_make_section_anyway_with_flags (dynobj,
1088 ".rela.bss",
1089 (bed->dynamic_sec_flags
1090 | SEC_READONLY));
1091 if (s == NULL
1092 || ! bfd_set_section_alignment (dynobj, s,
1093 bed->s->log_file_align))
1094 return FALSE;
1095 }
1096 htab->srelbss = s;
1097 }
1098
1099 if (!info->no_ld_generated_unwind_info
1100 && htab->plt_eh_frame == NULL
1101 && htab->elf.splt != NULL)
1102 {
1103 flagword flags = (SEC_ALLOC | SEC_LOAD | SEC_READONLY
1104 | SEC_HAS_CONTENTS | SEC_IN_MEMORY
1105 | SEC_LINKER_CREATED);
1106 htab->plt_eh_frame
1107 = bfd_make_section_anyway_with_flags (dynobj, ".eh_frame", flags);
1108 if (htab->plt_eh_frame == NULL
1109 || !bfd_set_section_alignment (dynobj, htab->plt_eh_frame, 3))
1110 return FALSE;
1111 }
1112 return TRUE;
1113 }
1114
1115 /* Copy the extra info we tack onto an elf_link_hash_entry. */
1116
1117 static void
1118 elf_x86_64_copy_indirect_symbol (struct bfd_link_info *info,
1119 struct elf_link_hash_entry *dir,
1120 struct elf_link_hash_entry *ind)
1121 {
1122 struct elf_x86_64_link_hash_entry *edir, *eind;
1123
1124 edir = (struct elf_x86_64_link_hash_entry *) dir;
1125 eind = (struct elf_x86_64_link_hash_entry *) ind;
1126
1127 if (!edir->has_bnd_reloc)
1128 edir->has_bnd_reloc = eind->has_bnd_reloc;
1129
1130 if (eind->dyn_relocs != NULL)
1131 {
1132 if (edir->dyn_relocs != NULL)
1133 {
1134 struct elf_dyn_relocs **pp;
1135 struct elf_dyn_relocs *p;
1136
1137 /* Add reloc counts against the indirect sym to the direct sym
1138 list. Merge any entries against the same section. */
1139 for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
1140 {
1141 struct elf_dyn_relocs *q;
1142
1143 for (q = edir->dyn_relocs; q != NULL; q = q->next)
1144 if (q->sec == p->sec)
1145 {
1146 q->pc_count += p->pc_count;
1147 q->count += p->count;
1148 *pp = p->next;
1149 break;
1150 }
1151 if (q == NULL)
1152 pp = &p->next;
1153 }
1154 *pp = edir->dyn_relocs;
1155 }
1156
1157 edir->dyn_relocs = eind->dyn_relocs;
1158 eind->dyn_relocs = NULL;
1159 }
1160
1161 if (ind->root.type == bfd_link_hash_indirect
1162 && dir->got.refcount <= 0)
1163 {
1164 edir->tls_type = eind->tls_type;
1165 eind->tls_type = GOT_UNKNOWN;
1166 }
1167
1168 if (ELIMINATE_COPY_RELOCS
1169 && ind->root.type != bfd_link_hash_indirect
1170 && dir->dynamic_adjusted)
1171 {
1172 /* If called to transfer flags for a weakdef during processing
1173 of elf_adjust_dynamic_symbol, don't copy non_got_ref.
1174 We clear it ourselves for ELIMINATE_COPY_RELOCS. */
1175 dir->ref_dynamic |= ind->ref_dynamic;
1176 dir->ref_regular |= ind->ref_regular;
1177 dir->ref_regular_nonweak |= ind->ref_regular_nonweak;
1178 dir->needs_plt |= ind->needs_plt;
1179 dir->pointer_equality_needed |= ind->pointer_equality_needed;
1180 }
1181 else
1182 {
1183 if (eind->func_pointer_refcount > 0)
1184 {
1185 edir->func_pointer_refcount += eind->func_pointer_refcount;
1186 eind->func_pointer_refcount = 0;
1187 }
1188
1189 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
1190 }
1191 }
1192
1193 static bfd_boolean
1194 elf64_x86_64_elf_object_p (bfd *abfd)
1195 {
1196 /* Set the right machine number for an x86-64 elf64 file. */
1197 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64);
1198 return TRUE;
1199 }
1200
1201 static bfd_boolean
1202 elf32_x86_64_elf_object_p (bfd *abfd)
1203 {
1204 /* Set the right machine number for an x86-64 elf32 file. */
1205 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32);
1206 return TRUE;
1207 }
1208
1209 /* Return TRUE if the TLS access code sequence supports transition
1210 from R_TYPE. */
1211
1212 static bfd_boolean
1213 elf_x86_64_check_tls_transition (bfd *abfd,
1214 struct bfd_link_info *info,
1215 asection *sec,
1216 bfd_byte *contents,
1217 Elf_Internal_Shdr *symtab_hdr,
1218 struct elf_link_hash_entry **sym_hashes,
1219 unsigned int r_type,
1220 const Elf_Internal_Rela *rel,
1221 const Elf_Internal_Rela *relend)
1222 {
1223 unsigned int val;
1224 unsigned long r_symndx;
1225 bfd_boolean largepic = FALSE;
1226 struct elf_link_hash_entry *h;
1227 bfd_vma offset;
1228 struct elf_x86_64_link_hash_table *htab;
1229
1230 /* Get the section contents. */
1231 if (contents == NULL)
1232 {
1233 if (elf_section_data (sec)->this_hdr.contents != NULL)
1234 contents = elf_section_data (sec)->this_hdr.contents;
1235 else
1236 {
1237 /* FIXME: How to better handle error condition? */
1238 if (!bfd_malloc_and_get_section (abfd, sec, &contents))
1239 return FALSE;
1240
1241 /* Cache the section contents for elf_link_input_bfd. */
1242 elf_section_data (sec)->this_hdr.contents = contents;
1243 }
1244 }
1245
1246 htab = elf_x86_64_hash_table (info);
1247 offset = rel->r_offset;
1248 switch (r_type)
1249 {
1250 case R_X86_64_TLSGD:
1251 case R_X86_64_TLSLD:
1252 if ((rel + 1) >= relend)
1253 return FALSE;
1254
1255 if (r_type == R_X86_64_TLSGD)
1256 {
1257 /* Check transition from GD access model. For 64bit, only
1258 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1259 .word 0x6666; rex64; call __tls_get_addr
1260 can transition to a different access model. For 32bit, only
1261 leaq foo@tlsgd(%rip), %rdi
1262 .word 0x6666; rex64; call __tls_get_addr
1263 can transition to a different access model. For largepic
1264 we also support:
1265 leaq foo@tlsgd(%rip), %rdi
1266 movabsq $__tls_get_addr@pltoff, %rax
1267 addq %rbx, %rax
1268 call *%rax. */
1269
1270 static const unsigned char call[] = { 0x66, 0x66, 0x48, 0xe8 };
1271 static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d };
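/* In these byte patterns, 0x66 is the data16 prefix, 0x48 is the REX.W
   prefix, 0x8d 0x3d encodes "lea disp32(%rip), %rdi" and 0xe8 is the
   opcode of "call rel32"; leaq[] therefore matches the prefixed 64-bit
   leaq above and call[] matches the ".word 0x6666; rex64; call"
   sequence.  */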
1272
1273 if ((offset + 12) > sec->size)
1274 return FALSE;
1275
1276 if (memcmp (contents + offset + 4, call, 4) != 0)
1277 {
1278 if (!ABI_64_P (abfd)
1279 || (offset + 19) > sec->size
1280 || offset < 3
1281 || memcmp (contents + offset - 3, leaq + 1, 3) != 0
1282 || memcmp (contents + offset + 4, "\x48\xb8", 2) != 0
1283 || memcmp (contents + offset + 14, "\x48\x01\xd8\xff\xd0", 5)
1284 != 0)
1285 return FALSE;
1286 largepic = TRUE;
1287 }
1288 else if (ABI_64_P (abfd))
1289 {
1290 if (offset < 4
1291 || memcmp (contents + offset - 4, leaq, 4) != 0)
1292 return FALSE;
1293 }
1294 else
1295 {
1296 if (offset < 3
1297 || memcmp (contents + offset - 3, leaq + 1, 3) != 0)
1298 return FALSE;
1299 }
1300 }
1301 else
1302 {
1303 /* Check transition from LD access model. Only
1304 leaq foo@tlsld(%rip), %rdi;
1305 call __tls_get_addr
1306 can transition to a different access model. For largepic
1307 we also support:
1308 leaq foo@tlsld(%rip), %rdi
1309 movabsq $__tls_get_addr@pltoff, %rax
1310 addq %rbx, %rax
1311 call *%rax. */
1312
1313 static const unsigned char lea[] = { 0x48, 0x8d, 0x3d };
1314
1315 if (offset < 3 || (offset + 9) > sec->size)
1316 return FALSE;
1317
1318 if (memcmp (contents + offset - 3, lea, 3) != 0)
1319 return FALSE;
1320
1321 if (0xe8 != *(contents + offset + 4))
1322 {
1323 if (!ABI_64_P (abfd)
1324 || (offset + 19) > sec->size
1325 || memcmp (contents + offset + 4, "\x48\xb8", 2) != 0
1326 || memcmp (contents + offset + 14, "\x48\x01\xd8\xff\xd0", 5)
1327 != 0)
1328 return FALSE;
1329 largepic = TRUE;
1330 }
1331 }
1332
1333 r_symndx = htab->r_sym (rel[1].r_info);
1334 if (r_symndx < symtab_hdr->sh_info)
1335 return FALSE;
1336
1337 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1338 /* Use strncmp to check __tls_get_addr since __tls_get_addr
1339 may be versioned. */
1340 return (h != NULL
1341 && h->root.root.string != NULL
1342 && (largepic
1343 ? ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PLTOFF64
1344 : (ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PC32
1345 || ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PLT32))
1346 && (strncmp (h->root.root.string,
1347 "__tls_get_addr", 14) == 0));
1348
1349 case R_X86_64_GOTTPOFF:
1350 /* Check transition from IE access model:
1351 mov foo@gottpoff(%rip), %reg
1352 add foo@gottpoff(%rip), %reg
1353 */
1354
1355 /* Check REX prefix first. */
1356 if (offset >= 3 && (offset + 4) <= sec->size)
1357 {
1358 val = bfd_get_8 (abfd, contents + offset - 3);
1359 if (val != 0x48 && val != 0x4c)
1360 {
1361 /* X32 may have 0x44 REX prefix or no REX prefix. */
1362 if (ABI_64_P (abfd))
1363 return FALSE;
1364 }
1365 }
1366 else
1367 {
1368 /* X32 may not have any REX prefix. */
1369 if (ABI_64_P (abfd))
1370 return FALSE;
1371 if (offset < 2 || (offset + 3) > sec->size)
1372 return FALSE;
1373 }
1374
1375 val = bfd_get_8 (abfd, contents + offset - 2);
1376 if (val != 0x8b && val != 0x03)
1377 return FALSE;
1378
1379 val = bfd_get_8 (abfd, contents + offset - 1);
1380 return (val & 0xc7) == 5;
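/* (val & 0xc7) == 5 accepts any ModRM byte with mod == 00 and r/m == 101,
   i.e. a RIP-relative disp32 operand with any destination register, which
   is exactly the foo@gottpoff(%rip) form shown above.  */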
1381
1382 case R_X86_64_GOTPC32_TLSDESC:
1383 /* Check transition from GDesc access model:
1384 leaq x@tlsdesc(%rip), %rax
1385
1386 Make sure it's a leaq adding rip to a 32-bit offset
1387 into any register, although it's probably almost always
1388 going to be rax. */
1389
1390 if (offset < 3 || (offset + 4) > sec->size)
1391 return FALSE;
1392
1393 val = bfd_get_8 (abfd, contents + offset - 3);
1394 if ((val & 0xfb) != 0x48)
1395 return FALSE;
1396
1397 if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d)
1398 return FALSE;
1399
1400 val = bfd_get_8 (abfd, contents + offset - 1);
1401 return (val & 0xc7) == 0x05;
1402
1403 case R_X86_64_TLSDESC_CALL:
1404 /* Check transition from GDesc access model:
1405 call *x@tlsdesc(%rax)
1406 */
1407 if (offset + 2 <= sec->size)
1408 {
1409 /* Make sure that it's a call *x@tlsdesc(%rax). */
1410 static const unsigned char call[] = { 0xff, 0x10 };
1411 return memcmp (contents + offset, call, 2) == 0;
1412 }
1413
1414 return FALSE;
1415
1416 default:
1417 abort ();
1418 }
1419 }
1420
1421 /* Return TRUE if the TLS access transition is OK or no transition
1422 will be performed. Update R_TYPE if there is a transition. */
1423
1424 static bfd_boolean
1425 elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd,
1426 asection *sec, bfd_byte *contents,
1427 Elf_Internal_Shdr *symtab_hdr,
1428 struct elf_link_hash_entry **sym_hashes,
1429 unsigned int *r_type, int tls_type,
1430 const Elf_Internal_Rela *rel,
1431 const Elf_Internal_Rela *relend,
1432 struct elf_link_hash_entry *h,
1433 unsigned long r_symndx)
1434 {
1435 unsigned int from_type = *r_type;
1436 unsigned int to_type = from_type;
1437 bfd_boolean check = TRUE;
1438
1439 /* Skip TLS transition for functions. */
1440 if (h != NULL
1441 && (h->type == STT_FUNC
1442 || h->type == STT_GNU_IFUNC))
1443 return TRUE;
1444
1445 switch (from_type)
1446 {
1447 case R_X86_64_TLSGD:
1448 case R_X86_64_GOTPC32_TLSDESC:
1449 case R_X86_64_TLSDESC_CALL:
1450 case R_X86_64_GOTTPOFF:
1451 if (bfd_link_executable (info))
1452 {
1453 if (h == NULL)
1454 to_type = R_X86_64_TPOFF32;
1455 else
1456 to_type = R_X86_64_GOTTPOFF;
1457 }
1458
1459 /* When we are called from elf_x86_64_relocate_section,
1460 CONTENTS isn't NULL and there may be additional transitions
1461 based on TLS_TYPE. */
1462 if (contents != NULL)
1463 {
1464 unsigned int new_to_type = to_type;
1465
1466 if (bfd_link_executable (info)
1467 && h != NULL
1468 && h->dynindx == -1
1469 && tls_type == GOT_TLS_IE)
1470 new_to_type = R_X86_64_TPOFF32;
1471
1472 if (to_type == R_X86_64_TLSGD
1473 || to_type == R_X86_64_GOTPC32_TLSDESC
1474 || to_type == R_X86_64_TLSDESC_CALL)
1475 {
1476 if (tls_type == GOT_TLS_IE)
1477 new_to_type = R_X86_64_GOTTPOFF;
1478 }
1479
1480 /* We checked the transition before when we were called from
1481 elf_x86_64_check_relocs. We only want to check the new
1482 transition which hasn't been checked before. */
1483 check = new_to_type != to_type && from_type == to_type;
1484 to_type = new_to_type;
1485 }
1486
1487 break;
1488
1489 case R_X86_64_TLSLD:
1490 if (bfd_link_executable (info))
1491 to_type = R_X86_64_TPOFF32;
1492 break;
1493
1494 default:
1495 return TRUE;
1496 }
1497
1498 /* Return TRUE if there is no transition. */
1499 if (from_type == to_type)
1500 return TRUE;
1501
1502 /* Check if the transition can be performed. */
1503 if (check
1504 && ! elf_x86_64_check_tls_transition (abfd, info, sec, contents,
1505 symtab_hdr, sym_hashes,
1506 from_type, rel, relend))
1507 {
1508 reloc_howto_type *from, *to;
1509 const char *name;
1510
1511 from = elf_x86_64_rtype_to_howto (abfd, from_type);
1512 to = elf_x86_64_rtype_to_howto (abfd, to_type);
1513
1514 if (h)
1515 name = h->root.root.string;
1516 else
1517 {
1518 struct elf_x86_64_link_hash_table *htab;
1519
1520 htab = elf_x86_64_hash_table (info);
1521 if (htab == NULL)
1522 name = "*unknown*";
1523 else
1524 {
1525 Elf_Internal_Sym *isym;
1526
1527 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1528 abfd, r_symndx);
1529 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1530 }
1531 }
1532
1533 (*_bfd_error_handler)
1534 (_("%B: TLS transition from %s to %s against `%s' at 0x%lx "
1535 "in section `%A' failed"),
1536 abfd, sec, from->name, to->name, name,
1537 (unsigned long) rel->r_offset);
1538 bfd_set_error (bfd_error_bad_value);
1539 return FALSE;
1540 }
1541
1542 *r_type = to_type;
1543 return TRUE;
1544 }
1545
1546 /* Rename some of the generic section flags to better document how they
1547 are used here. */
1548 #define need_convert_mov_to_lea sec_flg0
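/* As a sketch of the conversion this flag guards (and which the change
   described in the title makes conditional on the symbol being defined):
   a GOT load of the form
     movq foo@GOTPCREL(%rip), %reg   (opcode 0x8b, R_X86_64_GOTPCREL)
   can be rewritten as
     leaq foo(%rip), %reg            (opcode 0x8d, R_X86_64_PC32)
   avoiding the GOT indirection when foo's address is known at link time.  */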
1549
1550 /* Look through the relocs for a section during the first phase, and
1551 calculate needed space in the global offset table, procedure
1552 linkage table, and dynamic reloc sections. */
1553
1554 static bfd_boolean
1555 elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info,
1556 asection *sec,
1557 const Elf_Internal_Rela *relocs)
1558 {
1559 struct elf_x86_64_link_hash_table *htab;
1560 Elf_Internal_Shdr *symtab_hdr;
1561 struct elf_link_hash_entry **sym_hashes;
1562 const Elf_Internal_Rela *rel;
1563 const Elf_Internal_Rela *rel_end;
1564 asection *sreloc;
1565 bfd_boolean use_plt_got;
1566
1567 if (bfd_link_relocatable (info))
1568 return TRUE;
1569
1570 BFD_ASSERT (is_x86_64_elf (abfd));
1571
1572 htab = elf_x86_64_hash_table (info);
1573 if (htab == NULL)
1574 return FALSE;
1575
1576 use_plt_got = get_elf_x86_64_backend_data (abfd) == &elf_x86_64_arch_bed;
1577
1578 symtab_hdr = &elf_symtab_hdr (abfd);
1579 sym_hashes = elf_sym_hashes (abfd);
1580
1581 sreloc = NULL;
1582
1583 rel_end = relocs + sec->reloc_count;
1584 for (rel = relocs; rel < rel_end; rel++)
1585 {
1586 unsigned int r_type;
1587 unsigned long r_symndx;
1588 struct elf_link_hash_entry *h;
1589 Elf_Internal_Sym *isym;
1590 const char *name;
1591 bfd_boolean size_reloc;
1592
1593 r_symndx = htab->r_sym (rel->r_info);
1594 r_type = ELF32_R_TYPE (rel->r_info);
1595
1596 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
1597 {
1598 (*_bfd_error_handler) (_("%B: bad symbol index: %d"),
1599 abfd, r_symndx);
1600 return FALSE;
1601 }
1602
1603 if (r_symndx < symtab_hdr->sh_info)
1604 {
1605 /* A local symbol. */
1606 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1607 abfd, r_symndx);
1608 if (isym == NULL)
1609 return FALSE;
1610
1611 /* Check relocation against local STT_GNU_IFUNC symbol. */
1612 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
1613 {
1614 h = elf_x86_64_get_local_sym_hash (htab, abfd, rel,
1615 TRUE);
1616 if (h == NULL)
1617 return FALSE;
1618
1619 /* Fake a STT_GNU_IFUNC symbol. */
1620 h->type = STT_GNU_IFUNC;
1621 h->def_regular = 1;
1622 h->ref_regular = 1;
1623 h->forced_local = 1;
1624 h->root.type = bfd_link_hash_defined;
1625 }
1626 else
1627 h = NULL;
1628 }
1629 else
1630 {
1631 isym = NULL;
1632 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1633 while (h->root.type == bfd_link_hash_indirect
1634 || h->root.type == bfd_link_hash_warning)
1635 h = (struct elf_link_hash_entry *) h->root.u.i.link;
1636 }
1637
1638 /* Check invalid x32 relocations. */
1639 if (!ABI_64_P (abfd))
1640 switch (r_type)
1641 {
1642 default:
1643 break;
1644
1645 case R_X86_64_DTPOFF64:
1646 case R_X86_64_TPOFF64:
1647 case R_X86_64_PC64:
1648 case R_X86_64_GOTOFF64:
1649 case R_X86_64_GOT64:
1650 case R_X86_64_GOTPCREL64:
1651 case R_X86_64_GOTPC64:
1652 case R_X86_64_GOTPLT64:
1653 case R_X86_64_PLTOFF64:
1654 {
1655 if (h)
1656 name = h->root.root.string;
1657 else
1658 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1659 NULL);
1660 (*_bfd_error_handler)
1661 (_("%B: relocation %s against symbol `%s' isn't "
1662 "supported in x32 mode"), abfd,
1663 x86_64_elf_howto_table[r_type].name, name);
1664 bfd_set_error (bfd_error_bad_value);
1665 return FALSE;
1666 }
1667 break;
1668 }
1669
1670 if (h != NULL)
1671 {
1672 /* Create the ifunc sections for static executables. If we
1673 never see an indirect function symbol nor are we building
1674 a static executable, those sections will be empty and
1675 won't appear in output. */
1676 switch (r_type)
1677 {
1678 default:
1679 break;
1680
1681 case R_X86_64_PC32_BND:
1682 case R_X86_64_PLT32_BND:
1683 case R_X86_64_PC32:
1684 case R_X86_64_PLT32:
1685 case R_X86_64_32:
1686 case R_X86_64_64:
1687 /* MPX PLT is supported only if elf_x86_64_arch_bed
1688 is used in 64-bit mode. */
1689 if (ABI_64_P (abfd)
1690 && info->bndplt
1691 && (get_elf_x86_64_backend_data (abfd)
1692 == &elf_x86_64_arch_bed))
1693 {
1694 elf_x86_64_hash_entry (h)->has_bnd_reloc = 1;
1695
1696 /* Create the second PLT for Intel MPX support. */
1697 if (htab->plt_bnd == NULL)
1698 {
1699 unsigned int plt_bnd_align;
1700 const struct elf_backend_data *bed;
1701
1702 bed = get_elf_backend_data (info->output_bfd);
1703 BFD_ASSERT (sizeof (elf_x86_64_bnd_plt2_entry) == 8
1704 && (sizeof (elf_x86_64_bnd_plt2_entry)
1705 == sizeof (elf_x86_64_legacy_plt2_entry)));
1706 plt_bnd_align = 3;
1707
1708 if (htab->elf.dynobj == NULL)
1709 htab->elf.dynobj = abfd;
1710 htab->plt_bnd
1711 = bfd_make_section_anyway_with_flags (htab->elf.dynobj,
1712 ".plt.bnd",
1713 (bed->dynamic_sec_flags
1714 | SEC_ALLOC
1715 | SEC_CODE
1716 | SEC_LOAD
1717 | SEC_READONLY));
1718 if (htab->plt_bnd == NULL
1719 || !bfd_set_section_alignment (htab->elf.dynobj,
1720 htab->plt_bnd,
1721 plt_bnd_align))
1722 return FALSE;
1723 }
1724 }
1725
1726 case R_X86_64_32S:
1727 case R_X86_64_PC64:
1728 case R_X86_64_GOTPCREL:
1729 case R_X86_64_GOTPCREL64:
1730 if (htab->elf.dynobj == NULL)
1731 htab->elf.dynobj = abfd;
1732 if (!_bfd_elf_create_ifunc_sections (htab->elf.dynobj, info))
1733 return FALSE;
1734 break;
1735 }
1736
1737 /* It is referenced by a non-shared object. */
1738 h->ref_regular = 1;
1739 h->root.non_ir_ref = 1;
1740
1741 if (h->type == STT_GNU_IFUNC)
1742 elf_tdata (info->output_bfd)->has_gnu_symbols
1743 |= elf_gnu_symbol_ifunc;
1744 }
1745
1746 if (! elf_x86_64_tls_transition (info, abfd, sec, NULL,
1747 symtab_hdr, sym_hashes,
1748 &r_type, GOT_UNKNOWN,
1749 rel, rel_end, h, r_symndx))
1750 return FALSE;
1751
1752 switch (r_type)
1753 {
1754 case R_X86_64_TLSLD:
1755 htab->tls_ld_got.refcount += 1;
1756 goto create_got;
1757
1758 case R_X86_64_TPOFF32:
1759 if (!bfd_link_executable (info) && ABI_64_P (abfd))
1760 {
1761 if (h)
1762 name = h->root.root.string;
1763 else
1764 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1765 NULL);
1766 (*_bfd_error_handler)
1767 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
1768 abfd,
1769 x86_64_elf_howto_table[r_type].name, name);
1770 bfd_set_error (bfd_error_bad_value);
1771 return FALSE;
1772 }
1773 break;
1774
1775 case R_X86_64_GOTTPOFF:
1776 if (!bfd_link_executable (info))
1777 info->flags |= DF_STATIC_TLS;
1778 /* Fall through */
1779
1780 case R_X86_64_GOT32:
1781 case R_X86_64_GOTPCREL:
1782 case R_X86_64_TLSGD:
1783 case R_X86_64_GOT64:
1784 case R_X86_64_GOTPCREL64:
1785 case R_X86_64_GOTPLT64:
1786 case R_X86_64_GOTPC32_TLSDESC:
1787 case R_X86_64_TLSDESC_CALL:
1788 /* This symbol requires a global offset table entry. */
1789 {
1790 int tls_type, old_tls_type;
1791
1792 switch (r_type)
1793 {
1794 default: tls_type = GOT_NORMAL; break;
1795 case R_X86_64_TLSGD: tls_type = GOT_TLS_GD; break;
1796 case R_X86_64_GOTTPOFF: tls_type = GOT_TLS_IE; break;
1797 case R_X86_64_GOTPC32_TLSDESC:
1798 case R_X86_64_TLSDESC_CALL:
1799 tls_type = GOT_TLS_GDESC; break;
1800 }
1801
1802 if (h != NULL)
1803 {
1804 h->got.refcount += 1;
1805 old_tls_type = elf_x86_64_hash_entry (h)->tls_type;
1806 }
1807 else
1808 {
1809 bfd_signed_vma *local_got_refcounts;
1810
1811 /* This is a global offset table entry for a local symbol. */
1812 local_got_refcounts = elf_local_got_refcounts (abfd);
1813 if (local_got_refcounts == NULL)
1814 {
1815 bfd_size_type size;
1816
1817 size = symtab_hdr->sh_info;
1818 size *= sizeof (bfd_signed_vma)
1819 + sizeof (bfd_vma) + sizeof (char);
1820 local_got_refcounts = ((bfd_signed_vma *)
1821 bfd_zalloc (abfd, size));
1822 if (local_got_refcounts == NULL)
1823 return FALSE;
1824 elf_local_got_refcounts (abfd) = local_got_refcounts;
1825 elf_x86_64_local_tlsdesc_gotent (abfd)
1826 = (bfd_vma *) (local_got_refcounts + symtab_hdr->sh_info);
1827 elf_x86_64_local_got_tls_type (abfd)
1828 = (char *) (local_got_refcounts + 2 * symtab_hdr->sh_info);
1829 }
1830 local_got_refcounts[r_symndx] += 1;
1831 old_tls_type
1832 = elf_x86_64_local_got_tls_type (abfd) [r_symndx];
1833 }
1834
1835 /* If a TLS symbol is accessed using IE at least once,
1836 there is no point in using the dynamic model for it. */
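/* For example, if one input accesses foo with the general-dynamic
   sequence (R_X86_64_TLSGD) and another with initial-exec
   (R_X86_64_GOTTPOFF), the shared GOT entry ends up recorded as
   GOT_TLS_IE and both uses are satisfied by the IE model.  Mixing a
   TLS access with a normal GOT access (e.g. R_X86_64_GOTPCREL) to
   the same symbol cannot be reconciled and is diagnosed below.  */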
1837 if (old_tls_type != tls_type && old_tls_type != GOT_UNKNOWN
1838 && (! GOT_TLS_GD_ANY_P (old_tls_type)
1839 || tls_type != GOT_TLS_IE))
1840 {
1841 if (old_tls_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (tls_type))
1842 tls_type = old_tls_type;
1843 else if (GOT_TLS_GD_ANY_P (old_tls_type)
1844 && GOT_TLS_GD_ANY_P (tls_type))
1845 tls_type |= old_tls_type;
1846 else
1847 {
1848 if (h)
1849 name = h->root.root.string;
1850 else
1851 name = bfd_elf_sym_name (abfd, symtab_hdr,
1852 isym, NULL);
1853 (*_bfd_error_handler)
1854 (_("%B: '%s' accessed both as normal and thread local symbol"),
1855 abfd, name);
1856 bfd_set_error (bfd_error_bad_value);
1857 return FALSE;
1858 }
1859 }
1860
1861 if (old_tls_type != tls_type)
1862 {
1863 if (h != NULL)
1864 elf_x86_64_hash_entry (h)->tls_type = tls_type;
1865 else
1866 elf_x86_64_local_got_tls_type (abfd) [r_symndx] = tls_type;
1867 }
1868 }
1869 /* Fall through */
1870
1871 case R_X86_64_GOTOFF64:
1872 case R_X86_64_GOTPC32:
1873 case R_X86_64_GOTPC64:
1874 create_got:
1875 if (htab->elf.sgot == NULL)
1876 {
1877 if (htab->elf.dynobj == NULL)
1878 htab->elf.dynobj = abfd;
1879 if (!_bfd_elf_create_got_section (htab->elf.dynobj,
1880 info))
1881 return FALSE;
1882 }
1883 break;
1884
1885 case R_X86_64_PLT32:
1886 case R_X86_64_PLT32_BND:
1887 /* This symbol requires a procedure linkage table entry. We
1888 actually build the entry in adjust_dynamic_symbol,
1889 because this might be a case of linking PIC code which is
1890 never referenced by a dynamic object, in which case we
1891 don't need to generate a procedure linkage table entry
1892 after all. */
1893
1894 /* If this is a local symbol, we resolve it directly without
1895 creating a procedure linkage table entry. */
1896 if (h == NULL)
1897 continue;
1898
1899 h->needs_plt = 1;
1900 h->plt.refcount += 1;
1901 break;
1902
1903 case R_X86_64_PLTOFF64:
1904 /* This tries to form the 'address' of a function relative
1905 to the GOT. For global symbols we need a PLT entry. */
1906 if (h != NULL)
1907 {
1908 h->needs_plt = 1;
1909 h->plt.refcount += 1;
1910 }
1911 goto create_got;
1912
1913 case R_X86_64_SIZE32:
1914 case R_X86_64_SIZE64:
1915 size_reloc = TRUE;
1916 goto do_size;
1917
1918 case R_X86_64_32:
1919 if (!ABI_64_P (abfd))
1920 goto pointer;
1921 case R_X86_64_8:
1922 case R_X86_64_16:
1923 case R_X86_64_32S:
1924 /* Let's help debug shared library creation. These relocs
1925 cannot be used in shared libs. Don't error out for
1926 sections we don't care about, such as debug sections or
1927 non-constant sections. */
1928 if (bfd_link_pic (info)
1929 && (sec->flags & SEC_ALLOC) != 0
1930 && (sec->flags & SEC_READONLY) != 0)
1931 {
1932 if (h)
1933 name = h->root.root.string;
1934 else
1935 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1936 (*_bfd_error_handler)
1937 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
1938 abfd, x86_64_elf_howto_table[r_type].name, name);
1939 bfd_set_error (bfd_error_bad_value);
1940 return FALSE;
1941 }
1942 /* Fall through. */
1943
1944 case R_X86_64_PC8:
1945 case R_X86_64_PC16:
1946 case R_X86_64_PC32:
1947 case R_X86_64_PC32_BND:
1948 case R_X86_64_PC64:
1949 case R_X86_64_64:
1950 pointer:
1951 if (h != NULL && bfd_link_executable (info))
1952 {
1953 /* If this reloc is in a read-only section, we might
1954 need a copy reloc. We can't check reliably at this
1955 stage whether the section is read-only, as input
1956 sections have not yet been mapped to output sections.
1957 Tentatively set the flag for now, and correct in
1958 adjust_dynamic_symbol. */
1959 h->non_got_ref = 1;
1960
1961 /* We may need a .plt entry if the function this reloc
1962 refers to is in a shared lib. */
1963 h->plt.refcount += 1;
1964 if (r_type == R_X86_64_PC32)
1965 {
1966 /* Since something like ".long foo - ." may be used
1967 as pointer, make sure that PLT is used if foo is
1968 a function defined in a shared library. */
1969 if ((sec->flags & SEC_CODE) == 0)
1970 h->pointer_equality_needed = 1;
1971 }
1972 else if (r_type != R_X86_64_PC32_BND
1973 && r_type != R_X86_64_PC64)
1974 {
1975 h->pointer_equality_needed = 1;
1976 /* At run-time, R_X86_64_64 can be resolved for both
1977 x86-64 and x32. But R_X86_64_32 and R_X86_64_32S
1978 can only be resolved for x32. */
1979 if ((sec->flags & SEC_READONLY) == 0
1980 && (r_type == R_X86_64_64
1981 || (!ABI_64_P (abfd)
1982 && (r_type == R_X86_64_32
1983 || r_type == R_X86_64_32S))))
1984 {
1985 struct elf_x86_64_link_hash_entry *eh
1986 = (struct elf_x86_64_link_hash_entry *) h;
1987 eh->func_pointer_refcount += 1;
1988 }
1989 }
1990 }
1991
1992 size_reloc = FALSE;
1993 do_size:
1994 /* If we are creating a shared library, and this is a reloc
1995 against a global symbol, or a non PC relative reloc
1996 against a local symbol, then we need to copy the reloc
1997 into the shared library. However, if we are linking with
1998 -Bsymbolic, we do not need to copy a reloc against a
1999 global symbol which is defined in an object we are
2000 including in the link (i.e., DEF_REGULAR is set). At
2001 this point we have not seen all the input files, so it is
2002 possible that DEF_REGULAR is not set now but will be set
2003 later (it is never cleared). In case of a weak definition,
2004 DEF_REGULAR may be cleared later by a strong definition in
2005 a shared library. We account for that possibility below by
2006 storing information in the dyn_relocs field of the hash
2007 table entry. A similar situation occurs when creating
2008 shared libraries and symbol visibility changes render the
2009 symbol local.
2010
2011 If on the other hand, we are creating an executable, we
2012 may need to keep relocations for symbols satisfied by a
2013 dynamic library if we manage to avoid copy relocs for the
2014 symbol. */
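/* Illustration only: a shared library built from
     extern void bar (void);
     void (*fp) (void) = bar;
   carries an R_X86_64_64 relocation against bar in .data.  It cannot
   be resolved at static link time, so it is counted here and space
   for the matching dynamic relocation is reserved later when the
   dynamic sections are sized.  */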
2015 if ((bfd_link_pic (info)
2016 && (sec->flags & SEC_ALLOC) != 0
2017 && (! IS_X86_64_PCREL_TYPE (r_type)
2018 || (h != NULL
2019 && (! SYMBOLIC_BIND (info, h)
2020 || h->root.type == bfd_link_hash_defweak
2021 || !h->def_regular))))
2022 || (ELIMINATE_COPY_RELOCS
2023 && !bfd_link_pic (info)
2024 && (sec->flags & SEC_ALLOC) != 0
2025 && h != NULL
2026 && (h->root.type == bfd_link_hash_defweak
2027 || !h->def_regular)))
2028 {
2029 struct elf_dyn_relocs *p;
2030 struct elf_dyn_relocs **head;
2031
2032 /* We must copy these reloc types into the output file.
2033 Create a reloc section in dynobj and make room for
2034 this reloc. */
2035 if (sreloc == NULL)
2036 {
2037 if (htab->elf.dynobj == NULL)
2038 htab->elf.dynobj = abfd;
2039
2040 sreloc = _bfd_elf_make_dynamic_reloc_section
2041 (sec, htab->elf.dynobj, ABI_64_P (abfd) ? 3 : 2,
2042 abfd, /*rela?*/ TRUE);
2043
2044 if (sreloc == NULL)
2045 return FALSE;
2046 }
2047
2048 /* If this is a global symbol, we count the number of
2049 relocations we need for this symbol. */
2050 if (h != NULL)
2051 {
2052 head = &((struct elf_x86_64_link_hash_entry *) h)->dyn_relocs;
2053 }
2054 else
2055 {
2056 /* Track dynamic relocs needed for local syms too.
2057 We really need local syms available to do this
2058 easily. Oh well. */
2059 asection *s;
2060 void **vpp;
2061
2062 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2063 abfd, r_symndx);
2064 if (isym == NULL)
2065 return FALSE;
2066
2067 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
2068 if (s == NULL)
2069 s = sec;
2070
2071 /* Beware of type punned pointers vs strict aliasing
2072 rules. */
2073 vpp = &(elf_section_data (s)->local_dynrel);
2074 head = (struct elf_dyn_relocs **)vpp;
2075 }
2076
2077 p = *head;
2078 if (p == NULL || p->sec != sec)
2079 {
2080 bfd_size_type amt = sizeof *p;
2081
2082 p = ((struct elf_dyn_relocs *)
2083 bfd_alloc (htab->elf.dynobj, amt));
2084 if (p == NULL)
2085 return FALSE;
2086 p->next = *head;
2087 *head = p;
2088 p->sec = sec;
2089 p->count = 0;
2090 p->pc_count = 0;
2091 }
2092
2093 p->count += 1;
2094 /* Count size relocation as PC-relative relocation. */
2095 if (IS_X86_64_PCREL_TYPE (r_type) || size_reloc)
2096 p->pc_count += 1;
2097 }
2098 break;
2099
2100 /* This relocation describes the C++ object vtable hierarchy.
2101 Reconstruct it for later use during GC. */
2102 case R_X86_64_GNU_VTINHERIT:
2103 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
2104 return FALSE;
2105 break;
2106
2107 /* This relocation describes which C++ vtable entries are actually
2108 used. Record for later use during GC. */
2109 case R_X86_64_GNU_VTENTRY:
2110 BFD_ASSERT (h != NULL);
2111 if (h != NULL
2112 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
2113 return FALSE;
2114 break;
2115
2116 default:
2117 break;
2118 }
2119
2120 if (use_plt_got
2121 && h != NULL
2122 && h->plt.refcount > 0
2123 && (((info->flags & DF_BIND_NOW) && !h->pointer_equality_needed)
2124 || h->got.refcount > 0)
2125 && htab->plt_got == NULL)
2126 {
2127 /* Create the GOT procedure linkage table. */
2128 unsigned int plt_got_align;
2129 const struct elf_backend_data *bed;
2130
2131 bed = get_elf_backend_data (info->output_bfd);
2132 BFD_ASSERT (sizeof (elf_x86_64_legacy_plt2_entry) == 8
2133 && (sizeof (elf_x86_64_bnd_plt2_entry)
2134 == sizeof (elf_x86_64_legacy_plt2_entry)));
2135 plt_got_align = 3;
2136
2137 if (htab->elf.dynobj == NULL)
2138 htab->elf.dynobj = abfd;
2139 htab->plt_got
2140 = bfd_make_section_anyway_with_flags (htab->elf.dynobj,
2141 ".plt.got",
2142 (bed->dynamic_sec_flags
2143 | SEC_ALLOC
2144 | SEC_CODE
2145 | SEC_LOAD
2146 | SEC_READONLY));
2147 if (htab->plt_got == NULL
2148 || !bfd_set_section_alignment (htab->elf.dynobj,
2149 htab->plt_got,
2150 plt_got_align))
2151 return FALSE;
2152 }
2153
2154 if (r_type == R_X86_64_GOTPCREL
2155 && (h == NULL || h->type != STT_GNU_IFUNC))
2156 sec->need_convert_mov_to_lea = 1;
2157 }
2158
2159 return TRUE;
2160 }
2161
2162 /* Return the section that should be marked against GC for a given
2163 relocation. */
2164
2165 static asection *
2166 elf_x86_64_gc_mark_hook (asection *sec,
2167 struct bfd_link_info *info,
2168 Elf_Internal_Rela *rel,
2169 struct elf_link_hash_entry *h,
2170 Elf_Internal_Sym *sym)
2171 {
2172 if (h != NULL)
2173 switch (ELF32_R_TYPE (rel->r_info))
2174 {
2175 case R_X86_64_GNU_VTINHERIT:
2176 case R_X86_64_GNU_VTENTRY:
2177 return NULL;
2178 }
2179
2180 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
2181 }
2182
2183 /* Update the got entry reference counts for the section being removed. */
2184
2185 static bfd_boolean
2186 elf_x86_64_gc_sweep_hook (bfd *abfd, struct bfd_link_info *info,
2187 asection *sec,
2188 const Elf_Internal_Rela *relocs)
2189 {
2190 struct elf_x86_64_link_hash_table *htab;
2191 Elf_Internal_Shdr *symtab_hdr;
2192 struct elf_link_hash_entry **sym_hashes;
2193 bfd_signed_vma *local_got_refcounts;
2194 const Elf_Internal_Rela *rel, *relend;
2195
2196 if (bfd_link_relocatable (info))
2197 return TRUE;
2198
2199 htab = elf_x86_64_hash_table (info);
2200 if (htab == NULL)
2201 return FALSE;
2202
2203 elf_section_data (sec)->local_dynrel = NULL;
2204
2205 symtab_hdr = &elf_symtab_hdr (abfd);
2206 sym_hashes = elf_sym_hashes (abfd);
2207 local_got_refcounts = elf_local_got_refcounts (abfd);
2208
2209 htab = elf_x86_64_hash_table (info);
2210 relend = relocs + sec->reloc_count;
2211 for (rel = relocs; rel < relend; rel++)
2212 {
2213 unsigned long r_symndx;
2214 unsigned int r_type;
2215 struct elf_link_hash_entry *h = NULL;
2216 bfd_boolean pointer_reloc;
2217
2218 r_symndx = htab->r_sym (rel->r_info);
2219 if (r_symndx >= symtab_hdr->sh_info)
2220 {
2221 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
2222 while (h->root.type == bfd_link_hash_indirect
2223 || h->root.type == bfd_link_hash_warning)
2224 h = (struct elf_link_hash_entry *) h->root.u.i.link;
2225 }
2226 else
2227 {
2228 /* A local symbol. */
2229 Elf_Internal_Sym *isym;
2230
2231 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2232 abfd, r_symndx);
2233
2234 /* Check relocation against local STT_GNU_IFUNC symbol. */
2235 if (isym != NULL
2236 && ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
2237 {
2238 h = elf_x86_64_get_local_sym_hash (htab, abfd, rel, FALSE);
2239 if (h == NULL)
2240 abort ();
2241 }
2242 }
2243
2244 if (h)
2245 {
2246 struct elf_x86_64_link_hash_entry *eh;
2247 struct elf_dyn_relocs **pp;
2248 struct elf_dyn_relocs *p;
2249
2250 eh = (struct elf_x86_64_link_hash_entry *) h;
2251
2252 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; pp = &p->next)
2253 if (p->sec == sec)
2254 {
2255 /* Everything must go for SEC. */
2256 *pp = p->next;
2257 break;
2258 }
2259 }
2260
2261 r_type = ELF32_R_TYPE (rel->r_info);
2262 if (! elf_x86_64_tls_transition (info, abfd, sec, NULL,
2263 symtab_hdr, sym_hashes,
2264 &r_type, GOT_UNKNOWN,
2265 rel, relend, h, r_symndx))
2266 return FALSE;
2267
2268 pointer_reloc = FALSE;
2269 switch (r_type)
2270 {
2271 case R_X86_64_TLSLD:
2272 if (htab->tls_ld_got.refcount > 0)
2273 htab->tls_ld_got.refcount -= 1;
2274 break;
2275
2276 case R_X86_64_TLSGD:
2277 case R_X86_64_GOTPC32_TLSDESC:
2278 case R_X86_64_TLSDESC_CALL:
2279 case R_X86_64_GOTTPOFF:
2280 case R_X86_64_GOT32:
2281 case R_X86_64_GOTPCREL:
2282 case R_X86_64_GOT64:
2283 case R_X86_64_GOTPCREL64:
2284 case R_X86_64_GOTPLT64:
2285 if (h != NULL)
2286 {
2287 if (h->got.refcount > 0)
2288 h->got.refcount -= 1;
2289 if (h->type == STT_GNU_IFUNC)
2290 {
2291 if (h->plt.refcount > 0)
2292 h->plt.refcount -= 1;
2293 }
2294 }
2295 else if (local_got_refcounts != NULL)
2296 {
2297 if (local_got_refcounts[r_symndx] > 0)
2298 local_got_refcounts[r_symndx] -= 1;
2299 }
2300 break;
2301
2302 case R_X86_64_32:
2303 case R_X86_64_32S:
2304 pointer_reloc = !ABI_64_P (abfd);
2305 goto pointer;
2306
2307 case R_X86_64_64:
2308 pointer_reloc = TRUE;
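/* Fall through. */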
2309 case R_X86_64_8:
2310 case R_X86_64_16:
2311 case R_X86_64_PC8:
2312 case R_X86_64_PC16:
2313 case R_X86_64_PC32:
2314 case R_X86_64_PC32_BND:
2315 case R_X86_64_PC64:
2316 case R_X86_64_SIZE32:
2317 case R_X86_64_SIZE64:
2318 pointer:
2319 if (bfd_link_pic (info)
2320 && (h == NULL || h->type != STT_GNU_IFUNC))
2321 break;
2322 /* Fall thru */
2323
2324 case R_X86_64_PLT32:
2325 case R_X86_64_PLT32_BND:
2326 case R_X86_64_PLTOFF64:
2327 if (h != NULL)
2328 {
2329 if (h->plt.refcount > 0)
2330 h->plt.refcount -= 1;
2331 if (pointer_reloc && (sec->flags & SEC_READONLY) == 0)
2332 {
2333 struct elf_x86_64_link_hash_entry *eh
2334 = (struct elf_x86_64_link_hash_entry *) h;
2335 if (eh->func_pointer_refcount > 0)
2336 eh->func_pointer_refcount -= 1;
2337 }
2338 }
2339 break;
2340
2341 default:
2342 break;
2343 }
2344 }
2345
2346 return TRUE;
2347 }
2348
2349 /* Adjust a symbol defined by a dynamic object and referenced by a
2350 regular object. The current definition is in some section of the
2351 dynamic object, but we're not including those sections. We have to
2352 change the definition to something the rest of the link can
2353 understand. */
2354
2355 static bfd_boolean
2356 elf_x86_64_adjust_dynamic_symbol (struct bfd_link_info *info,
2357 struct elf_link_hash_entry *h)
2358 {
2359 struct elf_x86_64_link_hash_table *htab;
2360 asection *s;
2361 struct elf_x86_64_link_hash_entry *eh;
2362 struct elf_dyn_relocs *p;
2363
2364 /* An STT_GNU_IFUNC symbol must go through the PLT. */
2365 if (h->type == STT_GNU_IFUNC)
2366 {
2367 /* All local STT_GNU_IFUNC references must be treated as local
2368 calls via local PLT. */
2369 if (h->ref_regular
2370 && SYMBOL_CALLS_LOCAL (info, h))
2371 {
2372 bfd_size_type pc_count = 0, count = 0;
2373 struct elf_dyn_relocs **pp;
2374
2375 eh = (struct elf_x86_64_link_hash_entry *) h;
2376 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
2377 {
2378 pc_count += p->pc_count;
2379 p->count -= p->pc_count;
2380 p->pc_count = 0;
2381 count += p->count;
2382 if (p->count == 0)
2383 *pp = p->next;
2384 else
2385 pp = &p->next;
2386 }
2387
2388 if (pc_count || count)
2389 {
2390 h->needs_plt = 1;
2391 h->non_got_ref = 1;
2392 if (h->plt.refcount <= 0)
2393 h->plt.refcount = 1;
2394 else
2395 h->plt.refcount += 1;
2396 }
2397 }
2398
2399 if (h->plt.refcount <= 0)
2400 {
2401 h->plt.offset = (bfd_vma) -1;
2402 h->needs_plt = 0;
2403 }
2404 return TRUE;
2405 }
2406
2407 /* If this is a function, put it in the procedure linkage table. We
2408 will fill in the contents of the procedure linkage table later,
2409 when we know the address of the .got section. */
2410 if (h->type == STT_FUNC
2411 || h->needs_plt)
2412 {
2413 if (h->plt.refcount <= 0
2414 || SYMBOL_CALLS_LOCAL (info, h)
2415 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
2416 && h->root.type == bfd_link_hash_undefweak))
2417 {
2418 /* This case can occur if we saw a PLT32 reloc in an input
2419 file, but the symbol was never referred to by a dynamic
2420 object, or if all references were garbage collected. In
2421 such a case, we don't actually need to build a procedure
2422 linkage table, and we can just do a PC32 reloc instead. */
2423 h->plt.offset = (bfd_vma) -1;
2424 h->needs_plt = 0;
2425 }
2426
2427 return TRUE;
2428 }
2429 else
2430 /* It's possible that we incorrectly decided a .plt reloc was
2431 needed for an R_X86_64_PC32 reloc to a non-function sym in
2432 check_relocs. We can't decide accurately between function and
2433 non-function syms in check_relocs; objects loaded later in
2434 the link may change h->type. So fix it now. */
2435 h->plt.offset = (bfd_vma) -1;
2436
2437 /* If this is a weak symbol, and there is a real definition, the
2438 processor independent code will have arranged for us to see the
2439 real definition first, and we can just use the same value. */
2440 if (h->u.weakdef != NULL)
2441 {
2442 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
2443 || h->u.weakdef->root.type == bfd_link_hash_defweak);
2444 h->root.u.def.section = h->u.weakdef->root.u.def.section;
2445 h->root.u.def.value = h->u.weakdef->root.u.def.value;
2446 if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
2447 {
2448 eh = (struct elf_x86_64_link_hash_entry *) h;
2449 h->non_got_ref = h->u.weakdef->non_got_ref;
2450 eh->needs_copy = h->u.weakdef->needs_copy;
2451 }
2452 return TRUE;
2453 }
2454
2455 /* This is a reference to a symbol defined by a dynamic object which
2456 is not a function. */
2457
2458 /* If we are creating a shared library, we must presume that the
2459 only references to the symbol are via the global offset table.
2460 For such cases we need not do anything here; the relocations will
2461 be handled correctly by relocate_section. */
2462 if (!bfd_link_executable (info))
2463 return TRUE;
2464
2465 /* If there are no references to this symbol that do not use the
2466 GOT, we don't need to generate a copy reloc. */
2467 if (!h->non_got_ref)
2468 return TRUE;
2469
2470 /* If -z nocopyreloc was given, we won't generate them either. */
2471 if (info->nocopyreloc)
2472 {
2473 h->non_got_ref = 0;
2474 return TRUE;
2475 }
2476
2477 if (ELIMINATE_COPY_RELOCS)
2478 {
2479 eh = (struct elf_x86_64_link_hash_entry *) h;
2480 for (p = eh->dyn_relocs; p != NULL; p = p->next)
2481 {
2482 s = p->sec->output_section;
2483 if (s != NULL && (s->flags & SEC_READONLY) != 0)
2484 break;
2485 }
2486
2487 /* If we didn't find any dynamic relocs in read-only sections, then
2488 we'll be keeping the dynamic relocs and avoiding the copy reloc. */
2489 if (p == NULL)
2490 {
2491 h->non_got_ref = 0;
2492 return TRUE;
2493 }
2494 }
2495
2496 /* We must allocate the symbol in our .dynbss section, which will
2497 become part of the .bss section of the executable. There will be
2498 an entry for this symbol in the .dynsym section. The dynamic
2499 object will contain position independent code, so all references
2500 from the dynamic object to this symbol will go through the global
2501 offset table. The dynamic linker will use the .dynsym entry to
2502 determine the address it must put in the global offset table, so
2503 both the dynamic object and the regular object will refer to the
2504 same memory location for the variable. */
2505
2506 htab = elf_x86_64_hash_table (info);
2507 if (htab == NULL)
2508 return FALSE;
2509
2510 /* We must generate a R_X86_64_COPY reloc to tell the dynamic linker
2511 to copy the initial value out of the dynamic object and into the
2512 runtime process image. */
2513 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
2514 {
2515 const struct elf_backend_data *bed;
2516 bed = get_elf_backend_data (info->output_bfd);
2517 htab->srelbss->size += bed->s->sizeof_rela;
2518 h->needs_copy = 1;
2519 }
2520
2521 s = htab->sdynbss;
2522
2523 return _bfd_elf_adjust_dynamic_copy (info, h, s);
2524 }
2525
2526 /* Allocate space in .plt, .got and associated reloc sections for
2527 dynamic relocs. */
2528
2529 static bfd_boolean
2530 elf_x86_64_allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf)
2531 {
2532 struct bfd_link_info *info;
2533 struct elf_x86_64_link_hash_table *htab;
2534 struct elf_x86_64_link_hash_entry *eh;
2535 struct elf_dyn_relocs *p;
2536 const struct elf_backend_data *bed;
2537 unsigned int plt_entry_size;
2538
2539 if (h->root.type == bfd_link_hash_indirect)
2540 return TRUE;
2541
2542 eh = (struct elf_x86_64_link_hash_entry *) h;
2543
2544 info = (struct bfd_link_info *) inf;
2545 htab = elf_x86_64_hash_table (info);
2546 if (htab == NULL)
2547 return FALSE;
2548 bed = get_elf_backend_data (info->output_bfd);
2549 plt_entry_size = GET_PLT_ENTRY_SIZE (info->output_bfd);
2550
2551 /* We can't use the GOT PLT if pointer equality is needed since
2552 finish_dynamic_symbol won't clear symbol value and the dynamic
2553 linker won't update the GOT slot. We will get into an infinite
2554 loop at run-time. */
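/* Sketch of why: a .plt.got entry is just an indirect jump through
   the symbol's regular GOT slot, roughly
     jmp *name@GOTPCREL(%rip)	# ff 25 <pc-relative offset of GOT slot>
   with no lazy-binding stub behind it, so the GOT slot must receive
   the symbol's final address.  If pointer equality forced the symbol
   value to stay at its PLT entry, the slot would point back at the
   jump and the program would loop forever.  */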
2555 if (htab->plt_got != NULL
2556 && h->type != STT_GNU_IFUNC
2557 && !h->pointer_equality_needed
2558 && h->plt.refcount > 0
2559 && h->got.refcount > 0)
2560 {
2561 /* Don't use the regular PLT if there are both GOT and GOTPLT
2562 relocations. */
2563 h->plt.offset = (bfd_vma) -1;
2564
2565 /* Use the GOT PLT. */
2566 eh->plt_got.refcount = 1;
2567 }
2568
2569 /* Clear the reference count of function pointer relocations if
2570 symbol isn't a normal function. */
2571 if (h->type != STT_FUNC)
2572 eh->func_pointer_refcount = 0;
2573
2574 /* Since an STT_GNU_IFUNC symbol must go through the PLT, we handle
2575 it here if it is defined and referenced in a non-shared object. */
2576 if (h->type == STT_GNU_IFUNC
2577 && h->def_regular)
2578 {
2579 if (_bfd_elf_allocate_ifunc_dyn_relocs (info, h,
2580 &eh->dyn_relocs,
2581 plt_entry_size,
2582 plt_entry_size,
2583 GOT_ENTRY_SIZE))
2584 {
2585 asection *s = htab->plt_bnd;
2586 if (h->plt.offset != (bfd_vma) -1 && s != NULL)
2587 {
2588 /* Use the .plt.bnd section if it is created. */
2589 eh->plt_bnd.offset = s->size;
2590
2591 /* Make room for this entry in the .plt.bnd section. */
2592 s->size += sizeof (elf_x86_64_legacy_plt2_entry);
2593 }
2594
2595 return TRUE;
2596 }
2597 else
2598 return FALSE;
2599 }
2600 /* Don't create the PLT entry if there are only function pointer
2601 relocations which can be resolved at run-time. */
2602 else if (htab->elf.dynamic_sections_created
2603 && (h->plt.refcount > eh->func_pointer_refcount
2604 || eh->plt_got.refcount > 0))
2605 {
2606 bfd_boolean use_plt_got;
2607
2608 /* Clear the reference count of function pointer relocations
2609 if PLT is used. */
2610 eh->func_pointer_refcount = 0;
2611
2612 if ((info->flags & DF_BIND_NOW) && !h->pointer_equality_needed)
2613 {
2614 /* Don't use the regular PLT for DF_BIND_NOW. */
2615 h->plt.offset = (bfd_vma) -1;
2616
2617 /* Use the GOT PLT. */
2618 h->got.refcount = 1;
2619 eh->plt_got.refcount = 1;
2620 }
2621
2622 use_plt_got = eh->plt_got.refcount > 0;
2623
2624 /* Make sure this symbol is output as a dynamic symbol.
2625 Undefined weak syms won't yet be marked as dynamic. */
2626 if (h->dynindx == -1
2627 && !h->forced_local)
2628 {
2629 if (! bfd_elf_link_record_dynamic_symbol (info, h))
2630 return FALSE;
2631 }
2632
2633 if (bfd_link_pic (info)
2634 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
2635 {
2636 asection *s = htab->elf.splt;
2637 asection *bnd_s = htab->plt_bnd;
2638 asection *got_s = htab->plt_got;
2639
2640 /* If this is the first .plt entry, make room for the special
2641 first entry. The .plt section is used by prelink to undo
2642 prelinking for dynamic relocations. */
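/* The reserved first entry is the lazy-resolution trampoline,
   roughly
     pushq GOT+8(%rip)	# push the link map (GOT[1])
     jmpq *GOT+16(%rip)	# jump to the resolver stored in GOT[2]
   and each symbol then gets its own plt_entry_size-byte slot after
   it.  */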
2643 if (s->size == 0)
2644 s->size = plt_entry_size;
2645
2646 if (use_plt_got)
2647 eh->plt_got.offset = got_s->size;
2648 else
2649 {
2650 h->plt.offset = s->size;
2651 if (bnd_s)
2652 eh->plt_bnd.offset = bnd_s->size;
2653 }
2654
2655 /* If this symbol is not defined in a regular file, and we are
2656 not generating a shared library, then set the symbol to this
2657 location in the .plt. This is required to make function
2658 pointers compare as equal between the normal executable and
2659 the shared library. */
2660 if (! bfd_link_pic (info)
2661 && !h->def_regular)
2662 {
2663 if (use_plt_got)
2664 {
2665 /* We need to make a call to the entry of the GOT PLT
2666 instead of regular PLT entry. */
2667 h->root.u.def.section = got_s;
2668 h->root.u.def.value = eh->plt_got.offset;
2669 }
2670 else
2671 {
2672 if (bnd_s)
2673 {
2674 /* We need to make a call to the entry of the second
2675 PLT instead of regular PLT entry. */
2676 h->root.u.def.section = bnd_s;
2677 h->root.u.def.value = eh->plt_bnd.offset;
2678 }
2679 else
2680 {
2681 h->root.u.def.section = s;
2682 h->root.u.def.value = h->plt.offset;
2683 }
2684 }
2685 }
2686
2687 /* Make room for this entry. */
2688 if (use_plt_got)
2689 got_s->size += sizeof (elf_x86_64_legacy_plt2_entry);
2690 else
2691 {
2692 s->size += plt_entry_size;
2693 if (bnd_s)
2694 bnd_s->size += sizeof (elf_x86_64_legacy_plt2_entry);
2695
2696 /* We also need to make an entry in the .got.plt section,
2697 which will be placed in the .got section by the linker
2698 script. */
2699 htab->elf.sgotplt->size += GOT_ENTRY_SIZE;
2700
2701 /* We also need to make an entry in the .rela.plt
2702 section. */
2703 htab->elf.srelplt->size += bed->s->sizeof_rela;
2704 htab->elf.srelplt->reloc_count++;
2705 }
2706 }
2707 else
2708 {
2709 h->plt.offset = (bfd_vma) -1;
2710 h->needs_plt = 0;
2711 }
2712 }
2713 else
2714 {
2715 h->plt.offset = (bfd_vma) -1;
2716 h->needs_plt = 0;
2717 }
2718
2719 eh->tlsdesc_got = (bfd_vma) -1;
2720
2721 /* If R_X86_64_GOTTPOFF symbol is now local to the binary,
2722 make it a R_X86_64_TPOFF32 requiring no GOT entry. */
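/* This is the usual IE->LE transition; relocate_section rewrites the
   access in place, e.g. (sketch)
     movq foo@gottpoff(%rip), %rax   ->   movq $foo@tpoff, %rax
   so no GOT slot needs to be reserved for foo here.  */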
2723 if (h->got.refcount > 0
2724 && bfd_link_executable (info)
2725 && h->dynindx == -1
2726 && elf_x86_64_hash_entry (h)->tls_type == GOT_TLS_IE)
2727 {
2728 h->got.offset = (bfd_vma) -1;
2729 }
2730 else if (h->got.refcount > 0)
2731 {
2732 asection *s;
2733 bfd_boolean dyn;
2734 int tls_type = elf_x86_64_hash_entry (h)->tls_type;
2735
2736 /* Make sure this symbol is output as a dynamic symbol.
2737 Undefined weak syms won't yet be marked as dynamic. */
2738 if (h->dynindx == -1
2739 && !h->forced_local)
2740 {
2741 if (! bfd_elf_link_record_dynamic_symbol (info, h))
2742 return FALSE;
2743 }
2744
2745 if (GOT_TLS_GDESC_P (tls_type))
2746 {
2747 eh->tlsdesc_got = htab->elf.sgotplt->size
2748 - elf_x86_64_compute_jump_table_size (htab);
2749 htab->elf.sgotplt->size += 2 * GOT_ENTRY_SIZE;
2750 h->got.offset = (bfd_vma) -2;
2751 }
2752 if (! GOT_TLS_GDESC_P (tls_type)
2753 || GOT_TLS_GD_P (tls_type))
2754 {
2755 s = htab->elf.sgot;
2756 h->got.offset = s->size;
2757 s->size += GOT_ENTRY_SIZE;
2758 if (GOT_TLS_GD_P (tls_type))
2759 s->size += GOT_ENTRY_SIZE;
2760 }
2761 dyn = htab->elf.dynamic_sections_created;
2762 /* R_X86_64_TLSGD needs one dynamic relocation if local symbol
2763 and two if global.
2764 R_X86_64_GOTTPOFF needs one dynamic relocation. */
2765 if ((GOT_TLS_GD_P (tls_type) && h->dynindx == -1)
2766 || tls_type == GOT_TLS_IE)
2767 htab->elf.srelgot->size += bed->s->sizeof_rela;
2768 else if (GOT_TLS_GD_P (tls_type))
2769 htab->elf.srelgot->size += 2 * bed->s->sizeof_rela;
2770 else if (! GOT_TLS_GDESC_P (tls_type)
2771 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
2772 || h->root.type != bfd_link_hash_undefweak)
2773 && (bfd_link_pic (info)
2774 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
2775 htab->elf.srelgot->size += bed->s->sizeof_rela;
2776 if (GOT_TLS_GDESC_P (tls_type))
2777 {
2778 htab->elf.srelplt->size += bed->s->sizeof_rela;
2779 htab->tlsdesc_plt = (bfd_vma) -1;
2780 }
2781 }
2782 else
2783 h->got.offset = (bfd_vma) -1;
2784
2785 if (eh->dyn_relocs == NULL)
2786 return TRUE;
2787
2788 /* In the shared -Bsymbolic case, discard space allocated for
2789 dynamic pc-relative relocs against symbols which turn out to be
2790 defined in regular objects. For the normal shared case, discard
2791 space for pc-relative relocs that have become local due to symbol
2792 visibility changes. */
2793
2794 if (bfd_link_pic (info))
2795 {
2796 /* Relocs that use pc_count are those that appear on a call
2797 insn, or certain REL relocs that can be generated via assembly.
2798 We want calls to protected symbols to resolve directly to the
2799 function rather than going via the plt. If people want
2800 function pointer comparisons to work as expected then they
2801 should avoid writing weird assembly. */
2802 if (SYMBOL_CALLS_LOCAL (info, h))
2803 {
2804 struct elf_dyn_relocs **pp;
2805
2806 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
2807 {
2808 p->count -= p->pc_count;
2809 p->pc_count = 0;
2810 if (p->count == 0)
2811 *pp = p->next;
2812 else
2813 pp = &p->next;
2814 }
2815 }
2816
2817 /* Also discard relocs on undefined weak syms with non-default
2818 visibility. */
2819 if (eh->dyn_relocs != NULL)
2820 {
2821 if (h->root.type == bfd_link_hash_undefweak)
2822 {
2823 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
2824 eh->dyn_relocs = NULL;
2825
2826 /* Make sure undefined weak symbols are output as a dynamic
2827 symbol in PIEs. */
2828 else if (h->dynindx == -1
2829 && ! h->forced_local
2830 && ! bfd_elf_link_record_dynamic_symbol (info, h))
2831 return FALSE;
2832 }
2833 /* For PIE, discard space for pc-relative relocs against
2834 symbols which turn out to need copy relocs. */
2835 else if (bfd_link_executable (info)
2836 && (h->needs_copy || eh->needs_copy)
2837 && h->def_dynamic
2838 && !h->def_regular)
2839 {
2840 struct elf_dyn_relocs **pp;
2841
2842 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
2843 {
2844 if (p->pc_count != 0)
2845 *pp = p->next;
2846 else
2847 pp = &p->next;
2848 }
2849 }
2850 }
2851 }
2852 else if (ELIMINATE_COPY_RELOCS)
2853 {
2854 /* For the non-shared case, discard space for relocs against
2855 symbols which turn out to need copy relocs or are not
2856 dynamic. Keep dynamic relocations for run-time function
2857 pointer initialization. */
2858
2859 if ((!h->non_got_ref || eh->func_pointer_refcount > 0)
2860 && ((h->def_dynamic
2861 && !h->def_regular)
2862 || (htab->elf.dynamic_sections_created
2863 && (h->root.type == bfd_link_hash_undefweak
2864 || h->root.type == bfd_link_hash_undefined))))
2865 {
2866 /* Make sure this symbol is output as a dynamic symbol.
2867 Undefined weak syms won't yet be marked as dynamic. */
2868 if (h->dynindx == -1
2869 && ! h->forced_local
2870 && ! bfd_elf_link_record_dynamic_symbol (info, h))
2871 return FALSE;
2872
2873 /* If that succeeded, we know we'll be keeping all the
2874 relocs. */
2875 if (h->dynindx != -1)
2876 goto keep;
2877 }
2878
2879 eh->dyn_relocs = NULL;
2880 eh->func_pointer_refcount = 0;
2881
2882 keep: ;
2883 }
2884
2885 /* Finally, allocate space. */
2886 for (p = eh->dyn_relocs; p != NULL; p = p->next)
2887 {
2888 asection * sreloc;
2889
2890 sreloc = elf_section_data (p->sec)->sreloc;
2891
2892 BFD_ASSERT (sreloc != NULL);
2893
2894 sreloc->size += p->count * bed->s->sizeof_rela;
2895 }
2896
2897 return TRUE;
2898 }
2899
2900 /* Allocate space in .plt, .got and associated reloc sections for
2901 local dynamic relocs. */
2902
2903 static bfd_boolean
2904 elf_x86_64_allocate_local_dynrelocs (void **slot, void *inf)
2905 {
2906 struct elf_link_hash_entry *h
2907 = (struct elf_link_hash_entry *) *slot;
2908
2909 if (h->type != STT_GNU_IFUNC
2910 || !h->def_regular
2911 || !h->ref_regular
2912 || !h->forced_local
2913 || h->root.type != bfd_link_hash_defined)
2914 abort ();
2915
2916 return elf_x86_64_allocate_dynrelocs (h, inf);
2917 }
2918
2919 /* Find any dynamic relocs that apply to read-only sections. */
2920
2921 static bfd_boolean
2922 elf_x86_64_readonly_dynrelocs (struct elf_link_hash_entry *h,
2923 void * inf)
2924 {
2925 struct elf_x86_64_link_hash_entry *eh;
2926 struct elf_dyn_relocs *p;
2927
2928 /* Skip local IFUNC symbols. */
2929 if (h->forced_local && h->type == STT_GNU_IFUNC)
2930 return TRUE;
2931
2932 eh = (struct elf_x86_64_link_hash_entry *) h;
2933 for (p = eh->dyn_relocs; p != NULL; p = p->next)
2934 {
2935 asection *s = p->sec->output_section;
2936
2937 if (s != NULL && (s->flags & SEC_READONLY) != 0)
2938 {
2939 struct bfd_link_info *info = (struct bfd_link_info *) inf;
2940
2941 info->flags |= DF_TEXTREL;
2942
2943 if ((info->warn_shared_textrel && bfd_link_pic (info))
2944 || info->error_textrel)
2945 info->callbacks->einfo (_("%P: %B: warning: relocation against `%s' in readonly section `%A'\n"),
2946 p->sec->owner, h->root.root.string,
2947 p->sec);
2948
2949 /* Not an error, just cut short the traversal. */
2950 return FALSE;
2951 }
2952 }
2953 return TRUE;
2954 }
2955
2956 /* Convert
2957 mov foo@GOTPCREL(%rip), %reg
2958 to
2959 lea foo(%rip), %reg
2960 when the symbol foo is defined and resolved locally. */
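/* Only the opcode byte changes; for instance
     48 8b 05 xx xx xx xx	mov foo@GOTPCREL(%rip), %rax
   becomes
     48 8d 05 xx xx xx xx	lea foo(%rip), %rax
   and the R_X86_64_GOTPCREL relocation is rewritten into
   R_X86_64_PC32 against foo itself, after which foo's GOT refcount
   can be dropped.  */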
2961
2962 static bfd_boolean
2963 elf_x86_64_convert_mov_to_lea (bfd *abfd, asection *sec,
2964 struct bfd_link_info *link_info)
2965 {
2966 Elf_Internal_Shdr *symtab_hdr;
2967 Elf_Internal_Rela *internal_relocs;
2968 Elf_Internal_Rela *irel, *irelend;
2969 bfd_byte *contents;
2970 struct elf_x86_64_link_hash_table *htab;
2971 bfd_boolean changed_contents;
2972 bfd_boolean changed_relocs;
2973 bfd_signed_vma *local_got_refcounts;
2974 bfd_vma maxpagesize;
2975
2976 /* Don't even try to convert non-ELF outputs. */
2977 if (!is_elf_hash_table (link_info->hash))
2978 return FALSE;
2979
2980 /* Nothing to do if there is no need or no output. */
2981 if ((sec->flags & (SEC_CODE | SEC_RELOC)) != (SEC_CODE | SEC_RELOC)
2982 || sec->need_convert_mov_to_lea == 0
2983 || bfd_is_abs_section (sec->output_section))
2984 return TRUE;
2985
2986 symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
2987
2988 /* Load the relocations for this section. */
2989 internal_relocs = (_bfd_elf_link_read_relocs
2990 (abfd, sec, NULL, (Elf_Internal_Rela *) NULL,
2991 link_info->keep_memory));
2992 if (internal_relocs == NULL)
2993 return FALSE;
2994
2995 htab = elf_x86_64_hash_table (link_info);
2996 changed_contents = FALSE;
2997 changed_relocs = FALSE;
2998 local_got_refcounts = elf_local_got_refcounts (abfd);
2999 maxpagesize = get_elf_backend_data (abfd)->maxpagesize;
3000
3001 /* Get the section contents. */
3002 if (elf_section_data (sec)->this_hdr.contents != NULL)
3003 contents = elf_section_data (sec)->this_hdr.contents;
3004 else
3005 {
3006 if (!bfd_malloc_and_get_section (abfd, sec, &contents))
3007 goto error_return;
3008 }
3009
3010 irelend = internal_relocs + sec->reloc_count;
3011 for (irel = internal_relocs; irel < irelend; irel++)
3012 {
3013 unsigned int r_type = ELF32_R_TYPE (irel->r_info);
3014 unsigned int r_symndx = htab->r_sym (irel->r_info);
3015 unsigned int indx;
3016 struct elf_link_hash_entry *h;
3017 asection *tsec;
3018 char symtype;
3019 bfd_vma toff, roff;
3020 unsigned int opcode;
3021
3022 if (r_type != R_X86_64_GOTPCREL)
3023 continue;
3024
3025 roff = irel->r_offset;
3026
3027 if (roff < 2)
3028 continue;
3029
3030 opcode = bfd_get_8 (abfd, contents + roff - 2);
3031
3032 /* PR ld/18591: Don't convert R_X86_64_GOTPCREL relocation if it
3033 isn't for mov instruction. */
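/* R_X86_64_GOTPCREL also appears on other instructions, for instance
     call *foo@GOTPCREL(%rip)		# ff 15 ...
     addq foo@GOTPCREL(%rip), %rax	# 48 03 05 ...
   which must keep their GOT entry, so anything whose byte at
   roff - 2 isn't the mov opcode 0x8b is left untouched.  */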
3034 if (opcode != 0x8b)
3035 continue;
3036
3037 /* Get the symbol referred to by the reloc. */
3038 if (r_symndx < symtab_hdr->sh_info)
3039 {
3040 Elf_Internal_Sym *isym;
3041
3042 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
3043 abfd, r_symndx);
3044
3045 symtype = ELF_ST_TYPE (isym->st_info);
3046
3047 /* Keep the R_X86_64_GOTPCREL relocation for STT_GNU_IFUNC
3048 symbols and skip relocations against undefined symbols. */
3049 if (symtype == STT_GNU_IFUNC || isym->st_shndx == SHN_UNDEF)
3050 continue;
3051
3052 if (isym->st_shndx == SHN_ABS)
3053 tsec = bfd_abs_section_ptr;
3054 else if (isym->st_shndx == SHN_COMMON)
3055 tsec = bfd_com_section_ptr;
3056 else if (isym->st_shndx == SHN_X86_64_LCOMMON)
3057 tsec = &_bfd_elf_large_com_section;
3058 else
3059 tsec = bfd_section_from_elf_index (abfd, isym->st_shndx);
3060
3061 h = NULL;
3062 toff = isym->st_value;
3063 }
3064 else
3065 {
3066 indx = r_symndx - symtab_hdr->sh_info;
3067 h = elf_sym_hashes (abfd)[indx];
3068 BFD_ASSERT (h != NULL);
3069
3070 while (h->root.type == bfd_link_hash_indirect
3071 || h->root.type == bfd_link_hash_warning)
3072 h = (struct elf_link_hash_entry *) h->root.u.i.link;
3073
3074 /* STT_GNU_IFUNC must keep R_X86_64_GOTPCREL relocation. We also
3075 avoid optimizing _DYNAMIC since ld.so may use its link-time
3076 address. */
3077 if ((h->root.type == bfd_link_hash_defined
3078 || h->root.type == bfd_link_hash_defweak)
3079 && h->type != STT_GNU_IFUNC
3080 && h != htab->elf.hdynamic
3081 && SYMBOL_REFERENCES_LOCAL (link_info, h))
3082 {
3083 tsec = h->root.u.def.section;
3084 toff = h->root.u.def.value;
3085 symtype = h->type;
3086 }
3087 else
3088 continue;
3089 }
3090
3091 if (tsec->sec_info_type == SEC_INFO_TYPE_MERGE)
3092 {
3093 /* At this stage in linking, no SEC_MERGE symbol has been
3094 adjusted, so all references to such symbols need to be
3095 passed through _bfd_merged_section_offset. (Later, in
3096 relocate_section, all SEC_MERGE symbols *except* for
3097 section symbols have been adjusted.)
3098
3099 gas may reduce relocations against symbols in SEC_MERGE
3100 sections to a relocation against the section symbol when
3101 the original addend was zero. When the reloc is against
3102 a section symbol we should include the addend in the
3103 offset passed to _bfd_merged_section_offset, since the
3104 location of interest is the original symbol. On the
3105 other hand, an access to "sym+addend" where "sym" is not
3106 a section symbol should not include the addend; Such an
3107 access is presumed to be an offset from "sym"; The
3108 location of interest is just "sym". */
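/* Concretely: for a reloc against the section symbol of a SEC_MERGE
   section with addend 8, the merged offset of "section + 8" must be
   looked up, since the entity of interest lives at offset 8; for a
   reloc against a named symbol with addend 8, the symbol's own
   merged offset is looked up first and the 8 is added afterwards.  */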
3109 if (symtype == STT_SECTION)
3110 toff += irel->r_addend;
3111
3112 toff = _bfd_merged_section_offset (abfd, &tsec,
3113 elf_section_data (tsec)->sec_info,
3114 toff);
3115
3116 if (symtype != STT_SECTION)
3117 toff += irel->r_addend;
3118 }
3119 else
3120 toff += irel->r_addend;
3121
3122 /* Don't convert if the resulting R_X86_64_PC32 relocation would overflow. */
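/* The range tests below use the usual bias trick for a signed 32-bit
   check: with 64-bit bfd_vma arithmetic,
     disp + 0x80000000 > 0xffffffff
   holds exactly when disp lies outside [-0x80000000, 0x7fffffff].  */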
3123 if (tsec->output_section == sec->output_section)
3124 {
3125 if ((toff - roff + 0x80000000) > 0xffffffff)
3126 continue;
3127 }
3128 else
3129 {
3130 asection *asect;
3131 bfd_size_type size;
3132
3133 /* At this point, we don't know the load addresses of the TSEC
3134 and SEC sections. We estimate the distance between
3135 SEC and TSEC. */
3136 size = 0;
3137 for (asect = sec->output_section;
3138 asect != NULL && asect != tsec->output_section;
3139 asect = asect->next)
3140 {
3141 asection *i;
3142 for (i = asect->output_section->map_head.s;
3143 i != NULL;
3144 i = i->map_head.s)
3145 {
3146 size = align_power (size, i->alignment_power);
3147 size += i->size;
3148 }
3149 }
3150
3151 /* Don't convert R_X86_64_GOTPCREL if TSEC isn't placed after
3152 SEC. */
3153 if (asect == NULL)
3154 continue;
3155
3156 /* Take PT_GNU_RELRO segment into account by adding
3157 maxpagesize. */
3158 if ((toff + size + maxpagesize - roff + 0x80000000)
3159 > 0xffffffff)
3160 continue;
3161 }
3162
3163 bfd_put_8 (abfd, 0x8d, contents + roff - 2);
3164 irel->r_info = htab->r_info (r_symndx, R_X86_64_PC32);
3165 changed_contents = TRUE;
3166 changed_relocs = TRUE;
3167
3168 if (h)
3169 {
3170 if (h->got.refcount > 0)
3171 h->got.refcount -= 1;
3172 }
3173 else
3174 {
3175 if (local_got_refcounts != NULL
3176 && local_got_refcounts[r_symndx] > 0)
3177 local_got_refcounts[r_symndx] -= 1;
3178 }
3179 }
3180
3181 if (contents != NULL
3182 && elf_section_data (sec)->this_hdr.contents != contents)
3183 {
3184 if (!changed_contents && !link_info->keep_memory)
3185 free (contents);
3186 else
3187 {
3188 /* Cache the section contents for elf_link_input_bfd. */
3189 elf_section_data (sec)->this_hdr.contents = contents;
3190 }
3191 }
3192
3193 if (elf_section_data (sec)->relocs != internal_relocs)
3194 {
3195 if (!changed_relocs)
3196 free (internal_relocs);
3197 else
3198 elf_section_data (sec)->relocs = internal_relocs;
3199 }
3200
3201 return TRUE;
3202
3203 error_return:
3204 if (contents != NULL
3205 && elf_section_data (sec)->this_hdr.contents != contents)
3206 free (contents);
3207 if (internal_relocs != NULL
3208 && elf_section_data (sec)->relocs != internal_relocs)
3209 free (internal_relocs);
3210 return FALSE;
3211 }
3212
3213 /* Set the sizes of the dynamic sections. */
3214
3215 static bfd_boolean
3216 elf_x86_64_size_dynamic_sections (bfd *output_bfd,
3217 struct bfd_link_info *info)
3218 {
3219 struct elf_x86_64_link_hash_table *htab;
3220 bfd *dynobj;
3221 asection *s;
3222 bfd_boolean relocs;
3223 bfd *ibfd;
3224 const struct elf_backend_data *bed;
3225
3226 htab = elf_x86_64_hash_table (info);
3227 if (htab == NULL)
3228 return FALSE;
3229 bed = get_elf_backend_data (output_bfd);
3230
3231 dynobj = htab->elf.dynobj;
3232 if (dynobj == NULL)
3233 abort ();
3234
3235 if (htab->elf.dynamic_sections_created)
3236 {
3237 /* Set the contents of the .interp section to the interpreter. */
3238 if (bfd_link_executable (info) && !info->nointerp)
3239 {
3240 s = bfd_get_linker_section (dynobj, ".interp");
3241 if (s == NULL)
3242 abort ();
3243 s->size = htab->dynamic_interpreter_size;
3244 s->contents = (unsigned char *) htab->dynamic_interpreter;
3245 }
3246 }
3247
3248 /* Set up .got offsets for local syms, and space for local dynamic
3249 relocs. */
3250 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
3251 {
3252 bfd_signed_vma *local_got;
3253 bfd_signed_vma *end_local_got;
3254 char *local_tls_type;
3255 bfd_vma *local_tlsdesc_gotent;
3256 bfd_size_type locsymcount;
3257 Elf_Internal_Shdr *symtab_hdr;
3258 asection *srel;
3259
3260 if (! is_x86_64_elf (ibfd))
3261 continue;
3262
3263 for (s = ibfd->sections; s != NULL; s = s->next)
3264 {
3265 struct elf_dyn_relocs *p;
3266
3267 if (!elf_x86_64_convert_mov_to_lea (ibfd, s, info))
3268 return FALSE;
3269
3270 for (p = (struct elf_dyn_relocs *)
3271 (elf_section_data (s)->local_dynrel);
3272 p != NULL;
3273 p = p->next)
3274 {
3275 if (!bfd_is_abs_section (p->sec)
3276 && bfd_is_abs_section (p->sec->output_section))
3277 {
3278 /* Input section has been discarded, either because
3279 it is a copy of a linkonce section or due to
3280 linker script /DISCARD/, so we'll be discarding
3281 the relocs too. */
3282 }
3283 else if (p->count != 0)
3284 {
3285 srel = elf_section_data (p->sec)->sreloc;
3286 srel->size += p->count * bed->s->sizeof_rela;
3287 if ((p->sec->output_section->flags & SEC_READONLY) != 0
3288 && (info->flags & DF_TEXTREL) == 0)
3289 {
3290 info->flags |= DF_TEXTREL;
3291 if ((info->warn_shared_textrel && bfd_link_pic (info))
3292 || info->error_textrel)
3293 info->callbacks->einfo (_("%P: %B: warning: relocation in readonly section `%A'\n"),
3294 p->sec->owner, p->sec);
3295 }
3296 }
3297 }
3298 }
3299
3300 local_got = elf_local_got_refcounts (ibfd);
3301 if (!local_got)
3302 continue;
3303
3304 symtab_hdr = &elf_symtab_hdr (ibfd);
3305 locsymcount = symtab_hdr->sh_info;
3306 end_local_got = local_got + locsymcount;
3307 local_tls_type = elf_x86_64_local_got_tls_type (ibfd);
3308 local_tlsdesc_gotent = elf_x86_64_local_tlsdesc_gotent (ibfd);
3309 s = htab->elf.sgot;
3310 srel = htab->elf.srelgot;
3311 for (; local_got < end_local_got;
3312 ++local_got, ++local_tls_type, ++local_tlsdesc_gotent)
3313 {
3314 *local_tlsdesc_gotent = (bfd_vma) -1;
3315 if (*local_got > 0)
3316 {
3317 if (GOT_TLS_GDESC_P (*local_tls_type))
3318 {
3319 *local_tlsdesc_gotent = htab->elf.sgotplt->size
3320 - elf_x86_64_compute_jump_table_size (htab);
3321 htab->elf.sgotplt->size += 2 * GOT_ENTRY_SIZE;
3322 *local_got = (bfd_vma) -2;
3323 }
3324 if (! GOT_TLS_GDESC_P (*local_tls_type)
3325 || GOT_TLS_GD_P (*local_tls_type))
3326 {
3327 *local_got = s->size;
3328 s->size += GOT_ENTRY_SIZE;
3329 if (GOT_TLS_GD_P (*local_tls_type))
3330 s->size += GOT_ENTRY_SIZE;
3331 }
3332 if (bfd_link_pic (info)
3333 || GOT_TLS_GD_ANY_P (*local_tls_type)
3334 || *local_tls_type == GOT_TLS_IE)
3335 {
3336 if (GOT_TLS_GDESC_P (*local_tls_type))
3337 {
3338 htab->elf.srelplt->size
3339 += bed->s->sizeof_rela;
3340 htab->tlsdesc_plt = (bfd_vma) -1;
3341 }
3342 if (! GOT_TLS_GDESC_P (*local_tls_type)
3343 || GOT_TLS_GD_P (*local_tls_type))
3344 srel->size += bed->s->sizeof_rela;
3345 }
3346 }
3347 else
3348 *local_got = (bfd_vma) -1;
3349 }
3350 }
3351
3352 if (htab->tls_ld_got.refcount > 0)
3353 {
3354 /* Allocate 2 got entries and 1 dynamic reloc for R_X86_64_TLSLD
3355 relocs. */
3356 htab->tls_ld_got.offset = htab->elf.sgot->size;
3357 htab->elf.sgot->size += 2 * GOT_ENTRY_SIZE;
3358 htab->elf.srelgot->size += bed->s->sizeof_rela;
3359 }
3360 else
3361 htab->tls_ld_got.offset = -1;
3362
3363 /* Allocate global sym .plt and .got entries, and space for global
3364 sym dynamic relocs. */
3365 elf_link_hash_traverse (&htab->elf, elf_x86_64_allocate_dynrelocs,
3366 info);
3367
3368 /* Allocate .plt and .got entries, and space for local symbols. */
3369 htab_traverse (htab->loc_hash_table,
3370 elf_x86_64_allocate_local_dynrelocs,
3371 info);
3372
3373 /* For every jump slot reserved in the sgotplt, reloc_count is
3374 incremented. However, when we reserve space for TLS descriptors,
3375 it's not incremented, so in order to compute the space reserved
3376 for them, it suffices to multiply the reloc count by the jump
3377 slot size.
3378
3379 PR ld/13302: We start next_irelative_index at the end of .rela.plt
3380 so that R_X86_64_IRELATIVE entries come last. */
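/* The idea is that R_X86_64_JUMP_SLOT entries are assigned indices
   from 0 upwards while R_X86_64_IRELATIVE entries are assigned from
   next_irelative_index downwards, keeping the IRELATIVE entries
   grouped at the end of .rela.plt.  */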
3381 if (htab->elf.srelplt)
3382 {
3383 htab->sgotplt_jump_table_size
3384 = elf_x86_64_compute_jump_table_size (htab);
3385 htab->next_irelative_index = htab->elf.srelplt->reloc_count - 1;
3386 }
3387 else if (htab->elf.irelplt)
3388 htab->next_irelative_index = htab->elf.irelplt->reloc_count - 1;
3389
3390 if (htab->tlsdesc_plt)
3391 {
3392 /* If we're not using lazy TLS relocations, don't generate the
3393 PLT and GOT entries they require. */
3394 if ((info->flags & DF_BIND_NOW))
3395 htab->tlsdesc_plt = 0;
3396 else
3397 {
3398 htab->tlsdesc_got = htab->elf.sgot->size;
3399 htab->elf.sgot->size += GOT_ENTRY_SIZE;
3400 /* Reserve room for the initial entry.
3401 FIXME: we could probably do away with it in this case. */
3402 if (htab->elf.splt->size == 0)
3403 htab->elf.splt->size += GET_PLT_ENTRY_SIZE (output_bfd);
3404 htab->tlsdesc_plt = htab->elf.splt->size;
3405 htab->elf.splt->size += GET_PLT_ENTRY_SIZE (output_bfd);
3406 }
3407 }
3408
3409 if (htab->elf.sgotplt)
3410 {
3411 /* Don't allocate .got.plt section if there are no GOT nor PLT
3412 entries and there is no reference to _GLOBAL_OFFSET_TABLE_. */
3413 if ((htab->elf.hgot == NULL
3414 || !htab->elf.hgot->ref_regular_nonweak)
3415 && (htab->elf.sgotplt->size
3416 == get_elf_backend_data (output_bfd)->got_header_size)
3417 && (htab->elf.splt == NULL
3418 || htab->elf.splt->size == 0)
3419 && (htab->elf.sgot == NULL
3420 || htab->elf.sgot->size == 0)
3421 && (htab->elf.iplt == NULL
3422 || htab->elf.iplt->size == 0)
3423 && (htab->elf.igotplt == NULL
3424 || htab->elf.igotplt->size == 0))
3425 htab->elf.sgotplt->size = 0;
3426 }
3427
3428 if (htab->plt_eh_frame != NULL
3429 && htab->elf.splt != NULL
3430 && htab->elf.splt->size != 0
3431 && !bfd_is_abs_section (htab->elf.splt->output_section)
3432 && _bfd_elf_eh_frame_present (info))
3433 {
3434 const struct elf_x86_64_backend_data *arch_data
3435 = get_elf_x86_64_arch_data (bed);
3436 htab->plt_eh_frame->size = arch_data->eh_frame_plt_size;
3437 }
3438
3439 /* We now have determined the sizes of the various dynamic sections.
3440 Allocate memory for them. */
3441 relocs = FALSE;
3442 for (s = dynobj->sections; s != NULL; s = s->next)
3443 {
3444 if ((s->flags & SEC_LINKER_CREATED) == 0)
3445 continue;
3446
3447 if (s == htab->elf.splt
3448 || s == htab->elf.sgot
3449 || s == htab->elf.sgotplt
3450 || s == htab->elf.iplt
3451 || s == htab->elf.igotplt
3452 || s == htab->plt_bnd
3453 || s == htab->plt_got
3454 || s == htab->plt_eh_frame
3455 || s == htab->sdynbss)
3456 {
3457 /* Strip this section if we don't need it; see the
3458 comment below. */
3459 }
3460 else if (CONST_STRNEQ (bfd_get_section_name (dynobj, s), ".rela"))
3461 {
3462 if (s->size != 0 && s != htab->elf.srelplt)
3463 relocs = TRUE;
3464
3465 /* We use the reloc_count field as a counter if we need
3466 to copy relocs into the output file. */
3467 if (s != htab->elf.srelplt)
3468 s->reloc_count = 0;
3469 }
3470 else
3471 {
3472 /* It's not one of our sections, so don't allocate space. */
3473 continue;
3474 }
3475
3476 if (s->size == 0)
3477 {
3478 /* If we don't need this section, strip it from the
3479 output file. This is mostly to handle .rela.bss and
3480 .rela.plt. We must create both sections in
3481 create_dynamic_sections, because they must be created
3482 before the linker maps input sections to output
3483 sections. The linker does that before
3484 adjust_dynamic_symbol is called, and it is that
3485 function which decides whether anything needs to go
3486 into these sections. */
3487
3488 s->flags |= SEC_EXCLUDE;
3489 continue;
3490 }
3491
3492 if ((s->flags & SEC_HAS_CONTENTS) == 0)
3493 continue;
3494
3495 /* Allocate memory for the section contents. We use bfd_zalloc
3496 here in case unused entries are not reclaimed before the
3497 section's contents are written out. This should not happen,
3498 but this way if it does, we get a R_X86_64_NONE reloc instead
3499 of garbage. */
3500 s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
3501 if (s->contents == NULL)
3502 return FALSE;
3503 }
3504
3505 if (htab->plt_eh_frame != NULL
3506 && htab->plt_eh_frame->contents != NULL)
3507 {
3508 const struct elf_x86_64_backend_data *arch_data
3509 = get_elf_x86_64_arch_data (bed);
3510
3511 memcpy (htab->plt_eh_frame->contents,
3512 arch_data->eh_frame_plt, htab->plt_eh_frame->size);
3513 bfd_put_32 (dynobj, htab->elf.splt->size,
3514 htab->plt_eh_frame->contents + PLT_FDE_LEN_OFFSET);
3515 }
3516
3517 if (htab->elf.dynamic_sections_created)
3518 {
3519 /* Add some entries to the .dynamic section. We fill in the
3520 values later, in elf_x86_64_finish_dynamic_sections, but we
3521 must add the entries now so that we get the correct size for
3522 the .dynamic section. The DT_DEBUG entry is filled in by the
3523 dynamic linker and used by the debugger. */
3524 #define add_dynamic_entry(TAG, VAL) \
3525 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
3526
3527 if (bfd_link_executable (info))
3528 {
3529 if (!add_dynamic_entry (DT_DEBUG, 0))
3530 return FALSE;
3531 }
3532
3533 if (htab->elf.splt->size != 0)
3534 {
3535 /* DT_PLTGOT is used by prelink even if there is no PLT
3536 relocation. */
3537 if (!add_dynamic_entry (DT_PLTGOT, 0))
3538 return FALSE;
3539
3540 if (htab->elf.srelplt->size != 0)
3541 {
3542 if (!add_dynamic_entry (DT_PLTRELSZ, 0)
3543 || !add_dynamic_entry (DT_PLTREL, DT_RELA)
3544 || !add_dynamic_entry (DT_JMPREL, 0))
3545 return FALSE;
3546 }
3547
3548 if (htab->tlsdesc_plt
3549 && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
3550 || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
3551 return FALSE;
3552 }
3553
3554 if (relocs)
3555 {
3556 if (!add_dynamic_entry (DT_RELA, 0)
3557 || !add_dynamic_entry (DT_RELASZ, 0)
3558 || !add_dynamic_entry (DT_RELAENT, bed->s->sizeof_rela))
3559 return FALSE;
3560
3561 /* If any dynamic relocs apply to a read-only section,
3562 then we need a DT_TEXTREL entry. */
3563 if ((info->flags & DF_TEXTREL) == 0)
3564 elf_link_hash_traverse (&htab->elf,
3565 elf_x86_64_readonly_dynrelocs,
3566 info);
3567
3568 if ((info->flags & DF_TEXTREL) != 0)
3569 {
3570 if ((elf_tdata (output_bfd)->has_gnu_symbols
3571 & elf_gnu_symbol_ifunc) == elf_gnu_symbol_ifunc)
3572 {
3573 info->callbacks->einfo
3574 (_("%P%X: read-only segment has dynamic IFUNC relocations; recompile with -fPIC\n"));
3575 bfd_set_error (bfd_error_bad_value);
3576 return FALSE;
3577 }
3578
3579 if (!add_dynamic_entry (DT_TEXTREL, 0))
3580 return FALSE;
3581 }
3582 }
3583 }
3584 #undef add_dynamic_entry
3585
3586 return TRUE;
3587 }
3588
3589 static bfd_boolean
3590 elf_x86_64_always_size_sections (bfd *output_bfd,
3591 struct bfd_link_info *info)
3592 {
3593 asection *tls_sec = elf_hash_table (info)->tls_sec;
3594
3595 if (tls_sec)
3596 {
3597 struct elf_link_hash_entry *tlsbase;
3598
3599 tlsbase = elf_link_hash_lookup (elf_hash_table (info),
3600 "_TLS_MODULE_BASE_",
3601 FALSE, FALSE, FALSE);
3602
3603 if (tlsbase && tlsbase->type == STT_TLS)
3604 {
3605 struct elf_x86_64_link_hash_table *htab;
3606 struct bfd_link_hash_entry *bh = NULL;
3607 const struct elf_backend_data *bed
3608 = get_elf_backend_data (output_bfd);
3609
3610 htab = elf_x86_64_hash_table (info);
3611 if (htab == NULL)
3612 return FALSE;
3613
3614 if (!(_bfd_generic_link_add_one_symbol
3615 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
3616 tls_sec, 0, NULL, FALSE,
3617 bed->collect, &bh)))
3618 return FALSE;
3619
3620 htab->tls_module_base = bh;
3621
3622 tlsbase = (struct elf_link_hash_entry *)bh;
3623 tlsbase->def_regular = 1;
3624 tlsbase->other = STV_HIDDEN;
3625 tlsbase->root.linker_def = 1;
3626 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
3627 }
3628 }
3629
3630 return TRUE;
3631 }
3632
3633 /* _TLS_MODULE_BASE_ needs to be treated specially when linking
3634 executables. Rather than setting it to the beginning of the TLS
3635 section, we have to set it to the end. This function may be called
3636 multiple times, it is idempotent. */
3637
3638 static void
3639 elf_x86_64_set_tls_module_base (struct bfd_link_info *info)
3640 {
3641 struct elf_x86_64_link_hash_table *htab;
3642 struct bfd_link_hash_entry *base;
3643
3644 if (!bfd_link_executable (info))
3645 return;
3646
3647 htab = elf_x86_64_hash_table (info);
3648 if (htab == NULL)
3649 return;
3650
3651 base = htab->tls_module_base;
3652 if (base == NULL)
3653 return;
3654
3655 base->u.def.value = htab->elf.tls_size;
3656 }
3657
3658 /* Return the base VMA address which should be subtracted from real addresses
3659 when resolving @dtpoff relocation.
3660 This is PT_TLS segment p_vaddr. */
3661
3662 static bfd_vma
3663 elf_x86_64_dtpoff_base (struct bfd_link_info *info)
3664 {
3665 /* If tls_sec is NULL, we should have signalled an error already. */
3666 if (elf_hash_table (info)->tls_sec == NULL)
3667 return 0;
3668 return elf_hash_table (info)->tls_sec->vma;
3669 }
3670
3671 /* Return the relocation value for @tpoff relocation
3672 if STT_TLS virtual address is ADDRESS. */
3673
3674 static bfd_vma
3675 elf_x86_64_tpoff (struct bfd_link_info *info, bfd_vma address)
3676 {
3677 struct elf_link_hash_table *htab = elf_hash_table (info);
3678 const struct elf_backend_data *bed = get_elf_backend_data (info->output_bfd);
3679 bfd_vma static_tls_size;
3680
3681 /* If tls_sec is NULL, we should have signalled an error already. */
3682 if (htab->tls_sec == NULL)
3683 return 0;
3684
3685 /* Consider special static TLS alignment requirements. */
3686 static_tls_size = BFD_ALIGN (htab->tls_size, bed->static_tls_alignment);
3687 return address - static_tls_size - htab->tls_sec->vma;
3688 }
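/* For illustration only (hypothetical numbers): x86-64 uses TLS
   variant II, so @tpoff values are negative offsets from the thread
   pointer in %fs.  With tls_sec->vma == 0x1000 and an aligned static
   TLS size of 0x10, a TLS variable at VMA 0x1004 yields
   0x1004 - 0x10 - 0x1000 = -0xc, i.e. it is addressed as %fs:-0xc at
   run time.  */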
3689
3690 /* Is the instruction before OFFSET in CONTENTS a 32bit relative
3691 branch? */
3692
3693 static bfd_boolean
3694 is_32bit_relative_branch (bfd_byte *contents, bfd_vma offset)
3695 {
3696 /* Opcode Instruction
3697 0xe8 call
3698 0xe9 jump
3699 0x0f 0x8x conditional jump */
3700 return ((offset > 0
3701 && (contents [offset - 1] == 0xe8
3702 || contents [offset - 1] == 0xe9))
3703 || (offset > 1
3704 && contents [offset - 2] == 0x0f
3705 && (contents [offset - 1] & 0xf0) == 0x80));
3706 }
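/* A sketch of the byte patterns checked above: OFFSET is the start of
   the rel32 field being relocated, so the opcode bytes sit just before
   it.  "e8 rel32" is call, "e9 rel32" is jmp, and "0f 80".."0f 8f"
   followed by rel32 are the conditional jumps, which is why the byte
   before the immediate is masked with 0xf0 and compared to 0x80.  */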
3707
3708 /* Relocate an x86_64 ELF section. */
3709
3710 static bfd_boolean
3711 elf_x86_64_relocate_section (bfd *output_bfd,
3712 struct bfd_link_info *info,
3713 bfd *input_bfd,
3714 asection *input_section,
3715 bfd_byte *contents,
3716 Elf_Internal_Rela *relocs,
3717 Elf_Internal_Sym *local_syms,
3718 asection **local_sections)
3719 {
3720 struct elf_x86_64_link_hash_table *htab;
3721 Elf_Internal_Shdr *symtab_hdr;
3722 struct elf_link_hash_entry **sym_hashes;
3723 bfd_vma *local_got_offsets;
3724 bfd_vma *local_tlsdesc_gotents;
3725 Elf_Internal_Rela *rel;
3726 Elf_Internal_Rela *relend;
3727 const unsigned int plt_entry_size = GET_PLT_ENTRY_SIZE (info->output_bfd);
3728
3729 BFD_ASSERT (is_x86_64_elf (input_bfd));
3730
3731 htab = elf_x86_64_hash_table (info);
3732 if (htab == NULL)
3733 return FALSE;
3734 symtab_hdr = &elf_symtab_hdr (input_bfd);
3735 sym_hashes = elf_sym_hashes (input_bfd);
3736 local_got_offsets = elf_local_got_offsets (input_bfd);
3737 local_tlsdesc_gotents = elf_x86_64_local_tlsdesc_gotent (input_bfd);
3738
3739 elf_x86_64_set_tls_module_base (info);
3740
3741 rel = relocs;
3742 relend = relocs + input_section->reloc_count;
3743 for (; rel < relend; rel++)
3744 {
3745 unsigned int r_type;
3746 reloc_howto_type *howto;
3747 unsigned long r_symndx;
3748 struct elf_link_hash_entry *h;
3749 struct elf_x86_64_link_hash_entry *eh;
3750 Elf_Internal_Sym *sym;
3751 asection *sec;
3752 bfd_vma off, offplt, plt_offset;
3753 bfd_vma relocation;
3754 bfd_boolean unresolved_reloc;
3755 bfd_reloc_status_type r;
3756 int tls_type;
3757 asection *base_got, *resolved_plt;
3758 bfd_vma st_size;
3759
3760 r_type = ELF32_R_TYPE (rel->r_info);
3761 if (r_type == (int) R_X86_64_GNU_VTINHERIT
3762 || r_type == (int) R_X86_64_GNU_VTENTRY)
3763 continue;
3764
3765 if (r_type >= (int) R_X86_64_standard)
3766 {
3767 (*_bfd_error_handler)
3768 (_("%B: unrecognized relocation (0x%x) in section `%A'"),
3769 input_bfd, input_section, r_type);
3770 bfd_set_error (bfd_error_bad_value);
3771 return FALSE;
3772 }
3773
3774 if (r_type != (int) R_X86_64_32
3775 || ABI_64_P (output_bfd))
3776 howto = x86_64_elf_howto_table + r_type;
3777 else
3778 howto = (x86_64_elf_howto_table
3779 + ARRAY_SIZE (x86_64_elf_howto_table) - 1);
3780 r_symndx = htab->r_sym (rel->r_info);
3781 h = NULL;
3782 sym = NULL;
3783 sec = NULL;
3784 unresolved_reloc = FALSE;
3785 if (r_symndx < symtab_hdr->sh_info)
3786 {
3787 sym = local_syms + r_symndx;
3788 sec = local_sections[r_symndx];
3789
3790 relocation = _bfd_elf_rela_local_sym (output_bfd, sym,
3791 &sec, rel);
3792 st_size = sym->st_size;
3793
3794 /* Relocate against local STT_GNU_IFUNC symbol. */
3795 if (!bfd_link_relocatable (info)
3796 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
3797 {
3798 h = elf_x86_64_get_local_sym_hash (htab, input_bfd,
3799 rel, FALSE);
3800 if (h == NULL)
3801 abort ();
3802
3803 /* Set STT_GNU_IFUNC symbol value. */
3804 h->root.u.def.value = sym->st_value;
3805 h->root.u.def.section = sec;
3806 }
3807 }
3808 else
3809 {
3810 bfd_boolean warned ATTRIBUTE_UNUSED;
3811 bfd_boolean ignored ATTRIBUTE_UNUSED;
3812
3813 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
3814 r_symndx, symtab_hdr, sym_hashes,
3815 h, sec, relocation,
3816 unresolved_reloc, warned, ignored);
3817 st_size = h->size;
3818 }
3819
3820 if (sec != NULL && discarded_section (sec))
3821 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
3822 rel, 1, relend, howto, 0, contents);
3823
3824 if (bfd_link_relocatable (info))
3825 continue;
3826
3827 if (rel->r_addend == 0 && !ABI_64_P (output_bfd))
3828 {
3829 if (r_type == R_X86_64_64)
3830 {
3831 /* For x32, treat R_X86_64_64 like R_X86_64_32 and
3832 zero-extend it to 64bit if addend is zero. */
3833 r_type = R_X86_64_32;
3834 memset (contents + rel->r_offset + 4, 0, 4);
3835 }
3836 else if (r_type == R_X86_64_SIZE64)
3837 {
3838 /* For x32, treat R_X86_64_SIZE64 like R_X86_64_SIZE32 and
3839 zero-extend it to 64bit if addend is zero. */
3840 r_type = R_X86_64_SIZE32;
3841 memset (contents + rel->r_offset + 4, 0, 4);
3842 }
3843 }
3844
3845 eh = (struct elf_x86_64_link_hash_entry *) h;
3846
3847 /* Since an STT_GNU_IFUNC symbol must go through the PLT, we handle
3848 it here if it is defined in a non-shared object. */
3849 if (h != NULL
3850 && h->type == STT_GNU_IFUNC
3851 && h->def_regular)
3852 {
3853 bfd_vma plt_index;
3854 const char *name;
3855
3856 if ((input_section->flags & SEC_ALLOC) == 0)
3857 {
3858 /* Dynamic relocs are not propagated for SEC_DEBUGGING
3859 sections because such sections are not SEC_ALLOC and
3860 thus ld.so will not process them. */
3861 if ((input_section->flags & SEC_DEBUGGING) != 0)
3862 continue;
3863 abort ();
3864 }
3865 else if (h->plt.offset == (bfd_vma) -1)
3866 abort ();
3867
3868 /* STT_GNU_IFUNC symbol must go through PLT. */
3869 if (htab->elf.splt != NULL)
3870 {
3871 if (htab->plt_bnd != NULL)
3872 {
3873 resolved_plt = htab->plt_bnd;
3874 plt_offset = eh->plt_bnd.offset;
3875 }
3876 else
3877 {
3878 resolved_plt = htab->elf.splt;
3879 plt_offset = h->plt.offset;
3880 }
3881 }
3882 else
3883 {
3884 resolved_plt = htab->elf.iplt;
3885 plt_offset = h->plt.offset;
3886 }
3887
3888 relocation = (resolved_plt->output_section->vma
3889 + resolved_plt->output_offset + plt_offset);
3890
3891 switch (r_type)
3892 {
3893 default:
3894 if (h->root.root.string)
3895 name = h->root.root.string;
3896 else
3897 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
3898 NULL);
3899 (*_bfd_error_handler)
3900 (_("%B: relocation %s against STT_GNU_IFUNC "
3901 "symbol `%s' isn't handled by %s"), input_bfd,
3902 x86_64_elf_howto_table[r_type].name,
3903 name, __FUNCTION__);
3904 bfd_set_error (bfd_error_bad_value);
3905 return FALSE;
3906
3907 case R_X86_64_32S:
3908 if (bfd_link_pic (info))
3909 abort ();
3910 goto do_relocation;
3911
3912 case R_X86_64_32:
3913 if (ABI_64_P (output_bfd))
3914 goto do_relocation;
3915 /* FALLTHROUGH */
3916 case R_X86_64_64:
3917 if (rel->r_addend != 0)
3918 {
3919 if (h->root.root.string)
3920 name = h->root.root.string;
3921 else
3922 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
3923 sym, NULL);
3924 (*_bfd_error_handler)
3925 (_("%B: relocation %s against STT_GNU_IFUNC "
3926 "symbol `%s' has non-zero addend: %d"),
3927 input_bfd, x86_64_elf_howto_table[r_type].name,
3928 name, rel->r_addend);
3929 bfd_set_error (bfd_error_bad_value);
3930 return FALSE;
3931 }
3932
3933 /* Generate a dynamic relocation only when there is a
3934 non-GOT reference in a shared object. */
3935 if (bfd_link_pic (info) && h->non_got_ref)
3936 {
3937 Elf_Internal_Rela outrel;
3938 asection *sreloc;
3939
3940 /* Need a dynamic relocation to get the real function
3941 address. */
3942 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
3943 info,
3944 input_section,
3945 rel->r_offset);
3946 if (outrel.r_offset == (bfd_vma) -1
3947 || outrel.r_offset == (bfd_vma) -2)
3948 abort ();
3949
3950 outrel.r_offset += (input_section->output_section->vma
3951 + input_section->output_offset);
3952
3953 if (h->dynindx == -1
3954 || h->forced_local
3955 || bfd_link_executable (info))
3956 {
3957 /* This symbol is resolved locally. */
3958 outrel.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
3959 outrel.r_addend = (h->root.u.def.value
3960 + h->root.u.def.section->output_section->vma
3961 + h->root.u.def.section->output_offset);
3962 }
3963 else
3964 {
3965 outrel.r_info = htab->r_info (h->dynindx, r_type);
3966 outrel.r_addend = 0;
3967 }
3968
3969 sreloc = htab->elf.irelifunc;
3970 elf_append_rela (output_bfd, sreloc, &outrel);
3971
3972 /* If this reloc is against an external symbol, we
3973 do not want to fiddle with the addend. Otherwise,
3974 we need to include the symbol value so that it
3975 becomes an addend for the dynamic reloc. For an
3976 internal symbol, we have already updated the addend. */
3977 continue;
3978 }
3979 /* FALLTHROUGH */
3980 case R_X86_64_PC32:
3981 case R_X86_64_PC32_BND:
3982 case R_X86_64_PC64:
3983 case R_X86_64_PLT32:
3984 case R_X86_64_PLT32_BND:
3985 goto do_relocation;
3986
3987 case R_X86_64_GOTPCREL:
3988 case R_X86_64_GOTPCREL64:
3989 base_got = htab->elf.sgot;
3990 off = h->got.offset;
3991
3992 if (base_got == NULL)
3993 abort ();
3994
3995 if (off == (bfd_vma) -1)
3996 {
3997 /* We can't use h->got.offset here to save state, or
3998 even just remember the offset, as finish_dynamic_symbol
3999 would use that as offset into .got. */
4000
4001 if (htab->elf.splt != NULL)
4002 {
4003 plt_index = h->plt.offset / plt_entry_size - 1;
4004 off = (plt_index + 3) * GOT_ENTRY_SIZE;
4005 base_got = htab->elf.sgotplt;
4006 }
4007 else
4008 {
4009 plt_index = h->plt.offset / plt_entry_size;
4010 off = plt_index * GOT_ENTRY_SIZE;
4011 base_got = htab->elf.igotplt;
4012 }
4013
4014 if (h->dynindx == -1
4015 || h->forced_local
4016 || info->symbolic)
4017 {
4018 /* This references the local definition. We must
4019 initialize this entry in the global offset table.
4020 Since the offset must always be a multiple of 8,
4021 we use the least significant bit to record
4022 whether we have initialized it already.
4023
4024 When doing a dynamic link, we create a .rela.got
4025 relocation entry to initialize the value. This
4026 is done in the finish_dynamic_symbol routine. */
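/* Illustration of the least-significant-bit trick described above:
   GOT offsets are always multiples of 8, so an entry at offset 0x18 is
   recorded as 0x19 once its contents have been written, and masking
   with ~1 recovers the real offset on later references.  */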
4027 if ((off & 1) != 0)
4028 off &= ~1;
4029 else
4030 {
4031 bfd_put_64 (output_bfd, relocation,
4032 base_got->contents + off);
4033 /* Note that this is harmless for the GOTPLT64
4034 case, as -1 | 1 still is -1. */
4035 h->got.offset |= 1;
4036 }
4037 }
4038 }
4039
4040 relocation = (base_got->output_section->vma
4041 + base_got->output_offset + off);
4042
4043 goto do_relocation;
4044 }
4045 }
4046
4047 /* When generating a shared object, the relocations handled here are
4048 copied into the output file to be resolved at run time. */
4049 switch (r_type)
4050 {
4051 case R_X86_64_GOT32:
4052 case R_X86_64_GOT64:
4053 /* Relocation is to the entry for this symbol in the global
4054 offset table. */
4055 case R_X86_64_GOTPCREL:
4056 case R_X86_64_GOTPCREL64:
4057 /* Use global offset table entry as symbol value. */
4058 case R_X86_64_GOTPLT64:
4059 /* This is obsolete and treated the same as GOT64. */
4060 base_got = htab->elf.sgot;
4061
4062 if (htab->elf.sgot == NULL)
4063 abort ();
4064
4065 if (h != NULL)
4066 {
4067 bfd_boolean dyn;
4068
4069 off = h->got.offset;
4070 if (h->needs_plt
4071 && h->plt.offset != (bfd_vma)-1
4072 && off == (bfd_vma)-1)
4073 {
4074 /* We can't use h->got.offset here to save
4075 state, or even just remember the offset, as
4076 finish_dynamic_symbol would use that as offset into
4077 .got. */
4078 bfd_vma plt_index = h->plt.offset / plt_entry_size - 1;
4079 off = (plt_index + 3) * GOT_ENTRY_SIZE;
4080 base_got = htab->elf.sgotplt;
4081 }
4082
4083 dyn = htab->elf.dynamic_sections_created;
4084
4085 if (! WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, bfd_link_pic (info), h)
4086 || (bfd_link_pic (info)
4087 && SYMBOL_REFERENCES_LOCAL (info, h))
4088 || (ELF_ST_VISIBILITY (h->other)
4089 && h->root.type == bfd_link_hash_undefweak))
4090 {
4091 /* This is actually a static link, or it is a -Bsymbolic
4092 link and the symbol is defined locally, or the symbol
4093 was forced to be local because of a version file. We
4094 must initialize this entry in the global offset table.
4095 Since the offset must always be a multiple of 8, we
4096 use the least significant bit to record whether we
4097 have initialized it already.
4098
4099 When doing a dynamic link, we create a .rela.got
4100 relocation entry to initialize the value. This is
4101 done in the finish_dynamic_symbol routine. */
4102 if ((off & 1) != 0)
4103 off &= ~1;
4104 else
4105 {
4106 bfd_put_64 (output_bfd, relocation,
4107 base_got->contents + off);
4108 /* Note that this is harmless for the GOTPLT64 case,
4109 as -1 | 1 still is -1. */
4110 h->got.offset |= 1;
4111 }
4112 }
4113 else
4114 unresolved_reloc = FALSE;
4115 }
4116 else
4117 {
4118 if (local_got_offsets == NULL)
4119 abort ();
4120
4121 off = local_got_offsets[r_symndx];
4122
4123 /* The offset must always be a multiple of 8. We use
4124 the least significant bit to record whether we have
4125 already generated the necessary reloc. */
4126 if ((off & 1) != 0)
4127 off &= ~1;
4128 else
4129 {
4130 bfd_put_64 (output_bfd, relocation,
4131 base_got->contents + off);
4132
4133 if (bfd_link_pic (info))
4134 {
4135 asection *s;
4136 Elf_Internal_Rela outrel;
4137
4138 /* We need to generate a R_X86_64_RELATIVE reloc
4139 for the dynamic linker. */
4140 s = htab->elf.srelgot;
4141 if (s == NULL)
4142 abort ();
4143
4144 outrel.r_offset = (base_got->output_section->vma
4145 + base_got->output_offset
4146 + off);
4147 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4148 outrel.r_addend = relocation;
4149 elf_append_rela (output_bfd, s, &outrel);
4150 }
4151
4152 local_got_offsets[r_symndx] |= 1;
4153 }
4154 }
4155
4156 if (off >= (bfd_vma) -2)
4157 abort ();
4158
4159 relocation = base_got->output_section->vma
4160 + base_got->output_offset + off;
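/* A rough note on the adjustment below: for R_X86_64_GOT32, GOT64 and
   GOTPLT64 the final value is presumably the GOT entry's offset from
   the GOT base (_GLOBAL_OFFSET_TABLE_ points at .got.plt on x86-64),
   so the .got.plt address is subtracted, whereas the GOTPCREL variants
   keep the absolute GOT entry address and let the PC-relative howto
   subtract the place.  */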
4161 if (r_type != R_X86_64_GOTPCREL && r_type != R_X86_64_GOTPCREL64)
4162 relocation -= htab->elf.sgotplt->output_section->vma
4163 - htab->elf.sgotplt->output_offset;
4164
4165 break;
4166
4167 case R_X86_64_GOTOFF64:
4168 /* Relocation is relative to the start of the global offset
4169 table. */
4170
4171 /* Check to make sure it isn't a protected function or data
4172 symbol for a shared library, since it may not be local when
4173 used as a function address or with copy relocation. We also
4174 need to make sure that the symbol is referenced locally. */
4175 if (bfd_link_pic (info) && h)
4176 {
4177 if (!h->def_regular)
4178 {
4179 const char *v;
4180
4181 switch (ELF_ST_VISIBILITY (h->other))
4182 {
4183 case STV_HIDDEN:
4184 v = _("hidden symbol");
4185 break;
4186 case STV_INTERNAL:
4187 v = _("internal symbol");
4188 break;
4189 case STV_PROTECTED:
4190 v = _("protected symbol");
4191 break;
4192 default:
4193 v = _("symbol");
4194 break;
4195 }
4196
4197 (*_bfd_error_handler)
4198 (_("%B: relocation R_X86_64_GOTOFF64 against undefined %s `%s' can not be used when making a shared object"),
4199 input_bfd, v, h->root.root.string);
4200 bfd_set_error (bfd_error_bad_value);
4201 return FALSE;
4202 }
4203 else if (!bfd_link_executable (info)
4204 && !SYMBOL_REFERENCES_LOCAL (info, h)
4205 && (h->type == STT_FUNC
4206 || h->type == STT_OBJECT)
4207 && ELF_ST_VISIBILITY (h->other) == STV_PROTECTED)
4208 {
4209 (*_bfd_error_handler)
4210 (_("%B: relocation R_X86_64_GOTOFF64 against protected %s `%s' can not be used when making a shared object"),
4211 input_bfd,
4212 h->type == STT_FUNC ? "function" : "data",
4213 h->root.root.string);
4214 bfd_set_error (bfd_error_bad_value);
4215 return FALSE;
4216 }
4217 }
4218
4219 /* Note that sgot is not involved in this
4220 calculation. We always want the start of .got.plt. If we
4221 defined _GLOBAL_OFFSET_TABLE_ in a different way, as is
4222 permitted by the ABI, we might have to change this
4223 calculation. */
4224 relocation -= htab->elf.sgotplt->output_section->vma
4225 + htab->elf.sgotplt->output_offset;
4226 break;
4227
4228 case R_X86_64_GOTPC32:
4229 case R_X86_64_GOTPC64:
4230 /* Use global offset table as symbol value. */
4231 relocation = htab->elf.sgotplt->output_section->vma
4232 + htab->elf.sgotplt->output_offset;
4233 unresolved_reloc = FALSE;
4234 break;
4235
4236 case R_X86_64_PLTOFF64:
4237 /* Relocation is PLT entry relative to GOT. For local
4238 symbols it's the symbol itself relative to GOT. */
4239 if (h != NULL
4240 /* See PLT32 handling. */
4241 && h->plt.offset != (bfd_vma) -1
4242 && htab->elf.splt != NULL)
4243 {
4244 if (htab->plt_bnd != NULL)
4245 {
4246 resolved_plt = htab->plt_bnd;
4247 plt_offset = eh->plt_bnd.offset;
4248 }
4249 else
4250 {
4251 resolved_plt = htab->elf.splt;
4252 plt_offset = h->plt.offset;
4253 }
4254
4255 relocation = (resolved_plt->output_section->vma
4256 + resolved_plt->output_offset
4257 + plt_offset);
4258 unresolved_reloc = FALSE;
4259 }
4260
4261 relocation -= htab->elf.sgotplt->output_section->vma
4262 + htab->elf.sgotplt->output_offset;
4263 break;
4264
4265 case R_X86_64_PLT32:
4266 case R_X86_64_PLT32_BND:
4267 /* Relocation is to the entry for this symbol in the
4268 procedure linkage table. */
4269
4270 /* Resolve a PLT32 reloc against a local symbol directly,
4271 without using the procedure linkage table. */
4272 if (h == NULL)
4273 break;
4274
4275 if ((h->plt.offset == (bfd_vma) -1
4276 && eh->plt_got.offset == (bfd_vma) -1)
4277 || htab->elf.splt == NULL)
4278 {
4279 /* We didn't make a PLT entry for this symbol. This
4280 happens when statically linking PIC code, or when
4281 using -Bsymbolic. */
4282 break;
4283 }
4284
4285 if (h->plt.offset != (bfd_vma) -1)
4286 {
4287 if (htab->plt_bnd != NULL)
4288 {
4289 resolved_plt = htab->plt_bnd;
4290 plt_offset = eh->plt_bnd.offset;
4291 }
4292 else
4293 {
4294 resolved_plt = htab->elf.splt;
4295 plt_offset = h->plt.offset;
4296 }
4297 }
4298 else
4299 {
4300 /* Use the GOT PLT. */
4301 resolved_plt = htab->plt_got;
4302 plt_offset = eh->plt_got.offset;
4303 }
4304
4305 relocation = (resolved_plt->output_section->vma
4306 + resolved_plt->output_offset
4307 + plt_offset);
4308 unresolved_reloc = FALSE;
4309 break;
4310
4311 case R_X86_64_SIZE32:
4312 case R_X86_64_SIZE64:
4313 /* Set to symbol size. */
4314 relocation = st_size;
4315 goto direct;
4316
4317 case R_X86_64_PC8:
4318 case R_X86_64_PC16:
4319 case R_X86_64_PC32:
4320 case R_X86_64_PC32_BND:
4321 /* Don't complain about -fPIC if the symbol is undefined when
4322 building an executable. */
4323 if (bfd_link_pic (info)
4324 && (input_section->flags & SEC_ALLOC) != 0
4325 && (input_section->flags & SEC_READONLY) != 0
4326 && h != NULL
4327 && !(bfd_link_executable (info)
4328 && h->root.type == bfd_link_hash_undefined))
4329 {
4330 bfd_boolean fail = FALSE;
4331 bfd_boolean branch
4332 = ((r_type == R_X86_64_PC32
4333 || r_type == R_X86_64_PC32_BND)
4334 && is_32bit_relative_branch (contents, rel->r_offset));
4335
4336 if (SYMBOL_REFERENCES_LOCAL (info, h))
4337 {
4338 /* Symbol is referenced locally. Make sure it is
4339 defined locally, or that this is a branch. */
4340 fail = !h->def_regular && !branch;
4341 }
4342 else if (!(bfd_link_executable (info)
4343 && (h->needs_copy || eh->needs_copy)))
4344 {
4345 /* Symbol doesn't need a copy reloc and isn't referenced
4346 locally. We only allow a branch to a symbol with
4347 non-default visibility. */
4348 fail = (!branch
4349 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT);
4350 }
4351
4352 if (fail)
4353 {
4354 const char *fmt;
4355 const char *v;
4356 const char *pic = "";
4357
4358 switch (ELF_ST_VISIBILITY (h->other))
4359 {
4360 case STV_HIDDEN:
4361 v = _("hidden symbol");
4362 break;
4363 case STV_INTERNAL:
4364 v = _("internal symbol");
4365 break;
4366 case STV_PROTECTED:
4367 v = _("protected symbol");
4368 break;
4369 default:
4370 v = _("symbol");
4371 pic = _("; recompile with -fPIC");
4372 break;
4373 }
4374
4375 if (h->def_regular)
4376 fmt = _("%B: relocation %s against %s `%s' can not be used when making a shared object%s");
4377 else
4378 fmt = _("%B: relocation %s against undefined %s `%s' can not be used when making a shared object%s");
4379
4380 (*_bfd_error_handler) (fmt, input_bfd,
4381 x86_64_elf_howto_table[r_type].name,
4382 v, h->root.root.string, pic);
4383 bfd_set_error (bfd_error_bad_value);
4384 return FALSE;
4385 }
4386 }
4387 /* Fall through. */
4388
4389 case R_X86_64_8:
4390 case R_X86_64_16:
4391 case R_X86_64_32:
4392 case R_X86_64_PC64:
4393 case R_X86_64_64:
4394 /* FIXME: The ABI says the linker should make sure the value is
4395 the same when it's zero-extended to 64 bits. */
4396
4397 direct:
4398 if ((input_section->flags & SEC_ALLOC) == 0)
4399 break;
4400
4401 /* Don't copy a pc-relative relocation into the output file
4402 if the symbol needs a copy reloc or the symbol is undefined
4403 when building an executable. Copy dynamic function pointer
4404 relocations. */
4405 if ((bfd_link_pic (info)
4406 && !(bfd_link_executable (info)
4407 && h != NULL
4408 && (h->needs_copy
4409 || eh->needs_copy
4410 || h->root.type == bfd_link_hash_undefined)
4411 && IS_X86_64_PCREL_TYPE (r_type))
4412 && (h == NULL
4413 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4414 || h->root.type != bfd_link_hash_undefweak)
4415 && ((! IS_X86_64_PCREL_TYPE (r_type)
4416 && r_type != R_X86_64_SIZE32
4417 && r_type != R_X86_64_SIZE64)
4418 || ! SYMBOL_CALLS_LOCAL (info, h)))
4419 || (ELIMINATE_COPY_RELOCS
4420 && !bfd_link_pic (info)
4421 && h != NULL
4422 && h->dynindx != -1
4423 && (!h->non_got_ref || eh->func_pointer_refcount > 0)
4424 && ((h->def_dynamic
4425 && !h->def_regular)
4426 || h->root.type == bfd_link_hash_undefweak
4427 || h->root.type == bfd_link_hash_undefined)))
4428 {
4429 Elf_Internal_Rela outrel;
4430 bfd_boolean skip, relocate;
4431 asection *sreloc;
4432
4433 /* When generating a shared object, these relocations
4434 are copied into the output file to be resolved at run
4435 time. */
4436 skip = FALSE;
4437 relocate = FALSE;
4438
4439 outrel.r_offset =
4440 _bfd_elf_section_offset (output_bfd, info, input_section,
4441 rel->r_offset);
4442 if (outrel.r_offset == (bfd_vma) -1)
4443 skip = TRUE;
4444 else if (outrel.r_offset == (bfd_vma) -2)
4445 skip = TRUE, relocate = TRUE;
4446
4447 outrel.r_offset += (input_section->output_section->vma
4448 + input_section->output_offset);
4449
4450 if (skip)
4451 memset (&outrel, 0, sizeof outrel);
4452
4453 /* h->dynindx may be -1 if this symbol was marked to
4454 become local. */
4455 else if (h != NULL
4456 && h->dynindx != -1
4457 && (IS_X86_64_PCREL_TYPE (r_type)
4458 || ! bfd_link_pic (info)
4459 || ! SYMBOLIC_BIND (info, h)
4460 || ! h->def_regular))
4461 {
4462 outrel.r_info = htab->r_info (h->dynindx, r_type);
4463 outrel.r_addend = rel->r_addend;
4464 }
4465 else
4466 {
4467 /* This symbol is local, or marked to become local. */
4468 if (r_type == htab->pointer_r_type)
4469 {
4470 relocate = TRUE;
4471 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4472 outrel.r_addend = relocation + rel->r_addend;
4473 }
4474 else if (r_type == R_X86_64_64
4475 && !ABI_64_P (output_bfd))
4476 {
4477 relocate = TRUE;
4478 outrel.r_info = htab->r_info (0,
4479 R_X86_64_RELATIVE64);
4480 outrel.r_addend = relocation + rel->r_addend;
4481 /* Check addend overflow. */
4482 if ((outrel.r_addend & 0x80000000)
4483 != (rel->r_addend & 0x80000000))
4484 {
4485 const char *name;
4486 int addend = rel->r_addend;
4487 if (h && h->root.root.string)
4488 name = h->root.root.string;
4489 else
4490 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
4491 sym, NULL);
4492 if (addend < 0)
4493 (*_bfd_error_handler)
4494 (_("%B: addend -0x%x in relocation %s against "
4495 "symbol `%s' at 0x%lx in section `%A' is "
4496 "out of range"),
4497 input_bfd, input_section, addend,
4498 x86_64_elf_howto_table[r_type].name,
4499 name, (unsigned long) rel->r_offset);
4500 else
4501 (*_bfd_error_handler)
4502 (_("%B: addend 0x%x in relocation %s against "
4503 "symbol `%s' at 0x%lx in section `%A' is "
4504 "out of range"),
4505 input_bfd, input_section, addend,
4506 x86_64_elf_howto_table[r_type].name,
4507 name, (unsigned long) rel->r_offset);
4508 bfd_set_error (bfd_error_bad_value);
4509 return FALSE;
4510 }
4511 }
4512 else
4513 {
4514 long sindx;
4515
4516 if (bfd_is_abs_section (sec))
4517 sindx = 0;
4518 else if (sec == NULL || sec->owner == NULL)
4519 {
4520 bfd_set_error (bfd_error_bad_value);
4521 return FALSE;
4522 }
4523 else
4524 {
4525 asection *osec;
4526
4527 /* We are turning this relocation into one
4528 against a section symbol. It would be
4529 proper to subtract the symbol's value,
4530 osec->vma, from the emitted reloc addend,
4531 but ld.so expects buggy relocs. */
4532 osec = sec->output_section;
4533 sindx = elf_section_data (osec)->dynindx;
4534 if (sindx == 0)
4535 {
4536 asection *oi = htab->elf.text_index_section;
4537 sindx = elf_section_data (oi)->dynindx;
4538 }
4539 BFD_ASSERT (sindx != 0);
4540 }
4541
4542 outrel.r_info = htab->r_info (sindx, r_type);
4543 outrel.r_addend = relocation + rel->r_addend;
4544 }
4545 }
4546
4547 sreloc = elf_section_data (input_section)->sreloc;
4548
4549 if (sreloc == NULL || sreloc->contents == NULL)
4550 {
4551 r = bfd_reloc_notsupported;
4552 goto check_relocation_error;
4553 }
4554
4555 elf_append_rela (output_bfd, sreloc, &outrel);
4556
4557 /* If this reloc is against an external symbol, we do
4558 not want to fiddle with the addend. Otherwise, we
4559 need to include the symbol value so that it becomes
4560 an addend for the dynamic reloc. */
4561 if (! relocate)
4562 continue;
4563 }
4564
4565 break;
4566
4567 case R_X86_64_TLSGD:
4568 case R_X86_64_GOTPC32_TLSDESC:
4569 case R_X86_64_TLSDESC_CALL:
4570 case R_X86_64_GOTTPOFF:
4571 tls_type = GOT_UNKNOWN;
4572 if (h == NULL && local_got_offsets)
4573 tls_type = elf_x86_64_local_got_tls_type (input_bfd) [r_symndx];
4574 else if (h != NULL)
4575 tls_type = elf_x86_64_hash_entry (h)->tls_type;
4576
4577 if (! elf_x86_64_tls_transition (info, input_bfd,
4578 input_section, contents,
4579 symtab_hdr, sym_hashes,
4580 &r_type, tls_type, rel,
4581 relend, h, r_symndx))
4582 return FALSE;
4583
4584 if (r_type == R_X86_64_TPOFF32)
4585 {
4586 bfd_vma roff = rel->r_offset;
4587
4588 BFD_ASSERT (! unresolved_reloc);
4589
4590 if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSGD)
4591 {
4592 /* GD->LE transition. For 64bit, change
4593 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
4594 .word 0x6666; rex64; call __tls_get_addr
4595 into:
4596 movq %fs:0, %rax
4597 leaq foo@tpoff(%rax), %rax
4598 For 32bit, change
4599 leaq foo@tlsgd(%rip), %rdi
4600 .word 0x6666; rex64; call __tls_get_addr
4601 into:
4602 movl %fs:0, %eax
4603 leaq foo@tpoff(%rax), %rax
4604 For largepic, change:
4605 leaq foo@tlsgd(%rip), %rdi
4606 movabsq $__tls_get_addr@pltoff, %rax
4607 addq %rbx, %rax
4608 call *%rax
4609 into:
4610 movq %fs:0, %rax
4611 leaq foo@tpoff(%rax), %rax
4612 nopw 0x0(%rax,%rax,1) */
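/* Rough decoding of the replacement bytes used below:
   "64 48 8b 04 25 00 00 00 00" is movq %fs:0, %rax and
   "48 8d 80 imm32" is leaq imm32(%rax), %rax; the @tpoff value is
   patched into that displacement by the bfd_put_32 call at
   roff + 8 (+ largepic).  The 32-bit variant drops the REX.W prefix
   from the first instruction ("64 8b 04 25 ...", movl %fs:0, %eax).  */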
4613 int largepic = 0;
4614 if (ABI_64_P (output_bfd)
4615 && contents[roff + 5] == (bfd_byte) '\xb8')
4616 {
4617 memcpy (contents + roff - 3,
4618 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80"
4619 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
4620 largepic = 1;
4621 }
4622 else if (ABI_64_P (output_bfd))
4623 memcpy (contents + roff - 4,
4624 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
4625 16);
4626 else
4627 memcpy (contents + roff - 3,
4628 "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
4629 15);
4630 bfd_put_32 (output_bfd,
4631 elf_x86_64_tpoff (info, relocation),
4632 contents + roff + 8 + largepic);
4633 /* Skip R_X86_64_PC32/R_X86_64_PLT32/R_X86_64_PLTOFF64. */
4634 rel++;
4635 continue;
4636 }
4637 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTPC32_TLSDESC)
4638 {
4639 /* GDesc -> LE transition.
4640 It's originally something like:
4641 leaq x@tlsdesc(%rip), %rax
4642
4643 Change it to:
4644 movl $x@tpoff, %rax. */
4645
4646 unsigned int val, type;
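/* Sketch of the rewrite below: the original leaq is REX prefix, 0x8d,
   ModRM (mod=00, reg=destination, rm=101 for RIP-relative).  It
   becomes a mov of a 32-bit immediate into the same register: the
   REX.R bit of the old prefix is moved to REX.B of the new one (the
   destination moves from the ModRM reg field to the rm field), the
   opcode becomes 0xc7 and the new ModRM is 0xc0 | destination.  The
   @tpoff value is then stored as the immediate.  */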
4647
4648 type = bfd_get_8 (input_bfd, contents + roff - 3);
4649 val = bfd_get_8 (input_bfd, contents + roff - 1);
4650 bfd_put_8 (output_bfd, 0x48 | ((type >> 2) & 1),
4651 contents + roff - 3);
4652 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
4653 bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7),
4654 contents + roff - 1);
4655 bfd_put_32 (output_bfd,
4656 elf_x86_64_tpoff (info, relocation),
4657 contents + roff);
4658 continue;
4659 }
4660 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSDESC_CALL)
4661 {
4662 /* GDesc -> LE transition.
4663 It's originally:
4664 call *(%rax)
4665 Turn it into:
4666 xchg %ax,%ax. */
4667 bfd_put_8 (output_bfd, 0x66, contents + roff);
4668 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
4669 continue;
4670 }
4671 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTTPOFF)
4672 {
4673 /* IE->LE transition:
4674 For 64bit, originally it can be one of:
4675 movq foo@gottpoff(%rip), %reg
4676 addq foo@gottpoff(%rip), %reg
4677 We change it into:
4678 movq $foo, %reg
4679 leaq foo(%reg), %reg
4680 addq $foo, %reg.
4681 For 32bit, originally it can be one of:
4682 movq foo@gottpoff(%rip), %reg
4683 addl foo@gottpoff(%rip), %reg
4684 We change it into:
4685 movq $foo, %reg
4686 leal foo(%reg), %reg
4687 addl $foo, %reg. */
4688
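/* Sketch of the rewrite below: VAL is the REX prefix of the original
   instruction (if any), TYPE its opcode (0x8b for mov, otherwise add)
   and REG the destination taken from the ModRM reg field.  movq
   becomes a mov of a 32-bit immediate (0xc7 /0), addq/addl with
   %rsp/%r12 becomes add $imm32 (0x81 /0), and any other addq/addl
   becomes leaq/leal disp32(%reg), %reg (0x8d).  The REX prefix is
   adjusted to match the new addressing form, and the @tpoff value is
   stored as the immediate or displacement.  */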
4689 unsigned int val, type, reg;
4690
4691 if (roff >= 3)
4692 val = bfd_get_8 (input_bfd, contents + roff - 3);
4693 else
4694 val = 0;
4695 type = bfd_get_8 (input_bfd, contents + roff - 2);
4696 reg = bfd_get_8 (input_bfd, contents + roff - 1);
4697 reg >>= 3;
4698 if (type == 0x8b)
4699 {
4700 /* movq */
4701 if (val == 0x4c)
4702 bfd_put_8 (output_bfd, 0x49,
4703 contents + roff - 3);
4704 else if (!ABI_64_P (output_bfd) && val == 0x44)
4705 bfd_put_8 (output_bfd, 0x41,
4706 contents + roff - 3);
4707 bfd_put_8 (output_bfd, 0xc7,
4708 contents + roff - 2);
4709 bfd_put_8 (output_bfd, 0xc0 | reg,
4710 contents + roff - 1);
4711 }
4712 else if (reg == 4)
4713 {
4714 /* addq/addl -> addq/addl - addressing with %rsp/%r12
4715 is special */
4716 if (val == 0x4c)
4717 bfd_put_8 (output_bfd, 0x49,
4718 contents + roff - 3);
4719 else if (!ABI_64_P (output_bfd) && val == 0x44)
4720 bfd_put_8 (output_bfd, 0x41,
4721 contents + roff - 3);
4722 bfd_put_8 (output_bfd, 0x81,
4723 contents + roff - 2);
4724 bfd_put_8 (output_bfd, 0xc0 | reg,
4725 contents + roff - 1);
4726 }
4727 else
4728 {
4729 /* addq/addl -> leaq/leal */
4730 if (val == 0x4c)
4731 bfd_put_8 (output_bfd, 0x4d,
4732 contents + roff - 3);
4733 else if (!ABI_64_P (output_bfd) && val == 0x44)
4734 bfd_put_8 (output_bfd, 0x45,
4735 contents + roff - 3);
4736 bfd_put_8 (output_bfd, 0x8d,
4737 contents + roff - 2);
4738 bfd_put_8 (output_bfd, 0x80 | reg | (reg << 3),
4739 contents + roff - 1);
4740 }
4741 bfd_put_32 (output_bfd,
4742 elf_x86_64_tpoff (info, relocation),
4743 contents + roff);
4744 continue;
4745 }
4746 else
4747 BFD_ASSERT (FALSE);
4748 }
4749
4750 if (htab->elf.sgot == NULL)
4751 abort ();
4752
4753 if (h != NULL)
4754 {
4755 off = h->got.offset;
4756 offplt = elf_x86_64_hash_entry (h)->tlsdesc_got;
4757 }
4758 else
4759 {
4760 if (local_got_offsets == NULL)
4761 abort ();
4762
4763 off = local_got_offsets[r_symndx];
4764 offplt = local_tlsdesc_gotents[r_symndx];
4765 }
4766
4767 if ((off & 1) != 0)
4768 off &= ~1;
4769 else
4770 {
4771 Elf_Internal_Rela outrel;
4772 int dr_type, indx;
4773 asection *sreloc;
4774
4775 if (htab->elf.srelgot == NULL)
4776 abort ();
4777
4778 indx = h && h->dynindx != -1 ? h->dynindx : 0;
4779
4780 if (GOT_TLS_GDESC_P (tls_type))
4781 {
4782 outrel.r_info = htab->r_info (indx, R_X86_64_TLSDESC);
4783 BFD_ASSERT (htab->sgotplt_jump_table_size + offplt
4784 + 2 * GOT_ENTRY_SIZE <= htab->elf.sgotplt->size);
4785 outrel.r_offset = (htab->elf.sgotplt->output_section->vma
4786 + htab->elf.sgotplt->output_offset
4787 + offplt
4788 + htab->sgotplt_jump_table_size);
4789 sreloc = htab->elf.srelplt;
4790 if (indx == 0)
4791 outrel.r_addend = relocation - elf_x86_64_dtpoff_base (info);
4792 else
4793 outrel.r_addend = 0;
4794 elf_append_rela (output_bfd, sreloc, &outrel);
4795 }
4796
4797 sreloc = htab->elf.srelgot;
4798
4799 outrel.r_offset = (htab->elf.sgot->output_section->vma
4800 + htab->elf.sgot->output_offset + off);
4801
4802 if (GOT_TLS_GD_P (tls_type))
4803 dr_type = R_X86_64_DTPMOD64;
4804 else if (GOT_TLS_GDESC_P (tls_type))
4805 goto dr_done;
4806 else
4807 dr_type = R_X86_64_TPOFF64;
4808
4809 bfd_put_64 (output_bfd, 0, htab->elf.sgot->contents + off);
4810 outrel.r_addend = 0;
4811 if ((dr_type == R_X86_64_TPOFF64
4812 || dr_type == R_X86_64_TLSDESC) && indx == 0)
4813 outrel.r_addend = relocation - elf_x86_64_dtpoff_base (info);
4814 outrel.r_info = htab->r_info (indx, dr_type);
4815
4816 elf_append_rela (output_bfd, sreloc, &outrel);
4817
4818 if (GOT_TLS_GD_P (tls_type))
4819 {
4820 if (indx == 0)
4821 {
4822 BFD_ASSERT (! unresolved_reloc);
4823 bfd_put_64 (output_bfd,
4824 relocation - elf_x86_64_dtpoff_base (info),
4825 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
4826 }
4827 else
4828 {
4829 bfd_put_64 (output_bfd, 0,
4830 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
4831 outrel.r_info = htab->r_info (indx,
4832 R_X86_64_DTPOFF64);
4833 outrel.r_offset += GOT_ENTRY_SIZE;
4834 elf_append_rela (output_bfd, sreloc,
4835 &outrel);
4836 }
4837 }
4838
4839 dr_done:
4840 if (h != NULL)
4841 h->got.offset |= 1;
4842 else
4843 local_got_offsets[r_symndx] |= 1;
4844 }
4845
4846 if (off >= (bfd_vma) -2
4847 && ! GOT_TLS_GDESC_P (tls_type))
4848 abort ();
4849 if (r_type == ELF32_R_TYPE (rel->r_info))
4850 {
4851 if (r_type == R_X86_64_GOTPC32_TLSDESC
4852 || r_type == R_X86_64_TLSDESC_CALL)
4853 relocation = htab->elf.sgotplt->output_section->vma
4854 + htab->elf.sgotplt->output_offset
4855 + offplt + htab->sgotplt_jump_table_size;
4856 else
4857 relocation = htab->elf.sgot->output_section->vma
4858 + htab->elf.sgot->output_offset + off;
4859 unresolved_reloc = FALSE;
4860 }
4861 else
4862 {
4863 bfd_vma roff = rel->r_offset;
4864
4865 if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSGD)
4866 {
4867 /* GD->IE transition. For 64bit, change
4868 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
4869 .word 0x6666; rex64; call __tls_get_addr@plt
4870 into:
4871 movq %fs:0, %rax
4872 addq foo@gottpoff(%rip), %rax
4873 For 32bit, change
4874 leaq foo@tlsgd(%rip), %rdi
4875 .word 0x6666; rex64; call __tls_get_addr@plt
4876 into:
4877 movl %fs:0, %eax
4878 addq foo@gottpoff(%rip), %rax
4879 For largepic, change:
4880 leaq foo@tlsgd(%rip), %rdi
4881 movabsq $__tls_get_addr@pltoff, %rax
4882 addq %rbx, %rax
4883 call *%rax
4884 into:
4885 movq %fs:0, %rax
4886 addq foo@gottpoff(%rax), %rax
4887 nopw 0x0(%rax,%rax,1) */
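/* A note on the arithmetic below: the replacement sequence ends with
   "addq foo@gottpoff(%rip), %rax", whose 4-byte displacement sits at
   roff + 8 (+ largepic) and is relative to the end of that
   instruction.  Hence the value stored is the GOT entry address minus
   the address of roff + 12 (+ largepic).  */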
4888 int largepic = 0;
4889 if (ABI_64_P (output_bfd)
4890 && contents[roff + 5] == (bfd_byte) '\xb8')
4891 {
4892 memcpy (contents + roff - 3,
4893 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05"
4894 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
4895 largepic = 1;
4896 }
4897 else if (ABI_64_P (output_bfd))
4898 memcpy (contents + roff - 4,
4899 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
4900 16);
4901 else
4902 memcpy (contents + roff - 3,
4903 "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
4904 15);
4905
4906 relocation = (htab->elf.sgot->output_section->vma
4907 + htab->elf.sgot->output_offset + off
4908 - roff
4909 - largepic
4910 - input_section->output_section->vma
4911 - input_section->output_offset
4912 - 12);
4913 bfd_put_32 (output_bfd, relocation,
4914 contents + roff + 8 + largepic);
4915 /* Skip R_X86_64_PLT32/R_X86_64_PLTOFF64. */
4916 rel++;
4917 continue;
4918 }
4919 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTPC32_TLSDESC)
4920 {
4921 /* GDesc -> IE transition.
4922 It's originally something like:
4923 leaq x@tlsdesc(%rip), %rax
4924
4925 Change it to:
4926 movq x@gottpoff(%rip), %rax # before xchg %ax,%ax. */
4927
4928 /* Now modify the instruction as appropriate. To
4929 turn a leaq into a movq in the form we use it, it
4930 suffices to change the second byte from 0x8d to
4931 0x8b. */
4932 bfd_put_8 (output_bfd, 0x8b, contents + roff - 2);
4933
4934 bfd_put_32 (output_bfd,
4935 htab->elf.sgot->output_section->vma
4936 + htab->elf.sgot->output_offset + off
4937 - rel->r_offset
4938 - input_section->output_section->vma
4939 - input_section->output_offset
4940 - 4,
4941 contents + roff);
4942 continue;
4943 }
4944 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSDESC_CALL)
4945 {
4946 /* GDesc -> IE transition.
4947 It's originally:
4948 call *(%rax)
4949
4950 Change it to:
4951 xchg %ax, %ax. */
4952
4953 bfd_put_8 (output_bfd, 0x66, contents + roff);
4954 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
4955 continue;
4956 }
4957 else
4958 BFD_ASSERT (FALSE);
4959 }
4960 break;
4961
4962 case R_X86_64_TLSLD:
4963 if (! elf_x86_64_tls_transition (info, input_bfd,
4964 input_section, contents,
4965 symtab_hdr, sym_hashes,
4966 &r_type, GOT_UNKNOWN,
4967 rel, relend, h, r_symndx))
4968 return FALSE;
4969
4970 if (r_type != R_X86_64_TLSLD)
4971 {
4972 /* LD->LE transition:
4973 leaq foo@tlsld(%rip), %rdi; call __tls_get_addr.
4974 For 64bit, we change it into:
4975 .word 0x6666; .byte 0x66; movq %fs:0, %rax.
4976 For 32bit, we change it into:
4977 nopl 0x0(%rax); movl %fs:0, %eax.
4978 For largepic, change:
4979 leaq foo@tlsgd(%rip), %rdi
4980 movabsq $__tls_get_addr@pltoff, %rax
4981 addq %rbx, %rax
4982 call *%rax
4983 into:
4984 data32 data32 data32 nopw %cs:0x0(%rax,%rax,1)
4985 movq %fs:0, %rax */
4986
4987 BFD_ASSERT (r_type == R_X86_64_TPOFF32);
4988 if (ABI_64_P (output_bfd)
4989 && contents[rel->r_offset + 5] == (bfd_byte) '\xb8')
4990 memcpy (contents + rel->r_offset - 3,
4991 "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0"
4992 "\x64\x48\x8b\x04\x25\0\0\0", 22);
4993 else if (ABI_64_P (output_bfd))
4994 memcpy (contents + rel->r_offset - 3,
4995 "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12);
4996 else
4997 memcpy (contents + rel->r_offset - 3,
4998 "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12);
4999 /* Skip R_X86_64_PC32/R_X86_64_PLT32/R_X86_64_PLTOFF64. */
5000 rel++;
5001 continue;
5002 }
5003
5004 if (htab->elf.sgot == NULL)
5005 abort ();
5006
5007 off = htab->tls_ld_got.offset;
5008 if (off & 1)
5009 off &= ~1;
5010 else
5011 {
5012 Elf_Internal_Rela outrel;
5013
5014 if (htab->elf.srelgot == NULL)
5015 abort ();
5016
5017 outrel.r_offset = (htab->elf.sgot->output_section->vma
5018 + htab->elf.sgot->output_offset + off);
5019
5020 bfd_put_64 (output_bfd, 0,
5021 htab->elf.sgot->contents + off);
5022 bfd_put_64 (output_bfd, 0,
5023 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
5024 outrel.r_info = htab->r_info (0, R_X86_64_DTPMOD64);
5025 outrel.r_addend = 0;
5026 elf_append_rela (output_bfd, htab->elf.srelgot,
5027 &outrel);
5028 htab->tls_ld_got.offset |= 1;
5029 }
5030 relocation = htab->elf.sgot->output_section->vma
5031 + htab->elf.sgot->output_offset + off;
5032 unresolved_reloc = FALSE;
5033 break;
5034
5035 case R_X86_64_DTPOFF32:
5036 if (!bfd_link_executable (info)
5037 || (input_section->flags & SEC_CODE) == 0)
5038 relocation -= elf_x86_64_dtpoff_base (info);
5039 else
5040 relocation = elf_x86_64_tpoff (info, relocation);
5041 break;
5042
5043 case R_X86_64_TPOFF32:
5044 case R_X86_64_TPOFF64:
5045 BFD_ASSERT (bfd_link_executable (info));
5046 relocation = elf_x86_64_tpoff (info, relocation);
5047 break;
5048
5049 case R_X86_64_DTPOFF64:
5050 BFD_ASSERT ((input_section->flags & SEC_CODE) == 0);
5051 relocation -= elf_x86_64_dtpoff_base (info);
5052 break;
5053
5054 default:
5055 break;
5056 }
5057
5058 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
5059 because such sections are not SEC_ALLOC and thus ld.so will
5060 not process them. */
5061 if (unresolved_reloc
5062 && !((input_section->flags & SEC_DEBUGGING) != 0
5063 && h->def_dynamic)
5064 && _bfd_elf_section_offset (output_bfd, info, input_section,
5065 rel->r_offset) != (bfd_vma) -1)
5066 {
5067 (*_bfd_error_handler)
5068 (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
5069 input_bfd,
5070 input_section,
5071 (long) rel->r_offset,
5072 howto->name,
5073 h->root.root.string);
5074 return FALSE;
5075 }
5076
5077 do_relocation:
5078 r = _bfd_final_link_relocate (howto, input_bfd, input_section,
5079 contents, rel->r_offset,
5080 relocation, rel->r_addend);
5081
5082 check_relocation_error:
5083 if (r != bfd_reloc_ok)
5084 {
5085 const char *name;
5086
5087 if (h != NULL)
5088 name = h->root.root.string;
5089 else
5090 {
5091 name = bfd_elf_string_from_elf_section (input_bfd,
5092 symtab_hdr->sh_link,
5093 sym->st_name);
5094 if (name == NULL)
5095 return FALSE;
5096 if (*name == '\0')
5097 name = bfd_section_name (input_bfd, sec);
5098 }
5099
5100 if (r == bfd_reloc_overflow)
5101 {
5102 if (! ((*info->callbacks->reloc_overflow)
5103 (info, (h ? &h->root : NULL), name, howto->name,
5104 (bfd_vma) 0, input_bfd, input_section,
5105 rel->r_offset)))
5106 return FALSE;
5107 }
5108 else
5109 {
5110 (*_bfd_error_handler)
5111 (_("%B(%A+0x%lx): reloc against `%s': error %d"),
5112 input_bfd, input_section,
5113 (long) rel->r_offset, name, (int) r);
5114 return FALSE;
5115 }
5116 }
5117 }
5118
5119 return TRUE;
5120 }
5121
5122 /* Finish up dynamic symbol handling. We set the contents of various
5123 dynamic sections here. */
5124
5125 static bfd_boolean
5126 elf_x86_64_finish_dynamic_symbol (bfd *output_bfd,
5127 struct bfd_link_info *info,
5128 struct elf_link_hash_entry *h,
5129 Elf_Internal_Sym *sym ATTRIBUTE_UNUSED)
5130 {
5131 struct elf_x86_64_link_hash_table *htab;
5132 const struct elf_x86_64_backend_data *abed;
5133 bfd_boolean use_plt_bnd;
5134 struct elf_x86_64_link_hash_entry *eh;
5135
5136 htab = elf_x86_64_hash_table (info);
5137 if (htab == NULL)
5138 return FALSE;
5139
5140 /* Use MPX backend data in case of BND relocation. Use .plt_bnd
5141 section only if there is .plt section. */
5142 use_plt_bnd = htab->elf.splt != NULL && htab->plt_bnd != NULL;
5143 abed = (use_plt_bnd
5144 ? &elf_x86_64_bnd_arch_bed
5145 : get_elf_x86_64_backend_data (output_bfd));
5146
5147 eh = (struct elf_x86_64_link_hash_entry *) h;
5148
5149 if (h->plt.offset != (bfd_vma) -1)
5150 {
5151 bfd_vma plt_index;
5152 bfd_vma got_offset, plt_offset, plt_plt_offset, plt_got_offset;
5153 bfd_vma plt_plt_insn_end, plt_got_insn_size;
5154 Elf_Internal_Rela rela;
5155 bfd_byte *loc;
5156 asection *plt, *gotplt, *relplt, *resolved_plt;
5157 const struct elf_backend_data *bed;
5158 bfd_vma plt_got_pcrel_offset;
5159
5160 /* When building a static executable, use .iplt, .igot.plt and
5161 .rela.iplt sections for STT_GNU_IFUNC symbols. */
5162 if (htab->elf.splt != NULL)
5163 {
5164 plt = htab->elf.splt;
5165 gotplt = htab->elf.sgotplt;
5166 relplt = htab->elf.srelplt;
5167 }
5168 else
5169 {
5170 plt = htab->elf.iplt;
5171 gotplt = htab->elf.igotplt;
5172 relplt = htab->elf.irelplt;
5173 }
5174
5175 /* This symbol has an entry in the procedure linkage table. Set
5176 it up. */
5177 if ((h->dynindx == -1
5178 && !((h->forced_local || bfd_link_executable (info))
5179 && h->def_regular
5180 && h->type == STT_GNU_IFUNC))
5181 || plt == NULL
5182 || gotplt == NULL
5183 || relplt == NULL)
5184 abort ();
5185
5186 /* Get the index in the procedure linkage table which
5187 corresponds to this symbol. This is the index of this symbol
5188 in all the symbols for which we are making plt entries. The
5189 first entry in the procedure linkage table is reserved.
5190
5191 Get the offset into the .got table of the entry that
5192 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
5193 bytes. The first three are reserved for the dynamic linker.
5194
5195 For static executables, we don't reserve anything. */
5196
5197 if (plt == htab->elf.splt)
5198 {
5199 got_offset = h->plt.offset / abed->plt_entry_size - 1;
5200 got_offset = (got_offset + 3) * GOT_ENTRY_SIZE;
5201 }
5202 else
5203 {
5204 got_offset = h->plt.offset / abed->plt_entry_size;
5205 got_offset = got_offset * GOT_ENTRY_SIZE;
5206 }
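/* Worked example of the mapping above, assuming 16-byte PLT entries:
   a symbol with h->plt.offset == 32 sits in the first slot after the
   reserved PLT0 entry, so its index is 1 and its .got.plt entry lives
   at (1 + 3) * GOT_ENTRY_SIZE == 32, past the three slots reserved for
   the dynamic linker.  */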
5207
5208 plt_plt_insn_end = abed->plt_plt_insn_end;
5209 plt_plt_offset = abed->plt_plt_offset;
5210 plt_got_insn_size = abed->plt_got_insn_size;
5211 plt_got_offset = abed->plt_got_offset;
5212 if (use_plt_bnd)
5213 {
5214 /* Use the second PLT with BND relocations. */
5215 const bfd_byte *plt_entry, *plt2_entry;
5216
5217 if (eh->has_bnd_reloc)
5218 {
5219 plt_entry = elf_x86_64_bnd_plt_entry;
5220 plt2_entry = elf_x86_64_bnd_plt2_entry;
5221 }
5222 else
5223 {
5224 plt_entry = elf_x86_64_legacy_plt_entry;
5225 plt2_entry = elf_x86_64_legacy_plt2_entry;
5226
5227 /* Subtract 1 since there is no BND prefix. */
5228 plt_plt_insn_end -= 1;
5229 plt_plt_offset -= 1;
5230 plt_got_insn_size -= 1;
5231 plt_got_offset -= 1;
5232 }
5233
5234 BFD_ASSERT (sizeof (elf_x86_64_bnd_plt_entry)
5235 == sizeof (elf_x86_64_legacy_plt_entry));
5236
5237 /* Fill in the entry in the procedure linkage table. */
5238 memcpy (plt->contents + h->plt.offset,
5239 plt_entry, sizeof (elf_x86_64_legacy_plt_entry));
5240 /* Fill in the entry in the second PLT. */
5241 memcpy (htab->plt_bnd->contents + eh->plt_bnd.offset,
5242 plt2_entry, sizeof (elf_x86_64_legacy_plt2_entry));
5243
5244 resolved_plt = htab->plt_bnd;
5245 plt_offset = eh->plt_bnd.offset;
5246 }
5247 else
5248 {
5249 /* Fill in the entry in the procedure linkage table. */
5250 memcpy (plt->contents + h->plt.offset, abed->plt_entry,
5251 abed->plt_entry_size);
5252
5253 resolved_plt = plt;
5254 plt_offset = h->plt.offset;
5255 }
5256
5257 /* Insert the relocation positions of the plt section. */
5258
5259 /* Store the offset to the GOT entry relative to the end of the
5260 PC-relative instruction that refers to it. */
5261 plt_got_pcrel_offset = (gotplt->output_section->vma
5262 + gotplt->output_offset
5263 + got_offset
5264 - resolved_plt->output_section->vma
5265 - resolved_plt->output_offset
5266 - plt_offset
5267 - plt_got_insn_size);
5268
5269 /* Check PC-relative offset overflow in PLT entry. */
5270 if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff)
5271 info->callbacks->einfo (_("%F%B: PC-relative offset overflow in PLT entry for `%s'\n"),
5272 output_bfd, h->root.root.string);
5273
5274 bfd_put_32 (output_bfd, plt_got_pcrel_offset,
5275 resolved_plt->contents + plt_offset + plt_got_offset);
5276
5277 /* Fill in the entry in the global offset table, initially this
5278 points to the second part of the PLT entry. */
5279 bfd_put_64 (output_bfd, (plt->output_section->vma
5280 + plt->output_offset
5281 + h->plt.offset + abed->plt_lazy_offset),
5282 gotplt->contents + got_offset);
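/* Presumably plt_lazy_offset points at the pushq of the relocation
   index inside this PLT entry: before the first call, the indirect
   jump through this GOT slot falls through to the push/jmp pair that
   enters the lazy resolver, and ld.so later overwrites the slot with
   the real function address.  */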
5283
5284 /* Fill in the entry in the .rela.plt section. */
5285 rela.r_offset = (gotplt->output_section->vma
5286 + gotplt->output_offset
5287 + got_offset);
5288 if (h->dynindx == -1
5289 || ((bfd_link_executable (info)
5290 || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
5291 && h->def_regular
5292 && h->type == STT_GNU_IFUNC))
5293 {
5294 /* If an STT_GNU_IFUNC symbol is locally defined, generate
5295 R_X86_64_IRELATIVE instead of R_X86_64_JUMP_SLOT. */
5296 rela.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
5297 rela.r_addend = (h->root.u.def.value
5298 + h->root.u.def.section->output_section->vma
5299 + h->root.u.def.section->output_offset);
5300 /* R_X86_64_IRELATIVE comes last. */
5301 plt_index = htab->next_irelative_index--;
5302 }
5303 else
5304 {
5305 rela.r_info = htab->r_info (h->dynindx, R_X86_64_JUMP_SLOT);
5306 rela.r_addend = 0;
5307 plt_index = htab->next_jump_slot_index++;
5308 }
5309
5310 /* Don't fill PLT entry for static executables. */
5311 if (plt == htab->elf.splt)
5312 {
5313 bfd_vma plt0_offset = h->plt.offset + plt_plt_insn_end;
5314
5315 /* Put relocation index. */
5316 bfd_put_32 (output_bfd, plt_index,
5317 plt->contents + h->plt.offset + abed->plt_reloc_offset);
5318
5319 /* Put offset for jmp .PLT0 and check for overflow. We don't
5320 check relocation index for overflow since branch displacement
5321 will overflow first. */
5322 if (plt0_offset > 0x80000000)
5323 info->callbacks->einfo (_("%F%B: branch displacement overflow in PLT entry for `%s'\n"),
5324 output_bfd, h->root.root.string);
5325 bfd_put_32 (output_bfd, - plt0_offset,
5326 plt->contents + h->plt.offset + plt_plt_offset);
5327 }
5328
5329 bed = get_elf_backend_data (output_bfd);
5330 loc = relplt->contents + plt_index * bed->s->sizeof_rela;
5331 bed->s->swap_reloca_out (output_bfd, &rela, loc);
5332 }
5333 else if (eh->plt_got.offset != (bfd_vma) -1)
5334 {
5335 bfd_vma got_offset, plt_offset, plt_got_offset, plt_got_insn_size;
5336 asection *plt, *got;
5337 bfd_boolean got_after_plt;
5338 int32_t got_pcrel_offset;
5339 const bfd_byte *got_plt_entry;
5340
5341 /* Set the entry in the GOT procedure linkage table. */
5342 plt = htab->plt_got;
5343 got = htab->elf.sgot;
5344 got_offset = h->got.offset;
5345
5346 if (got_offset == (bfd_vma) -1
5347 || h->type == STT_GNU_IFUNC
5348 || plt == NULL
5349 || got == NULL)
5350 abort ();
5351
5352 /* Use the second PLT entry template for the GOT PLT since they
5353 are identical. */
5354 plt_got_insn_size = elf_x86_64_bnd_arch_bed.plt_got_insn_size;
5355 plt_got_offset = elf_x86_64_bnd_arch_bed.plt_got_offset;
5356 if (eh->has_bnd_reloc)
5357 got_plt_entry = elf_x86_64_bnd_plt2_entry;
5358 else
5359 {
5360 got_plt_entry = elf_x86_64_legacy_plt2_entry;
5361
5362 /* Subtract 1 since there is no BND prefix. */
5363 plt_got_insn_size -= 1;
5364 plt_got_offset -= 1;
5365 }
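/* The second-PLT template is assumed here to be a bare indirect
   "jmpq *disp32(%rip)" through the GOT entry, with a bnd prefix in
   the MPX variant (hence the one-byte adjustments above), so the only
   field that needs patching is the PC-relative GOT offset written
   below.  */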
5366
5367 /* Fill in the entry in the GOT procedure linkage table. */
5368 plt_offset = eh->plt_got.offset;
5369 memcpy (plt->contents + plt_offset,
5370 got_plt_entry, sizeof (elf_x86_64_legacy_plt2_entry));
5371
5372 /* Store the offset to the GOT entry relative to the end of the
5373 PC-relative instruction that refers to it. */
5374 got_pcrel_offset = (got->output_section->vma
5375 + got->output_offset
5376 + got_offset
5377 - plt->output_section->vma
5378 - plt->output_offset
5379 - plt_offset
5380 - plt_got_insn_size);
5381
5382 /* Check PC-relative offset overflow in GOT PLT entry. */
5383 got_after_plt = got->output_section->vma > plt->output_section->vma;
5384 if ((got_after_plt && got_pcrel_offset < 0)
5385 || (!got_after_plt && got_pcrel_offset > 0))
5386 info->callbacks->einfo (_("%F%B: PC-relative offset overflow in GOT PLT entry for `%s'\n"),
5387 output_bfd, h->root.root.string);
5388
5389 bfd_put_32 (output_bfd, got_pcrel_offset,
5390 plt->contents + plt_offset + plt_got_offset);
5391 }
5392
5393 if (!h->def_regular
5394 && (h->plt.offset != (bfd_vma) -1
5395 || eh->plt_got.offset != (bfd_vma) -1))
5396 {
5397 /* Mark the symbol as undefined, rather than as defined in
5398 the .plt section. Leave the value if there were any
5399 relocations where pointer equality matters (this is a clue
5400 for the dynamic linker, to make function pointer
5401 comparisons work between an application and shared
5402 library), otherwise set it to zero. If a function is only
5403 called from a binary, there is no need to slow down
5404 shared libraries because of that. */
5405 sym->st_shndx = SHN_UNDEF;
5406 if (!h->pointer_equality_needed)
5407 sym->st_value = 0;
5408 }
5409
5410 if (h->got.offset != (bfd_vma) -1
5411 && ! GOT_TLS_GD_ANY_P (elf_x86_64_hash_entry (h)->tls_type)
5412 && elf_x86_64_hash_entry (h)->tls_type != GOT_TLS_IE)
5413 {
5414 Elf_Internal_Rela rela;
5415
5416 /* This symbol has an entry in the global offset table. Set it
5417 up. */
5418 if (htab->elf.sgot == NULL || htab->elf.srelgot == NULL)
5419 abort ();
5420
5421 rela.r_offset = (htab->elf.sgot->output_section->vma
5422 + htab->elf.sgot->output_offset
5423 + (h->got.offset &~ (bfd_vma) 1));
5424
5425 /* If this is a static link, or it is a -Bsymbolic link and the
5426 symbol is defined locally or was forced to be local because
5427 of a version file, we just want to emit a RELATIVE reloc.
5428 The entry in the global offset table will already have been
5429 initialized in the relocate_section function. */
5430 if (h->def_regular
5431 && h->type == STT_GNU_IFUNC)
5432 {
5433 if (bfd_link_pic (info))
5434 {
5435 /* Generate R_X86_64_GLOB_DAT. */
5436 goto do_glob_dat;
5437 }
5438 else
5439 {
5440 asection *plt;
5441
5442 if (!h->pointer_equality_needed)
5443 abort ();
5444
5445 /* For a non-shared object, we can't use .got.plt, which
5446 contains the real function address, if we need pointer
5447 equality. We load the GOT entry with the PLT entry address. */
5448 plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt;
5449 bfd_put_64 (output_bfd, (plt->output_section->vma
5450 + plt->output_offset
5451 + h->plt.offset),
5452 htab->elf.sgot->contents + h->got.offset);
5453 return TRUE;
5454 }
5455 }
5456 else if (bfd_link_pic (info)
5457 && SYMBOL_REFERENCES_LOCAL (info, h))
5458 {
5459 if (!h->def_regular)
5460 return FALSE;
5461 BFD_ASSERT((h->got.offset & 1) != 0);
5462 rela.r_info = htab->r_info (0, R_X86_64_RELATIVE);
5463 rela.r_addend = (h->root.u.def.value
5464 + h->root.u.def.section->output_section->vma
5465 + h->root.u.def.section->output_offset);
5466 }
5467 else
5468 {
5469 BFD_ASSERT((h->got.offset & 1) == 0);
5470 do_glob_dat:
5471 bfd_put_64 (output_bfd, (bfd_vma) 0,
5472 htab->elf.sgot->contents + h->got.offset);
5473 rela.r_info = htab->r_info (h->dynindx, R_X86_64_GLOB_DAT);
5474 rela.r_addend = 0;
5475 }
5476
5477 elf_append_rela (output_bfd, htab->elf.srelgot, &rela);
5478 }
5479
5480 if (h->needs_copy)
5481 {
5482 Elf_Internal_Rela rela;
5483
5484 /* This symbol needs a copy reloc. Set it up. */
5485
5486 if (h->dynindx == -1
5487 || (h->root.type != bfd_link_hash_defined
5488 && h->root.type != bfd_link_hash_defweak)
5489 || htab->srelbss == NULL)
5490 abort ();
5491
5492 rela.r_offset = (h->root.u.def.value
5493 + h->root.u.def.section->output_section->vma
5494 + h->root.u.def.section->output_offset);
5495 rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY);
5496 rela.r_addend = 0;
5497 elf_append_rela (output_bfd, htab->srelbss, &rela);
5498 }
5499
5500 return TRUE;
5501 }
5502
5503 /* Finish up local dynamic symbol handling. We set the contents of
5504 various dynamic sections here. */
5505
5506 static bfd_boolean
5507 elf_x86_64_finish_local_dynamic_symbol (void **slot, void *inf)
5508 {
5509 struct elf_link_hash_entry *h
5510 = (struct elf_link_hash_entry *) *slot;
5511 struct bfd_link_info *info
5512 = (struct bfd_link_info *) inf;
5513
5514 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
5515 info, h, NULL);
5516 }
5517
5518 /* Used to decide how to sort relocs in an optimal manner for the
5519 dynamic linker, before writing them out. */
5520
5521 static enum elf_reloc_type_class
5522 elf_x86_64_reloc_type_class (const struct bfd_link_info *info,
5523 const asection *rel_sec ATTRIBUTE_UNUSED,
5524 const Elf_Internal_Rela *rela)
5525 {
5526 bfd *abfd = info->output_bfd;
5527 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
5528 struct elf_x86_64_link_hash_table *htab = elf_x86_64_hash_table (info);
5529 unsigned long r_symndx = htab->r_sym (rela->r_info);
5530 Elf_Internal_Sym sym;
5531
5532 if (htab->elf.dynsym == NULL
5533 || !bed->s->swap_symbol_in (abfd,
5534 (htab->elf.dynsym->contents
5535 + r_symndx * bed->s->sizeof_sym),
5536 0, &sym))
5537 abort ();
5538
5539 /* Check relocation against STT_GNU_IFUNC symbol. */
5540 if (ELF_ST_TYPE (sym.st_info) == STT_GNU_IFUNC)
5541 return reloc_class_ifunc;
5542
5543 switch ((int) ELF32_R_TYPE (rela->r_info))
5544 {
5545 case R_X86_64_RELATIVE:
5546 case R_X86_64_RELATIVE64:
5547 return reloc_class_relative;
5548 case R_X86_64_JUMP_SLOT:
5549 return reloc_class_plt;
5550 case R_X86_64_COPY:
5551 return reloc_class_copy;
5552 default:
5553 return reloc_class_normal;
5554 }
5555 }
5556
5557 /* Finish up the dynamic sections. */
5558
5559 static bfd_boolean
5560 elf_x86_64_finish_dynamic_sections (bfd *output_bfd,
5561 struct bfd_link_info *info)
5562 {
5563 struct elf_x86_64_link_hash_table *htab;
5564 bfd *dynobj;
5565 asection *sdyn;
5566 const struct elf_x86_64_backend_data *abed;
5567
5568 htab = elf_x86_64_hash_table (info);
5569 if (htab == NULL)
5570 return FALSE;
5571
5572 /* Use MPX backend data in case of BND relocation. Use .plt_bnd
5573 section only if there is .plt section. */
5574 abed = (htab->elf.splt != NULL && htab->plt_bnd != NULL
5575 ? &elf_x86_64_bnd_arch_bed
5576 : get_elf_x86_64_backend_data (output_bfd));
5577
5578 dynobj = htab->elf.dynobj;
5579 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
5580
5581 if (htab->elf.dynamic_sections_created)
5582 {
5583 bfd_byte *dyncon, *dynconend;
5584 const struct elf_backend_data *bed;
5585 bfd_size_type sizeof_dyn;
5586
5587 if (sdyn == NULL || htab->elf.sgot == NULL)
5588 abort ();
5589
5590 bed = get_elf_backend_data (dynobj);
5591 sizeof_dyn = bed->s->sizeof_dyn;
5592 dyncon = sdyn->contents;
5593 dynconend = sdyn->contents + sdyn->size;
5594 for (; dyncon < dynconend; dyncon += sizeof_dyn)
5595 {
5596 Elf_Internal_Dyn dyn;
5597 asection *s;
5598
5599 (*bed->s->swap_dyn_in) (dynobj, dyncon, &dyn);
5600
5601 switch (dyn.d_tag)
5602 {
5603 default:
5604 continue;
5605
5606 case DT_PLTGOT:
5607 s = htab->elf.sgotplt;
5608 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
5609 break;
5610
5611 case DT_JMPREL:
5612 dyn.d_un.d_ptr = htab->elf.srelplt->output_section->vma;
5613 break;
5614
5615 case DT_PLTRELSZ:
5616 s = htab->elf.srelplt->output_section;
5617 dyn.d_un.d_val = s->size;
5618 break;
5619
5620 case DT_RELASZ:
5621 /* The procedure linkage table relocs (DT_JMPREL) should
5622 not be included in the overall relocs (DT_RELA).
5623 Therefore, we override the DT_RELASZ entry here to
5624 make it not include the JMPREL relocs. Since the
5625 linker script arranges for .rela.plt to follow all
5626 other relocation sections, we don't have to worry
5627 about changing the DT_RELA entry. */
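	      /* For example, with hypothetical sizes: if all RELA
		 relocations together occupy 0x420 bytes of which 0x120
		 bytes are the .rela.plt (JMPREL) relocs, the value is
		 reduced from 0x420 to 0x300 here.  */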
5628 if (htab->elf.srelplt != NULL)
5629 {
5630 s = htab->elf.srelplt->output_section;
5631 dyn.d_un.d_val -= s->size;
5632 }
5633 break;
5634
5635 case DT_TLSDESC_PLT:
5636 s = htab->elf.splt;
5637 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
5638 + htab->tlsdesc_plt;
5639 break;
5640
5641 case DT_TLSDESC_GOT:
5642 s = htab->elf.sgot;
5643 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
5644 + htab->tlsdesc_got;
5645 break;
5646 }
5647
5648 (*bed->s->swap_dyn_out) (output_bfd, &dyn, dyncon);
5649 }
5650
5651 /* Fill in the special first entry in the procedure linkage table. */
5652 if (htab->elf.splt && htab->elf.splt->size > 0)
5653 {
5654 /* Fill in the first entry in the procedure linkage table. */
5655 memcpy (htab->elf.splt->contents,
5656 abed->plt0_entry, abed->plt_entry_size);
5657 	  /* Add the offset for pushq GOT+8(%rip); the displacement is
5658 	     relative to the end of that 6-byte instruction, so subtract 6.  */
5659 bfd_put_32 (output_bfd,
5660 (htab->elf.sgotplt->output_section->vma
5661 + htab->elf.sgotplt->output_offset
5662 + 8
5663 - htab->elf.splt->output_section->vma
5664 - htab->elf.splt->output_offset
5665 - 6),
5666 htab->elf.splt->contents + abed->plt0_got1_offset);
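	  /* As an illustration with hypothetical addresses: if .plt is at
	     VMA 0x400400 and .got.plt at VMA 0x600ff8, the value written
	     above is (0x600ff8 + 8) - (0x400400 + 6) = 0x200bfa, the
	     displacement from the end of the 6-byte pushq to GOT+8.  */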
5667 /* Add offset for the PC-relative instruction accessing GOT+16,
5668 subtracting the offset to the end of that instruction. */
5669 bfd_put_32 (output_bfd,
5670 (htab->elf.sgotplt->output_section->vma
5671 + htab->elf.sgotplt->output_offset
5672 + 16
5673 - htab->elf.splt->output_section->vma
5674 - htab->elf.splt->output_offset
5675 - abed->plt0_got2_insn_end),
5676 htab->elf.splt->contents + abed->plt0_got2_offset);
5677
5678 elf_section_data (htab->elf.splt->output_section)
5679 ->this_hdr.sh_entsize = abed->plt_entry_size;
5680
5681 if (htab->tlsdesc_plt)
5682 {
5683 bfd_put_64 (output_bfd, (bfd_vma) 0,
5684 htab->elf.sgot->contents + htab->tlsdesc_got);
5685
5686 memcpy (htab->elf.splt->contents + htab->tlsdesc_plt,
5687 abed->plt0_entry, abed->plt_entry_size);
5688
5689 	      /* Add the offset for pushq GOT+8(%rip); the displacement is
5690 	         relative to the end of that 6-byte instruction, so subtract 6.  */
5691 bfd_put_32 (output_bfd,
5692 (htab->elf.sgotplt->output_section->vma
5693 + htab->elf.sgotplt->output_offset
5694 + 8
5695 - htab->elf.splt->output_section->vma
5696 - htab->elf.splt->output_offset
5697 - htab->tlsdesc_plt
5698 - 6),
5699 htab->elf.splt->contents
5700 + htab->tlsdesc_plt + abed->plt0_got1_offset);
5701 /* Add offset for the PC-relative instruction accessing GOT+TDG,
5702 	         where TDG stands for htab->tlsdesc_got, subtracting the offset
5703 to the end of that instruction. */
5704 bfd_put_32 (output_bfd,
5705 (htab->elf.sgot->output_section->vma
5706 + htab->elf.sgot->output_offset
5707 + htab->tlsdesc_got
5708 - htab->elf.splt->output_section->vma
5709 - htab->elf.splt->output_offset
5710 - htab->tlsdesc_plt
5711 - abed->plt0_got2_insn_end),
5712 htab->elf.splt->contents
5713 + htab->tlsdesc_plt + abed->plt0_got2_offset);
5714 }
5715 }
5716 }
5717
5718 if (htab->plt_bnd != NULL)
5719 elf_section_data (htab->plt_bnd->output_section)
5720 ->this_hdr.sh_entsize = sizeof (elf_x86_64_bnd_plt2_entry);
5721
5722 if (htab->elf.sgotplt)
5723 {
5724 if (bfd_is_abs_section (htab->elf.sgotplt->output_section))
5725 {
5726 (*_bfd_error_handler)
5727 (_("discarded output section: `%A'"), htab->elf.sgotplt);
5728 return FALSE;
5729 }
5730
5731 /* Fill in the first three entries in the global offset table. */
5732 if (htab->elf.sgotplt->size > 0)
5733 {
5734 /* Set the first entry in the global offset table to the address of
5735 the dynamic section. */
5736 if (sdyn == NULL)
5737 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents);
5738 else
5739 bfd_put_64 (output_bfd,
5740 sdyn->output_section->vma + sdyn->output_offset,
5741 htab->elf.sgotplt->contents);
5742 /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
5743 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents + GOT_ENTRY_SIZE);
5744 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents + GOT_ENTRY_SIZE*2);
5745 }
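	  /* GOT[0], set above, points at _DYNAMIC; at run time the dynamic
	     linker conventionally fills GOT[1] with a pointer identifying
	     this object (its link map) and GOT[2] with the address of its
	     lazy resolver, which PLT0 pushes and jumps through.  */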
5746
5747 elf_section_data (htab->elf.sgotplt->output_section)->this_hdr.sh_entsize =
5748 GOT_ENTRY_SIZE;
5749 }
5750
5751 /* Adjust .eh_frame for .plt section. */
5752 if (htab->plt_eh_frame != NULL
5753 && htab->plt_eh_frame->contents != NULL)
5754 {
5755 if (htab->elf.splt != NULL
5756 && htab->elf.splt->size != 0
5757 && (htab->elf.splt->flags & SEC_EXCLUDE) == 0
5758 && htab->elf.splt->output_section != NULL
5759 && htab->plt_eh_frame->output_section != NULL)
5760 {
5761 bfd_vma plt_start = htab->elf.splt->output_section->vma;
5762 bfd_vma eh_frame_start = htab->plt_eh_frame->output_section->vma
5763 + htab->plt_eh_frame->output_offset
5764 + PLT_FDE_START_OFFSET;
5765 bfd_put_signed_32 (dynobj, plt_start - eh_frame_start,
5766 htab->plt_eh_frame->contents
5767 + PLT_FDE_START_OFFSET);
5768 }
5769 if (htab->plt_eh_frame->sec_info_type == SEC_INFO_TYPE_EH_FRAME)
5770 {
5771 if (! _bfd_elf_write_section_eh_frame (output_bfd, info,
5772 htab->plt_eh_frame,
5773 htab->plt_eh_frame->contents))
5774 return FALSE;
5775 }
5776 }
5777
5778 if (htab->elf.sgot && htab->elf.sgot->size > 0)
5779 elf_section_data (htab->elf.sgot->output_section)->this_hdr.sh_entsize
5780 = GOT_ENTRY_SIZE;
5781
5782 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
5783 htab_traverse (htab->loc_hash_table,
5784 elf_x86_64_finish_local_dynamic_symbol,
5785 info);
5786
5787 return TRUE;
5788 }
5789
5790 /* Return an array of PLT entry symbol values. */
5791
5792 static bfd_vma *
5793 elf_x86_64_get_plt_sym_val (bfd *abfd, asymbol **dynsyms, asection *plt,
5794 asection *relplt)
5795 {
5796 bfd_boolean (*slurp_relocs) (bfd *, asection *, asymbol **, bfd_boolean);
5797 arelent *p;
5798 long count, i;
5799 bfd_vma *plt_sym_val;
5800 bfd_vma plt_offset;
5801 bfd_byte *plt_contents;
5802 const struct elf_x86_64_backend_data *bed;
5803 Elf_Internal_Shdr *hdr;
5804 asection *plt_bnd;
5805
5806 /* Get the .plt section contents. PLT passed down may point to the
5807 .plt.bnd section. Make sure that PLT always points to the .plt
5808 section. */
5809 plt_bnd = bfd_get_section_by_name (abfd, ".plt.bnd");
5810 if (plt_bnd)
5811 {
5812 if (plt != plt_bnd)
5813 abort ();
5814 plt = bfd_get_section_by_name (abfd, ".plt");
5815 if (plt == NULL)
5816 abort ();
5817 bed = &elf_x86_64_bnd_arch_bed;
5818 }
5819 else
5820 bed = get_elf_x86_64_backend_data (abfd);
5821
5822 plt_contents = (bfd_byte *) bfd_malloc (plt->size);
5823 if (plt_contents == NULL)
5824 return NULL;
5825 if (!bfd_get_section_contents (abfd, (asection *) plt,
5826 plt_contents, 0, plt->size))
5827 {
5828 bad_return:
5829 free (plt_contents);
5830 return NULL;
5831 }
5832
5833 slurp_relocs = get_elf_backend_data (abfd)->s->slurp_reloc_table;
5834 if (! (*slurp_relocs) (abfd, relplt, dynsyms, TRUE))
5835 goto bad_return;
5836
5837 hdr = &elf_section_data (relplt)->this_hdr;
5838 count = relplt->size / hdr->sh_entsize;
5839
5840 plt_sym_val = (bfd_vma *) bfd_malloc (sizeof (bfd_vma) * count);
5841 if (plt_sym_val == NULL)
5842 goto bad_return;
5843
5844 for (i = 0; i < count; i++)
5845 plt_sym_val[i] = -1;
5846
5847 plt_offset = bed->plt_entry_size;
5848 p = relplt->relocation;
5849 for (i = 0; i < count; i++, p++)
5850 {
5851 long reloc_index;
5852
5853 /* Skip unknown relocation. */
5854 if (p->howto == NULL)
5855 continue;
5856
5857 if (p->howto->type != R_X86_64_JUMP_SLOT
5858 && p->howto->type != R_X86_64_IRELATIVE)
5859 continue;
5860
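	  /* The 32-bit value read below is the immediate of the pushq in
	     the lazy-binding part of the PLT entry, i.e. the index of this
	     entry's relocation in .rela.plt.  */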
5861 reloc_index = H_GET_32 (abfd, (plt_contents + plt_offset
5862 + bed->plt_reloc_offset));
5863 if (reloc_index >= count)
5864 abort ();
5865 if (plt_bnd)
5866 {
5867 	  /* This is the index in the .plt section.  */
5868 long plt_index = plt_offset / bed->plt_entry_size;
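	  /* .plt.bnd has no header entry, so PLT entry N (entry 0 being
	     PLT0) corresponds to .plt.bnd entry N - 1 below.  */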
5869 /* Store VMA + the offset in .plt.bnd section. */
5870 plt_sym_val[reloc_index] =
5871 (plt_bnd->vma
5872 + (plt_index - 1) * sizeof (elf_x86_64_legacy_plt2_entry));
5873 }
5874 else
5875 plt_sym_val[reloc_index] = plt->vma + plt_offset;
5876 plt_offset += bed->plt_entry_size;
5877
5878 /* PR binutils/18437: Skip extra relocations in the .rela.plt
5879 section. */
5880 if (plt_offset >= plt->size)
5881 break;
5882 }
5883
5884 free (plt_contents);
5885
5886 return plt_sym_val;
5887 }
5888
5889 /* Similar to _bfd_elf_get_synthetic_symtab, with .plt.bnd section
5890 support. */
5891
5892 static long
5893 elf_x86_64_get_synthetic_symtab (bfd *abfd,
5894 long symcount,
5895 asymbol **syms,
5896 long dynsymcount,
5897 asymbol **dynsyms,
5898 asymbol **ret)
5899 {
5900 /* Pass the .plt.bnd section to _bfd_elf_ifunc_get_synthetic_symtab
5901 as PLT if it exists. */
5902 asection *plt = bfd_get_section_by_name (abfd, ".plt.bnd");
5903 if (plt == NULL)
5904 plt = bfd_get_section_by_name (abfd, ".plt");
5905 return _bfd_elf_ifunc_get_synthetic_symtab (abfd, symcount, syms,
5906 dynsymcount, dynsyms, ret,
5907 plt,
5908 elf_x86_64_get_plt_sym_val);
5909 }
5910
5911 /* Handle an x86-64 specific section when reading an object file. This
5912 is called when elfcode.h finds a section with an unknown type. */
5913
5914 static bfd_boolean
5915 elf_x86_64_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr,
5916 const char *name, int shindex)
5917 {
5918 if (hdr->sh_type != SHT_X86_64_UNWIND)
5919 return FALSE;
5920
5921 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
5922 return FALSE;
5923
5924 return TRUE;
5925 }
5926
5927 /* Hook called by the linker routine which adds symbols from an object
5928 file. We use it to put SHN_X86_64_LCOMMON items in .lbss, instead
5929 of .bss. */
5930
5931 static bfd_boolean
5932 elf_x86_64_add_symbol_hook (bfd *abfd,
5933 struct bfd_link_info *info,
5934 Elf_Internal_Sym *sym,
5935 const char **namep ATTRIBUTE_UNUSED,
5936 flagword *flagsp ATTRIBUTE_UNUSED,
5937 asection **secp,
5938 bfd_vma *valp)
5939 {
5940 asection *lcomm;
5941
5942 switch (sym->st_shndx)
5943 {
5944 case SHN_X86_64_LCOMMON:
5945 lcomm = bfd_get_section_by_name (abfd, "LARGE_COMMON");
5946 if (lcomm == NULL)
5947 {
5948 lcomm = bfd_make_section_with_flags (abfd,
5949 "LARGE_COMMON",
5950 (SEC_ALLOC
5951 | SEC_IS_COMMON
5952 | SEC_LINKER_CREATED));
5953 if (lcomm == NULL)
5954 return FALSE;
5955 elf_section_flags (lcomm) |= SHF_X86_64_LARGE;
5956 }
5957 *secp = lcomm;
5958 *valp = sym->st_size;
5959 return TRUE;
5960 }
5961
5962 if (ELF_ST_BIND (sym->st_info) == STB_GNU_UNIQUE
5963 && (abfd->flags & DYNAMIC) == 0
5964 && bfd_get_flavour (info->output_bfd) == bfd_target_elf_flavour)
5965 elf_tdata (info->output_bfd)->has_gnu_symbols
5966 |= elf_gnu_symbol_unique;
5967
5968 return TRUE;
5969 }
5970
5971
5972 /* Given a BFD section, try to locate the corresponding ELF section
5973 index. */
5974
5975 static bfd_boolean
5976 elf_x86_64_elf_section_from_bfd_section (bfd *abfd ATTRIBUTE_UNUSED,
5977 asection *sec, int *index_return)
5978 {
5979 if (sec == &_bfd_elf_large_com_section)
5980 {
5981 *index_return = SHN_X86_64_LCOMMON;
5982 return TRUE;
5983 }
5984 return FALSE;
5985 }
5986
5987 /* Process a symbol. */
5988
5989 static void
5990 elf_x86_64_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
5991 asymbol *asym)
5992 {
5993 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
5994
5995 switch (elfsym->internal_elf_sym.st_shndx)
5996 {
5997 case SHN_X86_64_LCOMMON:
5998 asym->section = &_bfd_elf_large_com_section;
5999 asym->value = elfsym->internal_elf_sym.st_size;
6000       /* A common symbol doesn't set BSF_GLOBAL.  */
6001 asym->flags &= ~BSF_GLOBAL;
6002 break;
6003 }
6004 }
6005
6006 static bfd_boolean
6007 elf_x86_64_common_definition (Elf_Internal_Sym *sym)
6008 {
6009 return (sym->st_shndx == SHN_COMMON
6010 || sym->st_shndx == SHN_X86_64_LCOMMON);
6011 }
6012
6013 static unsigned int
6014 elf_x86_64_common_section_index (asection *sec)
6015 {
6016 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
6017 return SHN_COMMON;
6018 else
6019 return SHN_X86_64_LCOMMON;
6020 }
6021
6022 static asection *
6023 elf_x86_64_common_section (asection *sec)
6024 {
6025 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
6026 return bfd_com_section_ptr;
6027 else
6028 return &_bfd_elf_large_com_section;
6029 }
6030
6031 static bfd_boolean
6032 elf_x86_64_merge_symbol (struct elf_link_hash_entry *h,
6033 const Elf_Internal_Sym *sym,
6034 asection **psec,
6035 bfd_boolean newdef,
6036 bfd_boolean olddef,
6037 bfd *oldbfd,
6038 const asection *oldsec)
6039 {
6040 /* A normal common symbol and a large common symbol result in a
6041 normal common symbol. We turn the large common symbol into a
6042 normal one. */
6043 if (!olddef
6044 && h->root.type == bfd_link_hash_common
6045 && !newdef
6046 && bfd_is_com_section (*psec)
6047 && oldsec != *psec)
6048 {
6049 if (sym->st_shndx == SHN_COMMON
6050 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) != 0)
6051 {
6052 h->root.u.c.p->section
6053 = bfd_make_section_old_way (oldbfd, "COMMON");
6054 h->root.u.c.p->section->flags = SEC_ALLOC;
6055 }
6056 else if (sym->st_shndx == SHN_X86_64_LCOMMON
6057 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) == 0)
6058 *psec = bfd_com_section_ptr;
6059 }
6060
6061 return TRUE;
6062 }
6063
6064 static int
6065 elf_x86_64_additional_program_headers (bfd *abfd,
6066 struct bfd_link_info *info ATTRIBUTE_UNUSED)
6067 {
6068 asection *s;
6069 int count = 0;
6070
6071 /* Check to see if we need a large readonly segment. */
6072 s = bfd_get_section_by_name (abfd, ".lrodata");
6073 if (s && (s->flags & SEC_LOAD))
6074 count++;
6075
6076   /* Check to see if we need a large data segment.  Since the .lbss
6077      section is placed right after the .bss section, there should be no
6078      need for a large data segment just because of .lbss.  */
6079 s = bfd_get_section_by_name (abfd, ".ldata");
6080 if (s && (s->flags & SEC_LOAD))
6081 count++;
6082
6083 return count;
6084 }
6085
6086 /* Return TRUE if symbol should be hashed in the `.gnu.hash' section. */
6087
6088 static bfd_boolean
6089 elf_x86_64_hash_symbol (struct elf_link_hash_entry *h)
6090 {
6091 if (h->plt.offset != (bfd_vma) -1
6092 && !h->def_regular
6093 && !h->pointer_equality_needed)
6094 return FALSE;
6095
6096 return _bfd_elf_hash_symbol (h);
6097 }
6098
6099 /* Return TRUE iff relocations for INPUT are compatible with OUTPUT. */
6100
6101 static bfd_boolean
6102 elf_x86_64_relocs_compatible (const bfd_target *input,
6103 const bfd_target *output)
6104 {
6105 return ((xvec_get_elf_backend_data (input)->s->elfclass
6106 == xvec_get_elf_backend_data (output)->s->elfclass)
6107 && _bfd_elf_relocs_compatible (input, output));
6108 }
6109
6110 static const struct bfd_elf_special_section
6111 elf_x86_64_special_sections[]=
6112 {
6113 { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
6114 { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
6115 { STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE},
6116 { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
6117 { STRING_COMMA_LEN (".ldata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
6118 { STRING_COMMA_LEN (".lrodata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
6119 { NULL, 0, 0, 0, 0 }
6120 };
6121
6122 #define TARGET_LITTLE_SYM x86_64_elf64_vec
6123 #define TARGET_LITTLE_NAME "elf64-x86-64"
6124 #define ELF_ARCH bfd_arch_i386
6125 #define ELF_TARGET_ID X86_64_ELF_DATA
6126 #define ELF_MACHINE_CODE EM_X86_64
6127 #define ELF_MAXPAGESIZE 0x200000
6128 #define ELF_MINPAGESIZE 0x1000
6129 #define ELF_COMMONPAGESIZE 0x1000
6130
6131 #define elf_backend_can_gc_sections 1
6132 #define elf_backend_can_refcount 1
6133 #define elf_backend_want_got_plt 1
6134 #define elf_backend_plt_readonly 1
6135 #define elf_backend_want_plt_sym 0
6136 #define elf_backend_got_header_size (GOT_ENTRY_SIZE*3)
6137 #define elf_backend_rela_normal 1
6138 #define elf_backend_plt_alignment 4
6139 #define elf_backend_extern_protected_data 1
6140
6141 #define elf_info_to_howto elf_x86_64_info_to_howto
6142
6143 #define bfd_elf64_bfd_link_hash_table_create \
6144 elf_x86_64_link_hash_table_create
6145 #define bfd_elf64_bfd_reloc_type_lookup elf_x86_64_reloc_type_lookup
6146 #define bfd_elf64_bfd_reloc_name_lookup \
6147 elf_x86_64_reloc_name_lookup
6148
6149 #define elf_backend_adjust_dynamic_symbol elf_x86_64_adjust_dynamic_symbol
6150 #define elf_backend_relocs_compatible elf_x86_64_relocs_compatible
6151 #define elf_backend_check_relocs elf_x86_64_check_relocs
6152 #define elf_backend_copy_indirect_symbol elf_x86_64_copy_indirect_symbol
6153 #define elf_backend_create_dynamic_sections elf_x86_64_create_dynamic_sections
6154 #define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections
6155 #define elf_backend_finish_dynamic_symbol elf_x86_64_finish_dynamic_symbol
6156 #define elf_backend_gc_mark_hook elf_x86_64_gc_mark_hook
6157 #define elf_backend_gc_sweep_hook elf_x86_64_gc_sweep_hook
6158 #define elf_backend_grok_prstatus elf_x86_64_grok_prstatus
6159 #define elf_backend_grok_psinfo elf_x86_64_grok_psinfo
6160 #ifdef CORE_HEADER
6161 #define elf_backend_write_core_note elf_x86_64_write_core_note
6162 #endif
6163 #define elf_backend_reloc_type_class elf_x86_64_reloc_type_class
6164 #define elf_backend_relocate_section elf_x86_64_relocate_section
6165 #define elf_backend_size_dynamic_sections elf_x86_64_size_dynamic_sections
6166 #define elf_backend_always_size_sections elf_x86_64_always_size_sections
6167 #define elf_backend_init_index_section _bfd_elf_init_1_index_section
6168 #define elf_backend_object_p elf64_x86_64_elf_object_p
6169 #define bfd_elf64_mkobject elf_x86_64_mkobject
6170 #define bfd_elf64_get_synthetic_symtab elf_x86_64_get_synthetic_symtab
6171
6172 #define elf_backend_section_from_shdr \
6173 elf_x86_64_section_from_shdr
6174
6175 #define elf_backend_section_from_bfd_section \
6176 elf_x86_64_elf_section_from_bfd_section
6177 #define elf_backend_add_symbol_hook \
6178 elf_x86_64_add_symbol_hook
6179 #define elf_backend_symbol_processing \
6180 elf_x86_64_symbol_processing
6181 #define elf_backend_common_section_index \
6182 elf_x86_64_common_section_index
6183 #define elf_backend_common_section \
6184 elf_x86_64_common_section
6185 #define elf_backend_common_definition \
6186 elf_x86_64_common_definition
6187 #define elf_backend_merge_symbol \
6188 elf_x86_64_merge_symbol
6189 #define elf_backend_special_sections \
6190 elf_x86_64_special_sections
6191 #define elf_backend_additional_program_headers \
6192 elf_x86_64_additional_program_headers
6193 #define elf_backend_hash_symbol \
6194 elf_x86_64_hash_symbol
6195
6196 #include "elf64-target.h"
6197
6198 /* CloudABI support. */
6199
6200 #undef TARGET_LITTLE_SYM
6201 #define TARGET_LITTLE_SYM x86_64_elf64_cloudabi_vec
6202 #undef TARGET_LITTLE_NAME
6203 #define TARGET_LITTLE_NAME "elf64-x86-64-cloudabi"
6204
6205 #undef ELF_OSABI
6206 #define ELF_OSABI ELFOSABI_CLOUDABI
6207
6208 #undef elf64_bed
6209 #define elf64_bed elf64_x86_64_cloudabi_bed
6210
6211 #include "elf64-target.h"
6212
6213 /* FreeBSD support. */
6214
6215 #undef TARGET_LITTLE_SYM
6216 #define TARGET_LITTLE_SYM x86_64_elf64_fbsd_vec
6217 #undef TARGET_LITTLE_NAME
6218 #define TARGET_LITTLE_NAME "elf64-x86-64-freebsd"
6219
6220 #undef ELF_OSABI
6221 #define ELF_OSABI ELFOSABI_FREEBSD
6222
6223 #undef elf64_bed
6224 #define elf64_bed elf64_x86_64_fbsd_bed
6225
6226 #include "elf64-target.h"
6227
6228 /* Solaris 2 support. */
6229
6230 #undef TARGET_LITTLE_SYM
6231 #define TARGET_LITTLE_SYM x86_64_elf64_sol2_vec
6232 #undef TARGET_LITTLE_NAME
6233 #define TARGET_LITTLE_NAME "elf64-x86-64-sol2"
6234
6235 /* Restore default: we cannot use ELFOSABI_SOLARIS, otherwise ELFOSABI_NONE
6236 objects won't be recognized. */
6237 #undef ELF_OSABI
6238
6239 #undef elf64_bed
6240 #define elf64_bed elf64_x86_64_sol2_bed
6241
6242 /* The 64-bit static TLS arena size is rounded to the nearest 16-byte
6243 boundary. */
6244 #undef elf_backend_static_tls_alignment
6245 #define elf_backend_static_tls_alignment 16
6246
6247 /* The Solaris 2 ABI requires a plt symbol on all platforms.
6248
6249 Cf. Linker and Libraries Guide, Ch. 2, Link-Editor, Generating the Output
6250 File, p.63. */
6251 #undef elf_backend_want_plt_sym
6252 #define elf_backend_want_plt_sym 1
6253
6254 #include "elf64-target.h"
6255
6256 /* Native Client support. */
6257
6258 static bfd_boolean
6259 elf64_x86_64_nacl_elf_object_p (bfd *abfd)
6260 {
6261 /* Set the right machine number for a NaCl x86-64 ELF64 file. */
6262 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64_nacl);
6263 return TRUE;
6264 }
6265
6266 #undef TARGET_LITTLE_SYM
6267 #define TARGET_LITTLE_SYM x86_64_elf64_nacl_vec
6268 #undef TARGET_LITTLE_NAME
6269 #define TARGET_LITTLE_NAME "elf64-x86-64-nacl"
6270 #undef elf64_bed
6271 #define elf64_bed elf64_x86_64_nacl_bed
6272
6273 #undef ELF_MAXPAGESIZE
6274 #undef ELF_MINPAGESIZE
6275 #undef ELF_COMMONPAGESIZE
6276 #define ELF_MAXPAGESIZE 0x10000
6277 #define ELF_MINPAGESIZE 0x10000
6278 #define ELF_COMMONPAGESIZE 0x10000
6279
6280 /* Restore defaults. */
6281 #undef ELF_OSABI
6282 #undef elf_backend_static_tls_alignment
6283 #undef elf_backend_want_plt_sym
6284 #define elf_backend_want_plt_sym 0
6285
6286 /* NaCl uses substantially different PLT entries for the same effects. */
6287
6288 #undef elf_backend_plt_alignment
6289 #define elf_backend_plt_alignment 5
6290 #define NACL_PLT_ENTRY_SIZE 64
6291 #define NACLMASK 0xe0 /* 32-byte alignment mask. */
6292
6293 static const bfd_byte elf_x86_64_nacl_plt0_entry[NACL_PLT_ENTRY_SIZE] =
6294 {
6295 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
6296 0x4c, 0x8b, 0x1d, 16, 0, 0, 0, /* mov GOT+16(%rip), %r11 */
6297 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
6298 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
6299 0x41, 0xff, 0xe3, /* jmpq *%r11 */
6300
6301 /* 9-byte nop sequence to pad out to the next 32-byte boundary. */
6302 0x66, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw 0x0(%rax,%rax,1) */
6303
6304 /* 32 bytes of nop to pad out to the standard size. */
6305 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
6306 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6307 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
6308 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6309 0x66, /* excess data32 prefix */
6310 0x90 /* nop */
6311 };
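/* The and/add/jmp sequences above implement the NaCl control-flow
   discipline for indirect branches: the target in %r11 is masked down to
   a 32-byte bundle boundary and then rebased against the sandbox base
   kept in %r15 before the jump.  */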
6312
6313 static const bfd_byte elf_x86_64_nacl_plt_entry[NACL_PLT_ENTRY_SIZE] =
6314 {
6315 0x4c, 0x8b, 0x1d, 0, 0, 0, 0, /* mov name@GOTPCREL(%rip),%r11 */
6316 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
6317 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
6318 0x41, 0xff, 0xe3, /* jmpq *%r11 */
6319
6320 /* 15-byte nop sequence to pad out to the next 32-byte boundary. */
6321 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
6322 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6323
6324 /* Lazy GOT entries point here (32-byte aligned). */
6325 0x68, /* pushq immediate */
6326 0, 0, 0, 0, /* replaced with index into relocation table. */
6327 0xe9, /* jmp relative */
6328 0, 0, 0, 0, /* replaced with offset to start of .plt0. */
6329
6330 /* 22 bytes of nop to pad out to the standard size. */
6331 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
6332 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6333 0x0f, 0x1f, 0x80, 0, 0, 0, 0, /* nopl 0x0(%rax) */
6334 };
6335
6336 /* .eh_frame covering the .plt section. */
6337
6338 static const bfd_byte elf_x86_64_nacl_eh_frame_plt[] =
6339 {
6340 #if (PLT_CIE_LENGTH != 20 \
6341 || PLT_FDE_LENGTH != 36 \
6342 || PLT_FDE_START_OFFSET != 4 + PLT_CIE_LENGTH + 8 \
6343 || PLT_FDE_LEN_OFFSET != 4 + PLT_CIE_LENGTH + 12)
6344 # error "Need elf_x86_64_backend_data parameters for eh_frame_plt offsets!"
6345 #endif
6346 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
6347 0, 0, 0, 0, /* CIE ID */
6348 1, /* CIE version */
6349 'z', 'R', 0, /* Augmentation string */
6350 1, /* Code alignment factor */
6351 0x78, /* Data alignment factor */
6352 16, /* Return address column */
6353 1, /* Augmentation size */
6354 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
6355 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
6356 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
6357 DW_CFA_nop, DW_CFA_nop,
6358
6359 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
6360 PLT_CIE_LENGTH + 8, 0, 0, 0,/* CIE pointer */
6361 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
6362 0, 0, 0, 0, /* .plt size goes here */
6363 0, /* Augmentation size */
6364 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
6365 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
6366 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
6367 DW_CFA_advance_loc + 58, /* DW_CFA_advance_loc: 58 to __PLT__+64 */
6368 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
6369 13, /* Block length */
6370 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
6371 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
6372 DW_OP_const1u, 63, DW_OP_and, DW_OP_const1u, 37, DW_OP_ge,
6373 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
6374 DW_CFA_nop, DW_CFA_nop
6375 };
6376
6377 static const struct elf_x86_64_backend_data elf_x86_64_nacl_arch_bed =
6378 {
6379 elf_x86_64_nacl_plt0_entry, /* plt0_entry */
6380 elf_x86_64_nacl_plt_entry, /* plt_entry */
6381 NACL_PLT_ENTRY_SIZE, /* plt_entry_size */
6382 2, /* plt0_got1_offset */
6383 9, /* plt0_got2_offset */
6384 13, /* plt0_got2_insn_end */
6385 3, /* plt_got_offset */
6386 33, /* plt_reloc_offset */
6387 38, /* plt_plt_offset */
6388 7, /* plt_got_insn_size */
6389 42, /* plt_plt_insn_end */
6390 32, /* plt_lazy_offset */
6391 elf_x86_64_nacl_eh_frame_plt, /* eh_frame_plt */
6392 sizeof (elf_x86_64_nacl_eh_frame_plt), /* eh_frame_plt_size */
6393 };
6394
6395 #undef elf_backend_arch_data
6396 #define elf_backend_arch_data &elf_x86_64_nacl_arch_bed
6397
6398 #undef elf_backend_object_p
6399 #define elf_backend_object_p elf64_x86_64_nacl_elf_object_p
6400 #undef elf_backend_modify_segment_map
6401 #define elf_backend_modify_segment_map nacl_modify_segment_map
6402 #undef elf_backend_modify_program_headers
6403 #define elf_backend_modify_program_headers nacl_modify_program_headers
6404 #undef elf_backend_final_write_processing
6405 #define elf_backend_final_write_processing nacl_final_write_processing
6406
6407 #include "elf64-target.h"
6408
6409 /* Native Client x32 support. */
6410
6411 static bfd_boolean
6412 elf32_x86_64_nacl_elf_object_p (bfd *abfd)
6413 {
6414 /* Set the right machine number for a NaCl x86-64 ELF32 file. */
6415 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32_nacl);
6416 return TRUE;
6417 }
6418
6419 #undef TARGET_LITTLE_SYM
6420 #define TARGET_LITTLE_SYM x86_64_elf32_nacl_vec
6421 #undef TARGET_LITTLE_NAME
6422 #define TARGET_LITTLE_NAME "elf32-x86-64-nacl"
6423 #undef elf32_bed
6424 #define elf32_bed elf32_x86_64_nacl_bed
6425
6426 #define bfd_elf32_bfd_link_hash_table_create \
6427 elf_x86_64_link_hash_table_create
6428 #define bfd_elf32_bfd_reloc_type_lookup \
6429 elf_x86_64_reloc_type_lookup
6430 #define bfd_elf32_bfd_reloc_name_lookup \
6431 elf_x86_64_reloc_name_lookup
6432 #define bfd_elf32_mkobject \
6433 elf_x86_64_mkobject
6434 #define bfd_elf32_get_synthetic_symtab \
6435 elf_x86_64_get_synthetic_symtab
6436
6437 #undef elf_backend_object_p
6438 #define elf_backend_object_p \
6439 elf32_x86_64_nacl_elf_object_p
6440
6441 #undef elf_backend_bfd_from_remote_memory
6442 #define elf_backend_bfd_from_remote_memory \
6443 _bfd_elf32_bfd_from_remote_memory
6444
6445 #undef elf_backend_size_info
6446 #define elf_backend_size_info \
6447 _bfd_elf32_size_info
6448
6449 #include "elf32-target.h"
6450
6451 /* Restore defaults. */
6452 #undef elf_backend_object_p
6453 #define elf_backend_object_p elf64_x86_64_elf_object_p
6454 #undef elf_backend_bfd_from_remote_memory
6455 #undef elf_backend_size_info
6456 #undef elf_backend_modify_segment_map
6457 #undef elf_backend_modify_program_headers
6458 #undef elf_backend_final_write_processing
6459
6460 /* Intel L1OM support. */
6461
6462 static bfd_boolean
6463 elf64_l1om_elf_object_p (bfd *abfd)
6464 {
6465 /* Set the right machine number for an L1OM elf64 file. */
6466 bfd_default_set_arch_mach (abfd, bfd_arch_l1om, bfd_mach_l1om);
6467 return TRUE;
6468 }
6469
6470 #undef TARGET_LITTLE_SYM
6471 #define TARGET_LITTLE_SYM l1om_elf64_vec
6472 #undef TARGET_LITTLE_NAME
6473 #define TARGET_LITTLE_NAME "elf64-l1om"
6474 #undef ELF_ARCH
6475 #define ELF_ARCH bfd_arch_l1om
6476
6477 #undef ELF_MACHINE_CODE
6478 #define ELF_MACHINE_CODE EM_L1OM
6479
6480 #undef ELF_OSABI
6481
6482 #undef elf64_bed
6483 #define elf64_bed elf64_l1om_bed
6484
6485 #undef elf_backend_object_p
6486 #define elf_backend_object_p elf64_l1om_elf_object_p
6487
6488 /* Restore defaults. */
6489 #undef ELF_MAXPAGESIZE
6490 #undef ELF_MINPAGESIZE
6491 #undef ELF_COMMONPAGESIZE
6492 #define ELF_MAXPAGESIZE 0x200000
6493 #define ELF_MINPAGESIZE 0x1000
6494 #define ELF_COMMONPAGESIZE 0x1000
6495 #undef elf_backend_plt_alignment
6496 #define elf_backend_plt_alignment 4
6497 #undef elf_backend_arch_data
6498 #define elf_backend_arch_data &elf_x86_64_arch_bed
6499
6500 #include "elf64-target.h"
6501
6502 /* FreeBSD L1OM support. */
6503
6504 #undef TARGET_LITTLE_SYM
6505 #define TARGET_LITTLE_SYM l1om_elf64_fbsd_vec
6506 #undef TARGET_LITTLE_NAME
6507 #define TARGET_LITTLE_NAME "elf64-l1om-freebsd"
6508
6509 #undef ELF_OSABI
6510 #define ELF_OSABI ELFOSABI_FREEBSD
6511
6512 #undef elf64_bed
6513 #define elf64_bed elf64_l1om_fbsd_bed
6514
6515 #include "elf64-target.h"
6516
6517 /* Intel K1OM support. */
6518
6519 static bfd_boolean
6520 elf64_k1om_elf_object_p (bfd *abfd)
6521 {
6522   /* Set the right machine number for a K1OM elf64 file.  */
6523 bfd_default_set_arch_mach (abfd, bfd_arch_k1om, bfd_mach_k1om);
6524 return TRUE;
6525 }
6526
6527 #undef TARGET_LITTLE_SYM
6528 #define TARGET_LITTLE_SYM k1om_elf64_vec
6529 #undef TARGET_LITTLE_NAME
6530 #define TARGET_LITTLE_NAME "elf64-k1om"
6531 #undef ELF_ARCH
6532 #define ELF_ARCH bfd_arch_k1om
6533
6534 #undef ELF_MACHINE_CODE
6535 #define ELF_MACHINE_CODE EM_K1OM
6536
6537 #undef ELF_OSABI
6538
6539 #undef elf64_bed
6540 #define elf64_bed elf64_k1om_bed
6541
6542 #undef elf_backend_object_p
6543 #define elf_backend_object_p elf64_k1om_elf_object_p
6544
6545 #undef elf_backend_static_tls_alignment
6546
6547 #undef elf_backend_want_plt_sym
6548 #define elf_backend_want_plt_sym 0
6549
6550 #include "elf64-target.h"
6551
6552 /* FreeBSD K1OM support. */
6553
6554 #undef TARGET_LITTLE_SYM
6555 #define TARGET_LITTLE_SYM k1om_elf64_fbsd_vec
6556 #undef TARGET_LITTLE_NAME
6557 #define TARGET_LITTLE_NAME "elf64-k1om-freebsd"
6558
6559 #undef ELF_OSABI
6560 #define ELF_OSABI ELFOSABI_FREEBSD
6561
6562 #undef elf64_bed
6563 #define elf64_bed elf64_k1om_fbsd_bed
6564
6565 #include "elf64-target.h"
6566
6567 /* 32bit x86-64 support. */
6568
6569 #undef TARGET_LITTLE_SYM
6570 #define TARGET_LITTLE_SYM x86_64_elf32_vec
6571 #undef TARGET_LITTLE_NAME
6572 #define TARGET_LITTLE_NAME "elf32-x86-64"
6573 #undef elf32_bed
6574
6575 #undef ELF_ARCH
6576 #define ELF_ARCH bfd_arch_i386
6577
6578 #undef ELF_MACHINE_CODE
6579 #define ELF_MACHINE_CODE EM_X86_64
6580
6581 #undef ELF_OSABI
6582
6583 #undef elf_backend_object_p
6584 #define elf_backend_object_p \
6585 elf32_x86_64_elf_object_p
6586
6587 #undef elf_backend_bfd_from_remote_memory
6588 #define elf_backend_bfd_from_remote_memory \
6589 _bfd_elf32_bfd_from_remote_memory
6590
6591 #undef elf_backend_size_info
6592 #define elf_backend_size_info \
6593 _bfd_elf32_size_info
6594
6595 #include "elf32-target.h"