1 /* X86-64 specific support for ELF
2 Copyright (C) 2000-2014 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka <jh@suse.cz>.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "sysdep.h"
23 #include "bfd.h"
24 #include "bfdlink.h"
25 #include "libbfd.h"
26 #include "elf-bfd.h"
27 #include "elf-nacl.h"
28 #include "bfd_stdint.h"
29 #include "objalloc.h"
30 #include "hashtab.h"
31 #include "dwarf2.h"
32 #include "libiberty.h"
33
34 #include "elf/x86-64.h"
35
36 #ifdef CORE_HEADER
37 #include <stdarg.h>
38 #include CORE_HEADER
39 #endif
40
41 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
42 #define MINUS_ONE (~ (bfd_vma) 0)
43
44 /* Since both 32-bit and 64-bit x86-64 encode the relocation type in an
45 identical manner, we use ELF32_R_TYPE instead of ELF64_R_TYPE to get
46 the relocation type.  We also use ELF_ST_TYPE instead of ELF64_ST_TYPE
47 since they are the same. */
48
49 #define ABI_64_P(abfd) \
50 (get_elf_backend_data (abfd)->s->elfclass == ELFCLASS64)
51
52 /* The relocation "howto" table. Order of fields:
53 type, rightshift, size, bitsize, pc_relative, bitpos, complain_on_overflow,
54 special_function, name, partial_inplace, src_mask, dst_mask, pcrel_offset. */
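/* Note on the "size" field: it uses BFD's encoding rather than a byte
   count.  In this version of BFD, 0 means 1 byte, 1 means 2 bytes,
   2 means 4 bytes and 4 means 8 bytes, so R_X86_64_64 below is
   (4, 64) while R_X86_64_PC32 is (2, 32).  */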
55 static reloc_howto_type x86_64_elf_howto_table[] =
56 {
57 HOWTO(R_X86_64_NONE, 0, 0, 0, FALSE, 0, complain_overflow_dont,
58 bfd_elf_generic_reloc, "R_X86_64_NONE", FALSE, 0x00000000, 0x00000000,
59 FALSE),
60 HOWTO(R_X86_64_64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
61 bfd_elf_generic_reloc, "R_X86_64_64", FALSE, MINUS_ONE, MINUS_ONE,
62 FALSE),
63 HOWTO(R_X86_64_PC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
64 bfd_elf_generic_reloc, "R_X86_64_PC32", FALSE, 0xffffffff, 0xffffffff,
65 TRUE),
66 HOWTO(R_X86_64_GOT32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
67 bfd_elf_generic_reloc, "R_X86_64_GOT32", FALSE, 0xffffffff, 0xffffffff,
68 FALSE),
69 HOWTO(R_X86_64_PLT32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
70 bfd_elf_generic_reloc, "R_X86_64_PLT32", FALSE, 0xffffffff, 0xffffffff,
71 TRUE),
72 HOWTO(R_X86_64_COPY, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
73 bfd_elf_generic_reloc, "R_X86_64_COPY", FALSE, 0xffffffff, 0xffffffff,
74 FALSE),
75 HOWTO(R_X86_64_GLOB_DAT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
76 bfd_elf_generic_reloc, "R_X86_64_GLOB_DAT", FALSE, MINUS_ONE,
77 MINUS_ONE, FALSE),
78 HOWTO(R_X86_64_JUMP_SLOT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
79 bfd_elf_generic_reloc, "R_X86_64_JUMP_SLOT", FALSE, MINUS_ONE,
80 MINUS_ONE, FALSE),
81 HOWTO(R_X86_64_RELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
82 bfd_elf_generic_reloc, "R_X86_64_RELATIVE", FALSE, MINUS_ONE,
83 MINUS_ONE, FALSE),
84 HOWTO(R_X86_64_GOTPCREL, 0, 2, 32, TRUE, 0, complain_overflow_signed,
85 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL", FALSE, 0xffffffff,
86 0xffffffff, TRUE),
87 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
88 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
89 FALSE),
90 HOWTO(R_X86_64_32S, 0, 2, 32, FALSE, 0, complain_overflow_signed,
91 bfd_elf_generic_reloc, "R_X86_64_32S", FALSE, 0xffffffff, 0xffffffff,
92 FALSE),
93 HOWTO(R_X86_64_16, 0, 1, 16, FALSE, 0, complain_overflow_bitfield,
94 bfd_elf_generic_reloc, "R_X86_64_16", FALSE, 0xffff, 0xffff, FALSE),
95 HOWTO(R_X86_64_PC16,0, 1, 16, TRUE, 0, complain_overflow_bitfield,
96 bfd_elf_generic_reloc, "R_X86_64_PC16", FALSE, 0xffff, 0xffff, TRUE),
97 HOWTO(R_X86_64_8, 0, 0, 8, FALSE, 0, complain_overflow_bitfield,
98 bfd_elf_generic_reloc, "R_X86_64_8", FALSE, 0xff, 0xff, FALSE),
99 HOWTO(R_X86_64_PC8, 0, 0, 8, TRUE, 0, complain_overflow_signed,
100 bfd_elf_generic_reloc, "R_X86_64_PC8", FALSE, 0xff, 0xff, TRUE),
101 HOWTO(R_X86_64_DTPMOD64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
102 bfd_elf_generic_reloc, "R_X86_64_DTPMOD64", FALSE, MINUS_ONE,
103 MINUS_ONE, FALSE),
104 HOWTO(R_X86_64_DTPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
105 bfd_elf_generic_reloc, "R_X86_64_DTPOFF64", FALSE, MINUS_ONE,
106 MINUS_ONE, FALSE),
107 HOWTO(R_X86_64_TPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
108 bfd_elf_generic_reloc, "R_X86_64_TPOFF64", FALSE, MINUS_ONE,
109 MINUS_ONE, FALSE),
110 HOWTO(R_X86_64_TLSGD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
111 bfd_elf_generic_reloc, "R_X86_64_TLSGD", FALSE, 0xffffffff,
112 0xffffffff, TRUE),
113 HOWTO(R_X86_64_TLSLD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
114 bfd_elf_generic_reloc, "R_X86_64_TLSLD", FALSE, 0xffffffff,
115 0xffffffff, TRUE),
116 HOWTO(R_X86_64_DTPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
117 bfd_elf_generic_reloc, "R_X86_64_DTPOFF32", FALSE, 0xffffffff,
118 0xffffffff, FALSE),
119 HOWTO(R_X86_64_GOTTPOFF, 0, 2, 32, TRUE, 0, complain_overflow_signed,
120 bfd_elf_generic_reloc, "R_X86_64_GOTTPOFF", FALSE, 0xffffffff,
121 0xffffffff, TRUE),
122 HOWTO(R_X86_64_TPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
123 bfd_elf_generic_reloc, "R_X86_64_TPOFF32", FALSE, 0xffffffff,
124 0xffffffff, FALSE),
125 HOWTO(R_X86_64_PC64, 0, 4, 64, TRUE, 0, complain_overflow_bitfield,
126 bfd_elf_generic_reloc, "R_X86_64_PC64", FALSE, MINUS_ONE, MINUS_ONE,
127 TRUE),
128 HOWTO(R_X86_64_GOTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
129 bfd_elf_generic_reloc, "R_X86_64_GOTOFF64",
130 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
131 HOWTO(R_X86_64_GOTPC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
132 bfd_elf_generic_reloc, "R_X86_64_GOTPC32",
133 FALSE, 0xffffffff, 0xffffffff, TRUE),
134 HOWTO(R_X86_64_GOT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
135 bfd_elf_generic_reloc, "R_X86_64_GOT64", FALSE, MINUS_ONE, MINUS_ONE,
136 FALSE),
137 HOWTO(R_X86_64_GOTPCREL64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
138 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL64", FALSE, MINUS_ONE,
139 MINUS_ONE, TRUE),
140 HOWTO(R_X86_64_GOTPC64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
141 bfd_elf_generic_reloc, "R_X86_64_GOTPC64",
142 FALSE, MINUS_ONE, MINUS_ONE, TRUE),
143 HOWTO(R_X86_64_GOTPLT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
144 bfd_elf_generic_reloc, "R_X86_64_GOTPLT64", FALSE, MINUS_ONE,
145 MINUS_ONE, FALSE),
146 HOWTO(R_X86_64_PLTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
147 bfd_elf_generic_reloc, "R_X86_64_PLTOFF64", FALSE, MINUS_ONE,
148 MINUS_ONE, FALSE),
149 HOWTO(R_X86_64_SIZE32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
150 bfd_elf_generic_reloc, "R_X86_64_SIZE32", FALSE, 0xffffffff, 0xffffffff,
151 FALSE),
152 HOWTO(R_X86_64_SIZE64, 0, 4, 64, FALSE, 0, complain_overflow_unsigned,
153 bfd_elf_generic_reloc, "R_X86_64_SIZE64", FALSE, MINUS_ONE, MINUS_ONE,
154 FALSE),
155 HOWTO(R_X86_64_GOTPC32_TLSDESC, 0, 2, 32, TRUE, 0,
156 complain_overflow_bitfield, bfd_elf_generic_reloc,
157 "R_X86_64_GOTPC32_TLSDESC",
158 FALSE, 0xffffffff, 0xffffffff, TRUE),
159 HOWTO(R_X86_64_TLSDESC_CALL, 0, 0, 0, FALSE, 0,
160 complain_overflow_dont, bfd_elf_generic_reloc,
161 "R_X86_64_TLSDESC_CALL",
162 FALSE, 0, 0, FALSE),
163 HOWTO(R_X86_64_TLSDESC, 0, 4, 64, FALSE, 0,
164 complain_overflow_bitfield, bfd_elf_generic_reloc,
165 "R_X86_64_TLSDESC",
166 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
167 HOWTO(R_X86_64_IRELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
168 bfd_elf_generic_reloc, "R_X86_64_IRELATIVE", FALSE, MINUS_ONE,
169 MINUS_ONE, FALSE),
170 HOWTO(R_X86_64_RELATIVE64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
171 bfd_elf_generic_reloc, "R_X86_64_RELATIVE64", FALSE, MINUS_ONE,
172 MINUS_ONE, FALSE),
173 HOWTO(R_X86_64_PC32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
174 bfd_elf_generic_reloc, "R_X86_64_PC32_BND", FALSE, 0xffffffff, 0xffffffff,
175 TRUE),
176 HOWTO(R_X86_64_PLT32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
177 bfd_elf_generic_reloc, "R_X86_64_PLT32_BND", FALSE, 0xffffffff, 0xffffffff,
178 TRUE),
179
180 /* We have a gap in the reloc numbers here.
181 R_X86_64_standard counts the number up to this point, and
182 R_X86_64_vt_offset is the value to subtract from a reloc type of
183 R_X86_64_GNU_VT* to form an index into this table. */
184 #define R_X86_64_standard (R_X86_64_PLT32_BND + 1)
185 #define R_X86_64_vt_offset (R_X86_64_GNU_VTINHERIT - R_X86_64_standard)
186
187 /* GNU extension to record C++ vtable hierarchy. */
188 HOWTO (R_X86_64_GNU_VTINHERIT, 0, 4, 0, FALSE, 0, complain_overflow_dont,
189 NULL, "R_X86_64_GNU_VTINHERIT", FALSE, 0, 0, FALSE),
190
191 /* GNU extension to record C++ vtable member usage. */
192 HOWTO (R_X86_64_GNU_VTENTRY, 0, 4, 0, FALSE, 0, complain_overflow_dont,
193 _bfd_elf_rel_vtable_reloc_fn, "R_X86_64_GNU_VTENTRY", FALSE, 0, 0,
194 FALSE),
195
196 /* Use complain_overflow_bitfield on R_X86_64_32 for x32. */
197 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
198 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
199 FALSE)
200 };
201
202 #define IS_X86_64_PCREL_TYPE(TYPE) \
203 ( ((TYPE) == R_X86_64_PC8) \
204 || ((TYPE) == R_X86_64_PC16) \
205 || ((TYPE) == R_X86_64_PC32) \
206 || ((TYPE) == R_X86_64_PC32_BND) \
207 || ((TYPE) == R_X86_64_PC64))
208
209 /* Map BFD relocs to the x86_64 elf relocs. */
210 struct elf_reloc_map
211 {
212 bfd_reloc_code_real_type bfd_reloc_val;
213 unsigned char elf_reloc_val;
214 };
215
216 static const struct elf_reloc_map x86_64_reloc_map[] =
217 {
218 { BFD_RELOC_NONE, R_X86_64_NONE, },
219 { BFD_RELOC_64, R_X86_64_64, },
220 { BFD_RELOC_32_PCREL, R_X86_64_PC32, },
221 { BFD_RELOC_X86_64_GOT32, R_X86_64_GOT32,},
222 { BFD_RELOC_X86_64_PLT32, R_X86_64_PLT32,},
223 { BFD_RELOC_X86_64_COPY, R_X86_64_COPY, },
224 { BFD_RELOC_X86_64_GLOB_DAT, R_X86_64_GLOB_DAT, },
225 { BFD_RELOC_X86_64_JUMP_SLOT, R_X86_64_JUMP_SLOT, },
226 { BFD_RELOC_X86_64_RELATIVE, R_X86_64_RELATIVE, },
227 { BFD_RELOC_X86_64_GOTPCREL, R_X86_64_GOTPCREL, },
228 { BFD_RELOC_32, R_X86_64_32, },
229 { BFD_RELOC_X86_64_32S, R_X86_64_32S, },
230 { BFD_RELOC_16, R_X86_64_16, },
231 { BFD_RELOC_16_PCREL, R_X86_64_PC16, },
232 { BFD_RELOC_8, R_X86_64_8, },
233 { BFD_RELOC_8_PCREL, R_X86_64_PC8, },
234 { BFD_RELOC_X86_64_DTPMOD64, R_X86_64_DTPMOD64, },
235 { BFD_RELOC_X86_64_DTPOFF64, R_X86_64_DTPOFF64, },
236 { BFD_RELOC_X86_64_TPOFF64, R_X86_64_TPOFF64, },
237 { BFD_RELOC_X86_64_TLSGD, R_X86_64_TLSGD, },
238 { BFD_RELOC_X86_64_TLSLD, R_X86_64_TLSLD, },
239 { BFD_RELOC_X86_64_DTPOFF32, R_X86_64_DTPOFF32, },
240 { BFD_RELOC_X86_64_GOTTPOFF, R_X86_64_GOTTPOFF, },
241 { BFD_RELOC_X86_64_TPOFF32, R_X86_64_TPOFF32, },
242 { BFD_RELOC_64_PCREL, R_X86_64_PC64, },
243 { BFD_RELOC_X86_64_GOTOFF64, R_X86_64_GOTOFF64, },
244 { BFD_RELOC_X86_64_GOTPC32, R_X86_64_GOTPC32, },
245 { BFD_RELOC_X86_64_GOT64, R_X86_64_GOT64, },
246 { BFD_RELOC_X86_64_GOTPCREL64,R_X86_64_GOTPCREL64, },
247 { BFD_RELOC_X86_64_GOTPC64, R_X86_64_GOTPC64, },
248 { BFD_RELOC_X86_64_GOTPLT64, R_X86_64_GOTPLT64, },
249 { BFD_RELOC_X86_64_PLTOFF64, R_X86_64_PLTOFF64, },
250 { BFD_RELOC_SIZE32, R_X86_64_SIZE32, },
251 { BFD_RELOC_SIZE64, R_X86_64_SIZE64, },
252 { BFD_RELOC_X86_64_GOTPC32_TLSDESC, R_X86_64_GOTPC32_TLSDESC, },
253 { BFD_RELOC_X86_64_TLSDESC_CALL, R_X86_64_TLSDESC_CALL, },
254 { BFD_RELOC_X86_64_TLSDESC, R_X86_64_TLSDESC, },
255 { BFD_RELOC_X86_64_IRELATIVE, R_X86_64_IRELATIVE, },
256 { BFD_RELOC_X86_64_PC32_BND, R_X86_64_PC32_BND,},
257 { BFD_RELOC_X86_64_PLT32_BND, R_X86_64_PLT32_BND,},
258 { BFD_RELOC_VTABLE_INHERIT, R_X86_64_GNU_VTINHERIT, },
259 { BFD_RELOC_VTABLE_ENTRY, R_X86_64_GNU_VTENTRY, },
260 };
261
262 static reloc_howto_type *
263 elf_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type)
264 {
265 unsigned i;
266
267 if (r_type == (unsigned int) R_X86_64_32)
268 {
269 if (ABI_64_P (abfd))
270 i = r_type;
271 else
272 i = ARRAY_SIZE (x86_64_elf_howto_table) - 1;
273 }
274 else if (r_type < (unsigned int) R_X86_64_GNU_VTINHERIT
275 || r_type >= (unsigned int) R_X86_64_max)
276 {
277 if (r_type >= (unsigned int) R_X86_64_standard)
278 {
279 (*_bfd_error_handler) (_("%B: invalid relocation type %d"),
280 abfd, (int) r_type);
281 r_type = R_X86_64_NONE;
282 }
283 i = r_type;
284 }
285 else
286 i = r_type - (unsigned int) R_X86_64_vt_offset;
287 BFD_ASSERT (x86_64_elf_howto_table[i].type == r_type);
288 return &x86_64_elf_howto_table[i];
289 }
290
291 /* Given a BFD reloc type, return a HOWTO structure. */
292 static reloc_howto_type *
293 elf_x86_64_reloc_type_lookup (bfd *abfd,
294 bfd_reloc_code_real_type code)
295 {
296 unsigned int i;
297
298 for (i = 0; i < sizeof (x86_64_reloc_map) / sizeof (struct elf_reloc_map);
299 i++)
300 {
301 if (x86_64_reloc_map[i].bfd_reloc_val == code)
302 return elf_x86_64_rtype_to_howto (abfd,
303 x86_64_reloc_map[i].elf_reloc_val);
304 }
305 return NULL;
306 }
307
308 static reloc_howto_type *
309 elf_x86_64_reloc_name_lookup (bfd *abfd,
310 const char *r_name)
311 {
312 unsigned int i;
313
314 if (!ABI_64_P (abfd) && strcasecmp (r_name, "R_X86_64_32") == 0)
315 {
316 /* Get x32 R_X86_64_32. */
317 reloc_howto_type *reloc
318 = &x86_64_elf_howto_table[ARRAY_SIZE (x86_64_elf_howto_table) - 1];
319 BFD_ASSERT (reloc->type == (unsigned int) R_X86_64_32);
320 return reloc;
321 }
322
323 for (i = 0; i < ARRAY_SIZE (x86_64_elf_howto_table); i++)
324 if (x86_64_elf_howto_table[i].name != NULL
325 && strcasecmp (x86_64_elf_howto_table[i].name, r_name) == 0)
326 return &x86_64_elf_howto_table[i];
327
328 return NULL;
329 }
330
331 /* Given an x86_64 ELF reloc type, fill in an arelent structure. */
332
333 static void
334 elf_x86_64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *cache_ptr,
335 Elf_Internal_Rela *dst)
336 {
337 unsigned r_type;
338
339 r_type = ELF32_R_TYPE (dst->r_info);
340 cache_ptr->howto = elf_x86_64_rtype_to_howto (abfd, r_type);
341 BFD_ASSERT (r_type == cache_ptr->howto->type);
342 }
343 \f
344 /* Support for core dump NOTE sections. */
345 static bfd_boolean
346 elf_x86_64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
347 {
348 int offset;
349 size_t size;
350
351 switch (note->descsz)
352 {
353 default:
354 return FALSE;
355
356 case 296: /* sizeof(struct elf_prstatus) on Linux/x32 */
357 /* pr_cursig */
358 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
359
360 /* pr_pid */
361 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
362
363 /* pr_reg */
364 offset = 72;
365 size = 216;
366
367 break;
368
369 case 336: /* sizeof(struct elf_prstatus) on Linux/x86_64 */
370 /* pr_cursig */
371 elf_tdata (abfd)->core->signal
372 = bfd_get_16 (abfd, note->descdata + 12);
373
374 /* pr_pid */
375 elf_tdata (abfd)->core->lwpid
376 = bfd_get_32 (abfd, note->descdata + 32);
377
378 /* pr_reg */
379 offset = 112;
380 size = 216;
381
382 break;
383 }
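/* In both layouts pr_reg is the full 64-bit register set
   (struct user_regs_struct, 27 eight-byte slots = 216 bytes); only
   its offset within elf_prstatus differs between x32 and x86-64.  */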
384
385 /* Make a ".reg/999" section. */
386 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
387 size, note->descpos + offset);
388 }
389
390 static bfd_boolean
391 elf_x86_64_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
392 {
393 switch (note->descsz)
394 {
395 default:
396 return FALSE;
397
398 case 124: /* sizeof(struct elf_prpsinfo) on Linux/x32 */
399 elf_tdata (abfd)->core->pid
400 = bfd_get_32 (abfd, note->descdata + 12);
401 elf_tdata (abfd)->core->program
402 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
403 elf_tdata (abfd)->core->command
404 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
405 break;
406
407 case 136: /* sizeof(struct elf_prpsinfo) on Linux/x86_64 */
408 elf_tdata (abfd)->core->pid
409 = bfd_get_32 (abfd, note->descdata + 24);
410 elf_tdata (abfd)->core->program
411 = _bfd_elfcore_strndup (abfd, note->descdata + 40, 16);
412 elf_tdata (abfd)->core->command
413 = _bfd_elfcore_strndup (abfd, note->descdata + 56, 80);
414 }
415
416 /* Note that for some reason, a spurious space is tacked
417 onto the end of the args in some implementations (at least
418 one, anyway), so strip it off if it exists. */
419
420 {
421 char *command = elf_tdata (abfd)->core->command;
422 int n = strlen (command);
423
424 if (0 < n && command[n - 1] == ' ')
425 command[n - 1] = '\0';
426 }
427
428 return TRUE;
429 }
430
431 #ifdef CORE_HEADER
432 static char *
433 elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz,
434 int note_type, ...)
435 {
436 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
437 va_list ap;
438 const char *fname, *psargs;
439 long pid;
440 int cursig;
441 const void *gregs;
442
443 switch (note_type)
444 {
445 default:
446 return NULL;
447
448 case NT_PRPSINFO:
449 va_start (ap, note_type);
450 fname = va_arg (ap, const char *);
451 psargs = va_arg (ap, const char *);
452 va_end (ap);
453
454 if (bed->s->elfclass == ELFCLASS32)
455 {
456 prpsinfo32_t data;
457 memset (&data, 0, sizeof (data));
458 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
459 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
460 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
461 &data, sizeof (data));
462 }
463 else
464 {
465 prpsinfo64_t data;
466 memset (&data, 0, sizeof (data));
467 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
468 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
469 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
470 &data, sizeof (data));
471 }
472 /* NOTREACHED */
473
474 case NT_PRSTATUS:
475 va_start (ap, note_type);
476 pid = va_arg (ap, long);
477 cursig = va_arg (ap, int);
478 gregs = va_arg (ap, const void *);
479 va_end (ap);
480
481 if (bed->s->elfclass == ELFCLASS32)
482 {
483 if (bed->elf_machine_code == EM_X86_64)
484 {
485 prstatusx32_t prstat;
486 memset (&prstat, 0, sizeof (prstat));
487 prstat.pr_pid = pid;
488 prstat.pr_cursig = cursig;
489 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
490 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
491 &prstat, sizeof (prstat));
492 }
493 else
494 {
495 prstatus32_t prstat;
496 memset (&prstat, 0, sizeof (prstat));
497 prstat.pr_pid = pid;
498 prstat.pr_cursig = cursig;
499 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
500 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
501 &prstat, sizeof (prstat));
502 }
503 }
504 else
505 {
506 prstatus64_t prstat;
507 memset (&prstat, 0, sizeof (prstat));
508 prstat.pr_pid = pid;
509 prstat.pr_cursig = cursig;
510 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
511 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
512 &prstat, sizeof (prstat));
513 }
514 }
515 /* NOTREACHED */
516 }
517 #endif
518 \f
519 /* Functions for the x86-64 ELF linker. */
520
521 /* The name of the dynamic interpreter. This is put in the .interp
522 section. */
523
524 #define ELF64_DYNAMIC_INTERPRETER "/lib/ld64.so.1"
525 #define ELF32_DYNAMIC_INTERPRETER "/lib/ldx32.so.1"
526
527 /* If ELIMINATE_COPY_RELOCS is non-zero, the linker will try to avoid
528 copying dynamic variables from a shared lib into an app's dynbss
529 section, and instead use a dynamic relocation to point into the
530 shared lib. */
531 #define ELIMINATE_COPY_RELOCS 1
532
533 /* The size in bytes of an entry in the global offset table. */
534
535 #define GOT_ENTRY_SIZE 8
536
537 /* The size in bytes of an entry in the procedure linkage table. */
538
539 #define PLT_ENTRY_SIZE 16
540
541 /* The first entry in a procedure linkage table looks like this. See the
542 SVR4 ABI i386 supplement and the x86-64 ABI to see how this works. */
543
544 static const bfd_byte elf_x86_64_plt0_entry[PLT_ENTRY_SIZE] =
545 {
546 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
547 0xff, 0x25, 16, 0, 0, 0, /* jmpq *GOT+16(%rip) */
548 0x0f, 0x1f, 0x40, 0x00 /* nopl 0(%rax) */
549 };
550
551 /* Subsequent entries in a procedure linkage table look like this. */
552
553 static const bfd_byte elf_x86_64_plt_entry[PLT_ENTRY_SIZE] =
554 {
555 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
556 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
557 0x68, /* pushq immediate */
558 0, 0, 0, 0, /* replaced with index into relocation table. */
559 0xe9, /* jmp relative */
560 0, 0, 0, 0 /* replaced with offset to start of .plt0. */
561 };
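/* Lazy binding note: each symbol's GOT entry initially points at the
   pushq in its own PLT entry, so the first call lands back here,
   pushes the relocation index and branches to PLT0, which enters the
   dynamic linker via GOT+8/GOT+16.  After resolution the GOT entry
   holds the real target and later calls go straight through.  */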
562
563 /* The first entry in a procedure linkage table with BND relocations
564 looks like this. */
565
566 static const bfd_byte elf_x86_64_bnd_plt0_entry[PLT_ENTRY_SIZE] =
567 {
568 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
569 0xf2, 0xff, 0x25, 16, 0, 0, 0, /* bnd jmpq *GOT+16(%rip) */
570 0x0f, 0x1f, 0 /* nopl (%rax) */
571 };
572
573 /* Subsequent entries for legacy branches in a procedure linkage table
574 with BND relocations look like this. */
575
576 static const bfd_byte elf_x86_64_legacy_plt_entry[PLT_ENTRY_SIZE] =
577 {
578 0x68, 0, 0, 0, 0, /* pushq immediate */
579 0xe9, 0, 0, 0, 0, /* jmpq relative */
580 0x66, 0x0f, 0x1f, 0x44, 0, 0 /* nopw (%rax,%rax,1) */
581 };
582
583 /* Subsequent entries for branches with BND prefix in a procedure linkage
584 table with BND relocations look like this. */
585
586 static const bfd_byte elf_x86_64_bnd_plt_entry[PLT_ENTRY_SIZE] =
587 {
588 0x68, 0, 0, 0, 0, /* pushq immediate */
589 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
590 0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */
591 };
592
593 /* Entries for legacy branches in the second procedure linkage table
594 look like this. */
595
596 static const bfd_byte elf_x86_64_legacy_plt2_entry[8] =
597 {
598 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
599 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
600 0x66, 0x90 /* xchg %ax,%ax */
601 };
602
603 /* Entries for branches with BND prefix in the second procedure linkage
604 table look like this. */
605
606 static const bfd_byte elf_x86_64_bnd_plt2_entry[8] =
607 {
608 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
609 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
610 0x90 /* nop */
611 };
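/* Rough sketch of the split PLT used for MPX: branches are resolved to
   the 8-byte stubs above (placed in .plt.bnd), which do the indirect
   jump through the GOT, while the 16-byte .plt entries only push the
   relocation index and branch to PLT0 for lazy resolution.  */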
612
613 /* .eh_frame covering the .plt section. */
614
615 static const bfd_byte elf_x86_64_eh_frame_plt[] =
616 {
617 #define PLT_CIE_LENGTH 20
618 #define PLT_FDE_LENGTH 36
619 #define PLT_FDE_START_OFFSET 4 + PLT_CIE_LENGTH + 8
620 #define PLT_FDE_LEN_OFFSET 4 + PLT_CIE_LENGTH + 12
621 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
622 0, 0, 0, 0, /* CIE ID */
623 1, /* CIE version */
624 'z', 'R', 0, /* Augmentation string */
625 1, /* Code alignment factor */
626 0x78, /* Data alignment factor */
627 16, /* Return address column */
628 1, /* Augmentation size */
629 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
630 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
631 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
632 DW_CFA_nop, DW_CFA_nop,
633
634 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
635 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
636 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
637 0, 0, 0, 0, /* .plt size goes here */
638 0, /* Augmentation size */
639 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
640 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
641 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
642 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
643 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
644 11, /* Block length */
645 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
646 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
647 DW_OP_lit15, DW_OP_and, DW_OP_lit11, DW_OP_ge,
648 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
649 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
650 };
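/* The DW_CFA_def_cfa_expression above evaluates to
   %rsp + 8 + 8 * ((%rip & 15) >= 11): within each 16-byte PLT entry
   the CFA gains one extra slot once the pushq, which ends at byte
   offset 11, has executed.  */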
651
652 /* Architecture-specific backend data for x86-64. */
653
654 struct elf_x86_64_backend_data
655 {
656 /* Templates for the initial PLT entry and for subsequent entries. */
657 const bfd_byte *plt0_entry;
658 const bfd_byte *plt_entry;
659 unsigned int plt_entry_size; /* Size of each PLT entry. */
660
661 /* Offsets into plt0_entry that are to be replaced with GOT[1] and GOT[2]. */
662 unsigned int plt0_got1_offset;
663 unsigned int plt0_got2_offset;
664
665 /* Offset of the end of the PC-relative instruction containing
666 plt0_got2_offset. */
667 unsigned int plt0_got2_insn_end;
668
669 /* Offsets into plt_entry that are to be replaced with... */
670 unsigned int plt_got_offset; /* ... address of this symbol in .got. */
671 unsigned int plt_reloc_offset; /* ... offset into relocation table. */
672 unsigned int plt_plt_offset; /* ... offset to start of .plt. */
673
674 /* Length of the PC-relative instruction containing plt_got_offset. */
675 unsigned int plt_got_insn_size;
676
677 /* Offset of the end of the PC-relative jump to plt0_entry. */
678 unsigned int plt_plt_insn_end;
679
680 /* Offset into plt_entry where the initial value of the GOT entry points. */
681 unsigned int plt_lazy_offset;
682
683 /* .eh_frame covering the .plt section. */
684 const bfd_byte *eh_frame_plt;
685 unsigned int eh_frame_plt_size;
686 };
687
688 #define get_elf_x86_64_arch_data(bed) \
689 ((const struct elf_x86_64_backend_data *) (bed)->arch_data)
690
691 #define get_elf_x86_64_backend_data(abfd) \
692 get_elf_x86_64_arch_data (get_elf_backend_data (abfd))
693
694 #define GET_PLT_ENTRY_SIZE(abfd) \
695 get_elf_x86_64_backend_data (abfd)->plt_entry_size
696
697 /* These are the standard parameters. */
698 static const struct elf_x86_64_backend_data elf_x86_64_arch_bed =
699 {
700 elf_x86_64_plt0_entry, /* plt0_entry */
701 elf_x86_64_plt_entry, /* plt_entry */
702 sizeof (elf_x86_64_plt_entry), /* plt_entry_size */
703 2, /* plt0_got1_offset */
704 8, /* plt0_got2_offset */
705 12, /* plt0_got2_insn_end */
706 2, /* plt_got_offset */
707 7, /* plt_reloc_offset */
708 12, /* plt_plt_offset */
709 6, /* plt_got_insn_size */
710 PLT_ENTRY_SIZE, /* plt_plt_insn_end */
711 6, /* plt_lazy_offset */
712 elf_x86_64_eh_frame_plt, /* eh_frame_plt */
713 sizeof (elf_x86_64_eh_frame_plt), /* eh_frame_plt_size */
714 };
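/* These offsets index the byte templates above: plt0_got1_offset 2 and
   plt0_got2_offset 8 are the displacement fields of the pushq and jmpq
   in plt0_entry, while plt_got_offset 2, plt_reloc_offset 7 and
   plt_plt_offset 12 are the three 32-bit fields of plt_entry.  */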
715
716 static const struct elf_x86_64_backend_data elf_x86_64_bnd_arch_bed =
717 {
718 elf_x86_64_bnd_plt0_entry, /* plt0_entry */
719 elf_x86_64_bnd_plt_entry, /* plt_entry */
720 sizeof (elf_x86_64_bnd_plt_entry), /* plt_entry_size */
721 2, /* plt0_got1_offset */
722 1+8, /* plt0_got2_offset */
723 1+12, /* plt0_got2_insn_end */
724 1+2, /* plt_got_offset */
725 1, /* plt_reloc_offset */
726 7, /* plt_plt_offset */
727 1+6, /* plt_got_insn_size */
728 11, /* plt_plt_insn_end */
729 0, /* plt_lazy_offset */
730 elf_x86_64_eh_frame_plt, /* eh_frame_plt */
731 sizeof (elf_x86_64_eh_frame_plt), /* eh_frame_plt_size */
732 };
733
734 #define elf_backend_arch_data &elf_x86_64_arch_bed
735
736 /* x86-64 ELF linker hash entry. */
737
738 struct elf_x86_64_link_hash_entry
739 {
740 struct elf_link_hash_entry elf;
741
742 /* Track dynamic relocs copied for this symbol. */
743 struct elf_dyn_relocs *dyn_relocs;
744
745 #define GOT_UNKNOWN 0
746 #define GOT_NORMAL 1
747 #define GOT_TLS_GD 2
748 #define GOT_TLS_IE 3
749 #define GOT_TLS_GDESC 4
750 #define GOT_TLS_GD_BOTH_P(type) \
751 ((type) == (GOT_TLS_GD | GOT_TLS_GDESC))
752 #define GOT_TLS_GD_P(type) \
753 ((type) == GOT_TLS_GD || GOT_TLS_GD_BOTH_P (type))
754 #define GOT_TLS_GDESC_P(type) \
755 ((type) == GOT_TLS_GDESC || GOT_TLS_GD_BOTH_P (type))
756 #define GOT_TLS_GD_ANY_P(type) \
757 (GOT_TLS_GD_P (type) || GOT_TLS_GDESC_P (type))
758 unsigned char tls_type;
759
760 /* TRUE if a weak symbol with a real definition needs a copy reloc.
761 When there is a weak symbol with a real definition, the processor
762 independent code will have arranged for us to see the real
763 definition first. We need to copy the needs_copy bit from the
764 real definition and check it when allowing copy reloc in PIE. */
765 unsigned int needs_copy : 1;
766
767 /* TRUE if symbol has at least one BND relocation. */
768 unsigned int has_bnd_reloc : 1;
769
770 /* Information about the GOT PLT entry. Filled when there are both
771 GOT and PLT relocations against the same function. */
772 union gotplt_union plt_got;
773
774 /* Information about the second PLT entry. Filled when has_bnd_reloc is
775 set. */
776 union gotplt_union plt_bnd;
777
778 /* Offset of the GOTPLT entry reserved for the TLS descriptor,
779 starting at the end of the jump table. */
780 bfd_vma tlsdesc_got;
781 };
782
783 #define elf_x86_64_hash_entry(ent) \
784 ((struct elf_x86_64_link_hash_entry *)(ent))
785
786 struct elf_x86_64_obj_tdata
787 {
788 struct elf_obj_tdata root;
789
790 /* tls_type for each local got entry. */
791 char *local_got_tls_type;
792
793 /* GOTPLT entries for TLS descriptors. */
794 bfd_vma *local_tlsdesc_gotent;
795 };
796
797 #define elf_x86_64_tdata(abfd) \
798 ((struct elf_x86_64_obj_tdata *) (abfd)->tdata.any)
799
800 #define elf_x86_64_local_got_tls_type(abfd) \
801 (elf_x86_64_tdata (abfd)->local_got_tls_type)
802
803 #define elf_x86_64_local_tlsdesc_gotent(abfd) \
804 (elf_x86_64_tdata (abfd)->local_tlsdesc_gotent)
805
806 #define is_x86_64_elf(bfd) \
807 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
808 && elf_tdata (bfd) != NULL \
809 && elf_object_id (bfd) == X86_64_ELF_DATA)
810
811 static bfd_boolean
812 elf_x86_64_mkobject (bfd *abfd)
813 {
814 return bfd_elf_allocate_object (abfd, sizeof (struct elf_x86_64_obj_tdata),
815 X86_64_ELF_DATA);
816 }
817
818 /* x86-64 ELF linker hash table. */
819
820 struct elf_x86_64_link_hash_table
821 {
822 struct elf_link_hash_table elf;
823
824 /* Short-cuts to get to dynamic linker sections. */
825 asection *sdynbss;
826 asection *srelbss;
827 asection *plt_eh_frame;
828 asection *plt_bnd;
829 asection *plt_got;
830
831 union
832 {
833 bfd_signed_vma refcount;
834 bfd_vma offset;
835 } tls_ld_got;
836
837 /* The amount of space used by the jump slots in the GOT. */
838 bfd_vma sgotplt_jump_table_size;
839
840 /* Small local sym cache. */
841 struct sym_cache sym_cache;
842
843 bfd_vma (*r_info) (bfd_vma, bfd_vma);
844 bfd_vma (*r_sym) (bfd_vma);
845 unsigned int pointer_r_type;
846 const char *dynamic_interpreter;
847 int dynamic_interpreter_size;
848
849 /* _TLS_MODULE_BASE_ symbol. */
850 struct bfd_link_hash_entry *tls_module_base;
851
852 /* Used by local STT_GNU_IFUNC symbols. */
853 htab_t loc_hash_table;
854 void * loc_hash_memory;
855
856 /* The offset into splt of the PLT entry for the TLS descriptor
857 resolver. Special values are 0, if not necessary (or not found
858 to be necessary yet), and -1 if needed but not determined
859 yet. */
860 bfd_vma tlsdesc_plt;
861 /* The offset into sgot of the GOT entry used by the PLT entry
862 above. */
863 bfd_vma tlsdesc_got;
864
865 /* The index of the next R_X86_64_JUMP_SLOT entry in .rela.plt. */
866 bfd_vma next_jump_slot_index;
867 /* The index of the next R_X86_64_IRELATIVE entry in .rela.plt. */
868 bfd_vma next_irelative_index;
869 };
870
871 /* Get the x86-64 ELF linker hash table from a link_info structure. */
872
873 #define elf_x86_64_hash_table(p) \
874 (elf_hash_table_id ((struct elf_link_hash_table *) ((p)->hash)) \
875 == X86_64_ELF_DATA ? ((struct elf_x86_64_link_hash_table *) ((p)->hash)) : NULL)
876
877 #define elf_x86_64_compute_jump_table_size(htab) \
878 ((htab)->elf.srelplt->reloc_count * GOT_ENTRY_SIZE)
879
880 /* Create an entry in an x86-64 ELF linker hash table. */
881
882 static struct bfd_hash_entry *
883 elf_x86_64_link_hash_newfunc (struct bfd_hash_entry *entry,
884 struct bfd_hash_table *table,
885 const char *string)
886 {
887 /* Allocate the structure if it has not already been allocated by a
888 subclass. */
889 if (entry == NULL)
890 {
891 entry = (struct bfd_hash_entry *)
892 bfd_hash_allocate (table,
893 sizeof (struct elf_x86_64_link_hash_entry));
894 if (entry == NULL)
895 return entry;
896 }
897
898 /* Call the allocation method of the superclass. */
899 entry = _bfd_elf_link_hash_newfunc (entry, table, string);
900 if (entry != NULL)
901 {
902 struct elf_x86_64_link_hash_entry *eh;
903
904 eh = (struct elf_x86_64_link_hash_entry *) entry;
905 eh->dyn_relocs = NULL;
906 eh->tls_type = GOT_UNKNOWN;
907 eh->needs_copy = 0;
908 eh->has_bnd_reloc = 0;
909 eh->plt_bnd.offset = (bfd_vma) -1;
910 eh->plt_got.offset = (bfd_vma) -1;
911 eh->tlsdesc_got = (bfd_vma) -1;
912 }
913
914 return entry;
915 }
916
917 /* Compute a hash of a local hash entry.  We use elf_link_hash_entry
918 for local symbols so that we can handle local STT_GNU_IFUNC symbols
919 as global symbols.  We reuse indx and dynstr_index for the local
920 symbol hash since they aren't used by global symbols in this backend. */
921
922 static hashval_t
923 elf_x86_64_local_htab_hash (const void *ptr)
924 {
925 struct elf_link_hash_entry *h
926 = (struct elf_link_hash_entry *) ptr;
927 return ELF_LOCAL_SYMBOL_HASH (h->indx, h->dynstr_index);
928 }
929
930 /* Compare local hash entries. */
931
932 static int
933 elf_x86_64_local_htab_eq (const void *ptr1, const void *ptr2)
934 {
935 struct elf_link_hash_entry *h1
936 = (struct elf_link_hash_entry *) ptr1;
937 struct elf_link_hash_entry *h2
938 = (struct elf_link_hash_entry *) ptr2;
939
940 return h1->indx == h2->indx && h1->dynstr_index == h2->dynstr_index;
941 }
942
943 /* Find and/or create a hash entry for a local symbol. */
944
945 static struct elf_link_hash_entry *
946 elf_x86_64_get_local_sym_hash (struct elf_x86_64_link_hash_table *htab,
947 bfd *abfd, const Elf_Internal_Rela *rel,
948 bfd_boolean create)
949 {
950 struct elf_x86_64_link_hash_entry e, *ret;
951 asection *sec = abfd->sections;
952 hashval_t h = ELF_LOCAL_SYMBOL_HASH (sec->id,
953 htab->r_sym (rel->r_info));
954 void **slot;
955
956 e.elf.indx = sec->id;
957 e.elf.dynstr_index = htab->r_sym (rel->r_info);
958 slot = htab_find_slot_with_hash (htab->loc_hash_table, &e, h,
959 create ? INSERT : NO_INSERT);
960
961 if (!slot)
962 return NULL;
963
964 if (*slot)
965 {
966 ret = (struct elf_x86_64_link_hash_entry *) *slot;
967 return &ret->elf;
968 }
969
970 ret = (struct elf_x86_64_link_hash_entry *)
971 objalloc_alloc ((struct objalloc *) htab->loc_hash_memory,
972 sizeof (struct elf_x86_64_link_hash_entry));
973 if (ret)
974 {
975 memset (ret, 0, sizeof (*ret));
976 ret->elf.indx = sec->id;
977 ret->elf.dynstr_index = htab->r_sym (rel->r_info);
978 ret->elf.dynindx = -1;
979 ret->plt_got.offset = (bfd_vma) -1;
980 *slot = ret;
981 }
982 return &ret->elf;
983 }
984
985 /* Destroy an X86-64 ELF linker hash table. */
986
987 static void
988 elf_x86_64_link_hash_table_free (bfd *obfd)
989 {
990 struct elf_x86_64_link_hash_table *htab
991 = (struct elf_x86_64_link_hash_table *) obfd->link.hash;
992
993 if (htab->loc_hash_table)
994 htab_delete (htab->loc_hash_table);
995 if (htab->loc_hash_memory)
996 objalloc_free ((struct objalloc *) htab->loc_hash_memory);
997 _bfd_elf_link_hash_table_free (obfd);
998 }
999
1000 /* Create an X86-64 ELF linker hash table. */
1001
1002 static struct bfd_link_hash_table *
1003 elf_x86_64_link_hash_table_create (bfd *abfd)
1004 {
1005 struct elf_x86_64_link_hash_table *ret;
1006 bfd_size_type amt = sizeof (struct elf_x86_64_link_hash_table);
1007
1008 ret = (struct elf_x86_64_link_hash_table *) bfd_zmalloc (amt);
1009 if (ret == NULL)
1010 return NULL;
1011
1012 if (!_bfd_elf_link_hash_table_init (&ret->elf, abfd,
1013 elf_x86_64_link_hash_newfunc,
1014 sizeof (struct elf_x86_64_link_hash_entry),
1015 X86_64_ELF_DATA))
1016 {
1017 free (ret);
1018 return NULL;
1019 }
1020
1021 if (ABI_64_P (abfd))
1022 {
1023 ret->r_info = elf64_r_info;
1024 ret->r_sym = elf64_r_sym;
1025 ret->pointer_r_type = R_X86_64_64;
1026 ret->dynamic_interpreter = ELF64_DYNAMIC_INTERPRETER;
1027 ret->dynamic_interpreter_size = sizeof ELF64_DYNAMIC_INTERPRETER;
1028 }
1029 else
1030 {
1031 ret->r_info = elf32_r_info;
1032 ret->r_sym = elf32_r_sym;
1033 ret->pointer_r_type = R_X86_64_32;
1034 ret->dynamic_interpreter = ELF32_DYNAMIC_INTERPRETER;
1035 ret->dynamic_interpreter_size = sizeof ELF32_DYNAMIC_INTERPRETER;
1036 }
1037
1038 ret->loc_hash_table = htab_try_create (1024,
1039 elf_x86_64_local_htab_hash,
1040 elf_x86_64_local_htab_eq,
1041 NULL);
1042 ret->loc_hash_memory = objalloc_create ();
1043 if (!ret->loc_hash_table || !ret->loc_hash_memory)
1044 {
1045 elf_x86_64_link_hash_table_free (abfd);
1046 return NULL;
1047 }
1048 ret->elf.root.hash_table_free = elf_x86_64_link_hash_table_free;
1049
1050 return &ret->elf.root;
1051 }
1052
1053 /* Create .plt, .rela.plt, .got, .got.plt, .rela.got, .dynbss, and
1054 .rela.bss sections in DYNOBJ, and set up shortcuts to them in our
1055 hash table. */
1056
1057 static bfd_boolean
1058 elf_x86_64_create_dynamic_sections (bfd *dynobj,
1059 struct bfd_link_info *info)
1060 {
1061 struct elf_x86_64_link_hash_table *htab;
1062
1063 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
1064 return FALSE;
1065
1066 htab = elf_x86_64_hash_table (info);
1067 if (htab == NULL)
1068 return FALSE;
1069
1070 htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
1071 if (!htab->sdynbss)
1072 abort ();
1073
1074 if (info->executable)
1075 {
1076 /* Always allow copy relocs for building executables. */
1077 asection *s = bfd_get_linker_section (dynobj, ".rela.bss");
1078 if (s == NULL)
1079 {
1080 const struct elf_backend_data *bed = get_elf_backend_data (dynobj);
1081 s = bfd_make_section_anyway_with_flags (dynobj,
1082 ".rela.bss",
1083 (bed->dynamic_sec_flags
1084 | SEC_READONLY));
1085 if (s == NULL
1086 || ! bfd_set_section_alignment (dynobj, s,
1087 bed->s->log_file_align))
1088 return FALSE;
1089 }
1090 htab->srelbss = s;
1091 }
1092
1093 if (!info->no_ld_generated_unwind_info
1094 && htab->plt_eh_frame == NULL
1095 && htab->elf.splt != NULL)
1096 {
1097 flagword flags = (SEC_ALLOC | SEC_LOAD | SEC_READONLY
1098 | SEC_HAS_CONTENTS | SEC_IN_MEMORY
1099 | SEC_LINKER_CREATED);
1100 htab->plt_eh_frame
1101 = bfd_make_section_anyway_with_flags (dynobj, ".eh_frame", flags);
1102 if (htab->plt_eh_frame == NULL
1103 || !bfd_set_section_alignment (dynobj, htab->plt_eh_frame, 3))
1104 return FALSE;
1105 }
1106 return TRUE;
1107 }
1108
1109 /* Copy the extra info we tack onto an elf_link_hash_entry. */
1110
1111 static void
1112 elf_x86_64_copy_indirect_symbol (struct bfd_link_info *info,
1113 struct elf_link_hash_entry *dir,
1114 struct elf_link_hash_entry *ind)
1115 {
1116 struct elf_x86_64_link_hash_entry *edir, *eind;
1117
1118 edir = (struct elf_x86_64_link_hash_entry *) dir;
1119 eind = (struct elf_x86_64_link_hash_entry *) ind;
1120
1121 if (!edir->has_bnd_reloc)
1122 edir->has_bnd_reloc = eind->has_bnd_reloc;
1123
1124 if (eind->dyn_relocs != NULL)
1125 {
1126 if (edir->dyn_relocs != NULL)
1127 {
1128 struct elf_dyn_relocs **pp;
1129 struct elf_dyn_relocs *p;
1130
1131 /* Add reloc counts against the indirect sym to the direct sym
1132 list. Merge any entries against the same section. */
1133 for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
1134 {
1135 struct elf_dyn_relocs *q;
1136
1137 for (q = edir->dyn_relocs; q != NULL; q = q->next)
1138 if (q->sec == p->sec)
1139 {
1140 q->pc_count += p->pc_count;
1141 q->count += p->count;
1142 *pp = p->next;
1143 break;
1144 }
1145 if (q == NULL)
1146 pp = &p->next;
1147 }
1148 *pp = edir->dyn_relocs;
1149 }
1150
1151 edir->dyn_relocs = eind->dyn_relocs;
1152 eind->dyn_relocs = NULL;
1153 }
1154
1155 if (ind->root.type == bfd_link_hash_indirect
1156 && dir->got.refcount <= 0)
1157 {
1158 edir->tls_type = eind->tls_type;
1159 eind->tls_type = GOT_UNKNOWN;
1160 }
1161
1162 if (ELIMINATE_COPY_RELOCS
1163 && ind->root.type != bfd_link_hash_indirect
1164 && dir->dynamic_adjusted)
1165 {
1166 /* If called to transfer flags for a weakdef during processing
1167 of elf_adjust_dynamic_symbol, don't copy non_got_ref.
1168 We clear it ourselves for ELIMINATE_COPY_RELOCS. */
1169 dir->ref_dynamic |= ind->ref_dynamic;
1170 dir->ref_regular |= ind->ref_regular;
1171 dir->ref_regular_nonweak |= ind->ref_regular_nonweak;
1172 dir->needs_plt |= ind->needs_plt;
1173 dir->pointer_equality_needed |= ind->pointer_equality_needed;
1174 }
1175 else
1176 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
1177 }
1178
1179 static bfd_boolean
1180 elf64_x86_64_elf_object_p (bfd *abfd)
1181 {
1182 /* Set the right machine number for an x86-64 elf64 file. */
1183 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64);
1184 return TRUE;
1185 }
1186
1187 static bfd_boolean
1188 elf32_x86_64_elf_object_p (bfd *abfd)
1189 {
1190 /* Set the right machine number for an x86-64 elf32 file. */
1191 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32);
1192 return TRUE;
1193 }
1194
1195 /* Return TRUE if the TLS access code sequence supports a transition
1196 from R_TYPE. */
1197
1198 static bfd_boolean
1199 elf_x86_64_check_tls_transition (bfd *abfd,
1200 struct bfd_link_info *info,
1201 asection *sec,
1202 bfd_byte *contents,
1203 Elf_Internal_Shdr *symtab_hdr,
1204 struct elf_link_hash_entry **sym_hashes,
1205 unsigned int r_type,
1206 const Elf_Internal_Rela *rel,
1207 const Elf_Internal_Rela *relend)
1208 {
1209 unsigned int val;
1210 unsigned long r_symndx;
1211 bfd_boolean largepic = FALSE;
1212 struct elf_link_hash_entry *h;
1213 bfd_vma offset;
1214 struct elf_x86_64_link_hash_table *htab;
1215
1216 /* Get the section contents. */
1217 if (contents == NULL)
1218 {
1219 if (elf_section_data (sec)->this_hdr.contents != NULL)
1220 contents = elf_section_data (sec)->this_hdr.contents;
1221 else
1222 {
1223 /* FIXME: How to better handle error condition? */
1224 if (!bfd_malloc_and_get_section (abfd, sec, &contents))
1225 return FALSE;
1226
1227 /* Cache the section contents for elf_link_input_bfd. */
1228 elf_section_data (sec)->this_hdr.contents = contents;
1229 }
1230 }
1231
1232 htab = elf_x86_64_hash_table (info);
1233 offset = rel->r_offset;
1234 switch (r_type)
1235 {
1236 case R_X86_64_TLSGD:
1237 case R_X86_64_TLSLD:
1238 if ((rel + 1) >= relend)
1239 return FALSE;
1240
1241 if (r_type == R_X86_64_TLSGD)
1242 {
1243 /* Check transition from GD access model. For 64bit, only
1244 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1245 .word 0x6666; rex64; call __tls_get_addr
1246 can transition to a different access model. For 32bit, only
1247 leaq foo@tlsgd(%rip), %rdi
1248 .word 0x6666; rex64; call __tls_get_addr
1249 can transition to a different access model. For largepic
1250 we also support:
1251 leaq foo@tlsgd(%rip), %rdi
1252 movabsq $__tls_get_addr@pltoff, %rax
1253 addq %rbx, %rax
1254 call *%rax. */
1255
1256 static const unsigned char call[] = { 0x66, 0x66, 0x48, 0xe8 };
1257 static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d };
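/* The patterns above decode as "data16 lea foo@tlsgd(%rip), %rdi"
   (66 48 8d 3d) and "data16 data16 rex64 call" (66 66 48 e8), the
   padded GD sequence the ABI prescribes so the linker can rewrite
   it in place.  */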
1258
1259 if ((offset + 12) > sec->size)
1260 return FALSE;
1261
1262 if (memcmp (contents + offset + 4, call, 4) != 0)
1263 {
1264 if (!ABI_64_P (abfd)
1265 || (offset + 19) > sec->size
1266 || offset < 3
1267 || memcmp (contents + offset - 3, leaq + 1, 3) != 0
1268 || memcmp (contents + offset + 4, "\x48\xb8", 2) != 0
1269 || memcmp (contents + offset + 14, "\x48\x01\xd8\xff\xd0", 5)
1270 != 0)
1271 return FALSE;
1272 largepic = TRUE;
1273 }
1274 else if (ABI_64_P (abfd))
1275 {
1276 if (offset < 4
1277 || memcmp (contents + offset - 4, leaq, 4) != 0)
1278 return FALSE;
1279 }
1280 else
1281 {
1282 if (offset < 3
1283 || memcmp (contents + offset - 3, leaq + 1, 3) != 0)
1284 return FALSE;
1285 }
1286 }
1287 else
1288 {
1289 /* Check transition from LD access model. Only
1290 leaq foo@tlsld(%rip), %rdi;
1291 call __tls_get_addr
1292 can transition to a different access model. For largepic
1293 we also support:
1294 leaq foo@tlsld(%rip), %rdi
1295 movabsq $__tls_get_addr@pltoff, %rax
1296 addq %rbx, %rax
1297 call *%rax. */
1298
1299 static const unsigned char lea[] = { 0x48, 0x8d, 0x3d };
1300
1301 if (offset < 3 || (offset + 9) > sec->size)
1302 return FALSE;
1303
1304 if (memcmp (contents + offset - 3, lea, 3) != 0)
1305 return FALSE;
1306
1307 if (0xe8 != *(contents + offset + 4))
1308 {
1309 if (!ABI_64_P (abfd)
1310 || (offset + 19) > sec->size
1311 || memcmp (contents + offset + 4, "\x48\xb8", 2) != 0
1312 || memcmp (contents + offset + 14, "\x48\x01\xd8\xff\xd0", 5)
1313 != 0)
1314 return FALSE;
1315 largepic = TRUE;
1316 }
1317 }
1318
1319 r_symndx = htab->r_sym (rel[1].r_info);
1320 if (r_symndx < symtab_hdr->sh_info)
1321 return FALSE;
1322
1323 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1324 /* Use strncmp to check __tls_get_addr since __tls_get_addr
1325 may be versioned. */
1326 return (h != NULL
1327 && h->root.root.string != NULL
1328 && (largepic
1329 ? ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PLTOFF64
1330 : (ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PC32
1331 || ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PLT32))
1332 && (strncmp (h->root.root.string,
1333 "__tls_get_addr", 14) == 0));
1334
1335 case R_X86_64_GOTTPOFF:
1336 /* Check transition from IE access model:
1337 mov foo@gottpoff(%rip), %reg
1338 add foo@gottpoff(%rip), %reg
1339 */
1340
1341 /* Check REX prefix first. */
1342 if (offset >= 3 && (offset + 4) <= sec->size)
1343 {
1344 val = bfd_get_8 (abfd, contents + offset - 3);
1345 if (val != 0x48 && val != 0x4c)
1346 {
1347 /* X32 may have 0x44 REX prefix or no REX prefix. */
1348 if (ABI_64_P (abfd))
1349 return FALSE;
1350 }
1351 }
1352 else
1353 {
1354 /* X32 may not have any REX prefix. */
1355 if (ABI_64_P (abfd))
1356 return FALSE;
1357 if (offset < 2 || (offset + 3) > sec->size)
1358 return FALSE;
1359 }
1360
1361 val = bfd_get_8 (abfd, contents + offset - 2);
1362 if (val != 0x8b && val != 0x03)
1363 return FALSE;
1364
1365 val = bfd_get_8 (abfd, contents + offset - 1);
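/* ModRM with mod == 00 and r/m == 101 is the disp32(%rip) form in
   64-bit code, which is what foo@gottpoff(%rip) must use; the reg
   field (the destination register) may be anything.  */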
1366 return (val & 0xc7) == 5;
1367
1368 case R_X86_64_GOTPC32_TLSDESC:
1369 /* Check transition from GDesc access model:
1370 leaq x@tlsdesc(%rip), %rax
1371
1372 Make sure it's a leaq adding rip to a 32-bit offset
1373 into any register, although it's probably almost always
1374 going to be rax. */
1375
1376 if (offset < 3 || (offset + 4) > sec->size)
1377 return FALSE;
1378
1379 val = bfd_get_8 (abfd, contents + offset - 3);
1380 if ((val & 0xfb) != 0x48)
1381 return FALSE;
1382
1383 if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d)
1384 return FALSE;
1385
1386 val = bfd_get_8 (abfd, contents + offset - 1);
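/* The REX test above accepts 0x48 (rex.W) and 0x4c (rex.WR) so the
   lea may target %rax..%r15; the ModRM test again requires the
   disp32(%rip) addressing form.  */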
1387 return (val & 0xc7) == 0x05;
1388
1389 case R_X86_64_TLSDESC_CALL:
1390 /* Check transition from GDesc access model:
1391 call *x@tlsdesc(%rax)
1392 */
1393 if (offset + 2 <= sec->size)
1394 {
1395 /* Make sure that it's a call *x@tlsdesc(%rax). */
1396 static const unsigned char call[] = { 0xff, 0x10 };
1397 return memcmp (contents + offset, call, 2) == 0;
1398 }
1399
1400 return FALSE;
1401
1402 default:
1403 abort ();
1404 }
1405 }
1406
1407 /* Return TRUE if the TLS access transition is OK or no transition
1408 will be performed. Update R_TYPE if there is a transition. */
1409
1410 static bfd_boolean
1411 elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd,
1412 asection *sec, bfd_byte *contents,
1413 Elf_Internal_Shdr *symtab_hdr,
1414 struct elf_link_hash_entry **sym_hashes,
1415 unsigned int *r_type, int tls_type,
1416 const Elf_Internal_Rela *rel,
1417 const Elf_Internal_Rela *relend,
1418 struct elf_link_hash_entry *h,
1419 unsigned long r_symndx)
1420 {
1421 unsigned int from_type = *r_type;
1422 unsigned int to_type = from_type;
1423 bfd_boolean check = TRUE;
1424
1425 /* Skip TLS transition for functions. */
1426 if (h != NULL
1427 && (h->type == STT_FUNC
1428 || h->type == STT_GNU_IFUNC))
1429 return TRUE;
1430
1431 switch (from_type)
1432 {
1433 case R_X86_64_TLSGD:
1434 case R_X86_64_GOTPC32_TLSDESC:
1435 case R_X86_64_TLSDESC_CALL:
1436 case R_X86_64_GOTTPOFF:
1437 if (info->executable)
1438 {
1439 if (h == NULL)
1440 to_type = R_X86_64_TPOFF32;
1441 else
1442 to_type = R_X86_64_GOTTPOFF;
1443 }
1444
1445 /* When we are called from elf_x86_64_relocate_section,
1446 CONTENTS isn't NULL and there may be additional transitions
1447 based on TLS_TYPE. */
1448 if (contents != NULL)
1449 {
1450 unsigned int new_to_type = to_type;
1451
1452 if (info->executable
1453 && h != NULL
1454 && h->dynindx == -1
1455 && tls_type == GOT_TLS_IE)
1456 new_to_type = R_X86_64_TPOFF32;
1457
1458 if (to_type == R_X86_64_TLSGD
1459 || to_type == R_X86_64_GOTPC32_TLSDESC
1460 || to_type == R_X86_64_TLSDESC_CALL)
1461 {
1462 if (tls_type == GOT_TLS_IE)
1463 new_to_type = R_X86_64_GOTTPOFF;
1464 }
1465
1466 /* We checked the transition before when we were called from
1467 elf_x86_64_check_relocs. We only want to check the new
1468 transition which hasn't been checked before. */
1469 check = new_to_type != to_type && from_type == to_type;
1470 to_type = new_to_type;
1471 }
1472
1473 break;
1474
1475 case R_X86_64_TLSLD:
1476 if (info->executable)
1477 to_type = R_X86_64_TPOFF32;
1478 break;
1479
1480 default:
1481 return TRUE;
1482 }
1483
1484 /* Return TRUE if there is no transition. */
1485 if (from_type == to_type)
1486 return TRUE;
1487
1488 /* Check if the transition can be performed. */
1489 if (check
1490 && ! elf_x86_64_check_tls_transition (abfd, info, sec, contents,
1491 symtab_hdr, sym_hashes,
1492 from_type, rel, relend))
1493 {
1494 reloc_howto_type *from, *to;
1495 const char *name;
1496
1497 from = elf_x86_64_rtype_to_howto (abfd, from_type);
1498 to = elf_x86_64_rtype_to_howto (abfd, to_type);
1499
1500 if (h)
1501 name = h->root.root.string;
1502 else
1503 {
1504 struct elf_x86_64_link_hash_table *htab;
1505
1506 htab = elf_x86_64_hash_table (info);
1507 if (htab == NULL)
1508 name = "*unknown*";
1509 else
1510 {
1511 Elf_Internal_Sym *isym;
1512
1513 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1514 abfd, r_symndx);
1515 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1516 }
1517 }
1518
1519 (*_bfd_error_handler)
1520 (_("%B: TLS transition from %s to %s against `%s' at 0x%lx "
1521 "in section `%A' failed"),
1522 abfd, sec, from->name, to->name, name,
1523 (unsigned long) rel->r_offset);
1524 bfd_set_error (bfd_error_bad_value);
1525 return FALSE;
1526 }
1527
1528 *r_type = to_type;
1529 return TRUE;
1530 }
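/* To summarize the transitions performed for executables: GD and
   GDesc accesses become IE (GOTTPOFF) for symbols that may still be
   dynamic, GD/GDesc/IE become LE (TPOFF32) for locally resolved
   symbols, and LD becomes LE; shared objects keep the original
   access model.  */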
1531
1532 /* Look through the relocs for a section during the first phase, and
1533 calculate needed space in the global offset table, procedure
1534 linkage table, and dynamic reloc sections. */
1535
1536 static bfd_boolean
1537 elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info,
1538 asection *sec,
1539 const Elf_Internal_Rela *relocs)
1540 {
1541 struct elf_x86_64_link_hash_table *htab;
1542 Elf_Internal_Shdr *symtab_hdr;
1543 struct elf_link_hash_entry **sym_hashes;
1544 const Elf_Internal_Rela *rel;
1545 const Elf_Internal_Rela *rel_end;
1546 asection *sreloc;
1547 bfd_boolean use_plt_got;
1548
1549 if (info->relocatable)
1550 return TRUE;
1551
1552 BFD_ASSERT (is_x86_64_elf (abfd));
1553
1554 htab = elf_x86_64_hash_table (info);
1555 if (htab == NULL)
1556 return FALSE;
1557
1558 use_plt_got = get_elf_x86_64_backend_data (abfd) == &elf_x86_64_arch_bed;
1559
1560 symtab_hdr = &elf_symtab_hdr (abfd);
1561 sym_hashes = elf_sym_hashes (abfd);
1562
1563 sreloc = NULL;
1564
1565 rel_end = relocs + sec->reloc_count;
1566 for (rel = relocs; rel < rel_end; rel++)
1567 {
1568 unsigned int r_type;
1569 unsigned long r_symndx;
1570 struct elf_link_hash_entry *h;
1571 Elf_Internal_Sym *isym;
1572 const char *name;
1573 bfd_boolean size_reloc;
1574
1575 r_symndx = htab->r_sym (rel->r_info);
1576 r_type = ELF32_R_TYPE (rel->r_info);
1577
1578 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
1579 {
1580 (*_bfd_error_handler) (_("%B: bad symbol index: %d"),
1581 abfd, r_symndx);
1582 return FALSE;
1583 }
1584
1585 if (r_symndx < symtab_hdr->sh_info)
1586 {
1587 /* A local symbol. */
1588 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1589 abfd, r_symndx);
1590 if (isym == NULL)
1591 return FALSE;
1592
1593 /* Check relocation against local STT_GNU_IFUNC symbol. */
1594 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
1595 {
1596 h = elf_x86_64_get_local_sym_hash (htab, abfd, rel,
1597 TRUE);
1598 if (h == NULL)
1599 return FALSE;
1600
1601 /* Fake a STT_GNU_IFUNC symbol. */
1602 h->type = STT_GNU_IFUNC;
1603 h->def_regular = 1;
1604 h->ref_regular = 1;
1605 h->forced_local = 1;
1606 h->root.type = bfd_link_hash_defined;
1607 }
1608 else
1609 h = NULL;
1610 }
1611 else
1612 {
1613 isym = NULL;
1614 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1615 while (h->root.type == bfd_link_hash_indirect
1616 || h->root.type == bfd_link_hash_warning)
1617 h = (struct elf_link_hash_entry *) h->root.u.i.link;
1618 }
1619
1620 /* Check invalid x32 relocations. */
1621 if (!ABI_64_P (abfd))
1622 switch (r_type)
1623 {
1624 default:
1625 break;
1626
1627 case R_X86_64_DTPOFF64:
1628 case R_X86_64_TPOFF64:
1629 case R_X86_64_PC64:
1630 case R_X86_64_GOTOFF64:
1631 case R_X86_64_GOT64:
1632 case R_X86_64_GOTPCREL64:
1633 case R_X86_64_GOTPC64:
1634 case R_X86_64_GOTPLT64:
1635 case R_X86_64_PLTOFF64:
1636 {
1637 if (h)
1638 name = h->root.root.string;
1639 else
1640 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1641 NULL);
1642 (*_bfd_error_handler)
1643 (_("%B: relocation %s against symbol `%s' isn't "
1644 "supported in x32 mode"), abfd,
1645 x86_64_elf_howto_table[r_type].name, name);
1646 bfd_set_error (bfd_error_bad_value);
1647 return FALSE;
1648 }
1649 break;
1650 }
1651
1652 if (h != NULL)
1653 {
1654 /* Create the ifunc sections for static executables.  If we
1655 never see an indirect function symbol and are not building
1656 a static executable, those sections will be empty and
1657 won't appear in the output. */
1658 switch (r_type)
1659 {
1660 default:
1661 break;
1662
1663 case R_X86_64_PC32_BND:
1664 case R_X86_64_PLT32_BND:
1665 case R_X86_64_PC32:
1666 case R_X86_64_PLT32:
1667 case R_X86_64_32:
1668 case R_X86_64_64:
1669 /* MPX PLT is supported only if elf_x86_64_arch_bed
1670 is used in 64-bit mode. */
1671 if (ABI_64_P (abfd)
1672 && info->bndplt
1673 && (get_elf_x86_64_backend_data (abfd)
1674 == &elf_x86_64_arch_bed))
1675 {
1676 elf_x86_64_hash_entry (h)->has_bnd_reloc = 1;
1677
1678 /* Create the second PLT for Intel MPX support. */
1679 if (htab->plt_bnd == NULL)
1680 {
1681 unsigned int plt_bnd_align;
1682 const struct elf_backend_data *bed;
1683
1684 bed = get_elf_backend_data (info->output_bfd);
1685 BFD_ASSERT (sizeof (elf_x86_64_bnd_plt2_entry) == 8
1686 && (sizeof (elf_x86_64_bnd_plt2_entry)
1687 == sizeof (elf_x86_64_legacy_plt2_entry)));
1688 plt_bnd_align = 3;
1689
1690 if (htab->elf.dynobj == NULL)
1691 htab->elf.dynobj = abfd;
1692 htab->plt_bnd
1693 = bfd_make_section_anyway_with_flags (htab->elf.dynobj,
1694 ".plt.bnd",
1695 (bed->dynamic_sec_flags
1696 | SEC_ALLOC
1697 | SEC_CODE
1698 | SEC_LOAD
1699 | SEC_READONLY));
1700 if (htab->plt_bnd == NULL
1701 || !bfd_set_section_alignment (htab->elf.dynobj,
1702 htab->plt_bnd,
1703 plt_bnd_align))
1704 return FALSE;
1705 }
1706 }
1707
1708 case R_X86_64_32S:
1709 case R_X86_64_PC64:
1710 case R_X86_64_GOTPCREL:
1711 case R_X86_64_GOTPCREL64:
1712 if (htab->elf.dynobj == NULL)
1713 htab->elf.dynobj = abfd;
1714 if (!_bfd_elf_create_ifunc_sections (htab->elf.dynobj, info))
1715 return FALSE;
1716 break;
1717 }
1718
1719 /* It is referenced by a non-shared object. */
1720 h->ref_regular = 1;
1721 h->root.non_ir_ref = 1;
1722 }
1723
1724 if (! elf_x86_64_tls_transition (info, abfd, sec, NULL,
1725 symtab_hdr, sym_hashes,
1726 &r_type, GOT_UNKNOWN,
1727 rel, rel_end, h, r_symndx))
1728 return FALSE;
1729
1730 switch (r_type)
1731 {
1732 case R_X86_64_TLSLD:
1733 htab->tls_ld_got.refcount += 1;
1734 goto create_got;
1735
1736 case R_X86_64_TPOFF32:
1737 if (!info->executable && ABI_64_P (abfd))
1738 {
1739 if (h)
1740 name = h->root.root.string;
1741 else
1742 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1743 NULL);
1744 (*_bfd_error_handler)
1745 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
1746 abfd,
1747 x86_64_elf_howto_table[r_type].name, name);
1748 bfd_set_error (bfd_error_bad_value);
1749 return FALSE;
1750 }
1751 break;
1752
1753 case R_X86_64_GOTTPOFF:
1754 if (!info->executable)
1755 info->flags |= DF_STATIC_TLS;
1756 /* Fall through */
1757
1758 case R_X86_64_GOT32:
1759 case R_X86_64_GOTPCREL:
1760 case R_X86_64_TLSGD:
1761 case R_X86_64_GOT64:
1762 case R_X86_64_GOTPCREL64:
1763 case R_X86_64_GOTPLT64:
1764 case R_X86_64_GOTPC32_TLSDESC:
1765 case R_X86_64_TLSDESC_CALL:
1766 /* This symbol requires a global offset table entry. */
1767 {
1768 int tls_type, old_tls_type;
1769
1770 switch (r_type)
1771 {
1772 default: tls_type = GOT_NORMAL; break;
1773 case R_X86_64_TLSGD: tls_type = GOT_TLS_GD; break;
1774 case R_X86_64_GOTTPOFF: tls_type = GOT_TLS_IE; break;
1775 case R_X86_64_GOTPC32_TLSDESC:
1776 case R_X86_64_TLSDESC_CALL:
1777 tls_type = GOT_TLS_GDESC; break;
1778 }
1779
1780 if (h != NULL)
1781 {
1782 h->got.refcount += 1;
1783 old_tls_type = elf_x86_64_hash_entry (h)->tls_type;
1784 }
1785 else
1786 {
1787 bfd_signed_vma *local_got_refcounts;
1788
1789 /* This is a global offset table entry for a local symbol. */
1790 local_got_refcounts = elf_local_got_refcounts (abfd);
1791 if (local_got_refcounts == NULL)
1792 {
1793 bfd_size_type size;
1794
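/* One allocation holds three parallel arrays indexed by local
   symbol: GOT reference counts, TLSDESC GOT offsets and GOT TLS
   types; see the pointer arithmetic below.  */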
1795 size = symtab_hdr->sh_info;
1796 size *= sizeof (bfd_signed_vma)
1797 + sizeof (bfd_vma) + sizeof (char);
1798 local_got_refcounts = ((bfd_signed_vma *)
1799 bfd_zalloc (abfd, size));
1800 if (local_got_refcounts == NULL)
1801 return FALSE;
1802 elf_local_got_refcounts (abfd) = local_got_refcounts;
1803 elf_x86_64_local_tlsdesc_gotent (abfd)
1804 = (bfd_vma *) (local_got_refcounts + symtab_hdr->sh_info);
1805 elf_x86_64_local_got_tls_type (abfd)
1806 = (char *) (local_got_refcounts + 2 * symtab_hdr->sh_info);
1807 }
1808 local_got_refcounts[r_symndx] += 1;
1809 old_tls_type
1810 = elf_x86_64_local_got_tls_type (abfd) [r_symndx];
1811 }
1812
1813 /* If a TLS symbol is accessed using IE at least once,
1814 there is no point in using a dynamic model for it. */
1815 if (old_tls_type != tls_type && old_tls_type != GOT_UNKNOWN
1816 && (! GOT_TLS_GD_ANY_P (old_tls_type)
1817 || tls_type != GOT_TLS_IE))
1818 {
1819 if (old_tls_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (tls_type))
1820 tls_type = old_tls_type;
1821 else if (GOT_TLS_GD_ANY_P (old_tls_type)
1822 && GOT_TLS_GD_ANY_P (tls_type))
1823 tls_type |= old_tls_type;
1824 else
1825 {
1826 if (h)
1827 name = h->root.root.string;
1828 else
1829 name = bfd_elf_sym_name (abfd, symtab_hdr,
1830 isym, NULL);
1831 (*_bfd_error_handler)
1832 (_("%B: '%s' accessed both as normal and thread local symbol"),
1833 abfd, name);
1834 bfd_set_error (bfd_error_bad_value);
1835 return FALSE;
1836 }
1837 }
1838
1839 if (old_tls_type != tls_type)
1840 {
1841 if (h != NULL)
1842 elf_x86_64_hash_entry (h)->tls_type = tls_type;
1843 else
1844 elf_x86_64_local_got_tls_type (abfd) [r_symndx] = tls_type;
1845 }
1846 }
1847 /* Fall through */
1848
1849 case R_X86_64_GOTOFF64:
1850 case R_X86_64_GOTPC32:
1851 case R_X86_64_GOTPC64:
1852 create_got:
1853 if (htab->elf.sgot == NULL)
1854 {
1855 if (htab->elf.dynobj == NULL)
1856 htab->elf.dynobj = abfd;
1857 if (!_bfd_elf_create_got_section (htab->elf.dynobj,
1858 info))
1859 return FALSE;
1860 }
1861 break;
1862
1863 case R_X86_64_PLT32:
1864 case R_X86_64_PLT32_BND:
1865 /* This symbol requires a procedure linkage table entry. We
1866 actually build the entry in adjust_dynamic_symbol,
1867 because this might be a case of linking PIC code which is
1868 never referenced by a dynamic object, in which case we
1869 don't need to generate a procedure linkage table entry
1870 after all. */
1871
1872 /* If this is a local symbol, we resolve it directly without
1873 creating a procedure linkage table entry. */
1874 if (h == NULL)
1875 continue;
1876
1877 h->needs_plt = 1;
1878 h->plt.refcount += 1;
1879 break;
1880
1881 case R_X86_64_PLTOFF64:
1882 /* This tries to form the 'address' of a function relative
1883 to the GOT. For global symbols we need a PLT entry. */
1884 if (h != NULL)
1885 {
1886 h->needs_plt = 1;
1887 h->plt.refcount += 1;
1888 }
1889 goto create_got;
1890
1891 case R_X86_64_SIZE32:
1892 case R_X86_64_SIZE64:
1893 size_reloc = TRUE;
1894 goto do_size;
1895
1896 case R_X86_64_32:
1897 if (!ABI_64_P (abfd))
1898 goto pointer;
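/* Fall through.  */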
1899 case R_X86_64_8:
1900 case R_X86_64_16:
1901 case R_X86_64_32S:
1902 /* Let's help debug shared library creation. These relocs
1903 cannot be used in shared libs. Don't error out for
1904 sections we don't care about, such as debug sections or
1905 non-constant sections. */
1906 if (info->shared
1907 && (sec->flags & SEC_ALLOC) != 0
1908 && (sec->flags & SEC_READONLY) != 0)
1909 {
1910 if (h)
1911 name = h->root.root.string;
1912 else
1913 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1914 (*_bfd_error_handler)
1915 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
1916 abfd, x86_64_elf_howto_table[r_type].name, name);
1917 bfd_set_error (bfd_error_bad_value);
1918 return FALSE;
1919 }
1920 /* Fall through. */
1921
1922 case R_X86_64_PC8:
1923 case R_X86_64_PC16:
1924 case R_X86_64_PC32:
1925 case R_X86_64_PC32_BND:
1926 case R_X86_64_PC64:
1927 case R_X86_64_64:
1928 pointer:
1929 if (h != NULL && info->executable)
1930 {
1931 /* If this reloc is in a read-only section, we might
1932 need a copy reloc. We can't check reliably at this
1933 stage whether the section is read-only, as input
1934 sections have not yet been mapped to output sections.
1935 Tentatively set the flag for now, and correct in
1936 adjust_dynamic_symbol. */
1937 h->non_got_ref = 1;
1938
1939 /* We may need a .plt entry if the function this reloc
1940 refers to is in a shared lib. */
1941 h->plt.refcount += 1;
1942 if (r_type != R_X86_64_PC32
1943 && r_type != R_X86_64_PC32_BND
1944 && r_type != R_X86_64_PC64)
1945 h->pointer_equality_needed = 1;
1946 }
1947
1948 size_reloc = FALSE;
1949 do_size:
1950 /* If we are creating a shared library, and this is a reloc
1951 against a global symbol, or a non PC relative reloc
1952 against a local symbol, then we need to copy the reloc
1953 into the shared library. However, if we are linking with
1954 -Bsymbolic, we do not need to copy a reloc against a
1955 global symbol which is defined in an object we are
1956 including in the link (i.e., DEF_REGULAR is set). At
1957 this point we have not seen all the input files, so it is
1958 possible that DEF_REGULAR is not set now but will be set
1959 later (it is never cleared). In case of a weak definition,
1960 DEF_REGULAR may be cleared later by a strong definition in
1961 a shared library. We account for that possibility below by
1962 storing information in the relocs_copied field of the hash
1963 table entry. A similar situation occurs when creating
1964 shared libraries and symbol visibility changes render the
1965 symbol local.
1966
1967 If on the other hand, we are creating an executable, we
1968 may need to keep relocations for symbols satisfied by a
1969 dynamic library if we manage to avoid copy relocs for the
1970 symbol. */
1971 if ((info->shared
1972 && (sec->flags & SEC_ALLOC) != 0
1973 && (! IS_X86_64_PCREL_TYPE (r_type)
1974 || (h != NULL
1975 && (! SYMBOLIC_BIND (info, h)
1976 || h->root.type == bfd_link_hash_defweak
1977 || !h->def_regular))))
1978 || (ELIMINATE_COPY_RELOCS
1979 && !info->shared
1980 && (sec->flags & SEC_ALLOC) != 0
1981 && h != NULL
1982 && (h->root.type == bfd_link_hash_defweak
1983 || !h->def_regular)))
1984 {
1985 struct elf_dyn_relocs *p;
1986 struct elf_dyn_relocs **head;
1987
1988 /* We must copy these reloc types into the output file.
1989 Create a reloc section in dynobj and make room for
1990 this reloc. */
1991 if (sreloc == NULL)
1992 {
1993 if (htab->elf.dynobj == NULL)
1994 htab->elf.dynobj = abfd;
1995
1996 sreloc = _bfd_elf_make_dynamic_reloc_section
1997 (sec, htab->elf.dynobj, ABI_64_P (abfd) ? 3 : 2,
1998 abfd, /*rela?*/ TRUE);
1999
2000 if (sreloc == NULL)
2001 return FALSE;
2002 }
2003
2004 /* If this is a global symbol, we count the number of
2005 relocations we need for this symbol. */
2006 if (h != NULL)
2007 {
2008 head = &((struct elf_x86_64_link_hash_entry *) h)->dyn_relocs;
2009 }
2010 else
2011 {
2012 /* Track dynamic relocs needed for local syms too.
2013 We really need local syms available to do this
2014 easily. Oh well. */
2015 asection *s;
2016 void **vpp;
2017
2018 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2019 abfd, r_symndx);
2020 if (isym == NULL)
2021 return FALSE;
2022
2023 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
2024 if (s == NULL)
2025 s = sec;
2026
2027 /* Beware of type punned pointers vs strict aliasing
2028 rules. */
2029 vpp = &(elf_section_data (s)->local_dynrel);
2030 head = (struct elf_dyn_relocs **)vpp;
2031 }
2032
2033 p = *head;
2034 if (p == NULL || p->sec != sec)
2035 {
2036 bfd_size_type amt = sizeof *p;
2037
2038 p = ((struct elf_dyn_relocs *)
2039 bfd_alloc (htab->elf.dynobj, amt));
2040 if (p == NULL)
2041 return FALSE;
2042 p->next = *head;
2043 *head = p;
2044 p->sec = sec;
2045 p->count = 0;
2046 p->pc_count = 0;
2047 }
2048
2049 p->count += 1;
2050 /* Count size relocation as PC-relative relocation. */
2051 if (IS_X86_64_PCREL_TYPE (r_type) || size_reloc)
2052 p->pc_count += 1;
2053 }
2054 break;
2055
2056 /* This relocation describes the C++ object vtable hierarchy.
2057 Reconstruct it for later use during GC. */
2058 case R_X86_64_GNU_VTINHERIT:
2059 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
2060 return FALSE;
2061 break;
2062
2063 /* This relocation describes which C++ vtable entries are actually
2064 used. Record for later use during GC. */
2065 case R_X86_64_GNU_VTENTRY:
2066 BFD_ASSERT (h != NULL);
2067 if (h != NULL
2068 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
2069 return FALSE;
2070 break;
2071
2072 default:
2073 break;
2074 }
2075
2076 if (use_plt_got
2077 && h != NULL
2078 && h->plt.refcount > 0
2079 && h->got.refcount > 0
2080 && htab->plt_got == NULL)
2081 {
2082 /* Create the GOT procedure linkage table. */
2083 unsigned int plt_got_align;
2084 const struct elf_backend_data *bed;
2085
2086 bed = get_elf_backend_data (info->output_bfd);
2087 BFD_ASSERT (sizeof (elf_x86_64_legacy_plt2_entry) == 8
2088 && (sizeof (elf_x86_64_bnd_plt2_entry)
2089 == sizeof (elf_x86_64_legacy_plt2_entry)));
2090 plt_got_align = 3;
2091
2092 if (htab->elf.dynobj == NULL)
2093 htab->elf.dynobj = abfd;
2094 htab->plt_got
2095 = bfd_make_section_anyway_with_flags (htab->elf.dynobj,
2096 ".plt.got",
2097 (bed->dynamic_sec_flags
2098 | SEC_ALLOC
2099 | SEC_CODE
2100 | SEC_LOAD
2101 | SEC_READONLY));
2102 if (htab->plt_got == NULL
2103 || !bfd_set_section_alignment (htab->elf.dynobj,
2104 htab->plt_got,
2105 plt_got_align))
2106 return FALSE;
2107 }
2108 }
2109
2110 return TRUE;
2111 }
2112
2113 /* Return the section that should be marked against GC for a given
2114 relocation. */
2115
2116 static asection *
2117 elf_x86_64_gc_mark_hook (asection *sec,
2118 struct bfd_link_info *info,
2119 Elf_Internal_Rela *rel,
2120 struct elf_link_hash_entry *h,
2121 Elf_Internal_Sym *sym)
2122 {
2123 if (h != NULL)
2124 switch (ELF32_R_TYPE (rel->r_info))
2125 {
2126 case R_X86_64_GNU_VTINHERIT:
2127 case R_X86_64_GNU_VTENTRY:
2128 return NULL;
2129 }
2130
2131 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
2132 }
2133
2134 /* Update the got entry reference counts for the section being removed. */
2135
2136 static bfd_boolean
2137 elf_x86_64_gc_sweep_hook (bfd *abfd, struct bfd_link_info *info,
2138 asection *sec,
2139 const Elf_Internal_Rela *relocs)
2140 {
2141 struct elf_x86_64_link_hash_table *htab;
2142 Elf_Internal_Shdr *symtab_hdr;
2143 struct elf_link_hash_entry **sym_hashes;
2144 bfd_signed_vma *local_got_refcounts;
2145 const Elf_Internal_Rela *rel, *relend;
2146
2147 if (info->relocatable)
2148 return TRUE;
2149
2150 htab = elf_x86_64_hash_table (info);
2151 if (htab == NULL)
2152 return FALSE;
2153
2154 elf_section_data (sec)->local_dynrel = NULL;
2155
2156 symtab_hdr = &elf_symtab_hdr (abfd);
2157 sym_hashes = elf_sym_hashes (abfd);
2158 local_got_refcounts = elf_local_got_refcounts (abfd);
2159
2160 htab = elf_x86_64_hash_table (info);
2161 relend = relocs + sec->reloc_count;
2162 for (rel = relocs; rel < relend; rel++)
2163 {
2164 unsigned long r_symndx;
2165 unsigned int r_type;
2166 struct elf_link_hash_entry *h = NULL;
2167
2168 r_symndx = htab->r_sym (rel->r_info);
2169 if (r_symndx >= symtab_hdr->sh_info)
2170 {
2171 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
2172 while (h->root.type == bfd_link_hash_indirect
2173 || h->root.type == bfd_link_hash_warning)
2174 h = (struct elf_link_hash_entry *) h->root.u.i.link;
2175 }
2176 else
2177 {
2178 /* A local symbol. */
2179 Elf_Internal_Sym *isym;
2180
2181 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2182 abfd, r_symndx);
2183
2184 /* Check relocation against local STT_GNU_IFUNC symbol. */
2185 if (isym != NULL
2186 && ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
2187 {
2188 h = elf_x86_64_get_local_sym_hash (htab, abfd, rel, FALSE);
2189 if (h == NULL)
2190 abort ();
2191 }
2192 }
2193
2194 if (h)
2195 {
2196 struct elf_x86_64_link_hash_entry *eh;
2197 struct elf_dyn_relocs **pp;
2198 struct elf_dyn_relocs *p;
2199
2200 eh = (struct elf_x86_64_link_hash_entry *) h;
2201
2202 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; pp = &p->next)
2203 if (p->sec == sec)
2204 {
2205 /* Everything must go for SEC. */
2206 *pp = p->next;
2207 break;
2208 }
2209 }
2210
2211 r_type = ELF32_R_TYPE (rel->r_info);
2212 if (! elf_x86_64_tls_transition (info, abfd, sec, NULL,
2213 symtab_hdr, sym_hashes,
2214 &r_type, GOT_UNKNOWN,
2215 rel, relend, h, r_symndx))
2216 return FALSE;
2217
2218 switch (r_type)
2219 {
2220 case R_X86_64_TLSLD:
2221 if (htab->tls_ld_got.refcount > 0)
2222 htab->tls_ld_got.refcount -= 1;
2223 break;
2224
2225 case R_X86_64_TLSGD:
2226 case R_X86_64_GOTPC32_TLSDESC:
2227 case R_X86_64_TLSDESC_CALL:
2228 case R_X86_64_GOTTPOFF:
2229 case R_X86_64_GOT32:
2230 case R_X86_64_GOTPCREL:
2231 case R_X86_64_GOT64:
2232 case R_X86_64_GOTPCREL64:
2233 case R_X86_64_GOTPLT64:
2234 if (h != NULL)
2235 {
2236 if (h->got.refcount > 0)
2237 h->got.refcount -= 1;
2238 if (h->type == STT_GNU_IFUNC)
2239 {
2240 if (h->plt.refcount > 0)
2241 h->plt.refcount -= 1;
2242 }
2243 }
2244 else if (local_got_refcounts != NULL)
2245 {
2246 if (local_got_refcounts[r_symndx] > 0)
2247 local_got_refcounts[r_symndx] -= 1;
2248 }
2249 break;
2250
2251 case R_X86_64_8:
2252 case R_X86_64_16:
2253 case R_X86_64_32:
2254 case R_X86_64_64:
2255 case R_X86_64_32S:
2256 case R_X86_64_PC8:
2257 case R_X86_64_PC16:
2258 case R_X86_64_PC32:
2259 case R_X86_64_PC32_BND:
2260 case R_X86_64_PC64:
2261 case R_X86_64_SIZE32:
2262 case R_X86_64_SIZE64:
2263 if (info->shared
2264 && (h == NULL || h->type != STT_GNU_IFUNC))
2265 break;
2266 /* Fall thru */
2267
2268 case R_X86_64_PLT32:
2269 case R_X86_64_PLT32_BND:
2270 case R_X86_64_PLTOFF64:
2271 if (h != NULL)
2272 {
2273 if (h->plt.refcount > 0)
2274 h->plt.refcount -= 1;
2275 }
2276 break;
2277
2278 default:
2279 break;
2280 }
2281 }
2282
2283 return TRUE;
2284 }
2285
2286 /* Adjust a symbol defined by a dynamic object and referenced by a
2287 regular object. The current definition is in some section of the
2288 dynamic object, but we're not including those sections. We have to
2289 change the definition to something the rest of the link can
2290 understand. */
2291
2292 static bfd_boolean
2293 elf_x86_64_adjust_dynamic_symbol (struct bfd_link_info *info,
2294 struct elf_link_hash_entry *h)
2295 {
2296 struct elf_x86_64_link_hash_table *htab;
2297 asection *s;
2298 struct elf_x86_64_link_hash_entry *eh;
2299 struct elf_dyn_relocs *p;
2300
2301 /* STT_GNU_IFUNC symbol must go through PLT. */
2302 if (h->type == STT_GNU_IFUNC)
2303 {
2304 /* All local STT_GNU_IFUNC references must be treated as local
2305 calls via the local PLT. */
2306 if (h->ref_regular
2307 && SYMBOL_CALLS_LOCAL (info, h))
2308 {
2309 bfd_size_type pc_count = 0, count = 0;
2310 struct elf_dyn_relocs **pp;
2311
2312 eh = (struct elf_x86_64_link_hash_entry *) h;
2313 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
2314 {
2315 pc_count += p->pc_count;
2316 p->count -= p->pc_count;
2317 p->pc_count = 0;
2318 count += p->count;
2319 if (p->count == 0)
2320 *pp = p->next;
2321 else
2322 pp = &p->next;
2323 }
2324
2325 if (pc_count || count)
2326 {
2327 h->needs_plt = 1;
2328 h->non_got_ref = 1;
2329 if (h->plt.refcount <= 0)
2330 h->plt.refcount = 1;
2331 else
2332 h->plt.refcount += 1;
2333 }
2334 }
2335
2336 if (h->plt.refcount <= 0)
2337 {
2338 h->plt.offset = (bfd_vma) -1;
2339 h->needs_plt = 0;
2340 }
2341 return TRUE;
2342 }
2343
2344 /* If this is a function, put it in the procedure linkage table. We
2345 will fill in the contents of the procedure linkage table later,
2346 when we know the address of the .got section. */
2347 if (h->type == STT_FUNC
2348 || h->needs_plt)
2349 {
2350 if (h->plt.refcount <= 0
2351 || SYMBOL_CALLS_LOCAL (info, h)
2352 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
2353 && h->root.type == bfd_link_hash_undefweak))
2354 {
2355 /* This case can occur if we saw a PLT32 reloc in an input
2356 file, but the symbol was never referred to by a dynamic
2357 object, or if all references were garbage collected. In
2358 such a case, we don't actually need to build a procedure
2359 linkage table, and we can just do a PC32 reloc instead. */
2360 h->plt.offset = (bfd_vma) -1;
2361 h->needs_plt = 0;
2362 }
2363
2364 return TRUE;
2365 }
2366 else
2367 /* It's possible that we incorrectly decided a .plt reloc was
2368 needed for an R_X86_64_PC32 reloc to a non-function sym in
2369 check_relocs. We can't decide accurately between function and
2370 non-function syms in check_relocs; objects loaded later in
2371 the link may change h->type. So fix it now. */
2372 h->plt.offset = (bfd_vma) -1;
2373
2374 /* If this is a weak symbol, and there is a real definition, the
2375 processor independent code will have arranged for us to see the
2376 real definition first, and we can just use the same value. */
2377 if (h->u.weakdef != NULL)
2378 {
2379 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
2380 || h->u.weakdef->root.type == bfd_link_hash_defweak);
2381 h->root.u.def.section = h->u.weakdef->root.u.def.section;
2382 h->root.u.def.value = h->u.weakdef->root.u.def.value;
2383 if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
2384 {
2385 eh = (struct elf_x86_64_link_hash_entry *) h;
2386 h->non_got_ref = h->u.weakdef->non_got_ref;
2387 eh->needs_copy = h->u.weakdef->needs_copy;
2388 }
2389 return TRUE;
2390 }
2391
2392 /* This is a reference to a symbol defined by a dynamic object which
2393 is not a function. */
2394
2395 /* If we are creating a shared library, we must presume that the
2396 only references to the symbol are via the global offset table.
2397 For such cases we need not do anything here; the relocations will
2398 be handled correctly by relocate_section. */
2399 if (!info->executable)
2400 return TRUE;
2401
2402 /* If there are no references to this symbol that do not use the
2403 GOT, we don't need to generate a copy reloc. */
2404 if (!h->non_got_ref)
2405 return TRUE;
2406
2407 /* If -z nocopyreloc was given, we won't generate them either. */
2408 if (info->nocopyreloc)
2409 {
2410 h->non_got_ref = 0;
2411 return TRUE;
2412 }
2413
2414 if (ELIMINATE_COPY_RELOCS)
2415 {
2416 eh = (struct elf_x86_64_link_hash_entry *) h;
2417 for (p = eh->dyn_relocs; p != NULL; p = p->next)
2418 {
2419 s = p->sec->output_section;
2420 if (s != NULL && (s->flags & SEC_READONLY) != 0)
2421 break;
2422 }
2423
2424 /* If we didn't find any dynamic relocs in read-only sections, then
2425 we'll be keeping the dynamic relocs and avoiding the copy reloc. */
2426 if (p == NULL)
2427 {
2428 h->non_got_ref = 0;
2429 return TRUE;
2430 }
2431 }
2432
2433 /* We must allocate the symbol in our .dynbss section, which will
2434 become part of the .bss section of the executable. There will be
2435 an entry for this symbol in the .dynsym section. The dynamic
2436 object will contain position independent code, so all references
2437 from the dynamic object to this symbol will go through the global
2438 offset table. The dynamic linker will use the .dynsym entry to
2439 determine the address it must put in the global offset table, so
2440 both the dynamic object and the regular object will refer to the
2441 same memory location for the variable. */
2442
2443 htab = elf_x86_64_hash_table (info);
2444 if (htab == NULL)
2445 return FALSE;
2446
2447 /* We must generate a R_X86_64_COPY reloc to tell the dynamic linker
2448 to copy the initial value out of the dynamic object and into the
2449 runtime process image. */
2450 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
2451 {
2452 const struct elf_backend_data *bed;
2453 bed = get_elf_backend_data (info->output_bfd);
2454 htab->srelbss->size += bed->s->sizeof_rela;
2455 h->needs_copy = 1;
2456 }
2457
2458 s = htab->sdynbss;
2459
2460 return _bfd_elf_adjust_dynamic_copy (info, h, s);
2461 }
2462
2463 /* Allocate space in .plt, .got and associated reloc sections for
2464 dynamic relocs. */
2465
2466 static bfd_boolean
2467 elf_x86_64_allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf)
2468 {
2469 struct bfd_link_info *info;
2470 struct elf_x86_64_link_hash_table *htab;
2471 struct elf_x86_64_link_hash_entry *eh;
2472 struct elf_dyn_relocs *p;
2473 const struct elf_backend_data *bed;
2474 unsigned int plt_entry_size;
2475
2476 if (h->root.type == bfd_link_hash_indirect)
2477 return TRUE;
2478
2479 eh = (struct elf_x86_64_link_hash_entry *) h;
2480
2481 info = (struct bfd_link_info *) inf;
2482 htab = elf_x86_64_hash_table (info);
2483 if (htab == NULL)
2484 return FALSE;
2485 bed = get_elf_backend_data (info->output_bfd);
2486 plt_entry_size = GET_PLT_ENTRY_SIZE (info->output_bfd);
2487
2488 /* We can't use the GOT PLT if pointer equality is needed since
2489 finish_dynamic_symbol won't clear symbol value and the dynamic
2490 linker won't update the GOT slot. We will get into an infinite
2491 loop at run-time. */
2492 if (htab->plt_got != NULL
2493 && h->type != STT_GNU_IFUNC
2494 && !h->pointer_equality_needed
2495 && h->plt.refcount > 0
2496 && h->got.refcount > 0)
2497 {
2498 /* Don't use the regular PLT if there are both GOT and GOTPLT
2499 relocations. */
2500 h->plt.offset = (bfd_vma) -1;
2501
2502 /* Use the GOT PLT. */
2503 eh->plt_got.refcount = 1;
2504 }
2505
2506 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle it
2507 here if it is defined and referenced in a non-shared object. */
2508 if (h->type == STT_GNU_IFUNC
2509 && h->def_regular)
2510 {
2511 if (_bfd_elf_allocate_ifunc_dyn_relocs (info, h,
2512 &eh->dyn_relocs,
2513 plt_entry_size,
2514 plt_entry_size,
2515 GOT_ENTRY_SIZE))
2516 {
2517 asection *s = htab->plt_bnd;
2518 if (h->plt.offset != (bfd_vma) -1 && s != NULL)
2519 {
2520 /* Use the .plt.bnd section if it is created. */
2521 eh->plt_bnd.offset = s->size;
2522
2523 /* Make room for this entry in the .plt.bnd section. */
2524 s->size += sizeof (elf_x86_64_legacy_plt2_entry);
2525 }
2526
2527 return TRUE;
2528 }
2529 else
2530 return FALSE;
2531 }
2532 else if (htab->elf.dynamic_sections_created
2533 && (h->plt.refcount > 0 || eh->plt_got.refcount > 0))
2534 {
2535 bfd_boolean use_plt_got = eh->plt_got.refcount > 0;
2536
2537 /* Make sure this symbol is output as a dynamic symbol.
2538 Undefined weak syms won't yet be marked as dynamic. */
2539 if (h->dynindx == -1
2540 && !h->forced_local)
2541 {
2542 if (! bfd_elf_link_record_dynamic_symbol (info, h))
2543 return FALSE;
2544 }
2545
2546 if (info->shared
2547 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
2548 {
2549 asection *s = htab->elf.splt;
2550 asection *bnd_s = htab->plt_bnd;
2551 asection *got_s = htab->plt_got;
2552
2553 /* If this is the first .plt entry, make room for the special
2554 first entry. */
2555 if (s->size == 0)
2556 s->size = plt_entry_size;
2557
2558 if (use_plt_got)
2559 eh->plt_got.offset = got_s->size;
2560 else
2561 {
2562 h->plt.offset = s->size;
2563 if (bnd_s)
2564 eh->plt_bnd.offset = bnd_s->size;
2565 }
2566
2567 /* If this symbol is not defined in a regular file, and we are
2568 not generating a shared library, then set the symbol to this
2569 location in the .plt. This is required to make function
2570 pointers compare as equal between the normal executable and
2571 the shared library. */
2572 if (! info->shared
2573 && !h->def_regular)
2574 {
2575 if (use_plt_got)
2576 {
2577 /* We need to make a call to the entry of the GOT PLT
2578 instead of regular PLT entry. */
2579 h->root.u.def.section = got_s;
2580 h->root.u.def.value = eh->plt_got.offset;
2581 }
2582 else
2583 {
2584 if (bnd_s)
2585 {
2586 /* We need to make a call to the entry of the second
2587 PLT instead of regular PLT entry. */
2588 h->root.u.def.section = bnd_s;
2589 h->root.u.def.value = eh->plt_bnd.offset;
2590 }
2591 else
2592 {
2593 h->root.u.def.section = s;
2594 h->root.u.def.value = h->plt.offset;
2595 }
2596 }
2597 }
2598
2599 /* Make room for this entry. */
2600 if (use_plt_got)
2601 got_s->size += sizeof (elf_x86_64_legacy_plt2_entry);
2602 else
2603 {
2604 s->size += plt_entry_size;
2605 if (bnd_s)
2606 bnd_s->size += sizeof (elf_x86_64_legacy_plt2_entry);
2607
2608 /* We also need to make an entry in the .got.plt section,
2609 which will be placed in the .got section by the linker
2610 script. */
2611 htab->elf.sgotplt->size += GOT_ENTRY_SIZE;
2612
2613 /* We also need to make an entry in the .rela.plt
2614 section. */
2615 htab->elf.srelplt->size += bed->s->sizeof_rela;
2616 htab->elf.srelplt->reloc_count++;
2617 }
2618 }
2619 else
2620 {
2621 h->plt.offset = (bfd_vma) -1;
2622 h->needs_plt = 0;
2623 }
2624 }
2625 else
2626 {
2627 h->plt.offset = (bfd_vma) -1;
2628 h->needs_plt = 0;
2629 }
2630
2631 eh->tlsdesc_got = (bfd_vma) -1;
2632
2633 /* If R_X86_64_GOTTPOFF symbol is now local to the binary,
2634 make it a R_X86_64_TPOFF32 requiring no GOT entry. */
2635 if (h->got.refcount > 0
2636 && info->executable
2637 && h->dynindx == -1
2638 && elf_x86_64_hash_entry (h)->tls_type == GOT_TLS_IE)
2639 {
2640 h->got.offset = (bfd_vma) -1;
2641 }
2642 else if (h->got.refcount > 0)
2643 {
2644 asection *s;
2645 bfd_boolean dyn;
2646 int tls_type = elf_x86_64_hash_entry (h)->tls_type;
2647
2648 /* Make sure this symbol is output as a dynamic symbol.
2649 Undefined weak syms won't yet be marked as dynamic. */
2650 if (h->dynindx == -1
2651 && !h->forced_local)
2652 {
2653 if (! bfd_elf_link_record_dynamic_symbol (info, h))
2654 return FALSE;
2655 }
2656
2657 if (GOT_TLS_GDESC_P (tls_type))
2658 {
2659 eh->tlsdesc_got = htab->elf.sgotplt->size
2660 - elf_x86_64_compute_jump_table_size (htab);
2661 htab->elf.sgotplt->size += 2 * GOT_ENTRY_SIZE;
2662 h->got.offset = (bfd_vma) -2;
2663 }
2664 if (! GOT_TLS_GDESC_P (tls_type)
2665 || GOT_TLS_GD_P (tls_type))
2666 {
2667 s = htab->elf.sgot;
2668 h->got.offset = s->size;
2669 s->size += GOT_ENTRY_SIZE;
2670 if (GOT_TLS_GD_P (tls_type))
2671 s->size += GOT_ENTRY_SIZE;
2672 }
2673 dyn = htab->elf.dynamic_sections_created;
2674 /* R_X86_64_TLSGD needs one dynamic relocation if local symbol
2675 and two if global.
2676 R_X86_64_GOTTPOFF needs one dynamic relocation. */
2677 if ((GOT_TLS_GD_P (tls_type) && h->dynindx == -1)
2678 || tls_type == GOT_TLS_IE)
2679 htab->elf.srelgot->size += bed->s->sizeof_rela;
2680 else if (GOT_TLS_GD_P (tls_type))
2681 htab->elf.srelgot->size += 2 * bed->s->sizeof_rela;
2682 else if (! GOT_TLS_GDESC_P (tls_type)
2683 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
2684 || h->root.type != bfd_link_hash_undefweak)
2685 && (info->shared
2686 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
2687 htab->elf.srelgot->size += bed->s->sizeof_rela;
2688 if (GOT_TLS_GDESC_P (tls_type))
2689 {
2690 htab->elf.srelplt->size += bed->s->sizeof_rela;
2691 htab->tlsdesc_plt = (bfd_vma) -1;
2692 }
2693 }
2694 else
2695 h->got.offset = (bfd_vma) -1;
2696
2697 if (eh->dyn_relocs == NULL)
2698 return TRUE;
2699
2700 /* In the shared -Bsymbolic case, discard space allocated for
2701 dynamic pc-relative relocs against symbols which turn out to be
2702 defined in regular objects. For the normal shared case, discard
2703 space for pc-relative relocs that have become local due to symbol
2704 visibility changes. */
2705
2706 if (info->shared)
2707 {
2708 /* Relocs that use pc_count are those that appear on a call
2709 insn, or certain REL relocs that can be generated via assembly.
2710 We want calls to protected symbols to resolve directly to the
2711 function rather than going via the plt. If people want
2712 function pointer comparisons to work as expected then they
2713 should avoid writing weird assembly. */
2714 if (SYMBOL_CALLS_LOCAL (info, h))
2715 {
2716 struct elf_dyn_relocs **pp;
2717
2718 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
2719 {
2720 p->count -= p->pc_count;
2721 p->pc_count = 0;
2722 if (p->count == 0)
2723 *pp = p->next;
2724 else
2725 pp = &p->next;
2726 }
2727 }
2728
2729 /* Also discard relocs on undefined weak syms with non-default
2730 visibility. */
2731 if (eh->dyn_relocs != NULL)
2732 {
2733 if (h->root.type == bfd_link_hash_undefweak)
2734 {
2735 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
2736 eh->dyn_relocs = NULL;
2737
2738 /* Make sure undefined weak symbols are output as dynamic
2739 symbols in PIEs. */
2740 else if (h->dynindx == -1
2741 && ! h->forced_local
2742 && ! bfd_elf_link_record_dynamic_symbol (info, h))
2743 return FALSE;
2744 }
2745 /* For PIE, discard space for relocs against symbols which
2746 turn out to need copy relocs. */
2747 else if (info->executable
2748 && (h->needs_copy || eh->needs_copy)
2749 && h->def_dynamic
2750 && !h->def_regular)
2751 eh->dyn_relocs = NULL;
2752 }
2753 }
2754 else if (ELIMINATE_COPY_RELOCS)
2755 {
2756 /* For the non-shared case, discard space for relocs against
2757 symbols which turn out to need copy relocs or are not
2758 dynamic. */
2759
2760 if (!h->non_got_ref
2761 && ((h->def_dynamic
2762 && !h->def_regular)
2763 || (htab->elf.dynamic_sections_created
2764 && (h->root.type == bfd_link_hash_undefweak
2765 || h->root.type == bfd_link_hash_undefined))))
2766 {
2767 /* Make sure this symbol is output as a dynamic symbol.
2768 Undefined weak syms won't yet be marked as dynamic. */
2769 if (h->dynindx == -1
2770 && ! h->forced_local
2771 && ! bfd_elf_link_record_dynamic_symbol (info, h))
2772 return FALSE;
2773
2774 /* If that succeeded, we know we'll be keeping all the
2775 relocs. */
2776 if (h->dynindx != -1)
2777 goto keep;
2778 }
2779
2780 eh->dyn_relocs = NULL;
2781
2782 keep: ;
2783 }
2784
2785 /* Finally, allocate space. */
2786 for (p = eh->dyn_relocs; p != NULL; p = p->next)
2787 {
2788 asection * sreloc;
2789
2790 sreloc = elf_section_data (p->sec)->sreloc;
2791
2792 BFD_ASSERT (sreloc != NULL);
2793
2794 sreloc->size += p->count * bed->s->sizeof_rela;
2795 }
2796
2797 return TRUE;
2798 }
2799
2800 /* Allocate space in .plt, .got and associated reloc sections for
2801 local dynamic relocs. */
2802
2803 static bfd_boolean
2804 elf_x86_64_allocate_local_dynrelocs (void **slot, void *inf)
2805 {
2806 struct elf_link_hash_entry *h
2807 = (struct elf_link_hash_entry *) *slot;
2808
2809 if (h->type != STT_GNU_IFUNC
2810 || !h->def_regular
2811 || !h->ref_regular
2812 || !h->forced_local
2813 || h->root.type != bfd_link_hash_defined)
2814 abort ();
2815
2816 return elf_x86_64_allocate_dynrelocs (h, inf);
2817 }
2818
2819 /* Find any dynamic relocs that apply to read-only sections. */
2820
2821 static bfd_boolean
2822 elf_x86_64_readonly_dynrelocs (struct elf_link_hash_entry *h,
2823 void * inf)
2824 {
2825 struct elf_x86_64_link_hash_entry *eh;
2826 struct elf_dyn_relocs *p;
2827
2828 /* Skip local IFUNC symbols. */
2829 if (h->forced_local && h->type == STT_GNU_IFUNC)
2830 return TRUE;
2831
2832 eh = (struct elf_x86_64_link_hash_entry *) h;
2833 for (p = eh->dyn_relocs; p != NULL; p = p->next)
2834 {
2835 asection *s = p->sec->output_section;
2836
2837 if (s != NULL && (s->flags & SEC_READONLY) != 0)
2838 {
2839 struct bfd_link_info *info = (struct bfd_link_info *) inf;
2840
2841 info->flags |= DF_TEXTREL;
2842
2843 if (info->warn_shared_textrel && info->shared)
2844 info->callbacks->einfo (_("%P: %B: warning: relocation against `%s' in readonly section `%A'.\n"),
2845 p->sec->owner, h->root.root.string,
2846 p->sec);
2847
2848 /* Not an error, just cut short the traversal. */
2849 return FALSE;
2850 }
2851 }
2852 return TRUE;
2853 }
2854
2855 /* Convert
2856 mov foo@GOTPCREL(%rip), %reg
2857 to
2858 lea foo(%rip), %reg
2859 with the local symbol, foo. */
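/* Illustrative note: with a REX.W prefix the load is encoded as
   48 8b <modrm> <disp32> and the lea as 48 8d <modrm> <disp32>, so
   only the opcode byte differs.  The GOTPCREL relocation is applied
   to the 4-byte displacement, which is why the conversion below
   simply checks and rewrites the byte at r_offset - 2.  */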
2860
2861 static bfd_boolean
2862 elf_x86_64_convert_mov_to_lea (bfd *abfd, asection *sec,
2863 struct bfd_link_info *link_info)
2864 {
2865 Elf_Internal_Shdr *symtab_hdr;
2866 Elf_Internal_Rela *internal_relocs;
2867 Elf_Internal_Rela *irel, *irelend;
2868 bfd_byte *contents;
2869 struct elf_x86_64_link_hash_table *htab;
2870 bfd_boolean changed_contents;
2871 bfd_boolean changed_relocs;
2872 bfd_signed_vma *local_got_refcounts;
2873
2874 /* Don't even try to convert non-ELF outputs. */
2875 if (!is_elf_hash_table (link_info->hash))
2876 return FALSE;
2877
2878 /* Nothing to do if there is no code, no relocations or no output. */
2879 if ((sec->flags & (SEC_CODE | SEC_RELOC)) != (SEC_CODE | SEC_RELOC)
2880 || sec->reloc_count == 0
2881 || bfd_is_abs_section (sec->output_section))
2882 return TRUE;
2883
2884 symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
2885
2886 /* Load the relocations for this section. */
2887 internal_relocs = (_bfd_elf_link_read_relocs
2888 (abfd, sec, NULL, (Elf_Internal_Rela *) NULL,
2889 link_info->keep_memory));
2890 if (internal_relocs == NULL)
2891 return FALSE;
2892
2893 htab = elf_x86_64_hash_table (link_info);
2894 changed_contents = FALSE;
2895 changed_relocs = FALSE;
2896 local_got_refcounts = elf_local_got_refcounts (abfd);
2897
2898 /* Get the section contents. */
2899 if (elf_section_data (sec)->this_hdr.contents != NULL)
2900 contents = elf_section_data (sec)->this_hdr.contents;
2901 else
2902 {
2903 if (!bfd_malloc_and_get_section (abfd, sec, &contents))
2904 goto error_return;
2905 }
2906
2907 irelend = internal_relocs + sec->reloc_count;
2908 for (irel = internal_relocs; irel < irelend; irel++)
2909 {
2910 unsigned int r_type = ELF32_R_TYPE (irel->r_info);
2911 unsigned int r_symndx = htab->r_sym (irel->r_info);
2912 unsigned int indx;
2913 struct elf_link_hash_entry *h;
2914
2915 if (r_type != R_X86_64_GOTPCREL)
2916 continue;
2917
2918 /* Get the symbol referred to by the reloc. */
2919 if (r_symndx < symtab_hdr->sh_info)
2920 {
2921 Elf_Internal_Sym *isym;
2922
2923 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2924 abfd, r_symndx);
2925
2926 /* STT_GNU_IFUNC must keep R_X86_64_GOTPCREL relocation. */
2927 if (ELF_ST_TYPE (isym->st_info) != STT_GNU_IFUNC
2928 && irel->r_offset >= 2
2929 && bfd_get_8 (abfd,
2930 contents + irel->r_offset - 2) == 0x8b)
2931 {
2932 bfd_put_8 (abfd, 0x8d,
2933 contents + irel->r_offset - 2);
2934 irel->r_info = htab->r_info (r_symndx, R_X86_64_PC32);
2935 if (local_got_refcounts != NULL
2936 && local_got_refcounts[r_symndx] > 0)
2937 local_got_refcounts[r_symndx] -= 1;
2938 changed_contents = TRUE;
2939 changed_relocs = TRUE;
2940 }
2941 continue;
2942 }
2943
2944 indx = r_symndx - symtab_hdr->sh_info;
2945 h = elf_sym_hashes (abfd)[indx];
2946 BFD_ASSERT (h != NULL);
2947
2948 while (h->root.type == bfd_link_hash_indirect
2949 || h->root.type == bfd_link_hash_warning)
2950 h = (struct elf_link_hash_entry *) h->root.u.i.link;
2951
2952 /* STT_GNU_IFUNC must keep R_X86_64_GOTPCREL relocation. We also
2953 avoid optimizing _DYNAMIC since ld.so may use its link-time
2954 address. */
2955 if (h->def_regular
2956 && h->type != STT_GNU_IFUNC
2957 && h != htab->elf.hdynamic
2958 && SYMBOL_REFERENCES_LOCAL (link_info, h)
2959 && irel->r_offset >= 2
2960 && bfd_get_8 (abfd,
2961 contents + irel->r_offset - 2) == 0x8b)
2962 {
2963 bfd_put_8 (abfd, 0x8d,
2964 contents + irel->r_offset - 2);
2965 irel->r_info = htab->r_info (r_symndx, R_X86_64_PC32);
2966 if (h->got.refcount > 0)
2967 h->got.refcount -= 1;
2968 changed_contents = TRUE;
2969 changed_relocs = TRUE;
2970 }
2971 }
2972
2973 if (contents != NULL
2974 && elf_section_data (sec)->this_hdr.contents != contents)
2975 {
2976 if (!changed_contents && !link_info->keep_memory)
2977 free (contents);
2978 else
2979 {
2980 /* Cache the section contents for elf_link_input_bfd. */
2981 elf_section_data (sec)->this_hdr.contents = contents;
2982 }
2983 }
2984
2985 if (elf_section_data (sec)->relocs != internal_relocs)
2986 {
2987 if (!changed_relocs)
2988 free (internal_relocs);
2989 else
2990 elf_section_data (sec)->relocs = internal_relocs;
2991 }
2992
2993 return TRUE;
2994
2995 error_return:
2996 if (contents != NULL
2997 && elf_section_data (sec)->this_hdr.contents != contents)
2998 free (contents);
2999 if (internal_relocs != NULL
3000 && elf_section_data (sec)->relocs != internal_relocs)
3001 free (internal_relocs);
3002 return FALSE;
3003 }
3004
3005 /* Set the sizes of the dynamic sections. */
3006
3007 static bfd_boolean
3008 elf_x86_64_size_dynamic_sections (bfd *output_bfd,
3009 struct bfd_link_info *info)
3010 {
3011 struct elf_x86_64_link_hash_table *htab;
3012 bfd *dynobj;
3013 asection *s;
3014 bfd_boolean relocs;
3015 bfd *ibfd;
3016 const struct elf_backend_data *bed;
3017
3018 htab = elf_x86_64_hash_table (info);
3019 if (htab == NULL)
3020 return FALSE;
3021 bed = get_elf_backend_data (output_bfd);
3022
3023 dynobj = htab->elf.dynobj;
3024 if (dynobj == NULL)
3025 abort ();
3026
3027 if (htab->elf.dynamic_sections_created)
3028 {
3029 /* Set the contents of the .interp section to the interpreter. */
3030 if (info->executable)
3031 {
3032 s = bfd_get_linker_section (dynobj, ".interp");
3033 if (s == NULL)
3034 abort ();
3035 s->size = htab->dynamic_interpreter_size;
3036 s->contents = (unsigned char *) htab->dynamic_interpreter;
3037 }
3038 }
3039
3040 /* Set up .got offsets for local syms, and space for local dynamic
3041 relocs. */
3042 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
3043 {
3044 bfd_signed_vma *local_got;
3045 bfd_signed_vma *end_local_got;
3046 char *local_tls_type;
3047 bfd_vma *local_tlsdesc_gotent;
3048 bfd_size_type locsymcount;
3049 Elf_Internal_Shdr *symtab_hdr;
3050 asection *srel;
3051
3052 if (! is_x86_64_elf (ibfd))
3053 continue;
3054
3055 for (s = ibfd->sections; s != NULL; s = s->next)
3056 {
3057 struct elf_dyn_relocs *p;
3058
3059 if (!elf_x86_64_convert_mov_to_lea (ibfd, s, info))
3060 return FALSE;
3061
3062 for (p = (struct elf_dyn_relocs *)
3063 (elf_section_data (s)->local_dynrel);
3064 p != NULL;
3065 p = p->next)
3066 {
3067 if (!bfd_is_abs_section (p->sec)
3068 && bfd_is_abs_section (p->sec->output_section))
3069 {
3070 /* Input section has been discarded, either because
3071 it is a copy of a linkonce section or due to
3072 linker script /DISCARD/, so we'll be discarding
3073 the relocs too. */
3074 }
3075 else if (p->count != 0)
3076 {
3077 srel = elf_section_data (p->sec)->sreloc;
3078 srel->size += p->count * bed->s->sizeof_rela;
3079 if ((p->sec->output_section->flags & SEC_READONLY) != 0
3080 && (info->flags & DF_TEXTREL) == 0)
3081 {
3082 info->flags |= DF_TEXTREL;
3083 if (info->warn_shared_textrel && info->shared)
3084 info->callbacks->einfo (_("%P: %B: warning: relocation in readonly section `%A'.\n"),
3085 p->sec->owner, p->sec);
3086 }
3087 }
3088 }
3089 }
3090
3091 local_got = elf_local_got_refcounts (ibfd);
3092 if (!local_got)
3093 continue;
3094
3095 symtab_hdr = &elf_symtab_hdr (ibfd);
3096 locsymcount = symtab_hdr->sh_info;
3097 end_local_got = local_got + locsymcount;
3098 local_tls_type = elf_x86_64_local_got_tls_type (ibfd);
3099 local_tlsdesc_gotent = elf_x86_64_local_tlsdesc_gotent (ibfd);
3100 s = htab->elf.sgot;
3101 srel = htab->elf.srelgot;
3102 for (; local_got < end_local_got;
3103 ++local_got, ++local_tls_type, ++local_tlsdesc_gotent)
3104 {
3105 *local_tlsdesc_gotent = (bfd_vma) -1;
3106 if (*local_got > 0)
3107 {
3108 if (GOT_TLS_GDESC_P (*local_tls_type))
3109 {
3110 *local_tlsdesc_gotent = htab->elf.sgotplt->size
3111 - elf_x86_64_compute_jump_table_size (htab);
3112 htab->elf.sgotplt->size += 2 * GOT_ENTRY_SIZE;
3113 *local_got = (bfd_vma) -2;
3114 }
3115 if (! GOT_TLS_GDESC_P (*local_tls_type)
3116 || GOT_TLS_GD_P (*local_tls_type))
3117 {
3118 *local_got = s->size;
3119 s->size += GOT_ENTRY_SIZE;
3120 if (GOT_TLS_GD_P (*local_tls_type))
3121 s->size += GOT_ENTRY_SIZE;
3122 }
3123 if (info->shared
3124 || GOT_TLS_GD_ANY_P (*local_tls_type)
3125 || *local_tls_type == GOT_TLS_IE)
3126 {
3127 if (GOT_TLS_GDESC_P (*local_tls_type))
3128 {
3129 htab->elf.srelplt->size
3130 += bed->s->sizeof_rela;
3131 htab->tlsdesc_plt = (bfd_vma) -1;
3132 }
3133 if (! GOT_TLS_GDESC_P (*local_tls_type)
3134 || GOT_TLS_GD_P (*local_tls_type))
3135 srel->size += bed->s->sizeof_rela;
3136 }
3137 }
3138 else
3139 *local_got = (bfd_vma) -1;
3140 }
3141 }
3142
3143 if (htab->tls_ld_got.refcount > 0)
3144 {
3145 /* Allocate 2 got entries and 1 dynamic reloc for R_X86_64_TLSLD
3146 relocs. */
3147 htab->tls_ld_got.offset = htab->elf.sgot->size;
3148 htab->elf.sgot->size += 2 * GOT_ENTRY_SIZE;
3149 htab->elf.srelgot->size += bed->s->sizeof_rela;
3150 }
3151 else
3152 htab->tls_ld_got.offset = -1;
3153
3154 /* Allocate global sym .plt and .got entries, and space for global
3155 sym dynamic relocs. */
3156 elf_link_hash_traverse (&htab->elf, elf_x86_64_allocate_dynrelocs,
3157 info);
3158
3159 /* Allocate .plt and .got entries, and space for local symbols. */
3160 htab_traverse (htab->loc_hash_table,
3161 elf_x86_64_allocate_local_dynrelocs,
3162 info);
3163
3164 /* For every jump slot reserved in the sgotplt, reloc_count is
3165 incremented. However, when we reserve space for TLS descriptors,
3166 it's not incremented, so in order to compute the space reserved
3167 for them, it suffices to multiply the reloc count by the jump
3168 slot size.
3169
3170 PR ld/13302: We start next_irelative_index at the end of .rela.plt
3171 so that R_X86_64_IRELATIVE entries come last. */
3172 if (htab->elf.srelplt)
3173 {
3174 htab->sgotplt_jump_table_size
3175 = elf_x86_64_compute_jump_table_size (htab);
3176 htab->next_irelative_index = htab->elf.srelplt->reloc_count - 1;
3177 }
3178 else if (htab->elf.irelplt)
3179 htab->next_irelative_index = htab->elf.irelplt->reloc_count - 1;
3180
3181 if (htab->tlsdesc_plt)
3182 {
3183 /* If we're not using lazy TLS relocations, don't generate the
3184 PLT and GOT entries they require. */
3185 if ((info->flags & DF_BIND_NOW))
3186 htab->tlsdesc_plt = 0;
3187 else
3188 {
3189 htab->tlsdesc_got = htab->elf.sgot->size;
3190 htab->elf.sgot->size += GOT_ENTRY_SIZE;
3191 /* Reserve room for the initial entry.
3192 FIXME: we could probably do away with it in this case. */
3193 if (htab->elf.splt->size == 0)
3194 htab->elf.splt->size += GET_PLT_ENTRY_SIZE (output_bfd);
3195 htab->tlsdesc_plt = htab->elf.splt->size;
3196 htab->elf.splt->size += GET_PLT_ENTRY_SIZE (output_bfd);
3197 }
3198 }
3199
3200 if (htab->elf.sgotplt)
3201 {
3202 /* Don't allocate the .got.plt section if there are neither GOT
3203 nor PLT entries and there is no reference to _GLOBAL_OFFSET_TABLE_. */
3204 if ((htab->elf.hgot == NULL
3205 || !htab->elf.hgot->ref_regular_nonweak)
3206 && (htab->elf.sgotplt->size
3207 == get_elf_backend_data (output_bfd)->got_header_size)
3208 && (htab->elf.splt == NULL
3209 || htab->elf.splt->size == 0)
3210 && (htab->elf.sgot == NULL
3211 || htab->elf.sgot->size == 0)
3212 && (htab->elf.iplt == NULL
3213 || htab->elf.iplt->size == 0)
3214 && (htab->elf.igotplt == NULL
3215 || htab->elf.igotplt->size == 0))
3216 htab->elf.sgotplt->size = 0;
3217 }
3218
3219 if (htab->plt_eh_frame != NULL
3220 && htab->elf.splt != NULL
3221 && htab->elf.splt->size != 0
3222 && !bfd_is_abs_section (htab->elf.splt->output_section)
3223 && _bfd_elf_eh_frame_present (info))
3224 {
3225 const struct elf_x86_64_backend_data *arch_data
3226 = get_elf_x86_64_arch_data (bed);
3227 htab->plt_eh_frame->size = arch_data->eh_frame_plt_size;
3228 }
3229
3230 /* We have now determined the sizes of the various dynamic sections.
3231 Allocate memory for them. */
3232 relocs = FALSE;
3233 for (s = dynobj->sections; s != NULL; s = s->next)
3234 {
3235 if ((s->flags & SEC_LINKER_CREATED) == 0)
3236 continue;
3237
3238 if (s == htab->elf.splt
3239 || s == htab->elf.sgot
3240 || s == htab->elf.sgotplt
3241 || s == htab->elf.iplt
3242 || s == htab->elf.igotplt
3243 || s == htab->plt_bnd
3244 || s == htab->plt_got
3245 || s == htab->plt_eh_frame
3246 || s == htab->sdynbss)
3247 {
3248 /* Strip this section if we don't need it; see the
3249 comment below. */
3250 }
3251 else if (CONST_STRNEQ (bfd_get_section_name (dynobj, s), ".rela"))
3252 {
3253 if (s->size != 0 && s != htab->elf.srelplt)
3254 relocs = TRUE;
3255
3256 /* We use the reloc_count field as a counter if we need
3257 to copy relocs into the output file. */
3258 if (s != htab->elf.srelplt)
3259 s->reloc_count = 0;
3260 }
3261 else
3262 {
3263 /* It's not one of our sections, so don't allocate space. */
3264 continue;
3265 }
3266
3267 if (s->size == 0)
3268 {
3269 /* If we don't need this section, strip it from the
3270 output file. This is mostly to handle .rela.bss and
3271 .rela.plt. We must create both sections in
3272 create_dynamic_sections, because they must be created
3273 before the linker maps input sections to output
3274 sections. The linker does that before
3275 adjust_dynamic_symbol is called, and it is that
3276 function which decides whether anything needs to go
3277 into these sections. */
3278
3279 s->flags |= SEC_EXCLUDE;
3280 continue;
3281 }
3282
3283 if ((s->flags & SEC_HAS_CONTENTS) == 0)
3284 continue;
3285
3286 /* Allocate memory for the section contents. We use bfd_zalloc
3287 here in case unused entries are not reclaimed before the
3288 section's contents are written out. This should not happen,
3289 but this way if it does, we get a R_X86_64_NONE reloc instead
3290 of garbage. */
3291 s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
3292 if (s->contents == NULL)
3293 return FALSE;
3294 }
3295
3296 if (htab->plt_eh_frame != NULL
3297 && htab->plt_eh_frame->contents != NULL)
3298 {
3299 const struct elf_x86_64_backend_data *arch_data
3300 = get_elf_x86_64_arch_data (bed);
3301
3302 memcpy (htab->plt_eh_frame->contents,
3303 arch_data->eh_frame_plt, htab->plt_eh_frame->size);
3304 bfd_put_32 (dynobj, htab->elf.splt->size,
3305 htab->plt_eh_frame->contents + PLT_FDE_LEN_OFFSET);
3306 }
3307
3308 if (htab->elf.dynamic_sections_created)
3309 {
3310 /* Add some entries to the .dynamic section. We fill in the
3311 values later, in elf_x86_64_finish_dynamic_sections, but we
3312 must add the entries now so that we get the correct size for
3313 the .dynamic section. The DT_DEBUG entry is filled in by the
3314 dynamic linker and used by the debugger. */
3315 #define add_dynamic_entry(TAG, VAL) \
3316 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
3317
3318 if (info->executable)
3319 {
3320 if (!add_dynamic_entry (DT_DEBUG, 0))
3321 return FALSE;
3322 }
3323
3324 if (htab->elf.splt->size != 0)
3325 {
3326 if (!add_dynamic_entry (DT_PLTGOT, 0)
3327 || !add_dynamic_entry (DT_PLTRELSZ, 0)
3328 || !add_dynamic_entry (DT_PLTREL, DT_RELA)
3329 || !add_dynamic_entry (DT_JMPREL, 0))
3330 return FALSE;
3331
3332 if (htab->tlsdesc_plt
3333 && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
3334 || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
3335 return FALSE;
3336 }
3337
3338 if (relocs)
3339 {
3340 if (!add_dynamic_entry (DT_RELA, 0)
3341 || !add_dynamic_entry (DT_RELASZ, 0)
3342 || !add_dynamic_entry (DT_RELAENT, bed->s->sizeof_rela))
3343 return FALSE;
3344
3345 /* If any dynamic relocs apply to a read-only section,
3346 then we need a DT_TEXTREL entry. */
3347 if ((info->flags & DF_TEXTREL) == 0)
3348 elf_link_hash_traverse (&htab->elf,
3349 elf_x86_64_readonly_dynrelocs,
3350 info);
3351
3352 if ((info->flags & DF_TEXTREL) != 0)
3353 {
3354 if (!add_dynamic_entry (DT_TEXTREL, 0))
3355 return FALSE;
3356 }
3357 }
3358 }
3359 #undef add_dynamic_entry
3360
3361 return TRUE;
3362 }
3363
3364 static bfd_boolean
3365 elf_x86_64_always_size_sections (bfd *output_bfd,
3366 struct bfd_link_info *info)
3367 {
3368 asection *tls_sec = elf_hash_table (info)->tls_sec;
3369
3370 if (tls_sec)
3371 {
3372 struct elf_link_hash_entry *tlsbase;
3373
3374 tlsbase = elf_link_hash_lookup (elf_hash_table (info),
3375 "_TLS_MODULE_BASE_",
3376 FALSE, FALSE, FALSE);
3377
3378 if (tlsbase && tlsbase->type == STT_TLS)
3379 {
3380 struct elf_x86_64_link_hash_table *htab;
3381 struct bfd_link_hash_entry *bh = NULL;
3382 const struct elf_backend_data *bed
3383 = get_elf_backend_data (output_bfd);
3384
3385 htab = elf_x86_64_hash_table (info);
3386 if (htab == NULL)
3387 return FALSE;
3388
3389 if (!(_bfd_generic_link_add_one_symbol
3390 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
3391 tls_sec, 0, NULL, FALSE,
3392 bed->collect, &bh)))
3393 return FALSE;
3394
3395 htab->tls_module_base = bh;
3396
3397 tlsbase = (struct elf_link_hash_entry *)bh;
3398 tlsbase->def_regular = 1;
3399 tlsbase->other = STV_HIDDEN;
3400 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
3401 }
3402 }
3403
3404 return TRUE;
3405 }
3406
3407 /* _TLS_MODULE_BASE_ needs to be treated especially when linking
3408 executables. Rather than setting it to the beginning of the TLS
3409 section, we have to set it to the end. This function may be called
3410 multiple times; it is idempotent. */
3411
3412 static void
3413 elf_x86_64_set_tls_module_base (struct bfd_link_info *info)
3414 {
3415 struct elf_x86_64_link_hash_table *htab;
3416 struct bfd_link_hash_entry *base;
3417
3418 if (!info->executable)
3419 return;
3420
3421 htab = elf_x86_64_hash_table (info);
3422 if (htab == NULL)
3423 return;
3424
3425 base = htab->tls_module_base;
3426 if (base == NULL)
3427 return;
3428
3429 base->u.def.value = htab->elf.tls_size;
3430 }
3431
3432 /* Return the base VMA address which should be subtracted from real addresses
3433 when resolving @dtpoff relocation.
3434 This is PT_TLS segment p_vaddr. */
3435
3436 static bfd_vma
3437 elf_x86_64_dtpoff_base (struct bfd_link_info *info)
3438 {
3439 /* If tls_sec is NULL, we should have signalled an error already. */
3440 if (elf_hash_table (info)->tls_sec == NULL)
3441 return 0;
3442 return elf_hash_table (info)->tls_sec->vma;
3443 }
3444
3445 /* Return the relocation value for @tpoff relocation
3446 if STT_TLS virtual address is ADDRESS. */
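/* x86-64 uses TLS variant II: the static TLS block lives just below
   the thread pointer, so the value computed here is normally
   negative.  */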
3447
3448 static bfd_vma
3449 elf_x86_64_tpoff (struct bfd_link_info *info, bfd_vma address)
3450 {
3451 struct elf_link_hash_table *htab = elf_hash_table (info);
3452 const struct elf_backend_data *bed = get_elf_backend_data (info->output_bfd);
3453 bfd_vma static_tls_size;
3454
3455 /* If tls_segment is NULL, we should have signalled an error already. */
3456 if (htab->tls_sec == NULL)
3457 return 0;
3458
3459 /* Consider special static TLS alignment requirements. */
3460 static_tls_size = BFD_ALIGN (htab->tls_size, bed->static_tls_alignment);
3461 return address - static_tls_size - htab->tls_sec->vma;
3462 }
3463
3464 /* Is the instruction before OFFSET in CONTENTS a 32bit relative
3465 branch? */
3466
3467 static bfd_boolean
3468 is_32bit_relative_branch (bfd_byte *contents, bfd_vma offset)
3469 {
3470 /* Opcode Instruction
3471 0xe8 call
3472 0xe9 jump
3473 0x0f 0x8x conditional jump */
3474 return ((offset > 0
3475 && (contents [offset - 1] == 0xe8
3476 || contents [offset - 1] == 0xe9))
3477 || (offset > 1
3478 && contents [offset - 2] == 0x0f
3479 && (contents [offset - 1] & 0xf0) == 0x80));
3480 }
3481
3482 /* Relocate an x86_64 ELF section. */
3483
3484 static bfd_boolean
3485 elf_x86_64_relocate_section (bfd *output_bfd,
3486 struct bfd_link_info *info,
3487 bfd *input_bfd,
3488 asection *input_section,
3489 bfd_byte *contents,
3490 Elf_Internal_Rela *relocs,
3491 Elf_Internal_Sym *local_syms,
3492 asection **local_sections)
3493 {
3494 struct elf_x86_64_link_hash_table *htab;
3495 Elf_Internal_Shdr *symtab_hdr;
3496 struct elf_link_hash_entry **sym_hashes;
3497 bfd_vma *local_got_offsets;
3498 bfd_vma *local_tlsdesc_gotents;
3499 Elf_Internal_Rela *rel;
3500 Elf_Internal_Rela *relend;
3501 const unsigned int plt_entry_size = GET_PLT_ENTRY_SIZE (info->output_bfd);
3502
3503 BFD_ASSERT (is_x86_64_elf (input_bfd));
3504
3505 htab = elf_x86_64_hash_table (info);
3506 if (htab == NULL)
3507 return FALSE;
3508 symtab_hdr = &elf_symtab_hdr (input_bfd);
3509 sym_hashes = elf_sym_hashes (input_bfd);
3510 local_got_offsets = elf_local_got_offsets (input_bfd);
3511 local_tlsdesc_gotents = elf_x86_64_local_tlsdesc_gotent (input_bfd);
3512
3513 elf_x86_64_set_tls_module_base (info);
3514
3515 rel = relocs;
3516 relend = relocs + input_section->reloc_count;
3517 for (; rel < relend; rel++)
3518 {
3519 unsigned int r_type;
3520 reloc_howto_type *howto;
3521 unsigned long r_symndx;
3522 struct elf_link_hash_entry *h;
3523 struct elf_x86_64_link_hash_entry *eh;
3524 Elf_Internal_Sym *sym;
3525 asection *sec;
3526 bfd_vma off, offplt, plt_offset;
3527 bfd_vma relocation;
3528 bfd_boolean unresolved_reloc;
3529 bfd_reloc_status_type r;
3530 int tls_type;
3531 asection *base_got, *resolved_plt;
3532 bfd_vma st_size;
3533
3534 r_type = ELF32_R_TYPE (rel->r_info);
3535 if (r_type == (int) R_X86_64_GNU_VTINHERIT
3536 || r_type == (int) R_X86_64_GNU_VTENTRY)
3537 continue;
3538
3539 if (r_type >= (int) R_X86_64_standard)
3540 {
3541 (*_bfd_error_handler)
3542 (_("%B: unrecognized relocation (0x%x) in section `%A'"),
3543 input_bfd, input_section, r_type);
3544 bfd_set_error (bfd_error_bad_value);
3545 return FALSE;
3546 }
3547
3548 if (r_type != (int) R_X86_64_32
3549 || ABI_64_P (output_bfd))
3550 howto = x86_64_elf_howto_table + r_type;
3551 else
3552 howto = (x86_64_elf_howto_table
3553 + ARRAY_SIZE (x86_64_elf_howto_table) - 1);
3554 r_symndx = htab->r_sym (rel->r_info);
3555 h = NULL;
3556 sym = NULL;
3557 sec = NULL;
3558 unresolved_reloc = FALSE;
3559 if (r_symndx < symtab_hdr->sh_info)
3560 {
3561 sym = local_syms + r_symndx;
3562 sec = local_sections[r_symndx];
3563
3564 relocation = _bfd_elf_rela_local_sym (output_bfd, sym,
3565 &sec, rel);
3566 st_size = sym->st_size;
3567
3568 /* Relocate against local STT_GNU_IFUNC symbol. */
3569 if (!info->relocatable
3570 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
3571 {
3572 h = elf_x86_64_get_local_sym_hash (htab, input_bfd,
3573 rel, FALSE);
3574 if (h == NULL)
3575 abort ();
3576
3577 /* Set STT_GNU_IFUNC symbol value. */
3578 h->root.u.def.value = sym->st_value;
3579 h->root.u.def.section = sec;
3580 }
3581 }
3582 else
3583 {
3584 bfd_boolean warned ATTRIBUTE_UNUSED;
3585 bfd_boolean ignored ATTRIBUTE_UNUSED;
3586
3587 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
3588 r_symndx, symtab_hdr, sym_hashes,
3589 h, sec, relocation,
3590 unresolved_reloc, warned, ignored);
3591 st_size = h->size;
3592 }
3593
3594 if (sec != NULL && discarded_section (sec))
3595 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
3596 rel, 1, relend, howto, 0, contents);
3597
3598 if (info->relocatable)
3599 continue;
3600
3601 if (rel->r_addend == 0 && !ABI_64_P (output_bfd))
3602 {
3603 if (r_type == R_X86_64_64)
3604 {
3605 /* For x32, treat R_X86_64_64 like R_X86_64_32 and
3606 zero-extend it to 64bit if addend is zero. */
3607 r_type = R_X86_64_32;
3608 memset (contents + rel->r_offset + 4, 0, 4);
3609 }
3610 else if (r_type == R_X86_64_SIZE64)
3611 {
3612 /* For x32, treat R_X86_64_SIZE64 like R_X86_64_SIZE32 and
3613 zero-extend it to 64bit if addend is zero. */
3614 r_type = R_X86_64_SIZE32;
3615 memset (contents + rel->r_offset + 4, 0, 4);
3616 }
3617 }
3618
3619 eh = (struct elf_x86_64_link_hash_entry *) h;
3620
3621 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
3622 it here if it is defined in a non-shared object. */
3623 if (h != NULL
3624 && h->type == STT_GNU_IFUNC
3625 && h->def_regular)
3626 {
3627 bfd_vma plt_index;
3628 const char *name;
3629
3630 if ((input_section->flags & SEC_ALLOC) == 0
3631 || h->plt.offset == (bfd_vma) -1)
3632 abort ();
3633
3634 /* STT_GNU_IFUNC symbol must go through PLT. */
3635 if (htab->elf.splt != NULL)
3636 {
3637 if (htab->plt_bnd != NULL)
3638 {
3639 resolved_plt = htab->plt_bnd;
3640 plt_offset = eh->plt_bnd.offset;
3641 }
3642 else
3643 {
3644 resolved_plt = htab->elf.splt;
3645 plt_offset = h->plt.offset;
3646 }
3647 }
3648 else
3649 {
3650 resolved_plt = htab->elf.iplt;
3651 plt_offset = h->plt.offset;
3652 }
3653
3654 relocation = (resolved_plt->output_section->vma
3655 + resolved_plt->output_offset + plt_offset);
3656
3657 switch (r_type)
3658 {
3659 default:
3660 if (h->root.root.string)
3661 name = h->root.root.string;
3662 else
3663 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
3664 NULL);
3665 (*_bfd_error_handler)
3666 (_("%B: relocation %s against STT_GNU_IFUNC "
3667 "symbol `%s' isn't handled by %s"), input_bfd,
3668 x86_64_elf_howto_table[r_type].name,
3669 name, __FUNCTION__);
3670 bfd_set_error (bfd_error_bad_value);
3671 return FALSE;
3672
3673 case R_X86_64_32S:
3674 if (info->shared)
3675 abort ();
3676 goto do_relocation;
3677
3678 case R_X86_64_32:
3679 if (ABI_64_P (output_bfd))
3680 goto do_relocation;
3681 /* FALLTHROUGH */
3682 case R_X86_64_64:
3683 if (rel->r_addend != 0)
3684 {
3685 if (h->root.root.string)
3686 name = h->root.root.string;
3687 else
3688 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
3689 sym, NULL);
3690 (*_bfd_error_handler)
3691 (_("%B: relocation %s against STT_GNU_IFUNC "
3692 "symbol `%s' has non-zero addend: %d"),
3693 input_bfd, x86_64_elf_howto_table[r_type].name,
3694 name, rel->r_addend);
3695 bfd_set_error (bfd_error_bad_value);
3696 return FALSE;
3697 }
3698
3699 /* Generate a dynamic relocation only when there is a
3700 non-GOT reference in a shared object. */
3701 if (info->shared && h->non_got_ref)
3702 {
3703 Elf_Internal_Rela outrel;
3704 asection *sreloc;
3705
3706 /* Need a dynamic relocation to get the real function
3707 address. */
3708 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
3709 info,
3710 input_section,
3711 rel->r_offset);
3712 if (outrel.r_offset == (bfd_vma) -1
3713 || outrel.r_offset == (bfd_vma) -2)
3714 abort ();
3715
3716 outrel.r_offset += (input_section->output_section->vma
3717 + input_section->output_offset);
3718
3719 if (h->dynindx == -1
3720 || h->forced_local
3721 || info->executable)
3722 {
3723 /* This symbol is resolved locally. */
3724 outrel.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
3725 outrel.r_addend = (h->root.u.def.value
3726 + h->root.u.def.section->output_section->vma
3727 + h->root.u.def.section->output_offset);
3728 }
3729 else
3730 {
3731 outrel.r_info = htab->r_info (h->dynindx, r_type);
3732 outrel.r_addend = 0;
3733 }
3734
3735 sreloc = htab->elf.irelifunc;
3736 elf_append_rela (output_bfd, sreloc, &outrel);
3737
3738 /* If this reloc is against an external symbol, we
3739 do not want to fiddle with the addend. Otherwise,
3740 we need to include the symbol value so that it
3741 becomes an addend for the dynamic reloc. For an
3742 internal symbol, we have updated addend. */
3743 continue;
3744 }
3745 /* FALLTHROUGH */
3746 case R_X86_64_PC32:
3747 case R_X86_64_PC32_BND:
3748 case R_X86_64_PC64:
3749 case R_X86_64_PLT32:
3750 case R_X86_64_PLT32_BND:
3751 goto do_relocation;
3752
3753 case R_X86_64_GOTPCREL:
3754 case R_X86_64_GOTPCREL64:
3755 base_got = htab->elf.sgot;
3756 off = h->got.offset;
3757
3758 if (base_got == NULL)
3759 abort ();
3760
3761 if (off == (bfd_vma) -1)
3762 {
3763 /* We can't use h->got.offset here to save state, or
3764 even just remember the offset, as finish_dynamic_symbol
3765 would use that as offset into .got. */
3766
3767 if (htab->elf.splt != NULL)
3768 {
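/* The first PLT entry is reserved, and the first three .got.plt slots are reserved for the dynamic linker; see the comment in elf_x86_64_finish_dynamic_symbol. */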
3769 plt_index = h->plt.offset / plt_entry_size - 1;
3770 off = (plt_index + 3) * GOT_ENTRY_SIZE;
3771 base_got = htab->elf.sgotplt;
3772 }
3773 else
3774 {
3775 plt_index = h->plt.offset / plt_entry_size;
3776 off = plt_index * GOT_ENTRY_SIZE;
3777 base_got = htab->elf.igotplt;
3778 }
3779
3780 if (h->dynindx == -1
3781 || h->forced_local
3782 || info->symbolic)
3783 {
3784 /* This references the local definition. We must
3785 initialize this entry in the global offset table.
3786 Since the offset must always be a multiple of 8,
3787 we use the least significant bit to record
3788 whether we have initialized it already.
3789
3790 When doing a dynamic link, we create a .rela.got
3791 relocation entry to initialize the value. This
3792 is done in the finish_dynamic_symbol routine. */
3793 if ((off & 1) != 0)
3794 off &= ~1;
3795 else
3796 {
3797 bfd_put_64 (output_bfd, relocation,
3798 base_got->contents + off);
3799 /* Note that this is harmless for the GOTPLT64
3800 case, as -1 | 1 still is -1. */
3801 h->got.offset |= 1;
3802 }
3803 }
3804 }
3805
3806 relocation = (base_got->output_section->vma
3807 + base_got->output_offset + off);
3808
3809 goto do_relocation;
3810 }
3811 }
3812
3813 /* When generating a shared object, the relocations handled here are
3814 copied into the output file to be resolved at run time. */
3815 switch (r_type)
3816 {
3817 case R_X86_64_GOT32:
3818 case R_X86_64_GOT64:
3819 /* Relocation is to the entry for this symbol in the global
3820 offset table. */
3821 case R_X86_64_GOTPCREL:
3822 case R_X86_64_GOTPCREL64:
3823 /* Use global offset table entry as symbol value. */
3824 case R_X86_64_GOTPLT64:
3825 /* This is obsolete and treated the same as GOT64. */
3826 base_got = htab->elf.sgot;
3827
3828 if (htab->elf.sgot == NULL)
3829 abort ();
3830
3831 if (h != NULL)
3832 {
3833 bfd_boolean dyn;
3834
3835 off = h->got.offset;
3836 if (h->needs_plt
3837 && h->plt.offset != (bfd_vma)-1
3838 && off == (bfd_vma)-1)
3839 {
3840 /* We can't use h->got.offset here to save
3841 state, or even just remember the offset, as
3842 finish_dynamic_symbol would use that as offset into
3843 .got. */
3844 bfd_vma plt_index = h->plt.offset / plt_entry_size - 1;
3845 off = (plt_index + 3) * GOT_ENTRY_SIZE;
3846 base_got = htab->elf.sgotplt;
3847 }
3848
3849 dyn = htab->elf.dynamic_sections_created;
3850
3851 if (! WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
3852 || (info->shared
3853 && SYMBOL_REFERENCES_LOCAL (info, h))
3854 || (ELF_ST_VISIBILITY (h->other)
3855 && h->root.type == bfd_link_hash_undefweak))
3856 {
3857 /* This is actually a static link, or it is a -Bsymbolic
3858 link and the symbol is defined locally, or the symbol
3859 was forced to be local because of a version file. We
3860 must initialize this entry in the global offset table.
3861 Since the offset must always be a multiple of 8, we
3862 use the least significant bit to record whether we
3863 have initialized it already.
3864
3865 When doing a dynamic link, we create a .rela.got
3866 relocation entry to initialize the value. This is
3867 done in the finish_dynamic_symbol routine. */
3868 if ((off & 1) != 0)
3869 off &= ~1;
3870 else
3871 {
3872 bfd_put_64 (output_bfd, relocation,
3873 base_got->contents + off);
3874 /* Note that this is harmless for the GOTPLT64 case,
3875 as -1 | 1 still is -1. */
3876 h->got.offset |= 1;
3877 }
3878 }
3879 else
3880 unresolved_reloc = FALSE;
3881 }
3882 else
3883 {
3884 if (local_got_offsets == NULL)
3885 abort ();
3886
3887 off = local_got_offsets[r_symndx];
3888
3889 /* The offset must always be a multiple of 8. We use
3890 the least significant bit to record whether we have
3891 already generated the necessary reloc. */
3892 if ((off & 1) != 0)
3893 off &= ~1;
3894 else
3895 {
3896 bfd_put_64 (output_bfd, relocation,
3897 base_got->contents + off);
3898
3899 if (info->shared)
3900 {
3901 asection *s;
3902 Elf_Internal_Rela outrel;
3903
3904 /* We need to generate a R_X86_64_RELATIVE reloc
3905 for the dynamic linker. */
3906 s = htab->elf.srelgot;
3907 if (s == NULL)
3908 abort ();
3909
3910 outrel.r_offset = (base_got->output_section->vma
3911 + base_got->output_offset
3912 + off);
3913 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
3914 outrel.r_addend = relocation;
3915 elf_append_rela (output_bfd, s, &outrel);
3916 }
3917
3918 local_got_offsets[r_symndx] |= 1;
3919 }
3920 }
3921
3922 if (off >= (bfd_vma) -2)
3923 abort ();
3924
3925 relocation = base_got->output_section->vma
3926 + base_got->output_offset + off;
3927 if (r_type != R_X86_64_GOTPCREL && r_type != R_X86_64_GOTPCREL64)
3928 relocation -= htab->elf.sgotplt->output_section->vma
3929 - htab->elf.sgotplt->output_offset;
3930
3931 break;
3932
3933 case R_X86_64_GOTOFF64:
3934 /* Relocation is relative to the start of the global offset
3935 table. */
3936
3937 /* Check to make sure it isn't a protected function symbol
3938 for shared library since it may not be local when used
3939 as function address. */
3940 if (!info->executable
3941 && h
3942 && !SYMBOLIC_BIND (info, h)
3943 && h->def_regular
3944 && h->type == STT_FUNC
3945 && ELF_ST_VISIBILITY (h->other) == STV_PROTECTED)
3946 {
3947 (*_bfd_error_handler)
3948 (_("%B: relocation R_X86_64_GOTOFF64 against protected function `%s' can not be used when making a shared object"),
3949 input_bfd, h->root.root.string);
3950 bfd_set_error (bfd_error_bad_value);
3951 return FALSE;
3952 }
3953
3954 /* Note that sgot is not involved in this
3955 calculation. We always want the start of .got.plt. If we
3956 defined _GLOBAL_OFFSET_TABLE_ in a different way, as is
3957 permitted by the ABI, we might have to change this
3958 calculation. */
3959 relocation -= htab->elf.sgotplt->output_section->vma
3960 + htab->elf.sgotplt->output_offset;
3961 break;
3962
3963 case R_X86_64_GOTPC32:
3964 case R_X86_64_GOTPC64:
3965 /* Use global offset table as symbol value. */
3966 relocation = htab->elf.sgotplt->output_section->vma
3967 + htab->elf.sgotplt->output_offset;
3968 unresolved_reloc = FALSE;
3969 break;
3970
3971 case R_X86_64_PLTOFF64:
3972 /* Relocation is PLT entry relative to GOT. For local
3973 symbols it's the symbol itself relative to GOT. */
3974 if (h != NULL
3975 /* See PLT32 handling. */
3976 && h->plt.offset != (bfd_vma) -1
3977 && htab->elf.splt != NULL)
3978 {
3979 if (htab->plt_bnd != NULL)
3980 {
3981 resolved_plt = htab->plt_bnd;
3982 plt_offset = eh->plt_bnd.offset;
3983 }
3984 else
3985 {
3986 resolved_plt = htab->elf.splt;
3987 plt_offset = h->plt.offset;
3988 }
3989
3990 relocation = (resolved_plt->output_section->vma
3991 + resolved_plt->output_offset
3992 + plt_offset);
3993 unresolved_reloc = FALSE;
3994 }
3995
3996 relocation -= htab->elf.sgotplt->output_section->vma
3997 + htab->elf.sgotplt->output_offset;
3998 break;
3999
4000 case R_X86_64_PLT32:
4001 case R_X86_64_PLT32_BND:
4002 /* Relocation is to the entry for this symbol in the
4003 procedure linkage table. */
4004
4005 /* Resolve a PLT32 reloc against a local symbol directly,
4006 without using the procedure linkage table. */
4007 if (h == NULL)
4008 break;
4009
4010 if ((h->plt.offset == (bfd_vma) -1
4011 && eh->plt_got.offset == (bfd_vma) -1)
4012 || htab->elf.splt == NULL)
4013 {
4014 /* We didn't make a PLT entry for this symbol. This
4015 happens when statically linking PIC code, or when
4016 using -Bsymbolic. */
4017 break;
4018 }
4019
4020 if (h->plt.offset != (bfd_vma) -1)
4021 {
4022 if (htab->plt_bnd != NULL)
4023 {
4024 resolved_plt = htab->plt_bnd;
4025 plt_offset = eh->plt_bnd.offset;
4026 }
4027 else
4028 {
4029 resolved_plt = htab->elf.splt;
4030 plt_offset = h->plt.offset;
4031 }
4032 }
4033 else
4034 {
4035 /* Use the GOT PLT. */
4036 resolved_plt = htab->plt_got;
4037 plt_offset = eh->plt_got.offset;
4038 }
4039
4040 relocation = (resolved_plt->output_section->vma
4041 + resolved_plt->output_offset
4042 + plt_offset);
4043 unresolved_reloc = FALSE;
4044 break;
4045
4046 case R_X86_64_SIZE32:
4047 case R_X86_64_SIZE64:
4048 /* Set to symbol size. */
4049 relocation = st_size;
4050 goto direct;
4051
4052 case R_X86_64_PC8:
4053 case R_X86_64_PC16:
4054 case R_X86_64_PC32:
4055 case R_X86_64_PC32_BND:
4056 if (info->shared
4057 && (input_section->flags & SEC_ALLOC) != 0
4058 && (input_section->flags & SEC_READONLY) != 0
4059 && h != NULL)
4060 {
4061 bfd_boolean fail = FALSE;
4062 bfd_boolean branch
4063 = ((r_type == R_X86_64_PC32
4064 || r_type == R_X86_64_PC32_BND)
4065 && is_32bit_relative_branch (contents, rel->r_offset));
4066
4067 if (SYMBOL_REFERENCES_LOCAL (info, h))
4068 {
4069 /* Symbol is referenced locally. Make sure it is
4070 defined locally, or that this is a branch. */
4071 fail = !h->def_regular && !branch;
4072 }
4073 else if (!(info->executable
4074 && (h->needs_copy || eh->needs_copy)))
4075 {
4076 /* Symbol doesn't need copy reloc and isn't referenced
4077 locally. We only allow branch to symbol with
4078 non-default visibility. */
4079 fail = (!branch
4080 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT);
4081 }
4082
4083 if (fail)
4084 {
4085 const char *fmt;
4086 const char *v;
4087 const char *pic = "";
4088
4089 switch (ELF_ST_VISIBILITY (h->other))
4090 {
4091 case STV_HIDDEN:
4092 v = _("hidden symbol");
4093 break;
4094 case STV_INTERNAL:
4095 v = _("internal symbol");
4096 break;
4097 case STV_PROTECTED:
4098 v = _("protected symbol");
4099 break;
4100 default:
4101 v = _("symbol");
4102 pic = _("; recompile with -fPIC");
4103 break;
4104 }
4105
4106 if (h->def_regular)
4107 fmt = _("%B: relocation %s against %s `%s' can not be used when making a shared object%s");
4108 else
4109 fmt = _("%B: relocation %s against undefined %s `%s' can not be used when making a shared object%s");
4110
4111 (*_bfd_error_handler) (fmt, input_bfd,
4112 x86_64_elf_howto_table[r_type].name,
4113 v, h->root.root.string, pic);
4114 bfd_set_error (bfd_error_bad_value);
4115 return FALSE;
4116 }
4117 }
4118 /* Fall through. */
4119
4120 case R_X86_64_8:
4121 case R_X86_64_16:
4122 case R_X86_64_32:
4123 case R_X86_64_PC64:
4124 case R_X86_64_64:
4125 /* FIXME: The ABI says the linker should make sure the value is
4126 the same when it's zero-extended to 64 bits. */
4127
4128 direct:
4129 if ((input_section->flags & SEC_ALLOC) == 0)
4130 break;
4131
4132 /* Don't copy a pc-relative relocation into the output file
4133 if the symbol needs copy reloc. */
4134 if ((info->shared
4135 && !(info->executable
4136 && h != NULL
4137 && (h->needs_copy || eh->needs_copy)
4138 && IS_X86_64_PCREL_TYPE (r_type))
4139 && (h == NULL
4140 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4141 || h->root.type != bfd_link_hash_undefweak)
4142 && ((! IS_X86_64_PCREL_TYPE (r_type)
4143 && r_type != R_X86_64_SIZE32
4144 && r_type != R_X86_64_SIZE64)
4145 || ! SYMBOL_CALLS_LOCAL (info, h)))
4146 || (ELIMINATE_COPY_RELOCS
4147 && !info->shared
4148 && h != NULL
4149 && h->dynindx != -1
4150 && !h->non_got_ref
4151 && ((h->def_dynamic
4152 && !h->def_regular)
4153 || h->root.type == bfd_link_hash_undefweak
4154 || h->root.type == bfd_link_hash_undefined)))
4155 {
4156 Elf_Internal_Rela outrel;
4157 bfd_boolean skip, relocate;
4158 asection *sreloc;
4159
4160 /* When generating a shared object, these relocations
4161 are copied into the output file to be resolved at run
4162 time. */
4163 skip = FALSE;
4164 relocate = FALSE;
4165
4166 outrel.r_offset =
4167 _bfd_elf_section_offset (output_bfd, info, input_section,
4168 rel->r_offset);
4169 if (outrel.r_offset == (bfd_vma) -1)
4170 skip = TRUE;
4171 else if (outrel.r_offset == (bfd_vma) -2)
4172 skip = TRUE, relocate = TRUE;
4173
4174 outrel.r_offset += (input_section->output_section->vma
4175 + input_section->output_offset);
4176
4177 if (skip)
4178 memset (&outrel, 0, sizeof outrel);
4179
4180 /* h->dynindx may be -1 if this symbol was marked to
4181 become local. */
4182 else if (h != NULL
4183 && h->dynindx != -1
4184 && (IS_X86_64_PCREL_TYPE (r_type)
4185 || ! info->shared
4186 || ! SYMBOLIC_BIND (info, h)
4187 || ! h->def_regular))
4188 {
4189 outrel.r_info = htab->r_info (h->dynindx, r_type);
4190 outrel.r_addend = rel->r_addend;
4191 }
4192 else
4193 {
4194 /* This symbol is local, or marked to become local. */
4195 if (r_type == htab->pointer_r_type)
4196 {
4197 relocate = TRUE;
4198 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4199 outrel.r_addend = relocation + rel->r_addend;
4200 }
4201 else if (r_type == R_X86_64_64
4202 && !ABI_64_P (output_bfd))
4203 {
4204 relocate = TRUE;
4205 outrel.r_info = htab->r_info (0,
4206 R_X86_64_RELATIVE64);
4207 outrel.r_addend = relocation + rel->r_addend;
4208 /* Check addend overflow. */
4209 if ((outrel.r_addend & 0x80000000)
4210 != (rel->r_addend & 0x80000000))
4211 {
4212 const char *name;
4213 int addend = rel->r_addend;
4214 if (h && h->root.root.string)
4215 name = h->root.root.string;
4216 else
4217 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
4218 sym, NULL);
4219 if (addend < 0)
4220 (*_bfd_error_handler)
4221 (_("%B: addend -0x%x in relocation %s against "
4222 "symbol `%s' at 0x%lx in section `%A' is "
4223 "out of range"),
4224 input_bfd, input_section, addend,
4225 x86_64_elf_howto_table[r_type].name,
4226 name, (unsigned long) rel->r_offset);
4227 else
4228 (*_bfd_error_handler)
4229 (_("%B: addend 0x%x in relocation %s against "
4230 "symbol `%s' at 0x%lx in section `%A' is "
4231 "out of range"),
4232 input_bfd, input_section, addend,
4233 x86_64_elf_howto_table[r_type].name,
4234 name, (unsigned long) rel->r_offset);
4235 bfd_set_error (bfd_error_bad_value);
4236 return FALSE;
4237 }
4238 }
4239 else
4240 {
4241 long sindx;
4242
4243 if (bfd_is_abs_section (sec))
4244 sindx = 0;
4245 else if (sec == NULL || sec->owner == NULL)
4246 {
4247 bfd_set_error (bfd_error_bad_value);
4248 return FALSE;
4249 }
4250 else
4251 {
4252 asection *osec;
4253
4254 /* We are turning this relocation into one
4255 against a section symbol. It would be
4256 proper to subtract the symbol's value,
4257 osec->vma, from the emitted reloc addend,
4258 but ld.so expects buggy relocs. */
4259 osec = sec->output_section;
4260 sindx = elf_section_data (osec)->dynindx;
4261 if (sindx == 0)
4262 {
4263 asection *oi = htab->elf.text_index_section;
4264 sindx = elf_section_data (oi)->dynindx;
4265 }
4266 BFD_ASSERT (sindx != 0);
4267 }
4268
4269 outrel.r_info = htab->r_info (sindx, r_type);
4270 outrel.r_addend = relocation + rel->r_addend;
4271 }
4272 }
4273
4274 sreloc = elf_section_data (input_section)->sreloc;
4275
4276 if (sreloc == NULL || sreloc->contents == NULL)
4277 {
4278 r = bfd_reloc_notsupported;
4279 goto check_relocation_error;
4280 }
4281
4282 elf_append_rela (output_bfd, sreloc, &outrel);
4283
4284 /* If this reloc is against an external symbol, we do
4285 not want to fiddle with the addend. Otherwise, we
4286 need to include the symbol value so that it becomes
4287 an addend for the dynamic reloc. */
4288 if (! relocate)
4289 continue;
4290 }
4291
4292 break;
4293
4294 case R_X86_64_TLSGD:
4295 case R_X86_64_GOTPC32_TLSDESC:
4296 case R_X86_64_TLSDESC_CALL:
4297 case R_X86_64_GOTTPOFF:
4298 tls_type = GOT_UNKNOWN;
4299 if (h == NULL && local_got_offsets)
4300 tls_type = elf_x86_64_local_got_tls_type (input_bfd) [r_symndx];
4301 else if (h != NULL)
4302 tls_type = elf_x86_64_hash_entry (h)->tls_type;
4303
4304 if (! elf_x86_64_tls_transition (info, input_bfd,
4305 input_section, contents,
4306 symtab_hdr, sym_hashes,
4307 &r_type, tls_type, rel,
4308 relend, h, r_symndx))
4309 return FALSE;
4310
4311 if (r_type == R_X86_64_TPOFF32)
4312 {
4313 bfd_vma roff = rel->r_offset;
4314
4315 BFD_ASSERT (! unresolved_reloc);
4316
4317 if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSGD)
4318 {
4319 /* GD->LE transition. For 64bit, change
4320 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
4321 .word 0x6666; rex64; call __tls_get_addr
4322 into:
4323 movq %fs:0, %rax
4324 leaq foo@tpoff(%rax), %rax
4325 For 32bit, change
4326 leaq foo@tlsgd(%rip), %rdi
4327 .word 0x6666; rex64; call __tls_get_addr
4328 into:
4329 movl %fs:0, %eax
4330 leaq foo@tpoff(%rax), %rax
4331 For largepic, change:
4332 leaq foo@tlsgd(%rip), %rdi
4333 movabsq $__tls_get_addr@pltoff, %rax
4334 addq %rbx, %rax
4335 call *%rax
4336 into:
4337 movq %fs:0, %rax
4338 leaq foo@tpoff(%rax), %rax
4339 nopw 0x0(%rax,%rax,1) */
4340 int largepic = 0;
4341 if (ABI_64_P (output_bfd)
4342 && contents[roff + 5] == (bfd_byte) '\xb8')
4343 {
4344 memcpy (contents + roff - 3,
4345 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80"
4346 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
4347 largepic = 1;
4348 }
4349 else if (ABI_64_P (output_bfd))
4350 memcpy (contents + roff - 4,
4351 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
4352 16);
4353 else
4354 memcpy (contents + roff - 3,
4355 "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
4356 15);
4357 bfd_put_32 (output_bfd,
4358 elf_x86_64_tpoff (info, relocation),
4359 contents + roff + 8 + largepic);
4360 /* Skip R_X86_64_PC32/R_X86_64_PLT32/R_X86_64_PLTOFF64. */
4361 rel++;
4362 continue;
4363 }
4364 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTPC32_TLSDESC)
4365 {
4366 /* GDesc -> LE transition.
4367 It's originally something like:
4368 leaq x@tlsdesc(%rip), %rax
4369
4370 Change it to:
4371 movl $x@tpoff, %rax. */
4372
4373 unsigned int val, type;
4374
4375 type = bfd_get_8 (input_bfd, contents + roff - 3);
4376 val = bfd_get_8 (input_bfd, contents + roff - 1);
4377 bfd_put_8 (output_bfd, 0x48 | ((type >> 2) & 1),
4378 contents + roff - 3);
4379 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
4380 bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7),
4381 contents + roff - 1);
4382 bfd_put_32 (output_bfd,
4383 elf_x86_64_tpoff (info, relocation),
4384 contents + roff);
4385 continue;
4386 }
4387 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSDESC_CALL)
4388 {
4389 /* GDesc -> LE transition.
4390 It's originally:
4391 call *(%rax)
4392 Turn it into:
4393 xchg %ax,%ax. */
4394 bfd_put_8 (output_bfd, 0x66, contents + roff);
4395 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
4396 continue;
4397 }
4398 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTTPOFF)
4399 {
4400 /* IE->LE transition:
4401 For 64bit, originally it can be one of:
4402 movq foo@gottpoff(%rip), %reg
4403 addq foo@gottpoff(%rip), %reg
4404 We change it into:
4405 movq $foo, %reg
4406 leaq foo(%reg), %reg
4407 addq $foo, %reg.
4408 For 32bit, originally it can be one of:
4409 movq foo@gottpoff(%rip), %reg
4410 addl foo@gottpoff(%rip), %reg
4411 We change it into:
4412 movq $foo, %reg
4413 leal foo(%reg), %reg
4414 addl $foo, %reg. */
4415
4416 unsigned int val, type, reg;
4417
4418 if (roff >= 3)
4419 val = bfd_get_8 (input_bfd, contents + roff - 3);
4420 else
4421 val = 0;
4422 type = bfd_get_8 (input_bfd, contents + roff - 2);
4423 reg = bfd_get_8 (input_bfd, contents + roff - 1);
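/* The ModRM byte here uses mod == 0 with RIP-relative addressing, so shifting right by 3 leaves just the destination register number. */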
4424 reg >>= 3;
4425 if (type == 0x8b)
4426 {
4427 /* movq */
4428 if (val == 0x4c)
4429 bfd_put_8 (output_bfd, 0x49,
4430 contents + roff - 3);
4431 else if (!ABI_64_P (output_bfd) && val == 0x44)
4432 bfd_put_8 (output_bfd, 0x41,
4433 contents + roff - 3);
4434 bfd_put_8 (output_bfd, 0xc7,
4435 contents + roff - 2);
4436 bfd_put_8 (output_bfd, 0xc0 | reg,
4437 contents + roff - 1);
4438 }
4439 else if (reg == 4)
4440 {
4441 /* addq/addl -> addq/addl - addressing with %rsp/%r12
4442 is special */
4443 if (val == 0x4c)
4444 bfd_put_8 (output_bfd, 0x49,
4445 contents + roff - 3);
4446 else if (!ABI_64_P (output_bfd) && val == 0x44)
4447 bfd_put_8 (output_bfd, 0x41,
4448 contents + roff - 3);
4449 bfd_put_8 (output_bfd, 0x81,
4450 contents + roff - 2);
4451 bfd_put_8 (output_bfd, 0xc0 | reg,
4452 contents + roff - 1);
4453 }
4454 else
4455 {
4456 /* addq/addl -> leaq/leal */
4457 if (val == 0x4c)
4458 bfd_put_8 (output_bfd, 0x4d,
4459 contents + roff - 3);
4460 else if (!ABI_64_P (output_bfd) && val == 0x44)
4461 bfd_put_8 (output_bfd, 0x45,
4462 contents + roff - 3);
4463 bfd_put_8 (output_bfd, 0x8d,
4464 contents + roff - 2);
4465 bfd_put_8 (output_bfd, 0x80 | reg | (reg << 3),
4466 contents + roff - 1);
4467 }
4468 bfd_put_32 (output_bfd,
4469 elf_x86_64_tpoff (info, relocation),
4470 contents + roff);
4471 continue;
4472 }
4473 else
4474 BFD_ASSERT (FALSE);
4475 }
4476
4477 if (htab->elf.sgot == NULL)
4478 abort ();
4479
4480 if (h != NULL)
4481 {
4482 off = h->got.offset;
4483 offplt = elf_x86_64_hash_entry (h)->tlsdesc_got;
4484 }
4485 else
4486 {
4487 if (local_got_offsets == NULL)
4488 abort ();
4489
4490 off = local_got_offsets[r_symndx];
4491 offplt = local_tlsdesc_gotents[r_symndx];
4492 }
4493
4494 if ((off & 1) != 0)
4495 off &= ~1;
4496 else
4497 {
4498 Elf_Internal_Rela outrel;
4499 int dr_type, indx;
4500 asection *sreloc;
4501
4502 if (htab->elf.srelgot == NULL)
4503 abort ();
4504
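/* Relocations for symbols resolved locally are emitted against symbol index 0. */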
4505 indx = h && h->dynindx != -1 ? h->dynindx : 0;
4506
4507 if (GOT_TLS_GDESC_P (tls_type))
4508 {
4509 outrel.r_info = htab->r_info (indx, R_X86_64_TLSDESC);
4510 BFD_ASSERT (htab->sgotplt_jump_table_size + offplt
4511 + 2 * GOT_ENTRY_SIZE <= htab->elf.sgotplt->size);
4512 outrel.r_offset = (htab->elf.sgotplt->output_section->vma
4513 + htab->elf.sgotplt->output_offset
4514 + offplt
4515 + htab->sgotplt_jump_table_size);
4516 sreloc = htab->elf.srelplt;
4517 if (indx == 0)
4518 outrel.r_addend = relocation - elf_x86_64_dtpoff_base (info);
4519 else
4520 outrel.r_addend = 0;
4521 elf_append_rela (output_bfd, sreloc, &outrel);
4522 }
4523
4524 sreloc = htab->elf.srelgot;
4525
4526 outrel.r_offset = (htab->elf.sgot->output_section->vma
4527 + htab->elf.sgot->output_offset + off);
4528
4529 if (GOT_TLS_GD_P (tls_type))
4530 dr_type = R_X86_64_DTPMOD64;
4531 else if (GOT_TLS_GDESC_P (tls_type))
4532 goto dr_done;
4533 else
4534 dr_type = R_X86_64_TPOFF64;
4535
4536 bfd_put_64 (output_bfd, 0, htab->elf.sgot->contents + off);
4537 outrel.r_addend = 0;
4538 if ((dr_type == R_X86_64_TPOFF64
4539 || dr_type == R_X86_64_TLSDESC) && indx == 0)
4540 outrel.r_addend = relocation - elf_x86_64_dtpoff_base (info);
4541 outrel.r_info = htab->r_info (indx, dr_type);
4542
4543 elf_append_rela (output_bfd, sreloc, &outrel);
4544
4545 if (GOT_TLS_GD_P (tls_type))
4546 {
4547 if (indx == 0)
4548 {
4549 BFD_ASSERT (! unresolved_reloc);
4550 bfd_put_64 (output_bfd,
4551 relocation - elf_x86_64_dtpoff_base (info),
4552 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
4553 }
4554 else
4555 {
4556 bfd_put_64 (output_bfd, 0,
4557 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
4558 outrel.r_info = htab->r_info (indx,
4559 R_X86_64_DTPOFF64);
4560 outrel.r_offset += GOT_ENTRY_SIZE;
4561 elf_append_rela (output_bfd, sreloc,
4562 &outrel);
4563 }
4564 }
4565
4566 dr_done:
4567 if (h != NULL)
4568 h->got.offset |= 1;
4569 else
4570 local_got_offsets[r_symndx] |= 1;
4571 }
4572
4573 if (off >= (bfd_vma) -2
4574 && ! GOT_TLS_GDESC_P (tls_type))
4575 abort ();
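/* If the TLS transition above left the relocation type unchanged, resolve it against the GOT or TLSDESC slot set up above; otherwise rewrite the code sequence for the new access model below. */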
4576 if (r_type == ELF32_R_TYPE (rel->r_info))
4577 {
4578 if (r_type == R_X86_64_GOTPC32_TLSDESC
4579 || r_type == R_X86_64_TLSDESC_CALL)
4580 relocation = htab->elf.sgotplt->output_section->vma
4581 + htab->elf.sgotplt->output_offset
4582 + offplt + htab->sgotplt_jump_table_size;
4583 else
4584 relocation = htab->elf.sgot->output_section->vma
4585 + htab->elf.sgot->output_offset + off;
4586 unresolved_reloc = FALSE;
4587 }
4588 else
4589 {
4590 bfd_vma roff = rel->r_offset;
4591
4592 if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSGD)
4593 {
4594 /* GD->IE transition. For 64bit, change
4595 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
4596 .word 0x6666; rex64; call __tls_get_addr@plt
4597 into:
4598 movq %fs:0, %rax
4599 addq foo@gottpoff(%rip), %rax
4600 For 32bit, change
4601 leaq foo@tlsgd(%rip), %rdi
4602 .word 0x6666; rex64; call __tls_get_addr@plt
4603 into:
4604 movl %fs:0, %eax
4605 addq foo@gottpoff(%rip), %rax
4606 For largepic, change:
4607 leaq foo@tlsgd(%rip), %rdi
4608 movabsq $__tls_get_addr@pltoff, %rax
4609 addq %rbx, %rax
4610 call *%rax
4611 into:
4612 movq %fs:0, %rax
4613 addq foo@gottpoff(%rax), %rax
4614 nopw 0x0(%rax,%rax,1) */
4615 int largepic = 0;
4616 if (ABI_64_P (output_bfd)
4617 && contents[roff + 5] == (bfd_byte) '\xb8')
4618 {
4619 memcpy (contents + roff - 3,
4620 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05"
4621 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
4622 largepic = 1;
4623 }
4624 else if (ABI_64_P (output_bfd))
4625 memcpy (contents + roff - 4,
4626 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
4627 16);
4628 else
4629 memcpy (contents + roff - 3,
4630 "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
4631 15);
4632
4633 relocation = (htab->elf.sgot->output_section->vma
4634 + htab->elf.sgot->output_offset + off
4635 - roff
4636 - largepic
4637 - input_section->output_section->vma
4638 - input_section->output_offset
4639 - 12);
4640 bfd_put_32 (output_bfd, relocation,
4641 contents + roff + 8 + largepic);
4642 /* Skip R_X86_64_PLT32/R_X86_64_PLTOFF64. */
4643 rel++;
4644 continue;
4645 }
4646 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTPC32_TLSDESC)
4647 {
4648 /* GDesc -> IE transition.
4649 It's originally something like:
4650 leaq x@tlsdesc(%rip), %rax
4651
4652 Change it to:
4653 movq x@gottpoff(%rip), %rax # before xchg %ax,%ax. */
4654
4655 /* Now modify the instruction as appropriate. To
4656 turn a leaq into a movq in the form we use it, it
4657 suffices to change the second byte from 0x8d to
4658 0x8b. */
4659 bfd_put_8 (output_bfd, 0x8b, contents + roff - 2);
4660
4661 bfd_put_32 (output_bfd,
4662 htab->elf.sgot->output_section->vma
4663 + htab->elf.sgot->output_offset + off
4664 - rel->r_offset
4665 - input_section->output_section->vma
4666 - input_section->output_offset
4667 - 4,
4668 contents + roff);
4669 continue;
4670 }
4671 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSDESC_CALL)
4672 {
4673 /* GDesc -> IE transition.
4674 It's originally:
4675 call *(%rax)
4676
4677 Change it to:
4678 xchg %ax, %ax. */
4679
4680 bfd_put_8 (output_bfd, 0x66, contents + roff);
4681 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
4682 continue;
4683 }
4684 else
4685 BFD_ASSERT (FALSE);
4686 }
4687 break;
4688
4689 case R_X86_64_TLSLD:
4690 if (! elf_x86_64_tls_transition (info, input_bfd,
4691 input_section, contents,
4692 symtab_hdr, sym_hashes,
4693 &r_type, GOT_UNKNOWN,
4694 rel, relend, h, r_symndx))
4695 return FALSE;
4696
4697 if (r_type != R_X86_64_TLSLD)
4698 {
4699 /* LD->LE transition:
4700 leaq foo@tlsld(%rip), %rdi; call __tls_get_addr.
4701 For 64bit, we change it into:
4702 .word 0x6666; .byte 0x66; movq %fs:0, %rax.
4703 For 32bit, we change it into:
4704 nopl 0x0(%rax); movl %fs:0, %eax.
4705 For largepic, change:
4706 leaq foo@tlsgd(%rip), %rdi
4707 movabsq $__tls_get_addr@pltoff, %rax
4708 addq %rbx, %rax
4709 call *%rax
4710 into:
4711 data32 data32 data32 nopw %cs:0x0(%rax,%rax,1)
4712 movq %fs:0, %rax */
4713
4714 BFD_ASSERT (r_type == R_X86_64_TPOFF32);
4715 if (ABI_64_P (output_bfd)
4716 && contents[rel->r_offset + 5] == (bfd_byte) '\xb8')
4717 memcpy (contents + rel->r_offset - 3,
4718 "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0"
4719 "\x64\x48\x8b\x04\x25\0\0\0", 22);
4720 else if (ABI_64_P (output_bfd))
4721 memcpy (contents + rel->r_offset - 3,
4722 "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12);
4723 else
4724 memcpy (contents + rel->r_offset - 3,
4725 "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12);
4726 /* Skip R_X86_64_PC32/R_X86_64_PLT32/R_X86_64_PLTOFF64. */
4727 rel++;
4728 continue;
4729 }
4730
4731 if (htab->elf.sgot == NULL)
4732 abort ();
4733
4734 off = htab->tls_ld_got.offset;
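/* As with other GOT offsets, the least significant bit records whether the entry has already been initialized. */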
4735 if (off & 1)
4736 off &= ~1;
4737 else
4738 {
4739 Elf_Internal_Rela outrel;
4740
4741 if (htab->elf.srelgot == NULL)
4742 abort ();
4743
4744 outrel.r_offset = (htab->elf.sgot->output_section->vma
4745 + htab->elf.sgot->output_offset + off);
4746
4747 bfd_put_64 (output_bfd, 0,
4748 htab->elf.sgot->contents + off);
4749 bfd_put_64 (output_bfd, 0,
4750 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
4751 outrel.r_info = htab->r_info (0, R_X86_64_DTPMOD64);
4752 outrel.r_addend = 0;
4753 elf_append_rela (output_bfd, htab->elf.srelgot,
4754 &outrel);
4755 htab->tls_ld_got.offset |= 1;
4756 }
4757 relocation = htab->elf.sgot->output_section->vma
4758 + htab->elf.sgot->output_offset + off;
4759 unresolved_reloc = FALSE;
4760 break;
4761
4762 case R_X86_64_DTPOFF32:
4763 if (!info->executable || (input_section->flags & SEC_CODE) == 0)
4764 relocation -= elf_x86_64_dtpoff_base (info);
4765 else
4766 relocation = elf_x86_64_tpoff (info, relocation);
4767 break;
4768
4769 case R_X86_64_TPOFF32:
4770 case R_X86_64_TPOFF64:
4771 BFD_ASSERT (info->executable);
4772 relocation = elf_x86_64_tpoff (info, relocation);
4773 break;
4774
4775 case R_X86_64_DTPOFF64:
4776 BFD_ASSERT ((input_section->flags & SEC_CODE) == 0);
4777 relocation -= elf_x86_64_dtpoff_base (info);
4778 break;
4779
4780 default:
4781 break;
4782 }
4783
4784 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
4785 because such sections are not SEC_ALLOC and thus ld.so will
4786 not process them. */
4787 if (unresolved_reloc
4788 && !((input_section->flags & SEC_DEBUGGING) != 0
4789 && h->def_dynamic)
4790 && _bfd_elf_section_offset (output_bfd, info, input_section,
4791 rel->r_offset) != (bfd_vma) -1)
4792 {
4793 (*_bfd_error_handler)
4794 (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
4795 input_bfd,
4796 input_section,
4797 (long) rel->r_offset,
4798 howto->name,
4799 h->root.root.string);
4800 return FALSE;
4801 }
4802
4803 do_relocation:
4804 r = _bfd_final_link_relocate (howto, input_bfd, input_section,
4805 contents, rel->r_offset,
4806 relocation, rel->r_addend);
4807
4808 check_relocation_error:
4809 if (r != bfd_reloc_ok)
4810 {
4811 const char *name;
4812
4813 if (h != NULL)
4814 name = h->root.root.string;
4815 else
4816 {
4817 name = bfd_elf_string_from_elf_section (input_bfd,
4818 symtab_hdr->sh_link,
4819 sym->st_name);
4820 if (name == NULL)
4821 return FALSE;
4822 if (*name == '\0')
4823 name = bfd_section_name (input_bfd, sec);
4824 }
4825
4826 if (r == bfd_reloc_overflow)
4827 {
4828 if (! ((*info->callbacks->reloc_overflow)
4829 (info, (h ? &h->root : NULL), name, howto->name,
4830 (bfd_vma) 0, input_bfd, input_section,
4831 rel->r_offset)))
4832 return FALSE;
4833 }
4834 else
4835 {
4836 (*_bfd_error_handler)
4837 (_("%B(%A+0x%lx): reloc against `%s': error %d"),
4838 input_bfd, input_section,
4839 (long) rel->r_offset, name, (int) r);
4840 return FALSE;
4841 }
4842 }
4843 }
4844
4845 return TRUE;
4846 }
4847
4848 /* Finish up dynamic symbol handling. We set the contents of various
4849 dynamic sections here. */
4850
4851 static bfd_boolean
4852 elf_x86_64_finish_dynamic_symbol (bfd *output_bfd,
4853 struct bfd_link_info *info,
4854 struct elf_link_hash_entry *h,
4855 Elf_Internal_Sym *sym)
4856 {
4857 struct elf_x86_64_link_hash_table *htab;
4858 const struct elf_x86_64_backend_data *abed;
4859 bfd_boolean use_plt_bnd;
4860 struct elf_x86_64_link_hash_entry *eh;
4861
4862 htab = elf_x86_64_hash_table (info);
4863 if (htab == NULL)
4864 return FALSE;
4865
4866 /* Use MPX backend data in case of BND relocation. Use .plt_bnd
4867 section only if there is .plt section. */
4868 use_plt_bnd = htab->elf.splt != NULL && htab->plt_bnd != NULL;
4869 abed = (use_plt_bnd
4870 ? &elf_x86_64_bnd_arch_bed
4871 : get_elf_x86_64_backend_data (output_bfd));
4872
4873 eh = (struct elf_x86_64_link_hash_entry *) h;
4874
4875 if (h->plt.offset != (bfd_vma) -1)
4876 {
4877 bfd_vma plt_index;
4878 bfd_vma got_offset, plt_offset, plt_plt_offset, plt_got_offset;
4879 bfd_vma plt_plt_insn_end, plt_got_insn_size;
4880 Elf_Internal_Rela rela;
4881 bfd_byte *loc;
4882 asection *plt, *gotplt, *relplt, *resolved_plt;
4883 const struct elf_backend_data *bed;
4884 bfd_vma plt_got_pcrel_offset;
4885
4886 /* When building a static executable, use .iplt, .igot.plt and
4887 .rela.iplt sections for STT_GNU_IFUNC symbols. */
4888 if (htab->elf.splt != NULL)
4889 {
4890 plt = htab->elf.splt;
4891 gotplt = htab->elf.sgotplt;
4892 relplt = htab->elf.srelplt;
4893 }
4894 else
4895 {
4896 plt = htab->elf.iplt;
4897 gotplt = htab->elf.igotplt;
4898 relplt = htab->elf.irelplt;
4899 }
4900
4901 /* This symbol has an entry in the procedure linkage table. Set
4902 it up. */
4903 if ((h->dynindx == -1
4904 && !((h->forced_local || info->executable)
4905 && h->def_regular
4906 && h->type == STT_GNU_IFUNC))
4907 || plt == NULL
4908 || gotplt == NULL
4909 || relplt == NULL)
4910 abort ();
4911
4912 /* Get the index in the procedure linkage table which
4913 corresponds to this symbol. This is the index of this symbol
4914 in all the symbols for which we are making plt entries. The
4915 first entry in the procedure linkage table is reserved.
4916
4917 Get the offset into the .got table of the entry that
4918 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
4919 bytes. The first three are reserved for the dynamic linker.
4920
4921 For static executables, we don't reserve anything. */
4922
4923 if (plt == htab->elf.splt)
4924 {
4925 got_offset = h->plt.offset / abed->plt_entry_size - 1;
4926 got_offset = (got_offset + 3) * GOT_ENTRY_SIZE;
4927 }
4928 else
4929 {
4930 got_offset = h->plt.offset / abed->plt_entry_size;
4931 got_offset = got_offset * GOT_ENTRY_SIZE;
4932 }
4933
4934 plt_plt_insn_end = abed->plt_plt_insn_end;
4935 plt_plt_offset = abed->plt_plt_offset;
4936 plt_got_insn_size = abed->plt_got_insn_size;
4937 plt_got_offset = abed->plt_got_offset;
4938 if (use_plt_bnd)
4939 {
4940 /* Use the second PLT with BND relocations. */
4941 const bfd_byte *plt_entry, *plt2_entry;
4942
4943 if (eh->has_bnd_reloc)
4944 {
4945 plt_entry = elf_x86_64_bnd_plt_entry;
4946 plt2_entry = elf_x86_64_bnd_plt2_entry;
4947 }
4948 else
4949 {
4950 plt_entry = elf_x86_64_legacy_plt_entry;
4951 plt2_entry = elf_x86_64_legacy_plt2_entry;
4952
4953 /* Subtract 1 since there is no BND prefix. */
4954 plt_plt_insn_end -= 1;
4955 plt_plt_offset -= 1;
4956 plt_got_insn_size -= 1;
4957 plt_got_offset -= 1;
4958 }
4959
4960 BFD_ASSERT (sizeof (elf_x86_64_bnd_plt_entry)
4961 == sizeof (elf_x86_64_legacy_plt_entry));
4962
4963 /* Fill in the entry in the procedure linkage table. */
4964 memcpy (plt->contents + h->plt.offset,
4965 plt_entry, sizeof (elf_x86_64_legacy_plt_entry));
4966 /* Fill in the entry in the second PLT. */
4967 memcpy (htab->plt_bnd->contents + eh->plt_bnd.offset,
4968 plt2_entry, sizeof (elf_x86_64_legacy_plt2_entry));
4969
4970 resolved_plt = htab->plt_bnd;
4971 plt_offset = eh->plt_bnd.offset;
4972 }
4973 else
4974 {
4975 /* Fill in the entry in the procedure linkage table. */
4976 memcpy (plt->contents + h->plt.offset, abed->plt_entry,
4977 abed->plt_entry_size);
4978
4979 resolved_plt = plt;
4980 plt_offset = h->plt.offset;
4981 }
4982
4983 /* Insert the relocation positions of the plt section. */
4984
4985 /* Put in the offset to the GOT entry, relative to the end of the
4986 PC-relative instruction that refers to it. */
4987 plt_got_pcrel_offset = (gotplt->output_section->vma
4988 + gotplt->output_offset
4989 + got_offset
4990 - resolved_plt->output_section->vma
4991 - resolved_plt->output_offset
4992 - plt_offset
4993 - plt_got_insn_size);
4994
4995 /* Check PC-relative offset overflow in PLT entry. */
4996 if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff)
4997 info->callbacks->einfo (_("%F%B: PC-relative offset overflow in PLT entry for `%s'\n"),
4998 output_bfd, h->root.root.string);
4999
5000 bfd_put_32 (output_bfd, plt_got_pcrel_offset,
5001 resolved_plt->contents + plt_offset + plt_got_offset);
5002
5003 /* Fill in the entry in the global offset table, initially this
5004 points to the second part of the PLT entry. */
5005 bfd_put_64 (output_bfd, (plt->output_section->vma
5006 + plt->output_offset
5007 + h->plt.offset + abed->plt_lazy_offset),
5008 gotplt->contents + got_offset);
5009
5010 /* Fill in the entry in the .rela.plt section. */
5011 rela.r_offset = (gotplt->output_section->vma
5012 + gotplt->output_offset
5013 + got_offset);
5014 if (h->dynindx == -1
5015 || ((info->executable
5016 || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
5017 && h->def_regular
5018 && h->type == STT_GNU_IFUNC))
5019 {
5020 /* If an STT_GNU_IFUNC symbol is locally defined, generate
5021 R_X86_64_IRELATIVE instead of R_X86_64_JUMP_SLOT. */
5022 rela.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
5023 rela.r_addend = (h->root.u.def.value
5024 + h->root.u.def.section->output_section->vma
5025 + h->root.u.def.section->output_offset);
5026 /* R_X86_64_IRELATIVE comes last. */
5027 plt_index = htab->next_irelative_index--;
5028 }
5029 else
5030 {
5031 rela.r_info = htab->r_info (h->dynindx, R_X86_64_JUMP_SLOT);
5032 rela.r_addend = 0;
5033 plt_index = htab->next_jump_slot_index++;
5034 }
5035
5036 /* Don't fill PLT entry for static executables. */
5037 if (plt == htab->elf.splt)
5038 {
5039 bfd_vma plt0_offset = h->plt.offset + plt_plt_insn_end;
5040
5041 /* Put relocation index. */
5042 bfd_put_32 (output_bfd, plt_index,
5043 plt->contents + h->plt.offset + abed->plt_reloc_offset);
5044
5045 /* Put offset for jmp .PLT0 and check for overflow. We don't
5046 check relocation index for overflow since branch displacement
5047 will overflow first. */
5048 if (plt0_offset > 0x80000000)
5049 info->callbacks->einfo (_("%F%B: branch displacement overflow in PLT entry for `%s'\n"),
5050 output_bfd, h->root.root.string);
5051 bfd_put_32 (output_bfd, - plt0_offset,
5052 plt->contents + h->plt.offset + plt_plt_offset);
5053 }
5054
5055 bed = get_elf_backend_data (output_bfd);
5056 loc = relplt->contents + plt_index * bed->s->sizeof_rela;
5057 bed->s->swap_reloca_out (output_bfd, &rela, loc);
5058 }
5059 else if (eh->plt_got.offset != (bfd_vma) -1)
5060 {
5061 bfd_vma got_offset, plt_offset, plt_got_offset, plt_got_insn_size;
5062 asection *plt, *got;
5063 bfd_boolean got_after_plt;
5064 int32_t got_pcrel_offset;
5065 const bfd_byte *got_plt_entry;
5066
5067 /* Set the entry in the GOT procedure linkage table. */
5068 plt = htab->plt_got;
5069 got = htab->elf.sgot;
5070 got_offset = h->got.offset;
5071
5072 if (got_offset == (bfd_vma) -1
5073 || h->type == STT_GNU_IFUNC
5074 || plt == NULL
5075 || got == NULL)
5076 abort ();
5077
5078 /* Use the second PLT entry template for the GOT PLT since they
5079 are identical. */
5080 plt_got_insn_size = elf_x86_64_bnd_arch_bed.plt_got_insn_size;
5081 plt_got_offset = elf_x86_64_bnd_arch_bed.plt_got_offset;
5082 if (eh->has_bnd_reloc)
5083 got_plt_entry = elf_x86_64_bnd_plt2_entry;
5084 else
5085 {
5086 got_plt_entry = elf_x86_64_legacy_plt2_entry;
5087
5088 /* Subtract 1 since there is no BND prefix. */
5089 plt_got_insn_size -= 1;
5090 plt_got_offset -= 1;
5091 }
5092
5093 /* Fill in the entry in the GOT procedure linkage table. */
5094 plt_offset = eh->plt_got.offset;
5095 memcpy (plt->contents + plt_offset,
5096 got_plt_entry, sizeof (elf_x86_64_legacy_plt2_entry));
5097
5098 /* Put in the offset to the GOT entry, relative to the end of
5099 the PC-relative instruction that refers to it. */
5100 got_pcrel_offset = (got->output_section->vma
5101 + got->output_offset
5102 + got_offset
5103 - plt->output_section->vma
5104 - plt->output_offset
5105 - plt_offset
5106 - plt_got_insn_size);
5107
5108 /* Check PC-relative offset overflow in GOT PLT entry. */
5109 got_after_plt = got->output_section->vma > plt->output_section->vma;
5110 if ((got_after_plt && got_pcrel_offset < 0)
5111 || (!got_after_plt && got_pcrel_offset > 0))
5112 info->callbacks->einfo (_("%F%B: PC-relative offset overflow in GOT PLT entry for `%s'\n"),
5113 output_bfd, h->root.root.string);
5114
5115 bfd_put_32 (output_bfd, got_pcrel_offset,
5116 plt->contents + plt_offset + plt_got_offset);
5117 }
5118
5119 if (!h->def_regular
5120 && (h->plt.offset != (bfd_vma) -1
5121 || eh->plt_got.offset != (bfd_vma) -1))
5122 {
5123 /* Mark the symbol as undefined, rather than as defined in
5124 the .plt section. Leave the value if there were any
5125 relocations where pointer equality matters (this is a clue
5126 for the dynamic linker, to make function pointer
5127 comparisons work between an application and shared
5128 library), otherwise set it to zero. If a function is only
5129 called from a binary, there is no need to slow down
5130 shared libraries because of that. */
5131 sym->st_shndx = SHN_UNDEF;
5132 if (!h->pointer_equality_needed)
5133 sym->st_value = 0;
5134 }
5135
5136 if (h->got.offset != (bfd_vma) -1
5137 && ! GOT_TLS_GD_ANY_P (elf_x86_64_hash_entry (h)->tls_type)
5138 && elf_x86_64_hash_entry (h)->tls_type != GOT_TLS_IE)
5139 {
5140 Elf_Internal_Rela rela;
5141
5142 /* This symbol has an entry in the global offset table. Set it
5143 up. */
5144 if (htab->elf.sgot == NULL || htab->elf.srelgot == NULL)
5145 abort ();
5146
5147 rela.r_offset = (htab->elf.sgot->output_section->vma
5148 + htab->elf.sgot->output_offset
5149 + (h->got.offset &~ (bfd_vma) 1));
5150
5151 /* If this is a static link, or it is a -Bsymbolic link and the
5152 symbol is defined locally or was forced to be local because
5153 of a version file, we just want to emit a RELATIVE reloc.
5154 The entry in the global offset table will already have been
5155 initialized in the relocate_section function. */
5156 if (h->def_regular
5157 && h->type == STT_GNU_IFUNC)
5158 {
5159 if (info->shared)
5160 {
5161 /* Generate R_X86_64_GLOB_DAT. */
5162 goto do_glob_dat;
5163 }
5164 else
5165 {
5166 asection *plt;
5167
5168 if (!h->pointer_equality_needed)
5169 abort ();
5170
5171 /* For a non-shared object we can't use .got.plt, which
5172 contains the real function address, if we need pointer
5173 equality. We load the GOT entry with the PLT entry. */
5174 plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt;
5175 bfd_put_64 (output_bfd, (plt->output_section->vma
5176 + plt->output_offset
5177 + h->plt.offset),
5178 htab->elf.sgot->contents + h->got.offset);
5179 return TRUE;
5180 }
5181 }
5182 else if (info->shared
5183 && SYMBOL_REFERENCES_LOCAL (info, h))
5184 {
5185 if (!h->def_regular)
5186 return FALSE;
5187 BFD_ASSERT ((h->got.offset & 1) != 0);
5188 rela.r_info = htab->r_info (0, R_X86_64_RELATIVE);
5189 rela.r_addend = (h->root.u.def.value
5190 + h->root.u.def.section->output_section->vma
5191 + h->root.u.def.section->output_offset);
5192 }
5193 else
5194 {
5195 BFD_ASSERT ((h->got.offset & 1) == 0);
5196 do_glob_dat:
5197 bfd_put_64 (output_bfd, (bfd_vma) 0,
5198 htab->elf.sgot->contents + h->got.offset);
5199 rela.r_info = htab->r_info (h->dynindx, R_X86_64_GLOB_DAT);
5200 rela.r_addend = 0;
5201 }
5202
5203 elf_append_rela (output_bfd, htab->elf.srelgot, &rela);
5204 }
5205
5206 if (h->needs_copy)
5207 {
5208 Elf_Internal_Rela rela;
5209
5210 /* This symbol needs a copy reloc. Set it up. */
5211
5212 if (h->dynindx == -1
5213 || (h->root.type != bfd_link_hash_defined
5214 && h->root.type != bfd_link_hash_defweak)
5215 || htab->srelbss == NULL)
5216 abort ();
5217
5218 rela.r_offset = (h->root.u.def.value
5219 + h->root.u.def.section->output_section->vma
5220 + h->root.u.def.section->output_offset);
5221 rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY);
5222 rela.r_addend = 0;
5223 elf_append_rela (output_bfd, htab->srelbss, &rela);
5224 }
5225
5226 return TRUE;
5227 }
5228
5229 /* Finish up local dynamic symbol handling. We set the contents of
5230 various dynamic sections here. */
5231
5232 static bfd_boolean
5233 elf_x86_64_finish_local_dynamic_symbol (void **slot, void *inf)
5234 {
5235 struct elf_link_hash_entry *h
5236 = (struct elf_link_hash_entry *) *slot;
5237 struct bfd_link_info *info
5238 = (struct bfd_link_info *) inf;
5239
5240 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
5241 info, h, NULL);
5242 }
5243
5244 /* Used to decide how to sort relocs in an optimal manner for the
5245 dynamic linker, before writing them out. */
5246
5247 static enum elf_reloc_type_class
5248 elf_x86_64_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
5249 const asection *rel_sec ATTRIBUTE_UNUSED,
5250 const Elf_Internal_Rela *rela)
5251 {
5252 switch ((int) ELF32_R_TYPE (rela->r_info))
5253 {
5254 case R_X86_64_RELATIVE:
5255 case R_X86_64_RELATIVE64:
5256 return reloc_class_relative;
5257 case R_X86_64_JUMP_SLOT:
5258 return reloc_class_plt;
5259 case R_X86_64_COPY:
5260 return reloc_class_copy;
5261 default:
5262 return reloc_class_normal;
5263 }
5264 }
5265
5266 /* Finish up the dynamic sections. */
5267
5268 static bfd_boolean
5269 elf_x86_64_finish_dynamic_sections (bfd *output_bfd,
5270 struct bfd_link_info *info)
5271 {
5272 struct elf_x86_64_link_hash_table *htab;
5273 bfd *dynobj;
5274 asection *sdyn;
5275 const struct elf_x86_64_backend_data *abed;
5276
5277 htab = elf_x86_64_hash_table (info);
5278 if (htab == NULL)
5279 return FALSE;
5280
5281 /* Use MPX backend data in case of BND relocation. Use .plt_bnd
5282 section only if there is .plt section. */
5283 abed = (htab->elf.splt != NULL && htab->plt_bnd != NULL
5284 ? &elf_x86_64_bnd_arch_bed
5285 : get_elf_x86_64_backend_data (output_bfd));
5286
5287 dynobj = htab->elf.dynobj;
5288 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
5289
5290 if (htab->elf.dynamic_sections_created)
5291 {
5292 bfd_byte *dyncon, *dynconend;
5293 const struct elf_backend_data *bed;
5294 bfd_size_type sizeof_dyn;
5295
5296 if (sdyn == NULL || htab->elf.sgot == NULL)
5297 abort ();
5298
5299 bed = get_elf_backend_data (dynobj);
5300 sizeof_dyn = bed->s->sizeof_dyn;
5301 dyncon = sdyn->contents;
5302 dynconend = sdyn->contents + sdyn->size;
5303 for (; dyncon < dynconend; dyncon += sizeof_dyn)
5304 {
5305 Elf_Internal_Dyn dyn;
5306 asection *s;
5307
5308 (*bed->s->swap_dyn_in) (dynobj, dyncon, &dyn);
5309
5310 switch (dyn.d_tag)
5311 {
5312 default:
5313 continue;
5314
5315 case DT_PLTGOT:
5316 s = htab->elf.sgotplt;
5317 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
5318 break;
5319
5320 case DT_JMPREL:
5321 dyn.d_un.d_ptr = htab->elf.srelplt->output_section->vma;
5322 break;
5323
5324 case DT_PLTRELSZ:
5325 s = htab->elf.srelplt->output_section;
5326 dyn.d_un.d_val = s->size;
5327 break;
5328
5329 case DT_RELASZ:
5330 /* The procedure linkage table relocs (DT_JMPREL) should
5331 not be included in the overall relocs (DT_RELA).
5332 Therefore, we override the DT_RELASZ entry here to
5333 make it not include the JMPREL relocs. Since the
5334 linker script arranges for .rela.plt to follow all
5335 other relocation sections, we don't have to worry
5336 about changing the DT_RELA entry. */
5337 if (htab->elf.srelplt != NULL)
5338 {
5339 s = htab->elf.srelplt->output_section;
5340 dyn.d_un.d_val -= s->size;
5341 }
5342 break;
5343
5344 case DT_TLSDESC_PLT:
5345 s = htab->elf.splt;
5346 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
5347 + htab->tlsdesc_plt;
5348 break;
5349
5350 case DT_TLSDESC_GOT:
5351 s = htab->elf.sgot;
5352 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
5353 + htab->tlsdesc_got;
5354 break;
5355 }
5356
5357 (*bed->s->swap_dyn_out) (output_bfd, &dyn, dyncon);
5358 }
5359
5360 /* Fill in the special first entry in the procedure linkage table. */
5361 if (htab->elf.splt && htab->elf.splt->size > 0)
5362 {
5363 /* Fill in the first entry in the procedure linkage table. */
5364 memcpy (htab->elf.splt->contents,
5365 abed->plt0_entry, abed->plt_entry_size);
5366 /* Add the offset for pushq GOT+8(%rip); since the instruction
5367 uses 6 bytes, subtract this value. */
5368 bfd_put_32 (output_bfd,
5369 (htab->elf.sgotplt->output_section->vma
5370 + htab->elf.sgotplt->output_offset
5371 + 8
5372 - htab->elf.splt->output_section->vma
5373 - htab->elf.splt->output_offset
5374 - 6),
5375 htab->elf.splt->contents + abed->plt0_got1_offset);
5376 /* Add offset for the PC-relative instruction accessing GOT+16,
5377 subtracting the offset to the end of that instruction. */
5378 bfd_put_32 (output_bfd,
5379 (htab->elf.sgotplt->output_section->vma
5380 + htab->elf.sgotplt->output_offset
5381 + 16
5382 - htab->elf.splt->output_section->vma
5383 - htab->elf.splt->output_offset
5384 - abed->plt0_got2_insn_end),
5385 htab->elf.splt->contents + abed->plt0_got2_offset);
5386
5387 elf_section_data (htab->elf.splt->output_section)
5388 ->this_hdr.sh_entsize = abed->plt_entry_size;
5389
5390 if (htab->tlsdesc_plt)
5391 {
5392 bfd_put_64 (output_bfd, (bfd_vma) 0,
5393 htab->elf.sgot->contents + htab->tlsdesc_got);
5394
5395 memcpy (htab->elf.splt->contents + htab->tlsdesc_plt,
5396 abed->plt0_entry, abed->plt_entry_size);
5397
5398 /* Add the offset for pushq GOT+8(%rip); since the
5399 instruction uses 6 bytes, subtract this value. */
5400 bfd_put_32 (output_bfd,
5401 (htab->elf.sgotplt->output_section->vma
5402 + htab->elf.sgotplt->output_offset
5403 + 8
5404 - htab->elf.splt->output_section->vma
5405 - htab->elf.splt->output_offset
5406 - htab->tlsdesc_plt
5407 - 6),
5408 htab->elf.splt->contents
5409 + htab->tlsdesc_plt + abed->plt0_got1_offset);
5410 /* Add offset for the PC-relative instruction accessing GOT+TDG,
5411 where TDG stands for htab->tlsdesc_got, subtracting the offset
5412 to the end of that instruction. */
5413 bfd_put_32 (output_bfd,
5414 (htab->elf.sgot->output_section->vma
5415 + htab->elf.sgot->output_offset
5416 + htab->tlsdesc_got
5417 - htab->elf.splt->output_section->vma
5418 - htab->elf.splt->output_offset
5419 - htab->tlsdesc_plt
5420 - abed->plt0_got2_insn_end),
5421 htab->elf.splt->contents
5422 + htab->tlsdesc_plt + abed->plt0_got2_offset);
5423 }
5424 }
5425 }
5426
5427 if (htab->plt_bnd != NULL)
5428 elf_section_data (htab->plt_bnd->output_section)
5429 ->this_hdr.sh_entsize = sizeof (elf_x86_64_bnd_plt2_entry);
5430
5431 if (htab->elf.sgotplt)
5432 {
5433 if (bfd_is_abs_section (htab->elf.sgotplt->output_section))
5434 {
5435 (*_bfd_error_handler)
5436 (_("discarded output section: `%A'"), htab->elf.sgotplt);
5437 return FALSE;
5438 }
5439
5440 /* Fill in the first three entries in the global offset table. */
5441 if (htab->elf.sgotplt->size > 0)
5442 {
5443 /* Set the first entry in the global offset table to the address of
5444 the dynamic section. */
5445 if (sdyn == NULL)
5446 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents);
5447 else
5448 bfd_put_64 (output_bfd,
5449 sdyn->output_section->vma + sdyn->output_offset,
5450 htab->elf.sgotplt->contents);
5451 /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
5452 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents + GOT_ENTRY_SIZE);
5453 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents + GOT_ENTRY_SIZE*2);
5454 }
5455
5456 elf_section_data (htab->elf.sgotplt->output_section)->this_hdr.sh_entsize =
5457 GOT_ENTRY_SIZE;
5458 }
5459
5460 /* Adjust .eh_frame for .plt section. */
5461 if (htab->plt_eh_frame != NULL
5462 && htab->plt_eh_frame->contents != NULL)
5463 {
5464 if (htab->elf.splt != NULL
5465 && htab->elf.splt->size != 0
5466 && (htab->elf.splt->flags & SEC_EXCLUDE) == 0
5467 && htab->elf.splt->output_section != NULL
5468 && htab->plt_eh_frame->output_section != NULL)
5469 {
5470 bfd_vma plt_start = htab->elf.splt->output_section->vma;
5471 bfd_vma eh_frame_start = htab->plt_eh_frame->output_section->vma
5472 + htab->plt_eh_frame->output_offset
5473 + PLT_FDE_START_OFFSET;
5474 bfd_put_signed_32 (dynobj, plt_start - eh_frame_start,
5475 htab->plt_eh_frame->contents
5476 + PLT_FDE_START_OFFSET);
5477 }
5478 if (htab->plt_eh_frame->sec_info_type == SEC_INFO_TYPE_EH_FRAME)
5479 {
5480 if (! _bfd_elf_write_section_eh_frame (output_bfd, info,
5481 htab->plt_eh_frame,
5482 htab->plt_eh_frame->contents))
5483 return FALSE;
5484 }
5485 }
5486
5487 if (htab->elf.sgot && htab->elf.sgot->size > 0)
5488 elf_section_data (htab->elf.sgot->output_section)->this_hdr.sh_entsize
5489 = GOT_ENTRY_SIZE;
5490
5491 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
5492 htab_traverse (htab->loc_hash_table,
5493 elf_x86_64_finish_local_dynamic_symbol,
5494 info);
5495
5496 return TRUE;
5497 }
5498
5499 /* Return an array of PLT entry symbol values. */
5500
5501 static bfd_vma *
5502 elf_x86_64_get_plt_sym_val (bfd *abfd, asymbol **dynsyms, asection *plt,
5503 asection *relplt)
5504 {
5505 bfd_boolean (*slurp_relocs) (bfd *, asection *, asymbol **, bfd_boolean);
5506 arelent *p;
5507 long count, i;
5508 bfd_vma *plt_sym_val;
5509 bfd_vma plt_offset;
5510 bfd_byte *plt_contents;
5511 const struct elf_x86_64_backend_data *bed;
5512 Elf_Internal_Shdr *hdr;
5513 asection *plt_bnd;
5514
5515 /* Get the .plt section contents. PLT passed down may point to the
5516 .plt.bnd section. Make sure that PLT always points to the .plt
5517 section. */
5518 plt_bnd = bfd_get_section_by_name (abfd, ".plt.bnd");
5519 if (plt_bnd)
5520 {
5521 if (plt != plt_bnd)
5522 abort ();
5523 plt = bfd_get_section_by_name (abfd, ".plt");
5524 if (plt == NULL)
5525 abort ();
5526 bed = &elf_x86_64_bnd_arch_bed;
5527 }
5528 else
5529 bed = get_elf_x86_64_backend_data (abfd);
5530
5531 plt_contents = (bfd_byte *) bfd_malloc (plt->size);
5532 if (plt_contents == NULL)
5533 return NULL;
5534 if (!bfd_get_section_contents (abfd, (asection *) plt,
5535 plt_contents, 0, plt->size))
5536 {
5537 bad_return:
5538 free (plt_contents);
5539 return NULL;
5540 }
5541
5542 slurp_relocs = get_elf_backend_data (abfd)->s->slurp_reloc_table;
5543 if (! (*slurp_relocs) (abfd, relplt, dynsyms, TRUE))
5544 goto bad_return;
5545
5546 hdr = &elf_section_data (relplt)->this_hdr;
5547 count = relplt->size / hdr->sh_entsize;
5548
5549 plt_sym_val = (bfd_vma *) bfd_malloc (sizeof (bfd_vma) * count);
5550 if (plt_sym_val == NULL)
5551 goto bad_return;
5552
5553 for (i = 0; i < count; i++)
5554 plt_sym_val[i] = -1;
5555
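/* Skip the reserved first PLT entry and read the relocation index stored in each remaining entry. */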
5556 plt_offset = bed->plt_entry_size;
5557 p = relplt->relocation;
5558 for (i = 0; i < count; i++, p++)
5559 {
5560 long reloc_index;
5561
5562 if (p->howto->type != R_X86_64_JUMP_SLOT
5563 && p->howto->type != R_X86_64_IRELATIVE)
5564 continue;
5565
5566 reloc_index = H_GET_32 (abfd, (plt_contents + plt_offset
5567 + bed->plt_reloc_offset));
5568 if (reloc_index >= count)
5569 abort ();
5570 if (plt_bnd)
5571 {
5572 /* This is the index in .plt section. */
5573 long plt_index = plt_offset / bed->plt_entry_size;
5574 /* Store VMA + the offset in .plt.bnd section. */
5575 plt_sym_val[reloc_index] =
5576 (plt_bnd->vma
5577 + (plt_index - 1) * sizeof (elf_x86_64_legacy_plt2_entry));
5578 }
5579 else
5580 plt_sym_val[reloc_index] = plt->vma + plt_offset;
5581 plt_offset += bed->plt_entry_size;
5582 }
5583
5584 free (plt_contents);
5585
5586 return plt_sym_val;
5587 }
5588
5589 /* Similar to _bfd_elf_get_synthetic_symtab, with .plt.bnd section
5590 support. */
5591
5592 static long
5593 elf_x86_64_get_synthetic_symtab (bfd *abfd,
5594 long symcount,
5595 asymbol **syms,
5596 long dynsymcount,
5597 asymbol **dynsyms,
5598 asymbol **ret)
5599 {
5600 /* Pass the .plt.bnd section to _bfd_elf_ifunc_get_synthetic_symtab
5601 as PLT if it exists. */
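  /* When -z bndplt is used, calls branch through the stubs in .plt.bnd,
     so the synthetic PLT symbols should carry .plt.bnd addresses.  */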
5602 asection *plt = bfd_get_section_by_name (abfd, ".plt.bnd");
5603 if (plt == NULL)
5604 plt = bfd_get_section_by_name (abfd, ".plt");
5605 return _bfd_elf_ifunc_get_synthetic_symtab (abfd, symcount, syms,
5606 dynsymcount, dynsyms, ret,
5607 plt,
5608 elf_x86_64_get_plt_sym_val);
5609 }
5610
5611 /* Handle an x86-64 specific section when reading an object file. This
5612 is called when elfcode.h finds a section with an unknown type. */
5613
5614 static bfd_boolean
5615 elf_x86_64_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr,
5616 const char *name, int shindex)
5617 {
5618 if (hdr->sh_type != SHT_X86_64_UNWIND)
5619 return FALSE;
5620
5621 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
5622 return FALSE;
5623
5624 return TRUE;
5625 }
5626
5627 /* Hook called by the linker routine which adds symbols from an object
5628 file. We use it to put SHN_X86_64_LCOMMON items in .lbss, instead
5629 of .bss. */
5630
5631 static bfd_boolean
5632 elf_x86_64_add_symbol_hook (bfd *abfd,
5633 struct bfd_link_info *info,
5634 Elf_Internal_Sym *sym,
5635 const char **namep ATTRIBUTE_UNUSED,
5636 flagword *flagsp ATTRIBUTE_UNUSED,
5637 asection **secp,
5638 bfd_vma *valp)
5639 {
5640 asection *lcomm;
5641
5642 switch (sym->st_shndx)
5643 {
5644 case SHN_X86_64_LCOMMON:
5645 lcomm = bfd_get_section_by_name (abfd, "LARGE_COMMON");
5646 if (lcomm == NULL)
5647 {
5648 lcomm = bfd_make_section_with_flags (abfd,
5649 "LARGE_COMMON",
5650 (SEC_ALLOC
5651 | SEC_IS_COMMON
5652 | SEC_LINKER_CREATED));
5653 if (lcomm == NULL)
5654 return FALSE;
5655 elf_section_flags (lcomm) |= SHF_X86_64_LARGE;
5656 }
5657 *secp = lcomm;
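      /* As for ordinary common symbols, the value recorded is the
	 symbol's size.  */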
5658 *valp = sym->st_size;
5659 return TRUE;
5660 }
5661
5662 if ((ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC
5663 || ELF_ST_BIND (sym->st_info) == STB_GNU_UNIQUE)
5664 && (abfd->flags & DYNAMIC) == 0
5665 && bfd_get_flavour (info->output_bfd) == bfd_target_elf_flavour)
5666 elf_tdata (info->output_bfd)->has_gnu_symbols = TRUE;
5667
5668 return TRUE;
5669 }
5670
5671
5672 /* Given a BFD section, try to locate the corresponding ELF section
5673 index. */
5674
5675 static bfd_boolean
5676 elf_x86_64_elf_section_from_bfd_section (bfd *abfd ATTRIBUTE_UNUSED,
5677 asection *sec, int *index_return)
5678 {
5679 if (sec == &_bfd_elf_large_com_section)
5680 {
5681 *index_return = SHN_X86_64_LCOMMON;
5682 return TRUE;
5683 }
5684 return FALSE;
5685 }
5686
5687 /* Process a symbol. */
5688
5689 static void
5690 elf_x86_64_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
5691 asymbol *asym)
5692 {
5693 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
5694
5695 switch (elfsym->internal_elf_sym.st_shndx)
5696 {
5697 case SHN_X86_64_LCOMMON:
5698 asym->section = &_bfd_elf_large_com_section;
5699 asym->value = elfsym->internal_elf_sym.st_size;
5700 /* Common symbol doesn't set BSF_GLOBAL. */
5701 asym->flags &= ~BSF_GLOBAL;
5702 break;
5703 }
5704 }
5705
5706 static bfd_boolean
5707 elf_x86_64_common_definition (Elf_Internal_Sym *sym)
5708 {
5709 return (sym->st_shndx == SHN_COMMON
5710 || sym->st_shndx == SHN_X86_64_LCOMMON);
5711 }
5712
5713 static unsigned int
5714 elf_x86_64_common_section_index (asection *sec)
5715 {
5716 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
5717 return SHN_COMMON;
5718 else
5719 return SHN_X86_64_LCOMMON;
5720 }
5721
5722 static asection *
5723 elf_x86_64_common_section (asection *sec)
5724 {
5725 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
5726 return bfd_com_section_ptr;
5727 else
5728 return &_bfd_elf_large_com_section;
5729 }
5730
5731 static bfd_boolean
5732 elf_x86_64_merge_symbol (struct elf_link_hash_entry *h,
5733 const Elf_Internal_Sym *sym,
5734 asection **psec,
5735 bfd_boolean newdef,
5736 bfd_boolean olddef,
5737 bfd *oldbfd,
5738 const asection *oldsec)
5739 {
5740 /* A normal common symbol and a large common symbol result in a
5741 normal common symbol. We turn the large common symbol into a
5742 normal one. */
5743 if (!olddef
5744 && h->root.type == bfd_link_hash_common
5745 && !newdef
5746 && bfd_is_com_section (*psec)
5747 && oldsec != *psec)
5748 {
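      /* The new symbol is an ordinary common while the old one lives in
	 a large common section: move the old common into a plain COMMON
	 section.  */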
5749 if (sym->st_shndx == SHN_COMMON
5750 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) != 0)
5751 {
5752 h->root.u.c.p->section
5753 = bfd_make_section_old_way (oldbfd, "COMMON");
5754 h->root.u.c.p->section->flags = SEC_ALLOC;
5755 }
5756 else if (sym->st_shndx == SHN_X86_64_LCOMMON
5757 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) == 0)
5758 *psec = bfd_com_section_ptr;
5759 }
5760
5761 return TRUE;
5762 }
5763
5764 static int
5765 elf_x86_64_additional_program_headers (bfd *abfd,
5766 struct bfd_link_info *info ATTRIBUTE_UNUSED)
5767 {
5768 asection *s;
5769 int count = 0;
5770
5771 /* Check to see if we need a large readonly segment. */
5772 s = bfd_get_section_by_name (abfd, ".lrodata");
5773 if (s && (s->flags & SEC_LOAD))
5774 count++;
5775
5776 	  /* Check to see if we need a large data segment. Since the .lbss section
5777 is placed right after the .bss section, there should be no need for
5778 a large data segment just because of .lbss. */
5779 s = bfd_get_section_by_name (abfd, ".ldata");
5780 if (s && (s->flags & SEC_LOAD))
5781 count++;
5782
5783 return count;
5784 }
5785
5786 /* Return TRUE if symbol should be hashed in the `.gnu.hash' section. */
5787
5788 static bfd_boolean
5789 elf_x86_64_hash_symbol (struct elf_link_hash_entry *h)
5790 {
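  /* A symbol that has a PLT entry, is not defined in a regular object,
     and whose address is never needed for pointer equality is only ever
     reached through its PLT entry, so it need not appear in .gnu.hash.  */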
5791 if (h->plt.offset != (bfd_vma) -1
5792 && !h->def_regular
5793 && !h->pointer_equality_needed)
5794 return FALSE;
5795
5796 return _bfd_elf_hash_symbol (h);
5797 }
5798
5799 /* Return TRUE iff relocations for INPUT are compatible with OUTPUT. */
5800
5801 static bfd_boolean
5802 elf_x86_64_relocs_compatible (const bfd_target *input,
5803 const bfd_target *output)
5804 {
5805 return ((xvec_get_elf_backend_data (input)->s->elfclass
5806 == xvec_get_elf_backend_data (output)->s->elfclass)
5807 && _bfd_elf_relocs_compatible (input, output));
5808 }
5809
5810 static const struct bfd_elf_special_section
5811 elf_x86_64_special_sections[]=
5812 {
5813 { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5814 { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
5815 { STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE},
5816 { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5817 { STRING_COMMA_LEN (".ldata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5818 { STRING_COMMA_LEN (".lrodata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
5819 { NULL, 0, 0, 0, 0 }
5820 };
5821
5822 #define TARGET_LITTLE_SYM x86_64_elf64_vec
5823 #define TARGET_LITTLE_NAME "elf64-x86-64"
5824 #define ELF_ARCH bfd_arch_i386
5825 #define ELF_TARGET_ID X86_64_ELF_DATA
5826 #define ELF_MACHINE_CODE EM_X86_64
5827 #define ELF_MAXPAGESIZE 0x200000
5828 #define ELF_MINPAGESIZE 0x1000
5829 #define ELF_COMMONPAGESIZE 0x1000
5830
5831 #define elf_backend_can_gc_sections 1
5832 #define elf_backend_can_refcount 1
5833 #define elf_backend_want_got_plt 1
5834 #define elf_backend_plt_readonly 1
5835 #define elf_backend_want_plt_sym 0
5836 #define elf_backend_got_header_size (GOT_ENTRY_SIZE*3)
5837 #define elf_backend_rela_normal 1
5838 #define elf_backend_plt_alignment 4
5839
5840 #define elf_info_to_howto elf_x86_64_info_to_howto
5841
5842 #define bfd_elf64_bfd_link_hash_table_create \
5843 elf_x86_64_link_hash_table_create
5844 #define bfd_elf64_bfd_reloc_type_lookup elf_x86_64_reloc_type_lookup
5845 #define bfd_elf64_bfd_reloc_name_lookup \
5846 elf_x86_64_reloc_name_lookup
5847
5848 #define elf_backend_adjust_dynamic_symbol elf_x86_64_adjust_dynamic_symbol
5849 #define elf_backend_relocs_compatible elf_x86_64_relocs_compatible
5850 #define elf_backend_check_relocs elf_x86_64_check_relocs
5851 #define elf_backend_copy_indirect_symbol elf_x86_64_copy_indirect_symbol
5852 #define elf_backend_create_dynamic_sections elf_x86_64_create_dynamic_sections
5853 #define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections
5854 #define elf_backend_finish_dynamic_symbol elf_x86_64_finish_dynamic_symbol
5855 #define elf_backend_gc_mark_hook elf_x86_64_gc_mark_hook
5856 #define elf_backend_gc_sweep_hook elf_x86_64_gc_sweep_hook
5857 #define elf_backend_grok_prstatus elf_x86_64_grok_prstatus
5858 #define elf_backend_grok_psinfo elf_x86_64_grok_psinfo
5859 #ifdef CORE_HEADER
5860 #define elf_backend_write_core_note elf_x86_64_write_core_note
5861 #endif
5862 #define elf_backend_reloc_type_class elf_x86_64_reloc_type_class
5863 #define elf_backend_relocate_section elf_x86_64_relocate_section
5864 #define elf_backend_size_dynamic_sections elf_x86_64_size_dynamic_sections
5865 #define elf_backend_always_size_sections elf_x86_64_always_size_sections
5866 #define elf_backend_init_index_section _bfd_elf_init_1_index_section
5867 #define elf_backend_object_p elf64_x86_64_elf_object_p
5868 #define bfd_elf64_mkobject elf_x86_64_mkobject
5869 #define bfd_elf64_get_synthetic_symtab elf_x86_64_get_synthetic_symtab
5870
5871 #define elf_backend_section_from_shdr \
5872 elf_x86_64_section_from_shdr
5873
5874 #define elf_backend_section_from_bfd_section \
5875 elf_x86_64_elf_section_from_bfd_section
5876 #define elf_backend_add_symbol_hook \
5877 elf_x86_64_add_symbol_hook
5878 #define elf_backend_symbol_processing \
5879 elf_x86_64_symbol_processing
5880 #define elf_backend_common_section_index \
5881 elf_x86_64_common_section_index
5882 #define elf_backend_common_section \
5883 elf_x86_64_common_section
5884 #define elf_backend_common_definition \
5885 elf_x86_64_common_definition
5886 #define elf_backend_merge_symbol \
5887 elf_x86_64_merge_symbol
5888 #define elf_backend_special_sections \
5889 elf_x86_64_special_sections
5890 #define elf_backend_additional_program_headers \
5891 elf_x86_64_additional_program_headers
5892 #define elf_backend_hash_symbol \
5893 elf_x86_64_hash_symbol
5894
5895 #include "elf64-target.h"
5896
5897 /* FreeBSD support. */
5898
5899 #undef TARGET_LITTLE_SYM
5900 #define TARGET_LITTLE_SYM x86_64_elf64_fbsd_vec
5901 #undef TARGET_LITTLE_NAME
5902 #define TARGET_LITTLE_NAME "elf64-x86-64-freebsd"
5903
5904 #undef ELF_OSABI
5905 #define ELF_OSABI ELFOSABI_FREEBSD
5906
5907 #undef elf64_bed
5908 #define elf64_bed elf64_x86_64_fbsd_bed
5909
5910 #include "elf64-target.h"
5911
5912 /* Solaris 2 support. */
5913
5914 #undef TARGET_LITTLE_SYM
5915 #define TARGET_LITTLE_SYM x86_64_elf64_sol2_vec
5916 #undef TARGET_LITTLE_NAME
5917 #define TARGET_LITTLE_NAME "elf64-x86-64-sol2"
5918
5919 /* Restore default: we cannot use ELFOSABI_SOLARIS, otherwise ELFOSABI_NONE
5920 objects won't be recognized. */
5921 #undef ELF_OSABI
5922
5923 #undef elf64_bed
5924 #define elf64_bed elf64_x86_64_sol2_bed
5925
5926 /* The 64-bit static TLS arena size is rounded to the nearest 16-byte
5927 boundary. */
5928 #undef elf_backend_static_tls_alignment
5929 #define elf_backend_static_tls_alignment 16
5930
5931 /* The Solaris 2 ABI requires a plt symbol on all platforms.
5932
5933 Cf. Linker and Libraries Guide, Ch. 2, Link-Editor, Generating the Output
5934 File, p.63. */
5935 #undef elf_backend_want_plt_sym
5936 #define elf_backend_want_plt_sym 1
5937
5938 #include "elf64-target.h"
5939
5940 /* Native Client support. */
5941
5942 static bfd_boolean
5943 elf64_x86_64_nacl_elf_object_p (bfd *abfd)
5944 {
5945 /* Set the right machine number for a NaCl x86-64 ELF64 file. */
5946 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64_nacl);
5947 return TRUE;
5948 }
5949
5950 #undef TARGET_LITTLE_SYM
5951 #define TARGET_LITTLE_SYM x86_64_elf64_nacl_vec
5952 #undef TARGET_LITTLE_NAME
5953 #define TARGET_LITTLE_NAME "elf64-x86-64-nacl"
5954 #undef elf64_bed
5955 #define elf64_bed elf64_x86_64_nacl_bed
5956
5957 #undef ELF_MAXPAGESIZE
5958 #undef ELF_MINPAGESIZE
5959 #undef ELF_COMMONPAGESIZE
5960 #define ELF_MAXPAGESIZE 0x10000
5961 #define ELF_MINPAGESIZE 0x10000
5962 #define ELF_COMMONPAGESIZE 0x10000
5963
5964 /* Restore defaults. */
5965 #undef ELF_OSABI
5966 #undef elf_backend_static_tls_alignment
5967 #undef elf_backend_want_plt_sym
5968 #define elf_backend_want_plt_sym 0
5969
5970 /* NaCl uses substantially different PLT entries for the same effects. */
5971
5972 #undef elf_backend_plt_alignment
5973 #define elf_backend_plt_alignment 5
5974 #define NACL_PLT_ENTRY_SIZE 64
5975 #define NACLMASK 0xe0 /* 32-byte alignment mask. */
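/* NaCl requires indirect branch targets to be 32-byte aligned and to lie
   inside the sandbox, whose base is kept in %r15; hence the and/add
   sequence applied to %r11 before every indirect jump below.  */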
5976
5977 static const bfd_byte elf_x86_64_nacl_plt0_entry[NACL_PLT_ENTRY_SIZE] =
5978 {
5979 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
5980 0x4c, 0x8b, 0x1d, 16, 0, 0, 0, /* mov GOT+16(%rip), %r11 */
5981 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
5982 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
5983 0x41, 0xff, 0xe3, /* jmpq *%r11 */
5984
5985 /* 9-byte nop sequence to pad out to the next 32-byte boundary. */
5986 0x66, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw 0x0(%rax,%rax,1) */
5987
5988 /* 32 bytes of nop to pad out to the standard size. */
5989 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
5990 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5991 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
5992 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5993 0x66, /* excess data32 prefix */
5994 0x90 /* nop */
5995 };
5996
5997 static const bfd_byte elf_x86_64_nacl_plt_entry[NACL_PLT_ENTRY_SIZE] =
5998 {
5999 0x4c, 0x8b, 0x1d, 0, 0, 0, 0, /* mov name@GOTPCREL(%rip),%r11 */
6000 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
6001 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
6002 0x41, 0xff, 0xe3, /* jmpq *%r11 */
6003
6004 /* 15-byte nop sequence to pad out to the next 32-byte boundary. */
6005 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
6006 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6007
6008 /* Lazy GOT entries point here (32-byte aligned). */
6009 0x68, /* pushq immediate */
6010 0, 0, 0, 0, /* replaced with index into relocation table. */
6011 0xe9, /* jmp relative */
6012 0, 0, 0, 0, /* replaced with offset to start of .plt0. */
6013
6014 /* 22 bytes of nop to pad out to the standard size. */
6015 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */
6016 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
6017 0x0f, 0x1f, 0x80, 0, 0, 0, 0, /* nopl 0x0(%rax) */
6018 };
6019
6020 /* .eh_frame covering the .plt section. */
6021
6022 static const bfd_byte elf_x86_64_nacl_eh_frame_plt[] =
6023 {
6024 #if (PLT_CIE_LENGTH != 20 \
6025 || PLT_FDE_LENGTH != 36 \
6026 || PLT_FDE_START_OFFSET != 4 + PLT_CIE_LENGTH + 8 \
6027 || PLT_FDE_LEN_OFFSET != 4 + PLT_CIE_LENGTH + 12)
6028 # error "Need elf_x86_64_backend_data parameters for eh_frame_plt offsets!"
6029 #endif
6030 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
6031 0, 0, 0, 0, /* CIE ID */
6032 1, /* CIE version */
6033 'z', 'R', 0, /* Augmentation string */
6034 1, /* Code alignment factor */
6035 0x78, /* Data alignment factor */
6036 16, /* Return address column */
6037 1, /* Augmentation size */
6038 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
6039 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
6040 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
6041 DW_CFA_nop, DW_CFA_nop,
6042
6043 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
6044 PLT_CIE_LENGTH + 8, 0, 0, 0,/* CIE pointer */
6045 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
6046 0, 0, 0, 0, /* .plt size goes here */
6047 0, /* Augmentation size */
6048 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
6049 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
6050 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
6051 DW_CFA_advance_loc + 58, /* DW_CFA_advance_loc: 58 to __PLT__+64 */
6052 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
6053 13, /* Block length */
6054 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
6055 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
6056 DW_OP_const1u, 63, DW_OP_and, DW_OP_const1u, 37, DW_OP_ge,
6057 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
6058 DW_CFA_nop, DW_CFA_nop
6059 };
6060
6061 static const struct elf_x86_64_backend_data elf_x86_64_nacl_arch_bed =
6062 {
6063 elf_x86_64_nacl_plt0_entry, /* plt0_entry */
6064 elf_x86_64_nacl_plt_entry, /* plt_entry */
6065 NACL_PLT_ENTRY_SIZE, /* plt_entry_size */
6066 2, /* plt0_got1_offset */
6067 9, /* plt0_got2_offset */
6068 13, /* plt0_got2_insn_end */
6069 3, /* plt_got_offset */
6070 33, /* plt_reloc_offset */
6071 38, /* plt_plt_offset */
6072 7, /* plt_got_insn_size */
6073 42, /* plt_plt_insn_end */
6074 32, /* plt_lazy_offset */
6075 elf_x86_64_nacl_eh_frame_plt, /* eh_frame_plt */
6076 sizeof (elf_x86_64_nacl_eh_frame_plt), /* eh_frame_plt_size */
6077 };
6078
6079 #undef elf_backend_arch_data
6080 #define elf_backend_arch_data &elf_x86_64_nacl_arch_bed
6081
6082 #undef elf_backend_object_p
6083 #define elf_backend_object_p elf64_x86_64_nacl_elf_object_p
6084 #undef elf_backend_modify_segment_map
6085 #define elf_backend_modify_segment_map nacl_modify_segment_map
6086 #undef elf_backend_modify_program_headers
6087 #define elf_backend_modify_program_headers nacl_modify_program_headers
6088 #undef elf_backend_final_write_processing
6089 #define elf_backend_final_write_processing nacl_final_write_processing
6090
6091 #include "elf64-target.h"
6092
6093 /* Native Client x32 support. */
6094
6095 static bfd_boolean
6096 elf32_x86_64_nacl_elf_object_p (bfd *abfd)
6097 {
6098 /* Set the right machine number for a NaCl x86-64 ELF32 file. */
6099 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32_nacl);
6100 return TRUE;
6101 }
6102
6103 #undef TARGET_LITTLE_SYM
6104 #define TARGET_LITTLE_SYM x86_64_elf32_nacl_vec
6105 #undef TARGET_LITTLE_NAME
6106 #define TARGET_LITTLE_NAME "elf32-x86-64-nacl"
6107 #undef elf32_bed
6108 #define elf32_bed elf32_x86_64_nacl_bed
6109
6110 #define bfd_elf32_bfd_link_hash_table_create \
6111 elf_x86_64_link_hash_table_create
6112 #define bfd_elf32_bfd_reloc_type_lookup \
6113 elf_x86_64_reloc_type_lookup
6114 #define bfd_elf32_bfd_reloc_name_lookup \
6115 elf_x86_64_reloc_name_lookup
6116 #define bfd_elf32_mkobject \
6117 elf_x86_64_mkobject
6118 #define bfd_elf32_get_synthetic_symtab \
6119 elf_x86_64_get_synthetic_symtab
6120
6121 #undef elf_backend_object_p
6122 #define elf_backend_object_p \
6123 elf32_x86_64_nacl_elf_object_p
6124
6125 #undef elf_backend_bfd_from_remote_memory
6126 #define elf_backend_bfd_from_remote_memory \
6127 _bfd_elf32_bfd_from_remote_memory
6128
6129 #undef elf_backend_size_info
6130 #define elf_backend_size_info \
6131 _bfd_elf32_size_info
6132
6133 #include "elf32-target.h"
6134
6135 /* Restore defaults. */
6136 #undef elf_backend_object_p
6137 #define elf_backend_object_p elf64_x86_64_elf_object_p
6138 #undef elf_backend_bfd_from_remote_memory
6139 #undef elf_backend_size_info
6140 #undef elf_backend_modify_segment_map
6141 #undef elf_backend_modify_program_headers
6142 #undef elf_backend_final_write_processing
6143
6144 /* Intel L1OM support. */
6145
6146 static bfd_boolean
6147 elf64_l1om_elf_object_p (bfd *abfd)
6148 {
6149 /* Set the right machine number for an L1OM elf64 file. */
6150 bfd_default_set_arch_mach (abfd, bfd_arch_l1om, bfd_mach_l1om);
6151 return TRUE;
6152 }
6153
6154 #undef TARGET_LITTLE_SYM
6155 #define TARGET_LITTLE_SYM l1om_elf64_vec
6156 #undef TARGET_LITTLE_NAME
6157 #define TARGET_LITTLE_NAME "elf64-l1om"
6158 #undef ELF_ARCH
6159 #define ELF_ARCH bfd_arch_l1om
6160
6161 #undef ELF_MACHINE_CODE
6162 #define ELF_MACHINE_CODE EM_L1OM
6163
6164 #undef ELF_OSABI
6165
6166 #undef elf64_bed
6167 #define elf64_bed elf64_l1om_bed
6168
6169 #undef elf_backend_object_p
6170 #define elf_backend_object_p elf64_l1om_elf_object_p
6171
6172 /* Restore defaults. */
6173 #undef ELF_MAXPAGESIZE
6174 #undef ELF_MINPAGESIZE
6175 #undef ELF_COMMONPAGESIZE
6176 #define ELF_MAXPAGESIZE 0x200000
6177 #define ELF_MINPAGESIZE 0x1000
6178 #define ELF_COMMONPAGESIZE 0x1000
6179 #undef elf_backend_plt_alignment
6180 #define elf_backend_plt_alignment 4
6181 #undef elf_backend_arch_data
6182 #define elf_backend_arch_data &elf_x86_64_arch_bed
6183
6184 #include "elf64-target.h"
6185
6186 /* FreeBSD L1OM support. */
6187
6188 #undef TARGET_LITTLE_SYM
6189 #define TARGET_LITTLE_SYM l1om_elf64_fbsd_vec
6190 #undef TARGET_LITTLE_NAME
6191 #define TARGET_LITTLE_NAME "elf64-l1om-freebsd"
6192
6193 #undef ELF_OSABI
6194 #define ELF_OSABI ELFOSABI_FREEBSD
6195
6196 #undef elf64_bed
6197 #define elf64_bed elf64_l1om_fbsd_bed
6198
6199 #include "elf64-target.h"
6200
6201 /* Intel K1OM support. */
6202
6203 static bfd_boolean
6204 elf64_k1om_elf_object_p (bfd *abfd)
6205 {
6206 	  /* Set the right machine number for a K1OM elf64 file. */
6207 bfd_default_set_arch_mach (abfd, bfd_arch_k1om, bfd_mach_k1om);
6208 return TRUE;
6209 }
6210
6211 #undef TARGET_LITTLE_SYM
6212 #define TARGET_LITTLE_SYM k1om_elf64_vec
6213 #undef TARGET_LITTLE_NAME
6214 #define TARGET_LITTLE_NAME "elf64-k1om"
6215 #undef ELF_ARCH
6216 #define ELF_ARCH bfd_arch_k1om
6217
6218 #undef ELF_MACHINE_CODE
6219 #define ELF_MACHINE_CODE EM_K1OM
6220
6221 #undef ELF_OSABI
6222
6223 #undef elf64_bed
6224 #define elf64_bed elf64_k1om_bed
6225
6226 #undef elf_backend_object_p
6227 #define elf_backend_object_p elf64_k1om_elf_object_p
6228
6229 #undef elf_backend_static_tls_alignment
6230
6231 #undef elf_backend_want_plt_sym
6232 #define elf_backend_want_plt_sym 0
6233
6234 #include "elf64-target.h"
6235
6236 /* FreeBSD K1OM support. */
6237
6238 #undef TARGET_LITTLE_SYM
6239 #define TARGET_LITTLE_SYM k1om_elf64_fbsd_vec
6240 #undef TARGET_LITTLE_NAME
6241 #define TARGET_LITTLE_NAME "elf64-k1om-freebsd"
6242
6243 #undef ELF_OSABI
6244 #define ELF_OSABI ELFOSABI_FREEBSD
6245
6246 #undef elf64_bed
6247 #define elf64_bed elf64_k1om_fbsd_bed
6248
6249 #include "elf64-target.h"
6250
6251 /* 32bit x86-64 support. */
6252
6253 #undef TARGET_LITTLE_SYM
6254 #define TARGET_LITTLE_SYM x86_64_elf32_vec
6255 #undef TARGET_LITTLE_NAME
6256 #define TARGET_LITTLE_NAME "elf32-x86-64"
6257 #undef elf32_bed
6258
6259 #undef ELF_ARCH
6260 #define ELF_ARCH bfd_arch_i386
6261
6262 #undef ELF_MACHINE_CODE
6263 #define ELF_MACHINE_CODE EM_X86_64
6264
6265 #undef ELF_OSABI
6266
6267 #undef elf_backend_object_p
6268 #define elf_backend_object_p \
6269 elf32_x86_64_elf_object_p
6270
6271 #undef elf_backend_bfd_from_remote_memory
6272 #define elf_backend_bfd_from_remote_memory \
6273 _bfd_elf32_bfd_from_remote_memory
6274
6275 #undef elf_backend_size_info
6276 #define elf_backend_size_info \
6277 _bfd_elf32_size_info
6278
6279 #include "elf32-target.h"