bfd/elf64-x86-64.c
1 /* X86-64 specific support for ELF
2 Copyright (C) 2000-2021 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka <jh@suse.cz>.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "elfxx-x86.h"
23 #include "dwarf2.h"
24 #include "libiberty.h"
25
26 #include "opcode/i386.h"
27 #include "elf/x86-64.h"
28
29 #ifdef CORE_HEADER
30 #include <stdarg.h>
31 #include CORE_HEADER
32 #endif
33
34 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
35 #define MINUS_ONE (~ (bfd_vma) 0)
36
37 /* Since 32-bit and 64-bit x86-64 encode the relocation type in an
38 identical manner, we use ELF32_R_TYPE instead of ELF64_R_TYPE to get
39 the relocation type. We also use ELF_ST_TYPE instead of ELF64_ST_TYPE
40 since they are the same. */
41
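/* As an illustration of the above (assuming the standard ELF r_info
   packing, where ELF64 stores the type in the low 32 bits of r_info and
   ELF32 in the low 8 bits): every R_X86_64_* value fits in 8 bits, so
   masking with ELF32_R_TYPE recovers the same type from either encoding.
   For example, with r_info == 0x0000002500000002 (ELF64: symbol index
   0x25, type 2), both ELF64_R_TYPE and ELF32_R_TYPE yield 2, which is
   R_X86_64_PC32.  */
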
42 /* The relocation "howto" table. Order of fields:
43 type, rightshift, size, bitsize, pc_relative, bitpos, complain_on_overflow,
44 special_function, name, partial_inplace, src_mask, dst_mask, pcrel_offset. */
45 static reloc_howto_type x86_64_elf_howto_table[] =
46 {
47 HOWTO(R_X86_64_NONE, 0, 3, 0, false, 0, complain_overflow_dont,
48 bfd_elf_generic_reloc, "R_X86_64_NONE", false, 0, 0x00000000,
49 false),
50 HOWTO(R_X86_64_64, 0, 4, 64, false, 0, complain_overflow_dont,
51 bfd_elf_generic_reloc, "R_X86_64_64", false, 0, MINUS_ONE,
52 false),
53 HOWTO(R_X86_64_PC32, 0, 2, 32, true, 0, complain_overflow_signed,
54 bfd_elf_generic_reloc, "R_X86_64_PC32", false, 0, 0xffffffff,
55 true),
56 HOWTO(R_X86_64_GOT32, 0, 2, 32, false, 0, complain_overflow_signed,
57 bfd_elf_generic_reloc, "R_X86_64_GOT32", false, 0, 0xffffffff,
58 false),
59 HOWTO(R_X86_64_PLT32, 0, 2, 32, true, 0, complain_overflow_signed,
60 bfd_elf_generic_reloc, "R_X86_64_PLT32", false, 0, 0xffffffff,
61 true),
62 HOWTO(R_X86_64_COPY, 0, 2, 32, false, 0, complain_overflow_bitfield,
63 bfd_elf_generic_reloc, "R_X86_64_COPY", false, 0, 0xffffffff,
64 false),
65 HOWTO(R_X86_64_GLOB_DAT, 0, 4, 64, false, 0, complain_overflow_dont,
66 bfd_elf_generic_reloc, "R_X86_64_GLOB_DAT", false, 0, MINUS_ONE,
67 false),
68 HOWTO(R_X86_64_JUMP_SLOT, 0, 4, 64, false, 0, complain_overflow_dont,
69 bfd_elf_generic_reloc, "R_X86_64_JUMP_SLOT", false, 0, MINUS_ONE,
70 false),
71 HOWTO(R_X86_64_RELATIVE, 0, 4, 64, false, 0, complain_overflow_dont,
72 bfd_elf_generic_reloc, "R_X86_64_RELATIVE", false, 0, MINUS_ONE,
73 false),
74 HOWTO(R_X86_64_GOTPCREL, 0, 2, 32, true, 0, complain_overflow_signed,
75 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL", false, 0, 0xffffffff,
76 true),
77 HOWTO(R_X86_64_32, 0, 2, 32, false, 0, complain_overflow_unsigned,
78 bfd_elf_generic_reloc, "R_X86_64_32", false, 0, 0xffffffff,
79 false),
80 HOWTO(R_X86_64_32S, 0, 2, 32, false, 0, complain_overflow_signed,
81 bfd_elf_generic_reloc, "R_X86_64_32S", false, 0, 0xffffffff,
82 false),
83 HOWTO(R_X86_64_16, 0, 1, 16, false, 0, complain_overflow_bitfield,
84 bfd_elf_generic_reloc, "R_X86_64_16", false, 0, 0xffff, false),
85 HOWTO(R_X86_64_PC16, 0, 1, 16, true, 0, complain_overflow_bitfield,
86 bfd_elf_generic_reloc, "R_X86_64_PC16", false, 0, 0xffff, true),
87 HOWTO(R_X86_64_8, 0, 0, 8, false, 0, complain_overflow_bitfield,
88 bfd_elf_generic_reloc, "R_X86_64_8", false, 0, 0xff, false),
89 HOWTO(R_X86_64_PC8, 0, 0, 8, true, 0, complain_overflow_signed,
90 bfd_elf_generic_reloc, "R_X86_64_PC8", false, 0, 0xff, true),
91 HOWTO(R_X86_64_DTPMOD64, 0, 4, 64, false, 0, complain_overflow_dont,
92 bfd_elf_generic_reloc, "R_X86_64_DTPMOD64", false, 0, MINUS_ONE,
93 false),
94 HOWTO(R_X86_64_DTPOFF64, 0, 4, 64, false, 0, complain_overflow_dont,
95 bfd_elf_generic_reloc, "R_X86_64_DTPOFF64", false, 0, MINUS_ONE,
96 false),
97 HOWTO(R_X86_64_TPOFF64, 0, 4, 64, false, 0, complain_overflow_dont,
98 bfd_elf_generic_reloc, "R_X86_64_TPOFF64", false, 0, MINUS_ONE,
99 false),
100 HOWTO(R_X86_64_TLSGD, 0, 2, 32, true, 0, complain_overflow_signed,
101 bfd_elf_generic_reloc, "R_X86_64_TLSGD", false, 0, 0xffffffff,
102 true),
103 HOWTO(R_X86_64_TLSLD, 0, 2, 32, true, 0, complain_overflow_signed,
104 bfd_elf_generic_reloc, "R_X86_64_TLSLD", false, 0, 0xffffffff,
105 true),
106 HOWTO(R_X86_64_DTPOFF32, 0, 2, 32, false, 0, complain_overflow_signed,
107 bfd_elf_generic_reloc, "R_X86_64_DTPOFF32", false, 0, 0xffffffff,
108 false),
109 HOWTO(R_X86_64_GOTTPOFF, 0, 2, 32, true, 0, complain_overflow_signed,
110 bfd_elf_generic_reloc, "R_X86_64_GOTTPOFF", false, 0, 0xffffffff,
111 true),
112 HOWTO(R_X86_64_TPOFF32, 0, 2, 32, false, 0, complain_overflow_signed,
113 bfd_elf_generic_reloc, "R_X86_64_TPOFF32", false, 0, 0xffffffff,
114 false),
115 HOWTO(R_X86_64_PC64, 0, 4, 64, true, 0, complain_overflow_dont,
116 bfd_elf_generic_reloc, "R_X86_64_PC64", false, 0, MINUS_ONE,
117 true),
118 HOWTO(R_X86_64_GOTOFF64, 0, 4, 64, false, 0, complain_overflow_dont,
119 bfd_elf_generic_reloc, "R_X86_64_GOTOFF64", false, 0, MINUS_ONE,
120 false),
121 HOWTO(R_X86_64_GOTPC32, 0, 2, 32, true, 0, complain_overflow_signed,
122 bfd_elf_generic_reloc, "R_X86_64_GOTPC32", false, 0, 0xffffffff,
123 true),
124 HOWTO(R_X86_64_GOT64, 0, 4, 64, false, 0, complain_overflow_signed,
125 bfd_elf_generic_reloc, "R_X86_64_GOT64", false, 0, MINUS_ONE,
126 false),
127 HOWTO(R_X86_64_GOTPCREL64, 0, 4, 64, true, 0, complain_overflow_signed,
128 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL64", false, 0, MINUS_ONE,
129 true),
130 HOWTO(R_X86_64_GOTPC64, 0, 4, 64, true, 0, complain_overflow_signed,
131 bfd_elf_generic_reloc, "R_X86_64_GOTPC64", false, 0, MINUS_ONE,
132 true),
133 HOWTO(R_X86_64_GOTPLT64, 0, 4, 64, false, 0, complain_overflow_signed,
134 bfd_elf_generic_reloc, "R_X86_64_GOTPLT64", false, 0, MINUS_ONE,
135 false),
136 HOWTO(R_X86_64_PLTOFF64, 0, 4, 64, false, 0, complain_overflow_signed,
137 bfd_elf_generic_reloc, "R_X86_64_PLTOFF64", false, 0, MINUS_ONE,
138 false),
139 HOWTO(R_X86_64_SIZE32, 0, 2, 32, false, 0, complain_overflow_unsigned,
140 bfd_elf_generic_reloc, "R_X86_64_SIZE32", false, 0, 0xffffffff,
141 false),
142 HOWTO(R_X86_64_SIZE64, 0, 4, 64, false, 0, complain_overflow_dont,
143 bfd_elf_generic_reloc, "R_X86_64_SIZE64", false, 0, MINUS_ONE,
144 false),
145 HOWTO(R_X86_64_GOTPC32_TLSDESC, 0, 2, 32, true, 0,
146 complain_overflow_bitfield, bfd_elf_generic_reloc,
147 "R_X86_64_GOTPC32_TLSDESC", false, 0, 0xffffffff, true),
148 HOWTO(R_X86_64_TLSDESC_CALL, 0, 3, 0, false, 0,
149 complain_overflow_dont, bfd_elf_generic_reloc,
150 "R_X86_64_TLSDESC_CALL",
151 false, 0, 0, false),
152 HOWTO(R_X86_64_TLSDESC, 0, 4, 64, false, 0,
153 complain_overflow_dont, bfd_elf_generic_reloc,
154 "R_X86_64_TLSDESC", false, 0, MINUS_ONE, false),
155 HOWTO(R_X86_64_IRELATIVE, 0, 4, 64, false, 0, complain_overflow_dont,
156 bfd_elf_generic_reloc, "R_X86_64_IRELATIVE", false, 0, MINUS_ONE,
157 false),
158 HOWTO(R_X86_64_RELATIVE64, 0, 4, 64, false, 0, complain_overflow_dont,
159 bfd_elf_generic_reloc, "R_X86_64_RELATIVE64", false, 0, MINUS_ONE,
160 false),
161 HOWTO(R_X86_64_PC32_BND, 0, 2, 32, true, 0, complain_overflow_signed,
162 bfd_elf_generic_reloc, "R_X86_64_PC32_BND", false, 0, 0xffffffff,
163 true),
164 HOWTO(R_X86_64_PLT32_BND, 0, 2, 32, true, 0, complain_overflow_signed,
165 bfd_elf_generic_reloc, "R_X86_64_PLT32_BND", false, 0, 0xffffffff,
166 true),
167 HOWTO(R_X86_64_GOTPCRELX, 0, 2, 32, true, 0, complain_overflow_signed,
168 bfd_elf_generic_reloc, "R_X86_64_GOTPCRELX", false, 0, 0xffffffff,
169 true),
170 HOWTO(R_X86_64_REX_GOTPCRELX, 0, 2, 32, true, 0, complain_overflow_signed,
171 bfd_elf_generic_reloc, "R_X86_64_REX_GOTPCRELX", false, 0, 0xffffffff,
172 true),
173
174 /* We have a gap in the reloc numbers here.
175 R_X86_64_standard counts the number up to this point, and
176 R_X86_64_vt_offset is the value to subtract from a reloc type of
177 R_X86_64_GNU_VT* to form an index into this table. */
178 #define R_X86_64_standard (R_X86_64_REX_GOTPCRELX + 1)
179 #define R_X86_64_vt_offset (R_X86_64_GNU_VTINHERIT - R_X86_64_standard)
180
181 /* GNU extension to record C++ vtable hierarchy. */
182 HOWTO (R_X86_64_GNU_VTINHERIT, 0, 4, 0, false, 0, complain_overflow_dont,
183 NULL, "R_X86_64_GNU_VTINHERIT", false, 0, 0, false),
184
185 /* GNU extension to record C++ vtable member usage. */
186 HOWTO (R_X86_64_GNU_VTENTRY, 0, 4, 0, false, 0, complain_overflow_dont,
187 _bfd_elf_rel_vtable_reloc_fn, "R_X86_64_GNU_VTENTRY", false, 0, 0,
188 false),
189
190 /* Use complain_overflow_bitfield on R_X86_64_32 for x32. */
191 HOWTO(R_X86_64_32, 0, 2, 32, false, 0, complain_overflow_bitfield,
192 bfd_elf_generic_reloc, "R_X86_64_32", false, 0, 0xffffffff,
193 false)
194 };
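
/* A reading aid for the table above: the third HOWTO argument is the
   historical BFD size code (0, 1, 2 and 4 select 1-, 2-, 4- and 8-byte
   fields; 3 marks an empty field), which is why it tracks the bitsize
   column.  The R_X86_64_PC32 entry, for example, describes a 4-byte,
   32-bit, PC-relative field with signed overflow checking and a
   dst_mask of 0xffffffff.  */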
195
196 #define X86_PCREL_TYPE_P(TYPE) \
197 ( ((TYPE) == R_X86_64_PC8) \
198 || ((TYPE) == R_X86_64_PC16) \
199 || ((TYPE) == R_X86_64_PC32) \
200 || ((TYPE) == R_X86_64_PC32_BND) \
201 || ((TYPE) == R_X86_64_PC64))
202
203 #define X86_SIZE_TYPE_P(TYPE) \
204 ((TYPE) == R_X86_64_SIZE32 || (TYPE) == R_X86_64_SIZE64)
205
206 /* Map BFD relocs to the x86_64 elf relocs. */
207 struct elf_reloc_map
208 {
209 bfd_reloc_code_real_type bfd_reloc_val;
210 unsigned char elf_reloc_val;
211 };
212
213 static const struct elf_reloc_map x86_64_reloc_map[] =
214 {
215 { BFD_RELOC_NONE, R_X86_64_NONE, },
216 { BFD_RELOC_64, R_X86_64_64, },
217 { BFD_RELOC_32_PCREL, R_X86_64_PC32, },
218 { BFD_RELOC_X86_64_GOT32, R_X86_64_GOT32,},
219 { BFD_RELOC_X86_64_PLT32, R_X86_64_PLT32,},
220 { BFD_RELOC_X86_64_COPY, R_X86_64_COPY, },
221 { BFD_RELOC_X86_64_GLOB_DAT, R_X86_64_GLOB_DAT, },
222 { BFD_RELOC_X86_64_JUMP_SLOT, R_X86_64_JUMP_SLOT, },
223 { BFD_RELOC_X86_64_RELATIVE, R_X86_64_RELATIVE, },
224 { BFD_RELOC_X86_64_GOTPCREL, R_X86_64_GOTPCREL, },
225 { BFD_RELOC_32, R_X86_64_32, },
226 { BFD_RELOC_X86_64_32S, R_X86_64_32S, },
227 { BFD_RELOC_16, R_X86_64_16, },
228 { BFD_RELOC_16_PCREL, R_X86_64_PC16, },
229 { BFD_RELOC_8, R_X86_64_8, },
230 { BFD_RELOC_8_PCREL, R_X86_64_PC8, },
231 { BFD_RELOC_X86_64_DTPMOD64, R_X86_64_DTPMOD64, },
232 { BFD_RELOC_X86_64_DTPOFF64, R_X86_64_DTPOFF64, },
233 { BFD_RELOC_X86_64_TPOFF64, R_X86_64_TPOFF64, },
234 { BFD_RELOC_X86_64_TLSGD, R_X86_64_TLSGD, },
235 { BFD_RELOC_X86_64_TLSLD, R_X86_64_TLSLD, },
236 { BFD_RELOC_X86_64_DTPOFF32, R_X86_64_DTPOFF32, },
237 { BFD_RELOC_X86_64_GOTTPOFF, R_X86_64_GOTTPOFF, },
238 { BFD_RELOC_X86_64_TPOFF32, R_X86_64_TPOFF32, },
239 { BFD_RELOC_64_PCREL, R_X86_64_PC64, },
240 { BFD_RELOC_X86_64_GOTOFF64, R_X86_64_GOTOFF64, },
241 { BFD_RELOC_X86_64_GOTPC32, R_X86_64_GOTPC32, },
242 { BFD_RELOC_X86_64_GOT64, R_X86_64_GOT64, },
243 { BFD_RELOC_X86_64_GOTPCREL64,R_X86_64_GOTPCREL64, },
244 { BFD_RELOC_X86_64_GOTPC64, R_X86_64_GOTPC64, },
245 { BFD_RELOC_X86_64_GOTPLT64, R_X86_64_GOTPLT64, },
246 { BFD_RELOC_X86_64_PLTOFF64, R_X86_64_PLTOFF64, },
247 { BFD_RELOC_SIZE32, R_X86_64_SIZE32, },
248 { BFD_RELOC_SIZE64, R_X86_64_SIZE64, },
249 { BFD_RELOC_X86_64_GOTPC32_TLSDESC, R_X86_64_GOTPC32_TLSDESC, },
250 { BFD_RELOC_X86_64_TLSDESC_CALL, R_X86_64_TLSDESC_CALL, },
251 { BFD_RELOC_X86_64_TLSDESC, R_X86_64_TLSDESC, },
252 { BFD_RELOC_X86_64_IRELATIVE, R_X86_64_IRELATIVE, },
253 { BFD_RELOC_X86_64_PC32_BND, R_X86_64_PC32_BND, },
254 { BFD_RELOC_X86_64_PLT32_BND, R_X86_64_PLT32_BND, },
255 { BFD_RELOC_X86_64_GOTPCRELX, R_X86_64_GOTPCRELX, },
256 { BFD_RELOC_X86_64_REX_GOTPCRELX, R_X86_64_REX_GOTPCRELX, },
257 { BFD_RELOC_VTABLE_INHERIT, R_X86_64_GNU_VTINHERIT, },
258 { BFD_RELOC_VTABLE_ENTRY, R_X86_64_GNU_VTENTRY, },
259 };
260
261 static reloc_howto_type *
262 elf_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type)
263 {
264 unsigned i;
265
266 if (r_type == (unsigned int) R_X86_64_32)
267 {
268 if (ABI_64_P (abfd))
269 i = r_type;
270 else
271 i = ARRAY_SIZE (x86_64_elf_howto_table) - 1;
272 }
273 else if (r_type < (unsigned int) R_X86_64_GNU_VTINHERIT
274 || r_type >= (unsigned int) R_X86_64_max)
275 {
276 if (r_type >= (unsigned int) R_X86_64_standard)
277 {
278 /* xgettext:c-format */
279 _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
280 abfd, r_type);
281 bfd_set_error (bfd_error_bad_value);
282 return NULL;
283 }
284 i = r_type;
285 }
286 else
287 i = r_type - (unsigned int) R_X86_64_vt_offset;
288 BFD_ASSERT (x86_64_elf_howto_table[i].type == r_type);
289 return &x86_64_elf_howto_table[i];
290 }
291
292 /* Given a BFD reloc type, return a HOWTO structure. */
293 static reloc_howto_type *
294 elf_x86_64_reloc_type_lookup (bfd *abfd,
295 bfd_reloc_code_real_type code)
296 {
297 unsigned int i;
298
299 for (i = 0; i < sizeof (x86_64_reloc_map) / sizeof (struct elf_reloc_map);
300 i++)
301 {
302 if (x86_64_reloc_map[i].bfd_reloc_val == code)
303 return elf_x86_64_rtype_to_howto (abfd,
304 x86_64_reloc_map[i].elf_reloc_val);
305 }
306 return NULL;
307 }
308
309 static reloc_howto_type *
310 elf_x86_64_reloc_name_lookup (bfd *abfd,
311 const char *r_name)
312 {
313 unsigned int i;
314
315 if (!ABI_64_P (abfd) && strcasecmp (r_name, "R_X86_64_32") == 0)
316 {
317 /* Get x32 R_X86_64_32. */
318 reloc_howto_type *reloc
319 = &x86_64_elf_howto_table[ARRAY_SIZE (x86_64_elf_howto_table) - 1];
320 BFD_ASSERT (reloc->type == (unsigned int) R_X86_64_32);
321 return reloc;
322 }
323
324 for (i = 0; i < ARRAY_SIZE (x86_64_elf_howto_table); i++)
325 if (x86_64_elf_howto_table[i].name != NULL
326 && strcasecmp (x86_64_elf_howto_table[i].name, r_name) == 0)
327 return &x86_64_elf_howto_table[i];
328
329 return NULL;
330 }
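
#if 0
/* Illustrative sketch only (not compiled): how the two lookup routines
   above resolve the same relocation, once by BFD reloc code and once by
   name.  ABFD is assumed to be an x86-64 ELF bfd opened elsewhere.  */

static void
example_reloc_lookups (bfd *abfd)
{
  reloc_howto_type *by_code
    = elf_x86_64_reloc_type_lookup (abfd, BFD_RELOC_32_PCREL);
  reloc_howto_type *by_name
    = elf_x86_64_reloc_name_lookup (abfd, "R_X86_64_PC32");

  /* Both resolve to the R_X86_64_PC32 entry of x86_64_elf_howto_table.  */
  BFD_ASSERT (by_code == by_name
              && by_code->type == (unsigned int) R_X86_64_PC32);
}
#endif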
331
332 /* Given an x86_64 ELF reloc type, fill in an arelent structure. */
333
334 static bool
335 elf_x86_64_info_to_howto (bfd *abfd, arelent *cache_ptr,
336 Elf_Internal_Rela *dst)
337 {
338 unsigned r_type;
339
340 r_type = ELF32_R_TYPE (dst->r_info);
341 cache_ptr->howto = elf_x86_64_rtype_to_howto (abfd, r_type);
342 if (cache_ptr->howto == NULL)
343 return false;
344 BFD_ASSERT (r_type == cache_ptr->howto->type || cache_ptr->howto->type == R_X86_64_NONE);
345 return true;
346 }
347 \f
348 /* Support for core dump NOTE sections. */
349 static bool
350 elf_x86_64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
351 {
352 int offset;
353 size_t size;
354
355 switch (note->descsz)
356 {
357 default:
358 return false;
359
360 case 296: /* sizeof(struct elf_prstatus) on Linux/x32 */
361 /* pr_cursig */
362 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
363
364 /* pr_pid */
365 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
366
367 /* pr_reg */
368 offset = 72;
369 size = 216;
370
371 break;
372
373 case 336: /* sizeof(struct elf_prstatus) on Linux/x86_64 */
374 /* pr_cursig */
375 elf_tdata (abfd)->core->signal
376 = bfd_get_16 (abfd, note->descdata + 12);
377
378 /* pr_pid */
379 elf_tdata (abfd)->core->lwpid
380 = bfd_get_32 (abfd, note->descdata + 32);
381
382 /* pr_reg */
383 offset = 112;
384 size = 216;
385
386 break;
387 }
388
389 /* Make a ".reg/999" section. */
390 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
391 size, note->descpos + offset);
392 }
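
/* In both layouts above pr_reg is 216 bytes, i.e. 27 eight-byte slots,
   matching the x86-64 general-purpose register set exported in core
   files; only its offset differs (72 on x32, 112 on x86_64) because the
   fields preceding it have different sizes.  The arithmetic is
   consistent with the descsz values checked above: 72 + 216 + 8 == 296
   and 112 + 216 + 8 == 336, the trailing 8 bytes presumably being
   pr_fpvalid plus padding.  */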
393
394 static bool
395 elf_x86_64_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
396 {
397 switch (note->descsz)
398 {
399 default:
400 return false;
401
402 case 124: /* sizeof(struct elf_prpsinfo) on Linux/x32 */
403 elf_tdata (abfd)->core->pid
404 = bfd_get_32 (abfd, note->descdata + 12);
405 elf_tdata (abfd)->core->program
406 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
407 elf_tdata (abfd)->core->command
408 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
409 break;
410
411 case 136: /* sizeof(struct elf_prpsinfo) on Linux/x86_64 */
412 elf_tdata (abfd)->core->pid
413 = bfd_get_32 (abfd, note->descdata + 24);
414 elf_tdata (abfd)->core->program
415 = _bfd_elfcore_strndup (abfd, note->descdata + 40, 16);
416 elf_tdata (abfd)->core->command
417 = _bfd_elfcore_strndup (abfd, note->descdata + 56, 80);
418 }
419
420 /* Note that for some reason, a spurious space is tacked
421 onto the end of the args in some (at least one anyway)
422 implementations, so strip it off if it exists. */
423
424 {
425 char *command = elf_tdata (abfd)->core->command;
426 int n = strlen (command);
427
428 if (0 < n && command[n - 1] == ' ')
429 command[n - 1] = '\0';
430 }
431
432 return true;
433 }
434
435 #ifdef CORE_HEADER
436 # if GCC_VERSION >= 8000
437 # pragma GCC diagnostic push
438 # pragma GCC diagnostic ignored "-Wstringop-truncation"
439 # endif
440 static char *
441 elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz,
442 int note_type, ...)
443 {
444 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
445 va_list ap;
446 const char *fname, *psargs;
447 long pid;
448 int cursig;
449 const void *gregs;
450
451 switch (note_type)
452 {
453 default:
454 return NULL;
455
456 case NT_PRPSINFO:
457 va_start (ap, note_type);
458 fname = va_arg (ap, const char *);
459 psargs = va_arg (ap, const char *);
460 va_end (ap);
461
462 if (bed->s->elfclass == ELFCLASS32)
463 {
464 prpsinfo32_t data;
465 memset (&data, 0, sizeof (data));
466 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
467 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
468 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
469 &data, sizeof (data));
470 }
471 else
472 {
473 prpsinfo64_t data;
474 memset (&data, 0, sizeof (data));
475 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
476 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
477 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
478 &data, sizeof (data));
479 }
480 /* NOTREACHED */
481
482 case NT_PRSTATUS:
483 va_start (ap, note_type);
484 pid = va_arg (ap, long);
485 cursig = va_arg (ap, int);
486 gregs = va_arg (ap, const void *);
487 va_end (ap);
488
489 if (bed->s->elfclass == ELFCLASS32)
490 {
491 if (bed->elf_machine_code == EM_X86_64)
492 {
493 prstatusx32_t prstat;
494 memset (&prstat, 0, sizeof (prstat));
495 prstat.pr_pid = pid;
496 prstat.pr_cursig = cursig;
497 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
498 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
499 &prstat, sizeof (prstat));
500 }
501 else
502 {
503 prstatus32_t prstat;
504 memset (&prstat, 0, sizeof (prstat));
505 prstat.pr_pid = pid;
506 prstat.pr_cursig = cursig;
507 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
508 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
509 &prstat, sizeof (prstat));
510 }
511 }
512 else
513 {
514 prstatus64_t prstat;
515 memset (&prstat, 0, sizeof (prstat));
516 prstat.pr_pid = pid;
517 prstat.pr_cursig = cursig;
518 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
519 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
520 &prstat, sizeof (prstat));
521 }
522 }
523 /* NOTREACHED */
524 }
525 # if GCC_VERSION >= 8000
526 # pragma GCC diagnostic pop
527 # endif
528 #endif
529 \f
530 /* Functions for the x86-64 ELF linker. */
531
532 /* The size in bytes of an entry in the global offset table. */
533
534 #define GOT_ENTRY_SIZE 8
535
536 /* The size in bytes of an entry in the lazy procedure linkage table. */
537
538 #define LAZY_PLT_ENTRY_SIZE 16
539
540 /* The size in bytes of an entry in the non-lazy procedure linkage
541 table. */
542
543 #define NON_LAZY_PLT_ENTRY_SIZE 8
544
545 /* The first entry in a lazy procedure linkage table looks like this.
546 See the SVR4 ABI i386 supplement and the x86-64 ABI to see how this
547 works. */
548
549 static const bfd_byte elf_x86_64_lazy_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
550 {
551 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
552 0xff, 0x25, 16, 0, 0, 0, /* jmpq *GOT+16(%rip) */
553 0x0f, 0x1f, 0x40, 0x00 /* nopl 0(%rax) */
554 };
555
556 /* Subsequent entries in a lazy procedure linkage table look like this. */
557
558 static const bfd_byte elf_x86_64_lazy_plt_entry[LAZY_PLT_ENTRY_SIZE] =
559 {
560 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
561 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
562 0x68, /* pushq immediate */
563 0, 0, 0, 0, /* replaced with index into relocation table. */
564 0xe9, /* jmp relative */
565 0, 0, 0, 0 /* replaced with offset to start of .plt0. */
566 };
567
568 /* The first entry in a lazy procedure linkage table with BND prefix
569 looks like this. */
570
571 static const bfd_byte elf_x86_64_lazy_bnd_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
572 {
573 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
574 0xf2, 0xff, 0x25, 16, 0, 0, 0, /* bnd jmpq *GOT+16(%rip) */
575 0x0f, 0x1f, 0 /* nopl (%rax) */
576 };
577
578 /* Subsequent entries for branches with BND prefix in a lazy procedure
579 linkage table look like this. */
580
581 static const bfd_byte elf_x86_64_lazy_bnd_plt_entry[LAZY_PLT_ENTRY_SIZE] =
582 {
583 0x68, 0, 0, 0, 0, /* pushq immediate */
584 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
585 0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */
586 };
587
588 /* The first entry in the IBT-enabled lazy procedure linkage table is
589 the same as the lazy PLT with BND prefix so that bound registers are
590 preserved when control is passed to the dynamic linker. Subsequent
591 entries for an IBT-enabled lazy procedure linkage table look like
592 this. */
593
594 static const bfd_byte elf_x86_64_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
595 {
596 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
597 0x68, 0, 0, 0, 0, /* pushq immediate */
598 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
599 0x90 /* nop */
600 };
601
602 /* The first entry in the x32 IBT-enabled lazy procedure linkage table
603 is the same as the normal lazy PLT. Subsequent entries for an
604 x32 IBT-enabled lazy procedure linkage table look like this. */
605
606 static const bfd_byte elf_x32_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
607 {
608 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
609 0x68, 0, 0, 0, 0, /* pushq immediate */
610 0xe9, 0, 0, 0, 0, /* jmpq relative */
611 0x66, 0x90 /* xchg %ax,%ax */
612 };
613
614 /* Entries in the non-lazy procedure linkage table look like this. */
615
616 static const bfd_byte elf_x86_64_non_lazy_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
617 {
618 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
619 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
620 0x66, 0x90 /* xchg %ax,%ax */
621 };
622
623 /* Entries for branches with BND prefix in the non-lazy procedure
624 linkage table look like this. */
625
626 static const bfd_byte elf_x86_64_non_lazy_bnd_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
627 {
628 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
629 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
630 0x90 /* nop */
631 };
632
633 /* Entries for IBT-enabled branches in the non-lazy procedure
634 linkage table look like this. They have the same size as the lazy
635 PLT entry. */
636
637 static const bfd_byte elf_x86_64_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
638 {
639 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
640 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
641 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
642 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopl 0x0(%rax,%rax,1) */
643 };
644
645 /* Entries for IBT-enabled branches in the x32 non-lazy procedure
646 linkage table look like this. They have the same size as the lazy
647 PLT entry. */
648
649 static const bfd_byte elf_x32_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
650 {
651 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
652 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
653 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
654 0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopw 0x0(%rax,%rax,1) */
655 };
656
657 /* The TLSDESC entry in a lazy procedure linkage table. */
658 static const bfd_byte elf_x86_64_tlsdesc_plt_entry[LAZY_PLT_ENTRY_SIZE] =
659 {
660 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
661 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
662 0xff, 0x25, 16, 0, 0, 0 /* jmpq *GOT+TDG(%rip) */
663 };
664
665 /* .eh_frame covering the lazy .plt section. */
666
667 static const bfd_byte elf_x86_64_eh_frame_lazy_plt[] =
668 {
669 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
670 0, 0, 0, 0, /* CIE ID */
671 1, /* CIE version */
672 'z', 'R', 0, /* Augmentation string */
673 1, /* Code alignment factor */
674 0x78, /* Data alignment factor */
675 16, /* Return address column */
676 1, /* Augmentation size */
677 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
678 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
679 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
680 DW_CFA_nop, DW_CFA_nop,
681
682 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
683 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
684 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
685 0, 0, 0, 0, /* .plt size goes here */
686 0, /* Augmentation size */
687 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
688 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
689 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
690 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
691 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
692 11, /* Block length */
693 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
694 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
695 DW_OP_lit15, DW_OP_and, DW_OP_lit11, DW_OP_ge,
696 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
697 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
698 };
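
/* A worked reading of the DW_CFA_def_cfa_expression block above, given
   the lazy PLT entry layout earlier in this file (jmpq at offset 0,
   pushq at offset 6, jmp to PLT0 at offset 11 within each 16-byte
   entry):

       CFA = (%rsp + 8) + (((%rip & 15) >= 11) << 3)

   i.e. the CFA is %rsp + 8 before the pushq has executed and %rsp + 16
   from offset 11 onwards, where the push of the relocation index has
   already grown the stack by 8 bytes.  The BND, IBT and x32 variants
   below use the same expression with a different literal, matching the
   branch offset in their respective entries.  */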
699
700 /* .eh_frame covering the lazy BND .plt section. */
701
702 static const bfd_byte elf_x86_64_eh_frame_lazy_bnd_plt[] =
703 {
704 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
705 0, 0, 0, 0, /* CIE ID */
706 1, /* CIE version */
707 'z', 'R', 0, /* Augmentation string */
708 1, /* Code alignment factor */
709 0x78, /* Data alignment factor */
710 16, /* Return address column */
711 1, /* Augmentation size */
712 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
713 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
714 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
715 DW_CFA_nop, DW_CFA_nop,
716
717 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
718 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
719 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
720 0, 0, 0, 0, /* .plt size goes here */
721 0, /* Augmentation size */
722 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
723 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
724 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
725 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
726 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
727 11, /* Block length */
728 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
729 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
730 DW_OP_lit15, DW_OP_and, DW_OP_lit5, DW_OP_ge,
731 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
732 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
733 };
734
735 /* .eh_frame covering the IBT-enabled lazy .plt section. */
736
737 static const bfd_byte elf_x86_64_eh_frame_lazy_ibt_plt[] =
738 {
739 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
740 0, 0, 0, 0, /* CIE ID */
741 1, /* CIE version */
742 'z', 'R', 0, /* Augmentation string */
743 1, /* Code alignment factor */
744 0x78, /* Data alignment factor */
745 16, /* Return address column */
746 1, /* Augmentation size */
747 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
748 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
749 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
750 DW_CFA_nop, DW_CFA_nop,
751
752 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
753 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
754 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
755 0, 0, 0, 0, /* .plt size goes here */
756 0, /* Augmentation size */
757 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
758 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
759 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
760 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
761 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
762 11, /* Block length */
763 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
764 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
765 DW_OP_lit15, DW_OP_and, DW_OP_lit10, DW_OP_ge,
766 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
767 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
768 };
769
770 /* .eh_frame covering the x32 IBT-enabled lazy .plt section. */
771
772 static const bfd_byte elf_x32_eh_frame_lazy_ibt_plt[] =
773 {
774 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
775 0, 0, 0, 0, /* CIE ID */
776 1, /* CIE version */
777 'z', 'R', 0, /* Augmentation string */
778 1, /* Code alignment factor */
779 0x78, /* Data alignment factor */
780 16, /* Return address column */
781 1, /* Augmentation size */
782 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
783 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
784 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
785 DW_CFA_nop, DW_CFA_nop,
786
787 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
788 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
789 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
790 0, 0, 0, 0, /* .plt size goes here */
791 0, /* Augmentation size */
792 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
793 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
794 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
795 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
796 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
797 11, /* Block length */
798 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
799 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
800 DW_OP_lit15, DW_OP_and, DW_OP_lit9, DW_OP_ge,
801 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
802 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
803 };
804
805 /* .eh_frame covering the non-lazy .plt section. */
806
807 static const bfd_byte elf_x86_64_eh_frame_non_lazy_plt[] =
808 {
809 #define PLT_GOT_FDE_LENGTH 20
810 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
811 0, 0, 0, 0, /* CIE ID */
812 1, /* CIE version */
813 'z', 'R', 0, /* Augmentation string */
814 1, /* Code alignment factor */
815 0x78, /* Data alignment factor */
816 16, /* Return address column */
817 1, /* Augmentation size */
818 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
819 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
820 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
821 DW_CFA_nop, DW_CFA_nop,
822
823 PLT_GOT_FDE_LENGTH, 0, 0, 0, /* FDE length */
824 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
825 0, 0, 0, 0, /* the start of non-lazy .plt goes here */
826 0, 0, 0, 0, /* non-lazy .plt size goes here */
827 0, /* Augmentation size */
828 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop,
829 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
830 };
831
832 /* These are the standard parameters. */
833 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_plt =
834 {
835 elf_x86_64_lazy_plt0_entry, /* plt0_entry */
836 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
837 elf_x86_64_lazy_plt_entry, /* plt_entry */
838 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
839 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
840 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
841 6, /* plt_tlsdesc_got1_offset */
842 12, /* plt_tlsdesc_got2_offset */
843 10, /* plt_tlsdesc_got1_insn_end */
844 16, /* plt_tlsdesc_got2_insn_end */
845 2, /* plt0_got1_offset */
846 8, /* plt0_got2_offset */
847 12, /* plt0_got2_insn_end */
848 2, /* plt_got_offset */
849 7, /* plt_reloc_offset */
850 12, /* plt_plt_offset */
851 6, /* plt_got_insn_size */
852 LAZY_PLT_ENTRY_SIZE, /* plt_plt_insn_end */
853 6, /* plt_lazy_offset */
854 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
855 elf_x86_64_lazy_plt_entry, /* pic_plt_entry */
856 elf_x86_64_eh_frame_lazy_plt, /* eh_frame_plt */
857 sizeof (elf_x86_64_eh_frame_lazy_plt) /* eh_frame_plt_size */
858 };
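
/* How the offsets above map onto the PLT templates defined earlier (a
   reading aid derived from the byte arrays):

     elf_x86_64_lazy_plt_entry:
       0:  ff 25 <got32>     plt_got_offset   = 2  -> <got32>
       6:  68 <index32>      plt_reloc_offset = 7  -> <index32>
       11: e9 <plt0_32>      plt_plt_offset   = 12 -> <plt0_32>

     elf_x86_64_lazy_plt0_entry:
       0:  ff 35 <got+8>     plt0_got1_offset = 2
       6:  ff 25 <got+16>    plt0_got2_offset = 8  */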
859
860 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_plt =
861 {
862 elf_x86_64_non_lazy_plt_entry, /* plt_entry */
863 elf_x86_64_non_lazy_plt_entry, /* pic_plt_entry */
864 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
865 2, /* plt_got_offset */
866 6, /* plt_got_insn_size */
867 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
868 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
869 };
870
871 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_bnd_plt =
872 {
873 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
874 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
875 elf_x86_64_lazy_bnd_plt_entry, /* plt_entry */
876 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
877 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
878 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
879 6, /* plt_tlsdesc_got1_offset */
880 12, /* plt_tlsdesc_got2_offset */
881 10, /* plt_tlsdesc_got1_insn_end */
882 16, /* plt_tlsdesc_got2_insn_end */
883 2, /* plt0_got1_offset */
884 1+8, /* plt0_got2_offset */
885 1+12, /* plt0_got2_insn_end */
886 1+2, /* plt_got_offset */
887 1, /* plt_reloc_offset */
888 7, /* plt_plt_offset */
889 1+6, /* plt_got_insn_size */
890 11, /* plt_plt_insn_end */
891 0, /* plt_lazy_offset */
892 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
893 elf_x86_64_lazy_bnd_plt_entry, /* pic_plt_entry */
894 elf_x86_64_eh_frame_lazy_bnd_plt, /* eh_frame_plt */
895 sizeof (elf_x86_64_eh_frame_lazy_bnd_plt) /* eh_frame_plt_size */
896 };
897
898 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_bnd_plt =
899 {
900 elf_x86_64_non_lazy_bnd_plt_entry, /* plt_entry */
901 elf_x86_64_non_lazy_bnd_plt_entry, /* pic_plt_entry */
902 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
903 1+2, /* plt_got_offset */
904 1+6, /* plt_got_insn_size */
905 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
906 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
907 };
908
909 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_ibt_plt =
910 {
911 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
912 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
913 elf_x86_64_lazy_ibt_plt_entry, /* plt_entry */
914 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
915 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
916 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
917 6, /* plt_tlsdesc_got1_offset */
918 12, /* plt_tlsdesc_got2_offset */
919 10, /* plt_tlsdesc_got1_insn_end */
920 16, /* plt_tlsdesc_got2_insn_end */
921 2, /* plt0_got1_offset */
922 1+8, /* plt0_got2_offset */
923 1+12, /* plt0_got2_insn_end */
924 4+1+2, /* plt_got_offset */
925 4+1, /* plt_reloc_offset */
926 4+1+6, /* plt_plt_offset */
927 4+1+6, /* plt_got_insn_size */
928 4+1+5+5, /* plt_plt_insn_end */
929 0, /* plt_lazy_offset */
930 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
931 elf_x86_64_lazy_ibt_plt_entry, /* pic_plt_entry */
932 elf_x86_64_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
933 sizeof (elf_x86_64_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
934 };
935
936 static const struct elf_x86_lazy_plt_layout elf_x32_lazy_ibt_plt =
937 {
938 elf_x86_64_lazy_plt0_entry, /* plt0_entry */
939 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
940 elf_x32_lazy_ibt_plt_entry, /* plt_entry */
941 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
942 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
943 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
944 6, /* plt_tlsdesc_got1_offset */
945 12, /* plt_tlsdesc_got2_offset */
946 10, /* plt_tlsdesc_got1_insn_end */
947 16, /* plt_tlsdesc_got2_insn_end */
948 2, /* plt0_got1_offset */
949 8, /* plt0_got2_offset */
950 12, /* plt0_got2_insn_end */
951 4+2, /* plt_got_offset */
952 4+1, /* plt_reloc_offset */
953 4+6, /* plt_plt_offset */
954 4+6, /* plt_got_insn_size */
955 4+5+5, /* plt_plt_insn_end */
956 0, /* plt_lazy_offset */
957 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
958 elf_x32_lazy_ibt_plt_entry, /* pic_plt_entry */
959 elf_x32_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
960 sizeof (elf_x32_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
961 };
962
963 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_ibt_plt =
964 {
965 elf_x86_64_non_lazy_ibt_plt_entry, /* plt_entry */
966 elf_x86_64_non_lazy_ibt_plt_entry, /* pic_plt_entry */
967 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
968 4+1+2, /* plt_got_offset */
969 4+1+6, /* plt_got_insn_size */
970 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
971 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
972 };
973
974 static const struct elf_x86_non_lazy_plt_layout elf_x32_non_lazy_ibt_plt =
975 {
976 elf_x32_non_lazy_ibt_plt_entry, /* plt_entry */
977 elf_x32_non_lazy_ibt_plt_entry, /* pic_plt_entry */
978 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
979 4+2, /* plt_got_offset */
980 4+6, /* plt_got_insn_size */
981 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
982 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
983 };
984
985
986 static bool
987 elf64_x86_64_elf_object_p (bfd *abfd)
988 {
989 /* Set the right machine number for an x86-64 elf64 file. */
990 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64);
991 return true;
992 }
993
994 static bool
995 elf32_x86_64_elf_object_p (bfd *abfd)
996 {
997 /* Set the right machine number for an x86-64 elf32 file. */
998 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32);
999 return true;
1000 }
1001
1002 /* Return TRUE if the TLS access code sequence supports the transition
1003 from R_TYPE. */
1004
1005 static bool
1006 elf_x86_64_check_tls_transition (bfd *abfd,
1007 struct bfd_link_info *info,
1008 asection *sec,
1009 bfd_byte *contents,
1010 Elf_Internal_Shdr *symtab_hdr,
1011 struct elf_link_hash_entry **sym_hashes,
1012 unsigned int r_type,
1013 const Elf_Internal_Rela *rel,
1014 const Elf_Internal_Rela *relend)
1015 {
1016 unsigned int val;
1017 unsigned long r_symndx;
1018 bool largepic = false;
1019 struct elf_link_hash_entry *h;
1020 bfd_vma offset;
1021 struct elf_x86_link_hash_table *htab;
1022 bfd_byte *call;
1023 bool indirect_call;
1024
1025 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1026 offset = rel->r_offset;
1027 switch (r_type)
1028 {
1029 case R_X86_64_TLSGD:
1030 case R_X86_64_TLSLD:
1031 if ((rel + 1) >= relend)
1032 return false;
1033
1034 if (r_type == R_X86_64_TLSGD)
1035 {
1036 /* Check transition from GD access model. For 64-bit, only
1037 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1038 .word 0x6666; rex64; call __tls_get_addr@PLT
1039 or
1040 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1041 .byte 0x66; rex64
1042 call *__tls_get_addr@GOTPCREL(%rip)
1043 which may be converted to
1044 addr32 call __tls_get_addr
1045 can transit to a different access model. For 32-bit, only
1046 leaq foo@tlsgd(%rip), %rdi
1047 .word 0x6666; rex64; call __tls_get_addr@PLT
1048 or
1049 leaq foo@tlsgd(%rip), %rdi
1050 .byte 0x66; rex64
1051 call *__tls_get_addr@GOTPCREL(%rip)
1052 which may be converted to
1053 addr32 call __tls_get_addr
1054 can transit to a different access model. For largepic,
1055 we also support:
1056 leaq foo@tlsgd(%rip), %rdi
1057 movabsq $__tls_get_addr@pltoff, %rax
1058 addq %r15, %rax
1059 call *%rax
1060 or
1061 leaq foo@tlsgd(%rip), %rdi
1062 movabsq $__tls_get_addr@pltoff, %rax
1063 addq %rbx, %rax
1064 call *%rax */
1065
1066 static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d };
1067
1068 if ((offset + 12) > sec->size)
1069 return false;
1070
1071 call = contents + offset + 4;
1072 if (call[0] != 0x66
1073 || !((call[1] == 0x48
1074 && call[2] == 0xff
1075 && call[3] == 0x15)
1076 || (call[1] == 0x48
1077 && call[2] == 0x67
1078 && call[3] == 0xe8)
1079 || (call[1] == 0x66
1080 && call[2] == 0x48
1081 && call[3] == 0xe8)))
1082 {
1083 if (!ABI_64_P (abfd)
1084 || (offset + 19) > sec->size
1085 || offset < 3
1086 || memcmp (call - 7, leaq + 1, 3) != 0
1087 || memcmp (call, "\x48\xb8", 2) != 0
1088 || call[11] != 0x01
1089 || call[13] != 0xff
1090 || call[14] != 0xd0
1091 || !((call[10] == 0x48 && call[12] == 0xd8)
1092 || (call[10] == 0x4c && call[12] == 0xf8)))
1093 return false;
1094 largepic = true;
1095 }
1096 else if (ABI_64_P (abfd))
1097 {
1098 if (offset < 4
1099 || memcmp (contents + offset - 4, leaq, 4) != 0)
1100 return false;
1101 }
1102 else
1103 {
1104 if (offset < 3
1105 || memcmp (contents + offset - 3, leaq + 1, 3) != 0)
1106 return false;
1107 }
1108 indirect_call = call[2] == 0xff;
1109 }
1110 else
1111 {
1112 /* Check transition from LD access model. Only
1113 leaq foo@tlsld(%rip), %rdi;
1114 call __tls_get_addr@PLT
1115 or
1116 leaq foo@tlsld(%rip), %rdi;
1117 call *__tls_get_addr@GOTPCREL(%rip)
1118 which may be converted to
1119 addr32 call __tls_get_addr
1120 can transit to a different access model. For largepic
1121 we also support:
1122 leaq foo@tlsld(%rip), %rdi
1123 movabsq $__tls_get_addr@pltoff, %rax
1124 addq %r15, %rax
1125 call *%rax
1126 or
1127 leaq foo@tlsld(%rip), %rdi
1128 movabsq $__tls_get_addr@pltoff, %rax
1129 addq %rbx, %rax
1130 call *%rax */
1131
1132 static const unsigned char lea[] = { 0x48, 0x8d, 0x3d };
1133
1134 if (offset < 3 || (offset + 9) > sec->size)
1135 return false;
1136
1137 if (memcmp (contents + offset - 3, lea, 3) != 0)
1138 return false;
1139
1140 call = contents + offset + 4;
1141 if (!(call[0] == 0xe8
1142 || (call[0] == 0xff && call[1] == 0x15)
1143 || (call[0] == 0x67 && call[1] == 0xe8)))
1144 {
1145 if (!ABI_64_P (abfd)
1146 || (offset + 19) > sec->size
1147 || memcmp (call, "\x48\xb8", 2) != 0
1148 || call[11] != 0x01
1149 || call[13] != 0xff
1150 || call[14] != 0xd0
1151 || !((call[10] == 0x48 && call[12] == 0xd8)
1152 || (call[10] == 0x4c && call[12] == 0xf8)))
1153 return false;
1154 largepic = true;
1155 }
1156 indirect_call = call[0] == 0xff;
1157 }
1158
1159 r_symndx = htab->r_sym (rel[1].r_info);
1160 if (r_symndx < symtab_hdr->sh_info)
1161 return false;
1162
1163 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1164 if (h == NULL
1165 || !((struct elf_x86_link_hash_entry *) h)->tls_get_addr)
1166 return false;
1167 else
1168 {
1169 r_type = (ELF32_R_TYPE (rel[1].r_info)
1170 & ~R_X86_64_converted_reloc_bit);
1171 if (largepic)
1172 return r_type == R_X86_64_PLTOFF64;
1173 else if (indirect_call)
1174 return r_type == R_X86_64_GOTPCRELX;
1175 else
1176 return (r_type == R_X86_64_PC32 || r_type == R_X86_64_PLT32);
1177 }
1178
1179 case R_X86_64_GOTTPOFF:
1180 /* Check transition from IE access model:
1181 mov foo@gottpoff(%rip), %reg
1182 add foo@gottpoff(%rip), %reg
1183 */
1184
1185 /* Check REX prefix first. */
1186 if (offset >= 3 && (offset + 4) <= sec->size)
1187 {
1188 val = bfd_get_8 (abfd, contents + offset - 3);
1189 if (val != 0x48 && val != 0x4c)
1190 {
1191 /* X32 may have 0x44 REX prefix or no REX prefix. */
1192 if (ABI_64_P (abfd))
1193 return false;
1194 }
1195 }
1196 else
1197 {
1198 /* X32 may not have any REX prefix. */
1199 if (ABI_64_P (abfd))
1200 return false;
1201 if (offset < 2 || (offset + 3) > sec->size)
1202 return false;
1203 }
1204
1205 val = bfd_get_8 (abfd, contents + offset - 2);
1206 if (val != 0x8b && val != 0x03)
1207 return false;
1208
1209 val = bfd_get_8 (abfd, contents + offset - 1);
1210 return (val & 0xc7) == 5;
1211
1212 case R_X86_64_GOTPC32_TLSDESC:
1213 /* Check transition from GDesc access model:
1214 leaq x@tlsdesc(%rip), %rax <--- LP64 mode.
1215 rex leal x@tlsdesc(%rip), %eax <--- X32 mode.
1216
1217 Make sure it's a leaq adding rip to a 32-bit offset
1218 into any register, although it's probably almost always
1219 going to be rax. */
1220
1221 if (offset < 3 || (offset + 4) > sec->size)
1222 return false;
1223
1224 val = bfd_get_8 (abfd, contents + offset - 3);
1225 val &= 0xfb;
1226 if (val != 0x48 && (ABI_64_P (abfd) || val != 0x40))
1227 return false;
1228
1229 if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d)
1230 return false;
1231
1232 val = bfd_get_8 (abfd, contents + offset - 1);
1233 return (val & 0xc7) == 0x05;
1234
1235 case R_X86_64_TLSDESC_CALL:
1236 /* Check transition from GDesc access model:
1237 call *x@tlsdesc(%rax) <--- LP64 mode.
1238 call *x@tlsdesc(%eax) <--- X32 mode.
1239 */
1240 if (offset + 2 <= sec->size)
1241 {
1242 unsigned int prefix;
1243 call = contents + offset;
1244 prefix = 0;
1245 if (!ABI_64_P (abfd))
1246 {
1247 /* Check for call *x@tlsdesc(%eax). */
1248 if (call[0] == 0x67)
1249 {
1250 prefix = 1;
1251 if (offset + 3 > sec->size)
1252 return false;
1253 }
1254 }
1255 /* Make sure that it's a call *x@tlsdesc(%rax). */
1256 return call[prefix] == 0xff && call[1 + prefix] == 0x10;
1257 }
1258
1259 return false;
1260
1261 default:
1262 abort ();
1263 }
1264 }
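
/* For reference, the instruction encodings that the byte checks above
   are matching (derived from the AT&T sequences quoted in the
   comments):

     .byte 0x66; leaq foo@tlsgd(%rip), %rdi   66 48 8d 3d <disp32>
     leaq foo@tlsld(%rip), %rdi               48 8d 3d <disp32>
     movq foo@gottpoff(%rip), %reg            [rex] 8b <modrm> <disp32>
     addq foo@gottpoff(%rip), %reg            [rex] 03 <modrm> <disp32>
     leaq x@tlsdesc(%rip), %rax               48 8d 05 <disp32>
     call *x@tlsdesc(%rax)                    ff 10

   RIP-relative addressing uses mod == 00 and r/m == 101, which is why
   the IE and GDesc cases require (modrm & 0xc7) == 0x05 on the byte
   just before the relocated displacement.  */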
1265
1266 /* Return TRUE if the TLS access transition is OK or no transition
1267 will be performed. Update R_TYPE if there is a transition. */
1268
1269 static bool
1270 elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd,
1271 asection *sec, bfd_byte *contents,
1272 Elf_Internal_Shdr *symtab_hdr,
1273 struct elf_link_hash_entry **sym_hashes,
1274 unsigned int *r_type, int tls_type,
1275 const Elf_Internal_Rela *rel,
1276 const Elf_Internal_Rela *relend,
1277 struct elf_link_hash_entry *h,
1278 unsigned long r_symndx,
1279 bool from_relocate_section)
1280 {
1281 unsigned int from_type = *r_type;
1282 unsigned int to_type = from_type;
1283 bool check = true;
1284
1285 /* Skip TLS transition for functions. */
1286 if (h != NULL
1287 && (h->type == STT_FUNC
1288 || h->type == STT_GNU_IFUNC))
1289 return true;
1290
1291 switch (from_type)
1292 {
1293 case R_X86_64_TLSGD:
1294 case R_X86_64_GOTPC32_TLSDESC:
1295 case R_X86_64_TLSDESC_CALL:
1296 case R_X86_64_GOTTPOFF:
1297 if (bfd_link_executable (info))
1298 {
1299 if (h == NULL)
1300 to_type = R_X86_64_TPOFF32;
1301 else
1302 to_type = R_X86_64_GOTTPOFF;
1303 }
1304
1305 /* When we are called from elf_x86_64_relocate_section, there may
1306 be additional transitions based on TLS_TYPE. */
1307 if (from_relocate_section)
1308 {
1309 unsigned int new_to_type = to_type;
1310
1311 if (TLS_TRANSITION_IE_TO_LE_P (info, h, tls_type))
1312 new_to_type = R_X86_64_TPOFF32;
1313
1314 if (to_type == R_X86_64_TLSGD
1315 || to_type == R_X86_64_GOTPC32_TLSDESC
1316 || to_type == R_X86_64_TLSDESC_CALL)
1317 {
1318 if (tls_type == GOT_TLS_IE)
1319 new_to_type = R_X86_64_GOTTPOFF;
1320 }
1321
1322 /* We checked the transition before when we were called from
1323 elf_x86_64_check_relocs. We only want to check the new
1324 transition which hasn't been checked before. */
1325 check = new_to_type != to_type && from_type == to_type;
1326 to_type = new_to_type;
1327 }
1328
1329 break;
1330
1331 case R_X86_64_TLSLD:
1332 if (bfd_link_executable (info))
1333 to_type = R_X86_64_TPOFF32;
1334 break;
1335
1336 default:
1337 return true;
1338 }
1339
1340 /* Return TRUE if there is no transition. */
1341 if (from_type == to_type)
1342 return true;
1343
1344 /* Check if the transition can be performed. */
1345 if (check
1346 && ! elf_x86_64_check_tls_transition (abfd, info, sec, contents,
1347 symtab_hdr, sym_hashes,
1348 from_type, rel, relend))
1349 {
1350 reloc_howto_type *from, *to;
1351 const char *name;
1352
1353 from = elf_x86_64_rtype_to_howto (abfd, from_type);
1354 to = elf_x86_64_rtype_to_howto (abfd, to_type);
1355
1356 if (from == NULL || to == NULL)
1357 return false;
1358
1359 if (h)
1360 name = h->root.root.string;
1361 else
1362 {
1363 struct elf_x86_link_hash_table *htab;
1364
1365 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1366 if (htab == NULL)
1367 name = "*unknown*";
1368 else
1369 {
1370 Elf_Internal_Sym *isym;
1371
1372 isym = bfd_sym_from_r_symndx (&htab->elf.sym_cache,
1373 abfd, r_symndx);
1374 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1375 }
1376 }
1377
1378 _bfd_error_handler
1379 /* xgettext:c-format */
1380 (_("%pB: TLS transition from %s to %s against `%s' at %#" PRIx64
1381 " in section `%pA' failed"),
1382 abfd, from->name, to->name, name, (uint64_t) rel->r_offset, sec);
1383 bfd_set_error (bfd_error_bad_value);
1384 return false;
1385 }
1386
1387 *r_type = to_type;
1388 return true;
1389 }
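
/* A summary of the transitions performed above (derived from the switch
   statement):

     R_X86_64_TLSGD, R_X86_64_GOTPC32_TLSDESC, R_X86_64_TLSDESC_CALL and
     R_X86_64_GOTTPOFF: in an executable, go to R_X86_64_TPOFF32 (LE) for
     local symbols and to R_X86_64_GOTTPOFF (IE) for global ones.

     R_X86_64_TLSLD: in an executable, go to R_X86_64_TPOFF32.

   When called from elf_x86_64_relocate_section, a further IE->LE step is
   taken if TLS_TRANSITION_IE_TO_LE_P holds, and GD/GDesc drop to IE when
   the GOT slot was allocated as GOT_TLS_IE.  All other relocation types
   are left unchanged.  */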
1390
1391 /* Rename some of the generic section flags to better document how they
1392 are used here. */
1393 #define check_relocs_failed sec_flg0
1394
1395 static bool
1396 elf_x86_64_need_pic (struct bfd_link_info *info,
1397 bfd *input_bfd, asection *sec,
1398 struct elf_link_hash_entry *h,
1399 Elf_Internal_Shdr *symtab_hdr,
1400 Elf_Internal_Sym *isym,
1401 reloc_howto_type *howto)
1402 {
1403 const char *v = "";
1404 const char *und = "";
1405 const char *pic = "";
1406 const char *object;
1407
1408 const char *name;
1409 if (h)
1410 {
1411 name = h->root.root.string;
1412 switch (ELF_ST_VISIBILITY (h->other))
1413 {
1414 case STV_HIDDEN:
1415 v = _("hidden symbol ");
1416 break;
1417 case STV_INTERNAL:
1418 v = _("internal symbol ");
1419 break;
1420 case STV_PROTECTED:
1421 v = _("protected symbol ");
1422 break;
1423 default:
1424 if (((struct elf_x86_link_hash_entry *) h)->def_protected)
1425 v = _("protected symbol ");
1426 else
1427 v = _("symbol ");
1428 pic = NULL;
1429 break;
1430 }
1431
1432 if (!SYMBOL_DEFINED_NON_SHARED_P (h) && !h->def_dynamic)
1433 und = _("undefined ");
1434 }
1435 else
1436 {
1437 name = bfd_elf_sym_name (input_bfd, symtab_hdr, isym, NULL);
1438 pic = NULL;
1439 }
1440
1441 if (bfd_link_dll (info))
1442 {
1443 object = _("a shared object");
1444 if (!pic)
1445 pic = _("; recompile with -fPIC");
1446 }
1447 else
1448 {
1449 if (bfd_link_pie (info))
1450 object = _("a PIE object");
1451 else
1452 object = _("a PDE object");
1453 if (!pic)
1454 pic = _("; recompile with -fPIE");
1455 }
1456
1457 /* xgettext:c-format */
1458 _bfd_error_handler (_("%pB: relocation %s against %s%s`%s' can "
1459 "not be used when making %s%s"),
1460 input_bfd, howto->name, und, v, name,
1461 object, pic);
1462 bfd_set_error (bfd_error_bad_value);
1463 sec->check_relocs_failed = 1;
1464 return false;
1465 }
1466
1467 /* With the local symbol, foo, we convert
1468 mov foo@GOTPCREL(%rip), %reg
1469 to
1470 lea foo(%rip), %reg
1471 and convert
1472 call/jmp *foo@GOTPCREL(%rip)
1473 to
1474 nop call foo/jmp foo nop
1475 When PIC is false, convert
1476 test %reg, foo@GOTPCREL(%rip)
1477 to
1478 test $foo, %reg
1479 and convert
1480 binop foo@GOTPCREL(%rip), %reg
1481 to
1482 binop $foo, %reg
1483 where binop is one of adc, add, and, cmp, or, sbb, sub, xor
1484 instructions. */
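
/* A worked example of the rewrites described above, using %rax as the
   register (the byte patterns follow from the standard x86-64
   encodings; the pad byte for the call form depends on the -z call-nop=
   setting, addr32 being used for __tls_get_addr):

     mov foo@GOTPCREL(%rip), %rax    48 8b 05 <disp32>
       -> lea foo(%rip), %rax        48 8d 05 <disp32>
     call *foo@GOTPCREL(%rip)        ff 15 <disp32>
       -> addr32 call foo            67 e8 <rel32>
     jmp *foo@GOTPCREL(%rip)         ff 25 <disp32>
       -> jmp foo; nop               e9 <rel32> 90

   The instructions are rewritten in place: only the opcode, ModRM and
   (for the immediate forms) REX bytes change, the 32-bit displacement
   slot is reused (moved one byte earlier in the jmp case), and the
   relocation is retyped to R_X86_64_PC32, R_X86_64_32 or
   R_X86_64_32S.  */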
1485
1486 static bool
1487 elf_x86_64_convert_load_reloc (bfd *abfd,
1488 bfd_byte *contents,
1489 unsigned int *r_type_p,
1490 Elf_Internal_Rela *irel,
1491 struct elf_link_hash_entry *h,
1492 bool *converted,
1493 struct bfd_link_info *link_info)
1494 {
1495 struct elf_x86_link_hash_table *htab;
1496 bool is_pic;
1497 bool no_overflow;
1498 bool relocx;
1499 bool to_reloc_pc32;
1500 bool abs_symbol;
1501 bool local_ref;
1502 asection *tsec;
1503 bfd_signed_vma raddend;
1504 unsigned int opcode;
1505 unsigned int modrm;
1506 unsigned int r_type = *r_type_p;
1507 unsigned int r_symndx;
1508 bfd_vma roff = irel->r_offset;
1509 bfd_vma abs_relocation;
1510
1511 if (roff < (r_type == R_X86_64_REX_GOTPCRELX ? 3 : 2))
1512 return true;
1513
1514 raddend = irel->r_addend;
1515 /* Addend for 32-bit PC-relative relocation must be -4. */
1516 if (raddend != -4)
1517 return true;
1518
1519 htab = elf_x86_hash_table (link_info, X86_64_ELF_DATA);
1520 is_pic = bfd_link_pic (link_info);
1521
1522 relocx = (r_type == R_X86_64_GOTPCRELX
1523 || r_type == R_X86_64_REX_GOTPCRELX);
1524
1525 /* TRUE if --no-relax is used. */
1526 no_overflow = link_info->disable_target_specific_optimizations > 1;
1527
1528 r_symndx = htab->r_sym (irel->r_info);
1529
1530 opcode = bfd_get_8 (abfd, contents + roff - 2);
1531
1532 /* Convert mov unconditionally, even for plain R_X86_64_GOTPCREL, since that has been done for a while. */
1533 if (opcode != 0x8b)
1534 {
1535 /* Only convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX
1536 for call, jmp or one of adc, add, and, cmp, or, sbb, sub,
1537 test, xor instructions. */
1538 if (!relocx)
1539 return true;
1540 }
1541
1542 /* We convert only to R_X86_64_PC32:
1543 1. Branch.
1544 2. R_X86_64_GOTPCREL since we can't modify REX byte.
1545 3. no_overflow is true.
1546 4. PIC.
1547 */
1548 to_reloc_pc32 = (opcode == 0xff
1549 || !relocx
1550 || no_overflow
1551 || is_pic);
1552
1553 abs_symbol = false;
1554 abs_relocation = 0;
1555
1556 /* Get the symbol referred to by the reloc. */
1557 if (h == NULL)
1558 {
1559 Elf_Internal_Sym *isym
1560 = bfd_sym_from_r_symndx (&htab->elf.sym_cache, abfd, r_symndx);
1561
1562 /* Skip relocation against undefined symbols. */
1563 if (isym->st_shndx == SHN_UNDEF)
1564 return true;
1565
1566 local_ref = true;
1567 if (isym->st_shndx == SHN_ABS)
1568 {
1569 tsec = bfd_abs_section_ptr;
1570 abs_symbol = true;
1571 abs_relocation = isym->st_value;
1572 }
1573 else if (isym->st_shndx == SHN_COMMON)
1574 tsec = bfd_com_section_ptr;
1575 else if (isym->st_shndx == SHN_X86_64_LCOMMON)
1576 tsec = &_bfd_elf_large_com_section;
1577 else
1578 tsec = bfd_section_from_elf_index (abfd, isym->st_shndx);
1579 }
1580 else
1581 {
1582 /* An undefined weak symbol is only bound locally in an executable
1583 and its reference is resolved as 0 without relocation
1584 overflow. We can only perform this optimization for
1585 GOTPCRELX relocations since we need to modify the REX byte.
1586 It is OK to convert mov with R_X86_64_GOTPCREL to
1587 R_X86_64_PC32. */
1588 struct elf_x86_link_hash_entry *eh = elf_x86_hash_entry (h);
1589
1590 abs_symbol = ABS_SYMBOL_P (h);
1591 abs_relocation = h->root.u.def.value;
1592
1593 /* NB: Also set linker_def via SYMBOL_REFERENCES_LOCAL_P. */
1594 local_ref = SYMBOL_REFERENCES_LOCAL_P (link_info, h);
1595 if ((relocx || opcode == 0x8b)
1596 && (h->root.type == bfd_link_hash_undefweak
1597 && !eh->linker_def
1598 && local_ref))
1599 {
1600 if (opcode == 0xff)
1601 {
1602 /* Skip for branch instructions since R_X86_64_PC32
1603 may overflow. */
1604 if (no_overflow)
1605 return true;
1606 }
1607 else if (relocx)
1608 {
1609 /* For non-branch instructions, we can convert to
1610 R_X86_64_32/R_X86_64_32S since we know if there
1611 is a REX byte. */
1612 to_reloc_pc32 = false;
1613 }
1614
1615 /* Since we don't know the current PC when PIC is true,
1616 we can't convert to R_X86_64_PC32. */
1617 if (to_reloc_pc32 && is_pic)
1618 return true;
1619
1620 goto convert;
1621 }
1622 /* Avoid optimizing GOTPCREL relocations against _DYNAMIC since
1623 ld.so may use its link-time address. */
1624 else if (h->start_stop
1625 || eh->linker_def
1626 || ((h->def_regular
1627 || h->root.type == bfd_link_hash_defined
1628 || h->root.type == bfd_link_hash_defweak)
1629 && h != htab->elf.hdynamic
1630 && local_ref))
1631 {
1632 /* bfd_link_hash_new or bfd_link_hash_undefined is
1633 set by an assignment in a linker script in
1634 bfd_elf_record_link_assignment. start_stop is set
1635 on __start_SECNAME/__stop_SECNAME which mark section
1636 SECNAME. */
1637 if (h->start_stop
1638 || eh->linker_def
1639 || (h->def_regular
1640 && (h->root.type == bfd_link_hash_new
1641 || h->root.type == bfd_link_hash_undefined
1642 || ((h->root.type == bfd_link_hash_defined
1643 || h->root.type == bfd_link_hash_defweak)
1644 && h->root.u.def.section == bfd_und_section_ptr))))
1645 {
1646 /* Skip since R_X86_64_32/R_X86_64_32S may overflow. */
1647 if (no_overflow)
1648 return true;
1649 goto convert;
1650 }
1651 tsec = h->root.u.def.section;
1652 }
1653 else
1654 return true;
1655 }
1656
1657 /* Don't convert GOTPCREL relocations against a large section. */
1658 if (elf_section_data (tsec) != NULL
1659 && (elf_section_flags (tsec) & SHF_X86_64_LARGE) != 0)
1660 return true;
1661
1662 /* Skip since R_X86_64_PC32/R_X86_64_32/R_X86_64_32S may overflow. */
1663 if (no_overflow)
1664 return true;
1665
1666 convert:
1667 if (opcode == 0xff)
1668 {
1669 /* We have "call/jmp *foo@GOTPCREL(%rip)". */
1670 unsigned int nop;
1671 unsigned int disp;
1672 bfd_vma nop_offset;
1673
1674 /* Convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX to
1675 R_X86_64_PC32. */
1676 modrm = bfd_get_8 (abfd, contents + roff - 1);
1677 if (modrm == 0x25)
1678 {
1679 /* Convert to "jmp foo nop". */
1680 modrm = 0xe9;
1681 nop = NOP_OPCODE;
1682 nop_offset = irel->r_offset + 3;
1683 disp = bfd_get_32 (abfd, contents + irel->r_offset);
1684 irel->r_offset -= 1;
1685 bfd_put_32 (abfd, disp, contents + irel->r_offset);
1686 }
1687 else
1688 {
1689 struct elf_x86_link_hash_entry *eh
1690 = (struct elf_x86_link_hash_entry *) h;
1691
1692 /* Convert to "nop call foo". ADDR_PREFIX_OPCODE
1693 is a nop prefix. */
1694 modrm = 0xe8;
1695 /* To support TLS optimization, always use addr32 prefix for
1696 "call *__tls_get_addr@GOTPCREL(%rip)". */
1697 if (eh && eh->tls_get_addr)
1698 {
1699 nop = 0x67;
1700 nop_offset = irel->r_offset - 2;
1701 }
1702 else
1703 {
1704 nop = htab->params->call_nop_byte;
1705 if (htab->params->call_nop_as_suffix)
1706 {
1707 nop_offset = irel->r_offset + 3;
1708 disp = bfd_get_32 (abfd, contents + irel->r_offset);
1709 irel->r_offset -= 1;
1710 bfd_put_32 (abfd, disp, contents + irel->r_offset);
1711 }
1712 else
1713 nop_offset = irel->r_offset - 2;
1714 }
1715 }
1716 bfd_put_8 (abfd, nop, contents + nop_offset);
1717 bfd_put_8 (abfd, modrm, contents + irel->r_offset - 1);
1718 r_type = R_X86_64_PC32;
1719 }
1720 else
1721 {
1722 unsigned int rex;
1723 unsigned int rex_mask = REX_R;
1724
1725 if (r_type == R_X86_64_REX_GOTPCRELX)
1726 rex = bfd_get_8 (abfd, contents + roff - 3);
1727 else
1728 rex = 0;
1729
1730 if (opcode == 0x8b)
1731 {
1732 if (abs_symbol && local_ref && relocx)
1733 to_reloc_pc32 = false;
1734
1735 if (to_reloc_pc32)
1736 {
1737 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
1738 "lea foo(%rip), %reg". */
1739 opcode = 0x8d;
1740 r_type = R_X86_64_PC32;
1741 }
1742 else
1743 {
1744 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
1745 "mov $foo, %reg". */
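		  /* NB: "mov foo@GOTPCREL(%rip), %reg" is 0x8b with a
		     RIP-relative ModRM (mod=00, rm=101); the rewrite below
		     assumes the immediate form 0xc7 /0, moving the
		     destination register from the ModRM reg field into the
		     rm field with mod=11.  Both forms carry a 32-bit
		     operand, so the instruction length is unchanged.  */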
1746 opcode = 0xc7;
1747 modrm = bfd_get_8 (abfd, contents + roff - 1);
1748 modrm = 0xc0 | (modrm & 0x38) >> 3;
1749 if ((rex & REX_W) != 0
1750 && ABI_64_P (link_info->output_bfd))
1751 {
1752 /* Keep the REX_W bit in REX byte for LP64. */
1753 r_type = R_X86_64_32S;
1754 goto rewrite_modrm_rex;
1755 }
1756 else
1757 {
1758 /* If the REX_W bit in REX byte isn't needed,
1759 use R_X86_64_32 and clear the W bit to avoid
1760 			 sign-extending imm32 to imm64.  */
1761 r_type = R_X86_64_32;
1762 /* Clear the W bit in REX byte. */
1763 rex_mask |= REX_W;
1764 goto rewrite_modrm_rex;
1765 }
1766 }
1767 }
1768 else
1769 {
1770 /* R_X86_64_PC32 isn't supported. */
1771 if (to_reloc_pc32)
1772 return true;
1773
1774 modrm = bfd_get_8 (abfd, contents + roff - 1);
1775 if (opcode == 0x85)
1776 {
1777 /* Convert "test %reg, foo@GOTPCREL(%rip)" to
1778 "test $foo, %reg". */
1779 modrm = 0xc0 | (modrm & 0x38) >> 3;
1780 opcode = 0xf7;
1781 }
1782 else
1783 {
1784 /* Convert "binop foo@GOTPCREL(%rip), %reg" to
1785 "binop $foo, %reg". */
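		      /* NB: for the memory-source ALU opcodes reaching here
			 (0x03 add, 0x0b or, 0x13 adc, 0x1b sbb, 0x23 and,
			 0x2b sub, 0x33 xor, 0x3b cmp), bits 3-5 of the
			 opcode equal the /digit of the 0x81 immediate group,
			 which is what "opcode & 0x3c" copies into the ModRM
			 reg field below.  */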
1786 modrm = 0xc0 | (modrm & 0x38) >> 3 | (opcode & 0x3c);
1787 opcode = 0x81;
1788 }
1789
1790 /* Use R_X86_64_32 with 32-bit operand to avoid relocation
1791 overflow when sign-extending imm32 to imm64. */
1792 r_type = (rex & REX_W) != 0 ? R_X86_64_32S : R_X86_64_32;
1793
1794 rewrite_modrm_rex:
1795 if (abs_relocation)
1796 {
1797 /* Check if R_X86_64_32S/R_X86_64_32 fits. */
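		      /* NB: treating abs_relocation as an unsigned 64-bit
			 value, the biased compare below accepts
			 R_X86_64_32S only for values in
			 [-0x80000000, 0x7fffffff] and R_X86_64_32 only for
			 values that fit in 32 bits unsigned.  */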
1798 if (r_type == R_X86_64_32S)
1799 {
1800 if ((abs_relocation + 0x80000000) > 0xffffffff)
1801 return true;
1802 }
1803 else
1804 {
1805 if (abs_relocation > 0xffffffff)
1806 return true;
1807 }
1808 }
1809
1810 bfd_put_8 (abfd, modrm, contents + roff - 1);
1811
1812 if (rex)
1813 {
1814 /* Move the R bit to the B bit in REX byte. */
1815 rex = (rex & ~rex_mask) | (rex & REX_R) >> 2;
1816 bfd_put_8 (abfd, rex, contents + roff - 3);
1817 }
1818
1819 /* No addend for R_X86_64_32/R_X86_64_32S relocations. */
1820 irel->r_addend = 0;
1821 }
1822
1823 bfd_put_8 (abfd, opcode, contents + roff - 2);
1824 }
1825
1826 *r_type_p = r_type;
1827 irel->r_info = htab->r_info (r_symndx,
1828 r_type | R_X86_64_converted_reloc_bit);
1829
1830 *converted = true;
1831
1832 return true;
1833 }
1834
1835 /* Look through the relocs for a section during the first phase, and
1836 calculate needed space in the global offset table, procedure
1837 linkage table, and dynamic reloc sections. */
1838
1839 static bool
1840 elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info,
1841 asection *sec,
1842 const Elf_Internal_Rela *relocs)
1843 {
1844 struct elf_x86_link_hash_table *htab;
1845 Elf_Internal_Shdr *symtab_hdr;
1846 struct elf_link_hash_entry **sym_hashes;
1847 const Elf_Internal_Rela *rel;
1848 const Elf_Internal_Rela *rel_end;
1849 asection *sreloc;
1850 bfd_byte *contents;
1851 bool converted;
1852
1853 if (bfd_link_relocatable (info))
1854 return true;
1855
1856 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1857 if (htab == NULL)
1858 {
1859 sec->check_relocs_failed = 1;
1860 return false;
1861 }
1862
1863 BFD_ASSERT (is_x86_elf (abfd, htab));
1864
1865 /* Get the section contents. */
1866 if (elf_section_data (sec)->this_hdr.contents != NULL)
1867 contents = elf_section_data (sec)->this_hdr.contents;
1868 else if (!bfd_malloc_and_get_section (abfd, sec, &contents))
1869 {
1870 sec->check_relocs_failed = 1;
1871 return false;
1872 }
1873
1874 symtab_hdr = &elf_symtab_hdr (abfd);
1875 sym_hashes = elf_sym_hashes (abfd);
1876
1877 converted = false;
1878
1879 sreloc = NULL;
1880
1881 rel_end = relocs + sec->reloc_count;
1882 for (rel = relocs; rel < rel_end; rel++)
1883 {
1884 unsigned int r_type;
1885 unsigned int r_symndx;
1886 struct elf_link_hash_entry *h;
1887 struct elf_x86_link_hash_entry *eh;
1888 Elf_Internal_Sym *isym;
1889 const char *name;
1890 bool size_reloc;
1891 bool converted_reloc;
1892 bool no_dynreloc;
1893
1894 r_symndx = htab->r_sym (rel->r_info);
1895 r_type = ELF32_R_TYPE (rel->r_info);
1896
1897 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
1898 {
1899 /* xgettext:c-format */
1900 _bfd_error_handler (_("%pB: bad symbol index: %d"),
1901 abfd, r_symndx);
1902 goto error_return;
1903 }
1904
1905 if (r_symndx < symtab_hdr->sh_info)
1906 {
1907 /* A local symbol. */
1908 isym = bfd_sym_from_r_symndx (&htab->elf.sym_cache,
1909 abfd, r_symndx);
1910 if (isym == NULL)
1911 goto error_return;
1912
1913 /* Check relocation against local STT_GNU_IFUNC symbol. */
1914 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
1915 {
1916 h = _bfd_elf_x86_get_local_sym_hash (htab, abfd, rel,
1917 true);
1918 if (h == NULL)
1919 goto error_return;
1920
1921 /* Fake a STT_GNU_IFUNC symbol. */
1922 h->root.root.string = bfd_elf_sym_name (abfd, symtab_hdr,
1923 isym, NULL);
1924 h->type = STT_GNU_IFUNC;
1925 h->def_regular = 1;
1926 h->ref_regular = 1;
1927 h->forced_local = 1;
1928 h->root.type = bfd_link_hash_defined;
1929 }
1930 else
1931 h = NULL;
1932 }
1933 else
1934 {
1935 isym = NULL;
1936 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1937 while (h->root.type == bfd_link_hash_indirect
1938 || h->root.type == bfd_link_hash_warning)
1939 h = (struct elf_link_hash_entry *) h->root.u.i.link;
1940 }
1941
1942 /* Check invalid x32 relocations. */
1943 if (!ABI_64_P (abfd))
1944 switch (r_type)
1945 {
1946 default:
1947 break;
1948
1949 case R_X86_64_DTPOFF64:
1950 case R_X86_64_TPOFF64:
1951 case R_X86_64_PC64:
1952 case R_X86_64_GOTOFF64:
1953 case R_X86_64_GOT64:
1954 case R_X86_64_GOTPCREL64:
1955 case R_X86_64_GOTPC64:
1956 case R_X86_64_GOTPLT64:
1957 case R_X86_64_PLTOFF64:
1958 {
1959 if (h)
1960 name = h->root.root.string;
1961 else
1962 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1963 NULL);
1964 _bfd_error_handler
1965 /* xgettext:c-format */
1966 (_("%pB: relocation %s against symbol `%s' isn't "
1967 "supported in x32 mode"), abfd,
1968 x86_64_elf_howto_table[r_type].name, name);
1969 bfd_set_error (bfd_error_bad_value);
1970 goto error_return;
1971 }
1972 break;
1973 }
1974
1975 if (h != NULL)
1976 {
1977 /* It is referenced by a non-shared object. */
1978 h->ref_regular = 1;
1979 }
1980
1981 converted_reloc = false;
1982 if ((r_type == R_X86_64_GOTPCREL
1983 || r_type == R_X86_64_GOTPCRELX
1984 || r_type == R_X86_64_REX_GOTPCRELX)
1985 && (h == NULL || h->type != STT_GNU_IFUNC))
1986 {
1987 Elf_Internal_Rela *irel = (Elf_Internal_Rela *) rel;
1988 if (!elf_x86_64_convert_load_reloc (abfd, contents, &r_type,
1989 irel, h, &converted_reloc,
1990 info))
1991 goto error_return;
1992
1993 if (converted_reloc)
1994 converted = true;
1995 }
1996
1997 if (!_bfd_elf_x86_valid_reloc_p (sec, info, htab, rel, h, isym,
1998 symtab_hdr, &no_dynreloc))
1999 return false;
2000
2001 if (! elf_x86_64_tls_transition (info, abfd, sec, contents,
2002 symtab_hdr, sym_hashes,
2003 &r_type, GOT_UNKNOWN,
2004 rel, rel_end, h, r_symndx, false))
2005 goto error_return;
2006
2007 /* Check if _GLOBAL_OFFSET_TABLE_ is referenced. */
2008 if (h == htab->elf.hgot)
2009 htab->got_referenced = true;
2010
2011 eh = (struct elf_x86_link_hash_entry *) h;
2012 switch (r_type)
2013 {
2014 case R_X86_64_TLSLD:
2015 htab->tls_ld_or_ldm_got.refcount = 1;
2016 goto create_got;
2017
2018 case R_X86_64_TPOFF32:
2019 if (!bfd_link_executable (info) && ABI_64_P (abfd))
2020 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
2021 &x86_64_elf_howto_table[r_type]);
2022 if (eh != NULL)
2023 eh->zero_undefweak &= 0x2;
2024 break;
2025
2026 case R_X86_64_GOTTPOFF:
2027 if (!bfd_link_executable (info))
2028 info->flags |= DF_STATIC_TLS;
2029 /* Fall through */
2030
2031 case R_X86_64_GOT32:
2032 case R_X86_64_GOTPCREL:
2033 case R_X86_64_GOTPCRELX:
2034 case R_X86_64_REX_GOTPCRELX:
2035 case R_X86_64_TLSGD:
2036 case R_X86_64_GOT64:
2037 case R_X86_64_GOTPCREL64:
2038 case R_X86_64_GOTPLT64:
2039 case R_X86_64_GOTPC32_TLSDESC:
2040 case R_X86_64_TLSDESC_CALL:
2041 /* This symbol requires a global offset table entry. */
2042 {
2043 int tls_type, old_tls_type;
2044
2045 switch (r_type)
2046 {
2047 default:
2048 tls_type = GOT_NORMAL;
2049 if (h)
2050 {
2051 if (ABS_SYMBOL_P (h))
2052 tls_type = GOT_ABS;
2053 }
2054 else if (isym->st_shndx == SHN_ABS)
2055 tls_type = GOT_ABS;
2056 break;
2057 case R_X86_64_TLSGD:
2058 tls_type = GOT_TLS_GD;
2059 break;
2060 case R_X86_64_GOTTPOFF:
2061 tls_type = GOT_TLS_IE;
2062 break;
2063 case R_X86_64_GOTPC32_TLSDESC:
2064 case R_X86_64_TLSDESC_CALL:
2065 tls_type = GOT_TLS_GDESC;
2066 break;
2067 }
2068
2069 if (h != NULL)
2070 {
2071 h->got.refcount = 1;
2072 old_tls_type = eh->tls_type;
2073 }
2074 else
2075 {
2076 bfd_signed_vma *local_got_refcounts;
2077
2078 /* This is a global offset table entry for a local symbol. */
2079 local_got_refcounts = elf_local_got_refcounts (abfd);
2080 if (local_got_refcounts == NULL)
2081 {
2082 bfd_size_type size;
2083
2084 size = symtab_hdr->sh_info;
2085 size *= sizeof (bfd_signed_vma)
2086 + sizeof (bfd_vma) + sizeof (char);
2087 local_got_refcounts = ((bfd_signed_vma *)
2088 bfd_zalloc (abfd, size));
2089 if (local_got_refcounts == NULL)
2090 goto error_return;
2091 elf_local_got_refcounts (abfd) = local_got_refcounts;
2092 elf_x86_local_tlsdesc_gotent (abfd)
2093 = (bfd_vma *) (local_got_refcounts + symtab_hdr->sh_info);
2094 elf_x86_local_got_tls_type (abfd)
2095 = (char *) (local_got_refcounts + 2 * symtab_hdr->sh_info);
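	      /* NB: the single allocation above is carved into three
		 parallel per-local-symbol arrays: sh_info GOT refcounts
		 (bfd_signed_vma), then sh_info TLSDESC GOT offsets
		 (bfd_vma), then sh_info GOT TLS types (char).  */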
2096 }
2097 local_got_refcounts[r_symndx] = 1;
2098 old_tls_type
2099 = elf_x86_local_got_tls_type (abfd) [r_symndx];
2100 }
2101
2102 /* If a TLS symbol is accessed using IE at least once,
2103 	     there is no point in using the dynamic model for it.  */
2104 if (old_tls_type != tls_type && old_tls_type != GOT_UNKNOWN
2105 && (! GOT_TLS_GD_ANY_P (old_tls_type)
2106 || tls_type != GOT_TLS_IE))
2107 {
2108 if (old_tls_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (tls_type))
2109 tls_type = old_tls_type;
2110 else if (GOT_TLS_GD_ANY_P (old_tls_type)
2111 && GOT_TLS_GD_ANY_P (tls_type))
2112 tls_type |= old_tls_type;
2113 else
2114 {
2115 if (h)
2116 name = h->root.root.string;
2117 else
2118 name = bfd_elf_sym_name (abfd, symtab_hdr,
2119 isym, NULL);
2120 _bfd_error_handler
2121 /* xgettext:c-format */
2122 (_("%pB: '%s' accessed both as normal and"
2123 " thread local symbol"),
2124 abfd, name);
2125 bfd_set_error (bfd_error_bad_value);
2126 goto error_return;
2127 }
2128 }
2129
2130 if (old_tls_type != tls_type)
2131 {
2132 if (eh != NULL)
2133 eh->tls_type = tls_type;
2134 else
2135 elf_x86_local_got_tls_type (abfd) [r_symndx] = tls_type;
2136 }
2137 }
2138 /* Fall through */
2139
2140 case R_X86_64_GOTOFF64:
2141 case R_X86_64_GOTPC32:
2142 case R_X86_64_GOTPC64:
2143 create_got:
2144 if (eh != NULL)
2145 eh->zero_undefweak &= 0x2;
2146 break;
2147
2148 case R_X86_64_PLT32:
2149 case R_X86_64_PLT32_BND:
2150 /* This symbol requires a procedure linkage table entry. We
2151 actually build the entry in adjust_dynamic_symbol,
2152 because this might be a case of linking PIC code which is
2153 never referenced by a dynamic object, in which case we
2154 don't need to generate a procedure linkage table entry
2155 after all. */
2156
2157 /* If this is a local symbol, we resolve it directly without
2158 creating a procedure linkage table entry. */
2159 if (h == NULL)
2160 continue;
2161
2162 eh->zero_undefweak &= 0x2;
2163 h->needs_plt = 1;
2164 h->plt.refcount = 1;
2165 break;
2166
2167 case R_X86_64_PLTOFF64:
2168 /* This tries to form the 'address' of a function relative
2169 to GOT. For global symbols we need a PLT entry. */
2170 if (h != NULL)
2171 {
2172 h->needs_plt = 1;
2173 h->plt.refcount = 1;
2174 }
2175 goto create_got;
2176
2177 case R_X86_64_SIZE32:
2178 case R_X86_64_SIZE64:
2179 size_reloc = true;
2180 goto do_size;
2181
2182 case R_X86_64_32:
2183 if (!ABI_64_P (abfd))
2184 goto pointer;
2185 /* Fall through. */
2186 case R_X86_64_8:
2187 case R_X86_64_16:
2188 case R_X86_64_32S:
2189 /* Check relocation overflow as these relocs may lead to
2190 run-time relocation overflow. Don't error out for
2191 sections we don't care about, such as debug sections or
2192 when relocation overflow check is disabled. */
2193 if (!htab->params->no_reloc_overflow_check
2194 && !converted_reloc
2195 && (bfd_link_pic (info)
2196 || (bfd_link_executable (info)
2197 && h != NULL
2198 && !h->def_regular
2199 && h->def_dynamic
2200 && (sec->flags & SEC_READONLY) == 0)))
2201 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
2202 &x86_64_elf_howto_table[r_type]);
2203 /* Fall through. */
2204
2205 case R_X86_64_PC8:
2206 case R_X86_64_PC16:
2207 case R_X86_64_PC32:
2208 case R_X86_64_PC32_BND:
2209 case R_X86_64_PC64:
2210 case R_X86_64_64:
2211 pointer:
2212 if (eh != NULL && (sec->flags & SEC_CODE) != 0)
2213 eh->zero_undefweak |= 0x2;
2214 /* We are called after all symbols have been resolved. Only
2215 	     relocations against STT_GNU_IFUNC symbols must go through
2216 	     the PLT.  */
2217 if (h != NULL
2218 && (bfd_link_executable (info)
2219 || h->type == STT_GNU_IFUNC))
2220 {
2221 bool func_pointer_ref = false;
2222
2223 if (r_type == R_X86_64_PC32)
2224 {
2225 /* Since something like ".long foo - ." may be used
2226 		     as a pointer, make sure that the PLT is used if foo is
2227 a function defined in a shared library. */
2228 if ((sec->flags & SEC_CODE) == 0)
2229 {
2230 h->pointer_equality_needed = 1;
2231 if (bfd_link_pie (info)
2232 && h->type == STT_FUNC
2233 && !h->def_regular
2234 && h->def_dynamic)
2235 {
2236 h->needs_plt = 1;
2237 h->plt.refcount = 1;
2238 }
2239 }
2240 }
2241 else if (r_type != R_X86_64_PC32_BND
2242 && r_type != R_X86_64_PC64)
2243 {
2244 h->pointer_equality_needed = 1;
2245 /* At run-time, R_X86_64_64 can be resolved for both
2246 x86-64 and x32. But R_X86_64_32 and R_X86_64_32S
2247 can only be resolved for x32. */
2248 if ((sec->flags & SEC_READONLY) == 0
2249 && (r_type == R_X86_64_64
2250 || (!ABI_64_P (abfd)
2251 && (r_type == R_X86_64_32
2252 || r_type == R_X86_64_32S))))
2253 func_pointer_ref = true;
2254 }
2255
2256 if (!func_pointer_ref)
2257 {
2258 /* If this reloc is in a read-only section, we might
2259 need a copy reloc. We can't check reliably at this
2260 stage whether the section is read-only, as input
2261 sections have not yet been mapped to output sections.
2262 Tentatively set the flag for now, and correct in
2263 adjust_dynamic_symbol. */
2264 h->non_got_ref = 1;
2265
2266 /* We may need a .plt entry if the symbol is a function
2267 defined in a shared lib or is a function referenced
2268 from the code or read-only section. */
2269 if (!h->def_regular
2270 || (sec->flags & (SEC_CODE | SEC_READONLY)) != 0)
2271 h->plt.refcount = 1;
2272 }
2273 }
2274
2275 size_reloc = false;
2276 do_size:
2277 if (!no_dynreloc
2278 && NEED_DYNAMIC_RELOCATION_P (info, true, h, sec, r_type,
2279 htab->pointer_r_type))
2280 {
2281 struct elf_dyn_relocs *p;
2282 struct elf_dyn_relocs **head;
2283
2284 /* We must copy these reloc types into the output file.
2285 Create a reloc section in dynobj and make room for
2286 this reloc. */
2287 if (sreloc == NULL)
2288 {
2289 sreloc = _bfd_elf_make_dynamic_reloc_section
2290 (sec, htab->elf.dynobj, ABI_64_P (abfd) ? 3 : 2,
2291 abfd, /*rela?*/ true);
2292
2293 if (sreloc == NULL)
2294 goto error_return;
2295 }
2296
2297 /* If this is a global symbol, we count the number of
2298 relocations we need for this symbol. */
2299 if (h != NULL)
2300 head = &h->dyn_relocs;
2301 else
2302 {
2303 /* Track dynamic relocs needed for local syms too.
2304 We really need local syms available to do this
2305 easily. Oh well. */
2306 asection *s;
2307 void **vpp;
2308
2309 isym = bfd_sym_from_r_symndx (&htab->elf.sym_cache,
2310 abfd, r_symndx);
2311 if (isym == NULL)
2312 goto error_return;
2313
2314 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
2315 if (s == NULL)
2316 s = sec;
2317
2318 /* Beware of type punned pointers vs strict aliasing
2319 rules. */
2320 vpp = &(elf_section_data (s)->local_dynrel);
2321 head = (struct elf_dyn_relocs **)vpp;
2322 }
2323
2324 p = *head;
2325 if (p == NULL || p->sec != sec)
2326 {
2327 size_t amt = sizeof *p;
2328
2329 p = ((struct elf_dyn_relocs *)
2330 bfd_alloc (htab->elf.dynobj, amt));
2331 if (p == NULL)
2332 goto error_return;
2333 p->next = *head;
2334 *head = p;
2335 p->sec = sec;
2336 p->count = 0;
2337 p->pc_count = 0;
2338 }
2339
2340 p->count += 1;
2341 /* Count size relocation as PC-relative relocation. */
2342 if (X86_PCREL_TYPE_P (r_type) || size_reloc)
2343 p->pc_count += 1;
2344 }
2345 break;
2346
2347 /* This relocation describes the C++ object vtable hierarchy.
2348 Reconstruct it for later use during GC. */
2349 case R_X86_64_GNU_VTINHERIT:
2350 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
2351 goto error_return;
2352 break;
2353
2354 /* This relocation describes which C++ vtable entries are actually
2355 used. Record for later use during GC. */
2356 case R_X86_64_GNU_VTENTRY:
2357 if (!bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
2358 goto error_return;
2359 break;
2360
2361 default:
2362 break;
2363 }
2364 }
2365
2366 if (elf_section_data (sec)->this_hdr.contents != contents)
2367 {
2368 if (!converted && !info->keep_memory)
2369 free (contents);
2370 else
2371 {
2372 /* Cache the section contents for elf_link_input_bfd if any
2373 load is converted or --no-keep-memory isn't used. */
2374 elf_section_data (sec)->this_hdr.contents = contents;
2375 }
2376 }
2377
2378 /* Cache relocations if any load is converted. */
2379 if (elf_section_data (sec)->relocs != relocs && converted)
2380 elf_section_data (sec)->relocs = (Elf_Internal_Rela *) relocs;
2381
2382 return true;
2383
2384 error_return:
2385 if (elf_section_data (sec)->this_hdr.contents != contents)
2386 free (contents);
2387 sec->check_relocs_failed = 1;
2388 return false;
2389 }
2390
2391 /* Return the relocation value for @tpoff relocation
2392 if STT_TLS virtual address is ADDRESS. */
2393
2394 static bfd_vma
2395 elf_x86_64_tpoff (struct bfd_link_info *info, bfd_vma address)
2396 {
2397 struct elf_link_hash_table *htab = elf_hash_table (info);
2398 const struct elf_backend_data *bed = get_elf_backend_data (info->output_bfd);
2399 bfd_vma static_tls_size;
2400
2401   /* If tls_sec is NULL, we should have signalled an error already.  */
2402 if (htab->tls_sec == NULL)
2403 return 0;
2404
2405 /* Consider special static TLS alignment requirements. */
2406 static_tls_size = BFD_ALIGN (htab->tls_size, bed->static_tls_alignment);
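  /* NB: x86-64 uses TLS Variant II, where the thread pointer sits just
     above the static TLS block, so the value returned here (ADDRESS
     minus the aligned static TLS size minus the start of the TLS
     segment) is the usual negative offset from %fs:0.  */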
2407 return address - static_tls_size - htab->tls_sec->vma;
2408 }
2409
2410 /* Relocate an x86_64 ELF section. */
2411
2412 static int
2413 elf_x86_64_relocate_section (bfd *output_bfd,
2414 struct bfd_link_info *info,
2415 bfd *input_bfd,
2416 asection *input_section,
2417 bfd_byte *contents,
2418 Elf_Internal_Rela *relocs,
2419 Elf_Internal_Sym *local_syms,
2420 asection **local_sections)
2421 {
2422 struct elf_x86_link_hash_table *htab;
2423 Elf_Internal_Shdr *symtab_hdr;
2424 struct elf_link_hash_entry **sym_hashes;
2425 bfd_vma *local_got_offsets;
2426 bfd_vma *local_tlsdesc_gotents;
2427 Elf_Internal_Rela *rel;
2428 Elf_Internal_Rela *wrel;
2429 Elf_Internal_Rela *relend;
2430 unsigned int plt_entry_size;
2431 bool status;
2432
2433 /* Skip if check_relocs failed. */
2434 if (input_section->check_relocs_failed)
2435 return false;
2436
2437 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
2438 if (htab == NULL)
2439 return false;
2440
2441 if (!is_x86_elf (input_bfd, htab))
2442 {
2443 bfd_set_error (bfd_error_wrong_format);
2444 return false;
2445 }
2446
2447 plt_entry_size = htab->plt.plt_entry_size;
2448 symtab_hdr = &elf_symtab_hdr (input_bfd);
2449 sym_hashes = elf_sym_hashes (input_bfd);
2450 local_got_offsets = elf_local_got_offsets (input_bfd);
2451 local_tlsdesc_gotents = elf_x86_local_tlsdesc_gotent (input_bfd);
2452
2453 _bfd_x86_elf_set_tls_module_base (info);
2454
2455 status = true;
2456 rel = wrel = relocs;
2457 relend = relocs + input_section->reloc_count;
2458 for (; rel < relend; wrel++, rel++)
2459 {
2460 unsigned int r_type, r_type_tls;
2461 reloc_howto_type *howto;
2462 unsigned long r_symndx;
2463 struct elf_link_hash_entry *h;
2464 struct elf_x86_link_hash_entry *eh;
2465 Elf_Internal_Sym *sym;
2466 asection *sec;
2467 bfd_vma off, offplt, plt_offset;
2468 bfd_vma relocation;
2469 bool unresolved_reloc;
2470 bfd_reloc_status_type r;
2471 int tls_type;
2472 asection *base_got, *resolved_plt;
2473 bfd_vma st_size;
2474 bool resolved_to_zero;
2475 bool relative_reloc;
2476 bool converted_reloc;
2477 bool need_copy_reloc_in_pie;
2478 bool no_copyreloc_p;
2479
2480 r_type = ELF32_R_TYPE (rel->r_info);
2481 if (r_type == (int) R_X86_64_GNU_VTINHERIT
2482 || r_type == (int) R_X86_64_GNU_VTENTRY)
2483 {
2484 if (wrel != rel)
2485 *wrel = *rel;
2486 continue;
2487 }
2488
2489 r_symndx = htab->r_sym (rel->r_info);
2490 converted_reloc = (r_type & R_X86_64_converted_reloc_bit) != 0;
2491 if (converted_reloc)
2492 {
2493 r_type &= ~R_X86_64_converted_reloc_bit;
2494 rel->r_info = htab->r_info (r_symndx, r_type);
2495 }
2496
2497 howto = elf_x86_64_rtype_to_howto (input_bfd, r_type);
2498 if (howto == NULL)
2499 return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);
2500
2501 h = NULL;
2502 sym = NULL;
2503 sec = NULL;
2504 unresolved_reloc = false;
2505 if (r_symndx < symtab_hdr->sh_info)
2506 {
2507 sym = local_syms + r_symndx;
2508 sec = local_sections[r_symndx];
2509
2510 relocation = _bfd_elf_rela_local_sym (output_bfd, sym,
2511 &sec, rel);
2512 st_size = sym->st_size;
2513
2514 /* Relocate against local STT_GNU_IFUNC symbol. */
2515 if (!bfd_link_relocatable (info)
2516 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
2517 {
2518 h = _bfd_elf_x86_get_local_sym_hash (htab, input_bfd,
2519 rel, false);
2520 if (h == NULL)
2521 abort ();
2522
2523 /* Set STT_GNU_IFUNC symbol value. */
2524 h->root.u.def.value = sym->st_value;
2525 h->root.u.def.section = sec;
2526 }
2527 }
2528 else
2529 {
2530 bool warned ATTRIBUTE_UNUSED;
2531 bool ignored ATTRIBUTE_UNUSED;
2532
2533 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
2534 r_symndx, symtab_hdr, sym_hashes,
2535 h, sec, relocation,
2536 unresolved_reloc, warned, ignored);
2537 st_size = h->size;
2538 }
2539
2540 if (sec != NULL && discarded_section (sec))
2541 {
2542 _bfd_clear_contents (howto, input_bfd, input_section,
2543 contents, rel->r_offset);
2544 wrel->r_offset = rel->r_offset;
2545 wrel->r_info = 0;
2546 wrel->r_addend = 0;
2547
2548 /* For ld -r, remove relocations in debug sections against
2549 	     sections defined in discarded sections.  Not done for
2550 	     eh_frame, whose editing code expects relocs to be present.  */
2551 if (bfd_link_relocatable (info)
2552 && (input_section->flags & SEC_DEBUGGING))
2553 wrel--;
2554
2555 continue;
2556 }
2557
2558 if (bfd_link_relocatable (info))
2559 {
2560 if (wrel != rel)
2561 *wrel = *rel;
2562 continue;
2563 }
2564
2565 if (rel->r_addend == 0 && !ABI_64_P (output_bfd))
2566 {
2567 if (r_type == R_X86_64_64)
2568 {
2569 /* For x32, treat R_X86_64_64 like R_X86_64_32 and
2570 zero-extend it to 64bit if addend is zero. */
2571 r_type = R_X86_64_32;
2572 memset (contents + rel->r_offset + 4, 0, 4);
2573 }
2574 else if (r_type == R_X86_64_SIZE64)
2575 {
2576 /* For x32, treat R_X86_64_SIZE64 like R_X86_64_SIZE32 and
2577 zero-extend it to 64bit if addend is zero. */
2578 r_type = R_X86_64_SIZE32;
2579 memset (contents + rel->r_offset + 4, 0, 4);
2580 }
2581 }
2582
2583 eh = (struct elf_x86_link_hash_entry *) h;
2584
2585 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
2586 it here if it is defined in a non-shared object. */
2587 if (h != NULL
2588 && h->type == STT_GNU_IFUNC
2589 && h->def_regular)
2590 {
2591 bfd_vma plt_index;
2592 const char *name;
2593
2594 if ((input_section->flags & SEC_ALLOC) == 0)
2595 {
2596 /* If this is a SHT_NOTE section without SHF_ALLOC, treat
2597 STT_GNU_IFUNC symbol as STT_FUNC. */
2598 if (elf_section_type (input_section) == SHT_NOTE)
2599 goto skip_ifunc;
2600 /* Dynamic relocs are not propagated for SEC_DEBUGGING
2601 sections because such sections are not SEC_ALLOC and
2602 thus ld.so will not process them. */
2603 if ((input_section->flags & SEC_DEBUGGING) != 0)
2604 continue;
2605 abort ();
2606 }
2607
2608 switch (r_type)
2609 {
2610 default:
2611 break;
2612
2613 case R_X86_64_GOTPCREL:
2614 case R_X86_64_GOTPCRELX:
2615 case R_X86_64_REX_GOTPCRELX:
2616 case R_X86_64_GOTPCREL64:
2617 base_got = htab->elf.sgot;
2618 off = h->got.offset;
2619
2620 if (base_got == NULL)
2621 abort ();
2622
2623 if (off == (bfd_vma) -1)
2624 {
2625 /* We can't use h->got.offset here to save state, or
2626 even just remember the offset, as finish_dynamic_symbol
2627 would use that as offset into .got. */
2628
2629 if (h->plt.offset == (bfd_vma) -1)
2630 abort ();
2631
2632 if (htab->elf.splt != NULL)
2633 {
2634 plt_index = (h->plt.offset / plt_entry_size
2635 - htab->plt.has_plt0);
2636 off = (plt_index + 3) * GOT_ENTRY_SIZE;
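		      /* NB: the "+ 3" assumes the usual .got.plt layout,
			 whose first three slots are reserved (_DYNAMIC plus
			 two entries filled in by the dynamic linker).  */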
2637 base_got = htab->elf.sgotplt;
2638 }
2639 else
2640 {
2641 plt_index = h->plt.offset / plt_entry_size;
2642 off = plt_index * GOT_ENTRY_SIZE;
2643 base_got = htab->elf.igotplt;
2644 }
2645
2646 if (h->dynindx == -1
2647 || h->forced_local
2648 || info->symbolic)
2649 {
2650 		      /* This references the local definition.  We must
2651 initialize this entry in the global offset table.
2652 Since the offset must always be a multiple of 8,
2653 we use the least significant bit to record
2654 whether we have initialized it already.
2655
2656 When doing a dynamic link, we create a .rela.got
2657 relocation entry to initialize the value. This
2658 is done in the finish_dynamic_symbol routine. */
2659 if ((off & 1) != 0)
2660 off &= ~1;
2661 else
2662 {
2663 bfd_put_64 (output_bfd, relocation,
2664 base_got->contents + off);
2665 /* Note that this is harmless for the GOTPLT64
2666 case, as -1 | 1 still is -1. */
2667 h->got.offset |= 1;
2668 }
2669 }
2670 }
2671
2672 relocation = (base_got->output_section->vma
2673 + base_got->output_offset + off);
2674
2675 goto do_relocation;
2676 }
2677
2678 if (h->plt.offset == (bfd_vma) -1)
2679 {
2680 /* Handle static pointers of STT_GNU_IFUNC symbols. */
2681 if (r_type == htab->pointer_r_type
2682 && (input_section->flags & SEC_CODE) == 0)
2683 goto do_ifunc_pointer;
2684 goto bad_ifunc_reloc;
2685 }
2686
2687 /* STT_GNU_IFUNC symbol must go through PLT. */
2688 if (htab->elf.splt != NULL)
2689 {
2690 if (htab->plt_second != NULL)
2691 {
2692 resolved_plt = htab->plt_second;
2693 plt_offset = eh->plt_second.offset;
2694 }
2695 else
2696 {
2697 resolved_plt = htab->elf.splt;
2698 plt_offset = h->plt.offset;
2699 }
2700 }
2701 else
2702 {
2703 resolved_plt = htab->elf.iplt;
2704 plt_offset = h->plt.offset;
2705 }
2706
2707 relocation = (resolved_plt->output_section->vma
2708 + resolved_plt->output_offset + plt_offset);
2709
2710 switch (r_type)
2711 {
2712 default:
2713 bad_ifunc_reloc:
2714 if (h->root.root.string)
2715 name = h->root.root.string;
2716 else
2717 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
2718 NULL);
2719 _bfd_error_handler
2720 /* xgettext:c-format */
2721 (_("%pB: relocation %s against STT_GNU_IFUNC "
2722 "symbol `%s' isn't supported"), input_bfd,
2723 howto->name, name);
2724 bfd_set_error (bfd_error_bad_value);
2725 return false;
2726
2727 case R_X86_64_32S:
2728 if (bfd_link_pic (info))
2729 abort ();
2730 goto do_relocation;
2731
2732 case R_X86_64_32:
2733 if (ABI_64_P (output_bfd))
2734 goto do_relocation;
2735 /* FALLTHROUGH */
2736 case R_X86_64_64:
2737 do_ifunc_pointer:
2738 if (rel->r_addend != 0)
2739 {
2740 if (h->root.root.string)
2741 name = h->root.root.string;
2742 else
2743 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
2744 sym, NULL);
2745 _bfd_error_handler
2746 /* xgettext:c-format */
2747 (_("%pB: relocation %s against STT_GNU_IFUNC "
2748 "symbol `%s' has non-zero addend: %" PRId64),
2749 input_bfd, howto->name, name, (int64_t) rel->r_addend);
2750 bfd_set_error (bfd_error_bad_value);
2751 return false;
2752 }
2753
2754 		  /* Generate a dynamic relocation only when there is a
2755 non-GOT reference in a shared object or there is no
2756 PLT. */
2757 if ((bfd_link_pic (info) && h->non_got_ref)
2758 || h->plt.offset == (bfd_vma) -1)
2759 {
2760 Elf_Internal_Rela outrel;
2761 asection *sreloc;
2762
2763 /* Need a dynamic relocation to get the real function
2764 address. */
2765 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
2766 info,
2767 input_section,
2768 rel->r_offset);
2769 if (outrel.r_offset == (bfd_vma) -1
2770 || outrel.r_offset == (bfd_vma) -2)
2771 abort ();
2772
2773 outrel.r_offset += (input_section->output_section->vma
2774 + input_section->output_offset);
2775
2776 if (POINTER_LOCAL_IFUNC_P (info, h))
2777 {
2778 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
2779 h->root.root.string,
2780 h->root.u.def.section->owner);
2781
2782 /* This symbol is resolved locally. */
2783 outrel.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
2784 outrel.r_addend = (h->root.u.def.value
2785 + h->root.u.def.section->output_section->vma
2786 + h->root.u.def.section->output_offset);
2787
2788 if (htab->params->report_relative_reloc)
2789 _bfd_x86_elf_link_report_relative_reloc
2790 (info, input_section, h, sym,
2791 "R_X86_64_IRELATIVE", &outrel);
2792 }
2793 else
2794 {
2795 outrel.r_info = htab->r_info (h->dynindx, r_type);
2796 outrel.r_addend = 0;
2797 }
2798
2799 /* Dynamic relocations are stored in
2800 1. .rela.ifunc section in PIC object.
2801 2. .rela.got section in dynamic executable.
2802 3. .rela.iplt section in static executable. */
2803 if (bfd_link_pic (info))
2804 sreloc = htab->elf.irelifunc;
2805 else if (htab->elf.splt != NULL)
2806 sreloc = htab->elf.srelgot;
2807 else
2808 sreloc = htab->elf.irelplt;
2809 elf_append_rela (output_bfd, sreloc, &outrel);
2810
2811 /* If this reloc is against an external symbol, we
2812 do not want to fiddle with the addend. Otherwise,
2813 we need to include the symbol value so that it
2814 becomes an addend for the dynamic reloc. For an
2815 		     internal symbol, the addend has already been updated.  */
2816 continue;
2817 }
2818 /* FALLTHROUGH */
2819 case R_X86_64_PC32:
2820 case R_X86_64_PC32_BND:
2821 case R_X86_64_PC64:
2822 case R_X86_64_PLT32:
2823 case R_X86_64_PLT32_BND:
2824 goto do_relocation;
2825 }
2826 }
2827
2828 skip_ifunc:
2829 resolved_to_zero = (eh != NULL
2830 && UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh));
2831
2832 /* When generating a shared object, the relocations handled here are
2833 copied into the output file to be resolved at run time. */
2834 switch (r_type)
2835 {
2836 case R_X86_64_GOT32:
2837 case R_X86_64_GOT64:
2838 /* Relocation is to the entry for this symbol in the global
2839 offset table. */
2840 case R_X86_64_GOTPCREL:
2841 case R_X86_64_GOTPCRELX:
2842 case R_X86_64_REX_GOTPCRELX:
2843 case R_X86_64_GOTPCREL64:
2844 /* Use global offset table entry as symbol value. */
2845 case R_X86_64_GOTPLT64:
2846 /* This is obsolete and treated the same as GOT64. */
2847 base_got = htab->elf.sgot;
2848
2849 if (htab->elf.sgot == NULL)
2850 abort ();
2851
2852 relative_reloc = false;
2853 if (h != NULL)
2854 {
2855 off = h->got.offset;
2856 if (h->needs_plt
2857 && h->plt.offset != (bfd_vma)-1
2858 && off == (bfd_vma)-1)
2859 {
2860 /* We can't use h->got.offset here to save
2861 state, or even just remember the offset, as
2862 finish_dynamic_symbol would use that as offset into
2863 .got. */
2864 bfd_vma plt_index = (h->plt.offset / plt_entry_size
2865 - htab->plt.has_plt0);
2866 off = (plt_index + 3) * GOT_ENTRY_SIZE;
2867 base_got = htab->elf.sgotplt;
2868 }
2869
2870 if (RESOLVED_LOCALLY_P (info, h, htab))
2871 {
2872 /* We must initialize this entry in the global offset
2873 table. Since the offset must always be a multiple
2874 of 8, we use the least significant bit to record
2875 whether we have initialized it already.
2876
2877 When doing a dynamic link, we create a .rela.got
2878 relocation entry to initialize the value. This is
2879 done in the finish_dynamic_symbol routine. */
2880 if ((off & 1) != 0)
2881 off &= ~1;
2882 else
2883 {
2884 bfd_put_64 (output_bfd, relocation,
2885 base_got->contents + off);
2886 /* Note that this is harmless for the GOTPLT64 case,
2887 as -1 | 1 still is -1. */
2888 h->got.offset |= 1;
2889
2890 if (GENERATE_RELATIVE_RELOC_P (info, h))
2891 {
2892 /* If this symbol isn't dynamic in PIC,
2893 generate R_X86_64_RELATIVE here. */
2894 eh->no_finish_dynamic_symbol = 1;
2895 relative_reloc = true;
2896 }
2897 }
2898 }
2899 else
2900 unresolved_reloc = false;
2901 }
2902 else
2903 {
2904 if (local_got_offsets == NULL)
2905 abort ();
2906
2907 off = local_got_offsets[r_symndx];
2908
2909 /* The offset must always be a multiple of 8. We use
2910 the least significant bit to record whether we have
2911 already generated the necessary reloc. */
2912 if ((off & 1) != 0)
2913 off &= ~1;
2914 else
2915 {
2916 bfd_put_64 (output_bfd, relocation,
2917 base_got->contents + off);
2918 local_got_offsets[r_symndx] |= 1;
2919
2920 		  /* NB: GOTPCREL relocations against a local absolute
2921 		     symbol store the relocation value in the GOT slot
2922 		     without a relative relocation.  */
2923 if (bfd_link_pic (info)
2924 && !(sym->st_shndx == SHN_ABS
2925 && (r_type == R_X86_64_GOTPCREL
2926 || r_type == R_X86_64_GOTPCRELX
2927 || r_type == R_X86_64_REX_GOTPCRELX)))
2928 relative_reloc = true;
2929 }
2930 }
2931
2932 if (relative_reloc)
2933 {
2934 asection *s;
2935 Elf_Internal_Rela outrel;
2936
2937 /* We need to generate a R_X86_64_RELATIVE reloc
2938 for the dynamic linker. */
2939 s = htab->elf.srelgot;
2940 if (s == NULL)
2941 abort ();
2942
2943 outrel.r_offset = (base_got->output_section->vma
2944 + base_got->output_offset
2945 + off);
2946 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
2947 outrel.r_addend = relocation;
2948
2949 if (htab->params->report_relative_reloc)
2950 _bfd_x86_elf_link_report_relative_reloc
2951 (info, input_section, h, sym, "R_X86_64_RELATIVE",
2952 &outrel);
2953
2954 elf_append_rela (output_bfd, s, &outrel);
2955 }
2956
2957 if (off >= (bfd_vma) -2)
2958 abort ();
2959
2960 relocation = base_got->output_section->vma
2961 + base_got->output_offset + off;
2962 if (r_type != R_X86_64_GOTPCREL
2963 && r_type != R_X86_64_GOTPCRELX
2964 && r_type != R_X86_64_REX_GOTPCRELX
2965 && r_type != R_X86_64_GOTPCREL64)
2966 relocation -= htab->elf.sgotplt->output_section->vma
2967 - htab->elf.sgotplt->output_offset;
2968
2969 break;
2970
2971 case R_X86_64_GOTOFF64:
2972 /* Relocation is relative to the start of the global offset
2973 table. */
2974
2975 /* Check to make sure it isn't a protected function or data
2976 	   symbol for a shared library since it may not be local when
2977 	   used as a function address or with a copy relocation.  We also
2978 	   need to make sure that the symbol is referenced locally.  */
2979 if (bfd_link_pic (info) && h)
2980 {
2981 if (!h->def_regular)
2982 {
2983 const char *v;
2984
2985 switch (ELF_ST_VISIBILITY (h->other))
2986 {
2987 case STV_HIDDEN:
2988 v = _("hidden symbol");
2989 break;
2990 case STV_INTERNAL:
2991 v = _("internal symbol");
2992 break;
2993 case STV_PROTECTED:
2994 v = _("protected symbol");
2995 break;
2996 default:
2997 v = _("symbol");
2998 break;
2999 }
3000
3001 _bfd_error_handler
3002 /* xgettext:c-format */
3003 (_("%pB: relocation R_X86_64_GOTOFF64 against undefined %s"
3004 " `%s' can not be used when making a shared object"),
3005 input_bfd, v, h->root.root.string);
3006 bfd_set_error (bfd_error_bad_value);
3007 return false;
3008 }
3009 else if (!bfd_link_executable (info)
3010 && !SYMBOL_REFERENCES_LOCAL_P (info, h)
3011 && (h->type == STT_FUNC
3012 || h->type == STT_OBJECT)
3013 && ELF_ST_VISIBILITY (h->other) == STV_PROTECTED)
3014 {
3015 _bfd_error_handler
3016 /* xgettext:c-format */
3017 (_("%pB: relocation R_X86_64_GOTOFF64 against protected %s"
3018 " `%s' can not be used when making a shared object"),
3019 input_bfd,
3020 h->type == STT_FUNC ? "function" : "data",
3021 h->root.root.string);
3022 bfd_set_error (bfd_error_bad_value);
3023 return false;
3024 }
3025 }
3026
3027 /* Note that sgot is not involved in this
3028 calculation. We always want the start of .got.plt. If we
3029 defined _GLOBAL_OFFSET_TABLE_ in a different way, as is
3030 permitted by the ABI, we might have to change this
3031 calculation. */
3032 relocation -= htab->elf.sgotplt->output_section->vma
3033 + htab->elf.sgotplt->output_offset;
3034 break;
3035
3036 case R_X86_64_GOTPC32:
3037 case R_X86_64_GOTPC64:
3038 /* Use global offset table as symbol value. */
3039 relocation = htab->elf.sgotplt->output_section->vma
3040 + htab->elf.sgotplt->output_offset;
3041 unresolved_reloc = false;
3042 break;
3043
3044 case R_X86_64_PLTOFF64:
3045 /* Relocation is PLT entry relative to GOT. For local
3046 symbols it's the symbol itself relative to GOT. */
3047 if (h != NULL
3048 /* See PLT32 handling. */
3049 && (h->plt.offset != (bfd_vma) -1
3050 || eh->plt_got.offset != (bfd_vma) -1)
3051 && htab->elf.splt != NULL)
3052 {
3053 if (eh->plt_got.offset != (bfd_vma) -1)
3054 {
3055 /* Use the GOT PLT. */
3056 resolved_plt = htab->plt_got;
3057 plt_offset = eh->plt_got.offset;
3058 }
3059 else if (htab->plt_second != NULL)
3060 {
3061 resolved_plt = htab->plt_second;
3062 plt_offset = eh->plt_second.offset;
3063 }
3064 else
3065 {
3066 resolved_plt = htab->elf.splt;
3067 plt_offset = h->plt.offset;
3068 }
3069
3070 relocation = (resolved_plt->output_section->vma
3071 + resolved_plt->output_offset
3072 + plt_offset);
3073 unresolved_reloc = false;
3074 }
3075
3076 relocation -= htab->elf.sgotplt->output_section->vma
3077 + htab->elf.sgotplt->output_offset;
3078 break;
3079
3080 case R_X86_64_PLT32:
3081 case R_X86_64_PLT32_BND:
3082 /* Relocation is to the entry for this symbol in the
3083 procedure linkage table. */
3084
3085 /* Resolve a PLT32 reloc against a local symbol directly,
3086 without using the procedure linkage table. */
3087 if (h == NULL)
3088 break;
3089
3090 if ((h->plt.offset == (bfd_vma) -1
3091 && eh->plt_got.offset == (bfd_vma) -1)
3092 || htab->elf.splt == NULL)
3093 {
3094 /* We didn't make a PLT entry for this symbol. This
3095 happens when statically linking PIC code, or when
3096 using -Bsymbolic. */
3097 break;
3098 }
3099
3100 use_plt:
3101 if (h->plt.offset != (bfd_vma) -1)
3102 {
3103 if (htab->plt_second != NULL)
3104 {
3105 resolved_plt = htab->plt_second;
3106 plt_offset = eh->plt_second.offset;
3107 }
3108 else
3109 {
3110 resolved_plt = htab->elf.splt;
3111 plt_offset = h->plt.offset;
3112 }
3113 }
3114 else
3115 {
3116 /* Use the GOT PLT. */
3117 resolved_plt = htab->plt_got;
3118 plt_offset = eh->plt_got.offset;
3119 }
3120
3121 relocation = (resolved_plt->output_section->vma
3122 + resolved_plt->output_offset
3123 + plt_offset);
3124 unresolved_reloc = false;
3125 break;
3126
3127 case R_X86_64_SIZE32:
3128 case R_X86_64_SIZE64:
3129 /* Set to symbol size. */
3130 relocation = st_size;
3131 goto direct;
3132
3133 case R_X86_64_PC8:
3134 case R_X86_64_PC16:
3135 case R_X86_64_PC32:
3136 case R_X86_64_PC32_BND:
3137 /* Don't complain about -fPIC if the symbol is undefined when
3138 	   building an executable unless it is an unresolved weak symbol,
3139 	   references a dynamic definition in a PIE, or -z nocopyreloc
3140 is used. */
3141 no_copyreloc_p
3142 = (info->nocopyreloc
3143 || (h != NULL
3144 && !h->root.linker_def
3145 && !h->root.ldscript_def
3146 && eh->def_protected
3147 && elf_has_no_copy_on_protected (h->root.u.def.section->owner)));
3148
3149 if ((input_section->flags & SEC_ALLOC) != 0
3150 && (input_section->flags & SEC_READONLY) != 0
3151 && h != NULL
3152 && ((bfd_link_executable (info)
3153 && ((h->root.type == bfd_link_hash_undefweak
3154 && (eh == NULL
3155 || !UNDEFINED_WEAK_RESOLVED_TO_ZERO (info,
3156 eh)))
3157 || (bfd_link_pie (info)
3158 && !SYMBOL_DEFINED_NON_SHARED_P (h)
3159 && h->def_dynamic)
3160 || (no_copyreloc_p
3161 && h->def_dynamic
3162 && !(h->root.u.def.section->flags & SEC_CODE))))
3163 || bfd_link_dll (info)))
3164 {
3165 bool fail = false;
3166 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
3167 {
3168 /* Symbol is referenced locally. Make sure it is
3169 defined locally. */
3170 fail = !SYMBOL_DEFINED_NON_SHARED_P (h);
3171 }
3172 else if (bfd_link_pie (info))
3173 {
3174 /* We can only use PC-relative relocations in PIE
3175 from non-code sections. */
3176 if (h->type == STT_FUNC
3177 && (sec->flags & SEC_CODE) != 0)
3178 fail = true;
3179 }
3180 else if (no_copyreloc_p || bfd_link_dll (info))
3181 {
3182 /* Symbol doesn't need copy reloc and isn't
3183 referenced locally. Don't allow PC-relative
3184 relocations against default and protected
3185 symbols since address of protected function
3186 and location of protected data may not be in
3187 the shared object. */
3188 fail = (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
3189 || ELF_ST_VISIBILITY (h->other) == STV_PROTECTED);
3190 }
3191
3192 if (fail)
3193 return elf_x86_64_need_pic (info, input_bfd, input_section,
3194 h, NULL, NULL, howto);
3195 }
3196 /* Since x86-64 has PC-relative PLT, we can use PLT in PIE
3197 as function address. */
3198 else if (h != NULL
3199 && (input_section->flags & SEC_CODE) == 0
3200 && bfd_link_pie (info)
3201 && h->type == STT_FUNC
3202 && !h->def_regular
3203 && h->def_dynamic)
3204 goto use_plt;
3205 /* Fall through. */
3206
3207 case R_X86_64_8:
3208 case R_X86_64_16:
3209 case R_X86_64_32:
3210 case R_X86_64_PC64:
3211 case R_X86_64_64:
3212 /* FIXME: The ABI says the linker should make sure the value is
3213 	   the same when it is zero-extended to 64 bits.  */
3214
3215 direct:
3216 if ((input_section->flags & SEC_ALLOC) == 0)
3217 break;
3218
3219 need_copy_reloc_in_pie = (bfd_link_pie (info)
3220 && h != NULL
3221 && (h->needs_copy
3222 || eh->needs_copy
3223 || (h->root.type
3224 == bfd_link_hash_undefined))
3225 && (X86_PCREL_TYPE_P (r_type)
3226 || X86_SIZE_TYPE_P (r_type)));
3227
3228 if (GENERATE_DYNAMIC_RELOCATION_P (info, eh, r_type, sec,
3229 need_copy_reloc_in_pie,
3230 resolved_to_zero, false))
3231 {
3232 Elf_Internal_Rela outrel;
3233 bool skip, relocate;
3234 asection *sreloc;
3235 const char *relative_reloc_name = NULL;
3236
3237 /* When generating a shared object, these relocations
3238 are copied into the output file to be resolved at run
3239 time. */
3240 skip = false;
3241 relocate = false;
3242
3243 outrel.r_offset =
3244 _bfd_elf_section_offset (output_bfd, info, input_section,
3245 rel->r_offset);
3246 if (outrel.r_offset == (bfd_vma) -1)
3247 skip = true;
3248 else if (outrel.r_offset == (bfd_vma) -2)
3249 skip = true, relocate = true;
3250
3251 outrel.r_offset += (input_section->output_section->vma
3252 + input_section->output_offset);
3253
3254 if (skip)
3255 memset (&outrel, 0, sizeof outrel);
3256
3257 else if (COPY_INPUT_RELOC_P (info, h, r_type))
3258 {
3259 outrel.r_info = htab->r_info (h->dynindx, r_type);
3260 outrel.r_addend = rel->r_addend;
3261 }
3262 else
3263 {
3264 /* This symbol is local, or marked to become local.
3265 When relocation overflow check is disabled, we
3266 convert R_X86_64_32 to dynamic R_X86_64_RELATIVE. */
3267 if (r_type == htab->pointer_r_type
3268 || (r_type == R_X86_64_32
3269 && htab->params->no_reloc_overflow_check))
3270 {
3271 relocate = true;
3272 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
3273 outrel.r_addend = relocation + rel->r_addend;
3274 relative_reloc_name = "R_X86_64_RELATIVE";
3275 }
3276 else if (r_type == R_X86_64_64
3277 && !ABI_64_P (output_bfd))
3278 {
3279 relocate = true;
3280 outrel.r_info = htab->r_info (0,
3281 R_X86_64_RELATIVE64);
3282 outrel.r_addend = relocation + rel->r_addend;
3283 relative_reloc_name = "R_X86_64_RELATIVE64";
3284 /* Check addend overflow. */
3285 if ((outrel.r_addend & 0x80000000)
3286 != (rel->r_addend & 0x80000000))
3287 {
3288 const char *name;
3289 int addend = rel->r_addend;
3290 if (h && h->root.root.string)
3291 name = h->root.root.string;
3292 else
3293 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
3294 sym, NULL);
3295 _bfd_error_handler
3296 /* xgettext:c-format */
3297 (_("%pB: addend %s%#x in relocation %s against "
3298 "symbol `%s' at %#" PRIx64
3299 " in section `%pA' is out of range"),
3300 input_bfd, addend < 0 ? "-" : "", addend,
3301 howto->name, name, (uint64_t) rel->r_offset,
3302 input_section);
3303 bfd_set_error (bfd_error_bad_value);
3304 return false;
3305 }
3306 }
3307 else
3308 {
3309 long sindx;
3310
3311 if (bfd_is_abs_section (sec))
3312 sindx = 0;
3313 else if (sec == NULL || sec->owner == NULL)
3314 {
3315 bfd_set_error (bfd_error_bad_value);
3316 return false;
3317 }
3318 else
3319 {
3320 asection *osec;
3321
3322 /* We are turning this relocation into one
3323 against a section symbol. It would be
3324 proper to subtract the symbol's value,
3325 osec->vma, from the emitted reloc addend,
3326 but ld.so expects buggy relocs. */
3327 osec = sec->output_section;
3328 sindx = elf_section_data (osec)->dynindx;
3329 if (sindx == 0)
3330 {
3331 asection *oi = htab->elf.text_index_section;
3332 sindx = elf_section_data (oi)->dynindx;
3333 }
3334 BFD_ASSERT (sindx != 0);
3335 }
3336
3337 outrel.r_info = htab->r_info (sindx, r_type);
3338 outrel.r_addend = relocation + rel->r_addend;
3339 }
3340 }
3341
3342 sreloc = elf_section_data (input_section)->sreloc;
3343
3344 if (sreloc == NULL || sreloc->contents == NULL)
3345 {
3346 r = bfd_reloc_notsupported;
3347 goto check_relocation_error;
3348 }
3349
3350 if (relative_reloc_name
3351 && htab->params->report_relative_reloc)
3352 _bfd_x86_elf_link_report_relative_reloc
3353 (info, input_section, h, sym, relative_reloc_name,
3354 &outrel);
3355
3356 elf_append_rela (output_bfd, sreloc, &outrel);
3357
3358 /* If this reloc is against an external symbol, we do
3359 not want to fiddle with the addend. Otherwise, we
3360 need to include the symbol value so that it becomes
3361 an addend for the dynamic reloc. */
3362 if (! relocate)
3363 continue;
3364 }
3365
3366 break;
3367
3368 case R_X86_64_TLSGD:
3369 case R_X86_64_GOTPC32_TLSDESC:
3370 case R_X86_64_TLSDESC_CALL:
3371 case R_X86_64_GOTTPOFF:
3372 tls_type = GOT_UNKNOWN;
3373 if (h == NULL && local_got_offsets)
3374 tls_type = elf_x86_local_got_tls_type (input_bfd) [r_symndx];
3375 else if (h != NULL)
3376 tls_type = elf_x86_hash_entry (h)->tls_type;
3377
3378 r_type_tls = r_type;
3379 if (! elf_x86_64_tls_transition (info, input_bfd,
3380 input_section, contents,
3381 symtab_hdr, sym_hashes,
3382 &r_type_tls, tls_type, rel,
3383 relend, h, r_symndx, true))
3384 return false;
3385
3386 if (r_type_tls == R_X86_64_TPOFF32)
3387 {
3388 bfd_vma roff = rel->r_offset;
3389
3390 BFD_ASSERT (! unresolved_reloc);
3391
3392 if (r_type == R_X86_64_TLSGD)
3393 {
3394 /* GD->LE transition. For 64bit, change
3395 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3396 .word 0x6666; rex64; call __tls_get_addr@PLT
3397 or
3398 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3399 .byte 0x66; rex64
3400 call *__tls_get_addr@GOTPCREL(%rip)
3401 which may be converted to
3402 addr32 call __tls_get_addr
3403 into:
3404 movq %fs:0, %rax
3405 leaq foo@tpoff(%rax), %rax
3406 For 32bit, change
3407 leaq foo@tlsgd(%rip), %rdi
3408 .word 0x6666; rex64; call __tls_get_addr@PLT
3409 or
3410 leaq foo@tlsgd(%rip), %rdi
3411 .byte 0x66; rex64
3412 call *__tls_get_addr@GOTPCREL(%rip)
3413 which may be converted to
3414 addr32 call __tls_get_addr
3415 into:
3416 movl %fs:0, %eax
3417 leaq foo@tpoff(%rax), %rax
3418 For largepic, change:
3419 leaq foo@tlsgd(%rip), %rdi
3420 movabsq $__tls_get_addr@pltoff, %rax
3421 addq %r15, %rax
3422 call *%rax
3423 into:
3424 movq %fs:0, %rax
3425 leaq foo@tpoff(%rax), %rax
3426 nopw 0x0(%rax,%rax,1) */
3427 int largepic = 0;
3428 if (ABI_64_P (output_bfd))
3429 {
3430 if (contents[roff + 5] == 0xb8)
3431 {
3432 if (roff < 3
3433 || (roff - 3 + 22) > input_section->size)
3434 {
3435 corrupt_input:
3436 info->callbacks->einfo
3437 (_("%F%P: corrupt input: %pB\n"),
3438 input_bfd);
3439 return false;
3440 }
3441 memcpy (contents + roff - 3,
3442 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80"
3443 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
3444 largepic = 1;
3445 }
3446 else
3447 {
3448 if (roff < 4
3449 || (roff - 4 + 16) > input_section->size)
3450 goto corrupt_input;
3451 memcpy (contents + roff - 4,
3452 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3453 16);
3454 }
3455 }
3456 else
3457 {
3458 if (roff < 3
3459 || (roff - 3 + 15) > input_section->size)
3460 goto corrupt_input;
3461 memcpy (contents + roff - 3,
3462 "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3463 15);
3464 }
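		  /* NB: each replacement sequence above ends with a 4-byte
		     hole for foo@tpoff right after the leaq opcode and ModRM
		     bytes; it starts at roff + 8 (one byte later for the
		     largepic form) and is filled by the bfd_put_32 below.  */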
3465 bfd_put_32 (output_bfd,
3466 elf_x86_64_tpoff (info, relocation),
3467 contents + roff + 8 + largepic);
3468 /* Skip R_X86_64_PC32, R_X86_64_PLT32,
3469 R_X86_64_GOTPCRELX and R_X86_64_PLTOFF64. */
3470 rel++;
3471 wrel++;
3472 continue;
3473 }
3474 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3475 {
3476 /* GDesc -> LE transition.
3477 It's originally something like:
3478 leaq x@tlsdesc(%rip), %rax <--- LP64 mode.
3479 rex leal x@tlsdesc(%rip), %eax <--- X32 mode.
3480
3481 Change it to:
3482 movq $x@tpoff, %rax <--- LP64 mode.
3483 rex movl $x@tpoff, %eax <--- X32 mode.
3484 */
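		      /* NB: the lea here is REX 0x8d with a RIP-relative
			 ModRM; its three bytes are rewritten in place into
			 REX 0xc7 /0 with mod=11, moving the destination
			 register from the ModRM reg field to the rm field
			 and its REX.R extension to REX.B, so the length
			 stays the same.  */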
3485
3486 unsigned int val, type;
3487
3488 if (roff < 3)
3489 goto corrupt_input;
3490 type = bfd_get_8 (input_bfd, contents + roff - 3);
3491 val = bfd_get_8 (input_bfd, contents + roff - 1);
3492 bfd_put_8 (output_bfd,
3493 (type & 0x48) | ((type >> 2) & 1),
3494 contents + roff - 3);
3495 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
3496 bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7),
3497 contents + roff - 1);
3498 bfd_put_32 (output_bfd,
3499 elf_x86_64_tpoff (info, relocation),
3500 contents + roff);
3501 continue;
3502 }
3503 else if (r_type == R_X86_64_TLSDESC_CALL)
3504 {
3505 /* GDesc -> LE transition.
3506 It's originally:
3507 call *(%rax) <--- LP64 mode.
3508 call *(%eax) <--- X32 mode.
3509 Turn it into:
3510 xchg %ax,%ax <-- LP64 mode.
3511 nopl (%rax) <-- X32 mode.
3512 */
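		      /* NB: the replacements match the original lengths:
			 "call *(%rax)" (0xff 0x10) is 2 bytes, as is the
			 2-byte nop 0x66 0x90, and the X32 form with its
			 addr32 prefix (0x67 0xff 0x10) is 3 bytes, as is
			 "nopl (%rax)" (0x0f 0x1f 0x00).  */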
3513 unsigned int prefix = 0;
3514 if (!ABI_64_P (input_bfd))
3515 {
3516 /* Check for call *x@tlsdesc(%eax). */
3517 if (contents[roff] == 0x67)
3518 prefix = 1;
3519 }
3520 if (prefix)
3521 {
3522 bfd_put_8 (output_bfd, 0x0f, contents + roff);
3523 bfd_put_8 (output_bfd, 0x1f, contents + roff + 1);
3524 bfd_put_8 (output_bfd, 0x00, contents + roff + 2);
3525 }
3526 else
3527 {
3528 bfd_put_8 (output_bfd, 0x66, contents + roff);
3529 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
3530 }
3531 continue;
3532 }
3533 else if (r_type == R_X86_64_GOTTPOFF)
3534 {
3535 /* IE->LE transition:
3536 For 64bit, originally it can be one of:
3537 movq foo@gottpoff(%rip), %reg
3538 addq foo@gottpoff(%rip), %reg
3539 We change it into:
3540 movq $foo, %reg
3541 leaq foo(%reg), %reg
3542 addq $foo, %reg.
3543 For 32bit, originally it can be one of:
3544 movq foo@gottpoff(%rip), %reg
3545 addl foo@gottpoff(%rip), %reg
3546 We change it into:
3547 movq $foo, %reg
3548 leal foo(%reg), %reg
3549 addl $foo, %reg. */
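		      /* NB: every rewrite below keeps the length unchanged
			 (opcode, ModRM and a 32-bit operand).  Where a REX
			 prefix carries an extension bit, REX.R moves to
			 REX.B for the mov/add-immediate forms (the register
			 ends up in the ModRM rm field) and becomes REX.R+B
			 for the lea form (the register is both destination
			 and base).  */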
3550
3551 unsigned int val, type, reg;
3552
3553 if (roff >= 3)
3554 val = bfd_get_8 (input_bfd, contents + roff - 3);
3555 else
3556 {
3557 if (roff < 2)
3558 goto corrupt_input;
3559 val = 0;
3560 }
3561 type = bfd_get_8 (input_bfd, contents + roff - 2);
3562 reg = bfd_get_8 (input_bfd, contents + roff - 1);
3563 reg >>= 3;
3564 if (type == 0x8b)
3565 {
3566 /* movq */
3567 if (val == 0x4c)
3568 {
3569 if (roff < 3)
3570 goto corrupt_input;
3571 bfd_put_8 (output_bfd, 0x49,
3572 contents + roff - 3);
3573 }
3574 else if (!ABI_64_P (output_bfd) && val == 0x44)
3575 {
3576 if (roff < 3)
3577 goto corrupt_input;
3578 bfd_put_8 (output_bfd, 0x41,
3579 contents + roff - 3);
3580 }
3581 bfd_put_8 (output_bfd, 0xc7,
3582 contents + roff - 2);
3583 bfd_put_8 (output_bfd, 0xc0 | reg,
3584 contents + roff - 1);
3585 }
3586 else if (reg == 4)
3587 {
3588 /* addq/addl -> addq/addl - addressing with %rsp/%r12
3589 is special */
3590 if (val == 0x4c)
3591 {
3592 if (roff < 3)
3593 goto corrupt_input;
3594 bfd_put_8 (output_bfd, 0x49,
3595 contents + roff - 3);
3596 }
3597 else if (!ABI_64_P (output_bfd) && val == 0x44)
3598 {
3599 if (roff < 3)
3600 goto corrupt_input;
3601 bfd_put_8 (output_bfd, 0x41,
3602 contents + roff - 3);
3603 }
3604 bfd_put_8 (output_bfd, 0x81,
3605 contents + roff - 2);
3606 bfd_put_8 (output_bfd, 0xc0 | reg,
3607 contents + roff - 1);
3608 }
3609 else
3610 {
3611 /* addq/addl -> leaq/leal */
3612 if (val == 0x4c)
3613 {
3614 if (roff < 3)
3615 goto corrupt_input;
3616 bfd_put_8 (output_bfd, 0x4d,
3617 contents + roff - 3);
3618 }
3619 else if (!ABI_64_P (output_bfd) && val == 0x44)
3620 {
3621 if (roff < 3)
3622 goto corrupt_input;
3623 bfd_put_8 (output_bfd, 0x45,
3624 contents + roff - 3);
3625 }
3626 bfd_put_8 (output_bfd, 0x8d,
3627 contents + roff - 2);
3628 bfd_put_8 (output_bfd, 0x80 | reg | (reg << 3),
3629 contents + roff - 1);
3630 }
3631 bfd_put_32 (output_bfd,
3632 elf_x86_64_tpoff (info, relocation),
3633 contents + roff);
3634 continue;
3635 }
3636 else
3637 BFD_ASSERT (false);
3638 }
3639
3640 if (htab->elf.sgot == NULL)
3641 abort ();
3642
3643 if (h != NULL)
3644 {
3645 off = h->got.offset;
3646 offplt = elf_x86_hash_entry (h)->tlsdesc_got;
3647 }
3648 else
3649 {
3650 if (local_got_offsets == NULL)
3651 abort ();
3652
3653 off = local_got_offsets[r_symndx];
3654 offplt = local_tlsdesc_gotents[r_symndx];
3655 }
3656
3657 if ((off & 1) != 0)
3658 off &= ~1;
3659 else
3660 {
3661 Elf_Internal_Rela outrel;
3662 int dr_type, indx;
3663 asection *sreloc;
3664
3665 if (htab->elf.srelgot == NULL)
3666 abort ();
3667
3668 indx = h && h->dynindx != -1 ? h->dynindx : 0;
3669
3670 if (GOT_TLS_GDESC_P (tls_type))
3671 {
3672 outrel.r_info = htab->r_info (indx, R_X86_64_TLSDESC);
3673 BFD_ASSERT (htab->sgotplt_jump_table_size + offplt
3674 + 2 * GOT_ENTRY_SIZE <= htab->elf.sgotplt->size);
3675 outrel.r_offset = (htab->elf.sgotplt->output_section->vma
3676 + htab->elf.sgotplt->output_offset
3677 + offplt
3678 + htab->sgotplt_jump_table_size);
3679 sreloc = htab->elf.srelplt;
3680 if (indx == 0)
3681 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3682 else
3683 outrel.r_addend = 0;
3684 elf_append_rela (output_bfd, sreloc, &outrel);
3685 }
3686
3687 sreloc = htab->elf.srelgot;
3688
3689 outrel.r_offset = (htab->elf.sgot->output_section->vma
3690 + htab->elf.sgot->output_offset + off);
3691
3692 if (GOT_TLS_GD_P (tls_type))
3693 dr_type = R_X86_64_DTPMOD64;
3694 else if (GOT_TLS_GDESC_P (tls_type))
3695 goto dr_done;
3696 else
3697 dr_type = R_X86_64_TPOFF64;
3698
3699 bfd_put_64 (output_bfd, 0, htab->elf.sgot->contents + off);
3700 outrel.r_addend = 0;
3701 if ((dr_type == R_X86_64_TPOFF64
3702 || dr_type == R_X86_64_TLSDESC) && indx == 0)
3703 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3704 outrel.r_info = htab->r_info (indx, dr_type);
3705
3706 elf_append_rela (output_bfd, sreloc, &outrel);
3707
3708 if (GOT_TLS_GD_P (tls_type))
3709 {
3710 if (indx == 0)
3711 {
3712 BFD_ASSERT (! unresolved_reloc);
3713 bfd_put_64 (output_bfd,
3714 relocation - _bfd_x86_elf_dtpoff_base (info),
3715 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3716 }
3717 else
3718 {
3719 bfd_put_64 (output_bfd, 0,
3720 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3721 outrel.r_info = htab->r_info (indx,
3722 R_X86_64_DTPOFF64);
3723 outrel.r_offset += GOT_ENTRY_SIZE;
3724 elf_append_rela (output_bfd, sreloc,
3725 &outrel);
3726 }
3727 }
3728
3729 dr_done:
3730 if (h != NULL)
3731 h->got.offset |= 1;
3732 else
3733 local_got_offsets[r_symndx] |= 1;
3734 }
3735
3736 if (off >= (bfd_vma) -2
3737 && ! GOT_TLS_GDESC_P (tls_type))
3738 abort ();
3739 if (r_type_tls == r_type)
3740 {
3741 if (r_type == R_X86_64_GOTPC32_TLSDESC
3742 || r_type == R_X86_64_TLSDESC_CALL)
3743 relocation = htab->elf.sgotplt->output_section->vma
3744 + htab->elf.sgotplt->output_offset
3745 + offplt + htab->sgotplt_jump_table_size;
3746 else
3747 relocation = htab->elf.sgot->output_section->vma
3748 + htab->elf.sgot->output_offset + off;
3749 unresolved_reloc = false;
3750 }
3751 else
3752 {
3753 bfd_vma roff = rel->r_offset;
3754
3755 if (r_type == R_X86_64_TLSGD)
3756 {
3757 /* GD->IE transition. For 64bit, change
3758 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3759 .word 0x6666; rex64; call __tls_get_addr@PLT
3760 or
3761 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3762 .byte 0x66; rex64
3763 call *__tls_get_addr@GOTPCREL(%rip)
3764 which may be converted to
3765 addr32 call __tls_get_addr
3766 into:
3767 movq %fs:0, %rax
3768 addq foo@gottpoff(%rip), %rax
3769 For 32bit, change
3770 leaq foo@tlsgd(%rip), %rdi
3771 .word 0x6666; rex64; call __tls_get_addr@PLT
3772 or
3773 leaq foo@tlsgd(%rip), %rdi
3774 .byte 0x66; rex64
3775 call *__tls_get_addr@GOTPCREL(%rip)
3776 which may be converted to
3777 addr32 call __tls_get_addr
3778 into:
3779 movl %fs:0, %eax
3780 addq foo@gottpoff(%rip), %rax
3781 For largepic, change:
3782 leaq foo@tlsgd(%rip), %rdi
3783 movabsq $__tls_get_addr@pltoff, %rax
3784 addq %r15, %rax
3785 call *%rax
3786 into:
3787 movq %fs:0, %rax
3788 addq foo@gottpoff(%rip), %rax
3789 nopw 0x0(%rax,%rax,1) */
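/* For reference, the byte strings copied in below decode as:
     64 48 8b 04 25 00 00 00 00   movq %fs:0x0, %rax
     48 03 05 00 00 00 00         addq 0x0(%rip), %rax
     66 0f 1f 44 00 00            nopw 0x0(%rax,%rax,1)  (largepic pad)
   and, for x32:
     64 8b 04 25 00 00 00 00      movl %fs:0x0, %eax
     48 03 05 00 00 00 00         addq 0x0(%rip), %rax
   The @gottpoff displacement of the addq is patched in by the
   bfd_put_32 below.  */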
3790 int largepic = 0;
3791 if (ABI_64_P (output_bfd))
3792 {
3793 if (contents[roff + 5] == 0xb8)
3794 {
3795 if (roff < 3
3796 || (roff - 3 + 22) > input_section->size)
3797 goto corrupt_input;
3798 memcpy (contents + roff - 3,
3799 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05"
3800 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
3801 largepic = 1;
3802 }
3803 else
3804 {
3805 if (roff < 4
3806 || (roff - 4 + 16) > input_section->size)
3807 goto corrupt_input;
3808 memcpy (contents + roff - 4,
3809 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
3810 16);
3811 }
3812 }
3813 else
3814 {
3815 if (roff < 3
3816 || (roff - 3 + 15) > input_section->size)
3817 goto corrupt_input;
3818 memcpy (contents + roff - 3,
3819 "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
3820 15);
3821 }
3822
3823 relocation = (htab->elf.sgot->output_section->vma
3824 + htab->elf.sgot->output_offset + off
3825 - roff
3826 - largepic
3827 - input_section->output_section->vma
3828 - input_section->output_offset
3829 - 12);
3830 bfd_put_32 (output_bfd, relocation,
3831 contents + roff + 8 + largepic);
3832 /* Skip R_X86_64_PLT32/R_X86_64_PLTOFF64. */
3833 rel++;
3834 wrel++;
3835 continue;
3836 }
3837 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3838 {
3839 /* GDesc -> IE transition.
3840 It's originally something like:
3841 leaq x@tlsdesc(%rip), %rax <--- LP64 mode.
3842 rex leal x@tlsdesc(%rip), %eax <--- X32 mode.
3843
3844 Change it to:
3845 # before xchg %ax,%ax in LP64 mode.
3846 movq x@gottpoff(%rip), %rax
3847 # before nopl (%rax) in X32 mode.
3848 rex movl x@gottpoff(%rip), %eax
3849 */
3850
3851 /* Now modify the instruction as appropriate. To
3852 turn a lea into a mov in the form we use it, it
3853 suffices to change the second byte from 0x8d to
3854 0x8b. */
3855 if (roff < 2)
3856 goto corrupt_input;
3857 bfd_put_8 (output_bfd, 0x8b, contents + roff - 2);
3858
3859 bfd_put_32 (output_bfd,
3860 htab->elf.sgot->output_section->vma
3861 + htab->elf.sgot->output_offset + off
3862 - rel->r_offset
3863 - input_section->output_section->vma
3864 - input_section->output_offset
3865 - 4,
3866 contents + roff);
3867 continue;
3868 }
3869 else if (r_type == R_X86_64_TLSDESC_CALL)
3870 {
3871 /* GDesc -> IE transition.
3872 It's originally:
3873 call *(%rax) <--- LP64 mode.
3874 call *(%eax) <--- X32 mode.
3875
3876 Change it to:
3877 xchg %ax, %ax <-- LP64 mode.
3878 nopl (%rax) <-- X32 mode.
3879 */
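/* The original indirect call is 2 bytes (ff 10, call *(%rax)) or,
   with an addr32 prefix in x32, 3 bytes (67 ff 10).  It is replaced
   with a no-op of the same size, 66 90 (xchg %ax,%ax) or 0f 1f 00
   (nopl (%rax)), so the instruction stream keeps its length.  */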
3880
3881 unsigned int prefix = 0;
3882 if (!ABI_64_P (input_bfd))
3883 {
3884 /* Check for call *x@tlsdesc(%eax). */
3885 if (contents[roff] == 0x67)
3886 prefix = 1;
3887 }
3888 if (prefix)
3889 {
3890 bfd_put_8 (output_bfd, 0x0f, contents + roff);
3891 bfd_put_8 (output_bfd, 0x1f, contents + roff + 1);
3892 bfd_put_8 (output_bfd, 0x00, contents + roff + 2);
3893 }
3894 else
3895 {
3896 bfd_put_8 (output_bfd, 0x66, contents + roff);
3897 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
3898 }
3899 continue;
3900 }
3901 else
3902 BFD_ASSERT (false);
3903 }
3904 break;
3905
3906 case R_X86_64_TLSLD:
3907 if (! elf_x86_64_tls_transition (info, input_bfd,
3908 input_section, contents,
3909 symtab_hdr, sym_hashes,
3910 &r_type, GOT_UNKNOWN, rel,
3911 relend, h, r_symndx, true))
3912 return false;
3913
3914 if (r_type != R_X86_64_TLSLD)
3915 {
3916 /* LD->LE transition:
3917 leaq foo@tlsld(%rip), %rdi
3918 call __tls_get_addr@PLT
3919 For 64bit, we change it into:
3920 .word 0x6666; .byte 0x66; movq %fs:0, %rax
3921 For 32bit, we change it into:
3922 nopl 0x0(%rax); movl %fs:0, %eax
3923 Or
3924 leaq foo@tlsld(%rip), %rdi;
3925 call *__tls_get_addr@GOTPCREL(%rip)
3926 which may be converted to
3927 addr32 call __tls_get_addr
3928 For 64bit, we change it into:
3929 .word 0x6666; .word 0x6666; movq %fs:0, %rax
3930 For 32bit, we change it into:
3931 nopw 0x0(%rax); movl %fs:0, %eax
3932 For largepic, change:
3933 leaq foo@tlsld(%rip), %rdi
3934 movabsq $__tls_get_addr@pltoff, %rax
3935 addq %rbx, %rax
3936 call *%rax
3937 into
3938 data16 data16 data16 nopw %cs:0x0(%rax,%rax,1)
3939 movq %fs:0, %rax */
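/* The leaq holding the TLSLD relocation is 7 bytes with the reloc 3
   bytes in, so the rewritten sequence starts at r_offset - 3.  Its
   total length is 12 bytes for the direct call form, 13 bytes when
   the call goes through the GOT (0xff) or carries an addr32 prefix
   (0x67), and 22 bytes for the largepic movabsq form, matching the
   memcpy sizes below.  */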
3940
3941 BFD_ASSERT (r_type == R_X86_64_TPOFF32);
3942 if (ABI_64_P (output_bfd))
3943 {
3944 if ((rel->r_offset + 5) >= input_section->size)
3945 goto corrupt_input;
3946 if (contents[rel->r_offset + 5] == 0xb8)
3947 {
3948 if (rel->r_offset < 3
3949 || (rel->r_offset - 3 + 22) > input_section->size)
3950 goto corrupt_input;
3951 memcpy (contents + rel->r_offset - 3,
3952 "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0"
3953 "\x64\x48\x8b\x04\x25\0\0\0", 22);
3954 }
3955 else if (contents[rel->r_offset + 4] == 0xff
3956 || contents[rel->r_offset + 4] == 0x67)
3957 {
3958 if (rel->r_offset < 3
3959 || (rel->r_offset - 3 + 13) > input_section->size)
3960 goto corrupt_input;
3961 memcpy (contents + rel->r_offset - 3,
3962 "\x66\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0",
3963 13);
3964
3965 }
3966 else
3967 {
3968 if (rel->r_offset < 3
3969 || (rel->r_offset - 3 + 12) > input_section->size)
3970 goto corrupt_input;
3971 memcpy (contents + rel->r_offset - 3,
3972 "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12);
3973 }
3974 }
3975 else
3976 {
3977 if ((rel->r_offset + 4) >= input_section->size)
3978 goto corrupt_input;
3979 if (contents[rel->r_offset + 4] == 0xff)
3980 {
3981 if (rel->r_offset < 3
3982 || (rel->r_offset - 3 + 13) > input_section->size)
3983 goto corrupt_input;
3984 memcpy (contents + rel->r_offset - 3,
3985 "\x66\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0",
3986 13);
3987 }
3988 else
3989 {
3990 if (rel->r_offset < 3
3991 || (rel->r_offset - 3 + 12) > input_section->size)
3992 goto corrupt_input;
3993 memcpy (contents + rel->r_offset - 3,
3994 "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12);
3995 }
3996 }
3997 /* Skip R_X86_64_PC32, R_X86_64_PLT32, R_X86_64_GOTPCRELX
3998 and R_X86_64_PLTOFF64. */
3999 rel++;
4000 wrel++;
4001 continue;
4002 }
4003
4004 if (htab->elf.sgot == NULL)
4005 abort ();
4006
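/* All local-dynamic accesses in the output share a single GOT pair:
   the first slot gets an R_X86_64_DTPMOD64 relocation against symbol
   0 (the module itself, filled in by ld.so) and the second slot
   stays zero.  The low bit of the offset again flags that the entry
   has been initialized.  */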
4007 off = htab->tls_ld_or_ldm_got.offset;
4008 if (off & 1)
4009 off &= ~1;
4010 else
4011 {
4012 Elf_Internal_Rela outrel;
4013
4014 if (htab->elf.srelgot == NULL)
4015 abort ();
4016
4017 outrel.r_offset = (htab->elf.sgot->output_section->vma
4018 + htab->elf.sgot->output_offset + off);
4019
4020 bfd_put_64 (output_bfd, 0,
4021 htab->elf.sgot->contents + off);
4022 bfd_put_64 (output_bfd, 0,
4023 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
4024 outrel.r_info = htab->r_info (0, R_X86_64_DTPMOD64);
4025 outrel.r_addend = 0;
4026 elf_append_rela (output_bfd, htab->elf.srelgot,
4027 &outrel);
4028 htab->tls_ld_or_ldm_got.offset |= 1;
4029 }
4030 relocation = htab->elf.sgot->output_section->vma
4031 + htab->elf.sgot->output_offset + off;
4032 unresolved_reloc = false;
4033 break;
4034
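/* For DTPOFF32 in executable code the TLSLD sequence above has
   already been relaxed to the local-exec form, so the value is
   resolved as a TP-relative offset; in shared objects and in
   non-code sections (e.g. debug info) it stays relative to the start
   of the module's TLS block (_bfd_x86_elf_dtpoff_base).  */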
4035 case R_X86_64_DTPOFF32:
4036 if (!bfd_link_executable (info)
4037 || (input_section->flags & SEC_CODE) == 0)
4038 relocation -= _bfd_x86_elf_dtpoff_base (info);
4039 else
4040 relocation = elf_x86_64_tpoff (info, relocation);
4041 break;
4042
4043 case R_X86_64_TPOFF32:
4044 case R_X86_64_TPOFF64:
4045 BFD_ASSERT (bfd_link_executable (info));
4046 relocation = elf_x86_64_tpoff (info, relocation);
4047 break;
4048
4049 case R_X86_64_DTPOFF64:
4050 BFD_ASSERT ((input_section->flags & SEC_CODE) == 0);
4051 relocation -= _bfd_x86_elf_dtpoff_base (info);
4052 break;
4053
4054 default:
4055 break;
4056 }
4057
4058 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
4059 because such sections are not SEC_ALLOC and thus ld.so will
4060 not process them. */
4061 if (unresolved_reloc
4062 && !((input_section->flags & SEC_DEBUGGING) != 0
4063 && h->def_dynamic)
4064 && _bfd_elf_section_offset (output_bfd, info, input_section,
4065 rel->r_offset) != (bfd_vma) -1)
4066 {
4067 switch (r_type)
4068 {
4069 case R_X86_64_32S:
4070 sec = h->root.u.def.section;
4071 if ((info->nocopyreloc
4072 || (eh->def_protected
4073 && elf_has_no_copy_on_protected (h->root.u.def.section->owner)))
4074 && !(h->root.u.def.section->flags & SEC_CODE))
4075 return elf_x86_64_need_pic (info, input_bfd, input_section,
4076 h, NULL, NULL, howto);
4077 /* Fall through. */
4078
4079 default:
4080 _bfd_error_handler
4081 /* xgettext:c-format */
4082 (_("%pB(%pA+%#" PRIx64 "): "
4083 "unresolvable %s relocation against symbol `%s'"),
4084 input_bfd,
4085 input_section,
4086 (uint64_t) rel->r_offset,
4087 howto->name,
4088 h->root.root.string);
4089 return false;
4090 }
4091 }
4092
4093 do_relocation:
4094 r = _bfd_final_link_relocate (howto, input_bfd, input_section,
4095 contents, rel->r_offset,
4096 relocation, rel->r_addend);
4097
4098 check_relocation_error:
4099 if (r != bfd_reloc_ok)
4100 {
4101 const char *name;
4102
4103 if (h != NULL)
4104 name = h->root.root.string;
4105 else
4106 {
4107 name = bfd_elf_string_from_elf_section (input_bfd,
4108 symtab_hdr->sh_link,
4109 sym->st_name);
4110 if (name == NULL)
4111 return false;
4112 if (*name == '\0')
4113 name = bfd_section_name (sec);
4114 }
4115
4116 if (r == bfd_reloc_overflow)
4117 {
4118 if (converted_reloc)
4119 {
4120 info->callbacks->einfo
4121 ("%X%H:", input_bfd, input_section, rel->r_offset);
4122 info->callbacks->einfo
4123 (_(" failed to convert GOTPCREL relocation against "
4124 "'%s'; relink with --no-relax\n"),
4125 name);
4126 status = false;
4127 continue;
4128 }
4129 (*info->callbacks->reloc_overflow)
4130 (info, (h ? &h->root : NULL), name, howto->name,
4131 (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
4132 }
4133 else
4134 {
4135 _bfd_error_handler
4136 /* xgettext:c-format */
4137 (_("%pB(%pA+%#" PRIx64 "): reloc against `%s': error %d"),
4138 input_bfd, input_section,
4139 (uint64_t) rel->r_offset, name, (int) r);
4140 return false;
4141 }
4142 }
4143
4144 if (wrel != rel)
4145 *wrel = *rel;
4146 }
4147
4148 if (wrel != rel)
4149 {
4150 Elf_Internal_Shdr *rel_hdr;
4151 size_t deleted = rel - wrel;
4152
4153 rel_hdr = _bfd_elf_single_rel_hdr (input_section->output_section);
4154 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
4155 if (rel_hdr->sh_size == 0)
4156 {
4157 /* It is too late to remove an empty reloc section. Leave
4158 one NONE reloc.
4159 ??? What is wrong with an empty section??? */
4160 rel_hdr->sh_size = rel_hdr->sh_entsize;
4161 deleted -= 1;
4162 }
4163 rel_hdr = _bfd_elf_single_rel_hdr (input_section);
4164 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
4165 input_section->reloc_count -= deleted;
4166 }
4167
4168 return status;
4169 }
4170
4171 /* Finish up dynamic symbol handling. We set the contents of various
4172 dynamic sections here. */
4173
4174 static bool
4175 elf_x86_64_finish_dynamic_symbol (bfd *output_bfd,
4176 struct bfd_link_info *info,
4177 struct elf_link_hash_entry *h,
4178 Elf_Internal_Sym *sym)
4179 {
4180 struct elf_x86_link_hash_table *htab;
4181 bool use_plt_second;
4182 struct elf_x86_link_hash_entry *eh;
4183 bool local_undefweak;
4184
4185 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
4186 if (htab == NULL)
4187 return false;
4188
4189 /* Use the second PLT section only if there is a .plt section. */
4190 use_plt_second = htab->elf.splt != NULL && htab->plt_second != NULL;
4191
4192 eh = (struct elf_x86_link_hash_entry *) h;
4193 if (eh->no_finish_dynamic_symbol)
4194 abort ();
4195
4196 /* We keep PLT/GOT entries without dynamic PLT/GOT relocations for
4197 resolved undefined weak symbols in an executable so that their
4198 references have value 0 at run-time. */
4199 local_undefweak = UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh);
4200
4201 if (h->plt.offset != (bfd_vma) -1)
4202 {
4203 bfd_vma plt_index;
4204 bfd_vma got_offset, plt_offset;
4205 Elf_Internal_Rela rela;
4206 bfd_byte *loc;
4207 asection *plt, *gotplt, *relplt, *resolved_plt;
4208 const struct elf_backend_data *bed;
4209 bfd_vma plt_got_pcrel_offset;
4210
4211 /* When building a static executable, use .iplt, .igot.plt and
4212 .rela.iplt sections for STT_GNU_IFUNC symbols. */
4213 if (htab->elf.splt != NULL)
4214 {
4215 plt = htab->elf.splt;
4216 gotplt = htab->elf.sgotplt;
4217 relplt = htab->elf.srelplt;
4218 }
4219 else
4220 {
4221 plt = htab->elf.iplt;
4222 gotplt = htab->elf.igotplt;
4223 relplt = htab->elf.irelplt;
4224 }
4225
4226 VERIFY_PLT_ENTRY (info, h, plt, gotplt, relplt, local_undefweak)
4227
4228 /* Get the index in the procedure linkage table which
4229 corresponds to this symbol. This is the index of this symbol
4230 in all the symbols for which we are making plt entries. The
4231 first entry in the procedure linkage table is reserved.
4232
4233 Get the offset into the .got table of the entry that
4234 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
4235 bytes. The first three are reserved for the dynamic linker.
4236
4237 For static executables, we don't reserve anything. */
4238
4239 if (plt == htab->elf.splt)
4240 {
4241 got_offset = (h->plt.offset / htab->plt.plt_entry_size
4242 - htab->plt.has_plt0);
4243 got_offset = (got_offset + 3) * GOT_ENTRY_SIZE;
4244 }
4245 else
4246 {
4247 got_offset = h->plt.offset / htab->plt.plt_entry_size;
4248 got_offset = got_offset * GOT_ENTRY_SIZE;
4249 }
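/* Worked example: with PLT0 present, the first real PLT entry has
   h->plt.offset == plt_entry_size, giving got_offset == 3 *
   GOT_ENTRY_SIZE, i.e. the fourth .got.plt slot (the first three are
   reserved for the dynamic linker).  Without PLT0 (.iplt) no slots
   are reserved and entry N simply maps to slot N.  */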
4250
4251 /* Fill in the entry in the procedure linkage table. */
4252 memcpy (plt->contents + h->plt.offset, htab->plt.plt_entry,
4253 htab->plt.plt_entry_size);
4254 if (use_plt_second)
4255 {
4256 memcpy (htab->plt_second->contents + eh->plt_second.offset,
4257 htab->non_lazy_plt->plt_entry,
4258 htab->non_lazy_plt->plt_entry_size);
4259
4260 resolved_plt = htab->plt_second;
4261 plt_offset = eh->plt_second.offset;
4262 }
4263 else
4264 {
4265 resolved_plt = plt;
4266 plt_offset = h->plt.offset;
4267 }
4268
4269 /* Insert the relocation positions of the plt section. */
4270
4271 /* Put in the offset of the GOT entry relative to the PC-relative
4272 instruction referring to it, subtracting the size of that
4273 instruction so the displacement is taken from its end. */
4273 plt_got_pcrel_offset = (gotplt->output_section->vma
4274 + gotplt->output_offset
4275 + got_offset
4276 - resolved_plt->output_section->vma
4277 - resolved_plt->output_offset
4278 - plt_offset
4279 - htab->plt.plt_got_insn_size);
4280
4281 /* Check PC-relative offset overflow in PLT entry. */
4282 if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff)
4283 /* xgettext:c-format */
4284 info->callbacks->einfo (_("%F%pB: PC-relative offset overflow in PLT entry for `%s'\n"),
4285 output_bfd, h->root.root.string);
4286
4287 bfd_put_32 (output_bfd, plt_got_pcrel_offset,
4288 (resolved_plt->contents + plt_offset
4289 + htab->plt.plt_got_offset));
4290
4291 /* Fill in the entry in the global offset table; initially this
4292 points to the second part of the PLT entry.  Leave the entry
4293 as zero for an undefined weak symbol in PIE; no PLT relocation
4294 is emitted against an undefined weak symbol in PIE. */
4295 if (!local_undefweak)
4296 {
4297 if (htab->plt.has_plt0)
4298 bfd_put_64 (output_bfd, (plt->output_section->vma
4299 + plt->output_offset
4300 + h->plt.offset
4301 + htab->lazy_plt->plt_lazy_offset),
4302 gotplt->contents + got_offset);
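/* With lazy binding the .got.plt slot initially points back into
   this PLT entry (at plt_lazy_offset), so the first call ends up in
   PLT0, which enters the dynamic linker to resolve the symbol and
   patch the slot with the real address.  */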
4303
4304 /* Fill in the entry in the .rela.plt section. */
4305 rela.r_offset = (gotplt->output_section->vma
4306 + gotplt->output_offset
4307 + got_offset);
4308 if (PLT_LOCAL_IFUNC_P (info, h))
4309 {
4310 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
4311 h->root.root.string,
4312 h->root.u.def.section->owner);
4313
4314 /* If an STT_GNU_IFUNC symbol is locally defined, generate
4315 R_X86_64_IRELATIVE instead of R_X86_64_JUMP_SLOT. */
4316 rela.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
4317 rela.r_addend = (h->root.u.def.value
4318 + h->root.u.def.section->output_section->vma
4319 + h->root.u.def.section->output_offset);
4320
4321 if (htab->params->report_relative_reloc)
4322 _bfd_x86_elf_link_report_relative_reloc
4323 (info, relplt, h, sym, "R_X86_64_IRELATIVE", &rela);
4324
4325 /* R_X86_64_IRELATIVE comes last. */
4326 plt_index = htab->next_irelative_index--;
4327 }
4328 else
4329 {
4330 rela.r_info = htab->r_info (h->dynindx, R_X86_64_JUMP_SLOT);
4331 rela.r_addend = 0;
4332 plt_index = htab->next_jump_slot_index++;
4333 }
4334
4335 /* Don't fill in the second and third slots of the PLT entry for
4336 static executables or when there is no PLT0. */
4337 if (plt == htab->elf.splt && htab->plt.has_plt0)
4338 {
4339 bfd_vma plt0_offset
4340 = h->plt.offset + htab->lazy_plt->plt_plt_insn_end;
4341
4342 /* Put relocation index. */
4343 bfd_put_32 (output_bfd, plt_index,
4344 (plt->contents + h->plt.offset
4345 + htab->lazy_plt->plt_reloc_offset));
4346
4347 /* Put offset for jmp .PLT0 and check for overflow. We don't
4348 check relocation index for overflow since branch displacement
4349 will overflow first. */
4350 if (plt0_offset > 0x80000000)
4351 /* xgettext:c-format */
4352 info->callbacks->einfo (_("%F%pB: branch displacement overflow in PLT entry for `%s'\n"),
4353 output_bfd, h->root.root.string);
4354 bfd_put_32 (output_bfd, - plt0_offset,
4355 (plt->contents + h->plt.offset
4356 + htab->lazy_plt->plt_plt_offset));
4357 }
4358
4359 bed = get_elf_backend_data (output_bfd);
4360 loc = relplt->contents + plt_index * bed->s->sizeof_rela;
4361 bed->s->swap_reloca_out (output_bfd, &rela, loc);
4362 }
4363 }
4364 else if (eh->plt_got.offset != (bfd_vma) -1)
4365 {
4366 bfd_vma got_offset, plt_offset;
4367 asection *plt, *got;
4368 bool got_after_plt;
4369 int32_t got_pcrel_offset;
4370
4371 /* Set the entry in the GOT procedure linkage table. */
4372 plt = htab->plt_got;
4373 got = htab->elf.sgot;
4374 got_offset = h->got.offset;
4375
4376 if (got_offset == (bfd_vma) -1
4377 || (h->type == STT_GNU_IFUNC && h->def_regular)
4378 || plt == NULL
4379 || got == NULL)
4380 abort ();
4381
4382 /* Use the non-lazy PLT entry template for the GOT PLT since they
4383 are identical. */
4384 /* Fill in the entry in the GOT procedure linkage table. */
4385 plt_offset = eh->plt_got.offset;
4386 memcpy (plt->contents + plt_offset,
4387 htab->non_lazy_plt->plt_entry,
4388 htab->non_lazy_plt->plt_entry_size);
4389
4390 /* Put in the offset of the GOT entry relative to the PC-relative
4391 instruction referring to it, subtracting the size of that instruction. */
4392 got_pcrel_offset = (got->output_section->vma
4393 + got->output_offset
4394 + got_offset
4395 - plt->output_section->vma
4396 - plt->output_offset
4397 - plt_offset
4398 - htab->non_lazy_plt->plt_got_insn_size);
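/* got_pcrel_offset is a signed 32-bit value: if .got lies above
   .plt.got the true displacement must be positive, and negative
   otherwise, so a truncated or wrapped value is caught by the sign
   test below.  */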
4399
4400 /* Check PC-relative offset overflow in GOT PLT entry. */
4401 got_after_plt = got->output_section->vma > plt->output_section->vma;
4402 if ((got_after_plt && got_pcrel_offset < 0)
4403 || (!got_after_plt && got_pcrel_offset > 0))
4404 /* xgettext:c-format */
4405 info->callbacks->einfo (_("%F%pB: PC-relative offset overflow in GOT PLT entry for `%s'\n"),
4406 output_bfd, h->root.root.string);
4407
4408 bfd_put_32 (output_bfd, got_pcrel_offset,
4409 (plt->contents + plt_offset
4410 + htab->non_lazy_plt->plt_got_offset));
4411 }
4412
4413 if (!local_undefweak
4414 && !h->def_regular
4415 && (h->plt.offset != (bfd_vma) -1
4416 || eh->plt_got.offset != (bfd_vma) -1))
4417 {
4418 /* Mark the symbol as undefined, rather than as defined in
4419 the .plt section. Leave the value if there were any
4420 relocations where pointer equality matters (this is a clue
4421 for the dynamic linker, to make function pointer
4422 comparisons work between an application and shared
4423 library), otherwise set it to zero. If a function is only
4424 called from a binary, there is no need to slow down
4425 shared libraries because of that. */
4426 sym->st_shndx = SHN_UNDEF;
4427 if (!h->pointer_equality_needed)
4428 sym->st_value = 0;
4429 }
4430
4431 _bfd_x86_elf_link_fixup_ifunc_symbol (info, htab, h, sym);
4432
4433 /* Don't generate dynamic GOT relocation against undefined weak
4434 symbol in executable. */
4435 if (h->got.offset != (bfd_vma) -1
4436 && ! GOT_TLS_GD_ANY_P (elf_x86_hash_entry (h)->tls_type)
4437 && elf_x86_hash_entry (h)->tls_type != GOT_TLS_IE
4438 && !local_undefweak)
4439 {
4440 Elf_Internal_Rela rela;
4441 asection *relgot = htab->elf.srelgot;
4442 const char *relative_reloc_name = NULL;
4443
4444 /* This symbol has an entry in the global offset table. Set it
4445 up. */
4446 if (htab->elf.sgot == NULL || htab->elf.srelgot == NULL)
4447 abort ();
4448
4449 rela.r_offset = (htab->elf.sgot->output_section->vma
4450 + htab->elf.sgot->output_offset
4451 + (h->got.offset &~ (bfd_vma) 1));
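/* Mask off bit 0: it is used as a "GOT entry already processed"
   flag by relocate_section, not as part of the offset.  */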
4452
4453 /* If this is a static link, or it is a -Bsymbolic link and the
4454 symbol is defined locally or was forced to be local because
4455 of a version file, we just want to emit a RELATIVE reloc.
4456 The entry in the global offset table will already have been
4457 initialized in the relocate_section function. */
4458 if (h->def_regular
4459 && h->type == STT_GNU_IFUNC)
4460 {
4461 if (h->plt.offset == (bfd_vma) -1)
4462 {
4463 /* STT_GNU_IFUNC is referenced without PLT. */
4464 if (htab->elf.splt == NULL)
4465 {
4466 /* Use the .rel[a].iplt section to store .got relocations
4467 in a static executable. */
4468 relgot = htab->elf.irelplt;
4469 }
4470 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
4471 {
4472 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
4473 h->root.root.string,
4474 h->root.u.def.section->owner);
4475
4476 rela.r_info = htab->r_info (0,
4477 R_X86_64_IRELATIVE);
4478 rela.r_addend = (h->root.u.def.value
4479 + h->root.u.def.section->output_section->vma
4480 + h->root.u.def.section->output_offset);
4481 relative_reloc_name = "R_X86_64_IRELATIVE";
4482 }
4483 else
4484 goto do_glob_dat;
4485 }
4486 else if (bfd_link_pic (info))
4487 {
4488 /* Generate R_X86_64_GLOB_DAT. */
4489 goto do_glob_dat;
4490 }
4491 else
4492 {
4493 asection *plt;
4494 bfd_vma plt_offset;
4495
4496 if (!h->pointer_equality_needed)
4497 abort ();
4498
4499 /* For a non-shared object, we can't use .got.plt, which
4500 contains the real function address, when we need pointer
4501 equality.  We load the GOT entry with the PLT entry instead. */
4502 if (htab->plt_second != NULL)
4503 {
4504 plt = htab->plt_second;
4505 plt_offset = eh->plt_second.offset;
4506 }
4507 else
4508 {
4509 plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt;
4510 plt_offset = h->plt.offset;
4511 }
4512 bfd_put_64 (output_bfd, (plt->output_section->vma
4513 + plt->output_offset
4514 + plt_offset),
4515 htab->elf.sgot->contents + h->got.offset);
4516 return true;
4517 }
4518 }
4519 else if (bfd_link_pic (info)
4520 && SYMBOL_REFERENCES_LOCAL_P (info, h))
4521 {
4522 if (!SYMBOL_DEFINED_NON_SHARED_P (h))
4523 return false;
4524 BFD_ASSERT((h->got.offset & 1) != 0);
4525 rela.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4526 rela.r_addend = (h->root.u.def.value
4527 + h->root.u.def.section->output_section->vma
4528 + h->root.u.def.section->output_offset);
4529 relative_reloc_name = "R_X86_64_RELATIVE";
4530 }
4531 else
4532 {
4533 BFD_ASSERT((h->got.offset & 1) == 0);
4534 do_glob_dat:
4535 bfd_put_64 (output_bfd, (bfd_vma) 0,
4536 htab->elf.sgot->contents + h->got.offset);
4537 rela.r_info = htab->r_info (h->dynindx, R_X86_64_GLOB_DAT);
4538 rela.r_addend = 0;
4539 }
4540
4541 if (relative_reloc_name != NULL
4542 && htab->params->report_relative_reloc)
4543 _bfd_x86_elf_link_report_relative_reloc
4544 (info, relgot, h, sym, relative_reloc_name, &rela);
4545
4546 elf_append_rela (output_bfd, relgot, &rela);
4547 }
4548
4549 if (h->needs_copy)
4550 {
4551 Elf_Internal_Rela rela;
4552 asection *s;
4553
4554 /* This symbol needs a copy reloc. Set it up. */
4555 VERIFY_COPY_RELOC (h, htab)
4556
4557 rela.r_offset = (h->root.u.def.value
4558 + h->root.u.def.section->output_section->vma
4559 + h->root.u.def.section->output_offset);
4560 rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY);
4561 rela.r_addend = 0;
4562 if (h->root.u.def.section == htab->elf.sdynrelro)
4563 s = htab->elf.sreldynrelro;
4564 else
4565 s = htab->elf.srelbss;
4566 elf_append_rela (output_bfd, s, &rela);
4567 }
4568
4569 return true;
4570 }
4571
4572 /* Finish up local dynamic symbol handling. We set the contents of
4573 various dynamic sections here. */
4574
4575 static int
4576 elf_x86_64_finish_local_dynamic_symbol (void **slot, void *inf)
4577 {
4578 struct elf_link_hash_entry *h
4579 = (struct elf_link_hash_entry *) *slot;
4580 struct bfd_link_info *info
4581 = (struct bfd_link_info *) inf;
4582
4583 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4584 info, h, NULL);
4585 }
4586
4587 /* Finish up undefined weak symbol handling in PIE.  Fill its PLT entry
4588 here since an undefined weak symbol may not be dynamic, in which case
4589 elf_x86_64_finish_dynamic_symbol is not called for it. */
4590
4591 static bool
4592 elf_x86_64_pie_finish_undefweak_symbol (struct bfd_hash_entry *bh,
4593 void *inf)
4594 {
4595 struct elf_link_hash_entry *h = (struct elf_link_hash_entry *) bh;
4596 struct bfd_link_info *info = (struct bfd_link_info *) inf;
4597
4598 if (h->root.type != bfd_link_hash_undefweak
4599 || h->dynindx != -1)
4600 return true;
4601
4602 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4603 info, h, NULL);
4604 }
4605
4606 /* Used to decide how to sort relocs in an optimal manner for the
4607 dynamic linker, before writing them out. */
4608
4609 static enum elf_reloc_type_class
4610 elf_x86_64_reloc_type_class (const struct bfd_link_info *info,
4611 const asection *rel_sec ATTRIBUTE_UNUSED,
4612 const Elf_Internal_Rela *rela)
4613 {
4614 bfd *abfd = info->output_bfd;
4615 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
4616 struct elf_x86_link_hash_table *htab
4617 = elf_x86_hash_table (info, X86_64_ELF_DATA);
4618
4619 if (htab->elf.dynsym != NULL
4620 && htab->elf.dynsym->contents != NULL)
4621 {
4622 /* Check relocation against STT_GNU_IFUNC symbol if there are
4623 dynamic symbols. */
4624 unsigned long r_symndx = htab->r_sym (rela->r_info);
4625 if (r_symndx != STN_UNDEF)
4626 {
4627 Elf_Internal_Sym sym;
4628 if (!bed->s->swap_symbol_in (abfd,
4629 (htab->elf.dynsym->contents
4630 + r_symndx * bed->s->sizeof_sym),
4631 0, &sym))
4632 abort ();
4633
4634 if (ELF_ST_TYPE (sym.st_info) == STT_GNU_IFUNC)
4635 return reloc_class_ifunc;
4636 }
4637 }
4638
4639 switch ((int) ELF32_R_TYPE (rela->r_info))
4640 {
4641 case R_X86_64_IRELATIVE:
4642 return reloc_class_ifunc;
4643 case R_X86_64_RELATIVE:
4644 case R_X86_64_RELATIVE64:
4645 return reloc_class_relative;
4646 case R_X86_64_JUMP_SLOT:
4647 return reloc_class_plt;
4648 case R_X86_64_COPY:
4649 return reloc_class_copy;
4650 default:
4651 return reloc_class_normal;
4652 }
4653 }
4654
4655 /* Finish up the dynamic sections. */
4656
4657 static bool
4658 elf_x86_64_finish_dynamic_sections (bfd *output_bfd,
4659 struct bfd_link_info *info)
4660 {
4661 struct elf_x86_link_hash_table *htab;
4662
4663 htab = _bfd_x86_elf_finish_dynamic_sections (output_bfd, info);
4664 if (htab == NULL)
4665 return false;
4666
4667 if (! htab->elf.dynamic_sections_created)
4668 return true;
4669
4670 if (htab->elf.splt && htab->elf.splt->size > 0)
4671 {
4672 elf_section_data (htab->elf.splt->output_section)
4673 ->this_hdr.sh_entsize = htab->plt.plt_entry_size;
4674
4675 if (htab->plt.has_plt0)
4676 {
4677 /* Fill in the special first entry in the procedure linkage
4678 table. */
4679 memcpy (htab->elf.splt->contents,
4680 htab->lazy_plt->plt0_entry,
4681 htab->lazy_plt->plt0_entry_size);
4682 /* Add offset for pushq GOT+8(%rip); since the instruction
4683 uses 6 bytes, subtract this value. */
4684 bfd_put_32 (output_bfd,
4685 (htab->elf.sgotplt->output_section->vma
4686 + htab->elf.sgotplt->output_offset
4687 + 8
4688 - htab->elf.splt->output_section->vma
4689 - htab->elf.splt->output_offset
4690 - 6),
4691 (htab->elf.splt->contents
4692 + htab->lazy_plt->plt0_got1_offset));
4693 /* Add offset for the PC-relative instruction accessing
4694 GOT+16, subtracting the offset to the end of that
4695 instruction. */
4696 bfd_put_32 (output_bfd,
4697 (htab->elf.sgotplt->output_section->vma
4698 + htab->elf.sgotplt->output_offset
4699 + 16
4700 - htab->elf.splt->output_section->vma
4701 - htab->elf.splt->output_offset
4702 - htab->lazy_plt->plt0_got2_insn_end),
4703 (htab->elf.splt->contents
4704 + htab->lazy_plt->plt0_got2_offset));
4705 }
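/* PLT0 is roughly:
     pushq GOT+8(%rip)
     jmpq  *GOT+16(%rip)
   (plus padding or ENDBR64 depending on the template).  GOT+8 and
   GOT+16 are the slots ld.so fills with its link map pointer and
   the address of its lazy resolver.  */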
4706
4707 if (htab->elf.tlsdesc_plt)
4708 {
4709 bfd_put_64 (output_bfd, (bfd_vma) 0,
4710 htab->elf.sgot->contents + htab->elf.tlsdesc_got);
4711
4712 memcpy (htab->elf.splt->contents + htab->elf.tlsdesc_plt,
4713 htab->lazy_plt->plt_tlsdesc_entry,
4714 htab->lazy_plt->plt_tlsdesc_entry_size);
4715
4716 /* Add offset for pushq GOT+8(%rip); since ENDBR64 uses 4
4717 bytes and the instruction uses 6 bytes, subtract these
4718 values. */
4719 bfd_put_32 (output_bfd,
4720 (htab->elf.sgotplt->output_section->vma
4721 + htab->elf.sgotplt->output_offset
4722 + 8
4723 - htab->elf.splt->output_section->vma
4724 - htab->elf.splt->output_offset
4725 - htab->elf.tlsdesc_plt
4726 - htab->lazy_plt->plt_tlsdesc_got1_insn_end),
4727 (htab->elf.splt->contents
4728 + htab->elf.tlsdesc_plt
4729 + htab->lazy_plt->plt_tlsdesc_got1_offset));
4730 /* Add offset for indirect branch via GOT+TDG, where TDG
4731 stands for htab->tlsdesc_got, subtracting the offset
4732 to the end of that instruction. */
4733 bfd_put_32 (output_bfd,
4734 (htab->elf.sgot->output_section->vma
4735 + htab->elf.sgot->output_offset
4736 + htab->elf.tlsdesc_got
4737 - htab->elf.splt->output_section->vma
4738 - htab->elf.splt->output_offset
4739 - htab->elf.tlsdesc_plt
4740 - htab->lazy_plt->plt_tlsdesc_got2_insn_end),
4741 (htab->elf.splt->contents
4742 + htab->elf.tlsdesc_plt
4743 + htab->lazy_plt->plt_tlsdesc_got2_offset));
4744 }
4745 }
4746
4747 /* Fill PLT entries for undefined weak symbols in PIE. */
4748 if (bfd_link_pie (info))
4749 bfd_hash_traverse (&info->hash->table,
4750 elf_x86_64_pie_finish_undefweak_symbol,
4751 info);
4752
4753 return true;
4754 }
4755
4756 /* Fill PLT/GOT entries and allocate dynamic relocations for local
4757 STT_GNU_IFUNC symbols, which aren't in the ELF linker hash table.
4758 It has to be done before elf_link_sort_relocs is called so that
4759 dynamic relocations are properly sorted. */
4760
4761 static bool
4762 elf_x86_64_output_arch_local_syms
4763 (bfd *output_bfd ATTRIBUTE_UNUSED,
4764 struct bfd_link_info *info,
4765 void *flaginfo ATTRIBUTE_UNUSED,
4766 int (*func) (void *, const char *,
4767 Elf_Internal_Sym *,
4768 asection *,
4769 struct elf_link_hash_entry *) ATTRIBUTE_UNUSED)
4770 {
4771 struct elf_x86_link_hash_table *htab
4772 = elf_x86_hash_table (info, X86_64_ELF_DATA);
4773 if (htab == NULL)
4774 return false;
4775
4776 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
4777 htab_traverse (htab->loc_hash_table,
4778 elf_x86_64_finish_local_dynamic_symbol,
4779 info);
4780
4781 return true;
4782 }
4783
4784 /* Similar to _bfd_elf_get_synthetic_symtab. Support PLTs with all
4785 dynamic relocations. */
4786
4787 static long
4788 elf_x86_64_get_synthetic_symtab (bfd *abfd,
4789 long symcount ATTRIBUTE_UNUSED,
4790 asymbol **syms ATTRIBUTE_UNUSED,
4791 long dynsymcount,
4792 asymbol **dynsyms,
4793 asymbol **ret)
4794 {
4795 long count, i, n;
4796 int j;
4797 bfd_byte *plt_contents;
4798 long relsize;
4799 const struct elf_x86_lazy_plt_layout *lazy_plt;
4800 const struct elf_x86_non_lazy_plt_layout *non_lazy_plt;
4801 const struct elf_x86_lazy_plt_layout *lazy_bnd_plt;
4802 const struct elf_x86_non_lazy_plt_layout *non_lazy_bnd_plt;
4803 const struct elf_x86_lazy_plt_layout *lazy_ibt_plt;
4804 const struct elf_x86_non_lazy_plt_layout *non_lazy_ibt_plt;
4805 asection *plt;
4806 enum elf_x86_plt_type plt_type;
4807 struct elf_x86_plt plts[] =
4808 {
4809 { ".plt", NULL, NULL, plt_unknown, 0, 0, 0, 0 },
4810 { ".plt.got", NULL, NULL, plt_non_lazy, 0, 0, 0, 0 },
4811 { ".plt.sec", NULL, NULL, plt_second, 0, 0, 0, 0 },
4812 { ".plt.bnd", NULL, NULL, plt_second, 0, 0, 0, 0 },
4813 { NULL, NULL, NULL, plt_non_lazy, 0, 0, 0, 0 }
4814 };
4815
4816 *ret = NULL;
4817
4818 if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
4819 return 0;
4820
4821 if (dynsymcount <= 0)
4822 return 0;
4823
4824 relsize = bfd_get_dynamic_reloc_upper_bound (abfd);
4825 if (relsize <= 0)
4826 return -1;
4827
4828 lazy_plt = &elf_x86_64_lazy_plt;
4829 non_lazy_plt = &elf_x86_64_non_lazy_plt;
4830 lazy_bnd_plt = &elf_x86_64_lazy_bnd_plt;
4831 non_lazy_bnd_plt = &elf_x86_64_non_lazy_bnd_plt;
4832 if (ABI_64_P (abfd))
4833 {
4834 lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
4835 non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
4836 }
4837 else
4838 {
4839 lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
4840 non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
4841 }
4842
4843 count = 0;
4844 for (j = 0; plts[j].name != NULL; j++)
4845 {
4846 plt = bfd_get_section_by_name (abfd, plts[j].name);
4847 if (plt == NULL || plt->size == 0)
4848 continue;
4849
4850 /* Get the PLT section contents. */
4851 if (!bfd_malloc_and_get_section (abfd, plt, &plt_contents))
4852 break;
4853
4854 /* Check what kind of PLT it is. */
4855 plt_type = plt_unknown;
4856 if (plts[j].type == plt_unknown
4857 && (plt->size >= (lazy_plt->plt_entry_size
4858 + lazy_plt->plt_entry_size)))
4859 {
4860 /* Match lazy PLT first. Need to check the first two
4861 instructions. */
4862 if ((memcmp (plt_contents, lazy_plt->plt0_entry,
4863 lazy_plt->plt0_got1_offset) == 0)
4864 && (memcmp (plt_contents + 6, lazy_plt->plt0_entry + 6,
4865 2) == 0))
4866 plt_type = plt_lazy;
4867 else if (lazy_bnd_plt != NULL
4868 && (memcmp (plt_contents, lazy_bnd_plt->plt0_entry,
4869 lazy_bnd_plt->plt0_got1_offset) == 0)
4870 && (memcmp (plt_contents + 6,
4871 lazy_bnd_plt->plt0_entry + 6, 3) == 0))
4872 {
4873 plt_type = plt_lazy | plt_second;
4874 /* The first entry in the lazy IBT PLT is the same as in the
4875 lazy BND PLT. */
4876 if ((memcmp (plt_contents + lazy_ibt_plt->plt_entry_size,
4877 lazy_ibt_plt->plt_entry,
4878 lazy_ibt_plt->plt_got_offset) == 0))
4879 lazy_plt = lazy_ibt_plt;
4880 else
4881 lazy_plt = lazy_bnd_plt;
4882 }
4883 }
4884
4885 if (non_lazy_plt != NULL
4886 && (plt_type == plt_unknown || plt_type == plt_non_lazy)
4887 && plt->size >= non_lazy_plt->plt_entry_size)
4888 {
4889 /* Match non-lazy PLT. */
4890 if (memcmp (plt_contents, non_lazy_plt->plt_entry,
4891 non_lazy_plt->plt_got_offset) == 0)
4892 plt_type = plt_non_lazy;
4893 }
4894
4895 if (plt_type == plt_unknown || plt_type == plt_second)
4896 {
4897 if (non_lazy_bnd_plt != NULL
4898 && plt->size >= non_lazy_bnd_plt->plt_entry_size
4899 && (memcmp (plt_contents, non_lazy_bnd_plt->plt_entry,
4900 non_lazy_bnd_plt->plt_got_offset) == 0))
4901 {
4902 /* Match BND PLT. */
4903 plt_type = plt_second;
4904 non_lazy_plt = non_lazy_bnd_plt;
4905 }
4906 else if (non_lazy_ibt_plt != NULL
4907 && plt->size >= non_lazy_ibt_plt->plt_entry_size
4908 && (memcmp (plt_contents,
4909 non_lazy_ibt_plt->plt_entry,
4910 non_lazy_ibt_plt->plt_got_offset) == 0))
4911 {
4912 /* Match IBT PLT. */
4913 plt_type = plt_second;
4914 non_lazy_plt = non_lazy_ibt_plt;
4915 }
4916 }
4917
4918 if (plt_type == plt_unknown)
4919 {
4920 free (plt_contents);
4921 continue;
4922 }
4923
4924 plts[j].sec = plt;
4925 plts[j].type = plt_type;
4926
4927 if ((plt_type & plt_lazy))
4928 {
4929 plts[j].plt_got_offset = lazy_plt->plt_got_offset;
4930 plts[j].plt_got_insn_size = lazy_plt->plt_got_insn_size;
4931 plts[j].plt_entry_size = lazy_plt->plt_entry_size;
4932 /* Skip PLT0 in lazy PLT. */
4933 i = 1;
4934 }
4935 else
4936 {
4937 plts[j].plt_got_offset = non_lazy_plt->plt_got_offset;
4938 plts[j].plt_got_insn_size = non_lazy_plt->plt_got_insn_size;
4939 plts[j].plt_entry_size = non_lazy_plt->plt_entry_size;
4940 i = 0;
4941 }
4942
4943 /* Skip lazy PLT when the second PLT is used. */
4944 if (plt_type == (plt_lazy | plt_second))
4945 plts[j].count = 0;
4946 else
4947 {
4948 n = plt->size / plts[j].plt_entry_size;
4949 plts[j].count = n;
4950 count += n - i;
4951 }
4952
4953 plts[j].contents = plt_contents;
4954 }
4955
4956 return _bfd_x86_elf_get_synthetic_symtab (abfd, count, relsize,
4957 (bfd_vma) 0, plts, dynsyms,
4958 ret);
4959 }
4960
4961 /* Handle an x86-64 specific section when reading an object file. This
4962 is called when elfcode.h finds a section with an unknown type. */
4963
4964 static bool
4965 elf_x86_64_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr,
4966 const char *name, int shindex)
4967 {
4968 if (hdr->sh_type != SHT_X86_64_UNWIND)
4969 return false;
4970
4971 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
4972 return false;
4973
4974 return true;
4975 }
4976
4977 /* Hook called by the linker routine which adds symbols from an object
4978 file. We use it to put SHN_X86_64_LCOMMON items in .lbss, instead
4979 of .bss. */
4980
4981 static bool
4982 elf_x86_64_add_symbol_hook (bfd *abfd,
4983 struct bfd_link_info *info ATTRIBUTE_UNUSED,
4984 Elf_Internal_Sym *sym,
4985 const char **namep ATTRIBUTE_UNUSED,
4986 flagword *flagsp ATTRIBUTE_UNUSED,
4987 asection **secp,
4988 bfd_vma *valp)
4989 {
4990 asection *lcomm;
4991
4992 switch (sym->st_shndx)
4993 {
4994 case SHN_X86_64_LCOMMON:
4995 lcomm = bfd_get_section_by_name (abfd, "LARGE_COMMON");
4996 if (lcomm == NULL)
4997 {
4998 lcomm = bfd_make_section_with_flags (abfd,
4999 "LARGE_COMMON",
5000 (SEC_ALLOC
5001 | SEC_IS_COMMON
5002 | SEC_LINKER_CREATED));
5003 if (lcomm == NULL)
5004 return false;
5005 elf_section_flags (lcomm) |= SHF_X86_64_LARGE;
5006 }
5007 *secp = lcomm;
5008 *valp = sym->st_size;
5009 return true;
5010 }
5011
5012 return true;
5013 }
5014
5015
5016 /* Given a BFD section, try to locate the corresponding ELF section
5017 index. */
5018
5019 static bool
5020 elf_x86_64_elf_section_from_bfd_section (bfd *abfd ATTRIBUTE_UNUSED,
5021 asection *sec, int *index_return)
5022 {
5023 if (sec == &_bfd_elf_large_com_section)
5024 {
5025 *index_return = SHN_X86_64_LCOMMON;
5026 return true;
5027 }
5028 return false;
5029 }
5030
5031 /* Process a symbol. */
5032
5033 static void
5034 elf_x86_64_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
5035 asymbol *asym)
5036 {
5037 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
5038
5039 switch (elfsym->internal_elf_sym.st_shndx)
5040 {
5041 case SHN_X86_64_LCOMMON:
5042 asym->section = &_bfd_elf_large_com_section;
5043 asym->value = elfsym->internal_elf_sym.st_size;
5044 /* Common symbol doesn't set BSF_GLOBAL. */
5045 asym->flags &= ~BSF_GLOBAL;
5046 break;
5047 }
5048 }
5049
5050 static bool
5051 elf_x86_64_common_definition (Elf_Internal_Sym *sym)
5052 {
5053 return (sym->st_shndx == SHN_COMMON
5054 || sym->st_shndx == SHN_X86_64_LCOMMON);
5055 }
5056
5057 static unsigned int
5058 elf_x86_64_common_section_index (asection *sec)
5059 {
5060 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
5061 return SHN_COMMON;
5062 else
5063 return SHN_X86_64_LCOMMON;
5064 }
5065
5066 static asection *
5067 elf_x86_64_common_section (asection *sec)
5068 {
5069 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
5070 return bfd_com_section_ptr;
5071 else
5072 return &_bfd_elf_large_com_section;
5073 }
5074
5075 static bool
5076 elf_x86_64_merge_symbol (struct elf_link_hash_entry *h,
5077 const Elf_Internal_Sym *sym,
5078 asection **psec,
5079 bool newdef,
5080 bool olddef,
5081 bfd *oldbfd,
5082 const asection *oldsec)
5083 {
5084 /* A normal common symbol and a large common symbol result in a
5085 normal common symbol. We turn the large common symbol into a
5086 normal one. */
5087 if (!olddef
5088 && h->root.type == bfd_link_hash_common
5089 && !newdef
5090 && bfd_is_com_section (*psec)
5091 && oldsec != *psec)
5092 {
5093 if (sym->st_shndx == SHN_COMMON
5094 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) != 0)
5095 {
5096 h->root.u.c.p->section
5097 = bfd_make_section_old_way (oldbfd, "COMMON");
5098 h->root.u.c.p->section->flags = SEC_ALLOC;
5099 }
5100 else if (sym->st_shndx == SHN_X86_64_LCOMMON
5101 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) == 0)
5102 *psec = bfd_com_section_ptr;
5103 }
5104
5105 return true;
5106 }
5107
5108 static int
5109 elf_x86_64_additional_program_headers (bfd *abfd,
5110 struct bfd_link_info *info ATTRIBUTE_UNUSED)
5111 {
5112 asection *s;
5113 int count = 0;
5114
5115 /* Check to see if we need a large readonly segment. */
5116 s = bfd_get_section_by_name (abfd, ".lrodata");
5117 if (s && (s->flags & SEC_LOAD))
5118 count++;
5119
5120 /* Check to see if we need a large data segment.  Since the .lbss
5121 section is placed right after the .bss section, there should be no
5122 need for a large data segment just because of .lbss. */
5123 s = bfd_get_section_by_name (abfd, ".ldata");
5124 if (s && (s->flags & SEC_LOAD))
5125 count++;
5126
5127 return count;
5128 }
5129
5130 /* Return TRUE iff relocations for INPUT are compatible with OUTPUT. */
5131
5132 static bool
5133 elf_x86_64_relocs_compatible (const bfd_target *input,
5134 const bfd_target *output)
5135 {
5136 return ((xvec_get_elf_backend_data (input)->s->elfclass
5137 == xvec_get_elf_backend_data (output)->s->elfclass)
5138 && _bfd_elf_relocs_compatible (input, output));
5139 }
5140
5141 /* Set up x86-64 GNU properties. Return the first relocatable ELF input
5142 with GNU properties if found. Otherwise, return NULL. */
5143
5144 static bfd *
5145 elf_x86_64_link_setup_gnu_properties (struct bfd_link_info *info)
5146 {
5147 struct elf_x86_init_table init_table;
5148 const struct elf_backend_data *bed;
5149 struct elf_x86_link_hash_table *htab;
5150
5151 if ((int) R_X86_64_standard >= (int) R_X86_64_converted_reloc_bit
5152 || (int) R_X86_64_max <= (int) R_X86_64_converted_reloc_bit
5153 || ((int) (R_X86_64_GNU_VTINHERIT | R_X86_64_converted_reloc_bit)
5154 != (int) R_X86_64_GNU_VTINHERIT)
5155 || ((int) (R_X86_64_GNU_VTENTRY | R_X86_64_converted_reloc_bit)
5156 != (int) R_X86_64_GNU_VTENTRY))
5157 abort ();
5158
5159 /* This is unused for x86-64. */
5160 init_table.plt0_pad_byte = 0x90;
5161
5162 bed = get_elf_backend_data (info->output_bfd);
5163 htab = elf_x86_hash_table (info, bed->target_id);
5164 if (!htab)
5165 abort ();
5166 if (htab->params->bndplt)
5167 {
5168 init_table.lazy_plt = &elf_x86_64_lazy_bnd_plt;
5169 init_table.non_lazy_plt = &elf_x86_64_non_lazy_bnd_plt;
5170 }
5171 else
5172 {
5173 init_table.lazy_plt = &elf_x86_64_lazy_plt;
5174 init_table.non_lazy_plt = &elf_x86_64_non_lazy_plt;
5175 }
5176
5177 if (ABI_64_P (info->output_bfd))
5178 {
5179 init_table.lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
5180 init_table.non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
5181 }
5182 else
5183 {
5184 init_table.lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
5185 init_table.non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
5186 }
5187
5188 if (ABI_64_P (info->output_bfd))
5189 {
5190 init_table.r_info = elf64_r_info;
5191 init_table.r_sym = elf64_r_sym;
5192 }
5193 else
5194 {
5195 init_table.r_info = elf32_r_info;
5196 init_table.r_sym = elf32_r_sym;
5197 }
5198
5199 return _bfd_x86_elf_link_setup_gnu_properties (info, &init_table);
5200 }
5201
5202 static const struct bfd_elf_special_section
5203 elf_x86_64_special_sections[]=
5204 {
5205 { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5206 { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
5207 { STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE},
5208 { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5209 { STRING_COMMA_LEN (".ldata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5210 { STRING_COMMA_LEN (".lrodata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
5211 { NULL, 0, 0, 0, 0 }
5212 };
5213
5214 #define TARGET_LITTLE_SYM x86_64_elf64_vec
5215 #define TARGET_LITTLE_NAME "elf64-x86-64"
5216 #define ELF_ARCH bfd_arch_i386
5217 #define ELF_TARGET_ID X86_64_ELF_DATA
5218 #define ELF_MACHINE_CODE EM_X86_64
5219 #if DEFAULT_LD_Z_SEPARATE_CODE
5220 # define ELF_MAXPAGESIZE 0x1000
5221 #else
5222 # define ELF_MAXPAGESIZE 0x200000
5223 #endif
5224 #define ELF_MINPAGESIZE 0x1000
5225 #define ELF_COMMONPAGESIZE 0x1000
5226
5227 #define elf_backend_can_gc_sections 1
5228 #define elf_backend_can_refcount 1
5229 #define elf_backend_want_got_plt 1
5230 #define elf_backend_plt_readonly 1
5231 #define elf_backend_want_plt_sym 0
5232 #define elf_backend_got_header_size (GOT_ENTRY_SIZE*3)
5233 #define elf_backend_rela_normal 1
5234 #define elf_backend_plt_alignment 4
5235 #define elf_backend_extern_protected_data 1
5236 #define elf_backend_caches_rawsize 1
5237 #define elf_backend_dtrel_excludes_plt 1
5238 #define elf_backend_want_dynrelro 1
5239
5240 #define elf_info_to_howto elf_x86_64_info_to_howto
5241
5242 #define bfd_elf64_bfd_reloc_type_lookup elf_x86_64_reloc_type_lookup
5243 #define bfd_elf64_bfd_reloc_name_lookup \
5244 elf_x86_64_reloc_name_lookup
5245
5246 #define elf_backend_relocs_compatible elf_x86_64_relocs_compatible
5247 #define elf_backend_check_relocs elf_x86_64_check_relocs
5248 #define elf_backend_create_dynamic_sections _bfd_elf_create_dynamic_sections
5249 #define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections
5250 #define elf_backend_finish_dynamic_symbol elf_x86_64_finish_dynamic_symbol
5251 #define elf_backend_output_arch_local_syms elf_x86_64_output_arch_local_syms
5252 #define elf_backend_grok_prstatus elf_x86_64_grok_prstatus
5253 #define elf_backend_grok_psinfo elf_x86_64_grok_psinfo
5254 #ifdef CORE_HEADER
5255 #define elf_backend_write_core_note elf_x86_64_write_core_note
5256 #endif
5257 #define elf_backend_reloc_type_class elf_x86_64_reloc_type_class
5258 #define elf_backend_relocate_section elf_x86_64_relocate_section
5259 #define elf_backend_init_index_section _bfd_elf_init_1_index_section
5260 #define elf_backend_object_p elf64_x86_64_elf_object_p
5261 #define bfd_elf64_get_synthetic_symtab elf_x86_64_get_synthetic_symtab
5262
5263 #define elf_backend_section_from_shdr \
5264 elf_x86_64_section_from_shdr
5265
5266 #define elf_backend_section_from_bfd_section \
5267 elf_x86_64_elf_section_from_bfd_section
5268 #define elf_backend_add_symbol_hook \
5269 elf_x86_64_add_symbol_hook
5270 #define elf_backend_symbol_processing \
5271 elf_x86_64_symbol_processing
5272 #define elf_backend_common_section_index \
5273 elf_x86_64_common_section_index
5274 #define elf_backend_common_section \
5275 elf_x86_64_common_section
5276 #define elf_backend_common_definition \
5277 elf_x86_64_common_definition
5278 #define elf_backend_merge_symbol \
5279 elf_x86_64_merge_symbol
5280 #define elf_backend_special_sections \
5281 elf_x86_64_special_sections
5282 #define elf_backend_additional_program_headers \
5283 elf_x86_64_additional_program_headers
5284 #define elf_backend_setup_gnu_properties \
5285 elf_x86_64_link_setup_gnu_properties
5286 #define elf_backend_hide_symbol \
5287 _bfd_x86_elf_hide_symbol
5288
5289 #undef elf64_bed
5290 #define elf64_bed elf64_x86_64_bed
5291
5292 #include "elf64-target.h"
5293
5294 /* CloudABI support. */
5295
5296 #undef TARGET_LITTLE_SYM
5297 #define TARGET_LITTLE_SYM x86_64_elf64_cloudabi_vec
5298 #undef TARGET_LITTLE_NAME
5299 #define TARGET_LITTLE_NAME "elf64-x86-64-cloudabi"
5300
5301 #undef ELF_OSABI
5302 #define ELF_OSABI ELFOSABI_CLOUDABI
5303
5304 #undef elf64_bed
5305 #define elf64_bed elf64_x86_64_cloudabi_bed
5306
5307 #include "elf64-target.h"
5308
5309 /* FreeBSD support. */
5310
5311 #undef TARGET_LITTLE_SYM
5312 #define TARGET_LITTLE_SYM x86_64_elf64_fbsd_vec
5313 #undef TARGET_LITTLE_NAME
5314 #define TARGET_LITTLE_NAME "elf64-x86-64-freebsd"
5315
5316 #undef ELF_OSABI
5317 #define ELF_OSABI ELFOSABI_FREEBSD
5318
5319 #undef elf64_bed
5320 #define elf64_bed elf64_x86_64_fbsd_bed
5321
5322 #include "elf64-target.h"
5323
5324 /* Solaris 2 support. */
5325
5326 #undef TARGET_LITTLE_SYM
5327 #define TARGET_LITTLE_SYM x86_64_elf64_sol2_vec
5328 #undef TARGET_LITTLE_NAME
5329 #define TARGET_LITTLE_NAME "elf64-x86-64-sol2"
5330
5331 #undef ELF_TARGET_OS
5332 #define ELF_TARGET_OS is_solaris
5333
5334 /* Restore default: we cannot use ELFOSABI_SOLARIS, otherwise ELFOSABI_NONE
5335 objects won't be recognized. */
5336 #undef ELF_OSABI
5337
5338 #undef elf64_bed
5339 #define elf64_bed elf64_x86_64_sol2_bed
5340
5341 /* The 64-bit static TLS arena size is rounded to the nearest 16-byte
5342 boundary. */
5343 #undef elf_backend_static_tls_alignment
5344 #define elf_backend_static_tls_alignment 16
5345
5346 /* The Solaris 2 ABI requires a plt symbol on all platforms.
5347
5348 Cf. Linker and Libraries Guide, Ch. 2, Link-Editor, Generating the Output
5349 File, p.63. */
5350 #undef elf_backend_want_plt_sym
5351 #define elf_backend_want_plt_sym 1
5352
5353 #undef elf_backend_strtab_flags
5354 #define elf_backend_strtab_flags SHF_STRINGS
5355
5356 static bool
5357 elf64_x86_64_copy_solaris_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
5358 bfd *obfd ATTRIBUTE_UNUSED,
5359 const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
5360 Elf_Internal_Shdr *osection ATTRIBUTE_UNUSED)
5361 {
5362 /* PR 19938: FIXME: Need to add code for setting the sh_info
5363 and sh_link fields of Solaris specific section types. */
5364 return false;
5365 }
5366
5367 #undef elf_backend_copy_special_section_fields
5368 #define elf_backend_copy_special_section_fields elf64_x86_64_copy_solaris_special_section_fields
5369
5370 #include "elf64-target.h"
5371
5372 /* Restore defaults. */
5373 #undef ELF_OSABI
5374 #undef elf_backend_static_tls_alignment
5375 #undef elf_backend_want_plt_sym
5376 #define elf_backend_want_plt_sym 0
5377 #undef elf_backend_strtab_flags
5378 #undef elf_backend_copy_special_section_fields
5379
5380 /* Intel L1OM support. */
5381
5382 static bool
5383 elf64_l1om_elf_object_p (bfd *abfd)
5384 {
5385 /* Set the right machine number for an L1OM elf64 file. */
5386 bfd_default_set_arch_mach (abfd, bfd_arch_l1om, bfd_mach_l1om);
5387 return true;
5388 }
5389
5390 #undef TARGET_LITTLE_SYM
5391 #define TARGET_LITTLE_SYM l1om_elf64_vec
5392 #undef TARGET_LITTLE_NAME
5393 #define TARGET_LITTLE_NAME "elf64-l1om"
5394 #undef ELF_ARCH
5395 #define ELF_ARCH bfd_arch_l1om
5396
5397 #undef ELF_MACHINE_CODE
5398 #define ELF_MACHINE_CODE EM_L1OM
5399
5400 #undef ELF_OSABI
5401
5402 #undef elf64_bed
5403 #define elf64_bed elf64_l1om_bed
5404
5405 #undef elf_backend_object_p
5406 #define elf_backend_object_p elf64_l1om_elf_object_p
5407
5408 /* Restore defaults. */
5409 #undef ELF_MAXPAGESIZE
5410 #undef ELF_MINPAGESIZE
5411 #undef ELF_COMMONPAGESIZE
5412 #if DEFAULT_LD_Z_SEPARATE_CODE
5413 # define ELF_MAXPAGESIZE 0x1000
5414 #else
5415 # define ELF_MAXPAGESIZE 0x200000
5416 #endif
5417 #define ELF_MINPAGESIZE 0x1000
5418 #define ELF_COMMONPAGESIZE 0x1000
5419 #undef elf_backend_plt_alignment
5420 #define elf_backend_plt_alignment 4
5421 #undef ELF_TARGET_OS
5422
5423 #include "elf64-target.h"
5424
5425 /* FreeBSD L1OM support. */
5426
5427 #undef TARGET_LITTLE_SYM
5428 #define TARGET_LITTLE_SYM l1om_elf64_fbsd_vec
5429 #undef TARGET_LITTLE_NAME
5430 #define TARGET_LITTLE_NAME "elf64-l1om-freebsd"
5431
5432 #undef ELF_OSABI
5433 #define ELF_OSABI ELFOSABI_FREEBSD
5434
5435 #undef elf64_bed
5436 #define elf64_bed elf64_l1om_fbsd_bed
5437
5438 #include "elf64-target.h"
5439
5440 /* Intel K1OM support. */
5441
5442 static bool
5443 elf64_k1om_elf_object_p (bfd *abfd)
5444 {
5445 /* Set the right machine number for a K1OM elf64 file. */
5446 bfd_default_set_arch_mach (abfd, bfd_arch_k1om, bfd_mach_k1om);
5447 return true;
5448 }
5449
5450 #undef TARGET_LITTLE_SYM
5451 #define TARGET_LITTLE_SYM k1om_elf64_vec
5452 #undef TARGET_LITTLE_NAME
5453 #define TARGET_LITTLE_NAME "elf64-k1om"
5454 #undef ELF_ARCH
5455 #define ELF_ARCH bfd_arch_k1om
5456
5457 #undef ELF_MACHINE_CODE
5458 #define ELF_MACHINE_CODE EM_K1OM
5459
5460 #undef ELF_OSABI
5461
5462 #undef elf64_bed
5463 #define elf64_bed elf64_k1om_bed
5464
5465 #undef elf_backend_object_p
5466 #define elf_backend_object_p elf64_k1om_elf_object_p
5467
5468 #include "elf64-target.h"
5469
5470 /* FreeBSD K1OM support. */
5471
5472 #undef TARGET_LITTLE_SYM
5473 #define TARGET_LITTLE_SYM k1om_elf64_fbsd_vec
5474 #undef TARGET_LITTLE_NAME
5475 #define TARGET_LITTLE_NAME "elf64-k1om-freebsd"
5476
5477 #undef ELF_OSABI
5478 #define ELF_OSABI ELFOSABI_FREEBSD
5479
5480 #undef elf64_bed
5481 #define elf64_bed elf64_k1om_fbsd_bed
5482
5483 #include "elf64-target.h"
5484
5485 /* 32bit x86-64 support. */
5486
5487 #undef TARGET_LITTLE_SYM
5488 #define TARGET_LITTLE_SYM x86_64_elf32_vec
5489 #undef TARGET_LITTLE_NAME
5490 #define TARGET_LITTLE_NAME "elf32-x86-64"
5491 #undef elf32_bed
5492 #define elf32_bed elf32_x86_64_bed
5493
5494 #undef ELF_ARCH
5495 #define ELF_ARCH bfd_arch_i386
5496
5497 #undef ELF_MACHINE_CODE
5498 #define ELF_MACHINE_CODE EM_X86_64
5499
5500 #undef ELF_OSABI
5501
5502 #define bfd_elf32_bfd_reloc_type_lookup \
5503 elf_x86_64_reloc_type_lookup
5504 #define bfd_elf32_bfd_reloc_name_lookup \
5505 elf_x86_64_reloc_name_lookup
5506 #define bfd_elf32_get_synthetic_symtab \
5507 elf_x86_64_get_synthetic_symtab
5508
5509 #undef elf_backend_object_p
5510 #define elf_backend_object_p \
5511 elf32_x86_64_elf_object_p
5512
5513 #undef elf_backend_bfd_from_remote_memory
5514 #define elf_backend_bfd_from_remote_memory \
5515 _bfd_elf32_bfd_from_remote_memory
5516
5517 #undef elf_backend_size_info
5518 #define elf_backend_size_info \
5519 _bfd_elf32_size_info
5520
5521 #include "elf32-target.h"