1 /* X86-64 specific support for ELF
2 Copyright (C) 2000-2021 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka <jh@suse.cz>.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "elfxx-x86.h"
23 #include "dwarf2.h"
24 #include "libiberty.h"
25
26 #include "opcode/i386.h"
27 #include "elf/x86-64.h"
28
29 #ifdef CORE_HEADER
30 #include <stdarg.h>
31 #include CORE_HEADER
32 #endif
33
34 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
35 #define MINUS_ONE (~ (bfd_vma) 0)
36
37 /* Since both 32-bit and 64-bit x86-64 encode relocation type in the
38 identical manner, we use ELF32_R_TYPE instead of ELF64_R_TYPE to get
39 relocation type. We also use ELF_ST_TYPE instead of ELF64_ST_TYPE
40 since they are the same. */
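/* For example, elf_x86_64_info_to_howto below extracts the type with
   ELF32_R_TYPE (dst->r_info) whether the input object is ELFCLASS32
   (x32) or ELFCLASS64.  */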
41
42 /* The relocation "howto" table. Order of fields:
43 type, rightshift, size, bitsize, pc_relative, bitpos, complain_on_overflow,
44 special_function, name, partial_inplace, src_mask, dst_mask, pcrel_offset. */
45 static reloc_howto_type x86_64_elf_howto_table[] =
46 {
47 HOWTO(R_X86_64_NONE, 0, 3, 0, false, 0, complain_overflow_dont,
48 bfd_elf_generic_reloc, "R_X86_64_NONE", false, 0x00000000, 0x00000000,
49 false),
50 HOWTO(R_X86_64_64, 0, 4, 64, false, 0, complain_overflow_dont,
51 bfd_elf_generic_reloc, "R_X86_64_64", false, MINUS_ONE, MINUS_ONE,
52 false),
53 HOWTO(R_X86_64_PC32, 0, 2, 32, true, 0, complain_overflow_signed,
54 bfd_elf_generic_reloc, "R_X86_64_PC32", false, 0xffffffff, 0xffffffff,
55 true),
56 HOWTO(R_X86_64_GOT32, 0, 2, 32, false, 0, complain_overflow_signed,
57 bfd_elf_generic_reloc, "R_X86_64_GOT32", false, 0xffffffff, 0xffffffff,
58 false),
59 HOWTO(R_X86_64_PLT32, 0, 2, 32, true, 0, complain_overflow_signed,
60 bfd_elf_generic_reloc, "R_X86_64_PLT32", false, 0xffffffff, 0xffffffff,
61 true),
62 HOWTO(R_X86_64_COPY, 0, 2, 32, false, 0, complain_overflow_bitfield,
63 bfd_elf_generic_reloc, "R_X86_64_COPY", false, 0xffffffff, 0xffffffff,
64 false),
65 HOWTO(R_X86_64_GLOB_DAT, 0, 4, 64, false, 0, complain_overflow_dont,
66 bfd_elf_generic_reloc, "R_X86_64_GLOB_DAT", false, MINUS_ONE,
67 MINUS_ONE, false),
68 HOWTO(R_X86_64_JUMP_SLOT, 0, 4, 64, false, 0, complain_overflow_dont,
69 bfd_elf_generic_reloc, "R_X86_64_JUMP_SLOT", false, MINUS_ONE,
70 MINUS_ONE, false),
71 HOWTO(R_X86_64_RELATIVE, 0, 4, 64, false, 0, complain_overflow_dont,
72 bfd_elf_generic_reloc, "R_X86_64_RELATIVE", false, MINUS_ONE,
73 MINUS_ONE, false),
74 HOWTO(R_X86_64_GOTPCREL, 0, 2, 32, true, 0, complain_overflow_signed,
75 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL", false, 0xffffffff,
76 0xffffffff, true),
77 HOWTO(R_X86_64_32, 0, 2, 32, false, 0, complain_overflow_unsigned,
78 bfd_elf_generic_reloc, "R_X86_64_32", false, 0xffffffff, 0xffffffff,
79 false),
80 HOWTO(R_X86_64_32S, 0, 2, 32, false, 0, complain_overflow_signed,
81 bfd_elf_generic_reloc, "R_X86_64_32S", false, 0xffffffff, 0xffffffff,
82 false),
83 HOWTO(R_X86_64_16, 0, 1, 16, false, 0, complain_overflow_bitfield,
84 bfd_elf_generic_reloc, "R_X86_64_16", false, 0xffff, 0xffff, false),
85 HOWTO(R_X86_64_PC16, 0, 1, 16, true, 0, complain_overflow_signed,
86 bfd_elf_generic_reloc, "R_X86_64_PC16", false, 0xffff, 0xffff, true),
87 HOWTO(R_X86_64_8, 0, 0, 8, false, 0, complain_overflow_bitfield,
88 bfd_elf_generic_reloc, "R_X86_64_8", false, 0xff, 0xff, false),
89 HOWTO(R_X86_64_PC8, 0, 0, 8, true, 0, complain_overflow_signed,
90 bfd_elf_generic_reloc, "R_X86_64_PC8", false, 0xff, 0xff, true),
91 HOWTO(R_X86_64_DTPMOD64, 0, 4, 64, false, 0, complain_overflow_dont,
92 bfd_elf_generic_reloc, "R_X86_64_DTPMOD64", false, MINUS_ONE,
93 MINUS_ONE, false),
94 HOWTO(R_X86_64_DTPOFF64, 0, 4, 64, false, 0, complain_overflow_dont,
95 bfd_elf_generic_reloc, "R_X86_64_DTPOFF64", false, MINUS_ONE,
96 MINUS_ONE, false),
97 HOWTO(R_X86_64_TPOFF64, 0, 4, 64, false, 0, complain_overflow_dont,
98 bfd_elf_generic_reloc, "R_X86_64_TPOFF64", false, MINUS_ONE,
99 MINUS_ONE, false),
100 HOWTO(R_X86_64_TLSGD, 0, 2, 32, true, 0, complain_overflow_signed,
101 bfd_elf_generic_reloc, "R_X86_64_TLSGD", false, 0xffffffff,
102 0xffffffff, true),
103 HOWTO(R_X86_64_TLSLD, 0, 2, 32, true, 0, complain_overflow_signed,
104 bfd_elf_generic_reloc, "R_X86_64_TLSLD", false, 0xffffffff,
105 0xffffffff, true),
106 HOWTO(R_X86_64_DTPOFF32, 0, 2, 32, false, 0, complain_overflow_signed,
107 bfd_elf_generic_reloc, "R_X86_64_DTPOFF32", false, 0xffffffff,
108 0xffffffff, false),
109 HOWTO(R_X86_64_GOTTPOFF, 0, 2, 32, true, 0, complain_overflow_signed,
110 bfd_elf_generic_reloc, "R_X86_64_GOTTPOFF", false, 0xffffffff,
111 0xffffffff, true),
112 HOWTO(R_X86_64_TPOFF32, 0, 2, 32, false, 0, complain_overflow_signed,
113 bfd_elf_generic_reloc, "R_X86_64_TPOFF32", false, 0xffffffff,
114 0xffffffff, false),
115 HOWTO(R_X86_64_PC64, 0, 4, 64, true, 0, complain_overflow_dont,
116 bfd_elf_generic_reloc, "R_X86_64_PC64", false, MINUS_ONE, MINUS_ONE,
117 true),
118 HOWTO(R_X86_64_GOTOFF64, 0, 4, 64, false, 0, complain_overflow_dont,
119 bfd_elf_generic_reloc, "R_X86_64_GOTOFF64",
120 false, MINUS_ONE, MINUS_ONE, false),
121 HOWTO(R_X86_64_GOTPC32, 0, 2, 32, true, 0, complain_overflow_signed,
122 bfd_elf_generic_reloc, "R_X86_64_GOTPC32",
123 false, 0xffffffff, 0xffffffff, true),
124 HOWTO(R_X86_64_GOT64, 0, 4, 64, false, 0, complain_overflow_signed,
125 bfd_elf_generic_reloc, "R_X86_64_GOT64", false, MINUS_ONE, MINUS_ONE,
126 false),
127 HOWTO(R_X86_64_GOTPCREL64, 0, 4, 64, true, 0, complain_overflow_signed,
128 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL64", false, MINUS_ONE,
129 MINUS_ONE, true),
130 HOWTO(R_X86_64_GOTPC64, 0, 4, 64, true, 0, complain_overflow_signed,
131 bfd_elf_generic_reloc, "R_X86_64_GOTPC64",
132 false, MINUS_ONE, MINUS_ONE, true),
133 HOWTO(R_X86_64_GOTPLT64, 0, 4, 64, false, 0, complain_overflow_signed,
134 bfd_elf_generic_reloc, "R_X86_64_GOTPLT64", false, MINUS_ONE,
135 MINUS_ONE, false),
136 HOWTO(R_X86_64_PLTOFF64, 0, 4, 64, false, 0, complain_overflow_signed,
137 bfd_elf_generic_reloc, "R_X86_64_PLTOFF64", false, MINUS_ONE,
138 MINUS_ONE, false),
139 HOWTO(R_X86_64_SIZE32, 0, 2, 32, false, 0, complain_overflow_unsigned,
140 bfd_elf_generic_reloc, "R_X86_64_SIZE32", false, 0xffffffff, 0xffffffff,
141 false),
142 HOWTO(R_X86_64_SIZE64, 0, 4, 64, false, 0, complain_overflow_dont,
143 bfd_elf_generic_reloc, "R_X86_64_SIZE64", false, MINUS_ONE, MINUS_ONE,
144 false),
145 HOWTO(R_X86_64_GOTPC32_TLSDESC, 0, 2, 32, true, 0,
146 complain_overflow_bitfield, bfd_elf_generic_reloc,
147 "R_X86_64_GOTPC32_TLSDESC",
148 false, 0xffffffff, 0xffffffff, true),
149 HOWTO(R_X86_64_TLSDESC_CALL, 0, 3, 0, false, 0,
150 complain_overflow_dont, bfd_elf_generic_reloc,
151 "R_X86_64_TLSDESC_CALL",
152 false, 0, 0, false),
153 HOWTO(R_X86_64_TLSDESC, 0, 4, 64, false, 0,
154 complain_overflow_dont, bfd_elf_generic_reloc,
155 "R_X86_64_TLSDESC",
156 false, MINUS_ONE, MINUS_ONE, false),
157 HOWTO(R_X86_64_IRELATIVE, 0, 4, 64, false, 0, complain_overflow_dont,
158 bfd_elf_generic_reloc, "R_X86_64_IRELATIVE", false, MINUS_ONE,
159 MINUS_ONE, false),
160 HOWTO(R_X86_64_RELATIVE64, 0, 4, 64, false, 0, complain_overflow_dont,
161 bfd_elf_generic_reloc, "R_X86_64_RELATIVE64", false, MINUS_ONE,
162 MINUS_ONE, false),
163 HOWTO(R_X86_64_PC32_BND, 0, 2, 32, true, 0, complain_overflow_signed,
164 bfd_elf_generic_reloc, "R_X86_64_PC32_BND", false, 0xffffffff, 0xffffffff,
165 true),
166 HOWTO(R_X86_64_PLT32_BND, 0, 2, 32, true, 0, complain_overflow_signed,
167 bfd_elf_generic_reloc, "R_X86_64_PLT32_BND", false, 0xffffffff, 0xffffffff,
168 true),
169 HOWTO(R_X86_64_GOTPCRELX, 0, 2, 32, true, 0, complain_overflow_signed,
170 bfd_elf_generic_reloc, "R_X86_64_GOTPCRELX", false, 0xffffffff,
171 0xffffffff, true),
172 HOWTO(R_X86_64_REX_GOTPCRELX, 0, 2, 32, true, 0, complain_overflow_signed,
173 bfd_elf_generic_reloc, "R_X86_64_REX_GOTPCRELX", false, 0xffffffff,
174 0xffffffff, true),
175
176 /* We have a gap in the reloc numbers here.
177 R_X86_64_standard counts the number up to this point, and
178 R_X86_64_vt_offset is the value to subtract from a reloc type of
179 R_X86_64_GNU_VT* to form an index into this table. */
180 #define R_X86_64_standard (R_X86_64_REX_GOTPCRELX + 1)
181 #define R_X86_64_vt_offset (R_X86_64_GNU_VTINHERIT - R_X86_64_standard)
182
183 /* GNU extension to record C++ vtable hierarchy. */
184 HOWTO (R_X86_64_GNU_VTINHERIT, 0, 4, 0, false, 0, complain_overflow_dont,
185 NULL, "R_X86_64_GNU_VTINHERIT", false, 0, 0, false),
186
187 /* GNU extension to record C++ vtable member usage. */
188 HOWTO (R_X86_64_GNU_VTENTRY, 0, 4, 0, false, 0, complain_overflow_dont,
189 _bfd_elf_rel_vtable_reloc_fn, "R_X86_64_GNU_VTENTRY", false, 0, 0,
190 false),
191
192 /* Use complain_overflow_bitfield on R_X86_64_32 for x32. */
193 HOWTO(R_X86_64_32, 0, 2, 32, false, 0, complain_overflow_bitfield,
194 bfd_elf_generic_reloc, "R_X86_64_32", false, 0xffffffff, 0xffffffff,
195 false)
196 };
197
198 #define X86_PCREL_TYPE_P(TYPE) \
199 ( ((TYPE) == R_X86_64_PC8) \
200 || ((TYPE) == R_X86_64_PC16) \
201 || ((TYPE) == R_X86_64_PC32) \
202 || ((TYPE) == R_X86_64_PC32_BND) \
203 || ((TYPE) == R_X86_64_PC64))
204
205 #define X86_SIZE_TYPE_P(TYPE) \
206 ((TYPE) == R_X86_64_SIZE32 || (TYPE) == R_X86_64_SIZE64)
207
208 /* Map BFD relocs to the x86_64 elf relocs. */
209 struct elf_reloc_map
210 {
211 bfd_reloc_code_real_type bfd_reloc_val;
212 unsigned char elf_reloc_val;
213 };
214
215 static const struct elf_reloc_map x86_64_reloc_map[] =
216 {
217 { BFD_RELOC_NONE, R_X86_64_NONE, },
218 { BFD_RELOC_64, R_X86_64_64, },
219 { BFD_RELOC_32_PCREL, R_X86_64_PC32, },
220 { BFD_RELOC_X86_64_GOT32, R_X86_64_GOT32,},
221 { BFD_RELOC_X86_64_PLT32, R_X86_64_PLT32,},
222 { BFD_RELOC_X86_64_COPY, R_X86_64_COPY, },
223 { BFD_RELOC_X86_64_GLOB_DAT, R_X86_64_GLOB_DAT, },
224 { BFD_RELOC_X86_64_JUMP_SLOT, R_X86_64_JUMP_SLOT, },
225 { BFD_RELOC_X86_64_RELATIVE, R_X86_64_RELATIVE, },
226 { BFD_RELOC_X86_64_GOTPCREL, R_X86_64_GOTPCREL, },
227 { BFD_RELOC_32, R_X86_64_32, },
228 { BFD_RELOC_X86_64_32S, R_X86_64_32S, },
229 { BFD_RELOC_16, R_X86_64_16, },
230 { BFD_RELOC_16_PCREL, R_X86_64_PC16, },
231 { BFD_RELOC_8, R_X86_64_8, },
232 { BFD_RELOC_8_PCREL, R_X86_64_PC8, },
233 { BFD_RELOC_X86_64_DTPMOD64, R_X86_64_DTPMOD64, },
234 { BFD_RELOC_X86_64_DTPOFF64, R_X86_64_DTPOFF64, },
235 { BFD_RELOC_X86_64_TPOFF64, R_X86_64_TPOFF64, },
236 { BFD_RELOC_X86_64_TLSGD, R_X86_64_TLSGD, },
237 { BFD_RELOC_X86_64_TLSLD, R_X86_64_TLSLD, },
238 { BFD_RELOC_X86_64_DTPOFF32, R_X86_64_DTPOFF32, },
239 { BFD_RELOC_X86_64_GOTTPOFF, R_X86_64_GOTTPOFF, },
240 { BFD_RELOC_X86_64_TPOFF32, R_X86_64_TPOFF32, },
241 { BFD_RELOC_64_PCREL, R_X86_64_PC64, },
242 { BFD_RELOC_X86_64_GOTOFF64, R_X86_64_GOTOFF64, },
243 { BFD_RELOC_X86_64_GOTPC32, R_X86_64_GOTPC32, },
244 { BFD_RELOC_X86_64_GOT64, R_X86_64_GOT64, },
245 { BFD_RELOC_X86_64_GOTPCREL64,R_X86_64_GOTPCREL64, },
246 { BFD_RELOC_X86_64_GOTPC64, R_X86_64_GOTPC64, },
247 { BFD_RELOC_X86_64_GOTPLT64, R_X86_64_GOTPLT64, },
248 { BFD_RELOC_X86_64_PLTOFF64, R_X86_64_PLTOFF64, },
249 { BFD_RELOC_SIZE32, R_X86_64_SIZE32, },
250 { BFD_RELOC_SIZE64, R_X86_64_SIZE64, },
251 { BFD_RELOC_X86_64_GOTPC32_TLSDESC, R_X86_64_GOTPC32_TLSDESC, },
252 { BFD_RELOC_X86_64_TLSDESC_CALL, R_X86_64_TLSDESC_CALL, },
253 { BFD_RELOC_X86_64_TLSDESC, R_X86_64_TLSDESC, },
254 { BFD_RELOC_X86_64_IRELATIVE, R_X86_64_IRELATIVE, },
255 { BFD_RELOC_X86_64_PC32_BND, R_X86_64_PC32_BND, },
256 { BFD_RELOC_X86_64_PLT32_BND, R_X86_64_PLT32_BND, },
257 { BFD_RELOC_X86_64_GOTPCRELX, R_X86_64_GOTPCRELX, },
258 { BFD_RELOC_X86_64_REX_GOTPCRELX, R_X86_64_REX_GOTPCRELX, },
259 { BFD_RELOC_VTABLE_INHERIT, R_X86_64_GNU_VTINHERIT, },
260 { BFD_RELOC_VTABLE_ENTRY, R_X86_64_GNU_VTENTRY, },
261 };
262
263 static reloc_howto_type *
264 elf_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type)
265 {
266 unsigned i;
267
268 if (r_type == (unsigned int) R_X86_64_32)
269 {
270 if (ABI_64_P (abfd))
271 i = r_type;
272 else
273 i = ARRAY_SIZE (x86_64_elf_howto_table) - 1;
274 }
275 else if (r_type < (unsigned int) R_X86_64_GNU_VTINHERIT
276 || r_type >= (unsigned int) R_X86_64_max)
277 {
278 if (r_type >= (unsigned int) R_X86_64_standard)
279 {
280 /* xgettext:c-format */
281 _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
282 abfd, r_type);
283 bfd_set_error (bfd_error_bad_value);
284 return NULL;
285 }
286 i = r_type;
287 }
288 else
289 i = r_type - (unsigned int) R_X86_64_vt_offset;
290 BFD_ASSERT (x86_64_elf_howto_table[i].type == r_type);
291 return &x86_64_elf_howto_table[i];
292 }
293
294 /* Given a BFD reloc type, return a HOWTO structure. */
295 static reloc_howto_type *
296 elf_x86_64_reloc_type_lookup (bfd *abfd,
297 bfd_reloc_code_real_type code)
298 {
299 unsigned int i;
300
301 for (i = 0; i < sizeof (x86_64_reloc_map) / sizeof (struct elf_reloc_map);
302 i++)
303 {
304 if (x86_64_reloc_map[i].bfd_reloc_val == code)
305 return elf_x86_64_rtype_to_howto (abfd,
306 x86_64_reloc_map[i].elf_reloc_val);
307 }
308 return NULL;
309 }
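
/* A minimal usage sketch (illustrative only; the wrapper function and
   the BFD_RELOC_32_PCREL choice are assumptions, not part of the
   build).  Generic BFD code reaches the lookup above through
   bfd_reloc_type_lookup.  */
#if 0
static const char *
example_pc32_howto_name (bfd *abfd)
{
  /* Dispatches to elf_x86_64_reloc_type_lookup for x86-64 targets.  */
  reloc_howto_type *howto = bfd_reloc_type_lookup (abfd, BFD_RELOC_32_PCREL);
  return howto != NULL ? howto->name : NULL;	/* "R_X86_64_PC32"  */
}
#endif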
310
311 static reloc_howto_type *
312 elf_x86_64_reloc_name_lookup (bfd *abfd,
313 const char *r_name)
314 {
315 unsigned int i;
316
317 if (!ABI_64_P (abfd) && strcasecmp (r_name, "R_X86_64_32") == 0)
318 {
319 /* Get x32 R_X86_64_32. */
320 reloc_howto_type *reloc
321 = &x86_64_elf_howto_table[ARRAY_SIZE (x86_64_elf_howto_table) - 1];
322 BFD_ASSERT (reloc->type == (unsigned int) R_X86_64_32);
323 return reloc;
324 }
325
326 for (i = 0; i < ARRAY_SIZE (x86_64_elf_howto_table); i++)
327 if (x86_64_elf_howto_table[i].name != NULL
328 && strcasecmp (x86_64_elf_howto_table[i].name, r_name) == 0)
329 return &x86_64_elf_howto_table[i];
330
331 return NULL;
332 }
333
334 /* Given an x86_64 ELF reloc type, fill in an arelent structure. */
335
336 static bool
337 elf_x86_64_info_to_howto (bfd *abfd, arelent *cache_ptr,
338 Elf_Internal_Rela *dst)
339 {
340 unsigned r_type;
341
342 r_type = ELF32_R_TYPE (dst->r_info);
343 cache_ptr->howto = elf_x86_64_rtype_to_howto (abfd, r_type);
344 if (cache_ptr->howto == NULL)
345 return false;
346 BFD_ASSERT (r_type == cache_ptr->howto->type || cache_ptr->howto->type == R_X86_64_NONE);
347 return true;
348 }
349 \f
350 /* Support for core dump NOTE sections. */
351 static bool
352 elf_x86_64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
353 {
354 int offset;
355 size_t size;
356
357 switch (note->descsz)
358 {
359 default:
360 return false;
361
362 case 296: /* sizeof(struct elf_prstatus) on Linux/x32 */
363 /* pr_cursig */
364 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
365
366 /* pr_pid */
367 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
368
369 /* pr_reg */
370 offset = 72;
371 size = 216;
372
373 break;
374
375 case 336: /* sizeof(struct elf_prstatus) on Linux/x86_64 */
376 /* pr_cursig */
377 elf_tdata (abfd)->core->signal
378 = bfd_get_16 (abfd, note->descdata + 12);
379
380 /* pr_pid */
381 elf_tdata (abfd)->core->lwpid
382 = bfd_get_32 (abfd, note->descdata + 32);
383
384 /* pr_reg */
385 offset = 112;
386 size = 216;
387
388 break;
389 }
390
391 /* Make a ".reg/999" section. */
392 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
393 size, note->descpos + offset);
394 }
395
396 static bool
397 elf_x86_64_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
398 {
399 switch (note->descsz)
400 {
401 default:
402 return false;
403
404 case 124: /* sizeof(struct elf_prpsinfo) on Linux/x32 */
405 elf_tdata (abfd)->core->pid
406 = bfd_get_32 (abfd, note->descdata + 12);
407 elf_tdata (abfd)->core->program
408 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
409 elf_tdata (abfd)->core->command
410 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
411 break;
412
413 case 136: /* sizeof(struct elf_prpsinfo) on Linux/x86_64 */
414 elf_tdata (abfd)->core->pid
415 = bfd_get_32 (abfd, note->descdata + 24);
416 elf_tdata (abfd)->core->program
417 = _bfd_elfcore_strndup (abfd, note->descdata + 40, 16);
418 elf_tdata (abfd)->core->command
419 = _bfd_elfcore_strndup (abfd, note->descdata + 56, 80);
420 }
421
422 /* Note that for some reason, a spurious space is tacked
423 onto the end of the args in some implementations (at least
424 one, anyway), so strip it off if it exists. */
425
426 {
427 char *command = elf_tdata (abfd)->core->command;
428 int n = strlen (command);
429
430 if (0 < n && command[n - 1] == ' ')
431 command[n - 1] = '\0';
432 }
433
434 return true;
435 }
436
437 #ifdef CORE_HEADER
438 # if GCC_VERSION >= 8000
439 # pragma GCC diagnostic push
440 # pragma GCC diagnostic ignored "-Wstringop-truncation"
441 # endif
442 static char *
443 elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz,
444 int note_type, ...)
445 {
446 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
447 va_list ap;
448 const char *fname, *psargs;
449 long pid;
450 int cursig;
451 const void *gregs;
452
453 switch (note_type)
454 {
455 default:
456 return NULL;
457
458 case NT_PRPSINFO:
459 va_start (ap, note_type);
460 fname = va_arg (ap, const char *);
461 psargs = va_arg (ap, const char *);
462 va_end (ap);
463
464 if (bed->s->elfclass == ELFCLASS32)
465 {
466 prpsinfo32_t data;
467 memset (&data, 0, sizeof (data));
468 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
469 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
470 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
471 &data, sizeof (data));
472 }
473 else
474 {
475 prpsinfo64_t data;
476 memset (&data, 0, sizeof (data));
477 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
478 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
479 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
480 &data, sizeof (data));
481 }
482 /* NOTREACHED */
483
484 case NT_PRSTATUS:
485 va_start (ap, note_type);
486 pid = va_arg (ap, long);
487 cursig = va_arg (ap, int);
488 gregs = va_arg (ap, const void *);
489 va_end (ap);
490
491 if (bed->s->elfclass == ELFCLASS32)
492 {
493 if (bed->elf_machine_code == EM_X86_64)
494 {
495 prstatusx32_t prstat;
496 memset (&prstat, 0, sizeof (prstat));
497 prstat.pr_pid = pid;
498 prstat.pr_cursig = cursig;
499 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
500 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
501 &prstat, sizeof (prstat));
502 }
503 else
504 {
505 prstatus32_t prstat;
506 memset (&prstat, 0, sizeof (prstat));
507 prstat.pr_pid = pid;
508 prstat.pr_cursig = cursig;
509 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
510 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
511 &prstat, sizeof (prstat));
512 }
513 }
514 else
515 {
516 prstatus64_t prstat;
517 memset (&prstat, 0, sizeof (prstat));
518 prstat.pr_pid = pid;
519 prstat.pr_cursig = cursig;
520 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
521 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
522 &prstat, sizeof (prstat));
523 }
524 }
525 /* NOTREACHED */
526 }
527 # if GCC_VERSION >= 8000
528 # pragma GCC diagnostic pop
529 # endif
530 #endif
531 \f
532 /* Functions for the x86-64 ELF linker. */
533
534 /* The size in bytes of an entry in the global offset table. */
535
536 #define GOT_ENTRY_SIZE 8
537
538 /* The size in bytes of an entry in the lazy procedure linkage table. */
539
540 #define LAZY_PLT_ENTRY_SIZE 16
541
542 /* The size in bytes of an entry in the non-lazy procedure linkage
543 table. */
544
545 #define NON_LAZY_PLT_ENTRY_SIZE 8
546
547 /* The first entry in a lazy procedure linkage table looks like this.
548 See the SVR4 ABI i386 supplement and the x86-64 ABI to see how this
549 works. */
550
551 static const bfd_byte elf_x86_64_lazy_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
552 {
553 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
554 0xff, 0x25, 16, 0, 0, 0, /* jmpq *GOT+16(%rip) */
555 0x0f, 0x1f, 0x40, 0x00 /* nopl 0(%rax) */
556 };
557
558 /* Subsequent entries in a lazy procedure linkage table look like this. */
559
560 static const bfd_byte elf_x86_64_lazy_plt_entry[LAZY_PLT_ENTRY_SIZE] =
561 {
562 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
563 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
564 0x68, /* pushq immediate */
565 0, 0, 0, 0, /* replaced with index into relocation table. */
566 0xe9, /* jmp relative */
567 0, 0, 0, 0 /* replaced with offset to start of .plt0. */
568 };
569
570 /* The first entry in a lazy procedure linkage table with BND prefix
571 looks like this. */
572
573 static const bfd_byte elf_x86_64_lazy_bnd_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
574 {
575 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
576 0xf2, 0xff, 0x25, 16, 0, 0, 0, /* bnd jmpq *GOT+16(%rip) */
577 0x0f, 0x1f, 0 /* nopl (%rax) */
578 };
579
580 /* Subsequent entries for branches with BND prefix in a lazy procedure
581 linkage table look like this. */
582
583 static const bfd_byte elf_x86_64_lazy_bnd_plt_entry[LAZY_PLT_ENTRY_SIZE] =
584 {
585 0x68, 0, 0, 0, 0, /* pushq immediate */
586 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
587 0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */
588 };
589
590 /* The first entry in the IBT-enabled lazy procedure linkage table is
591 the same as the lazy PLT with BND prefix so that bound registers are
592 preserved when control is passed to the dynamic linker. Subsequent
593 entries for an IBT-enabled lazy procedure linkage table look like
594 this. */
595
596 static const bfd_byte elf_x86_64_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
597 {
598 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
599 0x68, 0, 0, 0, 0, /* pushq immediate */
600 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
601 0x90 /* nop */
602 };
603
604 /* The first entry in the x32 IBT-enabled lazy procedure linkage table
605 is the same as the normal lazy PLT. Subsequent entries for an
606 x32 IBT-enabled lazy procedure linkage table look like this. */
607
608 static const bfd_byte elf_x32_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
609 {
610 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
611 0x68, 0, 0, 0, 0, /* pushq immediate */
612 0xe9, 0, 0, 0, 0, /* jmpq relative */
613 0x66, 0x90 /* xchg %ax,%ax */
614 };
615
616 /* Entries in the non-lazy procedure linkage table look like this. */
617
618 static const bfd_byte elf_x86_64_non_lazy_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
619 {
620 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
621 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
622 0x66, 0x90 /* xchg %ax,%ax */
623 };
624
625 /* Entries for branches with BND prefix in the non-lazy procedure
626 linkage table look like this. */
627
628 static const bfd_byte elf_x86_64_non_lazy_bnd_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
629 {
630 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
631 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
632 0x90 /* nop */
633 };
634
635 /* Entries for branches with IBT enabled in the non-lazy procedure
636 linkage table look like this. They have the same size as the lazy
637 PLT entry. */
638
639 static const bfd_byte elf_x86_64_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
640 {
641 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
642 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
643 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
644 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopl 0x0(%rax,%rax,1) */
645 };
646
647 /* Entries for branches with IBT enabled in the x32 non-lazy procedure
648 linkage table look like this. They have the same size as the lazy
649 PLT entry. */
650
651 static const bfd_byte elf_x32_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
652 {
653 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
654 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
655 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
656 0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopw 0x0(%rax,%rax,1) */
657 };
658
659 /* The TLSDESC entry in a lazy procedure linkage table. */
660 static const bfd_byte elf_x86_64_tlsdesc_plt_entry[LAZY_PLT_ENTRY_SIZE] =
661 {
662 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
663 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
664 0xff, 0x25, 16, 0, 0, 0 /* jmpq *GOT+TDG(%rip) */
665 };
666
667 /* .eh_frame covering the lazy .plt section. */
668
669 static const bfd_byte elf_x86_64_eh_frame_lazy_plt[] =
670 {
671 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
672 0, 0, 0, 0, /* CIE ID */
673 1, /* CIE version */
674 'z', 'R', 0, /* Augmentation string */
675 1, /* Code alignment factor */
676 0x78, /* Data alignment factor */
677 16, /* Return address column */
678 1, /* Augmentation size */
679 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
680 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
681 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
682 DW_CFA_nop, DW_CFA_nop,
683
684 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
685 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
686 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
687 0, 0, 0, 0, /* .plt size goes here */
688 0, /* Augmentation size */
689 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
690 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
691 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
692 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
693 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
694 11, /* Block length */
695 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
696 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
697 DW_OP_lit15, DW_OP_and, DW_OP_lit11, DW_OP_ge,
698 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
699 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
700 };
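
/* An informal reading of the CFA expression above (the DWARF bytes are
   authoritative; this is just a sketch of the intent):
       CFA = (%rsp + 8) + (((%rip & 15) >= 11 ? 1 : 0) << 3)
   Within a 16-byte lazy PLT entry the pushq of the relocation index
   occupies offsets 6-10, so the CFA is %rsp + 8 before the push and
   %rsp + 16 from the following jmp onwards, letting the unwinder find
   the return address from any point in the entry.  */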
701
702 /* .eh_frame covering the lazy BND .plt section. */
703
704 static const bfd_byte elf_x86_64_eh_frame_lazy_bnd_plt[] =
705 {
706 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
707 0, 0, 0, 0, /* CIE ID */
708 1, /* CIE version */
709 'z', 'R', 0, /* Augmentation string */
710 1, /* Code alignment factor */
711 0x78, /* Data alignment factor */
712 16, /* Return address column */
713 1, /* Augmentation size */
714 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
715 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
716 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
717 DW_CFA_nop, DW_CFA_nop,
718
719 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
720 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
721 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
722 0, 0, 0, 0, /* .plt size goes here */
723 0, /* Augmentation size */
724 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
725 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
726 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
727 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
728 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
729 11, /* Block length */
730 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
731 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
732 DW_OP_lit15, DW_OP_and, DW_OP_lit5, DW_OP_ge,
733 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
734 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
735 };
736
737 /* .eh_frame covering the lazy .plt section with IBT-enabled. */
738
739 static const bfd_byte elf_x86_64_eh_frame_lazy_ibt_plt[] =
740 {
741 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
742 0, 0, 0, 0, /* CIE ID */
743 1, /* CIE version */
744 'z', 'R', 0, /* Augmentation string */
745 1, /* Code alignment factor */
746 0x78, /* Data alignment factor */
747 16, /* Return address column */
748 1, /* Augmentation size */
749 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
750 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
751 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
752 DW_CFA_nop, DW_CFA_nop,
753
754 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
755 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
756 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
757 0, 0, 0, 0, /* .plt size goes here */
758 0, /* Augmentation size */
759 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
760 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
761 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
762 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
763 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
764 11, /* Block length */
765 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
766 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
767 DW_OP_lit15, DW_OP_and, DW_OP_lit10, DW_OP_ge,
768 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
769 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
770 };
771
772 /* .eh_frame covering the x32 lazy .plt section with IBT-enabled. */
773
774 static const bfd_byte elf_x32_eh_frame_lazy_ibt_plt[] =
775 {
776 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
777 0, 0, 0, 0, /* CIE ID */
778 1, /* CIE version */
779 'z', 'R', 0, /* Augmentation string */
780 1, /* Code alignment factor */
781 0x78, /* Data alignment factor */
782 16, /* Return address column */
783 1, /* Augmentation size */
784 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
785 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
786 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
787 DW_CFA_nop, DW_CFA_nop,
788
789 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
790 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
791 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
792 0, 0, 0, 0, /* .plt size goes here */
793 0, /* Augmentation size */
794 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
795 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
796 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
797 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
798 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
799 11, /* Block length */
800 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
801 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
802 DW_OP_lit15, DW_OP_and, DW_OP_lit9, DW_OP_ge,
803 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
804 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
805 };
806
807 /* .eh_frame covering the non-lazy .plt section. */
808
809 static const bfd_byte elf_x86_64_eh_frame_non_lazy_plt[] =
810 {
811 #define PLT_GOT_FDE_LENGTH 20
812 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
813 0, 0, 0, 0, /* CIE ID */
814 1, /* CIE version */
815 'z', 'R', 0, /* Augmentation string */
816 1, /* Code alignment factor */
817 0x78, /* Data alignment factor */
818 16, /* Return address column */
819 1, /* Augmentation size */
820 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
821 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
822 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
823 DW_CFA_nop, DW_CFA_nop,
824
825 PLT_GOT_FDE_LENGTH, 0, 0, 0, /* FDE length */
826 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
827 0, 0, 0, 0, /* the start of non-lazy .plt goes here */
828 0, 0, 0, 0, /* non-lazy .plt size goes here */
829 0, /* Augmentation size */
830 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop,
831 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
832 };
833
834 /* These are the standard parameters. */
835 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_plt =
836 {
837 elf_x86_64_lazy_plt0_entry, /* plt0_entry */
838 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
839 elf_x86_64_lazy_plt_entry, /* plt_entry */
840 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
841 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
842 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
843 6, /* plt_tlsdesc_got1_offset */
844 12, /* plt_tlsdesc_got2_offset */
845 10, /* plt_tlsdesc_got1_insn_end */
846 16, /* plt_tlsdesc_got2_insn_end */
847 2, /* plt0_got1_offset */
848 8, /* plt0_got2_offset */
849 12, /* plt0_got2_insn_end */
850 2, /* plt_got_offset */
851 7, /* plt_reloc_offset */
852 12, /* plt_plt_offset */
853 6, /* plt_got_insn_size */
854 LAZY_PLT_ENTRY_SIZE, /* plt_plt_insn_end */
855 6, /* plt_lazy_offset */
856 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
857 elf_x86_64_lazy_plt_entry, /* pic_plt_entry */
858 elf_x86_64_eh_frame_lazy_plt, /* eh_frame_plt */
859 sizeof (elf_x86_64_eh_frame_lazy_plt) /* eh_frame_plt_size */
860 };
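
/* For example (a reading of the templates above, not new layout
   information): in elf_x86_64_lazy_plt_entry the 4-byte GOT
   displacement follows the 0xff 0x25 opcode, so plt_got_offset is 2;
   the relocation index follows the 0x68 push opcode at offset 6, so
   plt_reloc_offset is 7; and the branch displacement back to PLT0
   follows the 0xe9 opcode at offset 11, so plt_plt_offset is 12.  */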
861
862 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_plt =
863 {
864 elf_x86_64_non_lazy_plt_entry, /* plt_entry */
865 elf_x86_64_non_lazy_plt_entry, /* pic_plt_entry */
866 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
867 2, /* plt_got_offset */
868 6, /* plt_got_insn_size */
869 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
870 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
871 };
872
873 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_bnd_plt =
874 {
875 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
876 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
877 elf_x86_64_lazy_bnd_plt_entry, /* plt_entry */
878 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
879 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
880 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
881 6, /* plt_tlsdesc_got1_offset */
882 12, /* plt_tlsdesc_got2_offset */
883 10, /* plt_tlsdesc_got1_insn_end */
884 16, /* plt_tlsdesc_got2_insn_end */
885 2, /* plt0_got1_offset */
886 1+8, /* plt0_got2_offset */
887 1+12, /* plt0_got2_insn_end */
888 1+2, /* plt_got_offset */
889 1, /* plt_reloc_offset */
890 7, /* plt_plt_offset */
891 1+6, /* plt_got_insn_size */
892 11, /* plt_plt_insn_end */
893 0, /* plt_lazy_offset */
894 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
895 elf_x86_64_lazy_bnd_plt_entry, /* pic_plt_entry */
896 elf_x86_64_eh_frame_lazy_bnd_plt, /* eh_frame_plt */
897 sizeof (elf_x86_64_eh_frame_lazy_bnd_plt) /* eh_frame_plt_size */
898 };
899
900 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_bnd_plt =
901 {
902 elf_x86_64_non_lazy_bnd_plt_entry, /* plt_entry */
903 elf_x86_64_non_lazy_bnd_plt_entry, /* pic_plt_entry */
904 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
905 1+2, /* plt_got_offset */
906 1+6, /* plt_got_insn_size */
907 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
908 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
909 };
910
911 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_ibt_plt =
912 {
913 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
914 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
915 elf_x86_64_lazy_ibt_plt_entry, /* plt_entry */
916 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
917 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
918 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
919 6, /* plt_tlsdesc_got1_offset */
920 12, /* plt_tlsdesc_got2_offset */
921 10, /* plt_tlsdesc_got1_insn_end */
922 16, /* plt_tlsdesc_got2_insn_end */
923 2, /* plt0_got1_offset */
924 1+8, /* plt0_got2_offset */
925 1+12, /* plt0_got2_insn_end */
926 4+1+2, /* plt_got_offset */
927 4+1, /* plt_reloc_offset */
928 4+1+6, /* plt_plt_offset */
929 4+1+6, /* plt_got_insn_size */
930 4+1+5+5, /* plt_plt_insn_end */
931 0, /* plt_lazy_offset */
932 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
933 elf_x86_64_lazy_ibt_plt_entry, /* pic_plt_entry */
934 elf_x86_64_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
935 sizeof (elf_x86_64_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
936 };
937
938 static const struct elf_x86_lazy_plt_layout elf_x32_lazy_ibt_plt =
939 {
940 elf_x86_64_lazy_plt0_entry, /* plt0_entry */
941 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
942 elf_x32_lazy_ibt_plt_entry, /* plt_entry */
943 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
944 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
945 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
946 6, /* plt_tlsdesc_got1_offset */
947 12, /* plt_tlsdesc_got2_offset */
948 10, /* plt_tlsdesc_got1_insn_end */
949 16, /* plt_tlsdesc_got2_insn_end */
950 2, /* plt0_got1_offset */
951 8, /* plt0_got2_offset */
952 12, /* plt0_got2_insn_end */
953 4+2, /* plt_got_offset */
954 4+1, /* plt_reloc_offset */
955 4+6, /* plt_plt_offset */
956 4+6, /* plt_got_insn_size */
957 4+5+5, /* plt_plt_insn_end */
958 0, /* plt_lazy_offset */
959 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
960 elf_x32_lazy_ibt_plt_entry, /* pic_plt_entry */
961 elf_x32_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
962 sizeof (elf_x32_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
963 };
964
965 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_ibt_plt =
966 {
967 elf_x86_64_non_lazy_ibt_plt_entry, /* plt_entry */
968 elf_x86_64_non_lazy_ibt_plt_entry, /* pic_plt_entry */
969 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
970 4+1+2, /* plt_got_offset */
971 4+1+6, /* plt_got_insn_size */
972 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
973 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
974 };
975
976 static const struct elf_x86_non_lazy_plt_layout elf_x32_non_lazy_ibt_plt =
977 {
978 elf_x32_non_lazy_ibt_plt_entry, /* plt_entry */
979 elf_x32_non_lazy_ibt_plt_entry, /* pic_plt_entry */
980 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
981 4+2, /* plt_got_offset */
982 4+6, /* plt_got_insn_size */
983 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
984 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
985 };
986
987
988 static bool
989 elf64_x86_64_elf_object_p (bfd *abfd)
990 {
991 /* Set the right machine number for an x86-64 elf64 file. */
992 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64);
993 return true;
994 }
995
996 static bool
997 elf32_x86_64_elf_object_p (bfd *abfd)
998 {
999 /* Set the right machine number for an x86-64 elf32 file. */
1000 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32);
1001 return true;
1002 }
1003
1004 /* Return TRUE if the TLS access code sequence supports transition
1005 from R_TYPE. */
1006
1007 static bool
1008 elf_x86_64_check_tls_transition (bfd *abfd,
1009 struct bfd_link_info *info,
1010 asection *sec,
1011 bfd_byte *contents,
1012 Elf_Internal_Shdr *symtab_hdr,
1013 struct elf_link_hash_entry **sym_hashes,
1014 unsigned int r_type,
1015 const Elf_Internal_Rela *rel,
1016 const Elf_Internal_Rela *relend)
1017 {
1018 unsigned int val;
1019 unsigned long r_symndx;
1020 bool largepic = false;
1021 struct elf_link_hash_entry *h;
1022 bfd_vma offset;
1023 struct elf_x86_link_hash_table *htab;
1024 bfd_byte *call;
1025 bool indirect_call;
1026
1027 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1028 offset = rel->r_offset;
1029 switch (r_type)
1030 {
1031 case R_X86_64_TLSGD:
1032 case R_X86_64_TLSLD:
1033 if ((rel + 1) >= relend)
1034 return false;
1035
1036 if (r_type == R_X86_64_TLSGD)
1037 {
1038 /* Check transition from GD access model. For 64bit, only
1039 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1040 .word 0x6666; rex64; call __tls_get_addr@PLT
1041 or
1042 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1043 .byte 0x66; rex64
1044 call *__tls_get_addr@GOTPCREL(%rip)
1045 which may be converted to
1046 addr32 call __tls_get_addr
1047 can transition to a different access model. For 32bit, only
1048 leaq foo@tlsgd(%rip), %rdi
1049 .word 0x6666; rex64; call __tls_get_addr@PLT
1050 or
1051 leaq foo@tlsgd(%rip), %rdi
1052 .byte 0x66; rex64
1053 call *__tls_get_addr@GOTPCREL(%rip)
1054 which may be converted to
1055 addr32 call __tls_get_addr
1056 can transition to a different access model. For largepic,
1057 we also support:
1058 leaq foo@tlsgd(%rip), %rdi
1059 movabsq $__tls_get_addr@pltoff, %rax
1060 addq %r15, %rax
1061 call *%rax
1062 or
1063 leaq foo@tlsgd(%rip), %rdi
1064 movabsq $__tls_get_addr@pltoff, %rax
1065 addq %rbx, %rax
1066 call *%rax */
1067
1068 static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d };
1069
1070 if ((offset + 12) > sec->size)
1071 return false;
1072
1073 call = contents + offset + 4;
1074 if (call[0] != 0x66
1075 || !((call[1] == 0x48
1076 && call[2] == 0xff
1077 && call[3] == 0x15)
1078 || (call[1] == 0x48
1079 && call[2] == 0x67
1080 && call[3] == 0xe8)
1081 || (call[1] == 0x66
1082 && call[2] == 0x48
1083 && call[3] == 0xe8)))
1084 {
1085 if (!ABI_64_P (abfd)
1086 || (offset + 19) > sec->size
1087 || offset < 3
1088 || memcmp (call - 7, leaq + 1, 3) != 0
1089 || memcmp (call, "\x48\xb8", 2) != 0
1090 || call[11] != 0x01
1091 || call[13] != 0xff
1092 || call[14] != 0xd0
1093 || !((call[10] == 0x48 && call[12] == 0xd8)
1094 || (call[10] == 0x4c && call[12] == 0xf8)))
1095 return false;
1096 largepic = true;
1097 }
1098 else if (ABI_64_P (abfd))
1099 {
1100 if (offset < 4
1101 || memcmp (contents + offset - 4, leaq, 4) != 0)
1102 return false;
1103 }
1104 else
1105 {
1106 if (offset < 3
1107 || memcmp (contents + offset - 3, leaq + 1, 3) != 0)
1108 return false;
1109 }
1110 indirect_call = call[2] == 0xff;
1111 }
1112 else
1113 {
1114 /* Check transition from LD access model. Only
1115 leaq foo@tlsld(%rip), %rdi;
1116 call __tls_get_addr@PLT
1117 or
1118 leaq foo@tlsld(%rip), %rdi;
1119 call *__tls_get_addr@GOTPCREL(%rip)
1120 which may be converted to
1121 addr32 call __tls_get_addr
1122 can transition to a different access model. For largepic
1123 we also support:
1124 leaq foo@tlsld(%rip), %rdi
1125 movabsq $__tls_get_addr@pltoff, %rax
1126 addq %r15, %rax
1127 call *%rax
1128 or
1129 leaq foo@tlsld(%rip), %rdi
1130 movabsq $__tls_get_addr@pltoff, %rax
1131 addq %rbx, %rax
1132 call *%rax */
1133
1134 static const unsigned char lea[] = { 0x48, 0x8d, 0x3d };
1135
1136 if (offset < 3 || (offset + 9) > sec->size)
1137 return false;
1138
1139 if (memcmp (contents + offset - 3, lea, 3) != 0)
1140 return false;
1141
1142 call = contents + offset + 4;
1143 if (!(call[0] == 0xe8
1144 || (call[0] == 0xff && call[1] == 0x15)
1145 || (call[0] == 0x67 && call[1] == 0xe8)))
1146 {
1147 if (!ABI_64_P (abfd)
1148 || (offset + 19) > sec->size
1149 || memcmp (call, "\x48\xb8", 2) != 0
1150 || call[11] != 0x01
1151 || call[13] != 0xff
1152 || call[14] != 0xd0
1153 || !((call[10] == 0x48 && call[12] == 0xd8)
1154 || (call[10] == 0x4c && call[12] == 0xf8)))
1155 return false;
1156 largepic = true;
1157 }
1158 indirect_call = call[0] == 0xff;
1159 }
1160
1161 r_symndx = htab->r_sym (rel[1].r_info);
1162 if (r_symndx < symtab_hdr->sh_info)
1163 return false;
1164
1165 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1166 if (h == NULL
1167 || !((struct elf_x86_link_hash_entry *) h)->tls_get_addr)
1168 return false;
1169 else
1170 {
1171 r_type = (ELF32_R_TYPE (rel[1].r_info)
1172 & ~R_X86_64_converted_reloc_bit);
1173 if (largepic)
1174 return r_type == R_X86_64_PLTOFF64;
1175 else if (indirect_call)
1176 return r_type == R_X86_64_GOTPCRELX;
1177 else
1178 return (r_type == R_X86_64_PC32 || r_type == R_X86_64_PLT32);
1179 }
1180
1181 case R_X86_64_GOTTPOFF:
1182 /* Check transition from IE access model:
1183 mov foo@gottpoff(%rip), %reg
1184 add foo@gottpoff(%rip), %reg
1185 */
1186
1187 /* Check REX prefix first. */
1188 if (offset >= 3 && (offset + 4) <= sec->size)
1189 {
1190 val = bfd_get_8 (abfd, contents + offset - 3);
1191 if (val != 0x48 && val != 0x4c)
1192 {
1193 /* X32 may have 0x44 REX prefix or no REX prefix. */
1194 if (ABI_64_P (abfd))
1195 return false;
1196 }
1197 }
1198 else
1199 {
1200 /* X32 may not have any REX prefix. */
1201 if (ABI_64_P (abfd))
1202 return false;
1203 if (offset < 2 || (offset + 3) > sec->size)
1204 return false;
1205 }
1206
1207 val = bfd_get_8 (abfd, contents + offset - 2);
1208 if (val != 0x8b && val != 0x03)
1209 return false;
1210
1211 val = bfd_get_8 (abfd, contents + offset - 1);
1212 return (val & 0xc7) == 5;
1213
1214 case R_X86_64_GOTPC32_TLSDESC:
1215 /* Check transition from GDesc access model:
1216 leaq x@tlsdesc(%rip), %rax <--- LP64 mode.
1217 rex leal x@tlsdesc(%rip), %eax <--- X32 mode.
1218
1219 Make sure it's a leaq adding rip to a 32-bit offset
1220 into any register, although it's probably almost always
1221 going to be rax. */
1222
1223 if (offset < 3 || (offset + 4) > sec->size)
1224 return false;
1225
1226 val = bfd_get_8 (abfd, contents + offset - 3);
1227 val &= 0xfb;
1228 if (val != 0x48 && (ABI_64_P (abfd) || val != 0x40))
1229 return false;
1230
1231 if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d)
1232 return false;
1233
1234 val = bfd_get_8 (abfd, contents + offset - 1);
1235 return (val & 0xc7) == 0x05;
1236
1237 case R_X86_64_TLSDESC_CALL:
1238 /* Check transition from GDesc access model:
1239 call *x@tlsdesc(%rax) <--- LP64 mode.
1240 call *x@tlsdesc(%eax) <--- X32 mode.
1241 */
1242 if (offset + 2 <= sec->size)
1243 {
1244 unsigned int prefix;
1245 call = contents + offset;
1246 prefix = 0;
1247 if (!ABI_64_P (abfd))
1248 {
1249 /* Check for call *x@tlsdesc(%eax). */
1250 if (call[0] == 0x67)
1251 {
1252 prefix = 1;
1253 if (offset + 3 > sec->size)
1254 return false;
1255 }
1256 }
1257 /* Make sure that it's a call *x@tlsdesc(%rax). */
1258 return call[prefix] == 0xff && call[1 + prefix] == 0x10;
1259 }
1260
1261 return false;
1262
1263 default:
1264 abort ();
1265 }
1266 }
1267
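/* For reference (a hedged sketch, not code in this function): once a
   GD sequence such as
       .byte 0x66; leaq foo@tlsgd(%rip), %rdi
       .word 0x6666; rex64; call __tls_get_addr@PLT
   passes the checks above and the symbol resolves locally in an
   executable, later relocation processing in this backend rewrites it
   to the LE form
       movq %fs:0, %rax
       leaq foo@tpoff(%rax), %rax
   while the IE form instead loads the offset from the GOT.  The
   function below only decides which target relocation type such a
   rewrite should use.  */
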
1268 /* Return TRUE if the TLS access transition is OK or no transition
1269 will be performed. Update R_TYPE if there is a transition. */
1270
1271 static bool
1272 elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd,
1273 asection *sec, bfd_byte *contents,
1274 Elf_Internal_Shdr *symtab_hdr,
1275 struct elf_link_hash_entry **sym_hashes,
1276 unsigned int *r_type, int tls_type,
1277 const Elf_Internal_Rela *rel,
1278 const Elf_Internal_Rela *relend,
1279 struct elf_link_hash_entry *h,
1280 unsigned long r_symndx,
1281 bool from_relocate_section)
1282 {
1283 unsigned int from_type = *r_type;
1284 unsigned int to_type = from_type;
1285 bool check = true;
1286
1287 /* Skip TLS transition for functions. */
1288 if (h != NULL
1289 && (h->type == STT_FUNC
1290 || h->type == STT_GNU_IFUNC))
1291 return true;
1292
1293 switch (from_type)
1294 {
1295 case R_X86_64_TLSGD:
1296 case R_X86_64_GOTPC32_TLSDESC:
1297 case R_X86_64_TLSDESC_CALL:
1298 case R_X86_64_GOTTPOFF:
1299 if (bfd_link_executable (info))
1300 {
1301 if (h == NULL)
1302 to_type = R_X86_64_TPOFF32;
1303 else
1304 to_type = R_X86_64_GOTTPOFF;
1305 }
1306
1307 /* When we are called from elf_x86_64_relocate_section, there may
1308 be additional transitions based on TLS_TYPE. */
1309 if (from_relocate_section)
1310 {
1311 unsigned int new_to_type = to_type;
1312
1313 if (TLS_TRANSITION_IE_TO_LE_P (info, h, tls_type))
1314 new_to_type = R_X86_64_TPOFF32;
1315
1316 if (to_type == R_X86_64_TLSGD
1317 || to_type == R_X86_64_GOTPC32_TLSDESC
1318 || to_type == R_X86_64_TLSDESC_CALL)
1319 {
1320 if (tls_type == GOT_TLS_IE)
1321 new_to_type = R_X86_64_GOTTPOFF;
1322 }
1323
1324 /* We checked the transition before when we were called from
1325 elf_x86_64_check_relocs. We only want to check the new
1326 transition which hasn't been checked before. */
1327 check = new_to_type != to_type && from_type == to_type;
1328 to_type = new_to_type;
1329 }
1330
1331 break;
1332
1333 case R_X86_64_TLSLD:
1334 if (bfd_link_executable (info))
1335 to_type = R_X86_64_TPOFF32;
1336 break;
1337
1338 default:
1339 return true;
1340 }
1341
1342 /* Return TRUE if there is no transition. */
1343 if (from_type == to_type)
1344 return true;
1345
1346 /* Check if the transition can be performed. */
1347 if (check
1348 && ! elf_x86_64_check_tls_transition (abfd, info, sec, contents,
1349 symtab_hdr, sym_hashes,
1350 from_type, rel, relend))
1351 {
1352 reloc_howto_type *from, *to;
1353 const char *name;
1354
1355 from = elf_x86_64_rtype_to_howto (abfd, from_type);
1356 to = elf_x86_64_rtype_to_howto (abfd, to_type);
1357
1358 if (from == NULL || to == NULL)
1359 return false;
1360
1361 if (h)
1362 name = h->root.root.string;
1363 else
1364 {
1365 struct elf_x86_link_hash_table *htab;
1366
1367 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1368 if (htab == NULL)
1369 name = "*unknown*";
1370 else
1371 {
1372 Elf_Internal_Sym *isym;
1373
1374 isym = bfd_sym_from_r_symndx (&htab->elf.sym_cache,
1375 abfd, r_symndx);
1376 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1377 }
1378 }
1379
1380 _bfd_error_handler
1381 /* xgettext:c-format */
1382 (_("%pB: TLS transition from %s to %s against `%s' at %#" PRIx64
1383 " in section `%pA' failed"),
1384 abfd, from->name, to->name, name, (uint64_t) rel->r_offset, sec);
1385 bfd_set_error (bfd_error_bad_value);
1386 return false;
1387 }
1388
1389 *r_type = to_type;
1390 return true;
1391 }
1392
1393 /* Rename some of the generic section flags to better document how they
1394 are used here. */
1395 #define check_relocs_failed sec_flg0
1396
1397 static bool
1398 elf_x86_64_need_pic (struct bfd_link_info *info,
1399 bfd *input_bfd, asection *sec,
1400 struct elf_link_hash_entry *h,
1401 Elf_Internal_Shdr *symtab_hdr,
1402 Elf_Internal_Sym *isym,
1403 reloc_howto_type *howto)
1404 {
1405 const char *v = "";
1406 const char *und = "";
1407 const char *pic = "";
1408 const char *object;
1409
1410 const char *name;
1411 if (h)
1412 {
1413 name = h->root.root.string;
1414 switch (ELF_ST_VISIBILITY (h->other))
1415 {
1416 case STV_HIDDEN:
1417 v = _("hidden symbol ");
1418 break;
1419 case STV_INTERNAL:
1420 v = _("internal symbol ");
1421 break;
1422 case STV_PROTECTED:
1423 v = _("protected symbol ");
1424 break;
1425 default:
1426 if (((struct elf_x86_link_hash_entry *) h)->def_protected)
1427 v = _("protected symbol ");
1428 else
1429 v = _("symbol ");
1430 pic = NULL;
1431 break;
1432 }
1433
1434 if (!SYMBOL_DEFINED_NON_SHARED_P (h) && !h->def_dynamic)
1435 und = _("undefined ");
1436 }
1437 else
1438 {
1439 name = bfd_elf_sym_name (input_bfd, symtab_hdr, isym, NULL);
1440 pic = NULL;
1441 }
1442
1443 if (bfd_link_dll (info))
1444 {
1445 object = _("a shared object");
1446 if (!pic)
1447 pic = _("; recompile with -fPIC");
1448 }
1449 else
1450 {
1451 if (bfd_link_pie (info))
1452 object = _("a PIE object");
1453 else
1454 object = _("a PDE object");
1455 if (!pic)
1456 pic = _("; recompile with -fPIE");
1457 }
1458
1459 /* xgettext:c-format */
1460 _bfd_error_handler (_("%pB: relocation %s against %s%s`%s' can "
1461 "not be used when making %s%s"),
1462 input_bfd, howto->name, und, v, name,
1463 object, pic);
1464 bfd_set_error (bfd_error_bad_value);
1465 sec->check_relocs_failed = 1;
1466 return false;
1467 }
1468
1469 /* With the local symbol, foo, we convert
1470 mov foo@GOTPCREL(%rip), %reg
1471 to
1472 lea foo(%rip), %reg
1473 and convert
1474 call/jmp *foo@GOTPCREL(%rip)
1475 to
1476 nop call foo/jmp foo nop
1477 When PIC is false, convert
1478 test %reg, foo@GOTPCREL(%rip)
1479 to
1480 test $foo, %reg
1481 and convert
1482 binop foo@GOTPCREL(%rip), %reg
1483 to
1484 binop $foo, %reg
1485 where binop is one of adc, add, and, cmp, or, sbb, sub, xor
1486 instructions. */
1487
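/* A byte-level sketch of the first conversion above (the %rax choice
   and the zeroed displacement are illustrative assumptions):
       48 8b 05 00 00 00 00	movq foo@GOTPCREL(%rip), %rax
   becomes
       48 8d 05 00 00 00 00	leaq foo(%rip), %rax
   Only the opcode byte changes (0x8b -> 0x8d); the GOTPCREL-style
   relocation on the 4-byte displacement is replaced by R_X86_64_PC32
   against foo itself.  */
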
1488 static bool
1489 elf_x86_64_convert_load_reloc (bfd *abfd,
1490 bfd_byte *contents,
1491 unsigned int *r_type_p,
1492 Elf_Internal_Rela *irel,
1493 struct elf_link_hash_entry *h,
1494 bool *converted,
1495 struct bfd_link_info *link_info)
1496 {
1497 struct elf_x86_link_hash_table *htab;
1498 bool is_pic;
1499 bool no_overflow;
1500 bool relocx;
1501 bool to_reloc_pc32;
1502 bool abs_symbol;
1503 bool local_ref;
1504 asection *tsec;
1505 bfd_signed_vma raddend;
1506 unsigned int opcode;
1507 unsigned int modrm;
1508 unsigned int r_type = *r_type_p;
1509 unsigned int r_symndx;
1510 bfd_vma roff = irel->r_offset;
1511 bfd_vma abs_relocation;
1512
1513 if (roff < (r_type == R_X86_64_REX_GOTPCRELX ? 3 : 2))
1514 return true;
1515
1516 raddend = irel->r_addend;
1517 /* Addend for 32-bit PC-relative relocation must be -4. */
1518 if (raddend != -4)
1519 return true;
1520
1521 htab = elf_x86_hash_table (link_info, X86_64_ELF_DATA);
1522 is_pic = bfd_link_pic (link_info);
1523
1524 relocx = (r_type == R_X86_64_GOTPCRELX
1525 || r_type == R_X86_64_REX_GOTPCRELX);
1526
1527 /* TRUE if --no-relax is used. */
1528 no_overflow = link_info->disable_target_specific_optimizations > 1;
1529
1530 r_symndx = htab->r_sym (irel->r_info);
1531
1532 opcode = bfd_get_8 (abfd, contents + roff - 2);
1533
1534 /* Converting mov to lea has long been supported, even for plain R_X86_64_GOTPCREL. */
1535 if (opcode != 0x8b)
1536 {
1537 /* Only convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX
1538 for call, jmp or one of adc, add, and, cmp, or, sbb, sub,
1539 test, xor instructions. */
1540 if (!relocx)
1541 return true;
1542 }
1543
1544 /* We convert only to R_X86_64_PC32:
1545 1. Branch.
1546 2. R_X86_64_GOTPCREL since we can't modify REX byte.
1547 3. no_overflow is true.
1548 4. PIC.
1549 */
1550 to_reloc_pc32 = (opcode == 0xff
1551 || !relocx
1552 || no_overflow
1553 || is_pic);
1554
1555 abs_symbol = false;
1556 abs_relocation = 0;
1557
1558 /* Get the symbol referred to by the reloc. */
1559 if (h == NULL)
1560 {
1561 Elf_Internal_Sym *isym
1562 = bfd_sym_from_r_symndx (&htab->elf.sym_cache, abfd, r_symndx);
1563
1564 /* Skip relocation against undefined symbols. */
1565 if (isym->st_shndx == SHN_UNDEF)
1566 return true;
1567
1568 local_ref = true;
1569 if (isym->st_shndx == SHN_ABS)
1570 {
1571 tsec = bfd_abs_section_ptr;
1572 abs_symbol = true;
1573 abs_relocation = isym->st_value;
1574 }
1575 else if (isym->st_shndx == SHN_COMMON)
1576 tsec = bfd_com_section_ptr;
1577 else if (isym->st_shndx == SHN_X86_64_LCOMMON)
1578 tsec = &_bfd_elf_large_com_section;
1579 else
1580 tsec = bfd_section_from_elf_index (abfd, isym->st_shndx);
1581 }
1582 else
1583 {
1584 /* An undefined weak symbol is only bound locally in an executable
1585 and its reference is resolved as 0 without relocation
1586 overflow. We can only perform this optimization for
1587 GOTPCRELX relocations since we need to modify the REX byte.
1588 It is OK to convert mov with R_X86_64_GOTPCREL to
1589 R_X86_64_PC32. */
1590 struct elf_x86_link_hash_entry *eh = elf_x86_hash_entry (h);
1591
1592 abs_symbol = ABS_SYMBOL_P (h);
1593 abs_relocation = h->root.u.def.value;
1594
1595 /* NB: Also set linker_def via SYMBOL_REFERENCES_LOCAL_P. */
1596 local_ref = SYMBOL_REFERENCES_LOCAL_P (link_info, h);
1597 if ((relocx || opcode == 0x8b)
1598 && (h->root.type == bfd_link_hash_undefweak
1599 && !eh->linker_def
1600 && local_ref))
1601 {
1602 if (opcode == 0xff)
1603 {
1604 /* Skip for branch instructions since R_X86_64_PC32
1605 may overflow. */
1606 if (no_overflow)
1607 return true;
1608 }
1609 else if (relocx)
1610 {
1611 /* For non-branch instructions, we can convert to
1612 R_X86_64_32/R_X86_64_32S since we know if there
1613 is a REX byte. */
1614 to_reloc_pc32 = false;
1615 }
1616
1617 /* Since we don't know the current PC when PIC is true,
1618 we can't convert to R_X86_64_PC32. */
1619 if (to_reloc_pc32 && is_pic)
1620 return true;
1621
1622 goto convert;
1623 }
1624 /* Avoid optimizing GOTPCREL relocations against _DYNAMIC since
1625 ld.so may use its link-time address. */
1626 else if (h->start_stop
1627 || eh->linker_def
1628 || ((h->def_regular
1629 || h->root.type == bfd_link_hash_defined
1630 || h->root.type == bfd_link_hash_defweak)
1631 && h != htab->elf.hdynamic
1632 && local_ref))
1633 {
1634 /* bfd_link_hash_new or bfd_link_hash_undefined is
1635 set by an assignment in a linker script in
1636 bfd_elf_record_link_assignment. start_stop is set
1637 on __start_SECNAME/__stop_SECNAME which mark section
1638 SECNAME. */
1639 if (h->start_stop
1640 || eh->linker_def
1641 || (h->def_regular
1642 && (h->root.type == bfd_link_hash_new
1643 || h->root.type == bfd_link_hash_undefined
1644 || ((h->root.type == bfd_link_hash_defined
1645 || h->root.type == bfd_link_hash_defweak)
1646 && h->root.u.def.section == bfd_und_section_ptr))))
1647 {
1648 /* Skip since R_X86_64_32/R_X86_64_32S may overflow. */
1649 if (no_overflow)
1650 return true;
1651 goto convert;
1652 }
1653 tsec = h->root.u.def.section;
1654 }
1655 else
1656 return true;
1657 }
1658
1659 /* Don't convert GOTPCREL relocation against large section. */
1660 if (elf_section_data (tsec) != NULL
1661 && (elf_section_flags (tsec) & SHF_X86_64_LARGE) != 0)
1662 return true;
1663
1664 /* Skip since R_X86_64_PC32/R_X86_64_32/R_X86_64_32S may overflow. */
1665 if (no_overflow)
1666 return true;
1667
1668 convert:
1669 if (opcode == 0xff)
1670 {
1671 /* We have "call/jmp *foo@GOTPCREL(%rip)". */
1672 unsigned int nop;
1673 unsigned int disp;
1674 bfd_vma nop_offset;
1675
1676 /* Convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX to
1677 R_X86_64_PC32. */
1678 modrm = bfd_get_8 (abfd, contents + roff - 1);
1679 if (modrm == 0x25)
1680 {
1681 /* Convert to "jmp foo nop". */
1682 modrm = 0xe9;
1683 nop = NOP_OPCODE;
1684 nop_offset = irel->r_offset + 3;
1685 disp = bfd_get_32 (abfd, contents + irel->r_offset);
1686 irel->r_offset -= 1;
1687 bfd_put_32 (abfd, disp, contents + irel->r_offset);
1688 }
1689 else
1690 {
1691 struct elf_x86_link_hash_entry *eh
1692 = (struct elf_x86_link_hash_entry *) h;
1693
1694 /* Convert to "nop call foo". ADDR_PREFIX_OPCODE
1695 is a nop prefix. */
1696 modrm = 0xe8;
1697 /* To support TLS optimization, always use addr32 prefix for
1698 "call *__tls_get_addr@GOTPCREL(%rip)". */
1699 if (eh && eh->tls_get_addr)
1700 {
1701 nop = 0x67;
1702 nop_offset = irel->r_offset - 2;
1703 }
1704 else
1705 {
1706 nop = htab->params->call_nop_byte;
1707 if (htab->params->call_nop_as_suffix)
1708 {
1709 nop_offset = irel->r_offset + 3;
1710 disp = bfd_get_32 (abfd, contents + irel->r_offset);
1711 irel->r_offset -= 1;
1712 bfd_put_32 (abfd, disp, contents + irel->r_offset);
1713 }
1714 else
1715 nop_offset = irel->r_offset - 2;
1716 }
1717 }
1718 bfd_put_8 (abfd, nop, contents + nop_offset);
1719 bfd_put_8 (abfd, modrm, contents + irel->r_offset - 1);
1720 r_type = R_X86_64_PC32;
1721 }
1722 else
1723 {
1724 unsigned int rex;
1725 unsigned int rex_mask = REX_R;
1726
1727 if (r_type == R_X86_64_REX_GOTPCRELX)
1728 rex = bfd_get_8 (abfd, contents + roff - 3);
1729 else
1730 rex = 0;
1731
1732 if (opcode == 0x8b)
1733 {
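/* Sketch of the rewrites below, using %rax as an example register:
     48 8b 05 disp32   movq foo@GOTPCREL(%rip), %rax
       -> 48 8d 05 disp32   leaq foo(%rip), %rax   with R_X86_64_PC32, or
       -> 48 c7 c0 imm32    movq $foo, %rax        with R_X86_64_32S, or
       -> 40 c7 c0 imm32    movl $foo, %eax        with R_X86_64_32
          (REX.W cleared).
   Only the opcode, ModRM and, for the immediate forms, the REX byte
   change; the instruction length is preserved.  */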
1734 if (abs_symbol && local_ref && relocx)
1735 to_reloc_pc32 = false;
1736
1737 if (to_reloc_pc32)
1738 {
1739 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
1740 "lea foo(%rip), %reg". */
1741 opcode = 0x8d;
1742 r_type = R_X86_64_PC32;
1743 }
1744 else
1745 {
1746 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
1747 "mov $foo, %reg". */
1748 opcode = 0xc7;
1749 modrm = bfd_get_8 (abfd, contents + roff - 1);
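/* The original ModRM has mod == 00 and r/m == 101 (RIP-relative);
   move the destination register from the reg field into r/m and
   set mod == 11 with reg == 0, the /0 opcode extension of C7.  */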
1750 modrm = 0xc0 | (modrm & 0x38) >> 3;
1751 if ((rex & REX_W) != 0
1752 && ABI_64_P (link_info->output_bfd))
1753 {
1754 /* Keep the REX_W bit in REX byte for LP64. */
1755 r_type = R_X86_64_32S;
1756 goto rewrite_modrm_rex;
1757 }
1758 else
1759 {
1760 /* If the REX_W bit in REX byte isn't needed,
1761 use R_X86_64_32 and clear the W bit to avoid
1762 sign-extend imm32 to imm64. */
1763 r_type = R_X86_64_32;
1764 /* Clear the W bit in REX byte. */
1765 rex_mask |= REX_W;
1766 goto rewrite_modrm_rex;
1767 }
1768 }
1769 }
1770 else
1771 {
1772 /* R_X86_64_PC32 isn't supported. */
1773 if (to_reloc_pc32)
1774 return true;
1775
1776 modrm = bfd_get_8 (abfd, contents + roff - 1);
1777 if (opcode == 0x85)
1778 {
1779 /* Convert "test %reg, foo@GOTPCREL(%rip)" to
1780 "test $foo, %reg". */
1781 modrm = 0xc0 | (modrm & 0x38) >> 3;
1782 opcode = 0xf7;
1783 }
1784 else
1785 {
1786 /* Convert "binop foo@GOTPCREL(%rip), %reg" to
1787 "binop $foo, %reg". */
1788 modrm = 0xc0 | (modrm & 0x38) >> 3 | (opcode & 0x3c);
1789 opcode = 0x81;
1790 }
1791
1792 /* Use R_X86_64_32 with 32-bit operand to avoid relocation
1793 overflow when sign-extending imm32 to imm64. */
1794 r_type = (rex & REX_W) != 0 ? R_X86_64_32S : R_X86_64_32;
1795
1796 rewrite_modrm_rex:
1797 if (abs_relocation)
1798 {
1799 /* Check if R_X86_64_32S/R_X86_64_32 fits. */
1800 if (r_type == R_X86_64_32S)
1801 {
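/* The sum exceeds 0xffffffff exactly when the 64-bit value lies
   outside the sign-extended 32-bit range [-0x80000000, 0x7fffffff].  */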
1802 if ((abs_relocation + 0x80000000) > 0xffffffff)
1803 return true;
1804 }
1805 else
1806 {
1807 if (abs_relocation > 0xffffffff)
1808 return true;
1809 }
1810 }
1811
1812 bfd_put_8 (abfd, modrm, contents + roff - 1);
1813
1814 if (rex)
1815 {
1816 /* Move the R bit to the B bit in REX byte. */
1817 rex = (rex & ~rex_mask) | (rex & REX_R) >> 2;
1818 bfd_put_8 (abfd, rex, contents + roff - 3);
1819 }
1820
1821 /* No addend for R_X86_64_32/R_X86_64_32S relocations. */
1822 irel->r_addend = 0;
1823 }
1824
1825 bfd_put_8 (abfd, opcode, contents + roff - 2);
1826 }
1827
1828 *r_type_p = r_type;
1829 irel->r_info = htab->r_info (r_symndx,
1830 r_type | R_X86_64_converted_reloc_bit);
1831
1832 *converted = true;
1833
1834 return true;
1835 }
1836
1837 /* Look through the relocs for a section during the first phase, and
1838 calculate needed space in the global offset table, procedure
1839 linkage table, and dynamic reloc sections. */
1840
1841 static bool
1842 elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info,
1843 asection *sec,
1844 const Elf_Internal_Rela *relocs)
1845 {
1846 struct elf_x86_link_hash_table *htab;
1847 Elf_Internal_Shdr *symtab_hdr;
1848 struct elf_link_hash_entry **sym_hashes;
1849 const Elf_Internal_Rela *rel;
1850 const Elf_Internal_Rela *rel_end;
1851 asection *sreloc;
1852 bfd_byte *contents;
1853 bool converted;
1854
1855 if (bfd_link_relocatable (info))
1856 return true;
1857
1858 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1859 if (htab == NULL)
1860 {
1861 sec->check_relocs_failed = 1;
1862 return false;
1863 }
1864
1865 BFD_ASSERT (is_x86_elf (abfd, htab));
1866
1867 /* Get the section contents. */
1868 if (elf_section_data (sec)->this_hdr.contents != NULL)
1869 contents = elf_section_data (sec)->this_hdr.contents;
1870 else if (!bfd_malloc_and_get_section (abfd, sec, &contents))
1871 {
1872 sec->check_relocs_failed = 1;
1873 return false;
1874 }
1875
1876 symtab_hdr = &elf_symtab_hdr (abfd);
1877 sym_hashes = elf_sym_hashes (abfd);
1878
1879 converted = false;
1880
1881 sreloc = NULL;
1882
1883 rel_end = relocs + sec->reloc_count;
1884 for (rel = relocs; rel < rel_end; rel++)
1885 {
1886 unsigned int r_type;
1887 unsigned int r_symndx;
1888 struct elf_link_hash_entry *h;
1889 struct elf_x86_link_hash_entry *eh;
1890 Elf_Internal_Sym *isym;
1891 const char *name;
1892 bool size_reloc;
1893 bool converted_reloc;
1894 bool no_dynreloc;
1895
1896 r_symndx = htab->r_sym (rel->r_info);
1897 r_type = ELF32_R_TYPE (rel->r_info);
1898
1899 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
1900 {
1901 /* xgettext:c-format */
1902 _bfd_error_handler (_("%pB: bad symbol index: %d"),
1903 abfd, r_symndx);
1904 goto error_return;
1905 }
1906
1907 if (r_symndx < symtab_hdr->sh_info)
1908 {
1909 /* A local symbol. */
1910 isym = bfd_sym_from_r_symndx (&htab->elf.sym_cache,
1911 abfd, r_symndx);
1912 if (isym == NULL)
1913 goto error_return;
1914
1915 /* Check relocation against local STT_GNU_IFUNC symbol. */
1916 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
1917 {
1918 h = _bfd_elf_x86_get_local_sym_hash (htab, abfd, rel,
1919 true);
1920 if (h == NULL)
1921 goto error_return;
1922
1923 /* Fake a STT_GNU_IFUNC symbol. */
1924 h->root.root.string = bfd_elf_sym_name (abfd, symtab_hdr,
1925 isym, NULL);
1926 h->type = STT_GNU_IFUNC;
1927 h->def_regular = 1;
1928 h->ref_regular = 1;
1929 h->forced_local = 1;
1930 h->root.type = bfd_link_hash_defined;
1931 }
1932 else
1933 h = NULL;
1934 }
1935 else
1936 {
1937 isym = NULL;
1938 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1939 while (h->root.type == bfd_link_hash_indirect
1940 || h->root.type == bfd_link_hash_warning)
1941 h = (struct elf_link_hash_entry *) h->root.u.i.link;
1942 }
1943
1944 /* Check invalid x32 relocations. */
1945 if (!ABI_64_P (abfd))
1946 switch (r_type)
1947 {
1948 default:
1949 break;
1950
1951 case R_X86_64_DTPOFF64:
1952 case R_X86_64_TPOFF64:
1953 case R_X86_64_PC64:
1954 case R_X86_64_GOTOFF64:
1955 case R_X86_64_GOT64:
1956 case R_X86_64_GOTPCREL64:
1957 case R_X86_64_GOTPC64:
1958 case R_X86_64_GOTPLT64:
1959 case R_X86_64_PLTOFF64:
1960 {
1961 if (h)
1962 name = h->root.root.string;
1963 else
1964 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1965 NULL);
1966 _bfd_error_handler
1967 /* xgettext:c-format */
1968 (_("%pB: relocation %s against symbol `%s' isn't "
1969 "supported in x32 mode"), abfd,
1970 x86_64_elf_howto_table[r_type].name, name);
1971 bfd_set_error (bfd_error_bad_value);
1972 goto error_return;
1973 }
1974 break;
1975 }
1976
1977 if (h != NULL)
1978 {
1979 /* It is referenced by a non-shared object. */
1980 h->ref_regular = 1;
1981 }
1982
1983 converted_reloc = false;
1984 if ((r_type == R_X86_64_GOTPCREL
1985 || r_type == R_X86_64_GOTPCRELX
1986 || r_type == R_X86_64_REX_GOTPCRELX)
1987 && (h == NULL || h->type != STT_GNU_IFUNC))
1988 {
1989 Elf_Internal_Rela *irel = (Elf_Internal_Rela *) rel;
1990 if (!elf_x86_64_convert_load_reloc (abfd, contents, &r_type,
1991 irel, h, &converted_reloc,
1992 info))
1993 goto error_return;
1994
1995 if (converted_reloc)
1996 converted = true;
1997 }
1998
1999 if (!_bfd_elf_x86_valid_reloc_p (sec, info, htab, rel, h, isym,
2000 symtab_hdr, &no_dynreloc))
2001 return false;
2002
2003 if (! elf_x86_64_tls_transition (info, abfd, sec, contents,
2004 symtab_hdr, sym_hashes,
2005 &r_type, GOT_UNKNOWN,
2006 rel, rel_end, h, r_symndx, false))
2007 goto error_return;
2008
2009 /* Check if _GLOBAL_OFFSET_TABLE_ is referenced. */
2010 if (h == htab->elf.hgot)
2011 htab->got_referenced = true;
2012
2013 eh = (struct elf_x86_link_hash_entry *) h;
2014 switch (r_type)
2015 {
2016 case R_X86_64_TLSLD:
2017 htab->tls_ld_or_ldm_got.refcount = 1;
2018 goto create_got;
2019
2020 case R_X86_64_TPOFF32:
2021 if (!bfd_link_executable (info) && ABI_64_P (abfd))
2022 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
2023 &x86_64_elf_howto_table[r_type]);
2024 if (eh != NULL)
2025 eh->zero_undefweak &= 0x2;
2026 break;
2027
2028 case R_X86_64_GOTTPOFF:
2029 if (!bfd_link_executable (info))
2030 info->flags |= DF_STATIC_TLS;
2031 /* Fall through */
2032
2033 case R_X86_64_GOT32:
2034 case R_X86_64_GOTPCREL:
2035 case R_X86_64_GOTPCRELX:
2036 case R_X86_64_REX_GOTPCRELX:
2037 case R_X86_64_TLSGD:
2038 case R_X86_64_GOT64:
2039 case R_X86_64_GOTPCREL64:
2040 case R_X86_64_GOTPLT64:
2041 case R_X86_64_GOTPC32_TLSDESC:
2042 case R_X86_64_TLSDESC_CALL:
2043 /* This symbol requires a global offset table entry. */
2044 {
2045 int tls_type, old_tls_type;
2046
2047 switch (r_type)
2048 {
2049 default:
2050 tls_type = GOT_NORMAL;
2051 if (h)
2052 {
2053 if (ABS_SYMBOL_P (h))
2054 tls_type = GOT_ABS;
2055 }
2056 else if (isym->st_shndx == SHN_ABS)
2057 tls_type = GOT_ABS;
2058 break;
2059 case R_X86_64_TLSGD:
2060 tls_type = GOT_TLS_GD;
2061 break;
2062 case R_X86_64_GOTTPOFF:
2063 tls_type = GOT_TLS_IE;
2064 break;
2065 case R_X86_64_GOTPC32_TLSDESC:
2066 case R_X86_64_TLSDESC_CALL:
2067 tls_type = GOT_TLS_GDESC;
2068 break;
2069 }
2070
2071 if (h != NULL)
2072 {
2073 h->got.refcount = 1;
2074 old_tls_type = eh->tls_type;
2075 }
2076 else
2077 {
2078 bfd_signed_vma *local_got_refcounts;
2079
2080 /* This is a global offset table entry for a local symbol. */
2081 local_got_refcounts = elf_local_got_refcounts (abfd);
2082 if (local_got_refcounts == NULL)
2083 {
2084 bfd_size_type size;
2085
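/* One allocation holds three parallel per-local-symbol arrays,
   laid out back to back: the GOT refcounts (bfd_signed_vma), the
   TLSDESC GOT offsets (bfd_vma) and the GOT TLS types (char).  */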
2086 size = symtab_hdr->sh_info;
2087 size *= sizeof (bfd_signed_vma)
2088 + sizeof (bfd_vma) + sizeof (char);
2089 local_got_refcounts = ((bfd_signed_vma *)
2090 bfd_zalloc (abfd, size));
2091 if (local_got_refcounts == NULL)
2092 goto error_return;
2093 elf_local_got_refcounts (abfd) = local_got_refcounts;
2094 elf_x86_local_tlsdesc_gotent (abfd)
2095 = (bfd_vma *) (local_got_refcounts + symtab_hdr->sh_info);
2096 elf_x86_local_got_tls_type (abfd)
2097 = (char *) (local_got_refcounts + 2 * symtab_hdr->sh_info);
2098 }
2099 local_got_refcounts[r_symndx] = 1;
2100 old_tls_type
2101 = elf_x86_local_got_tls_type (abfd) [r_symndx];
2102 }
2103
2104 /* If a TLS symbol is accessed using IE at least once,
2105 there is no point in using the dynamic model for it.  */
2106 if (old_tls_type != tls_type && old_tls_type != GOT_UNKNOWN
2107 && (! GOT_TLS_GD_ANY_P (old_tls_type)
2108 || tls_type != GOT_TLS_IE))
2109 {
2110 if (old_tls_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (tls_type))
2111 tls_type = old_tls_type;
2112 else if (GOT_TLS_GD_ANY_P (old_tls_type)
2113 && GOT_TLS_GD_ANY_P (tls_type))
2114 tls_type |= old_tls_type;
2115 else
2116 {
2117 if (h)
2118 name = h->root.root.string;
2119 else
2120 name = bfd_elf_sym_name (abfd, symtab_hdr,
2121 isym, NULL);
2122 _bfd_error_handler
2123 /* xgettext:c-format */
2124 (_("%pB: '%s' accessed both as normal and"
2125 " thread local symbol"),
2126 abfd, name);
2127 bfd_set_error (bfd_error_bad_value);
2128 goto error_return;
2129 }
2130 }
2131
2132 if (old_tls_type != tls_type)
2133 {
2134 if (eh != NULL)
2135 eh->tls_type = tls_type;
2136 else
2137 elf_x86_local_got_tls_type (abfd) [r_symndx] = tls_type;
2138 }
2139 }
2140 /* Fall through */
2141
2142 case R_X86_64_GOTOFF64:
2143 case R_X86_64_GOTPC32:
2144 case R_X86_64_GOTPC64:
2145 create_got:
2146 if (eh != NULL)
2147 eh->zero_undefweak &= 0x2;
2148 break;
2149
2150 case R_X86_64_PLT32:
2151 case R_X86_64_PLT32_BND:
2152 /* This symbol requires a procedure linkage table entry. We
2153 actually build the entry in adjust_dynamic_symbol,
2154 because this might be a case of linking PIC code which is
2155 never referenced by a dynamic object, in which case we
2156 don't need to generate a procedure linkage table entry
2157 after all. */
2158
2159 /* If this is a local symbol, we resolve it directly without
2160 creating a procedure linkage table entry. */
2161 if (h == NULL)
2162 continue;
2163
2164 eh->zero_undefweak &= 0x2;
2165 h->needs_plt = 1;
2166 h->plt.refcount = 1;
2167 break;
2168
2169 case R_X86_64_PLTOFF64:
2170 /* This tries to form the 'address' of a function relative
2171 to GOT. For global symbols we need a PLT entry. */
2172 if (h != NULL)
2173 {
2174 h->needs_plt = 1;
2175 h->plt.refcount = 1;
2176 }
2177 goto create_got;
2178
2179 case R_X86_64_SIZE32:
2180 case R_X86_64_SIZE64:
2181 size_reloc = true;
2182 goto do_size;
2183
2184 case R_X86_64_32:
2185 if (!ABI_64_P (abfd))
2186 goto pointer;
2187 /* Fall through. */
2188 case R_X86_64_8:
2189 case R_X86_64_16:
2190 case R_X86_64_32S:
2191 /* Check relocation overflow as these relocs may lead to
2192 run-time relocation overflow. Don't error out for
2193 sections we don't care about, such as debug sections or
2194 when relocation overflow check is disabled. */
2195 if (!htab->params->no_reloc_overflow_check
2196 && !converted_reloc
2197 && (bfd_link_pic (info)
2198 || (bfd_link_executable (info)
2199 && h != NULL
2200 && !h->def_regular
2201 && h->def_dynamic
2202 && (sec->flags & SEC_READONLY) == 0)))
2203 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
2204 &x86_64_elf_howto_table[r_type]);
2205 /* Fall through. */
2206
2207 case R_X86_64_PC8:
2208 case R_X86_64_PC16:
2209 case R_X86_64_PC32:
2210 case R_X86_64_PC32_BND:
2211 case R_X86_64_PC64:
2212 case R_X86_64_64:
2213 pointer:
2214 if (eh != NULL && (sec->flags & SEC_CODE) != 0)
2215 eh->zero_undefweak |= 0x2;
2216 /* We are called after all symbols have been resolved. Only
2217 relocation against STT_GNU_IFUNC symbol must go through
2218 PLT. */
2219 if (h != NULL
2220 && (bfd_link_executable (info)
2221 || h->type == STT_GNU_IFUNC))
2222 {
2223 bool func_pointer_ref = false;
2224
2225 if (r_type == R_X86_64_PC32)
2226 {
2227 /* Since something like ".long foo - ." may be used
2228 as a pointer, make sure that the PLT is used if foo is
2229 a function defined in a shared library. */
2230 if ((sec->flags & SEC_CODE) == 0)
2231 {
2232 h->pointer_equality_needed = 1;
2233 if (bfd_link_pie (info)
2234 && h->type == STT_FUNC
2235 && !h->def_regular
2236 && h->def_dynamic)
2237 {
2238 h->needs_plt = 1;
2239 h->plt.refcount = 1;
2240 }
2241 }
2242 }
2243 else if (r_type != R_X86_64_PC32_BND
2244 && r_type != R_X86_64_PC64)
2245 {
2246 h->pointer_equality_needed = 1;
2247 /* At run-time, R_X86_64_64 can be resolved for both
2248 x86-64 and x32. But R_X86_64_32 and R_X86_64_32S
2249 can only be resolved for x32. */
2250 if ((sec->flags & SEC_READONLY) == 0
2251 && (r_type == R_X86_64_64
2252 || (!ABI_64_P (abfd)
2253 && (r_type == R_X86_64_32
2254 || r_type == R_X86_64_32S))))
2255 func_pointer_ref = true;
2256 }
2257
2258 if (!func_pointer_ref)
2259 {
2260 /* If this reloc is in a read-only section, we might
2261 need a copy reloc. We can't check reliably at this
2262 stage whether the section is read-only, as input
2263 sections have not yet been mapped to output sections.
2264 Tentatively set the flag for now, and correct in
2265 adjust_dynamic_symbol. */
2266 h->non_got_ref = 1;
2267
2268 /* We may need a .plt entry if the symbol is a function
2269 defined in a shared lib or is a function referenced
2270 from the code or read-only section. */
2271 if (!h->def_regular
2272 || (sec->flags & (SEC_CODE | SEC_READONLY)) != 0)
2273 h->plt.refcount = 1;
2274 }
2275 }
2276
2277 size_reloc = false;
2278 do_size:
2279 if (!no_dynreloc
2280 && NEED_DYNAMIC_RELOCATION_P (info, true, h, sec, r_type,
2281 htab->pointer_r_type))
2282 {
2283 struct elf_dyn_relocs *p;
2284 struct elf_dyn_relocs **head;
2285
2286 /* We must copy these reloc types into the output file.
2287 Create a reloc section in dynobj and make room for
2288 this reloc. */
2289 if (sreloc == NULL)
2290 {
2291 sreloc = _bfd_elf_make_dynamic_reloc_section
2292 (sec, htab->elf.dynobj, ABI_64_P (abfd) ? 3 : 2,
2293 abfd, /*rela?*/ true);
2294
2295 if (sreloc == NULL)
2296 goto error_return;
2297 }
2298
2299 /* If this is a global symbol, we count the number of
2300 relocations we need for this symbol. */
2301 if (h != NULL)
2302 head = &h->dyn_relocs;
2303 else
2304 {
2305 /* Track dynamic relocs needed for local syms too.
2306 We really need local syms available to do this
2307 easily. Oh well. */
2308 asection *s;
2309 void **vpp;
2310
2311 isym = bfd_sym_from_r_symndx (&htab->elf.sym_cache,
2312 abfd, r_symndx);
2313 if (isym == NULL)
2314 goto error_return;
2315
2316 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
2317 if (s == NULL)
2318 s = sec;
2319
2320 /* Beware of type punned pointers vs strict aliasing
2321 rules. */
2322 vpp = &(elf_section_data (s)->local_dynrel);
2323 head = (struct elf_dyn_relocs **)vpp;
2324 }
2325
2326 p = *head;
2327 if (p == NULL || p->sec != sec)
2328 {
2329 size_t amt = sizeof *p;
2330
2331 p = ((struct elf_dyn_relocs *)
2332 bfd_alloc (htab->elf.dynobj, amt));
2333 if (p == NULL)
2334 goto error_return;
2335 p->next = *head;
2336 *head = p;
2337 p->sec = sec;
2338 p->count = 0;
2339 p->pc_count = 0;
2340 }
2341
2342 p->count += 1;
2343 /* Count size relocation as PC-relative relocation. */
2344 if (X86_PCREL_TYPE_P (r_type) || size_reloc)
2345 p->pc_count += 1;
2346 }
2347 break;
2348
2349 /* This relocation describes the C++ object vtable hierarchy.
2350 Reconstruct it for later use during GC. */
2351 case R_X86_64_GNU_VTINHERIT:
2352 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
2353 goto error_return;
2354 break;
2355
2356 /* This relocation describes which C++ vtable entries are actually
2357 used. Record for later use during GC. */
2358 case R_X86_64_GNU_VTENTRY:
2359 if (!bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
2360 goto error_return;
2361 break;
2362
2363 default:
2364 break;
2365 }
2366 }
2367
2368 if (elf_section_data (sec)->this_hdr.contents != contents)
2369 {
2370 if (!converted && !info->keep_memory)
2371 free (contents);
2372 else
2373 {
2374 /* Cache the section contents for elf_link_input_bfd if any
2375 load is converted or --no-keep-memory isn't used. */
2376 elf_section_data (sec)->this_hdr.contents = contents;
2377 }
2378 }
2379
2380 /* Cache relocations if any load is converted. */
2381 if (elf_section_data (sec)->relocs != relocs && converted)
2382 elf_section_data (sec)->relocs = (Elf_Internal_Rela *) relocs;
2383
2384 return true;
2385
2386 error_return:
2387 if (elf_section_data (sec)->this_hdr.contents != contents)
2388 free (contents);
2389 sec->check_relocs_failed = 1;
2390 return false;
2391 }
2392
2393 /* Return the relocation value for a @tpoff relocation
2394 if the STT_TLS virtual address is ADDRESS.  */
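/* Roughly: x86-64 uses TLS variant II, where the thread pointer
   points to the TCB sitting just above the static TLS block, so
   @tpoff values are negative.  The value computed below is
       ADDRESS - (tls_sec->vma + align (tls_size, static_tls_alignment))
   i.e. the symbol's offset from the end of the aligned static TLS
   template, which at run time is its offset from the thread
   pointer.  */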
2395
2396 static bfd_vma
2397 elf_x86_64_tpoff (struct bfd_link_info *info, bfd_vma address)
2398 {
2399 struct elf_link_hash_table *htab = elf_hash_table (info);
2400 const struct elf_backend_data *bed = get_elf_backend_data (info->output_bfd);
2401 bfd_vma static_tls_size;
2402
2403 /* If tls_sec is NULL, we should have signalled an error already. */
2404 if (htab->tls_sec == NULL)
2405 return 0;
2406
2407 /* Consider special static TLS alignment requirements. */
2408 static_tls_size = BFD_ALIGN (htab->tls_size, bed->static_tls_alignment);
2409 return address - static_tls_size - htab->tls_sec->vma;
2410 }
2411
2412 /* Relocate an x86_64 ELF section. */
2413
2414 static int
2415 elf_x86_64_relocate_section (bfd *output_bfd,
2416 struct bfd_link_info *info,
2417 bfd *input_bfd,
2418 asection *input_section,
2419 bfd_byte *contents,
2420 Elf_Internal_Rela *relocs,
2421 Elf_Internal_Sym *local_syms,
2422 asection **local_sections)
2423 {
2424 struct elf_x86_link_hash_table *htab;
2425 Elf_Internal_Shdr *symtab_hdr;
2426 struct elf_link_hash_entry **sym_hashes;
2427 bfd_vma *local_got_offsets;
2428 bfd_vma *local_tlsdesc_gotents;
2429 Elf_Internal_Rela *rel;
2430 Elf_Internal_Rela *wrel;
2431 Elf_Internal_Rela *relend;
2432 unsigned int plt_entry_size;
2433 bool status;
2434
2435 /* Skip if check_relocs failed. */
2436 if (input_section->check_relocs_failed)
2437 return false;
2438
2439 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
2440 if (htab == NULL)
2441 return false;
2442
2443 if (!is_x86_elf (input_bfd, htab))
2444 {
2445 bfd_set_error (bfd_error_wrong_format);
2446 return false;
2447 }
2448
2449 plt_entry_size = htab->plt.plt_entry_size;
2450 symtab_hdr = &elf_symtab_hdr (input_bfd);
2451 sym_hashes = elf_sym_hashes (input_bfd);
2452 local_got_offsets = elf_local_got_offsets (input_bfd);
2453 local_tlsdesc_gotents = elf_x86_local_tlsdesc_gotent (input_bfd);
2454
2455 _bfd_x86_elf_set_tls_module_base (info);
2456
2457 status = true;
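/* REL walks the input relocations while WREL is the write pointer:
   entries dropped below (ld -r relocations in debug sections against
   discarded sections) are squeezed out by not advancing WREL.  */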
2458 rel = wrel = relocs;
2459 relend = relocs + input_section->reloc_count;
2460 for (; rel < relend; wrel++, rel++)
2461 {
2462 unsigned int r_type, r_type_tls;
2463 reloc_howto_type *howto;
2464 unsigned long r_symndx;
2465 struct elf_link_hash_entry *h;
2466 struct elf_x86_link_hash_entry *eh;
2467 Elf_Internal_Sym *sym;
2468 asection *sec;
2469 bfd_vma off, offplt, plt_offset;
2470 bfd_vma relocation;
2471 bool unresolved_reloc;
2472 bfd_reloc_status_type r;
2473 int tls_type;
2474 asection *base_got, *resolved_plt;
2475 bfd_vma st_size;
2476 bool resolved_to_zero;
2477 bool relative_reloc;
2478 bool converted_reloc;
2479 bool need_copy_reloc_in_pie;
2480 bool no_copyreloc_p;
2481
2482 r_type = ELF32_R_TYPE (rel->r_info);
2483 if (r_type == (int) R_X86_64_GNU_VTINHERIT
2484 || r_type == (int) R_X86_64_GNU_VTENTRY)
2485 {
2486 if (wrel != rel)
2487 *wrel = *rel;
2488 continue;
2489 }
2490
2491 r_symndx = htab->r_sym (rel->r_info);
2492 converted_reloc = (r_type & R_X86_64_converted_reloc_bit) != 0;
2493 if (converted_reloc)
2494 {
2495 r_type &= ~R_X86_64_converted_reloc_bit;
2496 rel->r_info = htab->r_info (r_symndx, r_type);
2497 }
2498
2499 howto = elf_x86_64_rtype_to_howto (input_bfd, r_type);
2500 if (howto == NULL)
2501 return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);
2502
2503 h = NULL;
2504 sym = NULL;
2505 sec = NULL;
2506 unresolved_reloc = false;
2507 if (r_symndx < symtab_hdr->sh_info)
2508 {
2509 sym = local_syms + r_symndx;
2510 sec = local_sections[r_symndx];
2511
2512 relocation = _bfd_elf_rela_local_sym (output_bfd, sym,
2513 &sec, rel);
2514 st_size = sym->st_size;
2515
2516 /* Relocate against local STT_GNU_IFUNC symbol. */
2517 if (!bfd_link_relocatable (info)
2518 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
2519 {
2520 h = _bfd_elf_x86_get_local_sym_hash (htab, input_bfd,
2521 rel, false);
2522 if (h == NULL)
2523 abort ();
2524
2525 /* Set STT_GNU_IFUNC symbol value. */
2526 h->root.u.def.value = sym->st_value;
2527 h->root.u.def.section = sec;
2528 }
2529 }
2530 else
2531 {
2532 bool warned ATTRIBUTE_UNUSED;
2533 bool ignored ATTRIBUTE_UNUSED;
2534
2535 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
2536 r_symndx, symtab_hdr, sym_hashes,
2537 h, sec, relocation,
2538 unresolved_reloc, warned, ignored);
2539 st_size = h->size;
2540 }
2541
2542 if (sec != NULL && discarded_section (sec))
2543 {
2544 _bfd_clear_contents (howto, input_bfd, input_section,
2545 contents, rel->r_offset);
2546 wrel->r_offset = rel->r_offset;
2547 wrel->r_info = 0;
2548 wrel->r_addend = 0;
2549
2550 /* For ld -r, remove relocations in debug sections against
2551 sections defined in discarded sections.  Not done for .eh_frame,
2552 whose editing code expects the relocations to be present.  */
2553 if (bfd_link_relocatable (info)
2554 && (input_section->flags & SEC_DEBUGGING))
2555 wrel--;
2556
2557 continue;
2558 }
2559
2560 if (bfd_link_relocatable (info))
2561 {
2562 if (wrel != rel)
2563 *wrel = *rel;
2564 continue;
2565 }
2566
2567 if (rel->r_addend == 0 && !ABI_64_P (output_bfd))
2568 {
2569 if (r_type == R_X86_64_64)
2570 {
2571 /* For x32, treat R_X86_64_64 like R_X86_64_32 and
2572 zero-extend it to 64bit if addend is zero. */
2573 r_type = R_X86_64_32;
2574 memset (contents + rel->r_offset + 4, 0, 4);
2575 }
2576 else if (r_type == R_X86_64_SIZE64)
2577 {
2578 /* For x32, treat R_X86_64_SIZE64 like R_X86_64_SIZE32 and
2579 zero-extend it to 64bit if addend is zero. */
2580 r_type = R_X86_64_SIZE32;
2581 memset (contents + rel->r_offset + 4, 0, 4);
2582 }
2583 }
2584
2585 eh = (struct elf_x86_link_hash_entry *) h;
2586
2587 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
2588 it here if it is defined in a non-shared object. */
2589 if (h != NULL
2590 && h->type == STT_GNU_IFUNC
2591 && h->def_regular)
2592 {
2593 bfd_vma plt_index;
2594 const char *name;
2595
2596 if ((input_section->flags & SEC_ALLOC) == 0)
2597 {
2598 /* If this is a SHT_NOTE section without SHF_ALLOC, treat
2599 STT_GNU_IFUNC symbol as STT_FUNC. */
2600 if (elf_section_type (input_section) == SHT_NOTE)
2601 goto skip_ifunc;
2602 /* Dynamic relocs are not propagated for SEC_DEBUGGING
2603 sections because such sections are not SEC_ALLOC and
2604 thus ld.so will not process them. */
2605 if ((input_section->flags & SEC_DEBUGGING) != 0)
2606 continue;
2607 abort ();
2608 }
2609
2610 switch (r_type)
2611 {
2612 default:
2613 break;
2614
2615 case R_X86_64_GOTPCREL:
2616 case R_X86_64_GOTPCRELX:
2617 case R_X86_64_REX_GOTPCRELX:
2618 case R_X86_64_GOTPCREL64:
2619 base_got = htab->elf.sgot;
2620 off = h->got.offset;
2621
2622 if (base_got == NULL)
2623 abort ();
2624
2625 if (off == (bfd_vma) -1)
2626 {
2627 /* We can't use h->got.offset here to save state, or
2628 even just remember the offset, as finish_dynamic_symbol
2629 would use that as offset into .got. */
2630
2631 if (h->plt.offset == (bfd_vma) -1)
2632 abort ();
2633
2634 if (htab->elf.splt != NULL)
2635 {
2636 plt_index = (h->plt.offset / plt_entry_size
2637 - htab->plt.has_plt0);
2638 off = (plt_index + 3) * GOT_ENTRY_SIZE;
2639 base_got = htab->elf.sgotplt;
2640 }
2641 else
2642 {
2643 plt_index = h->plt.offset / plt_entry_size;
2644 off = plt_index * GOT_ENTRY_SIZE;
2645 base_got = htab->elf.igotplt;
2646 }
2647
2648 if (h->dynindx == -1
2649 || h->forced_local
2650 || info->symbolic)
2651 {
2652 /* This references the local definition.  We must
2653 initialize this entry in the global offset table.
2654 Since the offset must always be a multiple of 8,
2655 we use the least significant bit to record
2656 whether we have initialized it already.
2657
2658 When doing a dynamic link, we create a .rela.got
2659 relocation entry to initialize the value. This
2660 is done in the finish_dynamic_symbol routine. */
2661 if ((off & 1) != 0)
2662 off &= ~1;
2663 else
2664 {
2665 bfd_put_64 (output_bfd, relocation,
2666 base_got->contents + off);
2667 /* Note that this is harmless for the GOTPLT64
2668 case, as -1 | 1 still is -1. */
2669 h->got.offset |= 1;
2670 }
2671 }
2672 }
2673
2674 relocation = (base_got->output_section->vma
2675 + base_got->output_offset + off);
2676
2677 goto do_relocation;
2678 }
2679
2680 if (h->plt.offset == (bfd_vma) -1)
2681 {
2682 /* Handle static pointers of STT_GNU_IFUNC symbols. */
2683 if (r_type == htab->pointer_r_type
2684 && (input_section->flags & SEC_CODE) == 0)
2685 goto do_ifunc_pointer;
2686 goto bad_ifunc_reloc;
2687 }
2688
2689 /* STT_GNU_IFUNC symbol must go through PLT. */
2690 if (htab->elf.splt != NULL)
2691 {
2692 if (htab->plt_second != NULL)
2693 {
2694 resolved_plt = htab->plt_second;
2695 plt_offset = eh->plt_second.offset;
2696 }
2697 else
2698 {
2699 resolved_plt = htab->elf.splt;
2700 plt_offset = h->plt.offset;
2701 }
2702 }
2703 else
2704 {
2705 resolved_plt = htab->elf.iplt;
2706 plt_offset = h->plt.offset;
2707 }
2708
2709 relocation = (resolved_plt->output_section->vma
2710 + resolved_plt->output_offset + plt_offset);
2711
2712 switch (r_type)
2713 {
2714 default:
2715 bad_ifunc_reloc:
2716 if (h->root.root.string)
2717 name = h->root.root.string;
2718 else
2719 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
2720 NULL);
2721 _bfd_error_handler
2722 /* xgettext:c-format */
2723 (_("%pB: relocation %s against STT_GNU_IFUNC "
2724 "symbol `%s' isn't supported"), input_bfd,
2725 howto->name, name);
2726 bfd_set_error (bfd_error_bad_value);
2727 return false;
2728
2729 case R_X86_64_32S:
2730 if (bfd_link_pic (info))
2731 abort ();
2732 goto do_relocation;
2733
2734 case R_X86_64_32:
2735 if (ABI_64_P (output_bfd))
2736 goto do_relocation;
2737 /* FALLTHROUGH */
2738 case R_X86_64_64:
2739 do_ifunc_pointer:
2740 if (rel->r_addend != 0)
2741 {
2742 if (h->root.root.string)
2743 name = h->root.root.string;
2744 else
2745 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
2746 sym, NULL);
2747 _bfd_error_handler
2748 /* xgettext:c-format */
2749 (_("%pB: relocation %s against STT_GNU_IFUNC "
2750 "symbol `%s' has non-zero addend: %" PRId64),
2751 input_bfd, howto->name, name, (int64_t) rel->r_addend);
2752 bfd_set_error (bfd_error_bad_value);
2753 return false;
2754 }
2755
2756 /* Generate a dynamic relocation only when there is a
2757 non-GOT reference in a shared object or there is no
2758 PLT. */
2759 if ((bfd_link_pic (info) && h->non_got_ref)
2760 || h->plt.offset == (bfd_vma) -1)
2761 {
2762 Elf_Internal_Rela outrel;
2763 asection *sreloc;
2764
2765 /* Need a dynamic relocation to get the real function
2766 address. */
2767 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
2768 info,
2769 input_section,
2770 rel->r_offset);
2771 if (outrel.r_offset == (bfd_vma) -1
2772 || outrel.r_offset == (bfd_vma) -2)
2773 abort ();
2774
2775 outrel.r_offset += (input_section->output_section->vma
2776 + input_section->output_offset);
2777
2778 if (POINTER_LOCAL_IFUNC_P (info, h))
2779 {
2780 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
2781 h->root.root.string,
2782 h->root.u.def.section->owner);
2783
2784 /* This symbol is resolved locally. */
2785 outrel.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
2786 outrel.r_addend = (h->root.u.def.value
2787 + h->root.u.def.section->output_section->vma
2788 + h->root.u.def.section->output_offset);
2789
2790 if (htab->params->report_relative_reloc)
2791 _bfd_x86_elf_link_report_relative_reloc
2792 (info, input_section, h, sym,
2793 "R_X86_64_IRELATIVE", &outrel);
2794 }
2795 else
2796 {
2797 outrel.r_info = htab->r_info (h->dynindx, r_type);
2798 outrel.r_addend = 0;
2799 }
2800
2801 /* Dynamic relocations are stored in
2802 1. .rela.ifunc section in PIC object.
2803 2. .rela.got section in dynamic executable.
2804 3. .rela.iplt section in static executable. */
2805 if (bfd_link_pic (info))
2806 sreloc = htab->elf.irelifunc;
2807 else if (htab->elf.splt != NULL)
2808 sreloc = htab->elf.srelgot;
2809 else
2810 sreloc = htab->elf.irelplt;
2811 elf_append_rela (output_bfd, sreloc, &outrel);
2812
2813 /* If this reloc is against an external symbol, we
2814 do not want to fiddle with the addend. Otherwise,
2815 we need to include the symbol value so that it
2816 becomes an addend for the dynamic reloc. For an
2817 internal symbol, we have updated the addend.  */
2818 continue;
2819 }
2820 /* FALLTHROUGH */
2821 case R_X86_64_PC32:
2822 case R_X86_64_PC32_BND:
2823 case R_X86_64_PC64:
2824 case R_X86_64_PLT32:
2825 case R_X86_64_PLT32_BND:
2826 goto do_relocation;
2827 }
2828 }
2829
2830 skip_ifunc:
2831 resolved_to_zero = (eh != NULL
2832 && UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh));
2833
2834 /* When generating a shared object, the relocations handled here are
2835 copied into the output file to be resolved at run time. */
2836 switch (r_type)
2837 {
2838 case R_X86_64_GOT32:
2839 case R_X86_64_GOT64:
2840 /* Relocation is to the entry for this symbol in the global
2841 offset table. */
2842 case R_X86_64_GOTPCREL:
2843 case R_X86_64_GOTPCRELX:
2844 case R_X86_64_REX_GOTPCRELX:
2845 case R_X86_64_GOTPCREL64:
2846 /* Use global offset table entry as symbol value. */
2847 case R_X86_64_GOTPLT64:
2848 /* This is obsolete and treated the same as GOT64. */
2849 base_got = htab->elf.sgot;
2850
2851 if (htab->elf.sgot == NULL)
2852 abort ();
2853
2854 relative_reloc = false;
2855 if (h != NULL)
2856 {
2857 off = h->got.offset;
2858 if (h->needs_plt
2859 && h->plt.offset != (bfd_vma)-1
2860 && off == (bfd_vma)-1)
2861 {
2862 /* We can't use h->got.offset here to save
2863 state, or even just remember the offset, as
2864 finish_dynamic_symbol would use that as offset into
2865 .got. */
2866 bfd_vma plt_index = (h->plt.offset / plt_entry_size
2867 - htab->plt.has_plt0);
2868 off = (plt_index + 3) * GOT_ENTRY_SIZE;
2869 base_got = htab->elf.sgotplt;
2870 }
2871
2872 if (RESOLVED_LOCALLY_P (info, h, htab))
2873 {
2874 /* We must initialize this entry in the global offset
2875 table. Since the offset must always be a multiple
2876 of 8, we use the least significant bit to record
2877 whether we have initialized it already.
2878
2879 When doing a dynamic link, we create a .rela.got
2880 relocation entry to initialize the value. This is
2881 done in the finish_dynamic_symbol routine. */
2882 if ((off & 1) != 0)
2883 off &= ~1;
2884 else
2885 {
2886 bfd_put_64 (output_bfd, relocation,
2887 base_got->contents + off);
2888 /* Note that this is harmless for the GOTPLT64 case,
2889 as -1 | 1 still is -1. */
2890 h->got.offset |= 1;
2891
2892 if (GENERATE_RELATIVE_RELOC_P (info, h))
2893 {
2894 /* If this symbol isn't dynamic in PIC,
2895 generate R_X86_64_RELATIVE here. */
2896 eh->no_finish_dynamic_symbol = 1;
2897 relative_reloc = true;
2898 }
2899 }
2900 }
2901 else
2902 unresolved_reloc = false;
2903 }
2904 else
2905 {
2906 if (local_got_offsets == NULL)
2907 abort ();
2908
2909 off = local_got_offsets[r_symndx];
2910
2911 /* The offset must always be a multiple of 8. We use
2912 the least significant bit to record whether we have
2913 already generated the necessary reloc. */
2914 if ((off & 1) != 0)
2915 off &= ~1;
2916 else
2917 {
2918 bfd_put_64 (output_bfd, relocation,
2919 base_got->contents + off);
2920 local_got_offsets[r_symndx] |= 1;
2921
2922 /* NB: GOTPCREL relocations against local absolute
2923 symbol store relocation value in the GOT slot
2924 without relative relocation. */
2925 if (bfd_link_pic (info)
2926 && !(sym->st_shndx == SHN_ABS
2927 && (r_type == R_X86_64_GOTPCREL
2928 || r_type == R_X86_64_GOTPCRELX
2929 || r_type == R_X86_64_REX_GOTPCRELX)))
2930 relative_reloc = true;
2931 }
2932 }
2933
2934 if (relative_reloc)
2935 {
2936 asection *s;
2937 Elf_Internal_Rela outrel;
2938
2939 /* We need to generate a R_X86_64_RELATIVE reloc
2940 for the dynamic linker. */
2941 s = htab->elf.srelgot;
2942 if (s == NULL)
2943 abort ();
2944
2945 outrel.r_offset = (base_got->output_section->vma
2946 + base_got->output_offset
2947 + off);
2948 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
2949 outrel.r_addend = relocation;
2950
2951 if (htab->params->report_relative_reloc)
2952 _bfd_x86_elf_link_report_relative_reloc
2953 (info, input_section, h, sym, "R_X86_64_RELATIVE",
2954 &outrel);
2955
2956 elf_append_rela (output_bfd, s, &outrel);
2957 }
2958
2959 if (off >= (bfd_vma) -2)
2960 abort ();
2961
2962 relocation = base_got->output_section->vma
2963 + base_got->output_offset + off;
2964 if (r_type != R_X86_64_GOTPCREL
2965 && r_type != R_X86_64_GOTPCRELX
2966 && r_type != R_X86_64_REX_GOTPCRELX
2967 && r_type != R_X86_64_GOTPCREL64)
2968 relocation -= htab->elf.sgotplt->output_section->vma
2969 - htab->elf.sgotplt->output_offset;
2970
2971 break;
2972
2973 case R_X86_64_GOTOFF64:
2974 /* Relocation is relative to the start of the global offset
2975 table. */
2976
2977 /* Check to make sure it isn't a protected function or data
2978 symbol for a shared library, since it may not be local when
2979 used as a function address or with a copy relocation.  We
2980 also need to make sure that the symbol is referenced locally.  */
2981 if (bfd_link_pic (info) && h)
2982 {
2983 if (!h->def_regular)
2984 {
2985 const char *v;
2986
2987 switch (ELF_ST_VISIBILITY (h->other))
2988 {
2989 case STV_HIDDEN:
2990 v = _("hidden symbol");
2991 break;
2992 case STV_INTERNAL:
2993 v = _("internal symbol");
2994 break;
2995 case STV_PROTECTED:
2996 v = _("protected symbol");
2997 break;
2998 default:
2999 v = _("symbol");
3000 break;
3001 }
3002
3003 _bfd_error_handler
3004 /* xgettext:c-format */
3005 (_("%pB: relocation R_X86_64_GOTOFF64 against undefined %s"
3006 " `%s' can not be used when making a shared object"),
3007 input_bfd, v, h->root.root.string);
3008 bfd_set_error (bfd_error_bad_value);
3009 return false;
3010 }
3011 else if (!bfd_link_executable (info)
3012 && !SYMBOL_REFERENCES_LOCAL_P (info, h)
3013 && (h->type == STT_FUNC
3014 || h->type == STT_OBJECT)
3015 && ELF_ST_VISIBILITY (h->other) == STV_PROTECTED)
3016 {
3017 _bfd_error_handler
3018 /* xgettext:c-format */
3019 (_("%pB: relocation R_X86_64_GOTOFF64 against protected %s"
3020 " `%s' can not be used when making a shared object"),
3021 input_bfd,
3022 h->type == STT_FUNC ? "function" : "data",
3023 h->root.root.string);
3024 bfd_set_error (bfd_error_bad_value);
3025 return false;
3026 }
3027 }
3028
3029 /* Note that sgot is not involved in this
3030 calculation. We always want the start of .got.plt. If we
3031 defined _GLOBAL_OFFSET_TABLE_ in a different way, as is
3032 permitted by the ABI, we might have to change this
3033 calculation. */
3034 relocation -= htab->elf.sgotplt->output_section->vma
3035 + htab->elf.sgotplt->output_offset;
3036 break;
3037
3038 case R_X86_64_GOTPC32:
3039 case R_X86_64_GOTPC64:
3040 /* Use global offset table as symbol value. */
3041 relocation = htab->elf.sgotplt->output_section->vma
3042 + htab->elf.sgotplt->output_offset;
3043 unresolved_reloc = false;
3044 break;
3045
3046 case R_X86_64_PLTOFF64:
3047 /* Relocation is PLT entry relative to GOT. For local
3048 symbols it's the symbol itself relative to GOT. */
3049 if (h != NULL
3050 /* See PLT32 handling. */
3051 && (h->plt.offset != (bfd_vma) -1
3052 || eh->plt_got.offset != (bfd_vma) -1)
3053 && htab->elf.splt != NULL)
3054 {
3055 if (eh->plt_got.offset != (bfd_vma) -1)
3056 {
3057 /* Use the GOT PLT. */
3058 resolved_plt = htab->plt_got;
3059 plt_offset = eh->plt_got.offset;
3060 }
3061 else if (htab->plt_second != NULL)
3062 {
3063 resolved_plt = htab->plt_second;
3064 plt_offset = eh->plt_second.offset;
3065 }
3066 else
3067 {
3068 resolved_plt = htab->elf.splt;
3069 plt_offset = h->plt.offset;
3070 }
3071
3072 relocation = (resolved_plt->output_section->vma
3073 + resolved_plt->output_offset
3074 + plt_offset);
3075 unresolved_reloc = false;
3076 }
3077
3078 relocation -= htab->elf.sgotplt->output_section->vma
3079 + htab->elf.sgotplt->output_offset;
3080 break;
3081
3082 case R_X86_64_PLT32:
3083 case R_X86_64_PLT32_BND:
3084 /* Relocation is to the entry for this symbol in the
3085 procedure linkage table. */
3086
3087 /* Resolve a PLT32 reloc against a local symbol directly,
3088 without using the procedure linkage table. */
3089 if (h == NULL)
3090 break;
3091
3092 if ((h->plt.offset == (bfd_vma) -1
3093 && eh->plt_got.offset == (bfd_vma) -1)
3094 || htab->elf.splt == NULL)
3095 {
3096 /* We didn't make a PLT entry for this symbol. This
3097 happens when statically linking PIC code, or when
3098 using -Bsymbolic. */
3099 break;
3100 }
3101
3102 use_plt:
3103 if (h->plt.offset != (bfd_vma) -1)
3104 {
3105 if (htab->plt_second != NULL)
3106 {
3107 resolved_plt = htab->plt_second;
3108 plt_offset = eh->plt_second.offset;
3109 }
3110 else
3111 {
3112 resolved_plt = htab->elf.splt;
3113 plt_offset = h->plt.offset;
3114 }
3115 }
3116 else
3117 {
3118 /* Use the GOT PLT. */
3119 resolved_plt = htab->plt_got;
3120 plt_offset = eh->plt_got.offset;
3121 }
3122
3123 relocation = (resolved_plt->output_section->vma
3124 + resolved_plt->output_offset
3125 + plt_offset);
3126 unresolved_reloc = false;
3127 break;
3128
3129 case R_X86_64_SIZE32:
3130 case R_X86_64_SIZE64:
3131 /* Set to symbol size. */
3132 relocation = st_size;
3133 goto direct;
3134
3135 case R_X86_64_PC8:
3136 case R_X86_64_PC16:
3137 case R_X86_64_PC32:
3138 case R_X86_64_PC32_BND:
3139 /* Don't complain about -fPIC if the symbol is undefined when
3140 building an executable, unless it is an unresolved weak symbol,
3141 references a dynamic definition in PIE, or -z nocopyreloc
3142 is used.  */
3143 no_copyreloc_p
3144 = (info->nocopyreloc
3145 || (h != NULL
3146 && !h->root.linker_def
3147 && !h->root.ldscript_def
3148 && eh->def_protected
3149 && elf_has_no_copy_on_protected (h->root.u.def.section->owner)));
3150
3151 if ((input_section->flags & SEC_ALLOC) != 0
3152 && (input_section->flags & SEC_READONLY) != 0
3153 && h != NULL
3154 && ((bfd_link_executable (info)
3155 && ((h->root.type == bfd_link_hash_undefweak
3156 && (eh == NULL
3157 || !UNDEFINED_WEAK_RESOLVED_TO_ZERO (info,
3158 eh)))
3159 || (bfd_link_pie (info)
3160 && !SYMBOL_DEFINED_NON_SHARED_P (h)
3161 && h->def_dynamic)
3162 || (no_copyreloc_p
3163 && h->def_dynamic
3164 && !(h->root.u.def.section->flags & SEC_CODE))))
3165 || bfd_link_dll (info)))
3166 {
3167 bool fail = false;
3168 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
3169 {
3170 /* Symbol is referenced locally. Make sure it is
3171 defined locally. */
3172 fail = !SYMBOL_DEFINED_NON_SHARED_P (h);
3173 }
3174 else if (bfd_link_pie (info))
3175 {
3176 /* We can only use PC-relative relocations in PIE
3177 from non-code sections. */
3178 if (h->type == STT_FUNC
3179 && (sec->flags & SEC_CODE) != 0)
3180 fail = true;
3181 }
3182 else if (no_copyreloc_p || bfd_link_dll (info))
3183 {
3184 /* Symbol doesn't need copy reloc and isn't
3185 referenced locally. Don't allow PC-relative
3186 relocations against default and protected
3187 symbols since address of protected function
3188 and location of protected data may not be in
3189 the shared object. */
3190 fail = (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
3191 || ELF_ST_VISIBILITY (h->other) == STV_PROTECTED);
3192 }
3193
3194 if (fail)
3195 return elf_x86_64_need_pic (info, input_bfd, input_section,
3196 h, NULL, NULL, howto);
3197 }
3198 /* Since x86-64 has PC-relative PLT, we can use PLT in PIE
3199 as function address. */
3200 else if (h != NULL
3201 && (input_section->flags & SEC_CODE) == 0
3202 && bfd_link_pie (info)
3203 && h->type == STT_FUNC
3204 && !h->def_regular
3205 && h->def_dynamic)
3206 goto use_plt;
3207 /* Fall through. */
3208
3209 case R_X86_64_8:
3210 case R_X86_64_16:
3211 case R_X86_64_32:
3212 case R_X86_64_PC64:
3213 case R_X86_64_64:
3214 /* FIXME: The ABI says the linker should make sure the value is
3215 the same when it is zero-extended to 64 bits.  */
3216
3217 direct:
3218 if ((input_section->flags & SEC_ALLOC) == 0)
3219 break;
3220
3221 need_copy_reloc_in_pie = (bfd_link_pie (info)
3222 && h != NULL
3223 && (h->needs_copy
3224 || eh->needs_copy
3225 || (h->root.type
3226 == bfd_link_hash_undefined))
3227 && (X86_PCREL_TYPE_P (r_type)
3228 || X86_SIZE_TYPE_P (r_type)));
3229
3230 if (GENERATE_DYNAMIC_RELOCATION_P (info, eh, r_type, sec,
3231 need_copy_reloc_in_pie,
3232 resolved_to_zero, false))
3233 {
3234 Elf_Internal_Rela outrel;
3235 bool skip, relocate;
3236 asection *sreloc;
3237 const char *relative_reloc_name = NULL;
3238
3239 /* When generating a shared object, these relocations
3240 are copied into the output file to be resolved at run
3241 time. */
3242 skip = false;
3243 relocate = false;
3244
3245 outrel.r_offset =
3246 _bfd_elf_section_offset (output_bfd, info, input_section,
3247 rel->r_offset);
3248 if (outrel.r_offset == (bfd_vma) -1)
3249 skip = true;
3250 else if (outrel.r_offset == (bfd_vma) -2)
3251 skip = true, relocate = true;
3252
3253 outrel.r_offset += (input_section->output_section->vma
3254 + input_section->output_offset);
3255
3256 if (skip)
3257 memset (&outrel, 0, sizeof outrel);
3258
3259 else if (COPY_INPUT_RELOC_P (info, h, r_type))
3260 {
3261 outrel.r_info = htab->r_info (h->dynindx, r_type);
3262 outrel.r_addend = rel->r_addend;
3263 }
3264 else
3265 {
3266 /* This symbol is local, or marked to become local.
3267 When relocation overflow check is disabled, we
3268 convert R_X86_64_32 to dynamic R_X86_64_RELATIVE. */
3269 if (r_type == htab->pointer_r_type
3270 || (r_type == R_X86_64_32
3271 && htab->params->no_reloc_overflow_check))
3272 {
3273 relocate = true;
3274 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
3275 outrel.r_addend = relocation + rel->r_addend;
3276 relative_reloc_name = "R_X86_64_RELATIVE";
3277 }
3278 else if (r_type == R_X86_64_64
3279 && !ABI_64_P (output_bfd))
3280 {
3281 relocate = true;
3282 outrel.r_info = htab->r_info (0,
3283 R_X86_64_RELATIVE64);
3284 outrel.r_addend = relocation + rel->r_addend;
3285 relative_reloc_name = "R_X86_64_RELATIVE64";
3286 /* Check addend overflow. */
3287 if ((outrel.r_addend & 0x80000000)
3288 != (rel->r_addend & 0x80000000))
3289 {
3290 const char *name;
3291 int addend = rel->r_addend;
3292 if (h && h->root.root.string)
3293 name = h->root.root.string;
3294 else
3295 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
3296 sym, NULL);
3297 _bfd_error_handler
3298 /* xgettext:c-format */
3299 (_("%pB: addend %s%#x in relocation %s against "
3300 "symbol `%s' at %#" PRIx64
3301 " in section `%pA' is out of range"),
3302 input_bfd, addend < 0 ? "-" : "", addend,
3303 howto->name, name, (uint64_t) rel->r_offset,
3304 input_section);
3305 bfd_set_error (bfd_error_bad_value);
3306 return false;
3307 }
3308 }
3309 else
3310 {
3311 long sindx;
3312
3313 if (bfd_is_abs_section (sec))
3314 sindx = 0;
3315 else if (sec == NULL || sec->owner == NULL)
3316 {
3317 bfd_set_error (bfd_error_bad_value);
3318 return false;
3319 }
3320 else
3321 {
3322 asection *osec;
3323
3324 /* We are turning this relocation into one
3325 against a section symbol. It would be
3326 proper to subtract the symbol's value,
3327 osec->vma, from the emitted reloc addend,
3328 but ld.so expects buggy relocs. */
3329 osec = sec->output_section;
3330 sindx = elf_section_data (osec)->dynindx;
3331 if (sindx == 0)
3332 {
3333 asection *oi = htab->elf.text_index_section;
3334 sindx = elf_section_data (oi)->dynindx;
3335 }
3336 BFD_ASSERT (sindx != 0);
3337 }
3338
3339 outrel.r_info = htab->r_info (sindx, r_type);
3340 outrel.r_addend = relocation + rel->r_addend;
3341 }
3342 }
3343
3344 sreloc = elf_section_data (input_section)->sreloc;
3345
3346 if (sreloc == NULL || sreloc->contents == NULL)
3347 {
3348 r = bfd_reloc_notsupported;
3349 goto check_relocation_error;
3350 }
3351
3352 if (relative_reloc_name
3353 && htab->params->report_relative_reloc)
3354 _bfd_x86_elf_link_report_relative_reloc
3355 (info, input_section, h, sym, relative_reloc_name,
3356 &outrel);
3357
3358 elf_append_rela (output_bfd, sreloc, &outrel);
3359
3360 /* If this reloc is against an external symbol, we do
3361 not want to fiddle with the addend. Otherwise, we
3362 need to include the symbol value so that it becomes
3363 an addend for the dynamic reloc. */
3364 if (! relocate)
3365 continue;
3366 }
3367
3368 break;
3369
3370 case R_X86_64_TLSGD:
3371 case R_X86_64_GOTPC32_TLSDESC:
3372 case R_X86_64_TLSDESC_CALL:
3373 case R_X86_64_GOTTPOFF:
3374 tls_type = GOT_UNKNOWN;
3375 if (h == NULL && local_got_offsets)
3376 tls_type = elf_x86_local_got_tls_type (input_bfd) [r_symndx];
3377 else if (h != NULL)
3378 tls_type = elf_x86_hash_entry (h)->tls_type;
3379
3380 r_type_tls = r_type;
3381 if (! elf_x86_64_tls_transition (info, input_bfd,
3382 input_section, contents,
3383 symtab_hdr, sym_hashes,
3384 &r_type_tls, tls_type, rel,
3385 relend, h, r_symndx, true))
3386 return false;
3387
3388 if (r_type_tls == R_X86_64_TPOFF32)
3389 {
3390 bfd_vma roff = rel->r_offset;
3391
3392 BFD_ASSERT (! unresolved_reloc);
3393
3394 if (r_type == R_X86_64_TLSGD)
3395 {
3396 /* GD->LE transition. For 64bit, change
3397 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3398 .word 0x6666; rex64; call __tls_get_addr@PLT
3399 or
3400 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3401 .byte 0x66; rex64
3402 call *__tls_get_addr@GOTPCREL(%rip)
3403 which may be converted to
3404 addr32 call __tls_get_addr
3405 into:
3406 movq %fs:0, %rax
3407 leaq foo@tpoff(%rax), %rax
3408 For 32bit, change
3409 leaq foo@tlsgd(%rip), %rdi
3410 .word 0x6666; rex64; call __tls_get_addr@PLT
3411 or
3412 leaq foo@tlsgd(%rip), %rdi
3413 .byte 0x66; rex64
3414 call *__tls_get_addr@GOTPCREL(%rip)
3415 which may be converted to
3416 addr32 call __tls_get_addr
3417 into:
3418 movl %fs:0, %eax
3419 leaq foo@tpoff(%rax), %rax
3420 For largepic, change:
3421 leaq foo@tlsgd(%rip), %rdi
3422 movabsq $__tls_get_addr@pltoff, %rax
3423 addq %r15, %rax
3424 call *%rax
3425 into:
3426 movq %fs:0, %rax
3427 leaq foo@tpoff(%rax), %rax
3428 nopw 0x0(%rax,%rax,1) */
3429 int largepic = 0;
3430 if (ABI_64_P (output_bfd))
3431 {
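/* In the large code model the leaq is followed by
   movabsq $__tls_get_addr@pltoff, %rax (48 b8 imm64), so the
   0xb8 at roff + 5 identifies that sequence.  */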
3432 if (contents[roff + 5] == 0xb8)
3433 {
3434 if (roff < 3
3435 || (roff - 3 + 22) > input_section->size)
3436 {
3437 corrupt_input:
3438 info->callbacks->einfo
3439 (_("%F%P: corrupt input: %pB\n"),
3440 input_bfd);
3441 return false;
3442 }
3443 memcpy (contents + roff - 3,
3444 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80"
3445 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
3446 largepic = 1;
3447 }
3448 else
3449 {
3450 if (roff < 4
3451 || (roff - 4 + 16) > input_section->size)
3452 goto corrupt_input;
3453 memcpy (contents + roff - 4,
3454 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3455 16);
3456 }
3457 }
3458 else
3459 {
3460 if (roff < 3
3461 || (roff - 3 + 15) > input_section->size)
3462 goto corrupt_input;
3463 memcpy (contents + roff - 3,
3464 "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3465 15);
3466 }
3467 bfd_put_32 (output_bfd,
3468 elf_x86_64_tpoff (info, relocation),
3469 contents + roff + 8 + largepic);
3470 /* Skip R_X86_64_PC32, R_X86_64_PLT32,
3471 R_X86_64_GOTPCRELX and R_X86_64_PLTOFF64. */
3472 rel++;
3473 wrel++;
3474 continue;
3475 }
3476 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3477 {
3478 /* GDesc -> LE transition.
3479 It's originally something like:
3480 leaq x@tlsdesc(%rip), %rax <--- LP64 mode.
3481 rex leal x@tlsdesc(%rip), %eax <--- X32 mode.
3482
3483 Change it to:
3484 movq $x@tpoff, %rax <--- LP64 mode.
3485 rex movl $x@tpoff, %eax <--- X32 mode.
3486 */
3487
3488 unsigned int val, type;
3489
3490 if (roff < 3)
3491 goto corrupt_input;
3492 type = bfd_get_8 (input_bfd, contents + roff - 3);
3493 val = bfd_get_8 (input_bfd, contents + roff - 1);
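/* TYPE is the original REX prefix and VAL the original ModRM byte.
   Keep the 0x40 base and REX.W, and move REX.R down to REX.B: the
   destination register switches from the reg field to the r/m field
   of the new C7 /0 (mov $imm32, %reg) encoding.  */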
3494 bfd_put_8 (output_bfd,
3495 (type & 0x48) | ((type >> 2) & 1),
3496 contents + roff - 3);
3497 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
3498 bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7),
3499 contents + roff - 1);
3500 bfd_put_32 (output_bfd,
3501 elf_x86_64_tpoff (info, relocation),
3502 contents + roff);
3503 continue;
3504 }
3505 else if (r_type == R_X86_64_TLSDESC_CALL)
3506 {
3507 /* GDesc -> LE transition.
3508 It's originally:
3509 call *(%rax) <--- LP64 mode.
3510 call *(%eax) <--- X32 mode.
3511 Turn it into:
3512 xchg %ax,%ax <-- LP64 mode.
3513 nopl (%rax) <-- X32 mode.
3514 */
3515 unsigned int prefix = 0;
3516 if (!ABI_64_P (input_bfd))
3517 {
3518 /* Check for call *x@tlsdesc(%eax). */
3519 if (contents[roff] == 0x67)
3520 prefix = 1;
3521 }
3522 if (prefix)
3523 {
3524 bfd_put_8 (output_bfd, 0x0f, contents + roff);
3525 bfd_put_8 (output_bfd, 0x1f, contents + roff + 1);
3526 bfd_put_8 (output_bfd, 0x00, contents + roff + 2);
3527 }
3528 else
3529 {
3530 bfd_put_8 (output_bfd, 0x66, contents + roff);
3531 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
3532 }
3533 continue;
3534 }
3535 else if (r_type == R_X86_64_GOTTPOFF)
3536 {
3537 /* IE->LE transition:
3538 For 64bit, originally it can be one of:
3539 movq foo@gottpoff(%rip), %reg
3540 addq foo@gottpoff(%rip), %reg
3541 We change it into:
3542 movq $foo, %reg
3543 leaq foo(%reg), %reg
3544 addq $foo, %reg.
3545 For 32bit, originally it can be one of:
3546 movq foo@gottpoff(%rip), %reg
3547 addl foo@gottpoff(%rip), %reg
3548 We change it into:
3549 movq $foo, %reg
3550 leal foo(%reg), %reg
3551 addl $foo, %reg. */
3552
3553 unsigned int val, type, reg;
3554
3555 if (roff >= 3)
3556 val = bfd_get_8 (input_bfd, contents + roff - 3);
3557 else
3558 {
3559 if (roff < 2)
3560 goto corrupt_input;
3561 val = 0;
3562 }
3563 type = bfd_get_8 (input_bfd, contents + roff - 2);
3564 reg = bfd_get_8 (input_bfd, contents + roff - 1);
3565 reg >>= 3;
3566 if (type == 0x8b)
3567 {
3568 /* movq */
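/* Rewrite the load as mov $imm32, %reg (C7 /0).  REX.R has to
   become REX.B because the register moves from the reg field to
   the r/m field: 0x4c -> 0x49, and 0x44 -> 0x41 for x32.  */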
3569 if (val == 0x4c)
3570 {
3571 if (roff < 3)
3572 goto corrupt_input;
3573 bfd_put_8 (output_bfd, 0x49,
3574 contents + roff - 3);
3575 }
3576 else if (!ABI_64_P (output_bfd) && val == 0x44)
3577 {
3578 if (roff < 3)
3579 goto corrupt_input;
3580 bfd_put_8 (output_bfd, 0x41,
3581 contents + roff - 3);
3582 }
3583 bfd_put_8 (output_bfd, 0xc7,
3584 contents + roff - 2);
3585 bfd_put_8 (output_bfd, 0xc0 | reg,
3586 contents + roff - 1);
3587 }
3588 else if (reg == 4)
3589 {
3590 /* addq/addl -> addq/addl $imm.  With %rsp/%r12 (reg == 4) the
3591 leaq form below would need a SIB byte, so keep the add.  */
3592 if (val == 0x4c)
3593 {
3594 if (roff < 3)
3595 goto corrupt_input;
3596 bfd_put_8 (output_bfd, 0x49,
3597 contents + roff - 3);
3598 }
3599 else if (!ABI_64_P (output_bfd) && val == 0x44)
3600 {
3601 if (roff < 3)
3602 goto corrupt_input;
3603 bfd_put_8 (output_bfd, 0x41,
3604 contents + roff - 3);
3605 }
3606 bfd_put_8 (output_bfd, 0x81,
3607 contents + roff - 2);
3608 bfd_put_8 (output_bfd, 0xc0 | reg,
3609 contents + roff - 1);
3610 }
3611 else
3612 {
3613 /* addq/addl -> leaq/leal */
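/* The new ModRM (0x80 | reg | reg << 3) names the register in both
   the reg and r/m fields, so the REX prefix needs both R and B:
   0x4c -> 0x4d, 0x44 -> 0x45.  */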
3614 if (val == 0x4c)
3615 {
3616 if (roff < 3)
3617 goto corrupt_input;
3618 bfd_put_8 (output_bfd, 0x4d,
3619 contents + roff - 3);
3620 }
3621 else if (!ABI_64_P (output_bfd) && val == 0x44)
3622 {
3623 if (roff < 3)
3624 goto corrupt_input;
3625 bfd_put_8 (output_bfd, 0x45,
3626 contents + roff - 3);
3627 }
3628 bfd_put_8 (output_bfd, 0x8d,
3629 contents + roff - 2);
3630 bfd_put_8 (output_bfd, 0x80 | reg | (reg << 3),
3631 contents + roff - 1);
3632 }
3633 bfd_put_32 (output_bfd,
3634 elf_x86_64_tpoff (info, relocation),
3635 contents + roff);
3636 continue;
3637 }
3638 else
3639 BFD_ASSERT (false);
3640 }
3641
3642 if (htab->elf.sgot == NULL)
3643 abort ();
3644
3645 if (h != NULL)
3646 {
3647 off = h->got.offset;
3648 offplt = elf_x86_hash_entry (h)->tlsdesc_got;
3649 }
3650 else
3651 {
3652 if (local_got_offsets == NULL)
3653 abort ();
3654
3655 off = local_got_offsets[r_symndx];
3656 offplt = local_tlsdesc_gotents[r_symndx];
3657 }
3658
3659 if ((off & 1) != 0)
3660 off &= ~1;
3661 else
3662 {
3663 Elf_Internal_Rela outrel;
3664 int dr_type, indx;
3665 asection *sreloc;
3666
3667 if (htab->elf.srelgot == NULL)
3668 abort ();
3669
3670 indx = h && h->dynindx != -1 ? h->dynindx : 0;
3671
3672 if (GOT_TLS_GDESC_P (tls_type))
3673 {
3674 outrel.r_info = htab->r_info (indx, R_X86_64_TLSDESC);
3675 BFD_ASSERT (htab->sgotplt_jump_table_size + offplt
3676 + 2 * GOT_ENTRY_SIZE <= htab->elf.sgotplt->size);
3677 outrel.r_offset = (htab->elf.sgotplt->output_section->vma
3678 + htab->elf.sgotplt->output_offset
3679 + offplt
3680 + htab->sgotplt_jump_table_size);
3681 sreloc = htab->elf.srelplt;
3682 if (indx == 0)
3683 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3684 else
3685 outrel.r_addend = 0;
3686 elf_append_rela (output_bfd, sreloc, &outrel);
3687 }
3688
3689 sreloc = htab->elf.srelgot;
3690
3691 outrel.r_offset = (htab->elf.sgot->output_section->vma
3692 + htab->elf.sgot->output_offset + off);
3693
3694 if (GOT_TLS_GD_P (tls_type))
3695 dr_type = R_X86_64_DTPMOD64;
3696 else if (GOT_TLS_GDESC_P (tls_type))
3697 goto dr_done;
3698 else
3699 dr_type = R_X86_64_TPOFF64;
3700
3701 bfd_put_64 (output_bfd, 0, htab->elf.sgot->contents + off);
3702 outrel.r_addend = 0;
3703 if ((dr_type == R_X86_64_TPOFF64
3704 || dr_type == R_X86_64_TLSDESC) && indx == 0)
3705 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3706 outrel.r_info = htab->r_info (indx, dr_type);
3707
3708 elf_append_rela (output_bfd, sreloc, &outrel);
3709
3710 if (GOT_TLS_GD_P (tls_type))
3711 {
3712 if (indx == 0)
3713 {
3714 BFD_ASSERT (! unresolved_reloc);
3715 bfd_put_64 (output_bfd,
3716 relocation - _bfd_x86_elf_dtpoff_base (info),
3717 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3718 }
3719 else
3720 {
3721 bfd_put_64 (output_bfd, 0,
3722 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3723 outrel.r_info = htab->r_info (indx,
3724 R_X86_64_DTPOFF64);
3725 outrel.r_offset += GOT_ENTRY_SIZE;
3726 elf_append_rela (output_bfd, sreloc,
3727 &outrel);
3728 }
3729 }
3730
3731 dr_done:
3732 if (h != NULL)
3733 h->got.offset |= 1;
3734 else
3735 local_got_offsets[r_symndx] |= 1;
3736 }
3737
3738 if (off >= (bfd_vma) -2
3739 && ! GOT_TLS_GDESC_P (tls_type))
3740 abort ();
3741 if (r_type_tls == r_type)
3742 {
3743 if (r_type == R_X86_64_GOTPC32_TLSDESC
3744 || r_type == R_X86_64_TLSDESC_CALL)
3745 relocation = htab->elf.sgotplt->output_section->vma
3746 + htab->elf.sgotplt->output_offset
3747 + offplt + htab->sgotplt_jump_table_size;
3748 else
3749 relocation = htab->elf.sgot->output_section->vma
3750 + htab->elf.sgot->output_offset + off;
3751 unresolved_reloc = false;
3752 }
3753 else
3754 {
3755 bfd_vma roff = rel->r_offset;
3756
3757 if (r_type == R_X86_64_TLSGD)
3758 {
3759 /* GD->IE transition. For 64bit, change
3760 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3761 .word 0x6666; rex64; call __tls_get_addr@PLT
3762 or
3763 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3764 .byte 0x66; rex64
3765 call *__tls_get_addr@GOTPCREL(%rip)
3766 which may be converted to
3767 addr32 call __tls_get_addr
3768 into:
3769 movq %fs:0, %rax
3770 addq foo@gottpoff(%rip), %rax
3771 For 32bit, change
3772 leaq foo@tlsgd(%rip), %rdi
3773 .word 0x6666; rex64; call __tls_get_addr@PLT
3774 or
3775 leaq foo@tlsgd(%rip), %rdi
3776 .byte 0x66; rex64;
3777 call *__tls_get_addr@GOTPCREL(%rip)
3778 which may be converted to
3779 addr32 call __tls_get_addr
3780 into:
3781 movl %fs:0, %eax
3782 addq foo@gottpoff(%rip), %rax
3783 For largepic, change:
3784 leaq foo@tlsgd(%rip), %rdi
3785 movabsq $__tls_get_addr@pltoff, %rax
3786 addq %r15, %rax
3787 call *%rax
3788 into:
3789 movq %fs:0, %rax
3790 addq foo@gottpoff(%rip), %rax
3791 nopw 0x0(%rax,%rax,1) */
3792 int largepic = 0;
3793 if (ABI_64_P (output_bfd))
3794 {
3795 if (contents[roff + 5] == 0xb8)
3796 {
3797 if (roff < 3
3798 || (roff - 3 + 22) > input_section->size)
3799 goto corrupt_input;
3800 memcpy (contents + roff - 3,
3801 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05"
3802 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
3803 largepic = 1;
3804 }
3805 else
3806 {
3807 if (roff < 4
3808 || (roff - 4 + 16) > input_section->size)
3809 goto corrupt_input;
3810 memcpy (contents + roff - 4,
3811 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
3812 16);
3813 }
3814 }
3815 else
3816 {
3817 if (roff < 3
3818 || (roff - 3 + 15) > input_section->size)
3819 goto corrupt_input;
3820 memcpy (contents + roff - 3,
3821 "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
3822 15);
3823 }
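/* Reader's note on the byte sequences written above (64-bit case):
   64 48 8b 04 25 00 00 00 00    movq   %fs:0x0, %rax
   48 03 05 <disp32>             addq   <disp32>(%rip), %rax
   with a trailing 66 0f 1f 44 00 00 (nopw) pad for largepic; the
   @gottpoff displacement is patched in just below.  */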
3824
3825 relocation = (htab->elf.sgot->output_section->vma
3826 + htab->elf.sgot->output_offset + off
3827 - roff
3828 - largepic
3829 - input_section->output_section->vma
3830 - input_section->output_offset
3831 - 12);
3832 bfd_put_32 (output_bfd, relocation,
3833 contents + roff + 8 + largepic);
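/* The constant 12 above is the distance from roff to the end of the
   rewritten addq instruction, so the stored value is the usual
   PC-relative displacement: GOT entry address minus the address of
   the next instruction.  */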
3834 /* Skip R_X86_64_PLT32/R_X86_64_PLTOFF64. */
3835 rel++;
3836 wrel++;
3837 continue;
3838 }
3839 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3840 {
3841 /* GDesc -> IE transition.
3842 It's originally something like:
3843 leaq x@tlsdesc(%rip), %rax <--- LP64 mode.
3844 rex leal x@tlsdesc(%rip), %eax <--- X32 mode.
3845
3846 Change it to:
3847 # before xchg %ax,%ax in LP64 mode.
3848 movq x@gottpoff(%rip), %rax
3849 # before nopl (%rax) in X32 mode.
3850 rex movl x@gottpoff(%rip), %eax
3851 */
3852
3853 /* Now modify the instruction as appropriate. To
3854 turn a lea into a mov in the form we use it, it
3855 suffices to change the second byte from 0x8d to
3856 0x8b. */
3857 if (roff < 2)
3858 goto corrupt_input;
3859 bfd_put_8 (output_bfd, 0x8b, contents + roff - 2);
3860
3861 bfd_put_32 (output_bfd,
3862 htab->elf.sgot->output_section->vma
3863 + htab->elf.sgot->output_offset + off
3864 - rel->r_offset
3865 - input_section->output_section->vma
3866 - input_section->output_offset
3867 - 4,
3868 contents + roff);
3869 continue;
3870 }
3871 else if (r_type == R_X86_64_TLSDESC_CALL)
3872 {
3873 /* GDesc -> IE transition.
3874 It's originally:
3875 call *(%rax) <--- LP64 mode.
3876 call *(%eax) <--- X32 mode.
3877
3878 Change it to:
3879 xchg %ax, %ax <-- LP64 mode.
3880 nopl (%rax) <-- X32 mode.
3881 */
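/* Encoding note: the indirect call and its replacement have the same
   length: ff 10 (call *(%rax)) becomes 66 90 (2-byte nop), and
   67 ff 10 (addr32 call *(%eax)) becomes 0f 1f 00 (3-byte nopl).  */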
3882
3883 unsigned int prefix = 0;
3884 if (!ABI_64_P (input_bfd))
3885 {
3886 /* Check for call *x@tlsdesc(%eax). */
3887 if (contents[roff] == 0x67)
3888 prefix = 1;
3889 }
3890 if (prefix)
3891 {
3892 bfd_put_8 (output_bfd, 0x0f, contents + roff);
3893 bfd_put_8 (output_bfd, 0x1f, contents + roff + 1);
3894 bfd_put_8 (output_bfd, 0x00, contents + roff + 2);
3895 }
3896 else
3897 {
3898 bfd_put_8 (output_bfd, 0x66, contents + roff);
3899 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
3900 }
3901 continue;
3902 }
3903 else
3904 BFD_ASSERT (false);
3905 }
3906 break;
3907
3908 case R_X86_64_TLSLD:
3909 if (! elf_x86_64_tls_transition (info, input_bfd,
3910 input_section, contents,
3911 symtab_hdr, sym_hashes,
3912 &r_type, GOT_UNKNOWN, rel,
3913 relend, h, r_symndx, true))
3914 return false;
3915
3916 if (r_type != R_X86_64_TLSLD)
3917 {
3918 /* LD->LE transition:
3919 leaq foo@tlsld(%rip), %rdi
3920 call __tls_get_addr@PLT
3921 For 64bit, we change it into:
3922 .word 0x6666; .byte 0x66; movq %fs:0, %rax
3923 For 32bit, we change it into:
3924 nopl 0x0(%rax); movl %fs:0, %eax
3925 Or
3926 leaq foo@tlsld(%rip), %rdi;
3927 call *__tls_get_addr@GOTPCREL(%rip)
3928 which may be converted to
3929 addr32 call __tls_get_addr
3930 For 64bit, we change it into:
3931 .word 0x6666; .word 0x6666; movq %fs:0, %rax
3932 For 32bit, we change it into:
3933 nopw 0x0(%rax); movl %fs:0, %eax
3934 For largepic, change:
3935 leaq foo@tlsld(%rip), %rdi
3936 movabsq $__tls_get_addr@pltoff, %rax
3937 addq %rbx, %rax
3938 call *%rax
3939 into
3940 data16 data16 data16 nopw %cs:0x0(%rax,%rax,1)
3941 movq %fs:0, %rax */
3942
3943 BFD_ASSERT (r_type == R_X86_64_TPOFF32);
3944 if (ABI_64_P (output_bfd))
3945 {
3946 if ((rel->r_offset + 5) >= input_section->size)
3947 goto corrupt_input;
3948 if (contents[rel->r_offset + 5] == 0xb8)
3949 {
3950 if (rel->r_offset < 3
3951 || (rel->r_offset - 3 + 22) > input_section->size)
3952 goto corrupt_input;
3953 memcpy (contents + rel->r_offset - 3,
3954 "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0"
3955 "\x64\x48\x8b\x04\x25\0\0\0", 22);
3956 }
3957 else if (contents[rel->r_offset + 4] == 0xff
3958 || contents[rel->r_offset + 4] == 0x67)
3959 {
3960 if (rel->r_offset < 3
3961 || (rel->r_offset - 3 + 13) > input_section->size)
3962 goto corrupt_input;
3963 memcpy (contents + rel->r_offset - 3,
3964 "\x66\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0",
3965 13);
3966
3967 }
3968 else
3969 {
3970 if (rel->r_offset < 3
3971 || (rel->r_offset - 3 + 12) > input_section->size)
3972 goto corrupt_input;
3973 memcpy (contents + rel->r_offset - 3,
3974 "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12);
3975 }
3976 }
3977 else
3978 {
3979 if ((rel->r_offset + 4) >= input_section->size)
3980 goto corrupt_input;
3981 if (contents[rel->r_offset + 4] == 0xff)
3982 {
3983 if (rel->r_offset < 3
3984 || (rel->r_offset - 3 + 13) > input_section->size)
3985 goto corrupt_input;
3986 memcpy (contents + rel->r_offset - 3,
3987 "\x66\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0",
3988 13);
3989 }
3990 else
3991 {
3992 if (rel->r_offset < 3
3993 || (rel->r_offset - 3 + 12) > input_section->size)
3994 goto corrupt_input;
3995 memcpy (contents + rel->r_offset - 3,
3996 "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12);
3997 }
3998 }
3999 /* Skip R_X86_64_PC32, R_X86_64_PLT32, R_X86_64_GOTPCRELX
4000 and R_X86_64_PLTOFF64. */
4001 rel++;
4002 wrel++;
4003 continue;
4004 }
4005
4006 if (htab->elf.sgot == NULL)
4007 abort ();
4008
4009 off = htab->tls_ld_or_ldm_got.offset;
4010 if (off & 1)
4011 off &= ~1;
4012 else
4013 {
4014 Elf_Internal_Rela outrel;
4015
4016 if (htab->elf.srelgot == NULL)
4017 abort ();
4018
4019 outrel.r_offset = (htab->elf.sgot->output_section->vma
4020 + htab->elf.sgot->output_offset + off);
4021
4022 bfd_put_64 (output_bfd, 0,
4023 htab->elf.sgot->contents + off);
4024 bfd_put_64 (output_bfd, 0,
4025 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
4026 outrel.r_info = htab->r_info (0, R_X86_64_DTPMOD64);
4027 outrel.r_addend = 0;
4028 elf_append_rela (output_bfd, htab->elf.srelgot,
4029 &outrel);
4030 htab->tls_ld_or_ldm_got.offset |= 1;
4031 }
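/* A single module-ID GOT entry is shared by every local-dynamic
   access: only R_X86_64_DTPMOD64 is needed, and the second GOT word
   stays zero because each @dtpoff offset is applied by the code
   itself.  */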
4032 relocation = htab->elf.sgot->output_section->vma
4033 + htab->elf.sgot->output_offset + off;
4034 unresolved_reloc = false;
4035 break;
4036
4037 case R_X86_64_DTPOFF32:
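/* When linking an executable, a @dtpoff reference in a code section
   can be resolved directly to a TP-relative offset (the LD->LE
   optimization); elsewhere, e.g. in debug sections, keep the plain
   DTP-relative value.  */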
4038 if (!bfd_link_executable (info)
4039 || (input_section->flags & SEC_CODE) == 0)
4040 relocation -= _bfd_x86_elf_dtpoff_base (info);
4041 else
4042 relocation = elf_x86_64_tpoff (info, relocation);
4043 break;
4044
4045 case R_X86_64_TPOFF32:
4046 case R_X86_64_TPOFF64:
4047 BFD_ASSERT (bfd_link_executable (info));
4048 relocation = elf_x86_64_tpoff (info, relocation);
4049 break;
4050
4051 case R_X86_64_DTPOFF64:
4052 BFD_ASSERT ((input_section->flags & SEC_CODE) == 0);
4053 relocation -= _bfd_x86_elf_dtpoff_base (info);
4054 break;
4055
4056 default:
4057 break;
4058 }
4059
4060 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
4061 because such sections are not SEC_ALLOC and thus ld.so will
4062 not process them. */
4063 if (unresolved_reloc
4064 && !((input_section->flags & SEC_DEBUGGING) != 0
4065 && h->def_dynamic)
4066 && _bfd_elf_section_offset (output_bfd, info, input_section,
4067 rel->r_offset) != (bfd_vma) -1)
4068 {
4069 switch (r_type)
4070 {
4071 case R_X86_64_32S:
4072 sec = h->root.u.def.section;
4073 if ((info->nocopyreloc
4074 || (eh->def_protected
4075 && elf_has_no_copy_on_protected (h->root.u.def.section->owner)))
4076 && !(h->root.u.def.section->flags & SEC_CODE))
4077 return elf_x86_64_need_pic (info, input_bfd, input_section,
4078 h, NULL, NULL, howto);
4079 /* Fall through. */
4080
4081 default:
4082 _bfd_error_handler
4083 /* xgettext:c-format */
4084 (_("%pB(%pA+%#" PRIx64 "): "
4085 "unresolvable %s relocation against symbol `%s'"),
4086 input_bfd,
4087 input_section,
4088 (uint64_t) rel->r_offset,
4089 howto->name,
4090 h->root.root.string);
4091 return false;
4092 }
4093 }
4094
4095 do_relocation:
4096 r = _bfd_final_link_relocate (howto, input_bfd, input_section,
4097 contents, rel->r_offset,
4098 relocation, rel->r_addend);
4099
4100 check_relocation_error:
4101 if (r != bfd_reloc_ok)
4102 {
4103 const char *name;
4104
4105 if (h != NULL)
4106 name = h->root.root.string;
4107 else
4108 {
4109 name = bfd_elf_string_from_elf_section (input_bfd,
4110 symtab_hdr->sh_link,
4111 sym->st_name);
4112 if (name == NULL)
4113 return false;
4114 if (*name == '\0')
4115 name = bfd_section_name (sec);
4116 }
4117
4118 if (r == bfd_reloc_overflow)
4119 {
4120 if (converted_reloc)
4121 {
4122 info->callbacks->einfo
4123 ("%X%H:", input_bfd, input_section, rel->r_offset);
4124 info->callbacks->einfo
4125 (_(" failed to convert GOTPCREL relocation against "
4126 "'%s'; relink with --no-relax\n"),
4127 name);
4128 status = false;
4129 continue;
4130 }
4131 (*info->callbacks->reloc_overflow)
4132 (info, (h ? &h->root : NULL), name, howto->name,
4133 (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
4134 }
4135 else
4136 {
4137 _bfd_error_handler
4138 /* xgettext:c-format */
4139 (_("%pB(%pA+%#" PRIx64 "): reloc against `%s': error %d"),
4140 input_bfd, input_section,
4141 (uint64_t) rel->r_offset, name, (int) r);
4142 return false;
4143 }
4144 }
4145
4146 if (wrel != rel)
4147 *wrel = *rel;
4148 }
4149
4150 if (wrel != rel)
4151 {
4152 Elf_Internal_Shdr *rel_hdr;
4153 size_t deleted = rel - wrel;
4154
4155 rel_hdr = _bfd_elf_single_rel_hdr (input_section->output_section);
4156 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
4157 if (rel_hdr->sh_size == 0)
4158 {
4159 /* It is too late to remove an empty reloc section. Leave
4160 one NONE reloc.
4161 ??? What is wrong with an empty section??? */
4162 rel_hdr->sh_size = rel_hdr->sh_entsize;
4163 deleted -= 1;
4164 }
4165 rel_hdr = _bfd_elf_single_rel_hdr (input_section);
4166 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
4167 input_section->reloc_count -= deleted;
4168 }
4169
4170 return status;
4171 }
4172
4173 /* Finish up dynamic symbol handling. We set the contents of various
4174 dynamic sections here. */
4175
4176 static bool
4177 elf_x86_64_finish_dynamic_symbol (bfd *output_bfd,
4178 struct bfd_link_info *info,
4179 struct elf_link_hash_entry *h,
4180 Elf_Internal_Sym *sym)
4181 {
4182 struct elf_x86_link_hash_table *htab;
4183 bool use_plt_second;
4184 struct elf_x86_link_hash_entry *eh;
4185 bool local_undefweak;
4186
4187 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
4188 if (htab == NULL)
4189 return false;
4190
4191 /* Use the second PLT section only if there is a .plt section. */
4192 use_plt_second = htab->elf.splt != NULL && htab->plt_second != NULL;
4193
4194 eh = (struct elf_x86_link_hash_entry *) h;
4195 if (eh->no_finish_dynamic_symbol)
4196 abort ();
4197
4198 /* We keep PLT/GOT entries without dynamic PLT/GOT relocations for
4199 resolved undefined weak symbols in an executable so that their
4200 references have value 0 at run-time. */
4201 local_undefweak = UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh);
4202
4203 if (h->plt.offset != (bfd_vma) -1)
4204 {
4205 bfd_vma plt_index;
4206 bfd_vma got_offset, plt_offset;
4207 Elf_Internal_Rela rela;
4208 bfd_byte *loc;
4209 asection *plt, *gotplt, *relplt, *resolved_plt;
4210 const struct elf_backend_data *bed;
4211 bfd_vma plt_got_pcrel_offset;
4212
4213 /* When building a static executable, use .iplt, .igot.plt and
4214 .rela.iplt sections for STT_GNU_IFUNC symbols. */
4215 if (htab->elf.splt != NULL)
4216 {
4217 plt = htab->elf.splt;
4218 gotplt = htab->elf.sgotplt;
4219 relplt = htab->elf.srelplt;
4220 }
4221 else
4222 {
4223 plt = htab->elf.iplt;
4224 gotplt = htab->elf.igotplt;
4225 relplt = htab->elf.irelplt;
4226 }
4227
4228 VERIFY_PLT_ENTRY (info, h, plt, gotplt, relplt, local_undefweak)
4229
4230 /* Get the index in the procedure linkage table which
4231 corresponds to this symbol. This is the index of this symbol
4232 in all the symbols for which we are making plt entries. The
4233 first entry in the procedure linkage table is reserved.
4234
4235 Get the offset into the .got table of the entry that
4236 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
4237 bytes. The first three are reserved for the dynamic linker.
4238
4239 For static executables, we don't reserve anything. */
4240
4241 if (plt == htab->elf.splt)
4242 {
4243 got_offset = (h->plt.offset / htab->plt.plt_entry_size
4244 - htab->plt.has_plt0);
4245 got_offset = (got_offset + 3) * GOT_ENTRY_SIZE;
4246 }
4247 else
4248 {
4249 got_offset = h->plt.offset / htab->plt.plt_entry_size;
4250 got_offset = got_offset * GOT_ENTRY_SIZE;
4251 }
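/* Illustrative example (assuming 16-byte lazy PLT entries, 8-byte
   GOT entries and a PLT0 header): the entry at h->plt.offset == 32 is
   PLT index 32 / 16 - 1 == 1, giving got_offset = (1 + 3) * 8 == 32,
   i.e. the fourth .got.plt slot after the three reserved ones.  */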
4252
4253 /* Fill in the entry in the procedure linkage table. */
4254 memcpy (plt->contents + h->plt.offset, htab->plt.plt_entry,
4255 htab->plt.plt_entry_size);
4256 if (use_plt_second)
4257 {
4258 memcpy (htab->plt_second->contents + eh->plt_second.offset,
4259 htab->non_lazy_plt->plt_entry,
4260 htab->non_lazy_plt->plt_entry_size);
4261
4262 resolved_plt = htab->plt_second;
4263 plt_offset = eh->plt_second.offset;
4264 }
4265 else
4266 {
4267 resolved_plt = plt;
4268 plt_offset = h->plt.offset;
4269 }
4270
4271 /* Insert the relocation positions of the plt section. */
4272
4273 /* Store the PC-relative offset to the GOT entry in the instruction
4274 that refers to it, subtracting the size of that instruction. */
4275 plt_got_pcrel_offset = (gotplt->output_section->vma
4276 + gotplt->output_offset
4277 + got_offset
4278 - resolved_plt->output_section->vma
4279 - resolved_plt->output_offset
4280 - plt_offset
4281 - htab->plt.plt_got_insn_size);
4282
4283 /* Check PC-relative offset overflow in PLT entry. */
4284 if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff)
4285 /* xgettext:c-format */
4286 info->callbacks->einfo (_("%F%pB: PC-relative offset overflow in PLT entry for `%s'\n"),
4287 output_bfd, h->root.root.string);
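/* I.e. fail unless the offset fits in a signed 32-bit displacement.  */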
4288
4289 bfd_put_32 (output_bfd, plt_got_pcrel_offset,
4290 (resolved_plt->contents + plt_offset
4291 + htab->plt.plt_got_offset));
4292
4293 /* Fill in the entry in the global offset table; initially this
4294 points to the second part of the PLT entry. Leave the entry
4295 as zero for an undefined weak symbol in PIE, since no PLT
4296 relocation is emitted against an undefined weak symbol in PIE. */
4297 if (!local_undefweak)
4298 {
4299 if (htab->plt.has_plt0)
4300 bfd_put_64 (output_bfd, (plt->output_section->vma
4301 + plt->output_offset
4302 + h->plt.offset
4303 + htab->lazy_plt->plt_lazy_offset),
4304 gotplt->contents + got_offset);
4305
4306 /* Fill in the entry in the .rela.plt section. */
4307 rela.r_offset = (gotplt->output_section->vma
4308 + gotplt->output_offset
4309 + got_offset);
4310 if (PLT_LOCAL_IFUNC_P (info, h))
4311 {
4312 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
4313 h->root.root.string,
4314 h->root.u.def.section->owner);
4315
4316 /* If an STT_GNU_IFUNC symbol is locally defined, generate
4317 R_X86_64_IRELATIVE instead of R_X86_64_JUMP_SLOT. */
4318 rela.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
4319 rela.r_addend = (h->root.u.def.value
4320 + h->root.u.def.section->output_section->vma
4321 + h->root.u.def.section->output_offset);
4322
4323 if (htab->params->report_relative_reloc)
4324 _bfd_x86_elf_link_report_relative_reloc
4325 (info, relplt, h, sym, "R_X86_64_IRELATIVE", &rela);
4326
4327 /* R_X86_64_IRELATIVE comes last. */
4328 plt_index = htab->next_irelative_index--;
4329 }
4330 else
4331 {
4332 rela.r_info = htab->r_info (h->dynindx, R_X86_64_JUMP_SLOT);
4333 rela.r_addend = 0;
4334 plt_index = htab->next_jump_slot_index++;
4335 }
4336
4337 /* Don't fill the second and third slots of the PLT entry for
4338 static executables or when there is no PLT0. */
4339 if (plt == htab->elf.splt && htab->plt.has_plt0)
4340 {
4341 bfd_vma plt0_offset
4342 = h->plt.offset + htab->lazy_plt->plt_plt_insn_end;
4343
4344 /* Put relocation index. */
4345 bfd_put_32 (output_bfd, plt_index,
4346 (plt->contents + h->plt.offset
4347 + htab->lazy_plt->plt_reloc_offset));
4348
4349 /* Put offset for jmp .PLT0 and check for overflow. We don't
4350 check relocation index for overflow since branch displacement
4351 will overflow first. */
4352 if (plt0_offset > 0x80000000)
4353 /* xgettext:c-format */
4354 info->callbacks->einfo (_("%F%pB: branch displacement overflow in PLT entry for `%s'\n"),
4355 output_bfd, h->root.root.string);
4356 bfd_put_32 (output_bfd, - plt0_offset,
4357 (plt->contents + h->plt.offset
4358 + htab->lazy_plt->plt_plt_offset));
4359 }
4360
4361 bed = get_elf_backend_data (output_bfd);
4362 loc = relplt->contents + plt_index * bed->s->sizeof_rela;
4363 bed->s->swap_reloca_out (output_bfd, &rela, loc);
4364 }
4365 }
4366 else if (eh->plt_got.offset != (bfd_vma) -1)
4367 {
4368 bfd_vma got_offset, plt_offset;
4369 asection *plt, *got;
4370 bool got_after_plt;
4371 int32_t got_pcrel_offset;
4372
4373 /* Set the entry in the GOT procedure linkage table. */
4374 plt = htab->plt_got;
4375 got = htab->elf.sgot;
4376 got_offset = h->got.offset;
4377
4378 if (got_offset == (bfd_vma) -1
4379 || (h->type == STT_GNU_IFUNC && h->def_regular)
4380 || plt == NULL
4381 || got == NULL)
4382 abort ();
4383
4384 /* Use the non-lazy PLT entry template for the GOT PLT since they
4385 are identical. */
4386 /* Fill in the entry in the GOT procedure linkage table. */
4387 plt_offset = eh->plt_got.offset;
4388 memcpy (plt->contents + plt_offset,
4389 htab->non_lazy_plt->plt_entry,
4390 htab->non_lazy_plt->plt_entry_size);
4391
4392 /* Store the PC-relative offset to the GOT entry in the instruction
4393 that refers to it, subtracting the size of that instruction. */
4394 got_pcrel_offset = (got->output_section->vma
4395 + got->output_offset
4396 + got_offset
4397 - plt->output_section->vma
4398 - plt->output_offset
4399 - plt_offset
4400 - htab->non_lazy_plt->plt_got_insn_size);
4401
4402 /* Check PC-relative offset overflow in GOT PLT entry. */
4403 got_after_plt = got->output_section->vma > plt->output_section->vma;
4404 if ((got_after_plt && got_pcrel_offset < 0)
4405 || (!got_after_plt && got_pcrel_offset > 0))
4406 /* xgettext:c-format */
4407 info->callbacks->einfo (_("%F%pB: PC-relative offset overflow in GOT PLT entry for `%s'\n"),
4408 output_bfd, h->root.root.string);
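/* Rationale: if the GOT lies above the PLT the true displacement must
   be positive, and vice versa; a sign mismatch in the truncated 32-bit
   value therefore indicates overflow.  */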
4409
4410 bfd_put_32 (output_bfd, got_pcrel_offset,
4411 (plt->contents + plt_offset
4412 + htab->non_lazy_plt->plt_got_offset));
4413 }
4414
4415 if (!local_undefweak
4416 && !h->def_regular
4417 && (h->plt.offset != (bfd_vma) -1
4418 || eh->plt_got.offset != (bfd_vma) -1))
4419 {
4420 /* Mark the symbol as undefined, rather than as defined in
4421 the .plt section. Leave the value if there were any
4422 relocations where pointer equality matters (this is a clue
4423 for the dynamic linker, to make function pointer
4424 comparisons work between an application and a shared
4425 library), otherwise set it to zero. If a function is only
4426 called from a binary, there is no need to slow down
4427 shared libraries because of that. */
4428 sym->st_shndx = SHN_UNDEF;
4429 if (!h->pointer_equality_needed)
4430 sym->st_value = 0;
4431 }
4432
4433 _bfd_x86_elf_link_fixup_ifunc_symbol (info, htab, h, sym);
4434
4435 /* Don't generate a dynamic GOT relocation against an undefined
4436 weak symbol in an executable. */
4437 if (h->got.offset != (bfd_vma) -1
4438 && ! GOT_TLS_GD_ANY_P (elf_x86_hash_entry (h)->tls_type)
4439 && elf_x86_hash_entry (h)->tls_type != GOT_TLS_IE
4440 && !local_undefweak)
4441 {
4442 Elf_Internal_Rela rela;
4443 asection *relgot = htab->elf.srelgot;
4444 const char *relative_reloc_name = NULL;
4445
4446 /* This symbol has an entry in the global offset table. Set it
4447 up. */
4448 if (htab->elf.sgot == NULL || htab->elf.srelgot == NULL)
4449 abort ();
4450
4451 rela.r_offset = (htab->elf.sgot->output_section->vma
4452 + htab->elf.sgot->output_offset
4453 + (h->got.offset &~ (bfd_vma) 1));
4454
4455 /* If this is a static link, or it is a -Bsymbolic link and the
4456 symbol is defined locally or was forced to be local because
4457 of a version file, we just want to emit a RELATIVE reloc.
4458 The entry in the global offset table will already have been
4459 initialized in the relocate_section function. */
4460 if (h->def_regular
4461 && h->type == STT_GNU_IFUNC)
4462 {
4463 if (h->plt.offset == (bfd_vma) -1)
4464 {
4465 /* STT_GNU_IFUNC is referenced without PLT. */
4466 if (htab->elf.splt == NULL)
4467 {
4468 /* Use the .rel[a].iplt section to store .got relocations
4469 in a static executable. */
4470 relgot = htab->elf.irelplt;
4471 }
4472 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
4473 {
4474 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
4475 h->root.root.string,
4476 h->root.u.def.section->owner);
4477
4478 rela.r_info = htab->r_info (0,
4479 R_X86_64_IRELATIVE);
4480 rela.r_addend = (h->root.u.def.value
4481 + h->root.u.def.section->output_section->vma
4482 + h->root.u.def.section->output_offset);
4483 relative_reloc_name = "R_X86_64_IRELATIVE";
4484 }
4485 else
4486 goto do_glob_dat;
4487 }
4488 else if (bfd_link_pic (info))
4489 {
4490 /* Generate R_X86_64_GLOB_DAT. */
4491 goto do_glob_dat;
4492 }
4493 else
4494 {
4495 asection *plt;
4496 bfd_vma plt_offset;
4497
4498 if (!h->pointer_equality_needed)
4499 abort ();
4500
4501 /* For a non-shared object, we can't use .got.plt, which
4502 contains the real function address if we need pointer
4503 equality. We load the GOT entry with the PLT entry. */
4504 if (htab->plt_second != NULL)
4505 {
4506 plt = htab->plt_second;
4507 plt_offset = eh->plt_second.offset;
4508 }
4509 else
4510 {
4511 plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt;
4512 plt_offset = h->plt.offset;
4513 }
4514 bfd_put_64 (output_bfd, (plt->output_section->vma
4515 + plt->output_offset
4516 + plt_offset),
4517 htab->elf.sgot->contents + h->got.offset);
4518 return true;
4519 }
4520 }
4521 else if (bfd_link_pic (info)
4522 && SYMBOL_REFERENCES_LOCAL_P (info, h))
4523 {
4524 if (!SYMBOL_DEFINED_NON_SHARED_P (h))
4525 return false;
4526 BFD_ASSERT((h->got.offset & 1) != 0);
4527 rela.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4528 rela.r_addend = (h->root.u.def.value
4529 + h->root.u.def.section->output_section->vma
4530 + h->root.u.def.section->output_offset);
4531 relative_reloc_name = "R_X86_64_RELATIVE";
4532 }
4533 else
4534 {
4535 BFD_ASSERT((h->got.offset & 1) == 0);
4536 do_glob_dat:
4537 bfd_put_64 (output_bfd, (bfd_vma) 0,
4538 htab->elf.sgot->contents + h->got.offset);
4539 rela.r_info = htab->r_info (h->dynindx, R_X86_64_GLOB_DAT);
4540 rela.r_addend = 0;
4541 }
4542
4543 if (relative_reloc_name != NULL
4544 && htab->params->report_relative_reloc)
4545 _bfd_x86_elf_link_report_relative_reloc
4546 (info, relgot, h, sym, relative_reloc_name, &rela);
4547
4548 elf_append_rela (output_bfd, relgot, &rela);
4549 }
4550
4551 if (h->needs_copy)
4552 {
4553 Elf_Internal_Rela rela;
4554 asection *s;
4555
4556 /* This symbol needs a copy reloc. Set it up. */
4557 VERIFY_COPY_RELOC (h, htab)
4558
4559 rela.r_offset = (h->root.u.def.value
4560 + h->root.u.def.section->output_section->vma
4561 + h->root.u.def.section->output_offset);
4562 rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY);
4563 rela.r_addend = 0;
4564 if (h->root.u.def.section == htab->elf.sdynrelro)
4565 s = htab->elf.sreldynrelro;
4566 else
4567 s = htab->elf.srelbss;
4568 elf_append_rela (output_bfd, s, &rela);
4569 }
4570
4571 return true;
4572 }
4573
4574 /* Finish up local dynamic symbol handling. We set the contents of
4575 various dynamic sections here. */
4576
4577 static int
4578 elf_x86_64_finish_local_dynamic_symbol (void **slot, void *inf)
4579 {
4580 struct elf_link_hash_entry *h
4581 = (struct elf_link_hash_entry *) *slot;
4582 struct bfd_link_info *info
4583 = (struct bfd_link_info *) inf;
4584
4585 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4586 info, h, NULL);
4587 }
4588
4589 /* Finish up undefined weak symbol handling in PIE. Fill its PLT entry
4590 here since an undefined weak symbol may not be dynamic, in which case
4591 elf_x86_64_finish_dynamic_symbol is not called for it. */
4592
4593 static bool
4594 elf_x86_64_pie_finish_undefweak_symbol (struct bfd_hash_entry *bh,
4595 void *inf)
4596 {
4597 struct elf_link_hash_entry *h = (struct elf_link_hash_entry *) bh;
4598 struct bfd_link_info *info = (struct bfd_link_info *) inf;
4599
4600 if (h->root.type != bfd_link_hash_undefweak
4601 || h->dynindx != -1)
4602 return true;
4603
4604 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4605 info, h, NULL);
4606 }
4607
4608 /* Used to decide how to sort relocs in an optimal manner for the
4609 dynamic linker, before writing them out. */
4610
4611 static enum elf_reloc_type_class
4612 elf_x86_64_reloc_type_class (const struct bfd_link_info *info,
4613 const asection *rel_sec ATTRIBUTE_UNUSED,
4614 const Elf_Internal_Rela *rela)
4615 {
4616 bfd *abfd = info->output_bfd;
4617 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
4618 struct elf_x86_link_hash_table *htab
4619 = elf_x86_hash_table (info, X86_64_ELF_DATA);
4620
4621 if (htab->elf.dynsym != NULL
4622 && htab->elf.dynsym->contents != NULL)
4623 {
4624 /* Check relocation against STT_GNU_IFUNC symbol if there are
4625 dynamic symbols. */
4626 unsigned long r_symndx = htab->r_sym (rela->r_info);
4627 if (r_symndx != STN_UNDEF)
4628 {
4629 Elf_Internal_Sym sym;
4630 if (!bed->s->swap_symbol_in (abfd,
4631 (htab->elf.dynsym->contents
4632 + r_symndx * bed->s->sizeof_sym),
4633 0, &sym))
4634 abort ();
4635
4636 if (ELF_ST_TYPE (sym.st_info) == STT_GNU_IFUNC)
4637 return reloc_class_ifunc;
4638 }
4639 }
4640
4641 switch ((int) ELF32_R_TYPE (rela->r_info))
4642 {
4643 case R_X86_64_IRELATIVE:
4644 return reloc_class_ifunc;
4645 case R_X86_64_RELATIVE:
4646 case R_X86_64_RELATIVE64:
4647 return reloc_class_relative;
4648 case R_X86_64_JUMP_SLOT:
4649 return reloc_class_plt;
4650 case R_X86_64_COPY:
4651 return reloc_class_copy;
4652 default:
4653 return reloc_class_normal;
4654 }
4655 }
4656
4657 /* Finish up the dynamic sections. */
4658
4659 static bool
4660 elf_x86_64_finish_dynamic_sections (bfd *output_bfd,
4661 struct bfd_link_info *info)
4662 {
4663 struct elf_x86_link_hash_table *htab;
4664
4665 htab = _bfd_x86_elf_finish_dynamic_sections (output_bfd, info);
4666 if (htab == NULL)
4667 return false;
4668
4669 if (! htab->elf.dynamic_sections_created)
4670 return true;
4671
4672 if (htab->elf.splt && htab->elf.splt->size > 0)
4673 {
4674 elf_section_data (htab->elf.splt->output_section)
4675 ->this_hdr.sh_entsize = htab->plt.plt_entry_size;
4676
4677 if (htab->plt.has_plt0)
4678 {
4679 /* Fill in the special first entry in the procedure linkage
4680 table. */
4681 memcpy (htab->elf.splt->contents,
4682 htab->lazy_plt->plt0_entry,
4683 htab->lazy_plt->plt0_entry_size);
4684 /* Add the offset for pushq GOT+8(%rip); since the instruction
4685 uses 6 bytes, subtract this value. */
4686 bfd_put_32 (output_bfd,
4687 (htab->elf.sgotplt->output_section->vma
4688 + htab->elf.sgotplt->output_offset
4689 + 8
4690 - htab->elf.splt->output_section->vma
4691 - htab->elf.splt->output_offset
4692 - 6),
4693 (htab->elf.splt->contents
4694 + htab->lazy_plt->plt0_got1_offset));
4695 /* Add offset for the PC-relative instruction accessing
4696 GOT+16, subtracting the offset to the end of that
4697 instruction. */
4698 bfd_put_32 (output_bfd,
4699 (htab->elf.sgotplt->output_section->vma
4700 + htab->elf.sgotplt->output_offset
4701 + 16
4702 - htab->elf.splt->output_section->vma
4703 - htab->elf.splt->output_offset
4704 - htab->lazy_plt->plt0_got2_insn_end),
4705 (htab->elf.splt->contents
4706 + htab->lazy_plt->plt0_got2_offset));
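/* Both stores above follow the same pattern: the target GOT address
   minus the address of the byte just past the instruction being
   patched.  */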
4707 }
4708
4709 if (htab->elf.tlsdesc_plt)
4710 {
4711 bfd_put_64 (output_bfd, (bfd_vma) 0,
4712 htab->elf.sgot->contents + htab->elf.tlsdesc_got);
4713
4714 memcpy (htab->elf.splt->contents + htab->elf.tlsdesc_plt,
4715 htab->lazy_plt->plt_tlsdesc_entry,
4716 htab->lazy_plt->plt_tlsdesc_entry_size);
4717
4718 /* Add the offset for pushq GOT+8(%rip); since ENDBR64 uses 4
4719 bytes and the instruction uses 6 bytes, subtract these
4720 values. */
4721 bfd_put_32 (output_bfd,
4722 (htab->elf.sgotplt->output_section->vma
4723 + htab->elf.sgotplt->output_offset
4724 + 8
4725 - htab->elf.splt->output_section->vma
4726 - htab->elf.splt->output_offset
4727 - htab->elf.tlsdesc_plt
4728 - htab->lazy_plt->plt_tlsdesc_got1_insn_end),
4729 (htab->elf.splt->contents
4730 + htab->elf.tlsdesc_plt
4731 + htab->lazy_plt->plt_tlsdesc_got1_offset));
4732 /* Add offset for indirect branch via GOT+TDG, where TDG
4733 stands for htab->tlsdesc_got, subtracting the offset
4734 to the end of that instruction. */
4735 bfd_put_32 (output_bfd,
4736 (htab->elf.sgot->output_section->vma
4737 + htab->elf.sgot->output_offset
4738 + htab->elf.tlsdesc_got
4739 - htab->elf.splt->output_section->vma
4740 - htab->elf.splt->output_offset
4741 - htab->elf.tlsdesc_plt
4742 - htab->lazy_plt->plt_tlsdesc_got2_insn_end),
4743 (htab->elf.splt->contents
4744 + htab->elf.tlsdesc_plt
4745 + htab->lazy_plt->plt_tlsdesc_got2_offset));
4746 }
4747 }
4748
4749 /* Fill PLT entries for undefined weak symbols in PIE. */
4750 if (bfd_link_pie (info))
4751 bfd_hash_traverse (&info->hash->table,
4752 elf_x86_64_pie_finish_undefweak_symbol,
4753 info);
4754
4755 return true;
4756 }
4757
4758 /* Fill PLT/GOT entries and allocate dynamic relocations for local
4759 STT_GNU_IFUNC symbols, which aren't in the ELF linker hash table.
4760 It has to be done before elf_link_sort_relocs is called so that
4761 dynamic relocations are properly sorted. */
4762
4763 static bool
4764 elf_x86_64_output_arch_local_syms
4765 (bfd *output_bfd ATTRIBUTE_UNUSED,
4766 struct bfd_link_info *info,
4767 void *flaginfo ATTRIBUTE_UNUSED,
4768 int (*func) (void *, const char *,
4769 Elf_Internal_Sym *,
4770 asection *,
4771 struct elf_link_hash_entry *) ATTRIBUTE_UNUSED)
4772 {
4773 struct elf_x86_link_hash_table *htab
4774 = elf_x86_hash_table (info, X86_64_ELF_DATA);
4775 if (htab == NULL)
4776 return false;
4777
4778 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
4779 htab_traverse (htab->loc_hash_table,
4780 elf_x86_64_finish_local_dynamic_symbol,
4781 info);
4782
4783 return true;
4784 }
4785
4786 /* Similar to _bfd_elf_get_synthetic_symtab. Support PLTs with all
4787 dynamic relocations. */
4788
4789 static long
4790 elf_x86_64_get_synthetic_symtab (bfd *abfd,
4791 long symcount ATTRIBUTE_UNUSED,
4792 asymbol **syms ATTRIBUTE_UNUSED,
4793 long dynsymcount,
4794 asymbol **dynsyms,
4795 asymbol **ret)
4796 {
4797 long count, i, n;
4798 int j;
4799 bfd_byte *plt_contents;
4800 long relsize;
4801 const struct elf_x86_lazy_plt_layout *lazy_plt;
4802 const struct elf_x86_non_lazy_plt_layout *non_lazy_plt;
4803 const struct elf_x86_lazy_plt_layout *lazy_bnd_plt;
4804 const struct elf_x86_non_lazy_plt_layout *non_lazy_bnd_plt;
4805 const struct elf_x86_lazy_plt_layout *lazy_ibt_plt;
4806 const struct elf_x86_non_lazy_plt_layout *non_lazy_ibt_plt;
4807 asection *plt;
4808 enum elf_x86_plt_type plt_type;
4809 struct elf_x86_plt plts[] =
4810 {
4811 { ".plt", NULL, NULL, plt_unknown, 0, 0, 0, 0 },
4812 { ".plt.got", NULL, NULL, plt_non_lazy, 0, 0, 0, 0 },
4813 { ".plt.sec", NULL, NULL, plt_second, 0, 0, 0, 0 },
4814 { ".plt.bnd", NULL, NULL, plt_second, 0, 0, 0, 0 },
4815 { NULL, NULL, NULL, plt_non_lazy, 0, 0, 0, 0 }
4816 };
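/* Each entry above names a candidate PLT-like section; the loop below
   fills in its contents, layout and slot count so that
   _bfd_x86_elf_get_synthetic_symtab can emit one synthetic symbol per
   PLT slot.  */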
4817
4818 *ret = NULL;
4819
4820 if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
4821 return 0;
4822
4823 if (dynsymcount <= 0)
4824 return 0;
4825
4826 relsize = bfd_get_dynamic_reloc_upper_bound (abfd);
4827 if (relsize <= 0)
4828 return -1;
4829
4830 lazy_plt = &elf_x86_64_lazy_plt;
4831 non_lazy_plt = &elf_x86_64_non_lazy_plt;
4832 lazy_bnd_plt = &elf_x86_64_lazy_bnd_plt;
4833 non_lazy_bnd_plt = &elf_x86_64_non_lazy_bnd_plt;
4834 if (ABI_64_P (abfd))
4835 {
4836 lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
4837 non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
4838 }
4839 else
4840 {
4841 lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
4842 non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
4843 }
4844
4845 count = 0;
4846 for (j = 0; plts[j].name != NULL; j++)
4847 {
4848 plt = bfd_get_section_by_name (abfd, plts[j].name);
4849 if (plt == NULL || plt->size == 0)
4850 continue;
4851
4852 /* Get the PLT section contents. */
4853 if (!bfd_malloc_and_get_section (abfd, plt, &plt_contents))
4854 break;
4855
4856 /* Check what kind of PLT it is. */
4857 plt_type = plt_unknown;
4858 if (plts[j].type == plt_unknown
4859 && (plt->size >= (lazy_plt->plt_entry_size
4860 + lazy_plt->plt_entry_size)))
4861 {
4862 /* Match lazy PLT first. Need to check the first two
4863 instructions. */
4864 if ((memcmp (plt_contents, lazy_plt->plt0_entry,
4865 lazy_plt->plt0_got1_offset) == 0)
4866 && (memcmp (plt_contents + 6, lazy_plt->plt0_entry + 6,
4867 2) == 0))
4868 plt_type = plt_lazy;
4869 else if (lazy_bnd_plt != NULL
4870 && (memcmp (plt_contents, lazy_bnd_plt->plt0_entry,
4871 lazy_bnd_plt->plt0_got1_offset) == 0)
4872 && (memcmp (plt_contents + 6,
4873 lazy_bnd_plt->plt0_entry + 6, 3) == 0))
4874 {
4875 plt_type = plt_lazy | plt_second;
4876 /* The first entry in the lazy IBT PLT is the same as the
4877 lazy BND PLT. */
4878 if ((memcmp (plt_contents + lazy_ibt_plt->plt_entry_size,
4879 lazy_ibt_plt->plt_entry,
4880 lazy_ibt_plt->plt_got_offset) == 0))
4881 lazy_plt = lazy_ibt_plt;
4882 else
4883 lazy_plt = lazy_bnd_plt;
4884 }
4885 }
4886
4887 if (non_lazy_plt != NULL
4888 && (plt_type == plt_unknown || plt_type == plt_non_lazy)
4889 && plt->size >= non_lazy_plt->plt_entry_size)
4890 {
4891 /* Match non-lazy PLT. */
4892 if (memcmp (plt_contents, non_lazy_plt->plt_entry,
4893 non_lazy_plt->plt_got_offset) == 0)
4894 plt_type = plt_non_lazy;
4895 }
4896
4897 if (plt_type == plt_unknown || plt_type == plt_second)
4898 {
4899 if (non_lazy_bnd_plt != NULL
4900 && plt->size >= non_lazy_bnd_plt->plt_entry_size
4901 && (memcmp (plt_contents, non_lazy_bnd_plt->plt_entry,
4902 non_lazy_bnd_plt->plt_got_offset) == 0))
4903 {
4904 /* Match BND PLT. */
4905 plt_type = plt_second;
4906 non_lazy_plt = non_lazy_bnd_plt;
4907 }
4908 else if (non_lazy_ibt_plt != NULL
4909 && plt->size >= non_lazy_ibt_plt->plt_entry_size
4910 && (memcmp (plt_contents,
4911 non_lazy_ibt_plt->plt_entry,
4912 non_lazy_ibt_plt->plt_got_offset) == 0))
4913 {
4914 /* Match IBT PLT. */
4915 plt_type = plt_second;
4916 non_lazy_plt = non_lazy_ibt_plt;
4917 }
4918 }
4919
4920 if (plt_type == plt_unknown)
4921 {
4922 free (plt_contents);
4923 continue;
4924 }
4925
4926 plts[j].sec = plt;
4927 plts[j].type = plt_type;
4928
4929 if ((plt_type & plt_lazy))
4930 {
4931 plts[j].plt_got_offset = lazy_plt->plt_got_offset;
4932 plts[j].plt_got_insn_size = lazy_plt->plt_got_insn_size;
4933 plts[j].plt_entry_size = lazy_plt->plt_entry_size;
4934 /* Skip PLT0 in lazy PLT. */
4935 i = 1;
4936 }
4937 else
4938 {
4939 plts[j].plt_got_offset = non_lazy_plt->plt_got_offset;
4940 plts[j].plt_got_insn_size = non_lazy_plt->plt_got_insn_size;
4941 plts[j].plt_entry_size = non_lazy_plt->plt_entry_size;
4942 i = 0;
4943 }
4944
4945 /* Skip lazy PLT when the second PLT is used. */
4946 if (plt_type == (plt_lazy | plt_second))
4947 plts[j].count = 0;
4948 else
4949 {
4950 n = plt->size / plts[j].plt_entry_size;
4951 plts[j].count = n;
4952 count += n - i;
4953 }
4954
4955 plts[j].contents = plt_contents;
4956 }
4957
4958 return _bfd_x86_elf_get_synthetic_symtab (abfd, count, relsize,
4959 (bfd_vma) 0, plts, dynsyms,
4960 ret);
4961 }
4962
4963 /* Handle an x86-64 specific section when reading an object file. This
4964 is called when elfcode.h finds a section with an unknown type. */
4965
4966 static bool
4967 elf_x86_64_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr,
4968 const char *name, int shindex)
4969 {
4970 if (hdr->sh_type != SHT_X86_64_UNWIND)
4971 return false;
4972
4973 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
4974 return false;
4975
4976 return true;
4977 }
4978
4979 /* Hook called by the linker routine which adds symbols from an object
4980 file. We use it to put SHN_X86_64_LCOMMON items in .lbss, instead
4981 of .bss. */
4982
4983 static bool
4984 elf_x86_64_add_symbol_hook (bfd *abfd,
4985 struct bfd_link_info *info ATTRIBUTE_UNUSED,
4986 Elf_Internal_Sym *sym,
4987 const char **namep ATTRIBUTE_UNUSED,
4988 flagword *flagsp ATTRIBUTE_UNUSED,
4989 asection **secp,
4990 bfd_vma *valp)
4991 {
4992 asection *lcomm;
4993
4994 switch (sym->st_shndx)
4995 {
4996 case SHN_X86_64_LCOMMON:
4997 lcomm = bfd_get_section_by_name (abfd, "LARGE_COMMON");
4998 if (lcomm == NULL)
4999 {
5000 lcomm = bfd_make_section_with_flags (abfd,
5001 "LARGE_COMMON",
5002 (SEC_ALLOC
5003 | SEC_IS_COMMON
5004 | SEC_LINKER_CREATED));
5005 if (lcomm == NULL)
5006 return false;
5007 elf_section_flags (lcomm) |= SHF_X86_64_LARGE;
5008 }
5009 *secp = lcomm;
5010 *valp = sym->st_size;
5011 return true;
5012 }
5013
5014 return true;
5015 }
5016
5017
5018 /* Given a BFD section, try to locate the corresponding ELF section
5019 index. */
5020
5021 static bool
5022 elf_x86_64_elf_section_from_bfd_section (bfd *abfd ATTRIBUTE_UNUSED,
5023 asection *sec, int *index_return)
5024 {
5025 if (sec == &_bfd_elf_large_com_section)
5026 {
5027 *index_return = SHN_X86_64_LCOMMON;
5028 return true;
5029 }
5030 return false;
5031 }
5032
5033 /* Process a symbol. */
5034
5035 static void
5036 elf_x86_64_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
5037 asymbol *asym)
5038 {
5039 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
5040
5041 switch (elfsym->internal_elf_sym.st_shndx)
5042 {
5043 case SHN_X86_64_LCOMMON:
5044 asym->section = &_bfd_elf_large_com_section;
5045 asym->value = elfsym->internal_elf_sym.st_size;
5046 /* Common symbol doesn't set BSF_GLOBAL. */
5047 asym->flags &= ~BSF_GLOBAL;
5048 break;
5049 }
5050 }
5051
5052 static bool
5053 elf_x86_64_common_definition (Elf_Internal_Sym *sym)
5054 {
5055 return (sym->st_shndx == SHN_COMMON
5056 || sym->st_shndx == SHN_X86_64_LCOMMON);
5057 }
5058
5059 static unsigned int
5060 elf_x86_64_common_section_index (asection *sec)
5061 {
5062 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
5063 return SHN_COMMON;
5064 else
5065 return SHN_X86_64_LCOMMON;
5066 }
5067
5068 static asection *
5069 elf_x86_64_common_section (asection *sec)
5070 {
5071 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
5072 return bfd_com_section_ptr;
5073 else
5074 return &_bfd_elf_large_com_section;
5075 }
5076
5077 static bool
5078 elf_x86_64_merge_symbol (struct elf_link_hash_entry *h,
5079 const Elf_Internal_Sym *sym,
5080 asection **psec,
5081 bool newdef,
5082 bool olddef,
5083 bfd *oldbfd,
5084 const asection *oldsec)
5085 {
5086 /* A normal common symbol and a large common symbol result in a
5087 normal common symbol. We turn the large common symbol into a
5088 normal one. */
5089 if (!olddef
5090 && h->root.type == bfd_link_hash_common
5091 && !newdef
5092 && bfd_is_com_section (*psec)
5093 && oldsec != *psec)
5094 {
5095 if (sym->st_shndx == SHN_COMMON
5096 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) != 0)
5097 {
5098 h->root.u.c.p->section
5099 = bfd_make_section_old_way (oldbfd, "COMMON");
5100 h->root.u.c.p->section->flags = SEC_ALLOC;
5101 }
5102 else if (sym->st_shndx == SHN_X86_64_LCOMMON
5103 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) == 0)
5104 *psec = bfd_com_section_ptr;
5105 }
5106
5107 return true;
5108 }
5109
5110 static int
5111 elf_x86_64_additional_program_headers (bfd *abfd,
5112 struct bfd_link_info *info ATTRIBUTE_UNUSED)
5113 {
5114 asection *s;
5115 int count = 0;
5116
5117 /* Check to see if we need a large readonly segment. */
5118 s = bfd_get_section_by_name (abfd, ".lrodata");
5119 if (s && (s->flags & SEC_LOAD))
5120 count++;
5121
5122 /* Check to see if we need a large data segment. Since the .lbss section
5123 is placed right after the .bss section, there should be no need for
5124 a large data segment just because of .lbss. */
5125 s = bfd_get_section_by_name (abfd, ".ldata");
5126 if (s && (s->flags & SEC_LOAD))
5127 count++;
5128
5129 return count;
5130 }
5131
5132 /* Return TRUE iff relocations for INPUT are compatible with OUTPUT. */
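/* The extra elfclass check keeps x32 (ELFCLASS32) and LP64
   (ELFCLASS64) objects, which share EM_X86_64, from being treated as
   reloc-compatible.  */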
5133
5134 static bool
5135 elf_x86_64_relocs_compatible (const bfd_target *input,
5136 const bfd_target *output)
5137 {
5138 return ((xvec_get_elf_backend_data (input)->s->elfclass
5139 == xvec_get_elf_backend_data (output)->s->elfclass)
5140 && _bfd_elf_relocs_compatible (input, output));
5141 }
5142
5143 /* Set up x86-64 GNU properties. Return the first relocatable ELF input
5144 with GNU properties if found. Otherwise, return NULL. */
5145
5146 static bfd *
5147 elf_x86_64_link_setup_gnu_properties (struct bfd_link_info *info)
5148 {
5149 struct elf_x86_init_table init_table;
5150 const struct elf_backend_data *bed;
5151 struct elf_x86_link_hash_table *htab;
5152
5153 if ((int) R_X86_64_standard >= (int) R_X86_64_converted_reloc_bit
5154 || (int) R_X86_64_max <= (int) R_X86_64_converted_reloc_bit
5155 || ((int) (R_X86_64_GNU_VTINHERIT | R_X86_64_converted_reloc_bit)
5156 != (int) R_X86_64_GNU_VTINHERIT)
5157 || ((int) (R_X86_64_GNU_VTENTRY | R_X86_64_converted_reloc_bit)
5158 != (int) R_X86_64_GNU_VTENTRY))
5159 abort ();
5160
5161 /* This is unused for x86-64. */
5162 init_table.plt0_pad_byte = 0x90;
5163
5164 bed = get_elf_backend_data (info->output_bfd);
5165 htab = elf_x86_hash_table (info, bed->target_id);
5166 if (!htab)
5167 abort ();
5168 if (htab->params->bndplt)
5169 {
5170 init_table.lazy_plt = &elf_x86_64_lazy_bnd_plt;
5171 init_table.non_lazy_plt = &elf_x86_64_non_lazy_bnd_plt;
5172 }
5173 else
5174 {
5175 init_table.lazy_plt = &elf_x86_64_lazy_plt;
5176 init_table.non_lazy_plt = &elf_x86_64_non_lazy_plt;
5177 }
5178
5179 if (ABI_64_P (info->output_bfd))
5180 {
5181 init_table.lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
5182 init_table.non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
5183 }
5184 else
5185 {
5186 init_table.lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
5187 init_table.non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
5188 }
5189
5190 if (ABI_64_P (info->output_bfd))
5191 {
5192 init_table.r_info = elf64_r_info;
5193 init_table.r_sym = elf64_r_sym;
5194 }
5195 else
5196 {
5197 init_table.r_info = elf32_r_info;
5198 init_table.r_sym = elf32_r_sym;
5199 }
5200
5201 return _bfd_x86_elf_link_setup_gnu_properties (info, &init_table);
5202 }
5203
5204 static const struct bfd_elf_special_section
5205 elf_x86_64_special_sections[]=
5206 {
5207 { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5208 { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
5209 { STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE},
5210 { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5211 { STRING_COMMA_LEN (".ldata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5212 { STRING_COMMA_LEN (".lrodata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
5213 { NULL, 0, 0, 0, 0 }
5214 };
5215
5216 #define TARGET_LITTLE_SYM x86_64_elf64_vec
5217 #define TARGET_LITTLE_NAME "elf64-x86-64"
5218 #define ELF_ARCH bfd_arch_i386
5219 #define ELF_TARGET_ID X86_64_ELF_DATA
5220 #define ELF_MACHINE_CODE EM_X86_64
5221 #if DEFAULT_LD_Z_SEPARATE_CODE
5222 # define ELF_MAXPAGESIZE 0x1000
5223 #else
5224 # define ELF_MAXPAGESIZE 0x200000
5225 #endif
5226 #define ELF_MINPAGESIZE 0x1000
5227 #define ELF_COMMONPAGESIZE 0x1000
5228
5229 #define elf_backend_can_gc_sections 1
5230 #define elf_backend_can_refcount 1
5231 #define elf_backend_want_got_plt 1
5232 #define elf_backend_plt_readonly 1
5233 #define elf_backend_want_plt_sym 0
5234 #define elf_backend_got_header_size (GOT_ENTRY_SIZE*3)
5235 #define elf_backend_rela_normal 1
5236 #define elf_backend_plt_alignment 4
5237 #define elf_backend_extern_protected_data 1
5238 #define elf_backend_caches_rawsize 1
5239 #define elf_backend_dtrel_excludes_plt 1
5240 #define elf_backend_want_dynrelro 1
5241
5242 #define elf_info_to_howto elf_x86_64_info_to_howto
5243
5244 #define bfd_elf64_bfd_reloc_type_lookup elf_x86_64_reloc_type_lookup
5245 #define bfd_elf64_bfd_reloc_name_lookup \
5246 elf_x86_64_reloc_name_lookup
5247
5248 #define elf_backend_relocs_compatible elf_x86_64_relocs_compatible
5249 #define elf_backend_check_relocs elf_x86_64_check_relocs
5250 #define elf_backend_create_dynamic_sections _bfd_elf_create_dynamic_sections
5251 #define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections
5252 #define elf_backend_finish_dynamic_symbol elf_x86_64_finish_dynamic_symbol
5253 #define elf_backend_output_arch_local_syms elf_x86_64_output_arch_local_syms
5254 #define elf_backend_grok_prstatus elf_x86_64_grok_prstatus
5255 #define elf_backend_grok_psinfo elf_x86_64_grok_psinfo
5256 #ifdef CORE_HEADER
5257 #define elf_backend_write_core_note elf_x86_64_write_core_note
5258 #endif
5259 #define elf_backend_reloc_type_class elf_x86_64_reloc_type_class
5260 #define elf_backend_relocate_section elf_x86_64_relocate_section
5261 #define elf_backend_init_index_section _bfd_elf_init_1_index_section
5262 #define elf_backend_object_p elf64_x86_64_elf_object_p
5263 #define bfd_elf64_get_synthetic_symtab elf_x86_64_get_synthetic_symtab
5264
5265 #define elf_backend_section_from_shdr \
5266 elf_x86_64_section_from_shdr
5267
5268 #define elf_backend_section_from_bfd_section \
5269 elf_x86_64_elf_section_from_bfd_section
5270 #define elf_backend_add_symbol_hook \
5271 elf_x86_64_add_symbol_hook
5272 #define elf_backend_symbol_processing \
5273 elf_x86_64_symbol_processing
5274 #define elf_backend_common_section_index \
5275 elf_x86_64_common_section_index
5276 #define elf_backend_common_section \
5277 elf_x86_64_common_section
5278 #define elf_backend_common_definition \
5279 elf_x86_64_common_definition
5280 #define elf_backend_merge_symbol \
5281 elf_x86_64_merge_symbol
5282 #define elf_backend_special_sections \
5283 elf_x86_64_special_sections
5284 #define elf_backend_additional_program_headers \
5285 elf_x86_64_additional_program_headers
5286 #define elf_backend_setup_gnu_properties \
5287 elf_x86_64_link_setup_gnu_properties
5288 #define elf_backend_hide_symbol \
5289 _bfd_x86_elf_hide_symbol
5290
5291 #undef elf64_bed
5292 #define elf64_bed elf64_x86_64_bed
5293
5294 #include "elf64-target.h"
5295
5296 /* CloudABI support. */
5297
5298 #undef TARGET_LITTLE_SYM
5299 #define TARGET_LITTLE_SYM x86_64_elf64_cloudabi_vec
5300 #undef TARGET_LITTLE_NAME
5301 #define TARGET_LITTLE_NAME "elf64-x86-64-cloudabi"
5302
5303 #undef ELF_OSABI
5304 #define ELF_OSABI ELFOSABI_CLOUDABI
5305
5306 #undef elf64_bed
5307 #define elf64_bed elf64_x86_64_cloudabi_bed
5308
5309 #include "elf64-target.h"
5310
5311 /* FreeBSD support. */
5312
5313 #undef TARGET_LITTLE_SYM
5314 #define TARGET_LITTLE_SYM x86_64_elf64_fbsd_vec
5315 #undef TARGET_LITTLE_NAME
5316 #define TARGET_LITTLE_NAME "elf64-x86-64-freebsd"
5317
5318 #undef ELF_OSABI
5319 #define ELF_OSABI ELFOSABI_FREEBSD
5320
5321 #undef elf64_bed
5322 #define elf64_bed elf64_x86_64_fbsd_bed
5323
5324 #include "elf64-target.h"
5325
5326 /* Solaris 2 support. */
5327
5328 #undef TARGET_LITTLE_SYM
5329 #define TARGET_LITTLE_SYM x86_64_elf64_sol2_vec
5330 #undef TARGET_LITTLE_NAME
5331 #define TARGET_LITTLE_NAME "elf64-x86-64-sol2"
5332
5333 #undef ELF_TARGET_OS
5334 #define ELF_TARGET_OS is_solaris
5335
5336 /* Restore default: we cannot use ELFOSABI_SOLARIS, otherwise ELFOSABI_NONE
5337 objects won't be recognized. */
5338 #undef ELF_OSABI
5339
5340 #undef elf64_bed
5341 #define elf64_bed elf64_x86_64_sol2_bed
5342
5343 /* The 64-bit static TLS arena size is rounded to the nearest 16-byte
5344 boundary. */
5345 #undef elf_backend_static_tls_alignment
5346 #define elf_backend_static_tls_alignment 16
5347
5348 /* The Solaris 2 ABI requires a plt symbol on all platforms.
5349
5350 Cf. Linker and Libraries Guide, Ch. 2, Link-Editor, Generating the Output
5351 File, p.63. */
5352 #undef elf_backend_want_plt_sym
5353 #define elf_backend_want_plt_sym 1
5354
5355 #undef elf_backend_strtab_flags
5356 #define elf_backend_strtab_flags SHF_STRINGS
5357
5358 static bool
5359 elf64_x86_64_copy_solaris_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
5360 bfd *obfd ATTRIBUTE_UNUSED,
5361 const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
5362 Elf_Internal_Shdr *osection ATTRIBUTE_UNUSED)
5363 {
5364 /* PR 19938: FIXME: Need to add code for setting the sh_info
5365 and sh_link fields of Solaris specific section types. */
5366 return false;
5367 }
5368
5369 #undef elf_backend_copy_special_section_fields
5370 #define elf_backend_copy_special_section_fields elf64_x86_64_copy_solaris_special_section_fields
5371
5372 #include "elf64-target.h"
5373
5374 /* Restore defaults. */
5375 #undef ELF_OSABI
5376 #undef elf_backend_static_tls_alignment
5377 #undef elf_backend_want_plt_sym
5378 #define elf_backend_want_plt_sym 0
5379 #undef elf_backend_strtab_flags
5380 #undef elf_backend_copy_special_section_fields
5381
5382 /* Intel L1OM support. */
5383
5384 static bool
5385 elf64_l1om_elf_object_p (bfd *abfd)
5386 {
5387 /* Set the right machine number for an L1OM elf64 file. */
5388 bfd_default_set_arch_mach (abfd, bfd_arch_l1om, bfd_mach_l1om);
5389 return true;
5390 }
5391
5392 #undef TARGET_LITTLE_SYM
5393 #define TARGET_LITTLE_SYM l1om_elf64_vec
5394 #undef TARGET_LITTLE_NAME
5395 #define TARGET_LITTLE_NAME "elf64-l1om"
5396 #undef ELF_ARCH
5397 #define ELF_ARCH bfd_arch_l1om
5398
5399 #undef ELF_MACHINE_CODE
5400 #define ELF_MACHINE_CODE EM_L1OM
5401
5402 #undef ELF_OSABI
5403
5404 #undef elf64_bed
5405 #define elf64_bed elf64_l1om_bed
5406
5407 #undef elf_backend_object_p
5408 #define elf_backend_object_p elf64_l1om_elf_object_p
5409
5410 /* Restore defaults. */
5411 #undef ELF_MAXPAGESIZE
5412 #undef ELF_MINPAGESIZE
5413 #undef ELF_COMMONPAGESIZE
5414 #if DEFAULT_LD_Z_SEPARATE_CODE
5415 # define ELF_MAXPAGESIZE 0x1000
5416 #else
5417 # define ELF_MAXPAGESIZE 0x200000
5418 #endif
5419 #define ELF_MINPAGESIZE 0x1000
5420 #define ELF_COMMONPAGESIZE 0x1000
5421 #undef elf_backend_plt_alignment
5422 #define elf_backend_plt_alignment 4
5423 #undef ELF_TARGET_OS
5424
5425 #include "elf64-target.h"
5426
5427 /* FreeBSD L1OM support. */
5428
5429 #undef TARGET_LITTLE_SYM
5430 #define TARGET_LITTLE_SYM l1om_elf64_fbsd_vec
5431 #undef TARGET_LITTLE_NAME
5432 #define TARGET_LITTLE_NAME "elf64-l1om-freebsd"
5433
5434 #undef ELF_OSABI
5435 #define ELF_OSABI ELFOSABI_FREEBSD
5436
5437 #undef elf64_bed
5438 #define elf64_bed elf64_l1om_fbsd_bed
5439
5440 #include "elf64-target.h"
5441
5442 /* Intel K1OM support. */
5443
5444 static bool
5445 elf64_k1om_elf_object_p (bfd *abfd)
5446 {
5447 /* Set the right machine number for a K1OM elf64 file. */
5448 bfd_default_set_arch_mach (abfd, bfd_arch_k1om, bfd_mach_k1om);
5449 return true;
5450 }
5451
5452 #undef TARGET_LITTLE_SYM
5453 #define TARGET_LITTLE_SYM k1om_elf64_vec
5454 #undef TARGET_LITTLE_NAME
5455 #define TARGET_LITTLE_NAME "elf64-k1om"
5456 #undef ELF_ARCH
5457 #define ELF_ARCH bfd_arch_k1om
5458
5459 #undef ELF_MACHINE_CODE
5460 #define ELF_MACHINE_CODE EM_K1OM
5461
5462 #undef ELF_OSABI
5463
5464 #undef elf64_bed
5465 #define elf64_bed elf64_k1om_bed
5466
5467 #undef elf_backend_object_p
5468 #define elf_backend_object_p elf64_k1om_elf_object_p
5469
5470 #include "elf64-target.h"
5471
5472 /* FreeBSD K1OM support. */
5473
5474 #undef TARGET_LITTLE_SYM
5475 #define TARGET_LITTLE_SYM k1om_elf64_fbsd_vec
5476 #undef TARGET_LITTLE_NAME
5477 #define TARGET_LITTLE_NAME "elf64-k1om-freebsd"
5478
5479 #undef ELF_OSABI
5480 #define ELF_OSABI ELFOSABI_FREEBSD
5481
5482 #undef elf64_bed
5483 #define elf64_bed elf64_k1om_fbsd_bed
5484
5485 #include "elf64-target.h"
5486
5487 /* 32bit x86-64 support. */
5488
5489 #undef TARGET_LITTLE_SYM
5490 #define TARGET_LITTLE_SYM x86_64_elf32_vec
5491 #undef TARGET_LITTLE_NAME
5492 #define TARGET_LITTLE_NAME "elf32-x86-64"
5493 #undef elf32_bed
5494 #define elf32_bed elf32_x86_64_bed
5495
5496 #undef ELF_ARCH
5497 #define ELF_ARCH bfd_arch_i386
5498
5499 #undef ELF_MACHINE_CODE
5500 #define ELF_MACHINE_CODE EM_X86_64
5501
5502 #undef ELF_OSABI
5503
5504 #define bfd_elf32_bfd_reloc_type_lookup \
5505 elf_x86_64_reloc_type_lookup
5506 #define bfd_elf32_bfd_reloc_name_lookup \
5507 elf_x86_64_reloc_name_lookup
5508 #define bfd_elf32_get_synthetic_symtab \
5509 elf_x86_64_get_synthetic_symtab
5510
5511 #undef elf_backend_object_p
5512 #define elf_backend_object_p \
5513 elf32_x86_64_elf_object_p
5514
5515 #undef elf_backend_bfd_from_remote_memory
5516 #define elf_backend_bfd_from_remote_memory \
5517 _bfd_elf32_bfd_from_remote_memory
5518
5519 #undef elf_backend_size_info
5520 #define elf_backend_size_info \
5521 _bfd_elf32_size_info
5522
5523 #include "elf32-target.h"