1 /* X86-64 specific support for ELF
2 Copyright (C) 2000-2019 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka <jh@suse.cz>.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "elfxx-x86.h"
23 #include "elf-nacl.h"
24 #include "dwarf2.h"
25 #include "libiberty.h"
26
27 #include "opcode/i386.h"
28 #include "elf/x86-64.h"
29
30 #ifdef CORE_HEADER
31 #include <stdarg.h>
32 #include CORE_HEADER
33 #endif
34
35 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
36 #define MINUS_ONE (~ (bfd_vma) 0)
37
38 /* Since both 32-bit and 64-bit x86-64 encode relocation type in the
39 identical manner, we use ELF32_R_TYPE instead of ELF64_R_TYPE to get
40 relocation type. We also use ELF_ST_TYPE instead of ELF64_ST_TYPE
41 since they are the same. */
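
/* In both object formats the relocation type lives in the low bits of
   r_info, and every R_X86_64_* value used here is small enough to fit
   there, so for example

     unsigned int r_type = ELF32_R_TYPE (rel->r_info);

   yields the relocation type for ELF32 and ELF64 objects alike.  */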
42
43 /* The relocation "howto" table. Order of fields:
44 type, rightshift, size, bitsize, pc_relative, bitpos, complain_on_overflow,
45 special_function, name, partial_inplace, src_mask, dst_mask, pcrel_offset. */
46 static reloc_howto_type x86_64_elf_howto_table[] =
47 {
48 HOWTO(R_X86_64_NONE, 0, 3, 0, FALSE, 0, complain_overflow_dont,
49 bfd_elf_generic_reloc, "R_X86_64_NONE", FALSE, 0x00000000, 0x00000000,
50 FALSE),
51 HOWTO(R_X86_64_64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
52 bfd_elf_generic_reloc, "R_X86_64_64", FALSE, MINUS_ONE, MINUS_ONE,
53 FALSE),
54 HOWTO(R_X86_64_PC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
55 bfd_elf_generic_reloc, "R_X86_64_PC32", FALSE, 0xffffffff, 0xffffffff,
56 TRUE),
57 HOWTO(R_X86_64_GOT32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
58 bfd_elf_generic_reloc, "R_X86_64_GOT32", FALSE, 0xffffffff, 0xffffffff,
59 FALSE),
60 HOWTO(R_X86_64_PLT32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
61 bfd_elf_generic_reloc, "R_X86_64_PLT32", FALSE, 0xffffffff, 0xffffffff,
62 TRUE),
63 HOWTO(R_X86_64_COPY, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
64 bfd_elf_generic_reloc, "R_X86_64_COPY", FALSE, 0xffffffff, 0xffffffff,
65 FALSE),
66 HOWTO(R_X86_64_GLOB_DAT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
67 bfd_elf_generic_reloc, "R_X86_64_GLOB_DAT", FALSE, MINUS_ONE,
68 MINUS_ONE, FALSE),
69 HOWTO(R_X86_64_JUMP_SLOT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
70 bfd_elf_generic_reloc, "R_X86_64_JUMP_SLOT", FALSE, MINUS_ONE,
71 MINUS_ONE, FALSE),
72 HOWTO(R_X86_64_RELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
73 bfd_elf_generic_reloc, "R_X86_64_RELATIVE", FALSE, MINUS_ONE,
74 MINUS_ONE, FALSE),
75 HOWTO(R_X86_64_GOTPCREL, 0, 2, 32, TRUE, 0, complain_overflow_signed,
76 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL", FALSE, 0xffffffff,
77 0xffffffff, TRUE),
78 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
79 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
80 FALSE),
81 HOWTO(R_X86_64_32S, 0, 2, 32, FALSE, 0, complain_overflow_signed,
82 bfd_elf_generic_reloc, "R_X86_64_32S", FALSE, 0xffffffff, 0xffffffff,
83 FALSE),
84 HOWTO(R_X86_64_16, 0, 1, 16, FALSE, 0, complain_overflow_bitfield,
85 bfd_elf_generic_reloc, "R_X86_64_16", FALSE, 0xffff, 0xffff, FALSE),
86 HOWTO(R_X86_64_PC16,0, 1, 16, TRUE, 0, complain_overflow_bitfield,
87 bfd_elf_generic_reloc, "R_X86_64_PC16", FALSE, 0xffff, 0xffff, TRUE),
88 HOWTO(R_X86_64_8, 0, 0, 8, FALSE, 0, complain_overflow_bitfield,
89 bfd_elf_generic_reloc, "R_X86_64_8", FALSE, 0xff, 0xff, FALSE),
90 HOWTO(R_X86_64_PC8, 0, 0, 8, TRUE, 0, complain_overflow_signed,
91 bfd_elf_generic_reloc, "R_X86_64_PC8", FALSE, 0xff, 0xff, TRUE),
92 HOWTO(R_X86_64_DTPMOD64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
93 bfd_elf_generic_reloc, "R_X86_64_DTPMOD64", FALSE, MINUS_ONE,
94 MINUS_ONE, FALSE),
95 HOWTO(R_X86_64_DTPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
96 bfd_elf_generic_reloc, "R_X86_64_DTPOFF64", FALSE, MINUS_ONE,
97 MINUS_ONE, FALSE),
98 HOWTO(R_X86_64_TPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
99 bfd_elf_generic_reloc, "R_X86_64_TPOFF64", FALSE, MINUS_ONE,
100 MINUS_ONE, FALSE),
101 HOWTO(R_X86_64_TLSGD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
102 bfd_elf_generic_reloc, "R_X86_64_TLSGD", FALSE, 0xffffffff,
103 0xffffffff, TRUE),
104 HOWTO(R_X86_64_TLSLD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
105 bfd_elf_generic_reloc, "R_X86_64_TLSLD", FALSE, 0xffffffff,
106 0xffffffff, TRUE),
107 HOWTO(R_X86_64_DTPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
108 bfd_elf_generic_reloc, "R_X86_64_DTPOFF32", FALSE, 0xffffffff,
109 0xffffffff, FALSE),
110 HOWTO(R_X86_64_GOTTPOFF, 0, 2, 32, TRUE, 0, complain_overflow_signed,
111 bfd_elf_generic_reloc, "R_X86_64_GOTTPOFF", FALSE, 0xffffffff,
112 0xffffffff, TRUE),
113 HOWTO(R_X86_64_TPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
114 bfd_elf_generic_reloc, "R_X86_64_TPOFF32", FALSE, 0xffffffff,
115 0xffffffff, FALSE),
116 HOWTO(R_X86_64_PC64, 0, 4, 64, TRUE, 0, complain_overflow_bitfield,
117 bfd_elf_generic_reloc, "R_X86_64_PC64", FALSE, MINUS_ONE, MINUS_ONE,
118 TRUE),
119 HOWTO(R_X86_64_GOTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
120 bfd_elf_generic_reloc, "R_X86_64_GOTOFF64",
121 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
122 HOWTO(R_X86_64_GOTPC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
123 bfd_elf_generic_reloc, "R_X86_64_GOTPC32",
124 FALSE, 0xffffffff, 0xffffffff, TRUE),
125 HOWTO(R_X86_64_GOT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
126 bfd_elf_generic_reloc, "R_X86_64_GOT64", FALSE, MINUS_ONE, MINUS_ONE,
127 FALSE),
128 HOWTO(R_X86_64_GOTPCREL64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
129 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL64", FALSE, MINUS_ONE,
130 MINUS_ONE, TRUE),
131 HOWTO(R_X86_64_GOTPC64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
132 bfd_elf_generic_reloc, "R_X86_64_GOTPC64",
133 FALSE, MINUS_ONE, MINUS_ONE, TRUE),
134 HOWTO(R_X86_64_GOTPLT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
135 bfd_elf_generic_reloc, "R_X86_64_GOTPLT64", FALSE, MINUS_ONE,
136 MINUS_ONE, FALSE),
137 HOWTO(R_X86_64_PLTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
138 bfd_elf_generic_reloc, "R_X86_64_PLTOFF64", FALSE, MINUS_ONE,
139 MINUS_ONE, FALSE),
140 HOWTO(R_X86_64_SIZE32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
141 bfd_elf_generic_reloc, "R_X86_64_SIZE32", FALSE, 0xffffffff, 0xffffffff,
142 FALSE),
143 HOWTO(R_X86_64_SIZE64, 0, 4, 64, FALSE, 0, complain_overflow_unsigned,
144 bfd_elf_generic_reloc, "R_X86_64_SIZE64", FALSE, MINUS_ONE, MINUS_ONE,
145 FALSE),
146 HOWTO(R_X86_64_GOTPC32_TLSDESC, 0, 2, 32, TRUE, 0,
147 complain_overflow_bitfield, bfd_elf_generic_reloc,
148 "R_X86_64_GOTPC32_TLSDESC",
149 FALSE, 0xffffffff, 0xffffffff, TRUE),
150 HOWTO(R_X86_64_TLSDESC_CALL, 0, 0, 0, FALSE, 0,
151 complain_overflow_dont, bfd_elf_generic_reloc,
152 "R_X86_64_TLSDESC_CALL",
153 FALSE, 0, 0, FALSE),
154 HOWTO(R_X86_64_TLSDESC, 0, 4, 64, FALSE, 0,
155 complain_overflow_bitfield, bfd_elf_generic_reloc,
156 "R_X86_64_TLSDESC",
157 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
158 HOWTO(R_X86_64_IRELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
159 bfd_elf_generic_reloc, "R_X86_64_IRELATIVE", FALSE, MINUS_ONE,
160 MINUS_ONE, FALSE),
161 HOWTO(R_X86_64_RELATIVE64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
162 bfd_elf_generic_reloc, "R_X86_64_RELATIVE64", FALSE, MINUS_ONE,
163 MINUS_ONE, FALSE),
164 HOWTO(R_X86_64_PC32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
165 bfd_elf_generic_reloc, "R_X86_64_PC32_BND", FALSE, 0xffffffff, 0xffffffff,
166 TRUE),
167 HOWTO(R_X86_64_PLT32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
168 bfd_elf_generic_reloc, "R_X86_64_PLT32_BND", FALSE, 0xffffffff, 0xffffffff,
169 TRUE),
170 HOWTO(R_X86_64_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed,
171 bfd_elf_generic_reloc, "R_X86_64_GOTPCRELX", FALSE, 0xffffffff,
172 0xffffffff, TRUE),
173 HOWTO(R_X86_64_REX_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed,
174 bfd_elf_generic_reloc, "R_X86_64_REX_GOTPCRELX", FALSE, 0xffffffff,
175 0xffffffff, TRUE),
176
177 /* We have a gap in the reloc numbers here.
178 R_X86_64_standard counts the number up to this point, and
179 R_X86_64_vt_offset is the value to subtract from a reloc type of
180 R_X86_64_GNU_VT* to form an index into this table. */
181 #define R_X86_64_standard (R_X86_64_REX_GOTPCRELX + 1)
182 #define R_X86_64_vt_offset (R_X86_64_GNU_VTINHERIT - R_X86_64_standard)
183
184 /* GNU extension to record C++ vtable hierarchy. */
185 HOWTO (R_X86_64_GNU_VTINHERIT, 0, 4, 0, FALSE, 0, complain_overflow_dont,
186 NULL, "R_X86_64_GNU_VTINHERIT", FALSE, 0, 0, FALSE),
187
188 /* GNU extension to record C++ vtable member usage. */
189 HOWTO (R_X86_64_GNU_VTENTRY, 0, 4, 0, FALSE, 0, complain_overflow_dont,
190 _bfd_elf_rel_vtable_reloc_fn, "R_X86_64_GNU_VTENTRY", FALSE, 0, 0,
191 FALSE),
192
193 /* Use complain_overflow_bitfield on R_X86_64_32 for x32. */
194 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
195 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
196 FALSE)
197 };
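
/* Reading one entry as an example (a sketch, not normative): the
   R_X86_64_PC32 line above describes a 4-byte field (howto size 2 in
   BFD's size encoding), 32 significant bits, PC-relative, checked for
   signed overflow, with a 0xffffffff destination mask; the 64-bit
   relocations use howto size 4 (8 bytes) and MINUS_ONE masks.  */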
198
199 /* Set if a relocation is converted from a GOTPCREL relocation. */
200 #define R_X86_64_converted_reloc_bit (1 << 7)
201
202 #define X86_PCREL_TYPE_P(TYPE) \
203 ( ((TYPE) == R_X86_64_PC8) \
204 || ((TYPE) == R_X86_64_PC16) \
205 || ((TYPE) == R_X86_64_PC32) \
206 || ((TYPE) == R_X86_64_PC32_BND) \
207 || ((TYPE) == R_X86_64_PC64))
208
209 #define X86_SIZE_TYPE_P(TYPE) \
210 ((TYPE) == R_X86_64_SIZE32 || (TYPE) == R_X86_64_SIZE64)
211
212 /* Map BFD relocs to the x86_64 elf relocs. */
213 struct elf_reloc_map
214 {
215 bfd_reloc_code_real_type bfd_reloc_val;
216 unsigned char elf_reloc_val;
217 };
218
219 static const struct elf_reloc_map x86_64_reloc_map[] =
220 {
221 { BFD_RELOC_NONE, R_X86_64_NONE, },
222 { BFD_RELOC_64, R_X86_64_64, },
223 { BFD_RELOC_32_PCREL, R_X86_64_PC32, },
224 { BFD_RELOC_X86_64_GOT32, R_X86_64_GOT32,},
225 { BFD_RELOC_X86_64_PLT32, R_X86_64_PLT32,},
226 { BFD_RELOC_X86_64_COPY, R_X86_64_COPY, },
227 { BFD_RELOC_X86_64_GLOB_DAT, R_X86_64_GLOB_DAT, },
228 { BFD_RELOC_X86_64_JUMP_SLOT, R_X86_64_JUMP_SLOT, },
229 { BFD_RELOC_X86_64_RELATIVE, R_X86_64_RELATIVE, },
230 { BFD_RELOC_X86_64_GOTPCREL, R_X86_64_GOTPCREL, },
231 { BFD_RELOC_32, R_X86_64_32, },
232 { BFD_RELOC_X86_64_32S, R_X86_64_32S, },
233 { BFD_RELOC_16, R_X86_64_16, },
234 { BFD_RELOC_16_PCREL, R_X86_64_PC16, },
235 { BFD_RELOC_8, R_X86_64_8, },
236 { BFD_RELOC_8_PCREL, R_X86_64_PC8, },
237 { BFD_RELOC_X86_64_DTPMOD64, R_X86_64_DTPMOD64, },
238 { BFD_RELOC_X86_64_DTPOFF64, R_X86_64_DTPOFF64, },
239 { BFD_RELOC_X86_64_TPOFF64, R_X86_64_TPOFF64, },
240 { BFD_RELOC_X86_64_TLSGD, R_X86_64_TLSGD, },
241 { BFD_RELOC_X86_64_TLSLD, R_X86_64_TLSLD, },
242 { BFD_RELOC_X86_64_DTPOFF32, R_X86_64_DTPOFF32, },
243 { BFD_RELOC_X86_64_GOTTPOFF, R_X86_64_GOTTPOFF, },
244 { BFD_RELOC_X86_64_TPOFF32, R_X86_64_TPOFF32, },
245 { BFD_RELOC_64_PCREL, R_X86_64_PC64, },
246 { BFD_RELOC_X86_64_GOTOFF64, R_X86_64_GOTOFF64, },
247 { BFD_RELOC_X86_64_GOTPC32, R_X86_64_GOTPC32, },
248 { BFD_RELOC_X86_64_GOT64, R_X86_64_GOT64, },
249 { BFD_RELOC_X86_64_GOTPCREL64,R_X86_64_GOTPCREL64, },
250 { BFD_RELOC_X86_64_GOTPC64, R_X86_64_GOTPC64, },
251 { BFD_RELOC_X86_64_GOTPLT64, R_X86_64_GOTPLT64, },
252 { BFD_RELOC_X86_64_PLTOFF64, R_X86_64_PLTOFF64, },
253 { BFD_RELOC_SIZE32, R_X86_64_SIZE32, },
254 { BFD_RELOC_SIZE64, R_X86_64_SIZE64, },
255 { BFD_RELOC_X86_64_GOTPC32_TLSDESC, R_X86_64_GOTPC32_TLSDESC, },
256 { BFD_RELOC_X86_64_TLSDESC_CALL, R_X86_64_TLSDESC_CALL, },
257 { BFD_RELOC_X86_64_TLSDESC, R_X86_64_TLSDESC, },
258 { BFD_RELOC_X86_64_IRELATIVE, R_X86_64_IRELATIVE, },
259 { BFD_RELOC_X86_64_PC32_BND, R_X86_64_PC32_BND, },
260 { BFD_RELOC_X86_64_PLT32_BND, R_X86_64_PLT32_BND, },
261 { BFD_RELOC_X86_64_GOTPCRELX, R_X86_64_GOTPCRELX, },
262 { BFD_RELOC_X86_64_REX_GOTPCRELX, R_X86_64_REX_GOTPCRELX, },
263 { BFD_RELOC_VTABLE_INHERIT, R_X86_64_GNU_VTINHERIT, },
264 { BFD_RELOC_VTABLE_ENTRY, R_X86_64_GNU_VTENTRY, },
265 };
266
267 static reloc_howto_type *
268 elf_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type)
269 {
270 unsigned i;
271
272 if (r_type == (unsigned int) R_X86_64_32)
273 {
274 if (ABI_64_P (abfd))
275 i = r_type;
276 else
277 i = ARRAY_SIZE (x86_64_elf_howto_table) - 1;
278 }
279 else if (r_type < (unsigned int) R_X86_64_GNU_VTINHERIT
280 || r_type >= (unsigned int) R_X86_64_max)
281 {
282 if (r_type >= (unsigned int) R_X86_64_standard)
283 {
284 /* xgettext:c-format */
285 _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
286 abfd, r_type);
287 bfd_set_error (bfd_error_bad_value);
288 return NULL;
289 }
290 i = r_type;
291 }
292 else
293 i = r_type - (unsigned int) R_X86_64_vt_offset;
294 BFD_ASSERT (x86_64_elf_howto_table[i].type == r_type);
295 return &x86_64_elf_howto_table[i];
296 }
297
298 /* Given a BFD reloc type, return a HOWTO structure. */
299 static reloc_howto_type *
300 elf_x86_64_reloc_type_lookup (bfd *abfd,
301 bfd_reloc_code_real_type code)
302 {
303 unsigned int i;
304
305 for (i = 0; i < sizeof (x86_64_reloc_map) / sizeof (struct elf_reloc_map);
306 i++)
307 {
308 if (x86_64_reloc_map[i].bfd_reloc_val == code)
309 return elf_x86_64_rtype_to_howto (abfd,
310 x86_64_reloc_map[i].elf_reloc_val);
311 }
312 return NULL;
313 }
314
315 static reloc_howto_type *
316 elf_x86_64_reloc_name_lookup (bfd *abfd,
317 const char *r_name)
318 {
319 unsigned int i;
320
321 if (!ABI_64_P (abfd) && strcasecmp (r_name, "R_X86_64_32") == 0)
322 {
323 /* Get x32 R_X86_64_32. */
324 reloc_howto_type *reloc
325 = &x86_64_elf_howto_table[ARRAY_SIZE (x86_64_elf_howto_table) - 1];
326 BFD_ASSERT (reloc->type == (unsigned int) R_X86_64_32);
327 return reloc;
328 }
329
330 for (i = 0; i < ARRAY_SIZE (x86_64_elf_howto_table); i++)
331 if (x86_64_elf_howto_table[i].name != NULL
332 && strcasecmp (x86_64_elf_howto_table[i].name, r_name) == 0)
333 return &x86_64_elf_howto_table[i];
334
335 return NULL;
336 }
337
338 /* Given an x86_64 ELF reloc type, fill in an arelent structure. */
339
340 static bfd_boolean
341 elf_x86_64_info_to_howto (bfd *abfd, arelent *cache_ptr,
342 Elf_Internal_Rela *dst)
343 {
344 unsigned r_type;
345
346 r_type = ELF32_R_TYPE (dst->r_info);
347 cache_ptr->howto = elf_x86_64_rtype_to_howto (abfd, r_type);
348 if (cache_ptr->howto == NULL)
349 return FALSE;
350 BFD_ASSERT (r_type == cache_ptr->howto->type || cache_ptr->howto->type == R_X86_64_NONE);
351 return TRUE;
352 }
353 \f
354 /* Support for core dump NOTE sections. */
355 static bfd_boolean
356 elf_x86_64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
357 {
358 int offset;
359 size_t size;
360
361 switch (note->descsz)
362 {
363 default:
364 return FALSE;
365
366 case 296: /* sizeof(struct elf_prstatus) on Linux/x32 */
367 /* pr_cursig */
368 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
369
370 /* pr_pid */
371 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
372
373 /* pr_reg */
374 offset = 72;
375 size = 216;
376
377 break;
378
379 case 336: /* sizeof(struct elf_prstatus) on Linux/x86_64 */
380 /* pr_cursig */
381 elf_tdata (abfd)->core->signal
382 = bfd_get_16 (abfd, note->descdata + 12);
383
384 /* pr_pid */
385 elf_tdata (abfd)->core->lwpid
386 = bfd_get_32 (abfd, note->descdata + 32);
387
388 /* pr_reg */
389 offset = 112;
390 size = 216;
391
392 break;
393 }
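
  /* In both layouts pr_reg is 216 bytes, matching the 27 eight-byte
     general registers of the x86-64 user_regs_struct; only the offset
     of pr_reg within struct elf_prstatus differs between x32 and
     LP64.  */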
394
395 /* Make a ".reg/999" section. */
396 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
397 size, note->descpos + offset);
398 }
399
400 static bfd_boolean
401 elf_x86_64_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
402 {
403 switch (note->descsz)
404 {
405 default:
406 return FALSE;
407
408 case 124: /* sizeof(struct elf_prpsinfo) on Linux/x32 */
409 elf_tdata (abfd)->core->pid
410 = bfd_get_32 (abfd, note->descdata + 12);
411 elf_tdata (abfd)->core->program
412 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
413 elf_tdata (abfd)->core->command
414 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
415 break;
416
417 case 136: /* sizeof(struct elf_prpsinfo) on Linux/x86_64 */
418 elf_tdata (abfd)->core->pid
419 = bfd_get_32 (abfd, note->descdata + 24);
420 elf_tdata (abfd)->core->program
421 = _bfd_elfcore_strndup (abfd, note->descdata + 40, 16);
422 elf_tdata (abfd)->core->command
423 = _bfd_elfcore_strndup (abfd, note->descdata + 56, 80);
424 }
425
426 /* Note that for some reason, a spurious space is tacked
427 onto the end of the args in some implementations (at least one,
428 anyway), so strip it off if it exists. */
429
430 {
431 char *command = elf_tdata (abfd)->core->command;
432 int n = strlen (command);
433
434 if (0 < n && command[n - 1] == ' ')
435 command[n - 1] = '\0';
436 }
437
438 return TRUE;
439 }
440
441 #ifdef CORE_HEADER
442 # if GCC_VERSION >= 8000
443 # pragma GCC diagnostic push
444 # pragma GCC diagnostic ignored "-Wstringop-truncation"
445 # endif
446 static char *
447 elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz,
448 int note_type, ...)
449 {
450 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
451 va_list ap;
452 const char *fname, *psargs;
453 long pid;
454 int cursig;
455 const void *gregs;
456
457 switch (note_type)
458 {
459 default:
460 return NULL;
461
462 case NT_PRPSINFO:
463 va_start (ap, note_type);
464 fname = va_arg (ap, const char *);
465 psargs = va_arg (ap, const char *);
466 va_end (ap);
467
468 if (bed->s->elfclass == ELFCLASS32)
469 {
470 prpsinfo32_t data;
471 memset (&data, 0, sizeof (data));
472 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
473 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
474 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
475 &data, sizeof (data));
476 }
477 else
478 {
479 prpsinfo64_t data;
480 memset (&data, 0, sizeof (data));
481 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
482 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
483 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
484 &data, sizeof (data));
485 }
486 /* NOTREACHED */
487
488 case NT_PRSTATUS:
489 va_start (ap, note_type);
490 pid = va_arg (ap, long);
491 cursig = va_arg (ap, int);
492 gregs = va_arg (ap, const void *);
493 va_end (ap);
494
495 if (bed->s->elfclass == ELFCLASS32)
496 {
497 if (bed->elf_machine_code == EM_X86_64)
498 {
499 prstatusx32_t prstat;
500 memset (&prstat, 0, sizeof (prstat));
501 prstat.pr_pid = pid;
502 prstat.pr_cursig = cursig;
503 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
504 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
505 &prstat, sizeof (prstat));
506 }
507 else
508 {
509 prstatus32_t prstat;
510 memset (&prstat, 0, sizeof (prstat));
511 prstat.pr_pid = pid;
512 prstat.pr_cursig = cursig;
513 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
514 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
515 &prstat, sizeof (prstat));
516 }
517 }
518 else
519 {
520 prstatus64_t prstat;
521 memset (&prstat, 0, sizeof (prstat));
522 prstat.pr_pid = pid;
523 prstat.pr_cursig = cursig;
524 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
525 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
526 &prstat, sizeof (prstat));
527 }
528 }
529 /* NOTREACHED */
530 }
531 # if GCC_VERSION >= 8000
532 # pragma GCC diagnostic pop
533 # endif
534 #endif
535 \f
536 /* Functions for the x86-64 ELF linker. */
537
538 /* The size in bytes of an entry in the global offset table. */
539
540 #define GOT_ENTRY_SIZE 8
541
542 /* The size in bytes of an entry in the lazy procedure linkage table. */
543
544 #define LAZY_PLT_ENTRY_SIZE 16
545
546 /* The size in bytes of an entry in the non-lazy procedure linkage
547 table. */
548
549 #define NON_LAZY_PLT_ENTRY_SIZE 8
550
551 /* The first entry in a lazy procedure linkage table looks like this.
552 See the SVR4 ABI i386 supplement and the x86-64 ABI to see how this
553 works. */
554
555 static const bfd_byte elf_x86_64_lazy_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
556 {
557 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
558 0xff, 0x25, 16, 0, 0, 0, /* jmpq *GOT+16(%rip) */
559 0x0f, 0x1f, 0x40, 0x00 /* nopl 0(%rax) */
560 };
561
562 /* Subsequent entries in a lazy procedure linkage table look like this. */
563
564 static const bfd_byte elf_x86_64_lazy_plt_entry[LAZY_PLT_ENTRY_SIZE] =
565 {
566 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
567 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
568 0x68, /* pushq immediate */
569 0, 0, 0, 0, /* replaced with index into relocation table. */
570 0xe9, /* jmp relative */
571 0, 0, 0, 0 /* replaced with offset to start of .plt0. */
572 };
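
/* A rough sketch of how lazy binding through these entries works: each
   function's GOT slot initially holds the address of the pushq in its
   own PLT entry, so the first "jmpq *name@GOTPC(%rip)" falls through to
   push the relocation index and branch back to PLT0.  PLT0 in turn
   pushes the value at GOT+8 (the link map) and jumps through GOT+16
   (the dynamic linker's resolver), which patches the GOT slot so that
   later calls go straight to the target.  */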
573
574 /* The first entry in a lazy procedure linkage table with BND prefix
575 looks like this. */
576
577 static const bfd_byte elf_x86_64_lazy_bnd_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
578 {
579 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
580 0xf2, 0xff, 0x25, 16, 0, 0, 0, /* bnd jmpq *GOT+16(%rip) */
581 0x0f, 0x1f, 0 /* nopl (%rax) */
582 };
583
584 /* Subsequent entries for branches with BND prefix in a lazy procedure
585 linkage table look like this. */
586
587 static const bfd_byte elf_x86_64_lazy_bnd_plt_entry[LAZY_PLT_ENTRY_SIZE] =
588 {
589 0x68, 0, 0, 0, 0, /* pushq immediate */
590 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
591 0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */
592 };
593
594 /* The first entry in the IBT-enabled lazy procedure linkage table is
595 the same as the lazy PLT with BND prefix so that bound registers are
596 preserved when control is passed to the dynamic linker.  Subsequent
597 entries for an IBT-enabled lazy procedure linkage table look like
598 this. */
599
600 static const bfd_byte elf_x86_64_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
601 {
602 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
603 0x68, 0, 0, 0, 0, /* pushq immediate */
604 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
605 0x90 /* nop */
606 };
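
/* With Intel CET indirect-branch tracking enabled, an indirect call or
   jump must land on an ENDBR64 instruction, which is why every
   IBT-enabled PLT entry starts with endbr64; apart from that the
   layout mirrors the BND variant above.  */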
607
608 /* The first entry in the x32 IBT-enabled lazy procedure linkage table
609 is the same as the normal lazy PLT. Subsequent entries for an
610 x32 IBT-enabled lazy procedure linkage table look like this. */
611
612 static const bfd_byte elf_x32_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
613 {
614 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
615 0x68, 0, 0, 0, 0, /* pushq immediate */
616 0xe9, 0, 0, 0, 0, /* jmpq relative */
617 0x66, 0x90 /* xchg %ax,%ax */
618 };
619
620 /* Entries in the non-lazy procedure linkage table look like this. */
621
622 static const bfd_byte elf_x86_64_non_lazy_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
623 {
624 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
625 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
626 0x66, 0x90 /* xchg %ax,%ax */
627 };
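
/* A non-lazy entry is used when lazy binding is not wanted (for
   example with -z now); its GOT slot is fully resolved before the
   first call, so the entry is just the 6-byte indirect jump padded to
   8 bytes.  */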
628
629 /* Entries for branches with BND prefix in the non-lazy procedure
630 linkage table look like this. */
631
632 static const bfd_byte elf_x86_64_non_lazy_bnd_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
633 {
634 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
635 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
636 0x90 /* nop */
637 };
638
639 /* Entries for branches with IBT enabled in the non-lazy procedure
640 linkage table look like this. They have the same size as the lazy
641 PLT entry. */
642
643 static const bfd_byte elf_x86_64_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
644 {
645 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
646 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
647 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
648 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopl 0x0(%rax,%rax,1) */
649 };
650
651 /* Entries for branches with IBT enabled in the x32 non-lazy procedure
652 linkage table look like this. They have the same size as the lazy
653 PLT entry. */
654
655 static const bfd_byte elf_x32_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
656 {
657 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
658 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
659 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
660 0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopw 0x0(%rax,%rax,1) */
661 };
662
663 /* The TLSDESC entry in a lazy procedure linkage table. */
664 static const bfd_byte elf_x86_64_tlsdesc_plt_entry[LAZY_PLT_ENTRY_SIZE] =
665 {
666 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
667 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
668 0xff, 0x25, 16, 0, 0, 0 /* jmpq *GOT+TDG(%rip) */
669 };
670
671 /* .eh_frame covering the lazy .plt section. */
672
673 static const bfd_byte elf_x86_64_eh_frame_lazy_plt[] =
674 {
675 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
676 0, 0, 0, 0, /* CIE ID */
677 1, /* CIE version */
678 'z', 'R', 0, /* Augmentation string */
679 1, /* Code alignment factor */
680 0x78, /* Data alignment factor */
681 16, /* Return address column */
682 1, /* Augmentation size */
683 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
684 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
685 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
686 DW_CFA_nop, DW_CFA_nop,
687
688 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
689 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
690 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
691 0, 0, 0, 0, /* .plt size goes here */
692 0, /* Augmentation size */
693 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
694 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
695 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
696 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
697 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
698 11, /* Block length */
699 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
700 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
701 DW_OP_lit15, DW_OP_and, DW_OP_lit11, DW_OP_ge,
702 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
703 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
704 };
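
/* The DW_CFA_def_cfa_expression above computes, roughly,
     CFA = %rsp + 8 + ((((%rip & 15) >= 11) ? 1 : 0) << 3)
   i.e. once execution within a 16-byte lazy PLT entry has passed the
   pushq (the jmp back to PLT0 starts at offset 11), the CFA is 8 bytes
   further away because of the extra word pushed.  The BND and IBT
   variants below use different literals to match their own entry
   layouts.  */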
705
706 /* .eh_frame covering the lazy BND .plt section. */
707
708 static const bfd_byte elf_x86_64_eh_frame_lazy_bnd_plt[] =
709 {
710 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
711 0, 0, 0, 0, /* CIE ID */
712 1, /* CIE version */
713 'z', 'R', 0, /* Augmentation string */
714 1, /* Code alignment factor */
715 0x78, /* Data alignment factor */
716 16, /* Return address column */
717 1, /* Augmentation size */
718 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
719 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
720 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
721 DW_CFA_nop, DW_CFA_nop,
722
723 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
724 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
725 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
726 0, 0, 0, 0, /* .plt size goes here */
727 0, /* Augmentation size */
728 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
729 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
730 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
731 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
732 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
733 11, /* Block length */
734 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
735 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
736 DW_OP_lit15, DW_OP_and, DW_OP_lit5, DW_OP_ge,
737 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
738 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
739 };
740
741 /* .eh_frame covering the lazy .plt section with IBT-enabled. */
742
743 static const bfd_byte elf_x86_64_eh_frame_lazy_ibt_plt[] =
744 {
745 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
746 0, 0, 0, 0, /* CIE ID */
747 1, /* CIE version */
748 'z', 'R', 0, /* Augmentation string */
749 1, /* Code alignment factor */
750 0x78, /* Data alignment factor */
751 16, /* Return address column */
752 1, /* Augmentation size */
753 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
754 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
755 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
756 DW_CFA_nop, DW_CFA_nop,
757
758 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
759 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
760 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
761 0, 0, 0, 0, /* .plt size goes here */
762 0, /* Augmentation size */
763 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
764 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
765 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
766 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
767 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
768 11, /* Block length */
769 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
770 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
771 DW_OP_lit15, DW_OP_and, DW_OP_lit10, DW_OP_ge,
772 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
773 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
774 };
775
776 /* .eh_frame covering the x32 lazy .plt section with IBT-enabled. */
777
778 static const bfd_byte elf_x32_eh_frame_lazy_ibt_plt[] =
779 {
780 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
781 0, 0, 0, 0, /* CIE ID */
782 1, /* CIE version */
783 'z', 'R', 0, /* Augmentation string */
784 1, /* Code alignment factor */
785 0x78, /* Data alignment factor */
786 16, /* Return address column */
787 1, /* Augmentation size */
788 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
789 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
790 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
791 DW_CFA_nop, DW_CFA_nop,
792
793 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
794 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
795 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
796 0, 0, 0, 0, /* .plt size goes here */
797 0, /* Augmentation size */
798 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
799 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
800 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
801 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
802 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
803 11, /* Block length */
804 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
805 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
806 DW_OP_lit15, DW_OP_and, DW_OP_lit9, DW_OP_ge,
807 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
808 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
809 };
810
811 /* .eh_frame covering the non-lazy .plt section. */
812
813 static const bfd_byte elf_x86_64_eh_frame_non_lazy_plt[] =
814 {
815 #define PLT_GOT_FDE_LENGTH 20
816 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
817 0, 0, 0, 0, /* CIE ID */
818 1, /* CIE version */
819 'z', 'R', 0, /* Augmentation string */
820 1, /* Code alignment factor */
821 0x78, /* Data alignment factor */
822 16, /* Return address column */
823 1, /* Augmentation size */
824 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
825 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
826 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
827 DW_CFA_nop, DW_CFA_nop,
828
829 PLT_GOT_FDE_LENGTH, 0, 0, 0, /* FDE length */
830 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
831 0, 0, 0, 0, /* the start of non-lazy .plt goes here */
832 0, 0, 0, 0, /* non-lazy .plt size goes here */
833 0, /* Augmentation size */
834 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop,
835 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
836 };
837
838 /* These are the standard parameters. */
839 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_plt =
840 {
841 elf_x86_64_lazy_plt0_entry, /* plt0_entry */
842 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
843 elf_x86_64_lazy_plt_entry, /* plt_entry */
844 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
845 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
846 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
847 6, /* plt_tlsdesc_got1_offset */
848 12, /* plt_tlsdesc_got2_offset */
849 10, /* plt_tlsdesc_got1_insn_end */
850 16, /* plt_tlsdesc_got2_insn_end */
851 2, /* plt0_got1_offset */
852 8, /* plt0_got2_offset */
853 12, /* plt0_got2_insn_end */
854 2, /* plt_got_offset */
855 7, /* plt_reloc_offset */
856 12, /* plt_plt_offset */
857 6, /* plt_got_insn_size */
858 LAZY_PLT_ENTRY_SIZE, /* plt_plt_insn_end */
859 6, /* plt_lazy_offset */
860 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
861 elf_x86_64_lazy_plt_entry, /* pic_plt_entry */
862 elf_x86_64_eh_frame_lazy_plt, /* eh_frame_plt */
863 sizeof (elf_x86_64_eh_frame_lazy_plt) /* eh_frame_plt_size */
864 };
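
/* For example, plt_got_offset 2, plt_reloc_offset 7 and plt_plt_offset
   12 above are simply the byte offsets within elf_x86_64_lazy_plt_entry
   at which the GOT displacement, the relocation index of the pushq and
   the branch displacement back to PLT0 are filled in.  */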
865
866 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_plt =
867 {
868 elf_x86_64_non_lazy_plt_entry, /* plt_entry */
869 elf_x86_64_non_lazy_plt_entry, /* pic_plt_entry */
870 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
871 2, /* plt_got_offset */
872 6, /* plt_got_insn_size */
873 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
874 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
875 };
876
877 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_bnd_plt =
878 {
879 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
880 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
881 elf_x86_64_lazy_bnd_plt_entry, /* plt_entry */
882 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
883 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
884 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
885 6, /* plt_tlsdesc_got1_offset */
886 12, /* plt_tlsdesc_got2_offset */
887 10, /* plt_tlsdesc_got1_insn_end */
888 16, /* plt_tlsdesc_got2_insn_end */
889 2, /* plt0_got1_offset */
890 1+8, /* plt0_got2_offset */
891 1+12, /* plt0_got2_insn_end */
892 1+2, /* plt_got_offset */
893 1, /* plt_reloc_offset */
894 7, /* plt_plt_offset */
895 1+6, /* plt_got_insn_size */
896 11, /* plt_plt_insn_end */
897 0, /* plt_lazy_offset */
898 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
899 elf_x86_64_lazy_bnd_plt_entry, /* pic_plt_entry */
900 elf_x86_64_eh_frame_lazy_bnd_plt, /* eh_frame_plt */
901 sizeof (elf_x86_64_eh_frame_lazy_bnd_plt) /* eh_frame_plt_size */
902 };
903
904 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_bnd_plt =
905 {
906 elf_x86_64_non_lazy_bnd_plt_entry, /* plt_entry */
907 elf_x86_64_non_lazy_bnd_plt_entry, /* pic_plt_entry */
908 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
909 1+2, /* plt_got_offset */
910 1+6, /* plt_got_insn_size */
911 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
912 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
913 };
914
915 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_ibt_plt =
916 {
917 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
918 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
919 elf_x86_64_lazy_ibt_plt_entry, /* plt_entry */
920 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
921 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
922 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
923 6, /* plt_tlsdesc_got1_offset */
924 12, /* plt_tlsdesc_got2_offset */
925 10, /* plt_tlsdesc_got1_insn_end */
926 16, /* plt_tlsdesc_got2_insn_end */
927 2, /* plt0_got1_offset */
928 1+8, /* plt0_got2_offset */
929 1+12, /* plt0_got2_insn_end */
930 4+1+2, /* plt_got_offset */
931 4+1, /* plt_reloc_offset */
932 4+1+6, /* plt_plt_offset */
933 4+1+6, /* plt_got_insn_size */
934 4+1+5+5, /* plt_plt_insn_end */
935 0, /* plt_lazy_offset */
936 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
937 elf_x86_64_lazy_ibt_plt_entry, /* pic_plt_entry */
938 elf_x86_64_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
939 sizeof (elf_x86_64_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
940 };
941
942 static const struct elf_x86_lazy_plt_layout elf_x32_lazy_ibt_plt =
943 {
944 elf_x86_64_lazy_plt0_entry, /* plt0_entry */
945 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
946 elf_x32_lazy_ibt_plt_entry, /* plt_entry */
947 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
948 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
949 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
950 6, /* plt_tlsdesc_got1_offset */
951 12, /* plt_tlsdesc_got2_offset */
952 10, /* plt_tlsdesc_got1_insn_end */
953 16, /* plt_tlsdesc_got2_insn_end */
954 2, /* plt0_got1_offset */
955 8, /* plt0_got2_offset */
956 12, /* plt0_got2_insn_end */
957 4+2, /* plt_got_offset */
958 4+1, /* plt_reloc_offset */
959 4+6, /* plt_plt_offset */
960 4+6, /* plt_got_insn_size */
961 4+5+5, /* plt_plt_insn_end */
962 0, /* plt_lazy_offset */
963 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
964 elf_x32_lazy_ibt_plt_entry, /* pic_plt_entry */
965 elf_x32_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
966 sizeof (elf_x32_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
967 };
968
969 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_ibt_plt =
970 {
971 elf_x86_64_non_lazy_ibt_plt_entry, /* plt_entry */
972 elf_x86_64_non_lazy_ibt_plt_entry, /* pic_plt_entry */
973 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
974 4+1+2, /* plt_got_offset */
975 4+1+6, /* plt_got_insn_size */
976 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
977 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
978 };
979
980 static const struct elf_x86_non_lazy_plt_layout elf_x32_non_lazy_ibt_plt =
981 {
982 elf_x32_non_lazy_ibt_plt_entry, /* plt_entry */
983 elf_x32_non_lazy_ibt_plt_entry, /* pic_plt_entry */
984 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
985 4+2, /* plt_got_offset */
986 4+6, /* plt_got_insn_size */
987 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
988 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
989 };
990
991 static const struct elf_x86_backend_data elf_x86_64_arch_bed =
992 {
993 is_normal /* os */
994 };
995
996 #define elf_backend_arch_data &elf_x86_64_arch_bed
997
998 static bfd_boolean
999 elf64_x86_64_elf_object_p (bfd *abfd)
1000 {
1001 /* Set the right machine number for an x86-64 elf64 file. */
1002 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64);
1003 return TRUE;
1004 }
1005
1006 static bfd_boolean
1007 elf32_x86_64_elf_object_p (bfd *abfd)
1008 {
1009 /* Set the right machine number for an x86-64 elf32 file. */
1010 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32);
1011 return TRUE;
1012 }
1013
1014 /* Return TRUE if the TLS access code sequence supports transition
1015 from R_TYPE. */
1016
1017 static bfd_boolean
1018 elf_x86_64_check_tls_transition (bfd *abfd,
1019 struct bfd_link_info *info,
1020 asection *sec,
1021 bfd_byte *contents,
1022 Elf_Internal_Shdr *symtab_hdr,
1023 struct elf_link_hash_entry **sym_hashes,
1024 unsigned int r_type,
1025 const Elf_Internal_Rela *rel,
1026 const Elf_Internal_Rela *relend)
1027 {
1028 unsigned int val;
1029 unsigned long r_symndx;
1030 bfd_boolean largepic = FALSE;
1031 struct elf_link_hash_entry *h;
1032 bfd_vma offset;
1033 struct elf_x86_link_hash_table *htab;
1034 bfd_byte *call;
1035 bfd_boolean indirect_call;
1036
1037 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1038 offset = rel->r_offset;
1039 switch (r_type)
1040 {
1041 case R_X86_64_TLSGD:
1042 case R_X86_64_TLSLD:
1043 if ((rel + 1) >= relend)
1044 return FALSE;
1045
1046 if (r_type == R_X86_64_TLSGD)
1047 {
1048 /* Check transition from GD access model. For 64bit, only
1049 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1050 .word 0x6666; rex64; call __tls_get_addr@PLT
1051 or
1052 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1053 .byte 0x66; rex64
1054 call *__tls_get_addr@GOTPCREL(%rip)
1055 which may be converted to
1056 addr32 call __tls_get_addr
1057 can transition to a different access model. For 32bit, only
1058 leaq foo@tlsgd(%rip), %rdi
1059 .word 0x6666; rex64; call __tls_get_addr@PLT
1060 or
1061 leaq foo@tlsgd(%rip), %rdi
1062 .byte 0x66; rex64
1063 call *__tls_get_addr@GOTPCREL(%rip)
1064 which may be converted to
1065 addr32 call __tls_get_addr
1066 can transition to a different access model. For largepic,
1067 we also support:
1068 leaq foo@tlsgd(%rip), %rdi
1069 movabsq $__tls_get_addr@pltoff, %rax
1070 addq $r15, %rax
1071 call *%rax
1072 or
1073 leaq foo@tlsgd(%rip), %rdi
1074 movabsq $__tls_get_addr@pltoff, %rax
1075 addq $rbx, %rax
1076 call *%rax */
1077
1078 static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d };
1079
1080 if ((offset + 12) > sec->size)
1081 return FALSE;
1082
1083 call = contents + offset + 4;
1084 if (call[0] != 0x66
1085 || !((call[1] == 0x48
1086 && call[2] == 0xff
1087 && call[3] == 0x15)
1088 || (call[1] == 0x48
1089 && call[2] == 0x67
1090 && call[3] == 0xe8)
1091 || (call[1] == 0x66
1092 && call[2] == 0x48
1093 && call[3] == 0xe8)))
1094 {
1095 if (!ABI_64_P (abfd)
1096 || (offset + 19) > sec->size
1097 || offset < 3
1098 || memcmp (call - 7, leaq + 1, 3) != 0
1099 || memcmp (call, "\x48\xb8", 2) != 0
1100 || call[11] != 0x01
1101 || call[13] != 0xff
1102 || call[14] != 0xd0
1103 || !((call[10] == 0x48 && call[12] == 0xd8)
1104 || (call[10] == 0x4c && call[12] == 0xf8)))
1105 return FALSE;
1106 largepic = TRUE;
1107 }
1108 else if (ABI_64_P (abfd))
1109 {
1110 if (offset < 4
1111 || memcmp (contents + offset - 4, leaq, 4) != 0)
1112 return FALSE;
1113 }
1114 else
1115 {
1116 if (offset < 3
1117 || memcmp (contents + offset - 3, leaq + 1, 3) != 0)
1118 return FALSE;
1119 }
1120 indirect_call = call[2] == 0xff;
1121 }
1122 else
1123 {
1124 /* Check transition from LD access model. Only
1125 leaq foo@tlsld(%rip), %rdi;
1126 call __tls_get_addr@PLT
1127 or
1128 leaq foo@tlsld(%rip), %rdi;
1129 call *__tls_get_addr@GOTPCREL(%rip)
1130 which may be converted to
1131 addr32 call __tls_get_addr
1132 can transition to a different access model. For largepic
1133 we also support:
1134 leaq foo@tlsld(%rip), %rdi
1135 movabsq $__tls_get_addr@pltoff, %rax
1136 addq $r15, %rax
1137 call *%rax
1138 or
1139 leaq foo@tlsld(%rip), %rdi
1140 movabsq $__tls_get_addr@pltoff, %rax
1141 addq $rbx, %rax
1142 call *%rax */
1143
1144 static const unsigned char lea[] = { 0x48, 0x8d, 0x3d };
1145
1146 if (offset < 3 || (offset + 9) > sec->size)
1147 return FALSE;
1148
1149 if (memcmp (contents + offset - 3, lea, 3) != 0)
1150 return FALSE;
1151
1152 call = contents + offset + 4;
1153 if (!(call[0] == 0xe8
1154 || (call[0] == 0xff && call[1] == 0x15)
1155 || (call[0] == 0x67 && call[1] == 0xe8)))
1156 {
1157 if (!ABI_64_P (abfd)
1158 || (offset + 19) > sec->size
1159 || memcmp (call, "\x48\xb8", 2) != 0
1160 || call[11] != 0x01
1161 || call[13] != 0xff
1162 || call[14] != 0xd0
1163 || !((call[10] == 0x48 && call[12] == 0xd8)
1164 || (call[10] == 0x4c && call[12] == 0xf8)))
1165 return FALSE;
1166 largepic = TRUE;
1167 }
1168 indirect_call = call[0] == 0xff;
1169 }
1170
1171 r_symndx = htab->r_sym (rel[1].r_info);
1172 if (r_symndx < symtab_hdr->sh_info)
1173 return FALSE;
1174
1175 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1176 if (h == NULL
1177 || !((struct elf_x86_link_hash_entry *) h)->tls_get_addr)
1178 return FALSE;
1179 else
1180 {
1181 r_type = (ELF32_R_TYPE (rel[1].r_info)
1182 & ~R_X86_64_converted_reloc_bit);
1183 if (largepic)
1184 return r_type == R_X86_64_PLTOFF64;
1185 else if (indirect_call)
1186 return r_type == R_X86_64_GOTPCRELX;
1187 else
1188 return (r_type == R_X86_64_PC32 || r_type == R_X86_64_PLT32);
1189 }
1190
1191 case R_X86_64_GOTTPOFF:
1192 /* Check transition from IE access model:
1193 mov foo@gottpoff(%rip), %reg
1194 add foo@gottpoff(%rip), %reg
1195 */
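
      /* For instance "movq foo@gottpoff(%rip), %rax" is encoded as
	 0x48 0x8b 0x05 <rel32>, so relative to the relocation offset
	 the byte at -3 is the REX prefix, -2 is the opcode (0x8b for
	 mov, 0x03 for add) and -1 is the ModRM byte, whose
	 mod=00/rm=101 combination ((val & 0xc7) == 5) selects
	 RIP-relative addressing.  */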
1196
1197 /* Check REX prefix first. */
1198 if (offset >= 3 && (offset + 4) <= sec->size)
1199 {
1200 val = bfd_get_8 (abfd, contents + offset - 3);
1201 if (val != 0x48 && val != 0x4c)
1202 {
1203 /* X32 may have 0x44 REX prefix or no REX prefix. */
1204 if (ABI_64_P (abfd))
1205 return FALSE;
1206 }
1207 }
1208 else
1209 {
1210 /* X32 may not have any REX prefix. */
1211 if (ABI_64_P (abfd))
1212 return FALSE;
1213 if (offset < 2 || (offset + 3) > sec->size)
1214 return FALSE;
1215 }
1216
1217 val = bfd_get_8 (abfd, contents + offset - 2);
1218 if (val != 0x8b && val != 0x03)
1219 return FALSE;
1220
1221 val = bfd_get_8 (abfd, contents + offset - 1);
1222 return (val & 0xc7) == 5;
1223
1224 case R_X86_64_GOTPC32_TLSDESC:
1225 /* Check transition from GDesc access model:
1226 leaq x@tlsdesc(%rip), %rax
1227
1228 Make sure it's a leaq adding rip to a 32-bit offset
1229 into any register, although it's probably almost always
1230 going to be rax. */
1231
1232 if (offset < 3 || (offset + 4) > sec->size)
1233 return FALSE;
1234
1235 val = bfd_get_8 (abfd, contents + offset - 3);
1236 if ((val & 0xfb) != 0x48)
1237 return FALSE;
1238
1239 if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d)
1240 return FALSE;
1241
1242 val = bfd_get_8 (abfd, contents + offset - 1);
1243 return (val & 0xc7) == 0x05;
1244
1245 case R_X86_64_TLSDESC_CALL:
1246 /* Check transition from GDesc access model:
1247 call *x@tlsdesc(%rax)
1248 */
1249 if (offset + 2 <= sec->size)
1250 {
1251 /* Make sure that it's a call *x@tlsdesc(%rax). */
1252 call = contents + offset;
1253 return call[0] == 0xff && call[1] == 0x10;
1254 }
1255
1256 return FALSE;
1257
1258 default:
1259 abort ();
1260 }
1261 }
1262
1263 /* Return TRUE if the TLS access transition is OK or no transition
1264 will be performed. Update R_TYPE if there is a transition. */
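
/* Roughly, when linking an executable this means: GD, GDesc and IE
   accesses to a symbol resolved in the executable itself become LE
   (R_X86_64_TPOFF32), GD and GDesc accesses to other symbols become IE
   (R_X86_64_GOTTPOFF), and LD becomes LE; for a shared object no
   transition is made.  */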
1265
1266 static bfd_boolean
1267 elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd,
1268 asection *sec, bfd_byte *contents,
1269 Elf_Internal_Shdr *symtab_hdr,
1270 struct elf_link_hash_entry **sym_hashes,
1271 unsigned int *r_type, int tls_type,
1272 const Elf_Internal_Rela *rel,
1273 const Elf_Internal_Rela *relend,
1274 struct elf_link_hash_entry *h,
1275 unsigned long r_symndx,
1276 bfd_boolean from_relocate_section)
1277 {
1278 unsigned int from_type = *r_type;
1279 unsigned int to_type = from_type;
1280 bfd_boolean check = TRUE;
1281
1282 /* Skip TLS transition for functions. */
1283 if (h != NULL
1284 && (h->type == STT_FUNC
1285 || h->type == STT_GNU_IFUNC))
1286 return TRUE;
1287
1288 switch (from_type)
1289 {
1290 case R_X86_64_TLSGD:
1291 case R_X86_64_GOTPC32_TLSDESC:
1292 case R_X86_64_TLSDESC_CALL:
1293 case R_X86_64_GOTTPOFF:
1294 if (bfd_link_executable (info))
1295 {
1296 if (h == NULL)
1297 to_type = R_X86_64_TPOFF32;
1298 else
1299 to_type = R_X86_64_GOTTPOFF;
1300 }
1301
1302 /* When we are called from elf_x86_64_relocate_section, there may
1303 be additional transitions based on TLS_TYPE. */
1304 if (from_relocate_section)
1305 {
1306 unsigned int new_to_type = to_type;
1307
1308 if (TLS_TRANSITION_IE_TO_LE_P (info, h, tls_type))
1309 new_to_type = R_X86_64_TPOFF32;
1310
1311 if (to_type == R_X86_64_TLSGD
1312 || to_type == R_X86_64_GOTPC32_TLSDESC
1313 || to_type == R_X86_64_TLSDESC_CALL)
1314 {
1315 if (tls_type == GOT_TLS_IE)
1316 new_to_type = R_X86_64_GOTTPOFF;
1317 }
1318
1319 /* We checked the transition before when we were called from
1320 elf_x86_64_check_relocs. We only want to check the new
1321 transition which hasn't been checked before. */
1322 check = new_to_type != to_type && from_type == to_type;
1323 to_type = new_to_type;
1324 }
1325
1326 break;
1327
1328 case R_X86_64_TLSLD:
1329 if (bfd_link_executable (info))
1330 to_type = R_X86_64_TPOFF32;
1331 break;
1332
1333 default:
1334 return TRUE;
1335 }
1336
1337 /* Return TRUE if there is no transition. */
1338 if (from_type == to_type)
1339 return TRUE;
1340
1341 /* Check if the transition can be performed. */
1342 if (check
1343 && ! elf_x86_64_check_tls_transition (abfd, info, sec, contents,
1344 symtab_hdr, sym_hashes,
1345 from_type, rel, relend))
1346 {
1347 reloc_howto_type *from, *to;
1348 const char *name;
1349
1350 from = elf_x86_64_rtype_to_howto (abfd, from_type);
1351 to = elf_x86_64_rtype_to_howto (abfd, to_type);
1352
1353 if (from == NULL || to == NULL)
1354 return FALSE;
1355
1356 if (h)
1357 name = h->root.root.string;
1358 else
1359 {
1360 struct elf_x86_link_hash_table *htab;
1361
1362 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1363 if (htab == NULL)
1364 name = "*unknown*";
1365 else
1366 {
1367 Elf_Internal_Sym *isym;
1368
1369 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1370 abfd, r_symndx);
1371 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1372 }
1373 }
1374
1375 _bfd_error_handler
1376 /* xgettext:c-format */
1377 (_("%pB: TLS transition from %s to %s against `%s' at %#" PRIx64
1378 " in section `%pA' failed"),
1379 abfd, from->name, to->name, name, (uint64_t) rel->r_offset, sec);
1380 bfd_set_error (bfd_error_bad_value);
1381 return FALSE;
1382 }
1383
1384 *r_type = to_type;
1385 return TRUE;
1386 }
1387
1388 /* Rename some of the generic section flags to better document how they
1389 are used here. */
1390 #define check_relocs_failed sec_flg0
1391
1392 static bfd_boolean
1393 elf_x86_64_need_pic (struct bfd_link_info *info,
1394 bfd *input_bfd, asection *sec,
1395 struct elf_link_hash_entry *h,
1396 Elf_Internal_Shdr *symtab_hdr,
1397 Elf_Internal_Sym *isym,
1398 reloc_howto_type *howto)
1399 {
1400 const char *v = "";
1401 const char *und = "";
1402 const char *pic = "";
1403 const char *object;
1404
1405 const char *name;
1406 if (h)
1407 {
1408 name = h->root.root.string;
1409 switch (ELF_ST_VISIBILITY (h->other))
1410 {
1411 case STV_HIDDEN:
1412 v = _("hidden symbol ");
1413 break;
1414 case STV_INTERNAL:
1415 v = _("internal symbol ");
1416 break;
1417 case STV_PROTECTED:
1418 v = _("protected symbol ");
1419 break;
1420 default:
1421 if (((struct elf_x86_link_hash_entry *) h)->def_protected)
1422 v = _("protected symbol ");
1423 else
1424 v = _("symbol ");
1425 pic = _("; recompile with -fPIC");
1426 break;
1427 }
1428
1429 if (!SYMBOL_DEFINED_NON_SHARED_P (h) && !h->def_dynamic)
1430 und = _("undefined ");
1431 }
1432 else
1433 {
1434 name = bfd_elf_sym_name (input_bfd, symtab_hdr, isym, NULL);
1435 pic = _("; recompile with -fPIC");
1436 }
1437
1438 if (bfd_link_dll (info))
1439 object = _("a shared object");
1440 else if (bfd_link_pie (info))
1441 object = _("a PIE object");
1442 else
1443 object = _("a PDE object");
1444
1445 /* xgettext:c-format */
1446 _bfd_error_handler (_("%pB: relocation %s against %s%s`%s' can "
1447 "not be used when making %s%s"),
1448 input_bfd, howto->name, und, v, name,
1449 object, pic);
1450 bfd_set_error (bfd_error_bad_value);
1451 sec->check_relocs_failed = 1;
1452 return FALSE;
1453 }
1454
1455 /* With the local symbol, foo, we convert
1456 mov foo@GOTPCREL(%rip), %reg
1457 to
1458 lea foo(%rip), %reg
1459 and convert
1460 call/jmp *foo@GOTPCREL(%rip)
1461 to
1462 nop call foo/jmp foo nop
1463 When PIC is false, convert
1464 test %reg, foo@GOTPCREL(%rip)
1465 to
1466 test $foo, %reg
1467 and convert
1468 binop foo@GOTPCREL(%rip), %reg
1469 to
1470 binop $foo, %reg
1471 where binop is one of adc, add, and, cmp, or, sbb, sub, xor
1472 instructions. */
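
/* As a concrete illustration (assuming %rax as the register):
   "movq foo@GOTPCREL(%rip), %rax" is 0x48 0x8b 0x05 <rel32>, and the
   lea form differs only in the opcode byte (0x8d), so that conversion
   is a one-byte rewrite plus a change of relocation type; for
   "call *foo@GOTPCREL(%rip)" (0xff 0x15 <rel32>, 6 bytes) the 5-byte
   direct call 0xe8 <rel32> is padded with a one-byte prefix or nop so
   the instruction length is preserved.  */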
1473
1474 static bfd_boolean
1475 elf_x86_64_convert_load_reloc (bfd *abfd,
1476 bfd_byte *contents,
1477 unsigned int *r_type_p,
1478 Elf_Internal_Rela *irel,
1479 struct elf_link_hash_entry *h,
1480 bfd_boolean *converted,
1481 struct bfd_link_info *link_info)
1482 {
1483 struct elf_x86_link_hash_table *htab;
1484 bfd_boolean is_pic;
1485 bfd_boolean no_overflow;
1486 bfd_boolean relocx;
1487 bfd_boolean to_reloc_pc32;
1488 asection *tsec;
1489 bfd_signed_vma raddend;
1490 unsigned int opcode;
1491 unsigned int modrm;
1492 unsigned int r_type = *r_type_p;
1493 unsigned int r_symndx;
1494 bfd_vma roff = irel->r_offset;
1495
1496 if (roff < (r_type == R_X86_64_REX_GOTPCRELX ? 3 : 2))
1497 return TRUE;
1498
1499 raddend = irel->r_addend;
1500 /* Addend for 32-bit PC-relative relocation must be -4. */
1501 if (raddend != -4)
1502 return TRUE;
1503
1504 htab = elf_x86_hash_table (link_info, X86_64_ELF_DATA);
1505 is_pic = bfd_link_pic (link_info);
1506
1507 relocx = (r_type == R_X86_64_GOTPCRELX
1508 || r_type == R_X86_64_REX_GOTPCRELX);
1509
1510 /* TRUE if --no-relax is used. */
1511 no_overflow = link_info->disable_target_specific_optimizations > 1;
1512
1513 r_symndx = htab->r_sym (irel->r_info);
1514
1515 opcode = bfd_get_8 (abfd, contents + roff - 2);
1516
1517 /* The mov to lea conversion has been supported for a while, so do it even for R_X86_64_GOTPCREL. */
1518 if (opcode != 0x8b)
1519 {
1520 /* Only convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX
1521 for call, jmp or one of adc, add, and, cmp, or, sbb, sub,
1522 test, xor instructions. */
1523 if (!relocx)
1524 return TRUE;
1525 }
1526
1527 /* We convert only to R_X86_64_PC32:
1528 1. Branch.
1529 2. R_X86_64_GOTPCREL since we can't modify REX byte.
1530 3. no_overflow is true.
1531 4. PIC.
1532 */
1533 to_reloc_pc32 = (opcode == 0xff
1534 || !relocx
1535 || no_overflow
1536 || is_pic);
1537
1538 /* Get the symbol referred to by the reloc. */
1539 if (h == NULL)
1540 {
1541 Elf_Internal_Sym *isym
1542 = bfd_sym_from_r_symndx (&htab->sym_cache, abfd, r_symndx);
1543
1544 /* Skip relocation against undefined symbols. */
1545 if (isym->st_shndx == SHN_UNDEF)
1546 return TRUE;
1547
1548 if (isym->st_shndx == SHN_ABS)
1549 tsec = bfd_abs_section_ptr;
1550 else if (isym->st_shndx == SHN_COMMON)
1551 tsec = bfd_com_section_ptr;
1552 else if (isym->st_shndx == SHN_X86_64_LCOMMON)
1553 tsec = &_bfd_elf_large_com_section;
1554 else
1555 tsec = bfd_section_from_elf_index (abfd, isym->st_shndx);
1556 }
1557 else
1558 {
1559 /* An undefined weak symbol is only bound locally in an executable
1560 and its reference is resolved as 0 without relocation
1561 overflow. We can only perform this optimization for
1562 GOTPCRELX relocations since we need to modify the REX byte.
1563 It is OK to convert mov with R_X86_64_GOTPCREL to
1564 R_X86_64_PC32. */
1565 bfd_boolean local_ref;
1566 struct elf_x86_link_hash_entry *eh = elf_x86_hash_entry (h);
1567
1568 /* NB: Also set linker_def via SYMBOL_REFERENCES_LOCAL_P. */
1569 local_ref = SYMBOL_REFERENCES_LOCAL_P (link_info, h);
1570 if ((relocx || opcode == 0x8b)
1571 && (h->root.type == bfd_link_hash_undefweak
1572 && !eh->linker_def
1573 && local_ref))
1574 {
1575 if (opcode == 0xff)
1576 {
1577 /* Skip for branch instructions since R_X86_64_PC32
1578 may overflow. */
1579 if (no_overflow)
1580 return TRUE;
1581 }
1582 else if (relocx)
1583 {
1584 /* For non-branch instructions, we can convert to
1585 R_X86_64_32/R_X86_64_32S since we know if there
1586 is a REX byte. */
1587 to_reloc_pc32 = FALSE;
1588 }
1589
1590 /* Since we don't know the current PC when PIC is true,
1591 we can't convert to R_X86_64_PC32. */
1592 if (to_reloc_pc32 && is_pic)
1593 return TRUE;
1594
1595 goto convert;
1596 }
1597 /* Avoid optimizing GOTPCREL relocations against _DYNAMIC since
1598 ld.so may use its link-time address. */
1599 else if (h->start_stop
1600 || eh->linker_def
1601 || ((h->def_regular
1602 || h->root.type == bfd_link_hash_defined
1603 || h->root.type == bfd_link_hash_defweak)
1604 && h != htab->elf.hdynamic
1605 && local_ref))
1606 {
1607 /* bfd_link_hash_new or bfd_link_hash_undefined is
1608 set by an assignment in a linker script in
1609 bfd_elf_record_link_assignment. start_stop is set
1610 on __start_SECNAME/__stop_SECNAME which mark section
1611 SECNAME. */
1612 if (h->start_stop
1613 || eh->linker_def
1614 || (h->def_regular
1615 && (h->root.type == bfd_link_hash_new
1616 || h->root.type == bfd_link_hash_undefined
1617 || ((h->root.type == bfd_link_hash_defined
1618 || h->root.type == bfd_link_hash_defweak)
1619 && h->root.u.def.section == bfd_und_section_ptr))))
1620 {
1621 /* Skip since R_X86_64_32/R_X86_64_32S may overflow. */
1622 if (no_overflow)
1623 return TRUE;
1624 goto convert;
1625 }
1626 tsec = h->root.u.def.section;
1627 }
1628 else
1629 return TRUE;
1630 }
1631
1632 /* Don't convert GOTPCREL relocation against large section. */
1633 if (elf_section_data (tsec) != NULL
1634 && (elf_section_flags (tsec) & SHF_X86_64_LARGE) != 0)
1635 return TRUE;
1636
1637 /* Skip since R_X86_64_PC32/R_X86_64_32/R_X86_64_32S may overflow. */
1638 if (no_overflow)
1639 return TRUE;
1640
1641 convert:
1642 if (opcode == 0xff)
1643 {
1644 /* We have "call/jmp *foo@GOTPCREL(%rip)". */
1645 unsigned int nop;
1646 unsigned int disp;
1647 bfd_vma nop_offset;
1648
1649 /* Convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX to
1650 R_X86_64_PC32. */
1651 modrm = bfd_get_8 (abfd, contents + roff - 1);
1652 if (modrm == 0x25)
1653 {
1654 /* Convert to "jmp foo nop". */
1655 modrm = 0xe9;
1656 nop = NOP_OPCODE;
1657 nop_offset = irel->r_offset + 3;
1658 disp = bfd_get_32 (abfd, contents + irel->r_offset);
1659 irel->r_offset -= 1;
1660 bfd_put_32 (abfd, disp, contents + irel->r_offset);
1661 }
1662 else
1663 {
1664 struct elf_x86_link_hash_entry *eh
1665 = (struct elf_x86_link_hash_entry *) h;
1666
1667 /* Convert to "nop call foo". ADDR_PREFIX_OPCODE
1668 is a nop prefix. */
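/* NB: "call *foo@GOTPCREL(%rip)" (ff 15 disp32) is 6 bytes while the
   direct "call rel32" (e8 disp32) is only 5, so a one-byte prefix (or
   a trailing NOP when call_nop_as_suffix is set) preserves the size.
   The addr32 (0x67) prefix is always used for a converted call to
   __tls_get_addr so the sequence stays in the form the TLS
   transitions expect.  */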
1669 modrm = 0xe8;
1670 /* To support TLS optimization, always use addr32 prefix for
1671 "call *__tls_get_addr@GOTPCREL(%rip)". */
1672 if (eh && eh->tls_get_addr)
1673 {
1674 nop = 0x67;
1675 nop_offset = irel->r_offset - 2;
1676 }
1677 else
1678 {
1679 nop = htab->params->call_nop_byte;
1680 if (htab->params->call_nop_as_suffix)
1681 {
1682 nop_offset = irel->r_offset + 3;
1683 disp = bfd_get_32 (abfd, contents + irel->r_offset);
1684 irel->r_offset -= 1;
1685 bfd_put_32 (abfd, disp, contents + irel->r_offset);
1686 }
1687 else
1688 nop_offset = irel->r_offset - 2;
1689 }
1690 }
1691 bfd_put_8 (abfd, nop, contents + nop_offset);
1692 bfd_put_8 (abfd, modrm, contents + irel->r_offset - 1);
1693 r_type = R_X86_64_PC32;
1694 }
1695 else
1696 {
1697 unsigned int rex;
1698 unsigned int rex_mask = REX_R;
1699
1700 if (r_type == R_X86_64_REX_GOTPCRELX)
1701 rex = bfd_get_8 (abfd, contents + roff - 3);
1702 else
1703 rex = 0;
1704
1705 if (opcode == 0x8b)
1706 {
1707 if (to_reloc_pc32)
1708 {
1709 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
1710 "lea foo(%rip), %reg". */
1711 opcode = 0x8d;
1712 r_type = R_X86_64_PC32;
1713 }
1714 else
1715 {
1716 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
1717 "mov $foo, %reg". */
1718 opcode = 0xc7;
1719 modrm = bfd_get_8 (abfd, contents + roff - 1);
1720 modrm = 0xc0 | (modrm & 0x38) >> 3;
1721 if ((rex & REX_W) != 0
1722 && ABI_64_P (link_info->output_bfd))
1723 {
1724 /* Keep the REX_W bit in REX byte for LP64. */
1725 r_type = R_X86_64_32S;
1726 goto rewrite_modrm_rex;
1727 }
1728 else
1729 {
1730 /* If the REX_W bit in REX byte isn't needed,
1731 use R_X86_64_32 and clear the W bit to avoid
1732 sign-extend imm32 to imm64. */
1733 r_type = R_X86_64_32;
1734 /* Clear the W bit in REX byte. */
1735 rex_mask |= REX_W;
1736 goto rewrite_modrm_rex;
1737 }
1738 }
1739 }
1740 else
1741 {
1742 /* R_X86_64_PC32 isn't supported. */
1743 if (to_reloc_pc32)
1744 return TRUE;
1745
1746 modrm = bfd_get_8 (abfd, contents + roff - 1);
1747 if (opcode == 0x85)
1748 {
1749 /* Convert "test %reg, foo@GOTPCREL(%rip)" to
1750 "test $foo, %reg". */
1751 modrm = 0xc0 | (modrm & 0x38) >> 3;
1752 opcode = 0xf7;
1753 }
1754 else
1755 {
1756 /* Convert "binop foo@GOTPCREL(%rip), %reg" to
1757 "binop $foo, %reg". */
1758 modrm = 0xc0 | (modrm & 0x38) >> 3 | (opcode & 0x3c);
1759 opcode = 0x81;
1760 }
1761
1762 /* Use R_X86_64_32 with 32-bit operand to avoid relocation
1763 overflow when sign-extending imm32 to imm64. */
1764 r_type = (rex & REX_W) != 0 ? R_X86_64_32S : R_X86_64_32;
1765
1766 rewrite_modrm_rex:
1767 bfd_put_8 (abfd, modrm, contents + roff - 1);
1768
1769 if (rex)
1770 {
1771 /* Move the R bit to the B bit in REX byte. */
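/* NB: the destination register now lives in ModRM.rm, so its REX
   extension must move from REX.R (bit 2) to REX.B (bit 0); rex_mask
   also clears REX.W when R_X86_64_32 was chosen above.  */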
1772 rex = (rex & ~rex_mask) | (rex & REX_R) >> 2;
1773 bfd_put_8 (abfd, rex, contents + roff - 3);
1774 }
1775
1776 /* No addend for R_X86_64_32/R_X86_64_32S relocations. */
1777 irel->r_addend = 0;
1778 }
1779
1780 bfd_put_8 (abfd, opcode, contents + roff - 2);
1781 }
1782
1783 *r_type_p = r_type;
1784 irel->r_info = htab->r_info (r_symndx,
1785 r_type | R_X86_64_converted_reloc_bit);
1786
1787 *converted = TRUE;
1788
1789 return TRUE;
1790 }
1791
1792 /* Look through the relocs for a section during the first phase, and
1793 calculate needed space in the global offset table, procedure
1794 linkage table, and dynamic reloc sections. */
1795
1796 static bfd_boolean
1797 elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info,
1798 asection *sec,
1799 const Elf_Internal_Rela *relocs)
1800 {
1801 struct elf_x86_link_hash_table *htab;
1802 Elf_Internal_Shdr *symtab_hdr;
1803 struct elf_link_hash_entry **sym_hashes;
1804 const Elf_Internal_Rela *rel;
1805 const Elf_Internal_Rela *rel_end;
1806 asection *sreloc;
1807 bfd_byte *contents;
1808 bfd_boolean converted;
1809
1810 if (bfd_link_relocatable (info))
1811 return TRUE;
1812
1813 /* Don't do anything special with non-loaded, non-alloced sections.
1814 In particular, any relocs in such sections should not affect GOT
1815 and PLT reference counting (ie. we don't allow them to create GOT
1816 or PLT entries), there's no possibility or desire to optimize TLS
1817 relocs, and there's not much point in propagating relocs to shared
1818 libs that the dynamic linker won't relocate. */
1819 if ((sec->flags & SEC_ALLOC) == 0)
1820 return TRUE;
1821
1822 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1823 if (htab == NULL)
1824 {
1825 sec->check_relocs_failed = 1;
1826 return FALSE;
1827 }
1828
1829 BFD_ASSERT (is_x86_elf (abfd, htab));
1830
1831 /* Get the section contents. */
1832 if (elf_section_data (sec)->this_hdr.contents != NULL)
1833 contents = elf_section_data (sec)->this_hdr.contents;
1834 else if (!bfd_malloc_and_get_section (abfd, sec, &contents))
1835 {
1836 sec->check_relocs_failed = 1;
1837 return FALSE;
1838 }
1839
1840 symtab_hdr = &elf_symtab_hdr (abfd);
1841 sym_hashes = elf_sym_hashes (abfd);
1842
1843 converted = FALSE;
1844
1845 sreloc = NULL;
1846
1847 rel_end = relocs + sec->reloc_count;
1848 for (rel = relocs; rel < rel_end; rel++)
1849 {
1850 unsigned int r_type;
1851 unsigned int r_symndx;
1852 struct elf_link_hash_entry *h;
1853 struct elf_x86_link_hash_entry *eh;
1854 Elf_Internal_Sym *isym;
1855 const char *name;
1856 bfd_boolean size_reloc;
1857 bfd_boolean converted_reloc;
1858 bfd_boolean do_check_pic;
1859
1860 r_symndx = htab->r_sym (rel->r_info);
1861 r_type = ELF32_R_TYPE (rel->r_info);
1862
1863 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
1864 {
1865 /* xgettext:c-format */
1866 _bfd_error_handler (_("%pB: bad symbol index: %d"),
1867 abfd, r_symndx);
1868 goto error_return;
1869 }
1870
1871 if (r_symndx < symtab_hdr->sh_info)
1872 {
1873 /* A local symbol. */
1874 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1875 abfd, r_symndx);
1876 if (isym == NULL)
1877 goto error_return;
1878
1879 /* Check relocation against local STT_GNU_IFUNC symbol. */
1880 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
1881 {
1882 h = _bfd_elf_x86_get_local_sym_hash (htab, abfd, rel,
1883 TRUE);
1884 if (h == NULL)
1885 goto error_return;
1886
1887 /* Fake a STT_GNU_IFUNC symbol. */
1888 h->root.root.string = bfd_elf_sym_name (abfd, symtab_hdr,
1889 isym, NULL);
1890 h->type = STT_GNU_IFUNC;
1891 h->def_regular = 1;
1892 h->ref_regular = 1;
1893 h->forced_local = 1;
1894 h->root.type = bfd_link_hash_defined;
1895 }
1896 else
1897 h = NULL;
1898 }
1899 else
1900 {
1901 isym = NULL;
1902 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1903 while (h->root.type == bfd_link_hash_indirect
1904 || h->root.type == bfd_link_hash_warning)
1905 h = (struct elf_link_hash_entry *) h->root.u.i.link;
1906 }
1907
1908 /* Check invalid x32 relocations. */
1909 if (!ABI_64_P (abfd))
1910 switch (r_type)
1911 {
1912 default:
1913 break;
1914
1915 case R_X86_64_DTPOFF64:
1916 case R_X86_64_TPOFF64:
1917 case R_X86_64_PC64:
1918 case R_X86_64_GOTOFF64:
1919 case R_X86_64_GOT64:
1920 case R_X86_64_GOTPCREL64:
1921 case R_X86_64_GOTPC64:
1922 case R_X86_64_GOTPLT64:
1923 case R_X86_64_PLTOFF64:
1924 {
1925 if (h)
1926 name = h->root.root.string;
1927 else
1928 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1929 NULL);
1930 _bfd_error_handler
1931 /* xgettext:c-format */
1932 (_("%pB: relocation %s against symbol `%s' isn't "
1933 "supported in x32 mode"), abfd,
1934 x86_64_elf_howto_table[r_type].name, name);
1935 bfd_set_error (bfd_error_bad_value);
1936 goto error_return;
1937 }
1938 break;
1939 }
1940
1941 if (h != NULL)
1942 {
1943 /* It is referenced by a non-shared object. */
1944 h->ref_regular = 1;
1945
1946 if (h->type == STT_GNU_IFUNC)
1947 elf_tdata (info->output_bfd)->has_gnu_symbols
1948 |= elf_gnu_symbol_ifunc;
1949 }
1950
1951 converted_reloc = FALSE;
1952 if ((r_type == R_X86_64_GOTPCREL
1953 || r_type == R_X86_64_GOTPCRELX
1954 || r_type == R_X86_64_REX_GOTPCRELX)
1955 && (h == NULL || h->type != STT_GNU_IFUNC))
1956 {
1957 Elf_Internal_Rela *irel = (Elf_Internal_Rela *) rel;
1958 if (!elf_x86_64_convert_load_reloc (abfd, contents, &r_type,
1959 irel, h, &converted_reloc,
1960 info))
1961 goto error_return;
1962
1963 if (converted_reloc)
1964 converted = TRUE;
1965 }
1966
1967 if (! elf_x86_64_tls_transition (info, abfd, sec, contents,
1968 symtab_hdr, sym_hashes,
1969 &r_type, GOT_UNKNOWN,
1970 rel, rel_end, h, r_symndx, FALSE))
1971 goto error_return;
1972
1973 /* Check if _GLOBAL_OFFSET_TABLE_ is referenced. */
1974 if (h == htab->elf.hgot)
1975 htab->got_referenced = TRUE;
1976
1977 eh = (struct elf_x86_link_hash_entry *) h;
1978 switch (r_type)
1979 {
1980 case R_X86_64_TLSLD:
1981 htab->tls_ld_or_ldm_got.refcount = 1;
1982 goto create_got;
1983
1984 case R_X86_64_TPOFF32:
1985 if (!bfd_link_executable (info) && ABI_64_P (abfd))
1986 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
1987 &x86_64_elf_howto_table[r_type]);
1988 if (eh != NULL)
1989 eh->zero_undefweak &= 0x2;
1990 break;
1991
1992 case R_X86_64_GOTTPOFF:
1993 if (!bfd_link_executable (info))
1994 info->flags |= DF_STATIC_TLS;
1995 /* Fall through */
1996
1997 case R_X86_64_GOT32:
1998 case R_X86_64_GOTPCREL:
1999 case R_X86_64_GOTPCRELX:
2000 case R_X86_64_REX_GOTPCRELX:
2001 case R_X86_64_TLSGD:
2002 case R_X86_64_GOT64:
2003 case R_X86_64_GOTPCREL64:
2004 case R_X86_64_GOTPLT64:
2005 case R_X86_64_GOTPC32_TLSDESC:
2006 case R_X86_64_TLSDESC_CALL:
2007 /* This symbol requires a global offset table entry. */
2008 {
2009 int tls_type, old_tls_type;
2010
2011 switch (r_type)
2012 {
2013 default: tls_type = GOT_NORMAL; break;
2014 case R_X86_64_TLSGD: tls_type = GOT_TLS_GD; break;
2015 case R_X86_64_GOTTPOFF: tls_type = GOT_TLS_IE; break;
2016 case R_X86_64_GOTPC32_TLSDESC:
2017 case R_X86_64_TLSDESC_CALL:
2018 tls_type = GOT_TLS_GDESC; break;
2019 }
2020
2021 if (h != NULL)
2022 {
2023 h->got.refcount = 1;
2024 old_tls_type = eh->tls_type;
2025 }
2026 else
2027 {
2028 bfd_signed_vma *local_got_refcounts;
2029
2030 /* This is a global offset table entry for a local symbol. */
2031 local_got_refcounts = elf_local_got_refcounts (abfd);
2032 if (local_got_refcounts == NULL)
2033 {
2034 bfd_size_type size;
2035
2036 size = symtab_hdr->sh_info;
2037 size *= sizeof (bfd_signed_vma)
2038 + sizeof (bfd_vma) + sizeof (char);
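/* NB: one allocation provides three arrays indexed by local
   symbol number: the GOT refcounts, the TLSDESC GOT offsets
   and the per-symbol TLS types set up just below.  */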
2039 local_got_refcounts = ((bfd_signed_vma *)
2040 bfd_zalloc (abfd, size));
2041 if (local_got_refcounts == NULL)
2042 goto error_return;
2043 elf_local_got_refcounts (abfd) = local_got_refcounts;
2044 elf_x86_local_tlsdesc_gotent (abfd)
2045 = (bfd_vma *) (local_got_refcounts + symtab_hdr->sh_info);
2046 elf_x86_local_got_tls_type (abfd)
2047 = (char *) (local_got_refcounts + 2 * symtab_hdr->sh_info);
2048 }
2049 local_got_refcounts[r_symndx] = 1;
2050 old_tls_type
2051 = elf_x86_local_got_tls_type (abfd) [r_symndx];
2052 }
2053
2054 /* If a TLS symbol is accessed using IE at least once,
2055 there is no point in using the dynamic model for it. */
2056 if (old_tls_type != tls_type && old_tls_type != GOT_UNKNOWN
2057 && (! GOT_TLS_GD_ANY_P (old_tls_type)
2058 || tls_type != GOT_TLS_IE))
2059 {
2060 if (old_tls_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (tls_type))
2061 tls_type = old_tls_type;
2062 else if (GOT_TLS_GD_ANY_P (old_tls_type)
2063 && GOT_TLS_GD_ANY_P (tls_type))
2064 tls_type |= old_tls_type;
2065 else
2066 {
2067 if (h)
2068 name = h->root.root.string;
2069 else
2070 name = bfd_elf_sym_name (abfd, symtab_hdr,
2071 isym, NULL);
2072 _bfd_error_handler
2073 /* xgettext:c-format */
2074 (_("%pB: '%s' accessed both as normal and"
2075 " thread local symbol"),
2076 abfd, name);
2077 bfd_set_error (bfd_error_bad_value);
2078 goto error_return;
2079 }
2080 }
2081
2082 if (old_tls_type != tls_type)
2083 {
2084 if (eh != NULL)
2085 eh->tls_type = tls_type;
2086 else
2087 elf_x86_local_got_tls_type (abfd) [r_symndx] = tls_type;
2088 }
2089 }
2090 /* Fall through */
2091
2092 case R_X86_64_GOTOFF64:
2093 case R_X86_64_GOTPC32:
2094 case R_X86_64_GOTPC64:
2095 create_got:
2096 if (eh != NULL)
2097 eh->zero_undefweak &= 0x2;
2098 break;
2099
2100 case R_X86_64_PLT32:
2101 case R_X86_64_PLT32_BND:
2102 /* This symbol requires a procedure linkage table entry. We
2103 actually build the entry in adjust_dynamic_symbol,
2104 because this might be a case of linking PIC code which is
2105 never referenced by a dynamic object, in which case we
2106 don't need to generate a procedure linkage table entry
2107 after all. */
2108
2109 /* If this is a local symbol, we resolve it directly without
2110 creating a procedure linkage table entry. */
2111 if (h == NULL)
2112 continue;
2113
2114 eh->zero_undefweak &= 0x2;
2115 h->needs_plt = 1;
2116 h->plt.refcount = 1;
2117 break;
2118
2119 case R_X86_64_PLTOFF64:
2120 /* This tries to form the 'address' of a function relative
2121 to GOT. For global symbols we need a PLT entry. */
2122 if (h != NULL)
2123 {
2124 h->needs_plt = 1;
2125 h->plt.refcount = 1;
2126 }
2127 goto create_got;
2128
2129 case R_X86_64_SIZE32:
2130 case R_X86_64_SIZE64:
2131 size_reloc = TRUE;
2132 goto do_size;
2133
2134 case R_X86_64_PC8:
2135 case R_X86_64_PC16:
2136 case R_X86_64_PC32:
2137 case R_X86_64_PC32_BND:
2138 do_check_pic = TRUE;
2139 goto check_pic;
2140
2141 case R_X86_64_32:
2142 if (!ABI_64_P (abfd))
2143 goto pointer;
2144 /* Fall through. */
2145 case R_X86_64_8:
2146 case R_X86_64_16:
2147 case R_X86_64_32S:
2148 /* Check relocation overflow as these relocs may lead to
2149 run-time relocation overflow. Don't error out for
2150 sections we don't care about, such as debug sections, or
2151 when the relocation overflow check is disabled. */
2152 if (!htab->params->no_reloc_overflow_check
2153 && !converted_reloc
2154 && (bfd_link_pic (info)
2155 || (bfd_link_executable (info)
2156 && h != NULL
2157 && !h->def_regular
2158 && h->def_dynamic
2159 && (sec->flags & SEC_READONLY) == 0)))
2160 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
2161 &x86_64_elf_howto_table[r_type]);
2162 /* Fall through. */
2163
2164 case R_X86_64_PC64:
2165 case R_X86_64_64:
2166 pointer:
2167 do_check_pic = FALSE;
2168 check_pic:
2169 if (eh != NULL && (sec->flags & SEC_CODE) != 0)
2170 eh->zero_undefweak |= 0x2;
2171 /* We are called after all symbols have been resolved. Only
2172 relocations against STT_GNU_IFUNC symbols must go through
2173 the PLT. */
2174 if (h != NULL
2175 && (bfd_link_executable (info)
2176 || h->type == STT_GNU_IFUNC))
2177 {
2178 bfd_boolean func_pointer_ref = FALSE;
2179
2180 if (r_type == R_X86_64_PC32)
2181 {
2182 /* Since something like ".long foo - ." may be used
2183 as a pointer, make sure that PLT is used if foo is
2184 a function defined in a shared library. */
2185 if ((sec->flags & SEC_CODE) == 0)
2186 {
2187 h->pointer_equality_needed = 1;
2188 if (bfd_link_pie (info)
2189 && h->type == STT_FUNC
2190 && !h->def_regular
2191 && h->def_dynamic)
2192 {
2193 h->needs_plt = 1;
2194 h->plt.refcount = 1;
2195 }
2196 }
2197 }
2198 else if (r_type != R_X86_64_PC32_BND
2199 && r_type != R_X86_64_PC64)
2200 {
2201 h->pointer_equality_needed = 1;
2202 /* At run-time, R_X86_64_64 can be resolved for both
2203 x86-64 and x32. But R_X86_64_32 and R_X86_64_32S
2204 can only be resolved for x32. */
2205 if ((sec->flags & SEC_READONLY) == 0
2206 && (r_type == R_X86_64_64
2207 || (!ABI_64_P (abfd)
2208 && (r_type == R_X86_64_32
2209 || r_type == R_X86_64_32S))))
2210 func_pointer_ref = TRUE;
2211 }
2212
2213 if (!func_pointer_ref)
2214 {
2215 /* If this reloc is in a read-only section, we might
2216 need a copy reloc. We can't check reliably at this
2217 stage whether the section is read-only, as input
2218 sections have not yet been mapped to output sections.
2219 Tentatively set the flag for now, and correct in
2220 adjust_dynamic_symbol. */
2221 h->non_got_ref = 1;
2222
2223 /* We may need a .plt entry if the symbol is a function
2224 defined in a shared lib or is a function referenced
2225 from the code or read-only section. */
2226 if (!h->def_regular
2227 || (sec->flags & (SEC_CODE | SEC_READONLY)) != 0)
2228 h->plt.refcount = 1;
2229 }
2230 }
2231
2232 if (do_check_pic)
2233 {
2234 /* Don't complain about -fPIC if the symbol is undefined
2235 when building an executable unless it is an unresolved
2236 weak symbol, references a dynamic definition in PIE, or
2237 -z nocopyreloc is used. */
2238 bfd_boolean no_copyreloc_p
2239 = (info->nocopyreloc
2240 || (h != NULL
2241 && !h->root.linker_def
2242 && !h->root.ldscript_def
2243 && eh->def_protected
2244 && elf_has_no_copy_on_protected (h->root.u.def.section->owner)));
2245 if ((sec->flags & SEC_ALLOC) != 0
2246 && (sec->flags & SEC_READONLY) != 0
2247 && h != NULL
2248 && ((bfd_link_executable (info)
2249 && ((h->root.type == bfd_link_hash_undefweak
2250 && (eh == NULL
2251 || !UNDEFINED_WEAK_RESOLVED_TO_ZERO (info,
2252 eh)))
2253 || (bfd_link_pie (info)
2254 && !SYMBOL_DEFINED_NON_SHARED_P (h)
2255 && h->def_dynamic)
2256 || (no_copyreloc_p
2257 && h->def_dynamic
2258 && !(h->root.u.def.section->flags & SEC_CODE))))
2259 || bfd_link_dll (info)))
2260 {
2261 bfd_boolean fail = FALSE;
2262 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
2263 {
2264 /* Symbol is referenced locally. Make sure it is
2265 defined locally. */
2266 fail = !SYMBOL_DEFINED_NON_SHARED_P (h);
2267 }
2268 else if (bfd_link_pie (info))
2269 {
2270 /* We can only use PC-relative relocations in PIE
2271 from non-code sections. */
2272 if (h->type == STT_FUNC
2273 && (sec->flags & SEC_CODE) != 0)
2274 fail = TRUE;
2275 }
2276 else if (no_copyreloc_p || bfd_link_dll (info))
2277 {
2278 /* Symbol doesn't need copy reloc and isn't
2279 referenced locally. Don't allow PC-relative
2280 relocations against default and protected
2281 symbols since address of protected function
2282 and location of protected data may not be in
2283 the shared object. */
2284 fail = (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
2285 || ELF_ST_VISIBILITY (h->other) == STV_PROTECTED);
2286 }
2287
2288 if (fail)
2289 return elf_x86_64_need_pic (info, abfd, sec, h,
2290 symtab_hdr, isym,
2291 &x86_64_elf_howto_table[r_type]);
2292 }
2293 }
2294
2295 size_reloc = FALSE;
2296 do_size:
2297 if (NEED_DYNAMIC_RELOCATION_P (info, TRUE, h, sec, r_type,
2298 htab->pointer_r_type))
2299 {
2300 struct elf_dyn_relocs *p;
2301 struct elf_dyn_relocs **head;
2302
2303 /* We must copy these reloc types into the output file.
2304 Create a reloc section in dynobj and make room for
2305 this reloc. */
2306 if (sreloc == NULL)
2307 {
2308 sreloc = _bfd_elf_make_dynamic_reloc_section
2309 (sec, htab->elf.dynobj, ABI_64_P (abfd) ? 3 : 2,
2310 abfd, /*rela?*/ TRUE);
2311
2312 if (sreloc == NULL)
2313 goto error_return;
2314 }
2315
2316 /* If this is a global symbol, we count the number of
2317 relocations we need for this symbol. */
2318 if (h != NULL)
2319 head = &eh->dyn_relocs;
2320 else
2321 {
2322 /* Track dynamic relocs needed for local syms too.
2323 We really need local syms available to do this
2324 easily. Oh well. */
2325 asection *s;
2326 void **vpp;
2327
2328 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2329 abfd, r_symndx);
2330 if (isym == NULL)
2331 goto error_return;
2332
2333 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
2334 if (s == NULL)
2335 s = sec;
2336
2337 /* Beware of type punned pointers vs strict aliasing
2338 rules. */
2339 vpp = &(elf_section_data (s)->local_dynrel);
2340 head = (struct elf_dyn_relocs **)vpp;
2341 }
2342
2343 p = *head;
2344 if (p == NULL || p->sec != sec)
2345 {
2346 bfd_size_type amt = sizeof *p;
2347
2348 p = ((struct elf_dyn_relocs *)
2349 bfd_alloc (htab->elf.dynobj, amt));
2350 if (p == NULL)
2351 goto error_return;
2352 p->next = *head;
2353 *head = p;
2354 p->sec = sec;
2355 p->count = 0;
2356 p->pc_count = 0;
2357 }
2358
2359 p->count += 1;
2360 /* Count size relocation as PC-relative relocation. */
2361 if (X86_PCREL_TYPE_P (r_type) || size_reloc)
2362 p->pc_count += 1;
2363 }
2364 break;
2365
2366 /* This relocation describes the C++ object vtable hierarchy.
2367 Reconstruct it for later use during GC. */
2368 case R_X86_64_GNU_VTINHERIT:
2369 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
2370 goto error_return;
2371 break;
2372
2373 /* This relocation describes which C++ vtable entries are actually
2374 used. Record for later use during GC. */
2375 case R_X86_64_GNU_VTENTRY:
2376 BFD_ASSERT (h != NULL);
2377 if (h != NULL
2378 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
2379 goto error_return;
2380 break;
2381
2382 default:
2383 break;
2384 }
2385 }
2386
2387 if (elf_section_data (sec)->this_hdr.contents != contents)
2388 {
2389 if (!converted && !info->keep_memory)
2390 free (contents);
2391 else
2392 {
2393 /* Cache the section contents for elf_link_input_bfd if any
2394 load is converted or --no-keep-memory isn't used. */
2395 elf_section_data (sec)->this_hdr.contents = contents;
2396 }
2397 }
2398
2399 /* Cache relocations if any load is converted. */
2400 if (elf_section_data (sec)->relocs != relocs && converted)
2401 elf_section_data (sec)->relocs = (Elf_Internal_Rela *) relocs;
2402
2403 return TRUE;
2404
2405 error_return:
2406 if (elf_section_data (sec)->this_hdr.contents != contents)
2407 free (contents);
2408 sec->check_relocs_failed = 1;
2409 return FALSE;
2410 }
2411
2412 /* Return the relocation value for a @tpoff relocation
2413 if the STT_TLS virtual address is ADDRESS. */
2414
2415 static bfd_vma
2416 elf_x86_64_tpoff (struct bfd_link_info *info, bfd_vma address)
2417 {
2418 struct elf_link_hash_table *htab = elf_hash_table (info);
2419 const struct elf_backend_data *bed = get_elf_backend_data (info->output_bfd);
2420 bfd_vma static_tls_size;
2421
2422 /* If tls_segment is NULL, we should have signalled an error already. */
2423 if (htab->tls_sec == NULL)
2424 return 0;
2425
2426 /* Consider special static TLS alignment requirements. */
2427 static_tls_size = BFD_ALIGN (htab->tls_size, bed->static_tls_alignment);
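/* NB: x86-64 uses TLS Variant II: the static TLS block ends at the
   thread pointer, so the @tpoff value returned here is the (negative)
   offset of ADDRESS from the end of the aligned block.  */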
2428 return address - static_tls_size - htab->tls_sec->vma;
2429 }
2430
2431 /* Relocate an x86_64 ELF section. */
2432
2433 static bfd_boolean
2434 elf_x86_64_relocate_section (bfd *output_bfd,
2435 struct bfd_link_info *info,
2436 bfd *input_bfd,
2437 asection *input_section,
2438 bfd_byte *contents,
2439 Elf_Internal_Rela *relocs,
2440 Elf_Internal_Sym *local_syms,
2441 asection **local_sections)
2442 {
2443 struct elf_x86_link_hash_table *htab;
2444 Elf_Internal_Shdr *symtab_hdr;
2445 struct elf_link_hash_entry **sym_hashes;
2446 bfd_vma *local_got_offsets;
2447 bfd_vma *local_tlsdesc_gotents;
2448 Elf_Internal_Rela *rel;
2449 Elf_Internal_Rela *wrel;
2450 Elf_Internal_Rela *relend;
2451 unsigned int plt_entry_size;
2452
2453 /* Skip if check_relocs failed. */
2454 if (input_section->check_relocs_failed)
2455 return FALSE;
2456
2457 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
2458 if (htab == NULL)
2459 return FALSE;
2460
2461 if (!is_x86_elf (input_bfd, htab))
2462 {
2463 bfd_set_error (bfd_error_wrong_format);
2464 return FALSE;
2465 }
2466
2467 plt_entry_size = htab->plt.plt_entry_size;
2468 symtab_hdr = &elf_symtab_hdr (input_bfd);
2469 sym_hashes = elf_sym_hashes (input_bfd);
2470 local_got_offsets = elf_local_got_offsets (input_bfd);
2471 local_tlsdesc_gotents = elf_x86_local_tlsdesc_gotent (input_bfd);
2472
2473 _bfd_x86_elf_set_tls_module_base (info);
2474
2475 rel = wrel = relocs;
2476 relend = relocs + input_section->reloc_count;
2477 for (; rel < relend; wrel++, rel++)
2478 {
2479 unsigned int r_type, r_type_tls;
2480 reloc_howto_type *howto;
2481 unsigned long r_symndx;
2482 struct elf_link_hash_entry *h;
2483 struct elf_x86_link_hash_entry *eh;
2484 Elf_Internal_Sym *sym;
2485 asection *sec;
2486 bfd_vma off, offplt, plt_offset;
2487 bfd_vma relocation;
2488 bfd_boolean unresolved_reloc;
2489 bfd_reloc_status_type r;
2490 int tls_type;
2491 asection *base_got, *resolved_plt;
2492 bfd_vma st_size;
2493 bfd_boolean resolved_to_zero;
2494 bfd_boolean relative_reloc;
2495 bfd_boolean converted_reloc;
2496 bfd_boolean need_copy_reloc_in_pie;
2497
2498 r_type = ELF32_R_TYPE (rel->r_info);
2499 if (r_type == (int) R_X86_64_GNU_VTINHERIT
2500 || r_type == (int) R_X86_64_GNU_VTENTRY)
2501 {
2502 if (wrel != rel)
2503 *wrel = *rel;
2504 continue;
2505 }
2506
2507 r_symndx = htab->r_sym (rel->r_info);
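/* NB: check_relocs tagged relocations it rewrote (via
   elf_x86_64_convert_load_reloc) with R_X86_64_converted_reloc_bit;
   strip the bit and restore the real type before relocating.  */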
2508 converted_reloc = (r_type & R_X86_64_converted_reloc_bit) != 0;
2509 if (converted_reloc)
2510 {
2511 r_type &= ~R_X86_64_converted_reloc_bit;
2512 rel->r_info = htab->r_info (r_symndx, r_type);
2513 }
2514
2515 howto = elf_x86_64_rtype_to_howto (input_bfd, r_type);
2516 if (howto == NULL)
2517 return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);
2518
2519 h = NULL;
2520 sym = NULL;
2521 sec = NULL;
2522 unresolved_reloc = FALSE;
2523 if (r_symndx < symtab_hdr->sh_info)
2524 {
2525 sym = local_syms + r_symndx;
2526 sec = local_sections[r_symndx];
2527
2528 relocation = _bfd_elf_rela_local_sym (output_bfd, sym,
2529 &sec, rel);
2530 st_size = sym->st_size;
2531
2532 /* Relocate against local STT_GNU_IFUNC symbol. */
2533 if (!bfd_link_relocatable (info)
2534 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
2535 {
2536 h = _bfd_elf_x86_get_local_sym_hash (htab, input_bfd,
2537 rel, FALSE);
2538 if (h == NULL)
2539 abort ();
2540
2541 /* Set STT_GNU_IFUNC symbol value. */
2542 h->root.u.def.value = sym->st_value;
2543 h->root.u.def.section = sec;
2544 }
2545 }
2546 else
2547 {
2548 bfd_boolean warned ATTRIBUTE_UNUSED;
2549 bfd_boolean ignored ATTRIBUTE_UNUSED;
2550
2551 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
2552 r_symndx, symtab_hdr, sym_hashes,
2553 h, sec, relocation,
2554 unresolved_reloc, warned, ignored);
2555 st_size = h->size;
2556 }
2557
2558 if (sec != NULL && discarded_section (sec))
2559 {
2560 _bfd_clear_contents (howto, input_bfd, input_section,
2561 contents, rel->r_offset);
2562 wrel->r_offset = rel->r_offset;
2563 wrel->r_info = 0;
2564 wrel->r_addend = 0;
2565
2566 /* For ld -r, remove relocations in debug sections against
2567 sections defined in discarded sections. This is not done for
2568 eh_frame because the editing code expects the relocs to be present. */
2569 if (bfd_link_relocatable (info)
2570 && (input_section->flags & SEC_DEBUGGING))
2571 wrel--;
2572
2573 continue;
2574 }
2575
2576 if (bfd_link_relocatable (info))
2577 {
2578 if (wrel != rel)
2579 *wrel = *rel;
2580 continue;
2581 }
2582
2583 if (rel->r_addend == 0 && !ABI_64_P (output_bfd))
2584 {
2585 if (r_type == R_X86_64_64)
2586 {
2587 /* For x32, treat R_X86_64_64 like R_X86_64_32 and
2588 zero-extend it to 64bit if addend is zero. */
2589 r_type = R_X86_64_32;
2590 memset (contents + rel->r_offset + 4, 0, 4);
2591 }
2592 else if (r_type == R_X86_64_SIZE64)
2593 {
2594 /* For x32, treat R_X86_64_SIZE64 like R_X86_64_SIZE32 and
2595 zero-extend it to 64bit if addend is zero. */
2596 r_type = R_X86_64_SIZE32;
2597 memset (contents + rel->r_offset + 4, 0, 4);
2598 }
2599 }
2600
2601 eh = (struct elf_x86_link_hash_entry *) h;
2602
2603 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
2604 it here if it is defined in a non-shared object. */
2605 if (h != NULL
2606 && h->type == STT_GNU_IFUNC
2607 && h->def_regular)
2608 {
2609 bfd_vma plt_index;
2610 const char *name;
2611
2612 if ((input_section->flags & SEC_ALLOC) == 0)
2613 {
2614 /* If this is a SHT_NOTE section without SHF_ALLOC, treat
2615 STT_GNU_IFUNC symbol as STT_FUNC. */
2616 if (elf_section_type (input_section) == SHT_NOTE)
2617 goto skip_ifunc;
2618 /* Dynamic relocs are not propagated for SEC_DEBUGGING
2619 sections because such sections are not SEC_ALLOC and
2620 thus ld.so will not process them. */
2621 if ((input_section->flags & SEC_DEBUGGING) != 0)
2622 continue;
2623 abort ();
2624 }
2625
2626 switch (r_type)
2627 {
2628 default:
2629 break;
2630
2631 case R_X86_64_GOTPCREL:
2632 case R_X86_64_GOTPCRELX:
2633 case R_X86_64_REX_GOTPCRELX:
2634 case R_X86_64_GOTPCREL64:
2635 base_got = htab->elf.sgot;
2636 off = h->got.offset;
2637
2638 if (base_got == NULL)
2639 abort ();
2640
2641 if (off == (bfd_vma) -1)
2642 {
2643 /* We can't use h->got.offset here to save state, or
2644 even just remember the offset, as finish_dynamic_symbol
2645 would use that as offset into .got. */
2646
2647 if (h->plt.offset == (bfd_vma) -1)
2648 abort ();
2649
2650 if (htab->elf.splt != NULL)
2651 {
2652 plt_index = (h->plt.offset / plt_entry_size
2653 - htab->plt.has_plt0);
2654 off = (plt_index + 3) * GOT_ENTRY_SIZE;
2655 base_got = htab->elf.sgotplt;
2656 }
2657 else
2658 {
2659 plt_index = h->plt.offset / plt_entry_size;
2660 off = plt_index * GOT_ENTRY_SIZE;
2661 base_got = htab->elf.igotplt;
2662 }
2663
2664 if (h->dynindx == -1
2665 || h->forced_local
2666 || info->symbolic)
2667 {
2668 /* This references the local definition. We must
2669 initialize this entry in the global offset table.
2670 Since the offset must always be a multiple of 8,
2671 we use the least significant bit to record
2672 whether we have initialized it already.
2673
2674 When doing a dynamic link, we create a .rela.got
2675 relocation entry to initialize the value. This
2676 is done in the finish_dynamic_symbol routine. */
2677 if ((off & 1) != 0)
2678 off &= ~1;
2679 else
2680 {
2681 bfd_put_64 (output_bfd, relocation,
2682 base_got->contents + off);
2683 /* Note that this is harmless for the GOTPLT64
2684 case, as -1 | 1 still is -1. */
2685 h->got.offset |= 1;
2686 }
2687 }
2688 }
2689
2690 relocation = (base_got->output_section->vma
2691 + base_got->output_offset + off);
2692
2693 goto do_relocation;
2694 }
2695
2696 if (h->plt.offset == (bfd_vma) -1)
2697 {
2698 /* Handle static pointers of STT_GNU_IFUNC symbols. */
2699 if (r_type == htab->pointer_r_type
2700 && (input_section->flags & SEC_CODE) == 0)
2701 goto do_ifunc_pointer;
2702 goto bad_ifunc_reloc;
2703 }
2704
2705 /* STT_GNU_IFUNC symbol must go through PLT. */
2706 if (htab->elf.splt != NULL)
2707 {
2708 if (htab->plt_second != NULL)
2709 {
2710 resolved_plt = htab->plt_second;
2711 plt_offset = eh->plt_second.offset;
2712 }
2713 else
2714 {
2715 resolved_plt = htab->elf.splt;
2716 plt_offset = h->plt.offset;
2717 }
2718 }
2719 else
2720 {
2721 resolved_plt = htab->elf.iplt;
2722 plt_offset = h->plt.offset;
2723 }
2724
2725 relocation = (resolved_plt->output_section->vma
2726 + resolved_plt->output_offset + plt_offset);
2727
2728 switch (r_type)
2729 {
2730 default:
2731 bad_ifunc_reloc:
2732 if (h->root.root.string)
2733 name = h->root.root.string;
2734 else
2735 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
2736 NULL);
2737 _bfd_error_handler
2738 /* xgettext:c-format */
2739 (_("%pB: relocation %s against STT_GNU_IFUNC "
2740 "symbol `%s' isn't supported"), input_bfd,
2741 howto->name, name);
2742 bfd_set_error (bfd_error_bad_value);
2743 return FALSE;
2744
2745 case R_X86_64_32S:
2746 if (bfd_link_pic (info))
2747 abort ();
2748 goto do_relocation;
2749
2750 case R_X86_64_32:
2751 if (ABI_64_P (output_bfd))
2752 goto do_relocation;
2753 /* FALLTHROUGH */
2754 case R_X86_64_64:
2755 do_ifunc_pointer:
2756 if (rel->r_addend != 0)
2757 {
2758 if (h->root.root.string)
2759 name = h->root.root.string;
2760 else
2761 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
2762 sym, NULL);
2763 _bfd_error_handler
2764 /* xgettext:c-format */
2765 (_("%pB: relocation %s against STT_GNU_IFUNC "
2766 "symbol `%s' has non-zero addend: %" PRId64),
2767 input_bfd, howto->name, name, (int64_t) rel->r_addend);
2768 bfd_set_error (bfd_error_bad_value);
2769 return FALSE;
2770 }
2771
2772 /* Generate dynamic relocation only when there is a
2773 non-GOT reference in a shared object or there is no
2774 PLT. */
2775 if ((bfd_link_pic (info) && h->non_got_ref)
2776 || h->plt.offset == (bfd_vma) -1)
2777 {
2778 Elf_Internal_Rela outrel;
2779 asection *sreloc;
2780
2781 /* Need a dynamic relocation to get the real function
2782 address. */
2783 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
2784 info,
2785 input_section,
2786 rel->r_offset);
2787 if (outrel.r_offset == (bfd_vma) -1
2788 || outrel.r_offset == (bfd_vma) -2)
2789 abort ();
2790
2791 outrel.r_offset += (input_section->output_section->vma
2792 + input_section->output_offset);
2793
2794 if (POINTER_LOCAL_IFUNC_P (info, h))
2795 {
2796 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
2797 h->root.root.string,
2798 h->root.u.def.section->owner);
2799
2800 /* This symbol is resolved locally. */
2801 outrel.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
2802 outrel.r_addend = (h->root.u.def.value
2803 + h->root.u.def.section->output_section->vma
2804 + h->root.u.def.section->output_offset);
2805 }
2806 else
2807 {
2808 outrel.r_info = htab->r_info (h->dynindx, r_type);
2809 outrel.r_addend = 0;
2810 }
2811
2812 /* Dynamic relocations are stored in
2813 1. .rela.ifunc section in PIC object.
2814 2. .rela.got section in dynamic executable.
2815 3. .rela.iplt section in static executable. */
2816 if (bfd_link_pic (info))
2817 sreloc = htab->elf.irelifunc;
2818 else if (htab->elf.splt != NULL)
2819 sreloc = htab->elf.srelgot;
2820 else
2821 sreloc = htab->elf.irelplt;
2822 elf_append_rela (output_bfd, sreloc, &outrel);
2823
2824 /* If this reloc is against an external symbol, we
2825 do not want to fiddle with the addend. Otherwise,
2826 we need to include the symbol value so that it
2827 becomes an addend for the dynamic reloc. For an
2828 internal symbol, we have already updated the addend. */
2829 continue;
2830 }
2831 /* FALLTHROUGH */
2832 case R_X86_64_PC32:
2833 case R_X86_64_PC32_BND:
2834 case R_X86_64_PC64:
2835 case R_X86_64_PLT32:
2836 case R_X86_64_PLT32_BND:
2837 goto do_relocation;
2838 }
2839 }
2840
2841 skip_ifunc:
2842 resolved_to_zero = (eh != NULL
2843 && UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh));
2844
2845 /* When generating a shared object, the relocations handled here are
2846 copied into the output file to be resolved at run time. */
2847 switch (r_type)
2848 {
2849 case R_X86_64_GOT32:
2850 case R_X86_64_GOT64:
2851 /* Relocation is to the entry for this symbol in the global
2852 offset table. */
2853 case R_X86_64_GOTPCREL:
2854 case R_X86_64_GOTPCRELX:
2855 case R_X86_64_REX_GOTPCRELX:
2856 case R_X86_64_GOTPCREL64:
2857 /* Use global offset table entry as symbol value. */
2858 case R_X86_64_GOTPLT64:
2859 /* This is obsolete and treated the same as GOT64. */
2860 base_got = htab->elf.sgot;
2861
2862 if (htab->elf.sgot == NULL)
2863 abort ();
2864
2865 relative_reloc = FALSE;
2866 if (h != NULL)
2867 {
2868 off = h->got.offset;
2869 if (h->needs_plt
2870 && h->plt.offset != (bfd_vma)-1
2871 && off == (bfd_vma)-1)
2872 {
2873 /* We can't use h->got.offset here to save
2874 state, or even just remember the offset, as
2875 finish_dynamic_symbol would use that as offset into
2876 .got. */
2877 bfd_vma plt_index = (h->plt.offset / plt_entry_size
2878 - htab->plt.has_plt0);
2879 off = (plt_index + 3) * GOT_ENTRY_SIZE;
2880 base_got = htab->elf.sgotplt;
2881 }
2882
2883 if (RESOLVED_LOCALLY_P (info, h, htab))
2884 {
2885 /* We must initialize this entry in the global offset
2886 table. Since the offset must always be a multiple
2887 of 8, we use the least significant bit to record
2888 whether we have initialized it already.
2889
2890 When doing a dynamic link, we create a .rela.got
2891 relocation entry to initialize the value. This is
2892 done in the finish_dynamic_symbol routine. */
2893 if ((off & 1) != 0)
2894 off &= ~1;
2895 else
2896 {
2897 bfd_put_64 (output_bfd, relocation,
2898 base_got->contents + off);
2899 /* Note that this is harmless for the GOTPLT64 case,
2900 as -1 | 1 still is -1. */
2901 h->got.offset |= 1;
2902
2903 if (GENERATE_RELATIVE_RELOC_P (info, h))
2904 {
2905 /* If this symbol isn't dynamic in PIC,
2906 generate R_X86_64_RELATIVE here. */
2907 eh->no_finish_dynamic_symbol = 1;
2908 relative_reloc = TRUE;
2909 }
2910 }
2911 }
2912 else
2913 unresolved_reloc = FALSE;
2914 }
2915 else
2916 {
2917 if (local_got_offsets == NULL)
2918 abort ();
2919
2920 off = local_got_offsets[r_symndx];
2921
2922 /* The offset must always be a multiple of 8. We use
2923 the least significant bit to record whether we have
2924 already generated the necessary reloc. */
2925 if ((off & 1) != 0)
2926 off &= ~1;
2927 else
2928 {
2929 bfd_put_64 (output_bfd, relocation,
2930 base_got->contents + off);
2931 local_got_offsets[r_symndx] |= 1;
2932
2933 if (bfd_link_pic (info))
2934 relative_reloc = TRUE;
2935 }
2936 }
2937
2938 if (relative_reloc)
2939 {
2940 asection *s;
2941 Elf_Internal_Rela outrel;
2942
2943 /* We need to generate a R_X86_64_RELATIVE reloc
2944 for the dynamic linker. */
2945 s = htab->elf.srelgot;
2946 if (s == NULL)
2947 abort ();
2948
2949 outrel.r_offset = (base_got->output_section->vma
2950 + base_got->output_offset
2951 + off);
2952 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
2953 outrel.r_addend = relocation;
2954 elf_append_rela (output_bfd, s, &outrel);
2955 }
2956
2957 if (off >= (bfd_vma) -2)
2958 abort ();
2959
2960 relocation = base_got->output_section->vma
2961 + base_got->output_offset + off;
2962 if (r_type != R_X86_64_GOTPCREL
2963 && r_type != R_X86_64_GOTPCRELX
2964 && r_type != R_X86_64_REX_GOTPCRELX
2965 && r_type != R_X86_64_GOTPCREL64)
2966 relocation -= htab->elf.sgotplt->output_section->vma
2967 - htab->elf.sgotplt->output_offset;
2968
2969 break;
2970
2971 case R_X86_64_GOTOFF64:
2972 /* Relocation is relative to the start of the global offset
2973 table. */
2974
2975 /* Check to make sure it isn't a protected function or data
2976 symbol for a shared library since it may not be local when
2977 used as a function address or with copy relocation. We also
2978 need to make sure that a symbol is referenced locally. */
2979 if (bfd_link_pic (info) && h)
2980 {
2981 if (!h->def_regular)
2982 {
2983 const char *v;
2984
2985 switch (ELF_ST_VISIBILITY (h->other))
2986 {
2987 case STV_HIDDEN:
2988 v = _("hidden symbol");
2989 break;
2990 case STV_INTERNAL:
2991 v = _("internal symbol");
2992 break;
2993 case STV_PROTECTED:
2994 v = _("protected symbol");
2995 break;
2996 default:
2997 v = _("symbol");
2998 break;
2999 }
3000
3001 _bfd_error_handler
3002 /* xgettext:c-format */
3003 (_("%pB: relocation R_X86_64_GOTOFF64 against undefined %s"
3004 " `%s' can not be used when making a shared object"),
3005 input_bfd, v, h->root.root.string);
3006 bfd_set_error (bfd_error_bad_value);
3007 return FALSE;
3008 }
3009 else if (!bfd_link_executable (info)
3010 && !SYMBOL_REFERENCES_LOCAL_P (info, h)
3011 && (h->type == STT_FUNC
3012 || h->type == STT_OBJECT)
3013 && ELF_ST_VISIBILITY (h->other) == STV_PROTECTED)
3014 {
3015 _bfd_error_handler
3016 /* xgettext:c-format */
3017 (_("%pB: relocation R_X86_64_GOTOFF64 against protected %s"
3018 " `%s' can not be used when making a shared object"),
3019 input_bfd,
3020 h->type == STT_FUNC ? "function" : "data",
3021 h->root.root.string);
3022 bfd_set_error (bfd_error_bad_value);
3023 return FALSE;
3024 }
3025 }
3026
3027 /* Note that sgot is not involved in this
3028 calculation. We always want the start of .got.plt. If we
3029 defined _GLOBAL_OFFSET_TABLE_ in a different way, as is
3030 permitted by the ABI, we might have to change this
3031 calculation. */
3032 relocation -= htab->elf.sgotplt->output_section->vma
3033 + htab->elf.sgotplt->output_offset;
3034 break;
3035
3036 case R_X86_64_GOTPC32:
3037 case R_X86_64_GOTPC64:
3038 /* Use global offset table as symbol value. */
3039 relocation = htab->elf.sgotplt->output_section->vma
3040 + htab->elf.sgotplt->output_offset;
3041 unresolved_reloc = FALSE;
3042 break;
3043
3044 case R_X86_64_PLTOFF64:
3045 /* Relocation is PLT entry relative to GOT. For local
3046 symbols it's the symbol itself relative to GOT. */
3047 if (h != NULL
3048 /* See PLT32 handling. */
3049 && (h->plt.offset != (bfd_vma) -1
3050 || eh->plt_got.offset != (bfd_vma) -1)
3051 && htab->elf.splt != NULL)
3052 {
3053 if (eh->plt_got.offset != (bfd_vma) -1)
3054 {
3055 /* Use the GOT PLT. */
3056 resolved_plt = htab->plt_got;
3057 plt_offset = eh->plt_got.offset;
3058 }
3059 else if (htab->plt_second != NULL)
3060 {
3061 resolved_plt = htab->plt_second;
3062 plt_offset = eh->plt_second.offset;
3063 }
3064 else
3065 {
3066 resolved_plt = htab->elf.splt;
3067 plt_offset = h->plt.offset;
3068 }
3069
3070 relocation = (resolved_plt->output_section->vma
3071 + resolved_plt->output_offset
3072 + plt_offset);
3073 unresolved_reloc = FALSE;
3074 }
3075
3076 relocation -= htab->elf.sgotplt->output_section->vma
3077 + htab->elf.sgotplt->output_offset;
3078 break;
3079
3080 case R_X86_64_PLT32:
3081 case R_X86_64_PLT32_BND:
3082 /* Relocation is to the entry for this symbol in the
3083 procedure linkage table. */
3084
3085 /* Resolve a PLT32 reloc against a local symbol directly,
3086 without using the procedure linkage table. */
3087 if (h == NULL)
3088 break;
3089
3090 if ((h->plt.offset == (bfd_vma) -1
3091 && eh->plt_got.offset == (bfd_vma) -1)
3092 || htab->elf.splt == NULL)
3093 {
3094 /* We didn't make a PLT entry for this symbol. This
3095 happens when statically linking PIC code, or when
3096 using -Bsymbolic. */
3097 break;
3098 }
3099
3100 use_plt:
3101 if (h->plt.offset != (bfd_vma) -1)
3102 {
3103 if (htab->plt_second != NULL)
3104 {
3105 resolved_plt = htab->plt_second;
3106 plt_offset = eh->plt_second.offset;
3107 }
3108 else
3109 {
3110 resolved_plt = htab->elf.splt;
3111 plt_offset = h->plt.offset;
3112 }
3113 }
3114 else
3115 {
3116 /* Use the GOT PLT. */
3117 resolved_plt = htab->plt_got;
3118 plt_offset = eh->plt_got.offset;
3119 }
3120
3121 relocation = (resolved_plt->output_section->vma
3122 + resolved_plt->output_offset
3123 + plt_offset);
3124 unresolved_reloc = FALSE;
3125 break;
3126
3127 case R_X86_64_SIZE32:
3128 case R_X86_64_SIZE64:
3129 /* Set to symbol size. */
3130 relocation = st_size;
3131 goto direct;
3132
3133 case R_X86_64_PC8:
3134 case R_X86_64_PC16:
3135 case R_X86_64_PC32:
3136 case R_X86_64_PC32_BND:
3137 /* Since x86-64 has PC-relative PLT, we can use PLT in PIE
3138 as function address. */
3139 if (h != NULL
3140 && (input_section->flags & SEC_CODE) == 0
3141 && bfd_link_pie (info)
3142 && h->type == STT_FUNC
3143 && !h->def_regular
3144 && h->def_dynamic)
3145 goto use_plt;
3146 /* Fall through. */
3147
3148 case R_X86_64_8:
3149 case R_X86_64_16:
3150 case R_X86_64_32:
3151 case R_X86_64_PC64:
3152 case R_X86_64_64:
3153 /* FIXME: The ABI says the linker should make sure the value is
3154 the same when it's zero-extended to 64 bits. */
3155
3156 direct:
3157 if ((input_section->flags & SEC_ALLOC) == 0)
3158 break;
3159
3160 need_copy_reloc_in_pie = (bfd_link_pie (info)
3161 && h != NULL
3162 && (h->needs_copy
3163 || eh->needs_copy
3164 || (h->root.type
3165 == bfd_link_hash_undefined))
3166 && (X86_PCREL_TYPE_P (r_type)
3167 || X86_SIZE_TYPE_P (r_type)));
3168
3169 if (GENERATE_DYNAMIC_RELOCATION_P (info, eh, r_type,
3170 need_copy_reloc_in_pie,
3171 resolved_to_zero, FALSE))
3172 {
3173 Elf_Internal_Rela outrel;
3174 bfd_boolean skip, relocate;
3175 asection *sreloc;
3176
3177 /* When generating a shared object, these relocations
3178 are copied into the output file to be resolved at run
3179 time. */
3180 skip = FALSE;
3181 relocate = FALSE;
3182
3183 outrel.r_offset =
3184 _bfd_elf_section_offset (output_bfd, info, input_section,
3185 rel->r_offset);
3186 if (outrel.r_offset == (bfd_vma) -1)
3187 skip = TRUE;
3188 else if (outrel.r_offset == (bfd_vma) -2)
3189 skip = TRUE, relocate = TRUE;
3190
3191 outrel.r_offset += (input_section->output_section->vma
3192 + input_section->output_offset);
3193
3194 if (skip)
3195 memset (&outrel, 0, sizeof outrel);
3196
3197 else if (COPY_INPUT_RELOC_P (info, h, r_type))
3198 {
3199 outrel.r_info = htab->r_info (h->dynindx, r_type);
3200 outrel.r_addend = rel->r_addend;
3201 }
3202 else
3203 {
3204 /* This symbol is local, or marked to become local.
3205 When relocation overflow check is disabled, we
3206 convert R_X86_64_32 to dynamic R_X86_64_RELATIVE. */
3207 if (r_type == htab->pointer_r_type
3208 || (r_type == R_X86_64_32
3209 && htab->params->no_reloc_overflow_check))
3210 {
3211 relocate = TRUE;
3212 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
3213 outrel.r_addend = relocation + rel->r_addend;
3214 }
3215 else if (r_type == R_X86_64_64
3216 && !ABI_64_P (output_bfd))
3217 {
3218 relocate = TRUE;
3219 outrel.r_info = htab->r_info (0,
3220 R_X86_64_RELATIVE64);
3221 outrel.r_addend = relocation + rel->r_addend;
3222 /* Check addend overflow. */
3223 if ((outrel.r_addend & 0x80000000)
3224 != (rel->r_addend & 0x80000000))
3225 {
3226 const char *name;
3227 int addend = rel->r_addend;
3228 if (h && h->root.root.string)
3229 name = h->root.root.string;
3230 else
3231 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
3232 sym, NULL);
3233 _bfd_error_handler
3234 /* xgettext:c-format */
3235 (_("%pB: addend %s%#x in relocation %s against "
3236 "symbol `%s' at %#" PRIx64
3237 " in section `%pA' is out of range"),
3238 input_bfd, addend < 0 ? "-" : "", addend,
3239 howto->name, name, (uint64_t) rel->r_offset,
3240 input_section);
3241 bfd_set_error (bfd_error_bad_value);
3242 return FALSE;
3243 }
3244 }
3245 else
3246 {
3247 long sindx;
3248
3249 if (bfd_is_abs_section (sec))
3250 sindx = 0;
3251 else if (sec == NULL || sec->owner == NULL)
3252 {
3253 bfd_set_error (bfd_error_bad_value);
3254 return FALSE;
3255 }
3256 else
3257 {
3258 asection *osec;
3259
3260 /* We are turning this relocation into one
3261 against a section symbol. It would be
3262 proper to subtract the symbol's value,
3263 osec->vma, from the emitted reloc addend,
3264 but ld.so expects buggy relocs. */
3265 osec = sec->output_section;
3266 sindx = elf_section_data (osec)->dynindx;
3267 if (sindx == 0)
3268 {
3269 asection *oi = htab->elf.text_index_section;
3270 sindx = elf_section_data (oi)->dynindx;
3271 }
3272 BFD_ASSERT (sindx != 0);
3273 }
3274
3275 outrel.r_info = htab->r_info (sindx, r_type);
3276 outrel.r_addend = relocation + rel->r_addend;
3277 }
3278 }
3279
3280 sreloc = elf_section_data (input_section)->sreloc;
3281
3282 if (sreloc == NULL || sreloc->contents == NULL)
3283 {
3284 r = bfd_reloc_notsupported;
3285 goto check_relocation_error;
3286 }
3287
3288 elf_append_rela (output_bfd, sreloc, &outrel);
3289
3290 /* If this reloc is against an external symbol, we do
3291 not want to fiddle with the addend. Otherwise, we
3292 need to include the symbol value so that it becomes
3293 an addend for the dynamic reloc. */
3294 if (! relocate)
3295 continue;
3296 }
3297
3298 break;
3299
3300 case R_X86_64_TLSGD:
3301 case R_X86_64_GOTPC32_TLSDESC:
3302 case R_X86_64_TLSDESC_CALL:
3303 case R_X86_64_GOTTPOFF:
3304 tls_type = GOT_UNKNOWN;
3305 if (h == NULL && local_got_offsets)
3306 tls_type = elf_x86_local_got_tls_type (input_bfd) [r_symndx];
3307 else if (h != NULL)
3308 tls_type = elf_x86_hash_entry (h)->tls_type;
3309
3310 r_type_tls = r_type;
3311 if (! elf_x86_64_tls_transition (info, input_bfd,
3312 input_section, contents,
3313 symtab_hdr, sym_hashes,
3314 &r_type_tls, tls_type, rel,
3315 relend, h, r_symndx, TRUE))
3316 return FALSE;
3317
3318 if (r_type_tls == R_X86_64_TPOFF32)
3319 {
3320 bfd_vma roff = rel->r_offset;
3321
3322 BFD_ASSERT (! unresolved_reloc);
3323
3324 if (r_type == R_X86_64_TLSGD)
3325 {
3326 /* GD->LE transition. For 64bit, change
3327 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3328 .word 0x6666; rex64; call __tls_get_addr@PLT
3329 or
3330 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3331 .byte 0x66; rex64
3332 call *__tls_get_addr@GOTPCREL(%rip)
3333 which may be converted to
3334 addr32 call __tls_get_addr
3335 into:
3336 movq %fs:0, %rax
3337 leaq foo@tpoff(%rax), %rax
3338 For 32bit, change
3339 leaq foo@tlsgd(%rip), %rdi
3340 .word 0x6666; rex64; call __tls_get_addr@PLT
3341 or
3342 leaq foo@tlsgd(%rip), %rdi
3343 .byte 0x66; rex64
3344 call *__tls_get_addr@GOTPCREL(%rip)
3345 which may be converted to
3346 addr32 call __tls_get_addr
3347 into:
3348 movl %fs:0, %eax
3349 leaq foo@tpoff(%rax), %rax
3350 For largepic, change:
3351 leaq foo@tlsgd(%rip), %rdi
3352 movabsq $__tls_get_addr@pltoff, %rax
3353 addq %r15, %rax
3354 call *%rax
3355 into:
3356 movq %fs:0, %rax
3357 leaq foo@tpoff(%rax), %rax
3358 nopw 0x0(%rax,%rax,1) */
3359 int largepic = 0;
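/* NB: for largepic the instruction after the leaq (starting at
   roff + 4) is "movabsq $__tls_get_addr@pltoff, %rax" = 48 b8 ...,
   so the 0xb8 opcode byte at roff + 5 identifies that form.  */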
3360 if (ABI_64_P (output_bfd))
3361 {
3362 if (contents[roff + 5] == 0xb8)
3363 {
3364 if (roff < 3
3365 || (roff - 3 + 22) > input_section->size)
3366 {
3367 corrupt_input:
3368 info->callbacks->einfo
3369 (_("%F%P: corrupt input: %pB\n"),
3370 input_bfd);
3371 return FALSE;
3372 }
3373 memcpy (contents + roff - 3,
3374 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80"
3375 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
3376 largepic = 1;
3377 }
3378 else
3379 {
3380 if (roff < 4
3381 || (roff - 4 + 16) > input_section->size)
3382 goto corrupt_input;
3383 memcpy (contents + roff - 4,
3384 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3385 16);
3386 }
3387 }
3388 else
3389 {
3390 if (roff < 3
3391 || (roff - 3 + 15) > input_section->size)
3392 goto corrupt_input;
3393 memcpy (contents + roff - 3,
3394 "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3395 15);
3396 }
3397 bfd_put_32 (output_bfd,
3398 elf_x86_64_tpoff (info, relocation),
3399 contents + roff + 8 + largepic);
3400 /* Skip R_X86_64_PC32, R_X86_64_PLT32,
3401 R_X86_64_GOTPCRELX and R_X86_64_PLTOFF64. */
3402 rel++;
3403 wrel++;
3404 continue;
3405 }
3406 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3407 {
3408 /* GDesc -> LE transition.
3409 It's originally something like:
3410 leaq x@tlsdesc(%rip), %rax
3411
3412 Change it to:
3413 movl $x@tpoff, %rax. */
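/* NB: type is the REX prefix of the leaq and val its ModRM byte.
   The rewrite emits REX.W plus REX.B when REX.R was set, because
   the destination register moves into ModRM.rm of the c7 /0
   (mov $imm32, %reg) form.  */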
3414
3415 unsigned int val, type;
3416
3417 if (roff < 3)
3418 goto corrupt_input;
3419 type = bfd_get_8 (input_bfd, contents + roff - 3);
3420 val = bfd_get_8 (input_bfd, contents + roff - 1);
3421 bfd_put_8 (output_bfd, 0x48 | ((type >> 2) & 1),
3422 contents + roff - 3);
3423 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
3424 bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7),
3425 contents + roff - 1);
3426 bfd_put_32 (output_bfd,
3427 elf_x86_64_tpoff (info, relocation),
3428 contents + roff);
3429 continue;
3430 }
3431 else if (r_type == R_X86_64_TLSDESC_CALL)
3432 {
3433 /* GDesc -> LE transition.
3434 It's originally:
3435 call *(%rax)
3436 Turn it into:
3437 xchg %ax,%ax. */
3438 bfd_put_8 (output_bfd, 0x66, contents + roff);
3439 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
3440 continue;
3441 }
3442 else if (r_type == R_X86_64_GOTTPOFF)
3443 {
3444 /* IE->LE transition:
3445 For 64bit, originally it can be one of:
3446 movq foo@gottpoff(%rip), %reg
3447 addq foo@gottpoff(%rip), %reg
3448 We change it into:
3449 movq $foo, %reg
3450 leaq foo(%reg), %reg
3451 addq $foo, %reg.
3452 For 32bit, originally it can be one of:
3453 movq foo@gottpoff(%rip), %reg
3454 addl foo@gottpoff(%rip), %reg
3455 We change it into:
3456 movq $foo, %reg
3457 leal foo(%reg), %reg
3458 addl $foo, %reg. */
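/* NB: val is the REX prefix (if any) three bytes before the
   displacement, type is the opcode (0x8b for mov; anything else
   is treated as add), and reg is the destination register taken
   from ModRM.reg.  */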
3459
3460 unsigned int val, type, reg;
3461
3462 if (roff >= 3)
3463 val = bfd_get_8 (input_bfd, contents + roff - 3);
3464 else
3465 {
3466 if (roff < 2)
3467 goto corrupt_input;
3468 val = 0;
3469 }
3470 type = bfd_get_8 (input_bfd, contents + roff - 2);
3471 reg = bfd_get_8 (input_bfd, contents + roff - 1);
3472 reg >>= 3;
3473 if (type == 0x8b)
3474 {
3475 /* movq */
3476 if (val == 0x4c)
3477 {
3478 if (roff < 3)
3479 goto corrupt_input;
3480 bfd_put_8 (output_bfd, 0x49,
3481 contents + roff - 3);
3482 }
3483 else if (!ABI_64_P (output_bfd) && val == 0x44)
3484 {
3485 if (roff < 3)
3486 goto corrupt_input;
3487 bfd_put_8 (output_bfd, 0x41,
3488 contents + roff - 3);
3489 }
3490 bfd_put_8 (output_bfd, 0xc7,
3491 contents + roff - 2);
3492 bfd_put_8 (output_bfd, 0xc0 | reg,
3493 contents + roff - 1);
3494 }
3495 else if (reg == 4)
3496 {
3497 /* addq/addl -> addq/addl - addressing with %rsp/%r12
3498 is special */
3499 if (val == 0x4c)
3500 {
3501 if (roff < 3)
3502 goto corrupt_input;
3503 bfd_put_8 (output_bfd, 0x49,
3504 contents + roff - 3);
3505 }
3506 else if (!ABI_64_P (output_bfd) && val == 0x44)
3507 {
3508 if (roff < 3)
3509 goto corrupt_input;
3510 bfd_put_8 (output_bfd, 0x41,
3511 contents + roff - 3);
3512 }
3513 bfd_put_8 (output_bfd, 0x81,
3514 contents + roff - 2);
3515 bfd_put_8 (output_bfd, 0xc0 | reg,
3516 contents + roff - 1);
3517 }
3518 else
3519 {
3520 /* addq/addl -> leaq/leal */
3521 if (val == 0x4c)
3522 {
3523 if (roff < 3)
3524 goto corrupt_input;
3525 bfd_put_8 (output_bfd, 0x4d,
3526 contents + roff - 3);
3527 }
3528 else if (!ABI_64_P (output_bfd) && val == 0x44)
3529 {
3530 if (roff < 3)
3531 goto corrupt_input;
3532 bfd_put_8 (output_bfd, 0x45,
3533 contents + roff - 3);
3534 }
3535 bfd_put_8 (output_bfd, 0x8d,
3536 contents + roff - 2);
3537 bfd_put_8 (output_bfd, 0x80 | reg | (reg << 3),
3538 contents + roff - 1);
3539 }
3540 bfd_put_32 (output_bfd,
3541 elf_x86_64_tpoff (info, relocation),
3542 contents + roff);
3543 continue;
3544 }
3545 else
3546 BFD_ASSERT (FALSE);
3547 }
3548
3549 if (htab->elf.sgot == NULL)
3550 abort ();
3551
3552 if (h != NULL)
3553 {
3554 off = h->got.offset;
3555 offplt = elf_x86_hash_entry (h)->tlsdesc_got;
3556 }
3557 else
3558 {
3559 if (local_got_offsets == NULL)
3560 abort ();
3561
3562 off = local_got_offsets[r_symndx];
3563 offplt = local_tlsdesc_gotents[r_symndx];
3564 }
3565
3566 if ((off & 1) != 0)
3567 off &= ~1;
3568 else
3569 {
3570 Elf_Internal_Rela outrel;
3571 int dr_type, indx;
3572 asection *sreloc;
3573
3574 if (htab->elf.srelgot == NULL)
3575 abort ();
3576
3577 indx = h && h->dynindx != -1 ? h->dynindx : 0;
3578
3579 if (GOT_TLS_GDESC_P (tls_type))
3580 {
3581 outrel.r_info = htab->r_info (indx, R_X86_64_TLSDESC);
3582 BFD_ASSERT (htab->sgotplt_jump_table_size + offplt
3583 + 2 * GOT_ENTRY_SIZE <= htab->elf.sgotplt->size);
3584 outrel.r_offset = (htab->elf.sgotplt->output_section->vma
3585 + htab->elf.sgotplt->output_offset
3586 + offplt
3587 + htab->sgotplt_jump_table_size);
3588 sreloc = htab->elf.srelplt;
3589 if (indx == 0)
3590 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3591 else
3592 outrel.r_addend = 0;
3593 elf_append_rela (output_bfd, sreloc, &outrel);
3594 }
3595
3596 sreloc = htab->elf.srelgot;
3597
3598 outrel.r_offset = (htab->elf.sgot->output_section->vma
3599 + htab->elf.sgot->output_offset + off);
3600
3601 if (GOT_TLS_GD_P (tls_type))
3602 dr_type = R_X86_64_DTPMOD64;
3603 else if (GOT_TLS_GDESC_P (tls_type))
3604 goto dr_done;
3605 else
3606 dr_type = R_X86_64_TPOFF64;
3607
3608 bfd_put_64 (output_bfd, 0, htab->elf.sgot->contents + off);
3609 outrel.r_addend = 0;
3610 if ((dr_type == R_X86_64_TPOFF64
3611 || dr_type == R_X86_64_TLSDESC) && indx == 0)
3612 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3613 outrel.r_info = htab->r_info (indx, dr_type);
3614
3615 elf_append_rela (output_bfd, sreloc, &outrel);
3616
3617 if (GOT_TLS_GD_P (tls_type))
3618 {
3619 if (indx == 0)
3620 {
3621 BFD_ASSERT (! unresolved_reloc);
3622 bfd_put_64 (output_bfd,
3623 relocation - _bfd_x86_elf_dtpoff_base (info),
3624 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3625 }
3626 else
3627 {
3628 bfd_put_64 (output_bfd, 0,
3629 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3630 outrel.r_info = htab->r_info (indx,
3631 R_X86_64_DTPOFF64);
3632 outrel.r_offset += GOT_ENTRY_SIZE;
3633 elf_append_rela (output_bfd, sreloc,
3634 &outrel);
3635 }
3636 }
3637
3638 dr_done:
3639 if (h != NULL)
3640 h->got.offset |= 1;
3641 else
3642 local_got_offsets[r_symndx] |= 1;
3643 }
3644
3645 if (off >= (bfd_vma) -2
3646 && ! GOT_TLS_GDESC_P (tls_type))
3647 abort ();
3648 if (r_type_tls == r_type)
3649 {
3650 if (r_type == R_X86_64_GOTPC32_TLSDESC
3651 || r_type == R_X86_64_TLSDESC_CALL)
3652 relocation = htab->elf.sgotplt->output_section->vma
3653 + htab->elf.sgotplt->output_offset
3654 + offplt + htab->sgotplt_jump_table_size;
3655 else
3656 relocation = htab->elf.sgot->output_section->vma
3657 + htab->elf.sgot->output_offset + off;
3658 unresolved_reloc = FALSE;
3659 }
3660 else
3661 {
3662 bfd_vma roff = rel->r_offset;
3663
3664 if (r_type == R_X86_64_TLSGD)
3665 {
3666 /* GD->IE transition. For 64bit, change
3667 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3668 .word 0x6666; rex64; call __tls_get_addr@PLT
3669 or
3670 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3671 .byte 0x66; rex64
3672 call *__tls_get_addr@GOTPCREL(%rip)
3673 which may be converted to
3674 addr32 call __tls_get_addr
3675 into:
3676 movq %fs:0, %rax
3677 addq foo@gottpoff(%rip), %rax
3678 For 32bit, change
3679 leaq foo@tlsgd(%rip), %rdi
3680 .word 0x6666; rex64; call __tls_get_addr@PLT
3681 or
3682 leaq foo@tlsgd(%rip), %rdi
3683 .byte 0x66; rex64;
3684 call *__tls_get_addr@GOTPCREL(%rip)
3685 which may be converted to
3686 addr32 call __tls_get_addr
3687 into:
3688 movl %fs:0, %eax
3689 addq foo@gottpoff(%rip), %rax
3690 For largepic, change:
3691 leaq foo@tlsgd(%rip), %rdi
3692 movabsq $__tls_get_addr@pltoff, %rax
3693 addq %r15, %rax
3694 call *%rax
3695 into:
3696 movq %fs:0, %rax
3697 addq foo@gottpoff(%rip), %rax
3698 nopw 0x0(%rax,%rax,1) */
3699 int largepic = 0;
3700 if (ABI_64_P (output_bfd))
3701 {
3702 if (contents[roff + 5] == 0xb8)
3703 {
3704 if (roff < 3
3705 || (roff - 3 + 22) > input_section->size)
3706 goto corrupt_input;
3707 memcpy (contents + roff - 3,
3708 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05"
3709 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
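/* The 22 replacement bytes decode as: movq %fs:0, %rax
   (64 48 8b 04 25 imm32), addq x(%rip), %rax (48 03 05 imm32)
   and nopw 0x0(%rax,%rax,1) (66 0f 1f 44 00 00).  */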
3710 largepic = 1;
3711 }
3712 else
3713 {
3714 if (roff < 4
3715 || (roff - 4 + 16) > input_section->size)
3716 goto corrupt_input;
3717 memcpy (contents + roff - 4,
3718 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
3719 16);
3720 }
3721 }
3722 else
3723 {
3724 if (roff < 3
3725 || (roff - 3 + 15) > input_section->size)
3726 goto corrupt_input;
3727 memcpy (contents + roff - 3,
3728 "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
3729 15);
3730 }
3731
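/* Compute the displacement for the new addq foo@gottpoff(%rip), %rax.
   Its 32-bit immediate is stored at ROFF + 8 + LARGEPIC, so
   subtracting ROFF + LARGEPIC + 12 yields the GOT entry address
   relative to the end of that immediate (the next RIP).  */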
3732 relocation = (htab->elf.sgot->output_section->vma
3733 + htab->elf.sgot->output_offset + off
3734 - roff
3735 - largepic
3736 - input_section->output_section->vma
3737 - input_section->output_offset
3738 - 12);
3739 bfd_put_32 (output_bfd, relocation,
3740 contents + roff + 8 + largepic);
3741 /* Skip R_X86_64_PLT32/R_X86_64_PLTOFF64. */
3742 rel++;
3743 wrel++;
3744 continue;
3745 }
3746 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3747 {
3748 /* GDesc -> IE transition.
3749 It's originally something like:
3750 leaq x@tlsdesc(%rip), %rax
3751
3752 Change it to:
3753 movq x@gottpoff(%rip), %rax # before xchg %ax,%ax. */
3754
3755 /* Now modify the instruction as appropriate. To
3756 turn a leaq into a movq in the form we use it, it
3757 suffices to change the second byte from 0x8d to
3758 0x8b. */
3759 if (roff < 2)
3760 goto corrupt_input;
3761 bfd_put_8 (output_bfd, 0x8b, contents + roff - 2);
3762
3763 bfd_put_32 (output_bfd,
3764 htab->elf.sgot->output_section->vma
3765 + htab->elf.sgot->output_offset + off
3766 - rel->r_offset
3767 - input_section->output_section->vma
3768 - input_section->output_offset
3769 - 4,
3770 contents + roff);
3771 continue;
3772 }
3773 else if (r_type == R_X86_64_TLSDESC_CALL)
3774 {
3775 /* GDesc -> IE transition.
3776 It's originally:
3777 call *(%rax)
3778
3779 Change it to:
3780 xchg %ax, %ax. */
3781
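/* 0x66 0x90 is xchg %ax, %ax, a two-byte nop that exactly replaces
   the two-byte call *(%rax).  */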
3782 bfd_put_8 (output_bfd, 0x66, contents + roff);
3783 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
3784 continue;
3785 }
3786 else
3787 BFD_ASSERT (FALSE);
3788 }
3789 break;
3790
3791 case R_X86_64_TLSLD:
3792 if (! elf_x86_64_tls_transition (info, input_bfd,
3793 input_section, contents,
3794 symtab_hdr, sym_hashes,
3795 &r_type, GOT_UNKNOWN, rel,
3796 relend, h, r_symndx, TRUE))
3797 return FALSE;
3798
3799 if (r_type != R_X86_64_TLSLD)
3800 {
3801 /* LD->LE transition:
3802 leaq foo@tlsld(%rip), %rdi
3803 call __tls_get_addr@PLT
3804 For 64bit, we change it into:
3805 .word 0x6666; .byte 0x66; movq %fs:0, %rax
3806 For 32bit, we change it into:
3807 nopl 0x0(%rax); movl %fs:0, %eax
3808 Or
3809 leaq foo@tlsld(%rip), %rdi;
3810 call *__tls_get_addr@GOTPCREL(%rip)
3811 which may be converted to
3812 addr32 call __tls_get_addr
3813 For 64bit, we change it into:
3814 .word 0x6666; .word 0x6666; movq %fs:0, %rax
3815 For 32bit, we change it into:
3816 nopw 0x0(%rax); movl %fs:0, %eax
3817 For largepic, change:
3818 leaq foo@tlsld(%rip), %rdi
3819 movabsq $__tls_get_addr@pltoff, %rax
3820 addq %rbx, %rax
3821 call *%rax
3822 into
3823 data16 data16 data16 nopw %cs:0x0(%rax,%rax,1)
3824 movq %fs:0, %rax */
3825
3826 BFD_ASSERT (r_type == R_X86_64_TPOFF32);
3827 if (ABI_64_P (output_bfd))
3828 {
3829 if ((rel->r_offset + 5) >= input_section->size)
3830 goto corrupt_input;
3831 if (contents[rel->r_offset + 5] == 0xb8)
3832 {
3833 if (rel->r_offset < 3
3834 || (rel->r_offset - 3 + 22) > input_section->size)
3835 goto corrupt_input;
3836 memcpy (contents + rel->r_offset - 3,
3837 "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0"
3838 "\x64\x48\x8b\x04\x25\0\0\0", 22);
3839 }
3840 else if (contents[rel->r_offset + 4] == 0xff
3841 || contents[rel->r_offset + 4] == 0x67)
3842 {
3843 if (rel->r_offset < 3
3844 || (rel->r_offset - 3 + 13) > input_section->size)
3845 goto corrupt_input;
3846 memcpy (contents + rel->r_offset - 3,
3847 "\x66\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0",
3848 13);
3849
3850 }
3851 else
3852 {
3853 if (rel->r_offset < 3
3854 || (rel->r_offset - 3 + 12) > input_section->size)
3855 goto corrupt_input;
3856 memcpy (contents + rel->r_offset - 3,
3857 "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12);
3858 }
3859 }
3860 else
3861 {
3862 if ((rel->r_offset + 4) >= input_section->size)
3863 goto corrupt_input;
3864 if (contents[rel->r_offset + 4] == 0xff)
3865 {
3866 if (rel->r_offset < 3
3867 || (rel->r_offset - 3 + 13) > input_section->size)
3868 goto corrupt_input;
3869 memcpy (contents + rel->r_offset - 3,
3870 "\x66\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0",
3871 13);
3872 }
3873 else
3874 {
3875 if (rel->r_offset < 3
3876 || (rel->r_offset - 3 + 12) > input_section->size)
3877 goto corrupt_input;
3878 memcpy (contents + rel->r_offset - 3,
3879 "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12);
3880 }
3881 }
3882 /* Skip R_X86_64_PC32, R_X86_64_PLT32, R_X86_64_GOTPCRELX
3883 and R_X86_64_PLTOFF64. */
3884 rel++;
3885 wrel++;
3886 continue;
3887 }
3888
3889 if (htab->elf.sgot == NULL)
3890 abort ();
3891
3892 off = htab->tls_ld_or_ldm_got.offset;
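/* Bit 0 of the cached GOT offset serves as an already-processed
   flag; real offsets are always even since GOT entries are
   GOT_ENTRY_SIZE aligned.  */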
3893 if (off & 1)
3894 off &= ~1;
3895 else
3896 {
3897 Elf_Internal_Rela outrel;
3898
3899 if (htab->elf.srelgot == NULL)
3900 abort ();
3901
3902 outrel.r_offset = (htab->elf.sgot->output_section->vma
3903 + htab->elf.sgot->output_offset + off);
3904
3905 bfd_put_64 (output_bfd, 0,
3906 htab->elf.sgot->contents + off);
3907 bfd_put_64 (output_bfd, 0,
3908 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3909 outrel.r_info = htab->r_info (0, R_X86_64_DTPMOD64);
3910 outrel.r_addend = 0;
3911 elf_append_rela (output_bfd, htab->elf.srelgot,
3912 &outrel);
3913 htab->tls_ld_or_ldm_got.offset |= 1;
3914 }
3915 relocation = htab->elf.sgot->output_section->vma
3916 + htab->elf.sgot->output_offset + off;
3917 unresolved_reloc = FALSE;
3918 break;
3919
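/* A DTPOFF32 in a code section of an executable is normally the
   result of a GD->LE or LD->LE transition, so resolve it to the
   static TLS offset; elsewhere (e.g. debug sections) keep the DTP
   offset.  */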
3920 case R_X86_64_DTPOFF32:
3921 if (!bfd_link_executable (info)
3922 || (input_section->flags & SEC_CODE) == 0)
3923 relocation -= _bfd_x86_elf_dtpoff_base (info);
3924 else
3925 relocation = elf_x86_64_tpoff (info, relocation);
3926 break;
3927
3928 case R_X86_64_TPOFF32:
3929 case R_X86_64_TPOFF64:
3930 BFD_ASSERT (bfd_link_executable (info));
3931 relocation = elf_x86_64_tpoff (info, relocation);
3932 break;
3933
3934 case R_X86_64_DTPOFF64:
3935 BFD_ASSERT ((input_section->flags & SEC_CODE) == 0);
3936 relocation -= _bfd_x86_elf_dtpoff_base (info);
3937 break;
3938
3939 default:
3940 break;
3941 }
3942
3943 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
3944 because such sections are not SEC_ALLOC and thus ld.so will
3945 not process them. */
3946 if (unresolved_reloc
3947 && !((input_section->flags & SEC_DEBUGGING) != 0
3948 && h->def_dynamic)
3949 && _bfd_elf_section_offset (output_bfd, info, input_section,
3950 rel->r_offset) != (bfd_vma) -1)
3951 {
3952 switch (r_type)
3953 {
3954 case R_X86_64_32S:
3955 sec = h->root.u.def.section;
3956 if ((info->nocopyreloc
3957 || (eh->def_protected
3958 && elf_has_no_copy_on_protected (h->root.u.def.section->owner)))
3959 && !(h->root.u.def.section->flags & SEC_CODE))
3960 return elf_x86_64_need_pic (info, input_bfd, input_section,
3961 h, NULL, NULL, howto);
3962 /* Fall through. */
3963
3964 default:
3965 _bfd_error_handler
3966 /* xgettext:c-format */
3967 (_("%pB(%pA+%#" PRIx64 "): "
3968 "unresolvable %s relocation against symbol `%s'"),
3969 input_bfd,
3970 input_section,
3971 (uint64_t) rel->r_offset,
3972 howto->name,
3973 h->root.root.string);
3974 return FALSE;
3975 }
3976 }
3977
3978 do_relocation:
3979 r = _bfd_final_link_relocate (howto, input_bfd, input_section,
3980 contents, rel->r_offset,
3981 relocation, rel->r_addend);
3982
3983 check_relocation_error:
3984 if (r != bfd_reloc_ok)
3985 {
3986 const char *name;
3987
3988 if (h != NULL)
3989 name = h->root.root.string;
3990 else
3991 {
3992 name = bfd_elf_string_from_elf_section (input_bfd,
3993 symtab_hdr->sh_link,
3994 sym->st_name);
3995 if (name == NULL)
3996 return FALSE;
3997 if (*name == '\0')
3998 name = bfd_section_name (input_bfd, sec);
3999 }
4000
4001 if (r == bfd_reloc_overflow)
4002 {
4003 if (converted_reloc)
4004 {
4005 info->callbacks->einfo
4006 (_("%F%P: failed to convert GOTPCREL relocation; relink with --no-relax\n"));
4007 return FALSE;
4008 }
4009 (*info->callbacks->reloc_overflow)
4010 (info, (h ? &h->root : NULL), name, howto->name,
4011 (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
4012 }
4013 else
4014 {
4015 _bfd_error_handler
4016 /* xgettext:c-format */
4017 (_("%pB(%pA+%#" PRIx64 "): reloc against `%s': error %d"),
4018 input_bfd, input_section,
4019 (uint64_t) rel->r_offset, name, (int) r);
4020 return FALSE;
4021 }
4022 }
4023
4024 if (wrel != rel)
4025 *wrel = *rel;
4026 }
4027
4028 if (wrel != rel)
4029 {
4030 Elf_Internal_Shdr *rel_hdr;
4031 size_t deleted = rel - wrel;
4032
4033 rel_hdr = _bfd_elf_single_rel_hdr (input_section->output_section);
4034 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
4035 if (rel_hdr->sh_size == 0)
4036 {
4037 /* It is too late to remove an empty reloc section. Leave
4038 one NONE reloc.
4039 ??? What is wrong with an empty section??? */
4040 rel_hdr->sh_size = rel_hdr->sh_entsize;
4041 deleted -= 1;
4042 }
4043 rel_hdr = _bfd_elf_single_rel_hdr (input_section);
4044 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
4045 input_section->reloc_count -= deleted;
4046 }
4047
4048 return TRUE;
4049 }
4050
4051 /* Finish up dynamic symbol handling. We set the contents of various
4052 dynamic sections here. */
4053
4054 static bfd_boolean
4055 elf_x86_64_finish_dynamic_symbol (bfd *output_bfd,
4056 struct bfd_link_info *info,
4057 struct elf_link_hash_entry *h,
4058 Elf_Internal_Sym *sym)
4059 {
4060 struct elf_x86_link_hash_table *htab;
4061 bfd_boolean use_plt_second;
4062 struct elf_x86_link_hash_entry *eh;
4063 bfd_boolean local_undefweak;
4064
4065 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
4066 if (htab == NULL)
4067 return FALSE;
4068
4069 /* Use the second PLT section only if there is a .plt section. */
4070 use_plt_second = htab->elf.splt != NULL && htab->plt_second != NULL;
4071
4072 eh = (struct elf_x86_link_hash_entry *) h;
4073 if (eh->no_finish_dynamic_symbol)
4074 abort ();
4075
4076 /* We keep PLT/GOT entries without dynamic PLT/GOT relocations for
4077 resolved undefined weak symbols in an executable so that their
4078 references have value 0 at run-time. */
4079 local_undefweak = UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh);
4080
4081 if (h->plt.offset != (bfd_vma) -1)
4082 {
4083 bfd_vma plt_index;
4084 bfd_vma got_offset, plt_offset;
4085 Elf_Internal_Rela rela;
4086 bfd_byte *loc;
4087 asection *plt, *gotplt, *relplt, *resolved_plt;
4088 const struct elf_backend_data *bed;
4089 bfd_vma plt_got_pcrel_offset;
4090
4091 /* When building a static executable, use .iplt, .igot.plt and
4092 .rela.iplt sections for STT_GNU_IFUNC symbols. */
4093 if (htab->elf.splt != NULL)
4094 {
4095 plt = htab->elf.splt;
4096 gotplt = htab->elf.sgotplt;
4097 relplt = htab->elf.srelplt;
4098 }
4099 else
4100 {
4101 plt = htab->elf.iplt;
4102 gotplt = htab->elf.igotplt;
4103 relplt = htab->elf.irelplt;
4104 }
4105
4106 VERIFY_PLT_ENTRY (info, h, plt, gotplt, relplt, local_undefweak)
4107
4108 /* Get the index in the procedure linkage table which
4109 corresponds to this symbol. This is the index of this symbol
4110 in all the symbols for which we are making plt entries. The
4111 first entry in the procedure linkage table is reserved.
4112
4113 Get the offset into the .got table of the entry that
4114 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
4115 bytes. The first three are reserved for the dynamic linker.
4116
4117 For static executables, we don't reserve anything. */
4118
4119 if (plt == htab->elf.splt)
4120 {
4121 got_offset = (h->plt.offset / htab->plt.plt_entry_size
4122 - htab->plt.has_plt0);
4123 got_offset = (got_offset + 3) * GOT_ENTRY_SIZE;
4124 }
4125 else
4126 {
4127 got_offset = h->plt.offset / htab->plt.plt_entry_size;
4128 got_offset = got_offset * GOT_ENTRY_SIZE;
4129 }
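/* For example, with the default 64-bit lazy PLT layout (16-byte
   entries, a reserved PLT0 and GOT_ENTRY_SIZE of 8), the first real
   PLT entry has h->plt.offset == 16, giving
   got_offset == (16 / 16 - 1 + 3) * 8 == 24, i.e. the slot right
   after the three reserved GOT entries.  */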
4130
4131 /* Fill in the entry in the procedure linkage table. */
4132 memcpy (plt->contents + h->plt.offset, htab->plt.plt_entry,
4133 htab->plt.plt_entry_size);
4134 if (use_plt_second)
4135 {
4136 memcpy (htab->plt_second->contents + eh->plt_second.offset,
4137 htab->non_lazy_plt->plt_entry,
4138 htab->non_lazy_plt->plt_entry_size);
4139
4140 resolved_plt = htab->plt_second;
4141 plt_offset = eh->plt_second.offset;
4142 }
4143 else
4144 {
4145 resolved_plt = plt;
4146 plt_offset = h->plt.offset;
4147 }
4148
4149 /* Insert the relocation positions of the plt section. */
4150
4151 /* Put in the offset for the PC-relative instruction referring to the
4152 GOT entry, subtracting the size of that instruction. */
4153 plt_got_pcrel_offset = (gotplt->output_section->vma
4154 + gotplt->output_offset
4155 + got_offset
4156 - resolved_plt->output_section->vma
4157 - resolved_plt->output_offset
4158 - plt_offset
4159 - htab->plt.plt_got_insn_size);
4160
4161 /* Check PC-relative offset overflow in PLT entry. */
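/* The displacement must fit in a signed 32-bit field; adding
   0x80000000 maps the valid range [-0x80000000, 0x7fffffff] onto
   [0, 0xffffffff].  */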
4162 if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff)
4163 /* xgettext:c-format */
4164 info->callbacks->einfo (_("%F%pB: PC-relative offset overflow in PLT entry for `%s'\n"),
4165 output_bfd, h->root.root.string);
4166
4167 bfd_put_32 (output_bfd, plt_got_pcrel_offset,
4168 (resolved_plt->contents + plt_offset
4169 + htab->plt.plt_got_offset));
4170
4171 /* Fill in the entry in the global offset table; initially this
4172 points to the second part of the PLT entry. Leave the entry
4173 as zero for an undefined weak symbol in PIE, as there is no PLT
4174 relocation against an undefined weak symbol in PIE. */
4175 if (!local_undefweak)
4176 {
4177 if (htab->plt.has_plt0)
4178 bfd_put_64 (output_bfd, (plt->output_section->vma
4179 + plt->output_offset
4180 + h->plt.offset
4181 + htab->lazy_plt->plt_lazy_offset),
4182 gotplt->contents + got_offset);
4183
4184 /* Fill in the entry in the .rela.plt section. */
4185 rela.r_offset = (gotplt->output_section->vma
4186 + gotplt->output_offset
4187 + got_offset);
4188 if (PLT_LOCAL_IFUNC_P (info, h))
4189 {
4190 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
4191 h->root.root.string,
4192 h->root.u.def.section->owner);
4193
4194 /* If an STT_GNU_IFUNC symbol is locally defined, generate
4195 R_X86_64_IRELATIVE instead of R_X86_64_JUMP_SLOT. */
4196 rela.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
4197 rela.r_addend = (h->root.u.def.value
4198 + h->root.u.def.section->output_section->vma
4199 + h->root.u.def.section->output_offset);
4200 /* R_X86_64_IRELATIVE comes last. */
4201 plt_index = htab->next_irelative_index--;
4202 }
4203 else
4204 {
4205 rela.r_info = htab->r_info (h->dynindx, R_X86_64_JUMP_SLOT);
4206 rela.r_addend = 0;
4207 plt_index = htab->next_jump_slot_index++;
4208 }
4209
4210 /* Don't fill the second and third slots in the PLT entry for
4211 static executables or when there is no PLT0. */
4212 if (plt == htab->elf.splt && htab->plt.has_plt0)
4213 {
4214 bfd_vma plt0_offset
4215 = h->plt.offset + htab->lazy_plt->plt_plt_insn_end;
4216
4217 /* Put relocation index. */
4218 bfd_put_32 (output_bfd, plt_index,
4219 (plt->contents + h->plt.offset
4220 + htab->lazy_plt->plt_reloc_offset));
4221
4222 /* Put in the offset for the jmp to .PLT0 and check for overflow.
4223 We don't check the relocation index for overflow since the
4224 branch displacement will overflow first. */
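/* The stored displacement is -PLT0_OFFSET (a backward jump to PLT0),
   so it fits in 32 bits as long as PLT0_OFFSET does not exceed
   0x80000000.  */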
4225 if (plt0_offset > 0x80000000)
4226 /* xgettext:c-format */
4227 info->callbacks->einfo (_("%F%pB: branch displacement overflow in PLT entry for `%s'\n"),
4228 output_bfd, h->root.root.string);
4229 bfd_put_32 (output_bfd, - plt0_offset,
4230 (plt->contents + h->plt.offset
4231 + htab->lazy_plt->plt_plt_offset));
4232 }
4233
4234 bed = get_elf_backend_data (output_bfd);
4235 loc = relplt->contents + plt_index * bed->s->sizeof_rela;
4236 bed->s->swap_reloca_out (output_bfd, &rela, loc);
4237 }
4238 }
4239 else if (eh->plt_got.offset != (bfd_vma) -1)
4240 {
4241 bfd_vma got_offset, plt_offset;
4242 asection *plt, *got;
4243 bfd_boolean got_after_plt;
4244 int32_t got_pcrel_offset;
4245
4246 /* Set the entry in the GOT procedure linkage table. */
4247 plt = htab->plt_got;
4248 got = htab->elf.sgot;
4249 got_offset = h->got.offset;
4250
4251 if (got_offset == (bfd_vma) -1
4252 || (h->type == STT_GNU_IFUNC && h->def_regular)
4253 || plt == NULL
4254 || got == NULL)
4255 abort ();
4256
4257 /* Use the non-lazy PLT entry template for the GOT PLT since they
4258 are identical. */
4259 /* Fill in the entry in the GOT procedure linkage table. */
4260 plt_offset = eh->plt_got.offset;
4261 memcpy (plt->contents + plt_offset,
4262 htab->non_lazy_plt->plt_entry,
4263 htab->non_lazy_plt->plt_entry_size);
4264
4265 /* Put in the offset for the PC-relative instruction referring to the
4266 GOT entry, subtracting the size of that instruction. */
4267 got_pcrel_offset = (got->output_section->vma
4268 + got->output_offset
4269 + got_offset
4270 - plt->output_section->vma
4271 - plt->output_offset
4272 - plt_offset
4273 - htab->non_lazy_plt->plt_got_insn_size);
4274
4275 /* Check PC-relative offset overflow in GOT PLT entry. */
4276 got_after_plt = got->output_section->vma > plt->output_section->vma;
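/* GOT_PCREL_OFFSET is an int32_t, so when the GOT lies above the PLT
   the true displacement is positive and a negative value here means
   the 32-bit field wrapped around, and vice versa.  */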
4277 if ((got_after_plt && got_pcrel_offset < 0)
4278 || (!got_after_plt && got_pcrel_offset > 0))
4279 /* xgettext:c-format */
4280 info->callbacks->einfo (_("%F%pB: PC-relative offset overflow in GOT PLT entry for `%s'\n"),
4281 output_bfd, h->root.root.string);
4282
4283 bfd_put_32 (output_bfd, got_pcrel_offset,
4284 (plt->contents + plt_offset
4285 + htab->non_lazy_plt->plt_got_offset));
4286 }
4287
4288 if (!local_undefweak
4289 && !h->def_regular
4290 && (h->plt.offset != (bfd_vma) -1
4291 || eh->plt_got.offset != (bfd_vma) -1))
4292 {
4293 /* Mark the symbol as undefined, rather than as defined in
4294 the .plt section. Leave the value if there were any
4295 relocations where pointer equality matters (this is a clue
4296 for the dynamic linker, to make function pointer
4297 comparisons work between an application and shared
4298 library), otherwise set it to zero. If a function is only
4299 called from a binary, there is no need to slow down
4300 shared libraries because of that. */
4301 sym->st_shndx = SHN_UNDEF;
4302 if (!h->pointer_equality_needed)
4303 sym->st_value = 0;
4304 }
4305
4306 _bfd_x86_elf_link_fixup_ifunc_symbol (info, htab, h, sym);
4307
4308 /* Don't generate a dynamic GOT relocation against an undefined
4309 weak symbol in an executable. */
4310 if (h->got.offset != (bfd_vma) -1
4311 && ! GOT_TLS_GD_ANY_P (elf_x86_hash_entry (h)->tls_type)
4312 && elf_x86_hash_entry (h)->tls_type != GOT_TLS_IE
4313 && !local_undefweak)
4314 {
4315 Elf_Internal_Rela rela;
4316 asection *relgot = htab->elf.srelgot;
4317
4318 /* This symbol has an entry in the global offset table. Set it
4319 up. */
4320 if (htab->elf.sgot == NULL || htab->elf.srelgot == NULL)
4321 abort ();
4322
4323 rela.r_offset = (htab->elf.sgot->output_section->vma
4324 + htab->elf.sgot->output_offset
4325 + (h->got.offset &~ (bfd_vma) 1));
4326
4327 /* If this is a static link, or it is a -Bsymbolic link and the
4328 symbol is defined locally or was forced to be local because
4329 of a version file, we just want to emit a RELATIVE reloc.
4330 The entry in the global offset table will already have been
4331 initialized in the relocate_section function. */
4332 if (h->def_regular
4333 && h->type == STT_GNU_IFUNC)
4334 {
4335 if (h->plt.offset == (bfd_vma) -1)
4336 {
4337 /* STT_GNU_IFUNC is referenced without PLT. */
4338 if (htab->elf.splt == NULL)
4339 {
4340 /* Use the .rel[a].iplt section to store .got relocations
4341 in a static executable. */
4342 relgot = htab->elf.irelplt;
4343 }
4344 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
4345 {
4346 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
4347 h->root.root.string,
4348 h->root.u.def.section->owner);
4349
4350 rela.r_info = htab->r_info (0,
4351 R_X86_64_IRELATIVE);
4352 rela.r_addend = (h->root.u.def.value
4353 + h->root.u.def.section->output_section->vma
4354 + h->root.u.def.section->output_offset);
4355 }
4356 else
4357 goto do_glob_dat;
4358 }
4359 else if (bfd_link_pic (info))
4360 {
4361 /* Generate R_X86_64_GLOB_DAT. */
4362 goto do_glob_dat;
4363 }
4364 else
4365 {
4366 asection *plt;
4367 bfd_vma plt_offset;
4368
4369 if (!h->pointer_equality_needed)
4370 abort ();
4371
4372 /* For a non-shared object, we can't use .got.plt, which
4373 contains the real function address, if we need pointer
4374 equality. We load the GOT entry with the PLT entry instead. */
4375 if (htab->plt_second != NULL)
4376 {
4377 plt = htab->plt_second;
4378 plt_offset = eh->plt_second.offset;
4379 }
4380 else
4381 {
4382 plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt;
4383 plt_offset = h->plt.offset;
4384 }
4385 bfd_put_64 (output_bfd, (plt->output_section->vma
4386 + plt->output_offset
4387 + plt_offset),
4388 htab->elf.sgot->contents + h->got.offset);
4389 return TRUE;
4390 }
4391 }
4392 else if (bfd_link_pic (info)
4393 && SYMBOL_REFERENCES_LOCAL_P (info, h))
4394 {
4395 if (!SYMBOL_DEFINED_NON_SHARED_P (h))
4396 return FALSE;
4397 BFD_ASSERT((h->got.offset & 1) != 0);
4398 rela.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4399 rela.r_addend = (h->root.u.def.value
4400 + h->root.u.def.section->output_section->vma
4401 + h->root.u.def.section->output_offset);
4402 }
4403 else
4404 {
4405 BFD_ASSERT((h->got.offset & 1) == 0);
4406 do_glob_dat:
4407 bfd_put_64 (output_bfd, (bfd_vma) 0,
4408 htab->elf.sgot->contents + h->got.offset);
4409 rela.r_info = htab->r_info (h->dynindx, R_X86_64_GLOB_DAT);
4410 rela.r_addend = 0;
4411 }
4412
4413 elf_append_rela (output_bfd, relgot, &rela);
4414 }
4415
4416 if (h->needs_copy)
4417 {
4418 Elf_Internal_Rela rela;
4419 asection *s;
4420
4421 /* This symbol needs a copy reloc. Set it up. */
4422 VERIFY_COPY_RELOC (h, htab)
4423
4424 rela.r_offset = (h->root.u.def.value
4425 + h->root.u.def.section->output_section->vma
4426 + h->root.u.def.section->output_offset);
4427 rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY);
4428 rela.r_addend = 0;
4429 if (h->root.u.def.section == htab->elf.sdynrelro)
4430 s = htab->elf.sreldynrelro;
4431 else
4432 s = htab->elf.srelbss;
4433 elf_append_rela (output_bfd, s, &rela);
4434 }
4435
4436 return TRUE;
4437 }
4438
4439 /* Finish up local dynamic symbol handling. We set the contents of
4440 various dynamic sections here. */
4441
4442 static bfd_boolean
4443 elf_x86_64_finish_local_dynamic_symbol (void **slot, void *inf)
4444 {
4445 struct elf_link_hash_entry *h
4446 = (struct elf_link_hash_entry *) *slot;
4447 struct bfd_link_info *info
4448 = (struct bfd_link_info *) inf;
4449
4450 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4451 info, h, NULL);
4452 }
4453
4454 /* Finish up undefined weak symbol handling in PIE. Fill its PLT entry
4455 here since an undefined weak symbol may not be dynamic, in which case
4456 elf_x86_64_finish_dynamic_symbol is not called for it. */
4457
4458 static bfd_boolean
4459 elf_x86_64_pie_finish_undefweak_symbol (struct bfd_hash_entry *bh,
4460 void *inf)
4461 {
4462 struct elf_link_hash_entry *h = (struct elf_link_hash_entry *) bh;
4463 struct bfd_link_info *info = (struct bfd_link_info *) inf;
4464
4465 if (h->root.type != bfd_link_hash_undefweak
4466 || h->dynindx != -1)
4467 return TRUE;
4468
4469 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4470 info, h, NULL);
4471 }
4472
4473 /* Used to decide how to sort relocs in an optimal manner for the
4474 dynamic linker, before writing them out. */
4475
4476 static enum elf_reloc_type_class
4477 elf_x86_64_reloc_type_class (const struct bfd_link_info *info,
4478 const asection *rel_sec ATTRIBUTE_UNUSED,
4479 const Elf_Internal_Rela *rela)
4480 {
4481 bfd *abfd = info->output_bfd;
4482 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
4483 struct elf_x86_link_hash_table *htab
4484 = elf_x86_hash_table (info, X86_64_ELF_DATA);
4485
4486 if (htab->elf.dynsym != NULL
4487 && htab->elf.dynsym->contents != NULL)
4488 {
4489 /* Check relocation against STT_GNU_IFUNC symbol if there are
4490 dynamic symbols. */
4491 unsigned long r_symndx = htab->r_sym (rela->r_info);
4492 if (r_symndx != STN_UNDEF)
4493 {
4494 Elf_Internal_Sym sym;
4495 if (!bed->s->swap_symbol_in (abfd,
4496 (htab->elf.dynsym->contents
4497 + r_symndx * bed->s->sizeof_sym),
4498 0, &sym))
4499 abort ();
4500
4501 if (ELF_ST_TYPE (sym.st_info) == STT_GNU_IFUNC)
4502 return reloc_class_ifunc;
4503 }
4504 }
4505
4506 switch ((int) ELF32_R_TYPE (rela->r_info))
4507 {
4508 case R_X86_64_IRELATIVE:
4509 return reloc_class_ifunc;
4510 case R_X86_64_RELATIVE:
4511 case R_X86_64_RELATIVE64:
4512 return reloc_class_relative;
4513 case R_X86_64_JUMP_SLOT:
4514 return reloc_class_plt;
4515 case R_X86_64_COPY:
4516 return reloc_class_copy;
4517 default:
4518 return reloc_class_normal;
4519 }
4520 }
4521
4522 /* Finish up the dynamic sections. */
4523
4524 static bfd_boolean
4525 elf_x86_64_finish_dynamic_sections (bfd *output_bfd,
4526 struct bfd_link_info *info)
4527 {
4528 struct elf_x86_link_hash_table *htab;
4529
4530 htab = _bfd_x86_elf_finish_dynamic_sections (output_bfd, info);
4531 if (htab == NULL)
4532 return FALSE;
4533
4534 if (! htab->elf.dynamic_sections_created)
4535 return TRUE;
4536
4537 if (htab->elf.splt && htab->elf.splt->size > 0)
4538 {
4539 elf_section_data (htab->elf.splt->output_section)
4540 ->this_hdr.sh_entsize = htab->plt.plt_entry_size;
4541
4542 if (htab->plt.has_plt0)
4543 {
4544 /* Fill in the special first entry in the procedure linkage
4545 table. */
4546 memcpy (htab->elf.splt->contents,
4547 htab->lazy_plt->plt0_entry,
4548 htab->lazy_plt->plt0_entry_size);
4549 /* Add the offset for pushq GOT+8(%rip); since the instruction
4550 uses 6 bytes, subtract this value. */
4551 bfd_put_32 (output_bfd,
4552 (htab->elf.sgotplt->output_section->vma
4553 + htab->elf.sgotplt->output_offset
4554 + 8
4555 - htab->elf.splt->output_section->vma
4556 - htab->elf.splt->output_offset
4557 - 6),
4558 (htab->elf.splt->contents
4559 + htab->lazy_plt->plt0_got1_offset));
4560 /* Add offset for the PC-relative instruction accessing
4561 GOT+16, subtracting the offset to the end of that
4562 instruction. */
4563 bfd_put_32 (output_bfd,
4564 (htab->elf.sgotplt->output_section->vma
4565 + htab->elf.sgotplt->output_offset
4566 + 16
4567 - htab->elf.splt->output_section->vma
4568 - htab->elf.splt->output_offset
4569 - htab->lazy_plt->plt0_got2_insn_end),
4570 (htab->elf.splt->contents
4571 + htab->lazy_plt->plt0_got2_offset));
4572 }
4573
4574 if (htab->tlsdesc_plt)
4575 {
4576 bfd_put_64 (output_bfd, (bfd_vma) 0,
4577 htab->elf.sgot->contents + htab->tlsdesc_got);
4578
4579 memcpy (htab->elf.splt->contents + htab->tlsdesc_plt,
4580 htab->lazy_plt->plt_tlsdesc_entry,
4581 htab->lazy_plt->plt_tlsdesc_entry_size);
4582
4583 /* Add the offset for pushq GOT+8(%rip); since ENDBR64 uses 4
4584 bytes and the instruction uses 6 bytes, subtract these
4585 values. */
4586 bfd_put_32 (output_bfd,
4587 (htab->elf.sgotplt->output_section->vma
4588 + htab->elf.sgotplt->output_offset
4589 + 8
4590 - htab->elf.splt->output_section->vma
4591 - htab->elf.splt->output_offset
4592 - htab->tlsdesc_plt
4593 - htab->lazy_plt->plt_tlsdesc_got1_insn_end),
4594 (htab->elf.splt->contents
4595 + htab->tlsdesc_plt
4596 + htab->lazy_plt->plt_tlsdesc_got1_offset));
4597 /* Add offset for indirect branch via GOT+TDG, where TDG
4598 stands for htab->tlsdesc_got, subtracting the offset
4599 to the end of that instruction. */
4600 bfd_put_32 (output_bfd,
4601 (htab->elf.sgot->output_section->vma
4602 + htab->elf.sgot->output_offset
4603 + htab->tlsdesc_got
4604 - htab->elf.splt->output_section->vma
4605 - htab->elf.splt->output_offset
4606 - htab->tlsdesc_plt
4607 - htab->lazy_plt->plt_tlsdesc_got2_insn_end),
4608 (htab->elf.splt->contents
4609 + htab->tlsdesc_plt
4610 + htab->lazy_plt->plt_tlsdesc_got2_offset));
4611 }
4612 }
4613
4614 /* Fill PLT entries for undefined weak symbols in PIE. */
4615 if (bfd_link_pie (info))
4616 bfd_hash_traverse (&info->hash->table,
4617 elf_x86_64_pie_finish_undefweak_symbol,
4618 info);
4619
4620 return TRUE;
4621 }
4622
4623 /* Fill PLT/GOT entries and allocate dynamic relocations for local
4624 STT_GNU_IFUNC symbols, which aren't in the ELF linker hash table.
4625 It has to be done before elf_link_sort_relocs is called so that
4626 dynamic relocations are properly sorted. */
4627
4628 static bfd_boolean
4629 elf_x86_64_output_arch_local_syms
4630 (bfd *output_bfd ATTRIBUTE_UNUSED,
4631 struct bfd_link_info *info,
4632 void *flaginfo ATTRIBUTE_UNUSED,
4633 int (*func) (void *, const char *,
4634 Elf_Internal_Sym *,
4635 asection *,
4636 struct elf_link_hash_entry *) ATTRIBUTE_UNUSED)
4637 {
4638 struct elf_x86_link_hash_table *htab
4639 = elf_x86_hash_table (info, X86_64_ELF_DATA);
4640 if (htab == NULL)
4641 return FALSE;
4642
4643 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
4644 htab_traverse (htab->loc_hash_table,
4645 elf_x86_64_finish_local_dynamic_symbol,
4646 info);
4647
4648 return TRUE;
4649 }
4650
4651 /* Forward declaration. */
4652 static const struct elf_x86_lazy_plt_layout elf_x86_64_nacl_plt;
4653
4654 /* Similar to _bfd_elf_get_synthetic_symtab. Support PLTs with all
4655 dynamic relocations. */
4656
4657 static long
4658 elf_x86_64_get_synthetic_symtab (bfd *abfd,
4659 long symcount ATTRIBUTE_UNUSED,
4660 asymbol **syms ATTRIBUTE_UNUSED,
4661 long dynsymcount,
4662 asymbol **dynsyms,
4663 asymbol **ret)
4664 {
4665 long count, i, n;
4666 int j;
4667 bfd_byte *plt_contents;
4668 long relsize;
4669 const struct elf_x86_lazy_plt_layout *lazy_plt;
4670 const struct elf_x86_non_lazy_plt_layout *non_lazy_plt;
4671 const struct elf_x86_lazy_plt_layout *lazy_bnd_plt;
4672 const struct elf_x86_non_lazy_plt_layout *non_lazy_bnd_plt;
4673 const struct elf_x86_lazy_plt_layout *lazy_ibt_plt;
4674 const struct elf_x86_non_lazy_plt_layout *non_lazy_ibt_plt;
4675 asection *plt;
4676 enum elf_x86_plt_type plt_type;
4677 struct elf_x86_plt plts[] =
4678 {
4679 { ".plt", NULL, NULL, plt_unknown, 0, 0, 0, 0 },
4680 { ".plt.got", NULL, NULL, plt_non_lazy, 0, 0, 0, 0 },
4681 { ".plt.sec", NULL, NULL, plt_second, 0, 0, 0, 0 },
4682 { ".plt.bnd", NULL, NULL, plt_second, 0, 0, 0, 0 },
4683 { NULL, NULL, NULL, plt_non_lazy, 0, 0, 0, 0 }
4684 };
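/* Each candidate PLT section below is identified by comparing its
   leading bytes against the known entry templates; the matching
   layout then supplies the per-entry GOT-offset positions used to
   synthesize the PLT symbols.  */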
4685
4686 *ret = NULL;
4687
4688 if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
4689 return 0;
4690
4691 if (dynsymcount <= 0)
4692 return 0;
4693
4694 relsize = bfd_get_dynamic_reloc_upper_bound (abfd);
4695 if (relsize <= 0)
4696 return -1;
4697
4698 if (get_elf_x86_backend_data (abfd)->target_os != is_nacl)
4699 {
4700 lazy_plt = &elf_x86_64_lazy_plt;
4701 non_lazy_plt = &elf_x86_64_non_lazy_plt;
4702 lazy_bnd_plt = &elf_x86_64_lazy_bnd_plt;
4703 non_lazy_bnd_plt = &elf_x86_64_non_lazy_bnd_plt;
4704 if (ABI_64_P (abfd))
4705 {
4706 lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
4707 non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
4708 }
4709 else
4710 {
4711 lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
4712 non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
4713 }
4714 }
4715 else
4716 {
4717 lazy_plt = &elf_x86_64_nacl_plt;
4718 non_lazy_plt = NULL;
4719 lazy_bnd_plt = NULL;
4720 non_lazy_bnd_plt = NULL;
4721 lazy_ibt_plt = NULL;
4722 non_lazy_ibt_plt = NULL;
4723 }
4724
4725 count = 0;
4726 for (j = 0; plts[j].name != NULL; j++)
4727 {
4728 plt = bfd_get_section_by_name (abfd, plts[j].name);
4729 if (plt == NULL || plt->size == 0)
4730 continue;
4731
4732 /* Get the PLT section contents. */
4733 plt_contents = (bfd_byte *) bfd_malloc (plt->size);
4734 if (plt_contents == NULL)
4735 break;
4736 if (!bfd_get_section_contents (abfd, (asection *) plt,
4737 plt_contents, 0, plt->size))
4738 {
4739 free (plt_contents);
4740 break;
4741 }
4742
4743 /* Check what kind of PLT it is. */
4744 plt_type = plt_unknown;
4745 if (plts[j].type == plt_unknown
4746 && (plt->size >= (lazy_plt->plt_entry_size
4747 + lazy_plt->plt_entry_size)))
4748 {
4749 /* Match lazy PLT first. Need to check the first two
4750 instructions. */
4751 if ((memcmp (plt_contents, lazy_plt->plt0_entry,
4752 lazy_plt->plt0_got1_offset) == 0)
4753 && (memcmp (plt_contents + 6, lazy_plt->plt0_entry + 6,
4754 2) == 0))
4755 plt_type = plt_lazy;
4756 else if (lazy_bnd_plt != NULL
4757 && (memcmp (plt_contents, lazy_bnd_plt->plt0_entry,
4758 lazy_bnd_plt->plt0_got1_offset) == 0)
4759 && (memcmp (plt_contents + 6,
4760 lazy_bnd_plt->plt0_entry + 6, 3) == 0))
4761 {
4762 plt_type = plt_lazy | plt_second;
4763 /* The first entry in the lazy IBT PLT is the same as in the
4764 lazy BND PLT. */
4765 if ((memcmp (plt_contents + lazy_ibt_plt->plt_entry_size,
4766 lazy_ibt_plt->plt_entry,
4767 lazy_ibt_plt->plt_got_offset) == 0))
4768 lazy_plt = lazy_ibt_plt;
4769 else
4770 lazy_plt = lazy_bnd_plt;
4771 }
4772 }
4773
4774 if (non_lazy_plt != NULL
4775 && (plt_type == plt_unknown || plt_type == plt_non_lazy)
4776 && plt->size >= non_lazy_plt->plt_entry_size)
4777 {
4778 /* Match non-lazy PLT. */
4779 if (memcmp (plt_contents, non_lazy_plt->plt_entry,
4780 non_lazy_plt->plt_got_offset) == 0)
4781 plt_type = plt_non_lazy;
4782 }
4783
4784 if (plt_type == plt_unknown || plt_type == plt_second)
4785 {
4786 if (non_lazy_bnd_plt != NULL
4787 && plt->size >= non_lazy_bnd_plt->plt_entry_size
4788 && (memcmp (plt_contents, non_lazy_bnd_plt->plt_entry,
4789 non_lazy_bnd_plt->plt_got_offset) == 0))
4790 {
4791 /* Match BND PLT. */
4792 plt_type = plt_second;
4793 non_lazy_plt = non_lazy_bnd_plt;
4794 }
4795 else if (non_lazy_ibt_plt != NULL
4796 && plt->size >= non_lazy_ibt_plt->plt_entry_size
4797 && (memcmp (plt_contents,
4798 non_lazy_ibt_plt->plt_entry,
4799 non_lazy_ibt_plt->plt_got_offset) == 0))
4800 {
4801 /* Match IBT PLT. */
4802 plt_type = plt_second;
4803 non_lazy_plt = non_lazy_ibt_plt;
4804 }
4805 }
4806
4807 if (plt_type == plt_unknown)
4808 {
4809 free (plt_contents);
4810 continue;
4811 }
4812
4813 plts[j].sec = plt;
4814 plts[j].type = plt_type;
4815
4816 if ((plt_type & plt_lazy))
4817 {
4818 plts[j].plt_got_offset = lazy_plt->plt_got_offset;
4819 plts[j].plt_got_insn_size = lazy_plt->plt_got_insn_size;
4820 plts[j].plt_entry_size = lazy_plt->plt_entry_size;
4821 /* Skip PLT0 in lazy PLT. */
4822 i = 1;
4823 }
4824 else
4825 {
4826 plts[j].plt_got_offset = non_lazy_plt->plt_got_offset;
4827 plts[j].plt_got_insn_size = non_lazy_plt->plt_got_insn_size;
4828 plts[j].plt_entry_size = non_lazy_plt->plt_entry_size;
4829 i = 0;
4830 }
4831
4832 /* Skip lazy PLT when the second PLT is used. */
4833 if (plt_type == (plt_lazy | plt_second))
4834 plts[j].count = 0;
4835 else
4836 {
4837 n = plt->size / plts[j].plt_entry_size;
4838 plts[j].count = n;
4839 count += n - i;
4840 }
4841
4842 plts[j].contents = plt_contents;
4843 }
4844
4845 return _bfd_x86_elf_get_synthetic_symtab (abfd, count, relsize,
4846 (bfd_vma) 0, plts, dynsyms,
4847 ret);
4848 }
4849
4850 /* Handle an x86-64 specific section when reading an object file. This
4851 is called when elfcode.h finds a section with an unknown type. */
4852
4853 static bfd_boolean
4854 elf_x86_64_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr,
4855 const char *name, int shindex)
4856 {
4857 if (hdr->sh_type != SHT_X86_64_UNWIND)
4858 return FALSE;
4859
4860 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
4861 return FALSE;
4862
4863 return TRUE;
4864 }
4865
4866 /* Hook called by the linker routine which adds symbols from an object
4867 file. We use it to put SHN_X86_64_LCOMMON items in .lbss, instead
4868 of .bss. */
4869
4870 static bfd_boolean
4871 elf_x86_64_add_symbol_hook (bfd *abfd,
4872 struct bfd_link_info *info ATTRIBUTE_UNUSED,
4873 Elf_Internal_Sym *sym,
4874 const char **namep ATTRIBUTE_UNUSED,
4875 flagword *flagsp ATTRIBUTE_UNUSED,
4876 asection **secp,
4877 bfd_vma *valp)
4878 {
4879 asection *lcomm;
4880
4881 switch (sym->st_shndx)
4882 {
4883 case SHN_X86_64_LCOMMON:
4884 lcomm = bfd_get_section_by_name (abfd, "LARGE_COMMON");
4885 if (lcomm == NULL)
4886 {
4887 lcomm = bfd_make_section_with_flags (abfd,
4888 "LARGE_COMMON",
4889 (SEC_ALLOC
4890 | SEC_IS_COMMON
4891 | SEC_LINKER_CREATED));
4892 if (lcomm == NULL)
4893 return FALSE;
4894 elf_section_flags (lcomm) |= SHF_X86_64_LARGE;
4895 }
4896 *secp = lcomm;
4897 *valp = sym->st_size;
4898 return TRUE;
4899 }
4900
4901 return TRUE;
4902 }
4903
4904
4905 /* Given a BFD section, try to locate the corresponding ELF section
4906 index. */
4907
4908 static bfd_boolean
4909 elf_x86_64_elf_section_from_bfd_section (bfd *abfd ATTRIBUTE_UNUSED,
4910 asection *sec, int *index_return)
4911 {
4912 if (sec == &_bfd_elf_large_com_section)
4913 {
4914 *index_return = SHN_X86_64_LCOMMON;
4915 return TRUE;
4916 }
4917 return FALSE;
4918 }
4919
4920 /* Process a symbol. */
4921
4922 static void
4923 elf_x86_64_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
4924 asymbol *asym)
4925 {
4926 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
4927
4928 switch (elfsym->internal_elf_sym.st_shndx)
4929 {
4930 case SHN_X86_64_LCOMMON:
4931 asym->section = &_bfd_elf_large_com_section;
4932 asym->value = elfsym->internal_elf_sym.st_size;
4933 /* Common symbol doesn't set BSF_GLOBAL. */
4934 asym->flags &= ~BSF_GLOBAL;
4935 break;
4936 }
4937 }
4938
4939 static bfd_boolean
4940 elf_x86_64_common_definition (Elf_Internal_Sym *sym)
4941 {
4942 return (sym->st_shndx == SHN_COMMON
4943 || sym->st_shndx == SHN_X86_64_LCOMMON);
4944 }
4945
4946 static unsigned int
4947 elf_x86_64_common_section_index (asection *sec)
4948 {
4949 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
4950 return SHN_COMMON;
4951 else
4952 return SHN_X86_64_LCOMMON;
4953 }
4954
4955 static asection *
4956 elf_x86_64_common_section (asection *sec)
4957 {
4958 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
4959 return bfd_com_section_ptr;
4960 else
4961 return &_bfd_elf_large_com_section;
4962 }
4963
4964 static bfd_boolean
4965 elf_x86_64_merge_symbol (struct elf_link_hash_entry *h,
4966 const Elf_Internal_Sym *sym,
4967 asection **psec,
4968 bfd_boolean newdef,
4969 bfd_boolean olddef,
4970 bfd *oldbfd,
4971 const asection *oldsec)
4972 {
4973 /* A normal common symbol and a large common symbol result in a
4974 normal common symbol. We turn the large common symbol into a
4975 normal one. */
4976 if (!olddef
4977 && h->root.type == bfd_link_hash_common
4978 && !newdef
4979 && bfd_is_com_section (*psec)
4980 && oldsec != *psec)
4981 {
4982 if (sym->st_shndx == SHN_COMMON
4983 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) != 0)
4984 {
4985 h->root.u.c.p->section
4986 = bfd_make_section_old_way (oldbfd, "COMMON");
4987 h->root.u.c.p->section->flags = SEC_ALLOC;
4988 }
4989 else if (sym->st_shndx == SHN_X86_64_LCOMMON
4990 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) == 0)
4991 *psec = bfd_com_section_ptr;
4992 }
4993
4994 return TRUE;
4995 }
4996
4997 static int
4998 elf_x86_64_additional_program_headers (bfd *abfd,
4999 struct bfd_link_info *info ATTRIBUTE_UNUSED)
5000 {
5001 asection *s;
5002 int count = 0;
5003
5004 /* Check to see if we need a large readonly segment. */
5005 s = bfd_get_section_by_name (abfd, ".lrodata");
5006 if (s && (s->flags & SEC_LOAD))
5007 count++;
5008
5009 /* Check to see if we need a large data segment. Since the .lbss
5010 section is placed right after the .bss section, there should be no
5011 need for a large data segment just because of .lbss. */
5012 s = bfd_get_section_by_name (abfd, ".ldata");
5013 if (s && (s->flags & SEC_LOAD))
5014 count++;
5015
5016 return count;
5017 }
5018
5019 /* Return TRUE iff relocations for INPUT are compatible with OUTPUT. */
5020
5021 static bfd_boolean
5022 elf_x86_64_relocs_compatible (const bfd_target *input,
5023 const bfd_target *output)
5024 {
5025 return ((xvec_get_elf_backend_data (input)->s->elfclass
5026 == xvec_get_elf_backend_data (output)->s->elfclass)
5027 && _bfd_elf_relocs_compatible (input, output));
5028 }
5029
5030 /* Set up x86-64 GNU properties. Return the first relocatable ELF input
5031 with GNU properties if found. Otherwise, return NULL. */
5032
5033 static bfd *
5034 elf_x86_64_link_setup_gnu_properties (struct bfd_link_info *info)
5035 {
5036 struct elf_x86_init_table init_table;
5037
5038 if ((int) R_X86_64_standard >= (int) R_X86_64_converted_reloc_bit
5039 || (int) R_X86_64_max <= (int) R_X86_64_converted_reloc_bit
5040 || ((int) (R_X86_64_GNU_VTINHERIT | R_X86_64_converted_reloc_bit)
5041 != (int) R_X86_64_GNU_VTINHERIT)
5042 || ((int) (R_X86_64_GNU_VTENTRY | R_X86_64_converted_reloc_bit)
5043 != (int) R_X86_64_GNU_VTENTRY))
5044 abort ();
5045
5046 /* This is unused for x86-64. */
5047 init_table.plt0_pad_byte = 0x90;
5048
5049 if (get_elf_x86_backend_data (info->output_bfd)->target_os != is_nacl)
5050 {
5051 const struct elf_backend_data *bed
5052 = get_elf_backend_data (info->output_bfd);
5053 struct elf_x86_link_hash_table *htab
5054 = elf_x86_hash_table (info, bed->target_id);
5055 if (!htab)
5056 abort ();
5057 if (htab->params->bndplt)
5058 {
5059 init_table.lazy_plt = &elf_x86_64_lazy_bnd_plt;
5060 init_table.non_lazy_plt = &elf_x86_64_non_lazy_bnd_plt;
5061 }
5062 else
5063 {
5064 init_table.lazy_plt = &elf_x86_64_lazy_plt;
5065 init_table.non_lazy_plt = &elf_x86_64_non_lazy_plt;
5066 }
5067
5068 if (ABI_64_P (info->output_bfd))
5069 {
5070 init_table.lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
5071 init_table.non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
5072 }
5073 else
5074 {
5075 init_table.lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
5076 init_table.non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
5077 }
5078 }
5079 else
5080 {
5081 init_table.lazy_plt = &elf_x86_64_nacl_plt;
5082 init_table.non_lazy_plt = NULL;
5083 init_table.lazy_ibt_plt = NULL;
5084 init_table.non_lazy_ibt_plt = NULL;
5085 }
5086
5087 if (ABI_64_P (info->output_bfd))
5088 {
5089 init_table.r_info = elf64_r_info;
5090 init_table.r_sym = elf64_r_sym;
5091 }
5092 else
5093 {
5094 init_table.r_info = elf32_r_info;
5095 init_table.r_sym = elf32_r_sym;
5096 }
5097
5098 return _bfd_x86_elf_link_setup_gnu_properties (info, &init_table);
5099 }
5100
5101 static const struct bfd_elf_special_section
5102 elf_x86_64_special_sections[]=
5103 {
5104 { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5105 { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
5106 { STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE},
5107 { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5108 { STRING_COMMA_LEN (".ldata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5109 { STRING_COMMA_LEN (".lrodata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
5110 { NULL, 0, 0, 0, 0 }
5111 };
5112
5113 #define TARGET_LITTLE_SYM x86_64_elf64_vec
5114 #define TARGET_LITTLE_NAME "elf64-x86-64"
5115 #define ELF_ARCH bfd_arch_i386
5116 #define ELF_TARGET_ID X86_64_ELF_DATA
5117 #define ELF_MACHINE_CODE EM_X86_64
5118 #if DEFAULT_LD_Z_SEPARATE_CODE
5119 # define ELF_MAXPAGESIZE 0x1000
5120 #else
5121 # define ELF_MAXPAGESIZE 0x200000
5122 #endif
5123 #define ELF_MINPAGESIZE 0x1000
5124 #define ELF_COMMONPAGESIZE 0x1000
5125
5126 #define elf_backend_can_gc_sections 1
5127 #define elf_backend_can_refcount 1
5128 #define elf_backend_want_got_plt 1
5129 #define elf_backend_plt_readonly 1
5130 #define elf_backend_want_plt_sym 0
5131 #define elf_backend_got_header_size (GOT_ENTRY_SIZE*3)
5132 #define elf_backend_rela_normal 1
5133 #define elf_backend_plt_alignment 4
5134 #define elf_backend_extern_protected_data 1
5135 #define elf_backend_caches_rawsize 1
5136 #define elf_backend_dtrel_excludes_plt 1
5137 #define elf_backend_want_dynrelro 1
5138
5139 #define elf_info_to_howto elf_x86_64_info_to_howto
5140
5141 #define bfd_elf64_bfd_reloc_type_lookup elf_x86_64_reloc_type_lookup
5142 #define bfd_elf64_bfd_reloc_name_lookup \
5143 elf_x86_64_reloc_name_lookup
5144
5145 #define elf_backend_relocs_compatible elf_x86_64_relocs_compatible
5146 #define elf_backend_check_relocs elf_x86_64_check_relocs
5147 #define elf_backend_create_dynamic_sections _bfd_elf_create_dynamic_sections
5148 #define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections
5149 #define elf_backend_finish_dynamic_symbol elf_x86_64_finish_dynamic_symbol
5150 #define elf_backend_output_arch_local_syms elf_x86_64_output_arch_local_syms
5151 #define elf_backend_grok_prstatus elf_x86_64_grok_prstatus
5152 #define elf_backend_grok_psinfo elf_x86_64_grok_psinfo
5153 #ifdef CORE_HEADER
5154 #define elf_backend_write_core_note elf_x86_64_write_core_note
5155 #endif
5156 #define elf_backend_reloc_type_class elf_x86_64_reloc_type_class
5157 #define elf_backend_relocate_section elf_x86_64_relocate_section
5158 #define elf_backend_init_index_section _bfd_elf_init_1_index_section
5159 #define elf_backend_object_p elf64_x86_64_elf_object_p
5160 #define bfd_elf64_get_synthetic_symtab elf_x86_64_get_synthetic_symtab
5161
5162 #define elf_backend_section_from_shdr \
5163 elf_x86_64_section_from_shdr
5164
5165 #define elf_backend_section_from_bfd_section \
5166 elf_x86_64_elf_section_from_bfd_section
5167 #define elf_backend_add_symbol_hook \
5168 elf_x86_64_add_symbol_hook
5169 #define elf_backend_symbol_processing \
5170 elf_x86_64_symbol_processing
5171 #define elf_backend_common_section_index \
5172 elf_x86_64_common_section_index
5173 #define elf_backend_common_section \
5174 elf_x86_64_common_section
5175 #define elf_backend_common_definition \
5176 elf_x86_64_common_definition
5177 #define elf_backend_merge_symbol \
5178 elf_x86_64_merge_symbol
5179 #define elf_backend_special_sections \
5180 elf_x86_64_special_sections
5181 #define elf_backend_additional_program_headers \
5182 elf_x86_64_additional_program_headers
5183 #define elf_backend_setup_gnu_properties \
5184 elf_x86_64_link_setup_gnu_properties
5185 #define elf_backend_hide_symbol \
5186 _bfd_x86_elf_hide_symbol
5187
5188 #undef elf64_bed
5189 #define elf64_bed elf64_x86_64_bed
5190
5191 #include "elf64-target.h"
5192
5193 /* CloudABI support. */
5194
5195 #undef TARGET_LITTLE_SYM
5196 #define TARGET_LITTLE_SYM x86_64_elf64_cloudabi_vec
5197 #undef TARGET_LITTLE_NAME
5198 #define TARGET_LITTLE_NAME "elf64-x86-64-cloudabi"
5199
5200 #undef ELF_OSABI
5201 #define ELF_OSABI ELFOSABI_CLOUDABI
5202
5203 #undef elf64_bed
5204 #define elf64_bed elf64_x86_64_cloudabi_bed
5205
5206 #include "elf64-target.h"
5207
5208 /* FreeBSD support. */
5209
5210 #undef TARGET_LITTLE_SYM
5211 #define TARGET_LITTLE_SYM x86_64_elf64_fbsd_vec
5212 #undef TARGET_LITTLE_NAME
5213 #define TARGET_LITTLE_NAME "elf64-x86-64-freebsd"
5214
5215 #undef ELF_OSABI
5216 #define ELF_OSABI ELFOSABI_FREEBSD
5217
5218 #undef elf64_bed
5219 #define elf64_bed elf64_x86_64_fbsd_bed
5220
5221 #include "elf64-target.h"
5222
5223 /* Solaris 2 support. */
5224
5225 #undef TARGET_LITTLE_SYM
5226 #define TARGET_LITTLE_SYM x86_64_elf64_sol2_vec
5227 #undef TARGET_LITTLE_NAME
5228 #define TARGET_LITTLE_NAME "elf64-x86-64-sol2"
5229
5230 static const struct elf_x86_backend_data elf_x86_64_solaris_arch_bed =
5231 {
5232 is_solaris /* os */
5233 };
5234
5235 #undef elf_backend_arch_data
5236 #define elf_backend_arch_data &elf_x86_64_solaris_arch_bed
5237
5238 /* Restore default: we cannot use ELFOSABI_SOLARIS, otherwise ELFOSABI_NONE
5239 objects won't be recognized. */
5240 #undef ELF_OSABI
5241
5242 #undef elf64_bed
5243 #define elf64_bed elf64_x86_64_sol2_bed
5244
5245 /* The 64-bit static TLS arena size is rounded to the nearest 16-byte
5246 boundary. */
5247 #undef elf_backend_static_tls_alignment
5248 #define elf_backend_static_tls_alignment 16
5249
5250 /* The Solaris 2 ABI requires a plt symbol on all platforms.
5251
5252 Cf. Linker and Libraries Guide, Ch. 2, Link-Editor, Generating the Output
5253 File, p.63. */
5254 #undef elf_backend_want_plt_sym
5255 #define elf_backend_want_plt_sym 1
5256
5257 #undef elf_backend_strtab_flags
5258 #define elf_backend_strtab_flags SHF_STRINGS
5259
5260 static bfd_boolean
5261 elf64_x86_64_copy_solaris_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
5262 bfd *obfd ATTRIBUTE_UNUSED,
5263 const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
5264 Elf_Internal_Shdr *osection ATTRIBUTE_UNUSED)
5265 {
5266 /* PR 19938: FIXME: Need to add code for setting the sh_info
5267 and sh_link fields of Solaris specific section types. */
5268 return FALSE;
5269 }
5270
5271 #undef elf_backend_copy_special_section_fields
5272 #define elf_backend_copy_special_section_fields elf64_x86_64_copy_solaris_special_section_fields
5273
5274 #include "elf64-target.h"
5275
5276 /* Native Client support. */
5277
5278 static bfd_boolean
5279 elf64_x86_64_nacl_elf_object_p (bfd *abfd)
5280 {
5281 /* Set the right machine number for a NaCl x86-64 ELF64 file. */
5282 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64_nacl);
5283 return TRUE;
5284 }
5285
5286 #undef TARGET_LITTLE_SYM
5287 #define TARGET_LITTLE_SYM x86_64_elf64_nacl_vec
5288 #undef TARGET_LITTLE_NAME
5289 #define TARGET_LITTLE_NAME "elf64-x86-64-nacl"
5290 #undef elf64_bed
5291 #define elf64_bed elf64_x86_64_nacl_bed
5292
5293 #undef ELF_MAXPAGESIZE
5294 #undef ELF_MINPAGESIZE
5295 #undef ELF_COMMONPAGESIZE
5296 #define ELF_MAXPAGESIZE 0x10000
5297 #define ELF_MINPAGESIZE 0x10000
5298 #define ELF_COMMONPAGESIZE 0x10000
5299
5300 /* Restore defaults. */
5301 #undef ELF_OSABI
5302 #undef elf_backend_static_tls_alignment
5303 #undef elf_backend_want_plt_sym
5304 #define elf_backend_want_plt_sym 0
5305 #undef elf_backend_strtab_flags
5306 #undef elf_backend_copy_special_section_fields
5307
5308 /* NaCl uses substantially different PLT entries for the same effects. */
5309
5310 #undef elf_backend_plt_alignment
5311 #define elf_backend_plt_alignment 5
5312 #define NACL_PLT_ENTRY_SIZE 64
5313 #define NACLMASK 0xe0 /* 32-byte alignment mask. */
5314
5315 static const bfd_byte elf_x86_64_nacl_plt0_entry[NACL_PLT_ENTRY_SIZE] =
5316 {
5317 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
5318 0x4c, 0x8b, 0x1d, 16, 0, 0, 0, /* mov GOT+16(%rip), %r11 */
5319 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
5320 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
5321 0x41, 0xff, 0xe3, /* jmpq *%r11 */
5322
5323 /* 9-byte nop sequence to pad out to the next 32-byte boundary. */
5324 0x66, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw 0x0(%rax,%rax,1) */
5325
5326 /* 32 bytes of nop to pad out to the standard size. */
5327 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5328 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5329 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5330 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5331 0x66, /* excess data16 prefix */
5332 0x90 /* nop */
5333 };
5334
5335 static const bfd_byte elf_x86_64_nacl_plt_entry[NACL_PLT_ENTRY_SIZE] =
5336 {
5337 0x4c, 0x8b, 0x1d, 0, 0, 0, 0, /* mov name@GOTPCREL(%rip),%r11 */
5338 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
5339 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
5340 0x41, 0xff, 0xe3, /* jmpq *%r11 */
5341
5342 /* 15-byte nop sequence to pad out to the next 32-byte boundary. */
5343 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5344 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5345
5346 /* Lazy GOT entries point here (32-byte aligned). */
5347 0x68, /* pushq immediate */
5348 0, 0, 0, 0, /* replaced with index into relocation table. */
5349 0xe9, /* jmp relative */
5350 0, 0, 0, 0, /* replaced with offset to start of .plt0. */
5351
5352 /* 22 bytes of nop to pad out to the standard size. */
5353 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5354 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5355 0x0f, 0x1f, 0x80, 0, 0, 0, 0, /* nopl 0x0(%rax) */
5356 };
5357
5358 /* .eh_frame covering the .plt section. */
5359
5360 static const bfd_byte elf_x86_64_nacl_eh_frame_plt[] =
5361 {
5362 #if (PLT_CIE_LENGTH != 20 \
5363 || PLT_FDE_LENGTH != 36 \
5364 || PLT_FDE_START_OFFSET != 4 + PLT_CIE_LENGTH + 8 \
5365 || PLT_FDE_LEN_OFFSET != 4 + PLT_CIE_LENGTH + 12)
5366 # error "Need elf_x86_backend_data parameters for eh_frame_plt offsets!"
5367 #endif
5368 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
5369 0, 0, 0, 0, /* CIE ID */
5370 1, /* CIE version */
5371 'z', 'R', 0, /* Augmentation string */
5372 1, /* Code alignment factor */
5373 0x78, /* Data alignment factor */
5374 16, /* Return address column */
5375 1, /* Augmentation size */
5376 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
5377 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
5378 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
5379 DW_CFA_nop, DW_CFA_nop,
5380
5381 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
5382 PLT_CIE_LENGTH + 8, 0, 0, 0,/* CIE pointer */
5383 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
5384 0, 0, 0, 0, /* .plt size goes here */
5385 0, /* Augmentation size */
5386 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
5387 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
5388 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
5389 DW_CFA_advance_loc + 58, /* DW_CFA_advance_loc: 58 to __PLT__+64 */
5390 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
5391 13, /* Block length */
5392 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
5393 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
5394 DW_OP_const1u, 63, DW_OP_and, DW_OP_const1u, 37, DW_OP_ge,
5395 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
5396 DW_CFA_nop, DW_CFA_nop
5397 };
5398
5399 static const struct elf_x86_lazy_plt_layout elf_x86_64_nacl_plt =
5400 {
5401 elf_x86_64_nacl_plt0_entry, /* plt0_entry */
5402 NACL_PLT_ENTRY_SIZE, /* plt0_entry_size */
5403 elf_x86_64_nacl_plt_entry, /* plt_entry */
5404 NACL_PLT_ENTRY_SIZE, /* plt_entry_size */
5405 elf_x86_64_nacl_plt0_entry, /* plt_tlsdesc_entry */
5406 NACL_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
5407 2, /* plt_tlsdesc_got1_offset */
5408 9, /* plt_tlsdesc_got2_offset */
5409 6, /* plt_tlsdesc_got1_insn_end */
5410 13, /* plt_tlsdesc_got2_insn_end */
5411 2, /* plt0_got1_offset */
5412 9, /* plt0_got2_offset */
5413 13, /* plt0_got2_insn_end */
5414 3, /* plt_got_offset */
5415 33, /* plt_reloc_offset */
5416 38, /* plt_plt_offset */
5417 7, /* plt_got_insn_size */
5418 42, /* plt_plt_insn_end */
5419 32, /* plt_lazy_offset */
5420 elf_x86_64_nacl_plt0_entry, /* pic_plt0_entry */
5421 elf_x86_64_nacl_plt_entry, /* pic_plt_entry */
5422 elf_x86_64_nacl_eh_frame_plt, /* eh_frame_plt */
5423 sizeof (elf_x86_64_nacl_eh_frame_plt) /* eh_frame_plt_size */
5424 };
5425
5426 static const struct elf_x86_backend_data elf_x86_64_nacl_arch_bed =
5427 {
5428 is_nacl /* os */
5429 };
5430
5431 #undef elf_backend_arch_data
5432 #define elf_backend_arch_data &elf_x86_64_nacl_arch_bed
5433
5434 #undef elf_backend_object_p
5435 #define elf_backend_object_p elf64_x86_64_nacl_elf_object_p
5436 #undef elf_backend_modify_segment_map
5437 #define elf_backend_modify_segment_map nacl_modify_segment_map
5438 #undef elf_backend_modify_program_headers
5439 #define elf_backend_modify_program_headers nacl_modify_program_headers
5440 #undef elf_backend_final_write_processing
5441 #define elf_backend_final_write_processing nacl_final_write_processing
5442
#include "elf64-target.h"

/* Native Client x32 support. */

static bfd_boolean
elf32_x86_64_nacl_elf_object_p (bfd *abfd)
{
  /* Set the right machine number for a NaCl x86-64 ELF32 file. */
  bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32_nacl);
  return TRUE;
}

#undef TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM x86_64_elf32_nacl_vec
#undef TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME "elf32-x86-64-nacl"
#undef elf32_bed
#define elf32_bed elf32_x86_64_nacl_bed

#define bfd_elf32_bfd_reloc_type_lookup \
  elf_x86_64_reloc_type_lookup
#define bfd_elf32_bfd_reloc_name_lookup \
  elf_x86_64_reloc_name_lookup
#define bfd_elf32_get_synthetic_symtab \
  elf_x86_64_get_synthetic_symtab

#undef elf_backend_object_p
#define elf_backend_object_p \
  elf32_x86_64_nacl_elf_object_p

#undef elf_backend_bfd_from_remote_memory
#define elf_backend_bfd_from_remote_memory \
  _bfd_elf32_bfd_from_remote_memory

#undef elf_backend_size_info
#define elf_backend_size_info \
  _bfd_elf32_size_info

#undef elf32_bed
#define elf32_bed elf32_x86_64_bed

#include "elf32-target.h"

/* Restore defaults. */
#undef elf_backend_object_p
#define elf_backend_object_p elf64_x86_64_elf_object_p
#undef elf_backend_bfd_from_remote_memory
#undef elf_backend_size_info
#undef elf_backend_modify_segment_map
#undef elf_backend_modify_program_headers
#undef elf_backend_final_write_processing

/* Intel L1OM support. */

static bfd_boolean
elf64_l1om_elf_object_p (bfd *abfd)
{
  /* Set the right machine number for an L1OM elf64 file. */
  bfd_default_set_arch_mach (abfd, bfd_arch_l1om, bfd_mach_l1om);
  return TRUE;
}

#undef TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM l1om_elf64_vec
#undef TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME "elf64-l1om"
#undef ELF_ARCH
#define ELF_ARCH bfd_arch_l1om

#undef ELF_MACHINE_CODE
#define ELF_MACHINE_CODE EM_L1OM

#undef ELF_OSABI

#undef elf64_bed
#define elf64_bed elf64_l1om_bed

#undef elf_backend_object_p
#define elf_backend_object_p elf64_l1om_elf_object_p

/* Restore defaults. */
#undef ELF_MAXPAGESIZE
#undef ELF_MINPAGESIZE
#undef ELF_COMMONPAGESIZE
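/* Use a 4K maximum page size when the linker defaults to -z separate-code,
   the traditional 2M maximum page size otherwise; the minimum and common
   page sizes are 4K either way.  */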
#if DEFAULT_LD_Z_SEPARATE_CODE
# define ELF_MAXPAGESIZE 0x1000
#else
# define ELF_MAXPAGESIZE 0x200000
#endif
#define ELF_MINPAGESIZE 0x1000
#define ELF_COMMONPAGESIZE 0x1000
#undef elf_backend_plt_alignment
#define elf_backend_plt_alignment 4
#undef elf_backend_arch_data
#define elf_backend_arch_data &elf_x86_64_arch_bed

#include "elf64-target.h"

/* FreeBSD L1OM support. */

#undef TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM l1om_elf64_fbsd_vec
#undef TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME "elf64-l1om-freebsd"

#undef ELF_OSABI
#define ELF_OSABI ELFOSABI_FREEBSD

#undef elf64_bed
#define elf64_bed elf64_l1om_fbsd_bed

#include "elf64-target.h"

/* Intel K1OM support. */

static bfd_boolean
elf64_k1om_elf_object_p (bfd *abfd)
{
  /* Set the right machine number for a K1OM elf64 file. */
  bfd_default_set_arch_mach (abfd, bfd_arch_k1om, bfd_mach_k1om);
  return TRUE;
}

#undef TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM k1om_elf64_vec
#undef TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME "elf64-k1om"
#undef ELF_ARCH
#define ELF_ARCH bfd_arch_k1om

#undef ELF_MACHINE_CODE
#define ELF_MACHINE_CODE EM_K1OM

#undef ELF_OSABI

#undef elf64_bed
#define elf64_bed elf64_k1om_bed

#undef elf_backend_object_p
#define elf_backend_object_p elf64_k1om_elf_object_p

#undef elf_backend_static_tls_alignment

#undef elf_backend_want_plt_sym
#define elf_backend_want_plt_sym 0

#include "elf64-target.h"

/* FreeBSD K1OM support. */

#undef TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM k1om_elf64_fbsd_vec
#undef TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME "elf64-k1om-freebsd"

#undef ELF_OSABI
#define ELF_OSABI ELFOSABI_FREEBSD

#undef elf64_bed
#define elf64_bed elf64_k1om_fbsd_bed

#include "elf64-target.h"

/* 32bit x86-64 support. */

#undef TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM x86_64_elf32_vec
#undef TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME "elf32-x86-64"
#undef elf32_bed

#undef ELF_ARCH
#define ELF_ARCH bfd_arch_i386

#undef ELF_MACHINE_CODE
#define ELF_MACHINE_CODE EM_X86_64

#undef ELF_OSABI

#undef elf_backend_object_p
#define elf_backend_object_p \
  elf32_x86_64_elf_object_p

#undef elf_backend_bfd_from_remote_memory
#define elf_backend_bfd_from_remote_memory \
  _bfd_elf32_bfd_from_remote_memory

#undef elf_backend_size_info
#define elf_backend_size_info \
  _bfd_elf32_size_info

#include "elf32-target.h"