1 /* X86-64 specific support for ELF
2 Copyright (C) 2000-2019 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka <jh@suse.cz>.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "elfxx-x86.h"
23 #include "elf-nacl.h"
24 #include "dwarf2.h"
25 #include "libiberty.h"
26
27 #include "opcode/i386.h"
28 #include "elf/x86-64.h"
29
30 #ifdef CORE_HEADER
31 #include <stdarg.h>
32 #include CORE_HEADER
33 #endif
34
35 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
36 #define MINUS_ONE (~ (bfd_vma) 0)
37
 38 /* Since both 32-bit and 64-bit x86-64 encode relocation types in an
 39 identical manner, we use ELF32_R_TYPE instead of ELF64_R_TYPE to get the
40 relocation type. We also use ELF_ST_TYPE instead of ELF64_ST_TYPE
41 since they are the same. */
42
43 /* The relocation "howto" table. Order of fields:
44 type, rightshift, size, bitsize, pc_relative, bitpos, complain_on_overflow,
45 special_function, name, partial_inplace, src_mask, dst_mask, pcrel_offset. */
46 static reloc_howto_type x86_64_elf_howto_table[] =
47 {
48 HOWTO(R_X86_64_NONE, 0, 3, 0, FALSE, 0, complain_overflow_dont,
49 bfd_elf_generic_reloc, "R_X86_64_NONE", FALSE, 0x00000000, 0x00000000,
50 FALSE),
51 HOWTO(R_X86_64_64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
52 bfd_elf_generic_reloc, "R_X86_64_64", FALSE, MINUS_ONE, MINUS_ONE,
53 FALSE),
54 HOWTO(R_X86_64_PC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
55 bfd_elf_generic_reloc, "R_X86_64_PC32", FALSE, 0xffffffff, 0xffffffff,
56 TRUE),
57 HOWTO(R_X86_64_GOT32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
58 bfd_elf_generic_reloc, "R_X86_64_GOT32", FALSE, 0xffffffff, 0xffffffff,
59 FALSE),
60 HOWTO(R_X86_64_PLT32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
61 bfd_elf_generic_reloc, "R_X86_64_PLT32", FALSE, 0xffffffff, 0xffffffff,
62 TRUE),
63 HOWTO(R_X86_64_COPY, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
64 bfd_elf_generic_reloc, "R_X86_64_COPY", FALSE, 0xffffffff, 0xffffffff,
65 FALSE),
66 HOWTO(R_X86_64_GLOB_DAT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
67 bfd_elf_generic_reloc, "R_X86_64_GLOB_DAT", FALSE, MINUS_ONE,
68 MINUS_ONE, FALSE),
69 HOWTO(R_X86_64_JUMP_SLOT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
70 bfd_elf_generic_reloc, "R_X86_64_JUMP_SLOT", FALSE, MINUS_ONE,
71 MINUS_ONE, FALSE),
72 HOWTO(R_X86_64_RELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
73 bfd_elf_generic_reloc, "R_X86_64_RELATIVE", FALSE, MINUS_ONE,
74 MINUS_ONE, FALSE),
75 HOWTO(R_X86_64_GOTPCREL, 0, 2, 32, TRUE, 0, complain_overflow_signed,
76 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL", FALSE, 0xffffffff,
77 0xffffffff, TRUE),
78 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
79 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
80 FALSE),
81 HOWTO(R_X86_64_32S, 0, 2, 32, FALSE, 0, complain_overflow_signed,
82 bfd_elf_generic_reloc, "R_X86_64_32S", FALSE, 0xffffffff, 0xffffffff,
83 FALSE),
84 HOWTO(R_X86_64_16, 0, 1, 16, FALSE, 0, complain_overflow_bitfield,
85 bfd_elf_generic_reloc, "R_X86_64_16", FALSE, 0xffff, 0xffff, FALSE),
 86 HOWTO(R_X86_64_PC16, 0, 1, 16, TRUE, 0, complain_overflow_bitfield,
87 bfd_elf_generic_reloc, "R_X86_64_PC16", FALSE, 0xffff, 0xffff, TRUE),
88 HOWTO(R_X86_64_8, 0, 0, 8, FALSE, 0, complain_overflow_bitfield,
89 bfd_elf_generic_reloc, "R_X86_64_8", FALSE, 0xff, 0xff, FALSE),
90 HOWTO(R_X86_64_PC8, 0, 0, 8, TRUE, 0, complain_overflow_signed,
91 bfd_elf_generic_reloc, "R_X86_64_PC8", FALSE, 0xff, 0xff, TRUE),
92 HOWTO(R_X86_64_DTPMOD64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
93 bfd_elf_generic_reloc, "R_X86_64_DTPMOD64", FALSE, MINUS_ONE,
94 MINUS_ONE, FALSE),
95 HOWTO(R_X86_64_DTPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
96 bfd_elf_generic_reloc, "R_X86_64_DTPOFF64", FALSE, MINUS_ONE,
97 MINUS_ONE, FALSE),
98 HOWTO(R_X86_64_TPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
99 bfd_elf_generic_reloc, "R_X86_64_TPOFF64", FALSE, MINUS_ONE,
100 MINUS_ONE, FALSE),
101 HOWTO(R_X86_64_TLSGD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
102 bfd_elf_generic_reloc, "R_X86_64_TLSGD", FALSE, 0xffffffff,
103 0xffffffff, TRUE),
104 HOWTO(R_X86_64_TLSLD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
105 bfd_elf_generic_reloc, "R_X86_64_TLSLD", FALSE, 0xffffffff,
106 0xffffffff, TRUE),
107 HOWTO(R_X86_64_DTPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
108 bfd_elf_generic_reloc, "R_X86_64_DTPOFF32", FALSE, 0xffffffff,
109 0xffffffff, FALSE),
110 HOWTO(R_X86_64_GOTTPOFF, 0, 2, 32, TRUE, 0, complain_overflow_signed,
111 bfd_elf_generic_reloc, "R_X86_64_GOTTPOFF", FALSE, 0xffffffff,
112 0xffffffff, TRUE),
113 HOWTO(R_X86_64_TPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
114 bfd_elf_generic_reloc, "R_X86_64_TPOFF32", FALSE, 0xffffffff,
115 0xffffffff, FALSE),
116 HOWTO(R_X86_64_PC64, 0, 4, 64, TRUE, 0, complain_overflow_bitfield,
117 bfd_elf_generic_reloc, "R_X86_64_PC64", FALSE, MINUS_ONE, MINUS_ONE,
118 TRUE),
119 HOWTO(R_X86_64_GOTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
120 bfd_elf_generic_reloc, "R_X86_64_GOTOFF64",
121 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
122 HOWTO(R_X86_64_GOTPC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
123 bfd_elf_generic_reloc, "R_X86_64_GOTPC32",
124 FALSE, 0xffffffff, 0xffffffff, TRUE),
125 HOWTO(R_X86_64_GOT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
126 bfd_elf_generic_reloc, "R_X86_64_GOT64", FALSE, MINUS_ONE, MINUS_ONE,
127 FALSE),
128 HOWTO(R_X86_64_GOTPCREL64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
129 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL64", FALSE, MINUS_ONE,
130 MINUS_ONE, TRUE),
131 HOWTO(R_X86_64_GOTPC64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
132 bfd_elf_generic_reloc, "R_X86_64_GOTPC64",
133 FALSE, MINUS_ONE, MINUS_ONE, TRUE),
134 HOWTO(R_X86_64_GOTPLT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
135 bfd_elf_generic_reloc, "R_X86_64_GOTPLT64", FALSE, MINUS_ONE,
136 MINUS_ONE, FALSE),
137 HOWTO(R_X86_64_PLTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
138 bfd_elf_generic_reloc, "R_X86_64_PLTOFF64", FALSE, MINUS_ONE,
139 MINUS_ONE, FALSE),
140 HOWTO(R_X86_64_SIZE32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
141 bfd_elf_generic_reloc, "R_X86_64_SIZE32", FALSE, 0xffffffff, 0xffffffff,
142 FALSE),
143 HOWTO(R_X86_64_SIZE64, 0, 4, 64, FALSE, 0, complain_overflow_unsigned,
144 bfd_elf_generic_reloc, "R_X86_64_SIZE64", FALSE, MINUS_ONE, MINUS_ONE,
145 FALSE),
146 HOWTO(R_X86_64_GOTPC32_TLSDESC, 0, 2, 32, TRUE, 0,
147 complain_overflow_bitfield, bfd_elf_generic_reloc,
148 "R_X86_64_GOTPC32_TLSDESC",
149 FALSE, 0xffffffff, 0xffffffff, TRUE),
150 HOWTO(R_X86_64_TLSDESC_CALL, 0, 0, 0, FALSE, 0,
151 complain_overflow_dont, bfd_elf_generic_reloc,
152 "R_X86_64_TLSDESC_CALL",
153 FALSE, 0, 0, FALSE),
154 HOWTO(R_X86_64_TLSDESC, 0, 4, 64, FALSE, 0,
155 complain_overflow_bitfield, bfd_elf_generic_reloc,
156 "R_X86_64_TLSDESC",
157 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
158 HOWTO(R_X86_64_IRELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
159 bfd_elf_generic_reloc, "R_X86_64_IRELATIVE", FALSE, MINUS_ONE,
160 MINUS_ONE, FALSE),
161 HOWTO(R_X86_64_RELATIVE64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
162 bfd_elf_generic_reloc, "R_X86_64_RELATIVE64", FALSE, MINUS_ONE,
163 MINUS_ONE, FALSE),
164 HOWTO(R_X86_64_PC32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
165 bfd_elf_generic_reloc, "R_X86_64_PC32_BND", FALSE, 0xffffffff, 0xffffffff,
166 TRUE),
167 HOWTO(R_X86_64_PLT32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
168 bfd_elf_generic_reloc, "R_X86_64_PLT32_BND", FALSE, 0xffffffff, 0xffffffff,
169 TRUE),
170 HOWTO(R_X86_64_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed,
171 bfd_elf_generic_reloc, "R_X86_64_GOTPCRELX", FALSE, 0xffffffff,
172 0xffffffff, TRUE),
173 HOWTO(R_X86_64_REX_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed,
174 bfd_elf_generic_reloc, "R_X86_64_REX_GOTPCRELX", FALSE, 0xffffffff,
175 0xffffffff, TRUE),
176
177 /* We have a gap in the reloc numbers here.
178 R_X86_64_standard counts the number up to this point, and
179 R_X86_64_vt_offset is the value to subtract from a reloc type of
180 R_X86_64_GNU_VT* to form an index into this table. */
181 #define R_X86_64_standard (R_X86_64_REX_GOTPCRELX + 1)
182 #define R_X86_64_vt_offset (R_X86_64_GNU_VTINHERIT - R_X86_64_standard)
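/* For instance, R_X86_64_GNU_VTINHERIT maps to table index
   R_X86_64_GNU_VTINHERIT - R_X86_64_vt_offset == R_X86_64_standard,
   i.e. the first entry after this gap, and R_X86_64_GNU_VTENTRY maps
   to the entry after that.  */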
183
184 /* GNU extension to record C++ vtable hierarchy. */
185 HOWTO (R_X86_64_GNU_VTINHERIT, 0, 4, 0, FALSE, 0, complain_overflow_dont,
186 NULL, "R_X86_64_GNU_VTINHERIT", FALSE, 0, 0, FALSE),
187
188 /* GNU extension to record C++ vtable member usage. */
189 HOWTO (R_X86_64_GNU_VTENTRY, 0, 4, 0, FALSE, 0, complain_overflow_dont,
190 _bfd_elf_rel_vtable_reloc_fn, "R_X86_64_GNU_VTENTRY", FALSE, 0, 0,
191 FALSE),
192
193 /* Use complain_overflow_bitfield on R_X86_64_32 for x32. */
194 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
195 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
196 FALSE)
197 };
198
199 /* Set if a relocation is converted from a GOTPCREL relocation. */
200 #define R_X86_64_converted_reloc_bit (1 << 7)
201
202 #define X86_PCREL_TYPE_P(TYPE) \
203 ( ((TYPE) == R_X86_64_PC8) \
204 || ((TYPE) == R_X86_64_PC16) \
205 || ((TYPE) == R_X86_64_PC32) \
206 || ((TYPE) == R_X86_64_PC32_BND) \
207 || ((TYPE) == R_X86_64_PC64))
208
209 #define X86_SIZE_TYPE_P(TYPE) \
210 ((TYPE) == R_X86_64_SIZE32 || (TYPE) == R_X86_64_SIZE64)
211
212 /* Map BFD relocs to the x86_64 elf relocs. */
213 struct elf_reloc_map
214 {
215 bfd_reloc_code_real_type bfd_reloc_val;
216 unsigned char elf_reloc_val;
217 };
218
219 static const struct elf_reloc_map x86_64_reloc_map[] =
220 {
221 { BFD_RELOC_NONE, R_X86_64_NONE, },
222 { BFD_RELOC_64, R_X86_64_64, },
223 { BFD_RELOC_32_PCREL, R_X86_64_PC32, },
224 { BFD_RELOC_X86_64_GOT32, R_X86_64_GOT32,},
225 { BFD_RELOC_X86_64_PLT32, R_X86_64_PLT32,},
226 { BFD_RELOC_X86_64_COPY, R_X86_64_COPY, },
227 { BFD_RELOC_X86_64_GLOB_DAT, R_X86_64_GLOB_DAT, },
228 { BFD_RELOC_X86_64_JUMP_SLOT, R_X86_64_JUMP_SLOT, },
229 { BFD_RELOC_X86_64_RELATIVE, R_X86_64_RELATIVE, },
230 { BFD_RELOC_X86_64_GOTPCREL, R_X86_64_GOTPCREL, },
231 { BFD_RELOC_32, R_X86_64_32, },
232 { BFD_RELOC_X86_64_32S, R_X86_64_32S, },
233 { BFD_RELOC_16, R_X86_64_16, },
234 { BFD_RELOC_16_PCREL, R_X86_64_PC16, },
235 { BFD_RELOC_8, R_X86_64_8, },
236 { BFD_RELOC_8_PCREL, R_X86_64_PC8, },
237 { BFD_RELOC_X86_64_DTPMOD64, R_X86_64_DTPMOD64, },
238 { BFD_RELOC_X86_64_DTPOFF64, R_X86_64_DTPOFF64, },
239 { BFD_RELOC_X86_64_TPOFF64, R_X86_64_TPOFF64, },
240 { BFD_RELOC_X86_64_TLSGD, R_X86_64_TLSGD, },
241 { BFD_RELOC_X86_64_TLSLD, R_X86_64_TLSLD, },
242 { BFD_RELOC_X86_64_DTPOFF32, R_X86_64_DTPOFF32, },
243 { BFD_RELOC_X86_64_GOTTPOFF, R_X86_64_GOTTPOFF, },
244 { BFD_RELOC_X86_64_TPOFF32, R_X86_64_TPOFF32, },
245 { BFD_RELOC_64_PCREL, R_X86_64_PC64, },
246 { BFD_RELOC_X86_64_GOTOFF64, R_X86_64_GOTOFF64, },
247 { BFD_RELOC_X86_64_GOTPC32, R_X86_64_GOTPC32, },
248 { BFD_RELOC_X86_64_GOT64, R_X86_64_GOT64, },
249 { BFD_RELOC_X86_64_GOTPCREL64,R_X86_64_GOTPCREL64, },
250 { BFD_RELOC_X86_64_GOTPC64, R_X86_64_GOTPC64, },
251 { BFD_RELOC_X86_64_GOTPLT64, R_X86_64_GOTPLT64, },
252 { BFD_RELOC_X86_64_PLTOFF64, R_X86_64_PLTOFF64, },
253 { BFD_RELOC_SIZE32, R_X86_64_SIZE32, },
254 { BFD_RELOC_SIZE64, R_X86_64_SIZE64, },
255 { BFD_RELOC_X86_64_GOTPC32_TLSDESC, R_X86_64_GOTPC32_TLSDESC, },
256 { BFD_RELOC_X86_64_TLSDESC_CALL, R_X86_64_TLSDESC_CALL, },
257 { BFD_RELOC_X86_64_TLSDESC, R_X86_64_TLSDESC, },
258 { BFD_RELOC_X86_64_IRELATIVE, R_X86_64_IRELATIVE, },
259 { BFD_RELOC_X86_64_PC32_BND, R_X86_64_PC32_BND, },
260 { BFD_RELOC_X86_64_PLT32_BND, R_X86_64_PLT32_BND, },
261 { BFD_RELOC_X86_64_GOTPCRELX, R_X86_64_GOTPCRELX, },
262 { BFD_RELOC_X86_64_REX_GOTPCRELX, R_X86_64_REX_GOTPCRELX, },
263 { BFD_RELOC_VTABLE_INHERIT, R_X86_64_GNU_VTINHERIT, },
264 { BFD_RELOC_VTABLE_ENTRY, R_X86_64_GNU_VTENTRY, },
265 };
266
267 static reloc_howto_type *
268 elf_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type)
269 {
270 unsigned i;
271
272 if (r_type == (unsigned int) R_X86_64_32)
273 {
274 if (ABI_64_P (abfd))
275 i = r_type;
276 else
277 i = ARRAY_SIZE (x86_64_elf_howto_table) - 1;
278 }
279 else if (r_type < (unsigned int) R_X86_64_GNU_VTINHERIT
280 || r_type >= (unsigned int) R_X86_64_max)
281 {
282 if (r_type >= (unsigned int) R_X86_64_standard)
283 {
284 /* xgettext:c-format */
285 _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
286 abfd, r_type);
287 bfd_set_error (bfd_error_bad_value);
288 return NULL;
289 }
290 i = r_type;
291 }
292 else
293 i = r_type - (unsigned int) R_X86_64_vt_offset;
294 BFD_ASSERT (x86_64_elf_howto_table[i].type == r_type);
295 return &x86_64_elf_howto_table[i];
296 }
297
298 /* Given a BFD reloc type, return a HOWTO structure. */
299 static reloc_howto_type *
300 elf_x86_64_reloc_type_lookup (bfd *abfd,
301 bfd_reloc_code_real_type code)
302 {
303 unsigned int i;
304
305 for (i = 0; i < sizeof (x86_64_reloc_map) / sizeof (struct elf_reloc_map);
306 i++)
307 {
308 if (x86_64_reloc_map[i].bfd_reloc_val == code)
309 return elf_x86_64_rtype_to_howto (abfd,
310 x86_64_reloc_map[i].elf_reloc_val);
311 }
312 return NULL;
313 }
314
315 static reloc_howto_type *
316 elf_x86_64_reloc_name_lookup (bfd *abfd,
317 const char *r_name)
318 {
319 unsigned int i;
320
321 if (!ABI_64_P (abfd) && strcasecmp (r_name, "R_X86_64_32") == 0)
322 {
323 /* Get x32 R_X86_64_32. */
324 reloc_howto_type *reloc
325 = &x86_64_elf_howto_table[ARRAY_SIZE (x86_64_elf_howto_table) - 1];
326 BFD_ASSERT (reloc->type == (unsigned int) R_X86_64_32);
327 return reloc;
328 }
329
330 for (i = 0; i < ARRAY_SIZE (x86_64_elf_howto_table); i++)
331 if (x86_64_elf_howto_table[i].name != NULL
332 && strcasecmp (x86_64_elf_howto_table[i].name, r_name) == 0)
333 return &x86_64_elf_howto_table[i];
334
335 return NULL;
336 }
337
338 /* Given an x86_64 ELF reloc type, fill in an arelent structure. */
339
340 static bfd_boolean
341 elf_x86_64_info_to_howto (bfd *abfd, arelent *cache_ptr,
342 Elf_Internal_Rela *dst)
343 {
344 unsigned r_type;
345
346 r_type = ELF32_R_TYPE (dst->r_info);
347 cache_ptr->howto = elf_x86_64_rtype_to_howto (abfd, r_type);
348 if (cache_ptr->howto == NULL)
349 return FALSE;
350 BFD_ASSERT (r_type == cache_ptr->howto->type || cache_ptr->howto->type == R_X86_64_NONE);
351 return TRUE;
352 }
353 \f
354 /* Support for core dump NOTE sections. */
355 static bfd_boolean
356 elf_x86_64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
357 {
358 int offset;
359 size_t size;
360
361 switch (note->descsz)
362 {
363 default:
364 return FALSE;
365
 366 case 296: /* sizeof(struct elf_prstatus) on Linux/x32 */
367 /* pr_cursig */
368 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
369
370 /* pr_pid */
371 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
372
373 /* pr_reg */
374 offset = 72;
375 size = 216;
376
377 break;
378
 379 case 336: /* sizeof(struct elf_prstatus) on Linux/x86_64 */
380 /* pr_cursig */
381 elf_tdata (abfd)->core->signal
382 = bfd_get_16 (abfd, note->descdata + 12);
383
384 /* pr_pid */
385 elf_tdata (abfd)->core->lwpid
386 = bfd_get_32 (abfd, note->descdata + 32);
387
388 /* pr_reg */
389 offset = 112;
390 size = 216;
391
392 break;
393 }
394
395 /* Make a ".reg/999" section. */
396 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
397 size, note->descpos + offset);
398 }
399
400 static bfd_boolean
401 elf_x86_64_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
402 {
403 switch (note->descsz)
404 {
405 default:
406 return FALSE;
407
408 case 124: /* sizeof(struct elf_prpsinfo) on Linux/x32 */
409 elf_tdata (abfd)->core->pid
410 = bfd_get_32 (abfd, note->descdata + 12);
411 elf_tdata (abfd)->core->program
412 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
413 elf_tdata (abfd)->core->command
414 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
415 break;
416
417 case 136: /* sizeof(struct elf_prpsinfo) on Linux/x86_64 */
418 elf_tdata (abfd)->core->pid
419 = bfd_get_32 (abfd, note->descdata + 24);
420 elf_tdata (abfd)->core->program
421 = _bfd_elfcore_strndup (abfd, note->descdata + 40, 16);
422 elf_tdata (abfd)->core->command
423 = _bfd_elfcore_strndup (abfd, note->descdata + 56, 80);
424 }
425
426 /* Note that for some reason, a spurious space is tacked
427 onto the end of the args in some (at least one anyway)
428 implementations, so strip it off if it exists. */
429
430 {
431 char *command = elf_tdata (abfd)->core->command;
432 int n = strlen (command);
433
434 if (0 < n && command[n - 1] == ' ')
435 command[n - 1] = '\0';
436 }
437
438 return TRUE;
439 }
440
441 #ifdef CORE_HEADER
442 # if GCC_VERSION >= 8000
443 # pragma GCC diagnostic push
444 # pragma GCC diagnostic ignored "-Wstringop-truncation"
445 # endif
446 static char *
447 elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz,
448 int note_type, ...)
449 {
450 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
451 va_list ap;
452 const char *fname, *psargs;
453 long pid;
454 int cursig;
455 const void *gregs;
456
457 switch (note_type)
458 {
459 default:
460 return NULL;
461
462 case NT_PRPSINFO:
463 va_start (ap, note_type);
464 fname = va_arg (ap, const char *);
465 psargs = va_arg (ap, const char *);
466 va_end (ap);
467
468 if (bed->s->elfclass == ELFCLASS32)
469 {
470 prpsinfo32_t data;
471 memset (&data, 0, sizeof (data));
472 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
473 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
474 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
475 &data, sizeof (data));
476 }
477 else
478 {
479 prpsinfo64_t data;
480 memset (&data, 0, sizeof (data));
481 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
482 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
483 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
484 &data, sizeof (data));
485 }
486 /* NOTREACHED */
487
488 case NT_PRSTATUS:
489 va_start (ap, note_type);
490 pid = va_arg (ap, long);
491 cursig = va_arg (ap, int);
492 gregs = va_arg (ap, const void *);
493 va_end (ap);
494
495 if (bed->s->elfclass == ELFCLASS32)
496 {
497 if (bed->elf_machine_code == EM_X86_64)
498 {
499 prstatusx32_t prstat;
500 memset (&prstat, 0, sizeof (prstat));
501 prstat.pr_pid = pid;
502 prstat.pr_cursig = cursig;
503 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
504 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
505 &prstat, sizeof (prstat));
506 }
507 else
508 {
509 prstatus32_t prstat;
510 memset (&prstat, 0, sizeof (prstat));
511 prstat.pr_pid = pid;
512 prstat.pr_cursig = cursig;
513 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
514 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
515 &prstat, sizeof (prstat));
516 }
517 }
518 else
519 {
520 prstatus64_t prstat;
521 memset (&prstat, 0, sizeof (prstat));
522 prstat.pr_pid = pid;
523 prstat.pr_cursig = cursig;
524 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
525 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
526 &prstat, sizeof (prstat));
527 }
528 }
529 /* NOTREACHED */
530 }
531 # if GCC_VERSION >= 8000
532 # pragma GCC diagnostic pop
533 # endif
534 #endif
535 \f
536 /* Functions for the x86-64 ELF linker. */
537
538 /* The size in bytes of an entry in the global offset table. */
539
540 #define GOT_ENTRY_SIZE 8
541
542 /* The size in bytes of an entry in the lazy procedure linkage table. */
543
544 #define LAZY_PLT_ENTRY_SIZE 16
545
546 /* The size in bytes of an entry in the non-lazy procedure linkage
547 table. */
548
549 #define NON_LAZY_PLT_ENTRY_SIZE 8
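
/* For example, with GOT_ENTRY_SIZE of 8 the .got.plt section starts with
   three reserved slots (24 bytes): the first holds the address of _DYNAMIC
   and the other two are filled in at run time by the dynamic linker with
   its link map and resolver addresses.  One 8-byte slot per PLT entry
   follows.  */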
550
551 /* The first entry in a lazy procedure linkage table looks like this.
552 See the SVR4 ABI i386 supplement and the x86-64 ABI to see how this
553 works. */
554
555 static const bfd_byte elf_x86_64_lazy_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
556 {
557 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
558 0xff, 0x25, 16, 0, 0, 0, /* jmpq *GOT+16(%rip) */
559 0x0f, 0x1f, 0x40, 0x00 /* nopl 0(%rax) */
560 };
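
/* That is, PLT0 pushes the second .got.plt slot (GOT+8, the link map)
   and jumps through the third (GOT+16, the runtime resolver).  The 8 and
   16 displacements above are only placeholders; the real PC-relative
   offsets are patched in once the output sections are laid out.  */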
561
562 /* Subsequent entries in a lazy procedure linkage table look like this. */
563
564 static const bfd_byte elf_x86_64_lazy_plt_entry[LAZY_PLT_ENTRY_SIZE] =
565 {
566 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
567 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
568 0x68, /* pushq immediate */
569 0, 0, 0, 0, /* replaced with index into relocation table. */
570 0xe9, /* jmp relative */
571 0, 0, 0, 0 /* replaced with offset to start of .plt0. */
572 };
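
/* Until a symbol is resolved, its .got.plt slot points back at the pushq
   above, so the initial jmpq falls through, pushes the relocation index
   and branches to PLT0; once the resolver has run, the slot holds the
   real target address and the first jmpq goes there directly.  */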
573
574 /* The first entry in a lazy procedure linkage table with BND prefix
 575 looks like this. */
576
577 static const bfd_byte elf_x86_64_lazy_bnd_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
578 {
579 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
580 0xf2, 0xff, 0x25, 16, 0, 0, 0, /* bnd jmpq *GOT+16(%rip) */
581 0x0f, 0x1f, 0 /* nopl (%rax) */
582 };
583
 584 /* Subsequent entries for branches with BND prefix in a lazy procedure
585 linkage table look like this. */
586
587 static const bfd_byte elf_x86_64_lazy_bnd_plt_entry[LAZY_PLT_ENTRY_SIZE] =
588 {
589 0x68, 0, 0, 0, 0, /* pushq immediate */
590 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
591 0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */
592 };
593
 594 /* The first entry in the IBT-enabled lazy procedure linkage table is
 595 the same as the lazy PLT with BND prefix so that bound registers are
 596 preserved when control is passed to the dynamic linker. Subsequent
 597 entries for an IBT-enabled lazy procedure linkage table look like
598 this. */
599
600 static const bfd_byte elf_x86_64_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
601 {
602 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
603 0x68, 0, 0, 0, 0, /* pushq immediate */
604 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
605 0x90 /* nop */
606 };
607
608 /* The first entry in the x32 IBT-enabled lazy procedure linkage table
609 is the same as the normal lazy PLT. Subsequent entries for an
610 x32 IBT-enabled lazy procedure linkage table look like this. */
611
612 static const bfd_byte elf_x32_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
613 {
614 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
615 0x68, 0, 0, 0, 0, /* pushq immediate */
616 0xe9, 0, 0, 0, 0, /* jmpq relative */
617 0x66, 0x90 /* xchg %ax,%ax */
618 };
619
 620 /* Entries in the non-lazy procedure linkage table look like this. */
621
622 static const bfd_byte elf_x86_64_non_lazy_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
623 {
624 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
625 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
626 0x66, 0x90 /* xchg %ax,%ax */
627 };
628
 629 /* Entries for branches with BND prefix in the non-lazy procedure
630 linkage table look like this. */
631
632 static const bfd_byte elf_x86_64_non_lazy_bnd_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
633 {
634 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
635 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
636 0x90 /* nop */
637 };
638
 639 /* Entries for branches with IBT enabled in the non-lazy procedure
640 linkage table look like this. They have the same size as the lazy
641 PLT entry. */
642
643 static const bfd_byte elf_x86_64_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
644 {
645 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
646 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
647 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
648 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopl 0x0(%rax,%rax,1) */
649 };
650
 651 /* Entries for branches with IBT enabled in the x32 non-lazy procedure
652 linkage table look like this. They have the same size as the lazy
653 PLT entry. */
654
655 static const bfd_byte elf_x32_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
656 {
657 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
658 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
659 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
660 0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopw 0x0(%rax,%rax,1) */
661 };
662
663 /* The TLSDESC entry in a lazy procedure linkage table. */
664 static const bfd_byte elf_x86_64_tlsdesc_plt_entry[LAZY_PLT_ENTRY_SIZE] =
665 {
666 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
667 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
668 0xff, 0x25, 16, 0, 0, 0 /* jmpq *GOT+TDG(%rip) */
669 };
670
671 /* .eh_frame covering the lazy .plt section. */
672
673 static const bfd_byte elf_x86_64_eh_frame_lazy_plt[] =
674 {
675 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
676 0, 0, 0, 0, /* CIE ID */
677 1, /* CIE version */
678 'z', 'R', 0, /* Augmentation string */
679 1, /* Code alignment factor */
680 0x78, /* Data alignment factor */
681 16, /* Return address column */
682 1, /* Augmentation size */
683 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
684 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
685 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
686 DW_CFA_nop, DW_CFA_nop,
687
688 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
689 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
690 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
691 0, 0, 0, 0, /* .plt size goes here */
692 0, /* Augmentation size */
693 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
694 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
695 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
696 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
697 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
698 11, /* Block length */
699 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
700 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
701 DW_OP_lit15, DW_OP_and, DW_OP_lit11, DW_OP_ge,
702 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
703 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
704 };
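
/* The DWARF expression above computes
   CFA = %rsp + 8 + 8 * ((%rip & 15) >= 11): within a 16-byte lazy PLT
   entry the pushq of the relocation index ends at offset 11, after which
   the saved return address sits 16 bytes above %rsp rather than 8.  */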
705
706 /* .eh_frame covering the lazy BND .plt section. */
707
708 static const bfd_byte elf_x86_64_eh_frame_lazy_bnd_plt[] =
709 {
710 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
711 0, 0, 0, 0, /* CIE ID */
712 1, /* CIE version */
713 'z', 'R', 0, /* Augmentation string */
714 1, /* Code alignment factor */
715 0x78, /* Data alignment factor */
716 16, /* Return address column */
717 1, /* Augmentation size */
718 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
719 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
720 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
721 DW_CFA_nop, DW_CFA_nop,
722
723 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
724 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
725 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
726 0, 0, 0, 0, /* .plt size goes here */
727 0, /* Augmentation size */
728 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
729 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
730 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
731 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
732 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
733 11, /* Block length */
734 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
735 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
736 DW_OP_lit15, DW_OP_and, DW_OP_lit5, DW_OP_ge,
737 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
738 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
739 };
740
741 /* .eh_frame covering the lazy .plt section with IBT-enabled. */
742
743 static const bfd_byte elf_x86_64_eh_frame_lazy_ibt_plt[] =
744 {
745 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
746 0, 0, 0, 0, /* CIE ID */
747 1, /* CIE version */
748 'z', 'R', 0, /* Augmentation string */
749 1, /* Code alignment factor */
750 0x78, /* Data alignment factor */
751 16, /* Return address column */
752 1, /* Augmentation size */
753 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
754 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
755 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
756 DW_CFA_nop, DW_CFA_nop,
757
758 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
759 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
760 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
761 0, 0, 0, 0, /* .plt size goes here */
762 0, /* Augmentation size */
763 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
764 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
765 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
766 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
767 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
768 11, /* Block length */
769 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
770 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
771 DW_OP_lit15, DW_OP_and, DW_OP_lit10, DW_OP_ge,
772 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
773 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
774 };
775
776 /* .eh_frame covering the x32 lazy .plt section with IBT-enabled. */
777
778 static const bfd_byte elf_x32_eh_frame_lazy_ibt_plt[] =
779 {
780 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
781 0, 0, 0, 0, /* CIE ID */
782 1, /* CIE version */
783 'z', 'R', 0, /* Augmentation string */
784 1, /* Code alignment factor */
785 0x78, /* Data alignment factor */
786 16, /* Return address column */
787 1, /* Augmentation size */
788 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
789 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
790 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
791 DW_CFA_nop, DW_CFA_nop,
792
793 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
794 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
795 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
796 0, 0, 0, 0, /* .plt size goes here */
797 0, /* Augmentation size */
798 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
799 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
800 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
801 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
802 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
803 11, /* Block length */
804 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
805 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
806 DW_OP_lit15, DW_OP_and, DW_OP_lit9, DW_OP_ge,
807 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
808 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
809 };
810
811 /* .eh_frame covering the non-lazy .plt section. */
812
813 static const bfd_byte elf_x86_64_eh_frame_non_lazy_plt[] =
814 {
815 #define PLT_GOT_FDE_LENGTH 20
816 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
817 0, 0, 0, 0, /* CIE ID */
818 1, /* CIE version */
819 'z', 'R', 0, /* Augmentation string */
820 1, /* Code alignment factor */
821 0x78, /* Data alignment factor */
822 16, /* Return address column */
823 1, /* Augmentation size */
824 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
825 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
826 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
827 DW_CFA_nop, DW_CFA_nop,
828
829 PLT_GOT_FDE_LENGTH, 0, 0, 0, /* FDE length */
830 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
831 0, 0, 0, 0, /* the start of non-lazy .plt goes here */
832 0, 0, 0, 0, /* non-lazy .plt size goes here */
833 0, /* Augmentation size */
834 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop,
835 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
836 };
837
838 /* These are the standard parameters. */
839 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_plt =
840 {
841 elf_x86_64_lazy_plt0_entry, /* plt0_entry */
842 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
843 elf_x86_64_lazy_plt_entry, /* plt_entry */
844 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
845 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
846 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
847 6, /* plt_tlsdesc_got1_offset */
848 12, /* plt_tlsdesc_got2_offset */
849 10, /* plt_tlsdesc_got1_insn_end */
850 16, /* plt_tlsdesc_got2_insn_end */
851 2, /* plt0_got1_offset */
852 8, /* plt0_got2_offset */
853 12, /* plt0_got2_insn_end */
854 2, /* plt_got_offset */
855 7, /* plt_reloc_offset */
856 12, /* plt_plt_offset */
857 6, /* plt_got_insn_size */
858 LAZY_PLT_ENTRY_SIZE, /* plt_plt_insn_end */
859 6, /* plt_lazy_offset */
860 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
861 elf_x86_64_lazy_plt_entry, /* pic_plt_entry */
862 elf_x86_64_eh_frame_lazy_plt, /* eh_frame_plt */
863 sizeof (elf_x86_64_eh_frame_lazy_plt) /* eh_frame_plt_size */
864 };
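
/* The plt_*_offset values above index into elf_x86_64_lazy_plt_entry:
   the name@GOTPCREL displacement occupies bytes 2-5 (plt_got_offset),
   the relocation index bytes 7-10 (plt_reloc_offset) and the rel32 of
   the jmp back to PLT0 bytes 12-15 (plt_plt_offset).  */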
865
866 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_plt =
867 {
868 elf_x86_64_non_lazy_plt_entry, /* plt_entry */
869 elf_x86_64_non_lazy_plt_entry, /* pic_plt_entry */
870 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
871 2, /* plt_got_offset */
872 6, /* plt_got_insn_size */
873 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
874 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
875 };
876
877 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_bnd_plt =
878 {
879 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
880 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
881 elf_x86_64_lazy_bnd_plt_entry, /* plt_entry */
882 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
883 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
884 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
885 6, /* plt_tlsdesc_got1_offset */
886 12, /* plt_tlsdesc_got2_offset */
887 10, /* plt_tlsdesc_got1_insn_end */
888 16, /* plt_tlsdesc_got2_insn_end */
889 2, /* plt0_got1_offset */
890 1+8, /* plt0_got2_offset */
891 1+12, /* plt0_got2_insn_end */
892 1+2, /* plt_got_offset */
893 1, /* plt_reloc_offset */
894 7, /* plt_plt_offset */
895 1+6, /* plt_got_insn_size */
896 11, /* plt_plt_insn_end */
897 0, /* plt_lazy_offset */
898 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
899 elf_x86_64_lazy_bnd_plt_entry, /* pic_plt_entry */
900 elf_x86_64_eh_frame_lazy_bnd_plt, /* eh_frame_plt */
901 sizeof (elf_x86_64_eh_frame_lazy_bnd_plt) /* eh_frame_plt_size */
902 };
903
904 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_bnd_plt =
905 {
906 elf_x86_64_non_lazy_bnd_plt_entry, /* plt_entry */
907 elf_x86_64_non_lazy_bnd_plt_entry, /* pic_plt_entry */
908 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
909 1+2, /* plt_got_offset */
910 1+6, /* plt_got_insn_size */
911 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
912 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
913 };
914
915 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_ibt_plt =
916 {
917 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
918 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
919 elf_x86_64_lazy_ibt_plt_entry, /* plt_entry */
920 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
921 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
922 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
923 6, /* plt_tlsdesc_got1_offset */
924 12, /* plt_tlsdesc_got2_offset */
925 10, /* plt_tlsdesc_got1_insn_end */
926 16, /* plt_tlsdesc_got2_insn_end */
927 2, /* plt0_got1_offset */
928 1+8, /* plt0_got2_offset */
929 1+12, /* plt0_got2_insn_end */
930 4+1+2, /* plt_got_offset */
931 4+1, /* plt_reloc_offset */
932 4+1+6, /* plt_plt_offset */
933 4+1+6, /* plt_got_insn_size */
934 4+1+5+5, /* plt_plt_insn_end */
935 0, /* plt_lazy_offset */
936 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
937 elf_x86_64_lazy_ibt_plt_entry, /* pic_plt_entry */
938 elf_x86_64_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
939 sizeof (elf_x86_64_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
940 };
941
942 static const struct elf_x86_lazy_plt_layout elf_x32_lazy_ibt_plt =
943 {
944 elf_x86_64_lazy_plt0_entry, /* plt0_entry */
945 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
946 elf_x32_lazy_ibt_plt_entry, /* plt_entry */
947 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
948 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
949 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
950 6, /* plt_tlsdesc_got1_offset */
951 12, /* plt_tlsdesc_got2_offset */
952 10, /* plt_tlsdesc_got1_insn_end */
953 16, /* plt_tlsdesc_got2_insn_end */
954 2, /* plt0_got1_offset */
955 8, /* plt0_got2_offset */
956 12, /* plt0_got2_insn_end */
957 4+2, /* plt_got_offset */
958 4+1, /* plt_reloc_offset */
959 4+6, /* plt_plt_offset */
960 4+6, /* plt_got_insn_size */
961 4+5+5, /* plt_plt_insn_end */
962 0, /* plt_lazy_offset */
963 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
964 elf_x32_lazy_ibt_plt_entry, /* pic_plt_entry */
965 elf_x32_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
966 sizeof (elf_x32_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
967 };
968
969 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_ibt_plt =
970 {
971 elf_x86_64_non_lazy_ibt_plt_entry, /* plt_entry */
972 elf_x86_64_non_lazy_ibt_plt_entry, /* pic_plt_entry */
973 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
974 4+1+2, /* plt_got_offset */
975 4+1+6, /* plt_got_insn_size */
976 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
977 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
978 };
979
980 static const struct elf_x86_non_lazy_plt_layout elf_x32_non_lazy_ibt_plt =
981 {
982 elf_x32_non_lazy_ibt_plt_entry, /* plt_entry */
983 elf_x32_non_lazy_ibt_plt_entry, /* pic_plt_entry */
984 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
985 4+2, /* plt_got_offset */
986 4+6, /* plt_got_insn_size */
987 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
988 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
989 };
990
991 static const struct elf_x86_backend_data elf_x86_64_arch_bed =
992 {
993 is_normal /* os */
994 };
995
996 #define elf_backend_arch_data &elf_x86_64_arch_bed
997
998 static bfd_boolean
999 elf64_x86_64_elf_object_p (bfd *abfd)
1000 {
1001 /* Set the right machine number for an x86-64 elf64 file. */
1002 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64);
1003 return TRUE;
1004 }
1005
1006 static bfd_boolean
1007 elf32_x86_64_elf_object_p (bfd *abfd)
1008 {
1009 /* Set the right machine number for an x86-64 elf32 file. */
1010 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32);
1011 return TRUE;
1012 }
1013
 1014 /* Return TRUE if the TLS access code sequence supports transition
1015 from R_TYPE. */
1016
1017 static bfd_boolean
1018 elf_x86_64_check_tls_transition (bfd *abfd,
1019 struct bfd_link_info *info,
1020 asection *sec,
1021 bfd_byte *contents,
1022 Elf_Internal_Shdr *symtab_hdr,
1023 struct elf_link_hash_entry **sym_hashes,
1024 unsigned int r_type,
1025 const Elf_Internal_Rela *rel,
1026 const Elf_Internal_Rela *relend)
1027 {
1028 unsigned int val;
1029 unsigned long r_symndx;
1030 bfd_boolean largepic = FALSE;
1031 struct elf_link_hash_entry *h;
1032 bfd_vma offset;
1033 struct elf_x86_link_hash_table *htab;
1034 bfd_byte *call;
1035 bfd_boolean indirect_call;
1036
1037 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1038 offset = rel->r_offset;
1039 switch (r_type)
1040 {
1041 case R_X86_64_TLSGD:
1042 case R_X86_64_TLSLD:
1043 if ((rel + 1) >= relend)
1044 return FALSE;
1045
1046 if (r_type == R_X86_64_TLSGD)
1047 {
1048 /* Check transition from GD access model. For 64bit, only
1049 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1050 .word 0x6666; rex64; call __tls_get_addr@PLT
1051 or
1052 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1053 .byte 0x66; rex64
1054 call *__tls_get_addr@GOTPCREL(%rip)
1055 which may be converted to
1056 addr32 call __tls_get_addr
1057 can transit to different access model. For 32bit, only
1058 leaq foo@tlsgd(%rip), %rdi
1059 .word 0x6666; rex64; call __tls_get_addr@PLT
1060 or
1061 leaq foo@tlsgd(%rip), %rdi
1062 .byte 0x66; rex64
1063 call *__tls_get_addr@GOTPCREL(%rip)
1064 which may be converted to
1065 addr32 call __tls_get_addr
1066 can transit to different access model. For largepic,
1067 we also support:
1068 leaq foo@tlsgd(%rip), %rdi
1069 movabsq $__tls_get_addr@pltoff, %rax
 1070 addq %r15, %rax
1071 call *%rax
1072 or
1073 leaq foo@tlsgd(%rip), %rdi
1074 movabsq $__tls_get_addr@pltoff, %rax
 1075 addq %rbx, %rax
1076 call *%rax */
1077
1078 static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d };
1079
1080 if ((offset + 12) > sec->size)
1081 return FALSE;
1082
1083 call = contents + offset + 4;
1084 if (call[0] != 0x66
1085 || !((call[1] == 0x48
1086 && call[2] == 0xff
1087 && call[3] == 0x15)
1088 || (call[1] == 0x48
1089 && call[2] == 0x67
1090 && call[3] == 0xe8)
1091 || (call[1] == 0x66
1092 && call[2] == 0x48
1093 && call[3] == 0xe8)))
1094 {
1095 if (!ABI_64_P (abfd)
1096 || (offset + 19) > sec->size
1097 || offset < 3
1098 || memcmp (call - 7, leaq + 1, 3) != 0
1099 || memcmp (call, "\x48\xb8", 2) != 0
1100 || call[11] != 0x01
1101 || call[13] != 0xff
1102 || call[14] != 0xd0
1103 || !((call[10] == 0x48 && call[12] == 0xd8)
1104 || (call[10] == 0x4c && call[12] == 0xf8)))
1105 return FALSE;
1106 largepic = TRUE;
1107 }
1108 else if (ABI_64_P (abfd))
1109 {
1110 if (offset < 4
1111 || memcmp (contents + offset - 4, leaq, 4) != 0)
1112 return FALSE;
1113 }
1114 else
1115 {
1116 if (offset < 3
1117 || memcmp (contents + offset - 3, leaq + 1, 3) != 0)
1118 return FALSE;
1119 }
1120 indirect_call = call[2] == 0xff;
1121 }
1122 else
1123 {
1124 /* Check transition from LD access model. Only
1125 leaq foo@tlsld(%rip), %rdi;
1126 call __tls_get_addr@PLT
1127 or
1128 leaq foo@tlsld(%rip), %rdi;
1129 call *__tls_get_addr@GOTPCREL(%rip)
1130 which may be converted to
1131 addr32 call __tls_get_addr
1132 can transit to different access model. For largepic
1133 we also support:
1134 leaq foo@tlsld(%rip), %rdi
1135 movabsq $__tls_get_addr@pltoff, %rax
 1136 addq %r15, %rax
1137 call *%rax
1138 or
1139 leaq foo@tlsld(%rip), %rdi
1140 movabsq $__tls_get_addr@pltoff, %rax
 1141 addq %rbx, %rax
1142 call *%rax */
1143
1144 static const unsigned char lea[] = { 0x48, 0x8d, 0x3d };
1145
1146 if (offset < 3 || (offset + 9) > sec->size)
1147 return FALSE;
1148
1149 if (memcmp (contents + offset - 3, lea, 3) != 0)
1150 return FALSE;
1151
1152 call = contents + offset + 4;
1153 if (!(call[0] == 0xe8
1154 || (call[0] == 0xff && call[1] == 0x15)
1155 || (call[0] == 0x67 && call[1] == 0xe8)))
1156 {
1157 if (!ABI_64_P (abfd)
1158 || (offset + 19) > sec->size
1159 || memcmp (call, "\x48\xb8", 2) != 0
1160 || call[11] != 0x01
1161 || call[13] != 0xff
1162 || call[14] != 0xd0
1163 || !((call[10] == 0x48 && call[12] == 0xd8)
1164 || (call[10] == 0x4c && call[12] == 0xf8)))
1165 return FALSE;
1166 largepic = TRUE;
1167 }
1168 indirect_call = call[0] == 0xff;
1169 }
1170
1171 r_symndx = htab->r_sym (rel[1].r_info);
1172 if (r_symndx < symtab_hdr->sh_info)
1173 return FALSE;
1174
1175 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1176 if (h == NULL
1177 || !((struct elf_x86_link_hash_entry *) h)->tls_get_addr)
1178 return FALSE;
1179 else
1180 {
1181 r_type = (ELF32_R_TYPE (rel[1].r_info)
1182 & ~R_X86_64_converted_reloc_bit);
1183 if (largepic)
1184 return r_type == R_X86_64_PLTOFF64;
1185 else if (indirect_call)
1186 return r_type == R_X86_64_GOTPCRELX;
1187 else
1188 return (r_type == R_X86_64_PC32 || r_type == R_X86_64_PLT32);
1189 }
1190
1191 case R_X86_64_GOTTPOFF:
1192 /* Check transition from IE access model:
1193 mov foo@gottpoff(%rip), %reg
1194 add foo@gottpoff(%rip), %reg
1195 */
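
      /* For example, "movq foo@gottpoff(%rip), %rax" assembles to
	 48 8b 05 <disp32>, so the byte at OFFSET - 3 is the REX prefix
	 (0x48, or 0x4c when the destination is %r8-%r15), OFFSET - 2 is
	 the opcode (0x8b for mov, 0x03 for add) and OFFSET - 1 is the
	 ModRM byte, whose mod/rm fields must be 00/101 for %rip-relative
	 addressing -- hence the (val & 0xc7) == 5 check below.  */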
1196
1197 /* Check REX prefix first. */
1198 if (offset >= 3 && (offset + 4) <= sec->size)
1199 {
1200 val = bfd_get_8 (abfd, contents + offset - 3);
1201 if (val != 0x48 && val != 0x4c)
1202 {
1203 /* X32 may have 0x44 REX prefix or no REX prefix. */
1204 if (ABI_64_P (abfd))
1205 return FALSE;
1206 }
1207 }
1208 else
1209 {
1210 /* X32 may not have any REX prefix. */
1211 if (ABI_64_P (abfd))
1212 return FALSE;
1213 if (offset < 2 || (offset + 3) > sec->size)
1214 return FALSE;
1215 }
1216
1217 val = bfd_get_8 (abfd, contents + offset - 2);
1218 if (val != 0x8b && val != 0x03)
1219 return FALSE;
1220
1221 val = bfd_get_8 (abfd, contents + offset - 1);
1222 return (val & 0xc7) == 5;
1223
1224 case R_X86_64_GOTPC32_TLSDESC:
1225 /* Check transition from GDesc access model:
1226 leaq x@tlsdesc(%rip), %rax
1227
1228 Make sure it's a leaq adding rip to a 32-bit offset
1229 into any register, although it's probably almost always
1230 going to be rax. */
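
      /* For example, "leaq x@tlsdesc(%rip), %rax" is 48 8d 05 <disp32>;
	 the (val & 0xfb) == 0x48 test also accepts a REX.R prefix (0x4c)
	 so any destination register works, and (val & 0xc7) == 0x05
	 checks for a %rip-relative ModRM byte.  */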
1231
1232 if (offset < 3 || (offset + 4) > sec->size)
1233 return FALSE;
1234
1235 val = bfd_get_8 (abfd, contents + offset - 3);
1236 if ((val & 0xfb) != 0x48)
1237 return FALSE;
1238
1239 if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d)
1240 return FALSE;
1241
1242 val = bfd_get_8 (abfd, contents + offset - 1);
1243 return (val & 0xc7) == 0x05;
1244
1245 case R_X86_64_TLSDESC_CALL:
1246 /* Check transition from GDesc access model:
1247 call *x@tlsdesc(%rax)
1248 */
1249 if (offset + 2 <= sec->size)
1250 {
1251 /* Make sure that it's a call *x@tlsdesc(%rax). */
1252 call = contents + offset;
1253 return call[0] == 0xff && call[1] == 0x10;
1254 }
1255
1256 return FALSE;
1257
1258 default:
1259 abort ();
1260 }
1261 }
1262
1263 /* Return TRUE if the TLS access transition is OK or no transition
1264 will be performed. Update R_TYPE if there is a transition. */
1265
1266 static bfd_boolean
1267 elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd,
1268 asection *sec, bfd_byte *contents,
1269 Elf_Internal_Shdr *symtab_hdr,
1270 struct elf_link_hash_entry **sym_hashes,
1271 unsigned int *r_type, int tls_type,
1272 const Elf_Internal_Rela *rel,
1273 const Elf_Internal_Rela *relend,
1274 struct elf_link_hash_entry *h,
1275 unsigned long r_symndx,
1276 bfd_boolean from_relocate_section)
1277 {
1278 unsigned int from_type = *r_type;
1279 unsigned int to_type = from_type;
1280 bfd_boolean check = TRUE;
1281
1282 /* Skip TLS transition for functions. */
1283 if (h != NULL
1284 && (h->type == STT_FUNC
1285 || h->type == STT_GNU_IFUNC))
1286 return TRUE;
1287
1288 switch (from_type)
1289 {
1290 case R_X86_64_TLSGD:
1291 case R_X86_64_GOTPC32_TLSDESC:
1292 case R_X86_64_TLSDESC_CALL:
1293 case R_X86_64_GOTTPOFF:
1294 if (bfd_link_executable (info))
1295 {
1296 if (h == NULL)
1297 to_type = R_X86_64_TPOFF32;
1298 else
1299 to_type = R_X86_64_GOTTPOFF;
1300 }
1301
1302 /* When we are called from elf_x86_64_relocate_section, there may
1303 be additional transitions based on TLS_TYPE. */
1304 if (from_relocate_section)
1305 {
1306 unsigned int new_to_type = to_type;
1307
1308 if (TLS_TRANSITION_IE_TO_LE_P (info, h, tls_type))
1309 new_to_type = R_X86_64_TPOFF32;
1310
1311 if (to_type == R_X86_64_TLSGD
1312 || to_type == R_X86_64_GOTPC32_TLSDESC
1313 || to_type == R_X86_64_TLSDESC_CALL)
1314 {
1315 if (tls_type == GOT_TLS_IE)
1316 new_to_type = R_X86_64_GOTTPOFF;
1317 }
1318
1319 /* We checked the transition before when we were called from
1320 elf_x86_64_check_relocs. We only want to check the new
1321 transition which hasn't been checked before. */
1322 check = new_to_type != to_type && from_type == to_type;
1323 to_type = new_to_type;
1324 }
1325
1326 break;
1327
1328 case R_X86_64_TLSLD:
1329 if (bfd_link_executable (info))
1330 to_type = R_X86_64_TPOFF32;
1331 break;
1332
1333 default:
1334 return TRUE;
1335 }
1336
1337 /* Return TRUE if there is no transition. */
1338 if (from_type == to_type)
1339 return TRUE;
1340
1341 /* Check if the transition can be performed. */
1342 if (check
1343 && ! elf_x86_64_check_tls_transition (abfd, info, sec, contents,
1344 symtab_hdr, sym_hashes,
1345 from_type, rel, relend))
1346 {
1347 reloc_howto_type *from, *to;
1348 const char *name;
1349
1350 from = elf_x86_64_rtype_to_howto (abfd, from_type);
1351 to = elf_x86_64_rtype_to_howto (abfd, to_type);
1352
1353 if (from == NULL || to == NULL)
1354 return FALSE;
1355
1356 if (h)
1357 name = h->root.root.string;
1358 else
1359 {
1360 struct elf_x86_link_hash_table *htab;
1361
1362 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1363 if (htab == NULL)
1364 name = "*unknown*";
1365 else
1366 {
1367 Elf_Internal_Sym *isym;
1368
1369 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1370 abfd, r_symndx);
1371 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1372 }
1373 }
1374
1375 _bfd_error_handler
1376 /* xgettext:c-format */
1377 (_("%pB: TLS transition from %s to %s against `%s' at %#" PRIx64
1378 " in section `%pA' failed"),
1379 abfd, from->name, to->name, name, (uint64_t) rel->r_offset, sec);
1380 bfd_set_error (bfd_error_bad_value);
1381 return FALSE;
1382 }
1383
1384 *r_type = to_type;
1385 return TRUE;
1386 }
1387
1388 /* Rename some of the generic section flags to better document how they
1389 are used here. */
1390 #define check_relocs_failed sec_flg0
1391
1392 static bfd_boolean
1393 elf_x86_64_need_pic (struct bfd_link_info *info,
1394 bfd *input_bfd, asection *sec,
1395 struct elf_link_hash_entry *h,
1396 Elf_Internal_Shdr *symtab_hdr,
1397 Elf_Internal_Sym *isym,
1398 reloc_howto_type *howto)
1399 {
1400 const char *v = "";
1401 const char *und = "";
1402 const char *pic = "";
1403 const char *object;
1404
1405 const char *name;
1406 if (h)
1407 {
1408 name = h->root.root.string;
1409 switch (ELF_ST_VISIBILITY (h->other))
1410 {
1411 case STV_HIDDEN:
1412 v = _("hidden symbol ");
1413 break;
1414 case STV_INTERNAL:
1415 v = _("internal symbol ");
1416 break;
1417 case STV_PROTECTED:
1418 v = _("protected symbol ");
1419 break;
1420 default:
1421 if (((struct elf_x86_link_hash_entry *) h)->def_protected)
1422 v = _("protected symbol ");
1423 else
1424 v = _("symbol ");
1425 pic = _("; recompile with -fPIC");
1426 break;
1427 }
1428
1429 if (!SYMBOL_DEFINED_NON_SHARED_P (h) && !h->def_dynamic)
1430 und = _("undefined ");
1431 }
1432 else
1433 {
1434 name = bfd_elf_sym_name (input_bfd, symtab_hdr, isym, NULL);
1435 pic = _("; recompile with -fPIC");
1436 }
1437
1438 if (bfd_link_dll (info))
1439 object = _("a shared object");
1440 else if (bfd_link_pie (info))
1441 object = _("a PIE object");
1442 else
1443 object = _("a PDE object");
1444
1445 /* xgettext:c-format */
1446 _bfd_error_handler (_("%pB: relocation %s against %s%s`%s' can "
1447 "not be used when making %s%s"),
1448 input_bfd, howto->name, und, v, name,
1449 object, pic);
1450 bfd_set_error (bfd_error_bad_value);
1451 sec->check_relocs_failed = 1;
1452 return FALSE;
1453 }
1454
1455 /* With the local symbol, foo, we convert
1456 mov foo@GOTPCREL(%rip), %reg
1457 to
1458 lea foo(%rip), %reg
1459 and convert
1460 call/jmp *foo@GOTPCREL(%rip)
1461 to
1462 nop call foo/jmp foo nop
1463 When PIC is false, convert
1464 test %reg, foo@GOTPCREL(%rip)
1465 to
1466 test $foo, %reg
1467 and convert
1468 binop foo@GOTPCREL(%rip), %reg
1469 to
1470 binop $foo, %reg
1471 where binop is one of adc, add, and, cmp, or, sbb, sub, xor
1472 instructions. */
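
/* For example, with %rax as the destination, "movq foo@GOTPCREL(%rip), %rax"
   is 48 8b 05 <disp32>; rewriting the opcode byte 0x8b to 0x8d turns it into
   "leaq foo(%rip), %rax" and the relocation becomes R_X86_64_PC32 against foo
   itself, so no GOT slot is needed.  The non-PC-relative forms instead
   rewrite the instruction to take an immediate operand and use R_X86_64_32
   or R_X86_64_32S.  */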
1473
1474 static bfd_boolean
1475 elf_x86_64_convert_load_reloc (bfd *abfd,
1476 bfd_byte *contents,
1477 unsigned int *r_type_p,
1478 Elf_Internal_Rela *irel,
1479 struct elf_link_hash_entry *h,
1480 bfd_boolean *converted,
1481 struct bfd_link_info *link_info)
1482 {
1483 struct elf_x86_link_hash_table *htab;
1484 bfd_boolean is_pic;
1485 bfd_boolean no_overflow;
1486 bfd_boolean relocx;
1487 bfd_boolean to_reloc_pc32;
1488 asection *tsec;
1489 bfd_signed_vma raddend;
1490 unsigned int opcode;
1491 unsigned int modrm;
1492 unsigned int r_type = *r_type_p;
1493 unsigned int r_symndx;
1494 bfd_vma roff = irel->r_offset;
1495
1496 if (roff < (r_type == R_X86_64_REX_GOTPCRELX ? 3 : 2))
1497 return TRUE;
1498
1499 raddend = irel->r_addend;
1500 /* Addend for 32-bit PC-relative relocation must be -4. */
1501 if (raddend != -4)
1502 return TRUE;
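
  /* (The disp32 being relocated is the last field of the instruction and
     %rip-relative addressing is relative to the end of the instruction, so
     a convertible load always carries an addend of -4; any other addend
     means extra bytes, e.g. an immediate, follow the displacement.)  */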
1503
1504 htab = elf_x86_hash_table (link_info, X86_64_ELF_DATA);
1505 is_pic = bfd_link_pic (link_info);
1506
1507 relocx = (r_type == R_X86_64_GOTPCRELX
1508 || r_type == R_X86_64_REX_GOTPCRELX);
1509
1510 /* TRUE if --no-relax is used. */
1511 no_overflow = link_info->disable_target_specific_optimizations > 1;
1512
1513 r_symndx = htab->r_sym (irel->r_info);
1514
1515 opcode = bfd_get_8 (abfd, contents + roff - 2);
1516
1517 /* Convert mov to lea since it has been done for a while. */
1518 if (opcode != 0x8b)
1519 {
1520 /* Only convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX
1521 for call, jmp or one of adc, add, and, cmp, or, sbb, sub,
1522 test, xor instructions. */
1523 if (!relocx)
1524 return TRUE;
1525 }
1526
1527 /* We convert only to R_X86_64_PC32:
1528 1. Branch.
1529 2. R_X86_64_GOTPCREL since we can't modify REX byte.
1530 3. no_overflow is true.
1531 4. PIC.
1532 */
1533 to_reloc_pc32 = (opcode == 0xff
1534 || !relocx
1535 || no_overflow
1536 || is_pic);
1537
1538 /* Get the symbol referred to by the reloc. */
1539 if (h == NULL)
1540 {
1541 Elf_Internal_Sym *isym
1542 = bfd_sym_from_r_symndx (&htab->sym_cache, abfd, r_symndx);
1543
1544 /* Skip relocation against undefined symbols. */
1545 if (isym->st_shndx == SHN_UNDEF)
1546 return TRUE;
1547
1548 if (isym->st_shndx == SHN_ABS)
1549 tsec = bfd_abs_section_ptr;
1550 else if (isym->st_shndx == SHN_COMMON)
1551 tsec = bfd_com_section_ptr;
1552 else if (isym->st_shndx == SHN_X86_64_LCOMMON)
1553 tsec = &_bfd_elf_large_com_section;
1554 else
1555 tsec = bfd_section_from_elf_index (abfd, isym->st_shndx);
1556 }
1557 else
1558 {
1559 /* Undefined weak symbol is only bound locally in executable
1560 and its reference is resolved as 0 without relocation
1561 overflow. We can only perform this optimization for
1562 GOTPCRELX relocations since we need to modify REX byte.
 1563 It is OK to convert mov with R_X86_64_GOTPCREL to
1564 R_X86_64_PC32. */
1565 bfd_boolean local_ref;
1566 struct elf_x86_link_hash_entry *eh = elf_x86_hash_entry (h);
1567
1568 /* NB: Also set linker_def via SYMBOL_REFERENCES_LOCAL_P. */
1569 local_ref = SYMBOL_REFERENCES_LOCAL_P (link_info, h);
1570 if ((relocx || opcode == 0x8b)
1571 && (h->root.type == bfd_link_hash_undefweak
1572 && !eh->linker_def
1573 && local_ref))
1574 {
1575 if (opcode == 0xff)
1576 {
1577 /* Skip for branch instructions since R_X86_64_PC32
1578 may overflow. */
1579 if (no_overflow)
1580 return TRUE;
1581 }
1582 else if (relocx)
1583 {
1584 /* For non-branch instructions, we can convert to
1585 R_X86_64_32/R_X86_64_32S since we know if there
1586 is a REX byte. */
1587 to_reloc_pc32 = FALSE;
1588 }
1589
1590 /* Since we don't know the current PC when PIC is true,
1591 we can't convert to R_X86_64_PC32. */
1592 if (to_reloc_pc32 && is_pic)
1593 return TRUE;
1594
1595 goto convert;
1596 }
 1597 /* Avoid optimizing GOTPCREL relocations against _DYNAMIC since
1598 ld.so may use its link-time address. */
1599 else if (h->start_stop
1600 || eh->linker_def
1601 || ((h->def_regular
1602 || h->root.type == bfd_link_hash_defined
1603 || h->root.type == bfd_link_hash_defweak)
1604 && h != htab->elf.hdynamic
1605 && local_ref))
1606 {
1607 /* bfd_link_hash_new or bfd_link_hash_undefined is
1608 set by an assignment in a linker script in
1609 bfd_elf_record_link_assignment. start_stop is set
1610 on __start_SECNAME/__stop_SECNAME which mark section
1611 SECNAME. */
1612 if (h->start_stop
1613 || eh->linker_def
1614 || (h->def_regular
1615 && (h->root.type == bfd_link_hash_new
1616 || h->root.type == bfd_link_hash_undefined
1617 || ((h->root.type == bfd_link_hash_defined
1618 || h->root.type == bfd_link_hash_defweak)
1619 && h->root.u.def.section == bfd_und_section_ptr))))
1620 {
1621 /* Skip since R_X86_64_32/R_X86_64_32S may overflow. */
1622 if (no_overflow)
1623 return TRUE;
1624 goto convert;
1625 }
1626 tsec = h->root.u.def.section;
1627 }
1628 else
1629 return TRUE;
1630 }
1631
1632 /* Don't convert GOTPCREL relocation against large section. */
1633 if (elf_section_data (tsec) != NULL
1634 && (elf_section_flags (tsec) & SHF_X86_64_LARGE) != 0)
1635 return TRUE;
1636
1637 /* Skip since R_X86_64_PC32/R_X86_64_32/R_X86_64_32S may overflow. */
1638 if (no_overflow)
1639 return TRUE;
1640
1641 convert:
1642 if (opcode == 0xff)
1643 {
1644 /* We have "call/jmp *foo@GOTPCREL(%rip)". */
1645 unsigned int nop;
1646 unsigned int disp;
1647 bfd_vma nop_offset;
1648
1649 /* Convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX to
1650 R_X86_64_PC32. */
1651 modrm = bfd_get_8 (abfd, contents + roff - 1);
1652 if (modrm == 0x25)
1653 {
1654 /* Convert to "jmp foo nop". */
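	  /* I.e. the 6-byte "ff 25 <disp32>" becomes "e9 <rel32>" followed
	     by a single nop: the relocation slides back one byte and the
	     trailing 0x90 pads the rewritten jmp out to the original
	     instruction length.  */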
1655 modrm = 0xe9;
1656 nop = NOP_OPCODE;
1657 nop_offset = irel->r_offset + 3;
1658 disp = bfd_get_32 (abfd, contents + irel->r_offset);
1659 irel->r_offset -= 1;
1660 bfd_put_32 (abfd, disp, contents + irel->r_offset);
1661 }
1662 else
1663 {
1664 struct elf_x86_link_hash_entry *eh
1665 = (struct elf_x86_link_hash_entry *) h;
1666
1667 /* Convert to "nop call foo". ADDR_PREFIX_OPCODE
1668 is a nop prefix. */
1669 modrm = 0xe8;
1670 /* To support TLS optimization, always use addr32 prefix for
1671 "call *__tls_get_addr@GOTPCREL(%rip)". */
1672 if (eh && eh->tls_get_addr)
1673 {
1674 nop = 0x67;
1675 nop_offset = irel->r_offset - 2;
1676 }
1677 else
1678 {
1679 nop = link_info->call_nop_byte;
1680 if (link_info->call_nop_as_suffix)
1681 {
1682 nop_offset = irel->r_offset + 3;
1683 disp = bfd_get_32 (abfd, contents + irel->r_offset);
1684 irel->r_offset -= 1;
1685 bfd_put_32 (abfd, disp, contents + irel->r_offset);
1686 }
1687 else
1688 nop_offset = irel->r_offset - 2;
1689 }
1690 }
1691 bfd_put_8 (abfd, nop, contents + nop_offset);
1692 bfd_put_8 (abfd, modrm, contents + irel->r_offset - 1);
1693 r_type = R_X86_64_PC32;
1694 }
1695 else
1696 {
1697 unsigned int rex;
1698 unsigned int rex_mask = REX_R;
1699
1700 if (r_type == R_X86_64_REX_GOTPCRELX)
1701 rex = bfd_get_8 (abfd, contents + roff - 3);
1702 else
1703 rex = 0;
1704
1705 if (opcode == 0x8b)
1706 {
1707 if (to_reloc_pc32)
1708 {
1709 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
1710 "lea foo(%rip), %reg". */
1711 opcode = 0x8d;
1712 r_type = R_X86_64_PC32;
1713 }
1714 else
1715 {
1716 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
1717 "mov $foo, %reg". */
1718 opcode = 0xc7;
1719 modrm = bfd_get_8 (abfd, contents + roff - 1);
1720 modrm = 0xc0 | (modrm & 0x38) >> 3;
1721 if ((rex & REX_W) != 0
1722 && ABI_64_P (link_info->output_bfd))
1723 {
1724 /* Keep the REX_W bit in REX byte for LP64. */
1725 r_type = R_X86_64_32S;
1726 goto rewrite_modrm_rex;
1727 }
1728 else
1729 {
1730 /* If the REX_W bit in the REX byte isn't needed,
1731 use R_X86_64_32 and clear the W bit to avoid
1732 sign-extending imm32 to imm64. */
1733 r_type = R_X86_64_32;
1734 /* Clear the W bit in REX byte. */
1735 rex_mask |= REX_W;
1736 goto rewrite_modrm_rex;
1737 }
1738 }
1739 }
1740 else
1741 {
1742 /* R_X86_64_PC32 isn't supported. */
1743 if (to_reloc_pc32)
1744 return TRUE;
1745
1746 modrm = bfd_get_8 (abfd, contents + roff - 1);
1747 if (opcode == 0x85)
1748 {
1749 /* Convert "test %reg, foo@GOTPCREL(%rip)" to
1750 "test $foo, %reg". */
1751 modrm = 0xc0 | (modrm & 0x38) >> 3;
1752 opcode = 0xf7;
1753 }
1754 else
1755 {
1756 /* Convert "binop foo@GOTPCREL(%rip), %reg" to
1757 "binop $foo, %reg". */
1758 modrm = 0xc0 | (modrm & 0x38) >> 3 | (opcode & 0x3c);
1759 opcode = 0x81;
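/* This works because for the group-1 binops the /digit of the
   "81 /digit" immediate form equals bits 3-5 of the memory-form
   opcode (add 03 -> /0, or 0b -> /1, ..., cmp 3b -> /7), so
   (opcode & 0x3c) lands directly in the ModRM reg field while the
   original destination register moves into the r/m field.  */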
1760 }
1761
1762 /* Use R_X86_64_32 with 32-bit operand to avoid relocation
1763 overflow when sign-extending imm32 to imm64. */
1764 r_type = (rex & REX_W) != 0 ? R_X86_64_32S : R_X86_64_32;
1765
1766 rewrite_modrm_rex:
1767 bfd_put_8 (abfd, modrm, contents + roff - 1);
1768
1769 if (rex)
1770 {
1771 /* Move the R bit to the B bit in REX byte. */
1772 rex = (rex & ~rex_mask) | (rex & REX_R) >> 2;
1773 bfd_put_8 (abfd, rex, contents + roff - 3);
1774 }
1775
1776 /* No addend for R_X86_64_32/R_X86_64_32S relocations. */
1777 irel->r_addend = 0;
1778 }
1779
1780 bfd_put_8 (abfd, opcode, contents + roff - 2);
1781 }
1782
1783 *r_type_p = r_type;
1784 irel->r_info = htab->r_info (r_symndx,
1785 r_type | R_X86_64_converted_reloc_bit);
1786
1787 *converted = TRUE;
1788
1789 return TRUE;
1790 }
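
/* A minimal, self-contained sketch of the ModRM/REX arithmetic performed
   by elf_x86_64_convert_load_reloc when "mov foo@GOTPCREL(%rip), %reg"
   is rewritten as "mov $foo, %reg" (the R_X86_64_32S case, which keeps
   REX.W; the R_X86_64_32 case additionally clears REX.W).  It is
   illustrative only: the helper is not used anywhere in this file and its
   name and parameters are invented for the example.  */

static void ATTRIBUTE_UNUSED
example_mov_gotpcrel_to_imm (unsigned char *modrm_byte,
			     unsigned char *rex_byte)
{
  unsigned char modrm = *modrm_byte;
  unsigned char rex = *rex_byte;

  /* The RIP-relative form keeps the destination register in the ModRM
     reg field (bits 3-5).  The register-direct immediate form needs
     mod == 3 (0xc0) and the same register in the r/m field (bits 0-2).  */
  *modrm_byte = (unsigned char) (0xc0 | ((modrm & 0x38) >> 3));

  /* The register's extension bit moves with it: REX.R (0x4) is cleared
     and shifted down into REX.B (0x1), mirroring
     "rex = (rex & ~rex_mask) | (rex & REX_R) >> 2" above.  */
  *rex_byte = (unsigned char) ((rex & ~0x04) | ((rex & 0x04) >> 2));
}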
1791
1792 /* Look through the relocs for a section during the first phase, and
1793 calculate needed space in the global offset table, procedure
1794 linkage table, and dynamic reloc sections. */
1795
1796 static bfd_boolean
1797 elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info,
1798 asection *sec,
1799 const Elf_Internal_Rela *relocs)
1800 {
1801 struct elf_x86_link_hash_table *htab;
1802 Elf_Internal_Shdr *symtab_hdr;
1803 struct elf_link_hash_entry **sym_hashes;
1804 const Elf_Internal_Rela *rel;
1805 const Elf_Internal_Rela *rel_end;
1806 asection *sreloc;
1807 bfd_byte *contents;
1808 bfd_boolean converted;
1809
1810 if (bfd_link_relocatable (info))
1811 return TRUE;
1812
1813 /* Don't do anything special with non-loaded, non-alloced sections.
1814 In particular, any relocs in such sections should not affect GOT
1815 and PLT reference counting (ie. we don't allow them to create GOT
1816 or PLT entries), there's no possibility or desire to optimize TLS
1817 relocs, and there's not much point in propagating relocs to shared
1818 libs that the dynamic linker won't relocate. */
1819 if ((sec->flags & SEC_ALLOC) == 0)
1820 return TRUE;
1821
1822 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1823 if (htab == NULL)
1824 {
1825 sec->check_relocs_failed = 1;
1826 return FALSE;
1827 }
1828
1829 BFD_ASSERT (is_x86_elf (abfd, htab));
1830
1831 /* Get the section contents. */
1832 if (elf_section_data (sec)->this_hdr.contents != NULL)
1833 contents = elf_section_data (sec)->this_hdr.contents;
1834 else if (!bfd_malloc_and_get_section (abfd, sec, &contents))
1835 {
1836 sec->check_relocs_failed = 1;
1837 return FALSE;
1838 }
1839
1840 symtab_hdr = &elf_symtab_hdr (abfd);
1841 sym_hashes = elf_sym_hashes (abfd);
1842
1843 converted = FALSE;
1844
1845 sreloc = NULL;
1846
1847 rel_end = relocs + sec->reloc_count;
1848 for (rel = relocs; rel < rel_end; rel++)
1849 {
1850 unsigned int r_type;
1851 unsigned int r_symndx;
1852 struct elf_link_hash_entry *h;
1853 struct elf_x86_link_hash_entry *eh;
1854 Elf_Internal_Sym *isym;
1855 const char *name;
1856 bfd_boolean size_reloc;
1857 bfd_boolean converted_reloc;
1858 bfd_boolean do_check_pic;
1859
1860 r_symndx = htab->r_sym (rel->r_info);
1861 r_type = ELF32_R_TYPE (rel->r_info);
1862
1863 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
1864 {
1865 /* xgettext:c-format */
1866 _bfd_error_handler (_("%pB: bad symbol index: %d"),
1867 abfd, r_symndx);
1868 goto error_return;
1869 }
1870
1871 if (r_symndx < symtab_hdr->sh_info)
1872 {
1873 /* A local symbol. */
1874 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1875 abfd, r_symndx);
1876 if (isym == NULL)
1877 goto error_return;
1878
1879 /* Check relocation against local STT_GNU_IFUNC symbol. */
1880 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
1881 {
1882 h = _bfd_elf_x86_get_local_sym_hash (htab, abfd, rel,
1883 TRUE);
1884 if (h == NULL)
1885 goto error_return;
1886
1887 /* Fake a STT_GNU_IFUNC symbol. */
1888 h->root.root.string = bfd_elf_sym_name (abfd, symtab_hdr,
1889 isym, NULL);
1890 h->type = STT_GNU_IFUNC;
1891 h->def_regular = 1;
1892 h->ref_regular = 1;
1893 h->forced_local = 1;
1894 h->root.type = bfd_link_hash_defined;
1895 }
1896 else
1897 h = NULL;
1898 }
1899 else
1900 {
1901 isym = NULL;
1902 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1903 while (h->root.type == bfd_link_hash_indirect
1904 || h->root.type == bfd_link_hash_warning)
1905 h = (struct elf_link_hash_entry *) h->root.u.i.link;
1906 }
1907
1908 /* Check invalid x32 relocations. */
1909 if (!ABI_64_P (abfd))
1910 switch (r_type)
1911 {
1912 default:
1913 break;
1914
1915 case R_X86_64_DTPOFF64:
1916 case R_X86_64_TPOFF64:
1917 case R_X86_64_PC64:
1918 case R_X86_64_GOTOFF64:
1919 case R_X86_64_GOT64:
1920 case R_X86_64_GOTPCREL64:
1921 case R_X86_64_GOTPC64:
1922 case R_X86_64_GOTPLT64:
1923 case R_X86_64_PLTOFF64:
1924 {
1925 if (h)
1926 name = h->root.root.string;
1927 else
1928 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1929 NULL);
1930 _bfd_error_handler
1931 /* xgettext:c-format */
1932 (_("%pB: relocation %s against symbol `%s' isn't "
1933 "supported in x32 mode"), abfd,
1934 x86_64_elf_howto_table[r_type].name, name);
1935 bfd_set_error (bfd_error_bad_value);
1936 goto error_return;
1937 }
1938 break;
1939 }
1940
1941 if (h != NULL)
1942 {
1943 /* It is referenced by a non-shared object. */
1944 h->ref_regular = 1;
1945
1946 if (h->type == STT_GNU_IFUNC)
1947 elf_tdata (info->output_bfd)->has_gnu_symbols
1948 |= elf_gnu_symbol_ifunc;
1949 }
1950
1951 converted_reloc = FALSE;
1952 if ((r_type == R_X86_64_GOTPCREL
1953 || r_type == R_X86_64_GOTPCRELX
1954 || r_type == R_X86_64_REX_GOTPCRELX)
1955 && (h == NULL || h->type != STT_GNU_IFUNC))
1956 {
1957 Elf_Internal_Rela *irel = (Elf_Internal_Rela *) rel;
1958 if (!elf_x86_64_convert_load_reloc (abfd, contents, &r_type,
1959 irel, h, &converted_reloc,
1960 info))
1961 goto error_return;
1962
1963 if (converted_reloc)
1964 converted = TRUE;
1965 }
1966
1967 if (! elf_x86_64_tls_transition (info, abfd, sec, contents,
1968 symtab_hdr, sym_hashes,
1969 &r_type, GOT_UNKNOWN,
1970 rel, rel_end, h, r_symndx, FALSE))
1971 goto error_return;
1972
1973 /* Check if _GLOBAL_OFFSET_TABLE_ is referenced. */
1974 if (h == htab->elf.hgot)
1975 htab->got_referenced = TRUE;
1976
1977 eh = (struct elf_x86_link_hash_entry *) h;
1978 switch (r_type)
1979 {
1980 case R_X86_64_TLSLD:
1981 htab->tls_ld_or_ldm_got.refcount = 1;
1982 goto create_got;
1983
1984 case R_X86_64_TPOFF32:
1985 if (!bfd_link_executable (info) && ABI_64_P (abfd))
1986 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
1987 &x86_64_elf_howto_table[r_type]);
1988 if (eh != NULL)
1989 eh->zero_undefweak &= 0x2;
1990 break;
1991
1992 case R_X86_64_GOTTPOFF:
1993 if (!bfd_link_executable (info))
1994 info->flags |= DF_STATIC_TLS;
1995 /* Fall through */
1996
1997 case R_X86_64_GOT32:
1998 case R_X86_64_GOTPCREL:
1999 case R_X86_64_GOTPCRELX:
2000 case R_X86_64_REX_GOTPCRELX:
2001 case R_X86_64_TLSGD:
2002 case R_X86_64_GOT64:
2003 case R_X86_64_GOTPCREL64:
2004 case R_X86_64_GOTPLT64:
2005 case R_X86_64_GOTPC32_TLSDESC:
2006 case R_X86_64_TLSDESC_CALL:
2007 /* This symbol requires a global offset table entry. */
2008 {
2009 int tls_type, old_tls_type;
2010
2011 switch (r_type)
2012 {
2013 default: tls_type = GOT_NORMAL; break;
2014 case R_X86_64_TLSGD: tls_type = GOT_TLS_GD; break;
2015 case R_X86_64_GOTTPOFF: tls_type = GOT_TLS_IE; break;
2016 case R_X86_64_GOTPC32_TLSDESC:
2017 case R_X86_64_TLSDESC_CALL:
2018 tls_type = GOT_TLS_GDESC; break;
2019 }
2020
2021 if (h != NULL)
2022 {
2023 h->got.refcount = 1;
2024 old_tls_type = eh->tls_type;
2025 }
2026 else
2027 {
2028 bfd_signed_vma *local_got_refcounts;
2029
2030 /* This is a global offset table entry for a local symbol. */
2031 local_got_refcounts = elf_local_got_refcounts (abfd);
2032 if (local_got_refcounts == NULL)
2033 {
2034 bfd_size_type size;
2035
2036 size = symtab_hdr->sh_info;
2037 size *= sizeof (bfd_signed_vma)
2038 + sizeof (bfd_vma) + sizeof (char);
2039 local_got_refcounts = ((bfd_signed_vma *)
2040 bfd_zalloc (abfd, size));
2041 if (local_got_refcounts == NULL)
2042 goto error_return;
2043 elf_local_got_refcounts (abfd) = local_got_refcounts;
2044 elf_x86_local_tlsdesc_gotent (abfd)
2045 = (bfd_vma *) (local_got_refcounts + symtab_hdr->sh_info);
2046 elf_x86_local_got_tls_type (abfd)
2047 = (char *) (local_got_refcounts + 2 * symtab_hdr->sh_info);
2048 }
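/* The single allocation above holds three consecutive arrays, each
   with symtab_hdr->sh_info elements: GOT reference counts
   (bfd_signed_vma), then TLSDESC GOT offsets (bfd_vma), then one
   TLS-type byte per local symbol.  */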
2049 local_got_refcounts[r_symndx] = 1;
2050 old_tls_type
2051 = elf_x86_local_got_tls_type (abfd) [r_symndx];
2052 }
2053
2054 /* If a TLS symbol is accessed using IE at least once,
2055 there is no point in using a dynamic model for it. */
2056 if (old_tls_type != tls_type && old_tls_type != GOT_UNKNOWN
2057 && (! GOT_TLS_GD_ANY_P (old_tls_type)
2058 || tls_type != GOT_TLS_IE))
2059 {
2060 if (old_tls_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (tls_type))
2061 tls_type = old_tls_type;
2062 else if (GOT_TLS_GD_ANY_P (old_tls_type)
2063 && GOT_TLS_GD_ANY_P (tls_type))
2064 tls_type |= old_tls_type;
2065 else
2066 {
2067 if (h)
2068 name = h->root.root.string;
2069 else
2070 name = bfd_elf_sym_name (abfd, symtab_hdr,
2071 isym, NULL);
2072 _bfd_error_handler
2073 /* xgettext:c-format */
2074 (_("%pB: '%s' accessed both as normal and"
2075 " thread local symbol"),
2076 abfd, name);
2077 bfd_set_error (bfd_error_bad_value);
2078 goto error_return;
2079 }
2080 }
2081
2082 if (old_tls_type != tls_type)
2083 {
2084 if (eh != NULL)
2085 eh->tls_type = tls_type;
2086 else
2087 elf_x86_local_got_tls_type (abfd) [r_symndx] = tls_type;
2088 }
2089 }
2090 /* Fall through */
2091
2092 case R_X86_64_GOTOFF64:
2093 case R_X86_64_GOTPC32:
2094 case R_X86_64_GOTPC64:
2095 create_got:
2096 if (eh != NULL)
2097 eh->zero_undefweak &= 0x2;
2098 break;
2099
2100 case R_X86_64_PLT32:
2101 case R_X86_64_PLT32_BND:
2102 /* This symbol requires a procedure linkage table entry. We
2103 actually build the entry in adjust_dynamic_symbol,
2104 because this might be a case of linking PIC code which is
2105 never referenced by a dynamic object, in which case we
2106 don't need to generate a procedure linkage table entry
2107 after all. */
2108
2109 /* If this is a local symbol, we resolve it directly without
2110 creating a procedure linkage table entry. */
2111 if (h == NULL)
2112 continue;
2113
2114 eh->zero_undefweak &= 0x2;
2115 h->needs_plt = 1;
2116 h->plt.refcount = 1;
2117 break;
2118
2119 case R_X86_64_PLTOFF64:
2120 /* This tries to form the 'address' of a function relative
2121 to the GOT. For global symbols we need a PLT entry. */
2122 if (h != NULL)
2123 {
2124 h->needs_plt = 1;
2125 h->plt.refcount = 1;
2126 }
2127 goto create_got;
2128
2129 case R_X86_64_SIZE32:
2130 case R_X86_64_SIZE64:
2131 size_reloc = TRUE;
2132 goto do_size;
2133
2134 case R_X86_64_PC8:
2135 case R_X86_64_PC16:
2136 case R_X86_64_PC32:
2137 case R_X86_64_PC32_BND:
2138 do_check_pic = TRUE;
2139 goto check_pic;
2140
2141 case R_X86_64_32:
2142 if (!ABI_64_P (abfd))
2143 goto pointer;
2144 /* Fall through. */
2145 case R_X86_64_8:
2146 case R_X86_64_16:
2147 case R_X86_64_32S:
2148 /* Check for relocation overflow, as these relocs may lead to
2149 run-time relocation overflow. Don't error out for
2150 sections we don't care about, such as debug sections, or
2151 when the relocation overflow check is disabled. */
2152 if (!info->no_reloc_overflow_check
2153 && !converted_reloc
2154 && (bfd_link_pic (info)
2155 || (bfd_link_executable (info)
2156 && h != NULL
2157 && !h->def_regular
2158 && h->def_dynamic
2159 && (sec->flags & SEC_READONLY) == 0)))
2160 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
2161 &x86_64_elf_howto_table[r_type]);
2162 /* Fall through. */
2163
2164 case R_X86_64_PC64:
2165 case R_X86_64_64:
2166 pointer:
2167 do_check_pic = FALSE;
2168 check_pic:
2169 if (eh != NULL && (sec->flags & SEC_CODE) != 0)
2170 eh->zero_undefweak |= 0x2;
2171 /* We are called after all symbols have been resolved. Only
2172 relocations against STT_GNU_IFUNC symbols must go through
2173 the PLT. */
2174 if (h != NULL
2175 && (bfd_link_executable (info)
2176 || h->type == STT_GNU_IFUNC))
2177 {
2178 bfd_boolean func_pointer_ref = FALSE;
2179
2180 if (r_type == R_X86_64_PC32)
2181 {
2182 /* Since something like ".long foo - ." may be used
2183 as a pointer, make sure that the PLT is used if foo is
2184 a function defined in a shared library. */
2185 if ((sec->flags & SEC_CODE) == 0)
2186 {
2187 h->pointer_equality_needed = 1;
2188 if (bfd_link_pie (info)
2189 && h->type == STT_FUNC
2190 && !h->def_regular
2191 && h->def_dynamic)
2192 {
2193 h->needs_plt = 1;
2194 h->plt.refcount = 1;
2195 }
2196 }
2197 }
2198 else if (r_type != R_X86_64_PC32_BND
2199 && r_type != R_X86_64_PC64)
2200 {
2201 h->pointer_equality_needed = 1;
2202 /* At run-time, R_X86_64_64 can be resolved for both
2203 x86-64 and x32. But R_X86_64_32 and R_X86_64_32S
2204 can only be resolved for x32. */
2205 if ((sec->flags & SEC_READONLY) == 0
2206 && (r_type == R_X86_64_64
2207 || (!ABI_64_P (abfd)
2208 && (r_type == R_X86_64_32
2209 || r_type == R_X86_64_32S))))
2210 func_pointer_ref = TRUE;
2211 }
2212
2213 if (!func_pointer_ref)
2214 {
2215 /* If this reloc is in a read-only section, we might
2216 need a copy reloc. We can't check reliably at this
2217 stage whether the section is read-only, as input
2218 sections have not yet been mapped to output sections.
2219 Tentatively set the flag for now, and correct in
2220 adjust_dynamic_symbol. */
2221 h->non_got_ref = 1;
2222
2223 /* We may need a .plt entry if the symbol is a function
2224 defined in a shared lib or is a function referenced
2225 from the code or read-only section. */
2226 if (!h->def_regular
2227 || (sec->flags & (SEC_CODE | SEC_READONLY)) != 0)
2228 h->plt.refcount = 1;
2229 }
2230 }
2231
2232 if (do_check_pic)
2233 {
2234 /* Don't complain about -fPIC if the symbol is undefined
2235 when building an executable, unless it is an unresolved
2236 weak symbol, it references a dynamic definition in a PIE,
2237 or -z nocopyreloc is used. */
2238 bfd_boolean no_copyreloc_p
2239 = (info->nocopyreloc
2240 || (h != NULL
2241 && !h->root.linker_def
2242 && !h->root.ldscript_def
2243 && eh->def_protected
2244 && elf_has_no_copy_on_protected (h->root.u.def.section->owner)));
2245 if ((sec->flags & SEC_ALLOC) != 0
2246 && (sec->flags & SEC_READONLY) != 0
2247 && h != NULL
2248 && ((bfd_link_executable (info)
2249 && ((h->root.type == bfd_link_hash_undefweak
2250 && (eh == NULL
2251 || !UNDEFINED_WEAK_RESOLVED_TO_ZERO (info,
2252 eh)))
2253 || (bfd_link_pie (info)
2254 && !SYMBOL_DEFINED_NON_SHARED_P (h)
2255 && h->def_dynamic)
2256 || (no_copyreloc_p
2257 && h->def_dynamic
2258 && !(h->root.u.def.section->flags & SEC_CODE))))
2259 || bfd_link_dll (info)))
2260 {
2261 bfd_boolean fail = FALSE;
2262 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
2263 {
2264 /* Symbol is referenced locally. Make sure it is
2265 defined locally. */
2266 fail = !SYMBOL_DEFINED_NON_SHARED_P (h);
2267 }
2268 else if (bfd_link_pie (info))
2269 {
2270 /* We can only use PC-relative relocations in PIE
2271 from non-code sections. */
2272 if (h->type == STT_FUNC
2273 && (sec->flags & SEC_CODE) != 0)
2274 fail = TRUE;
2275 }
2276 else if (no_copyreloc_p || bfd_link_dll (info))
2277 {
2278 /* The symbol doesn't need a copy reloc and isn't
2279 referenced locally. Don't allow PC-relative
2280 relocations against default and protected
2281 symbols, since the address of a protected function
2282 and the location of protected data may not be in
2283 the shared object. */
2284 fail = (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
2285 || ELF_ST_VISIBILITY (h->other) == STV_PROTECTED);
2286 }
2287
2288 if (fail)
2289 return elf_x86_64_need_pic (info, abfd, sec, h,
2290 symtab_hdr, isym,
2291 &x86_64_elf_howto_table[r_type]);
2292 }
2293 }
2294
2295 size_reloc = FALSE;
2296 do_size:
2297 if (NEED_DYNAMIC_RELOCATION_P (info, TRUE, h, sec, r_type,
2298 htab->pointer_r_type))
2299 {
2300 struct elf_dyn_relocs *p;
2301 struct elf_dyn_relocs **head;
2302
2303 /* We must copy these reloc types into the output file.
2304 Create a reloc section in dynobj and make room for
2305 this reloc. */
2306 if (sreloc == NULL)
2307 {
2308 sreloc = _bfd_elf_make_dynamic_reloc_section
2309 (sec, htab->elf.dynobj, ABI_64_P (abfd) ? 3 : 2,
2310 abfd, /*rela?*/ TRUE);
2311
2312 if (sreloc == NULL)
2313 goto error_return;
2314 }
2315
2316 /* If this is a global symbol, we count the number of
2317 relocations we need for this symbol. */
2318 if (h != NULL)
2319 head = &eh->dyn_relocs;
2320 else
2321 {
2322 /* Track dynamic relocs needed for local syms too.
2323 We really need local syms available to do this
2324 easily. Oh well. */
2325 asection *s;
2326 void **vpp;
2327
2328 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2329 abfd, r_symndx);
2330 if (isym == NULL)
2331 goto error_return;
2332
2333 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
2334 if (s == NULL)
2335 s = sec;
2336
2337 /* Beware of type punned pointers vs strict aliasing
2338 rules. */
2339 vpp = &(elf_section_data (s)->local_dynrel);
2340 head = (struct elf_dyn_relocs **)vpp;
2341 }
2342
2343 p = *head;
2344 if (p == NULL || p->sec != sec)
2345 {
2346 bfd_size_type amt = sizeof *p;
2347
2348 p = ((struct elf_dyn_relocs *)
2349 bfd_alloc (htab->elf.dynobj, amt));
2350 if (p == NULL)
2351 goto error_return;
2352 p->next = *head;
2353 *head = p;
2354 p->sec = sec;
2355 p->count = 0;
2356 p->pc_count = 0;
2357 }
2358
2359 p->count += 1;
2360 /* Count size relocation as PC-relative relocation. */
2361 if (X86_PCREL_TYPE_P (r_type) || size_reloc)
2362 p->pc_count += 1;
2363 }
2364 break;
2365
2366 /* This relocation describes the C++ object vtable hierarchy.
2367 Reconstruct it for later use during GC. */
2368 case R_X86_64_GNU_VTINHERIT:
2369 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
2370 goto error_return;
2371 break;
2372
2373 /* This relocation describes which C++ vtable entries are actually
2374 used. Record for later use during GC. */
2375 case R_X86_64_GNU_VTENTRY:
2376 BFD_ASSERT (h != NULL);
2377 if (h != NULL
2378 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
2379 goto error_return;
2380 break;
2381
2382 default:
2383 break;
2384 }
2385 }
2386
2387 if (elf_section_data (sec)->this_hdr.contents != contents)
2388 {
2389 if (!converted && !info->keep_memory)
2390 free (contents);
2391 else
2392 {
2393 /* Cache the section contents for elf_link_input_bfd if any
2394 load is converted or --no-keep-memory isn't used. */
2395 elf_section_data (sec)->this_hdr.contents = contents;
2396 }
2397 }
2398
2399 /* Cache relocations if any load is converted. */
2400 if (elf_section_data (sec)->relocs != relocs && converted)
2401 elf_section_data (sec)->relocs = (Elf_Internal_Rela *) relocs;
2402
2403 return TRUE;
2404
2405 error_return:
2406 if (elf_section_data (sec)->this_hdr.contents != contents)
2407 free (contents);
2408 sec->check_relocs_failed = 1;
2409 return FALSE;
2410 }
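
/* A minimal standalone sketch of the dynamic-reloc bookkeeping built by
   elf_x86_64_check_relocs above: each global symbol (or, for a local
   symbol, the section it is defined in) carries a list of
   (section, count, pc_count) records, one per input section that needs
   dynamic relocations.  The type and helper below are invented for
   illustration; the real code uses struct elf_dyn_relocs and allocates
   from the dynamic object's obstack.  */

struct example_dyn_relocs
{
  struct example_dyn_relocs *next;
  asection *sec;		/* Input section the relocs are against.  */
  int count;			/* Dynamic relocs needed for SEC.  */
  int pc_count;			/* Those that are PC-relative (or size).  */
};

static void ATTRIBUTE_UNUSED
example_count_dyn_reloc (struct example_dyn_relocs **head, asection *sec,
			 bfd_boolean pc_relative)
{
  struct example_dyn_relocs *p = *head;

  /* Records are pushed at the head of the list, so consecutive relocs
     from the same input section share one record.  */
  if (p == NULL || p->sec != sec)
    {
      p = (struct example_dyn_relocs *) bfd_malloc (sizeof *p);
      if (p == NULL)
	return;
      p->next = *head;
      *head = p;
      p->sec = sec;
      p->count = 0;
      p->pc_count = 0;
    }

  p->count += 1;
  if (pc_relative)
    p->pc_count += 1;
}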
2411
2412 /* Return the relocation value for a @tpoff relocation
2413 if the STT_TLS virtual address is ADDRESS. */
2414
2415 static bfd_vma
2416 elf_x86_64_tpoff (struct bfd_link_info *info, bfd_vma address)
2417 {
2418 struct elf_link_hash_table *htab = elf_hash_table (info);
2419 const struct elf_backend_data *bed = get_elf_backend_data (info->output_bfd);
2420 bfd_vma static_tls_size;
2421
2422 /* If tls_sec is NULL, we should have signalled an error already. */
2423 if (htab->tls_sec == NULL)
2424 return 0;
2425
2426 /* Consider special static TLS alignment requirements. */
2427 static_tls_size = BFD_ALIGN (htab->tls_size, bed->static_tls_alignment);
2428 return address - static_tls_size - htab->tls_sec->vma;
2429 }
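
/* A minimal standalone sketch of the @tpoff computation above: on x86-64
   the thread pointer (the %fs base) points just past the static TLS block,
   so a static TLS symbol ends up at a negative offset from it.  The helper
   and its parameters are invented for this example; the real code uses
   htab->tls_sec and the backend's static_tls_alignment.  */

static bfd_vma ATTRIBUTE_UNUSED
example_tpoff (bfd_vma sym_address, bfd_vma tls_segment_vma,
	       bfd_vma tls_size, bfd_vma tls_align)
{
  /* Round the static TLS size up to its alignment, mirroring
     BFD_ALIGN (htab->tls_size, bed->static_tls_alignment).  */
  bfd_vma static_tls_size = (tls_size + tls_align - 1) & -tls_align;

  /* The thread pointer sits at tls_segment_vma + static_tls_size, so the
     result is negative (it wraps as an unsigned bfd_vma).  */
  return sym_address - static_tls_size - tls_segment_vma;
}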
2430
2431 /* Relocate an x86_64 ELF section. */
2432
2433 static bfd_boolean
2434 elf_x86_64_relocate_section (bfd *output_bfd,
2435 struct bfd_link_info *info,
2436 bfd *input_bfd,
2437 asection *input_section,
2438 bfd_byte *contents,
2439 Elf_Internal_Rela *relocs,
2440 Elf_Internal_Sym *local_syms,
2441 asection **local_sections)
2442 {
2443 struct elf_x86_link_hash_table *htab;
2444 Elf_Internal_Shdr *symtab_hdr;
2445 struct elf_link_hash_entry **sym_hashes;
2446 bfd_vma *local_got_offsets;
2447 bfd_vma *local_tlsdesc_gotents;
2448 Elf_Internal_Rela *rel;
2449 Elf_Internal_Rela *wrel;
2450 Elf_Internal_Rela *relend;
2451 unsigned int plt_entry_size;
2452
2453 /* Skip if check_relocs failed. */
2454 if (input_section->check_relocs_failed)
2455 return FALSE;
2456
2457 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
2458 if (htab == NULL)
2459 return FALSE;
2460
2461 if (!is_x86_elf (input_bfd, htab))
2462 {
2463 bfd_set_error (bfd_error_wrong_format);
2464 return FALSE;
2465 }
2466
2467 plt_entry_size = htab->plt.plt_entry_size;
2468 symtab_hdr = &elf_symtab_hdr (input_bfd);
2469 sym_hashes = elf_sym_hashes (input_bfd);
2470 local_got_offsets = elf_local_got_offsets (input_bfd);
2471 local_tlsdesc_gotents = elf_x86_local_tlsdesc_gotent (input_bfd);
2472
2473 _bfd_x86_elf_set_tls_module_base (info);
2474
2475 rel = wrel = relocs;
2476 relend = relocs + input_section->reloc_count;
2477 for (; rel < relend; wrel++, rel++)
2478 {
2479 unsigned int r_type, r_type_tls;
2480 reloc_howto_type *howto;
2481 unsigned long r_symndx;
2482 struct elf_link_hash_entry *h;
2483 struct elf_x86_link_hash_entry *eh;
2484 Elf_Internal_Sym *sym;
2485 asection *sec;
2486 bfd_vma off, offplt, plt_offset;
2487 bfd_vma relocation;
2488 bfd_boolean unresolved_reloc;
2489 bfd_reloc_status_type r;
2490 int tls_type;
2491 asection *base_got, *resolved_plt;
2492 bfd_vma st_size;
2493 bfd_boolean resolved_to_zero;
2494 bfd_boolean relative_reloc;
2495 bfd_boolean converted_reloc;
2496 bfd_boolean need_copy_reloc_in_pie;
2497
2498 r_type = ELF32_R_TYPE (rel->r_info);
2499 if (r_type == (int) R_X86_64_GNU_VTINHERIT
2500 || r_type == (int) R_X86_64_GNU_VTENTRY)
2501 {
2502 if (wrel != rel)
2503 *wrel = *rel;
2504 continue;
2505 }
2506
2507 r_symndx = htab->r_sym (rel->r_info);
2508 converted_reloc = (r_type & R_X86_64_converted_reloc_bit) != 0;
2509 if (converted_reloc)
2510 {
2511 r_type &= ~R_X86_64_converted_reloc_bit;
2512 rel->r_info = htab->r_info (r_symndx, r_type);
2513 }
2514
2515 howto = elf_x86_64_rtype_to_howto (input_bfd, r_type);
2516 if (howto == NULL)
2517 return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);
2518
2519 h = NULL;
2520 sym = NULL;
2521 sec = NULL;
2522 unresolved_reloc = FALSE;
2523 if (r_symndx < symtab_hdr->sh_info)
2524 {
2525 sym = local_syms + r_symndx;
2526 sec = local_sections[r_symndx];
2527
2528 relocation = _bfd_elf_rela_local_sym (output_bfd, sym,
2529 &sec, rel);
2530 st_size = sym->st_size;
2531
2532 /* Relocate against local STT_GNU_IFUNC symbol. */
2533 if (!bfd_link_relocatable (info)
2534 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
2535 {
2536 h = _bfd_elf_x86_get_local_sym_hash (htab, input_bfd,
2537 rel, FALSE);
2538 if (h == NULL)
2539 abort ();
2540
2541 /* Set STT_GNU_IFUNC symbol value. */
2542 h->root.u.def.value = sym->st_value;
2543 h->root.u.def.section = sec;
2544 }
2545 }
2546 else
2547 {
2548 bfd_boolean warned ATTRIBUTE_UNUSED;
2549 bfd_boolean ignored ATTRIBUTE_UNUSED;
2550
2551 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
2552 r_symndx, symtab_hdr, sym_hashes,
2553 h, sec, relocation,
2554 unresolved_reloc, warned, ignored);
2555 st_size = h->size;
2556 }
2557
2558 if (sec != NULL && discarded_section (sec))
2559 {
2560 _bfd_clear_contents (howto, input_bfd, input_section,
2561 contents, rel->r_offset);
2562 wrel->r_offset = rel->r_offset;
2563 wrel->r_info = 0;
2564 wrel->r_addend = 0;
2565
2566 /* For ld -r, remove relocations in debug sections against
2567 sections defined in discarded sections. Not done for
2568 eh_frame, whose editing code expects them to be present. */
2569 if (bfd_link_relocatable (info)
2570 && (input_section->flags & SEC_DEBUGGING))
2571 wrel--;
2572
2573 continue;
2574 }
2575
2576 if (bfd_link_relocatable (info))
2577 {
2578 if (wrel != rel)
2579 *wrel = *rel;
2580 continue;
2581 }
2582
2583 if (rel->r_addend == 0 && !ABI_64_P (output_bfd))
2584 {
2585 if (r_type == R_X86_64_64)
2586 {
2587 /* For x32, treat R_X86_64_64 like R_X86_64_32 and
2588 zero-extend it to 64bit if addend is zero. */
2589 r_type = R_X86_64_32;
2590 memset (contents + rel->r_offset + 4, 0, 4);
2591 }
2592 else if (r_type == R_X86_64_SIZE64)
2593 {
2594 /* For x32, treat R_X86_64_SIZE64 like R_X86_64_SIZE32 and
2595 zero-extend it to 64bit if addend is zero. */
2596 r_type = R_X86_64_SIZE32;
2597 memset (contents + rel->r_offset + 4, 0, 4);
2598 }
2599 }
2600
2601 eh = (struct elf_x86_link_hash_entry *) h;
2602
2603 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
2604 it here if it is defined in a non-shared object. */
2605 if (h != NULL
2606 && h->type == STT_GNU_IFUNC
2607 && h->def_regular)
2608 {
2609 bfd_vma plt_index;
2610 const char *name;
2611
2612 if ((input_section->flags & SEC_ALLOC) == 0)
2613 {
2614 /* If this is a SHT_NOTE section without SHF_ALLOC, treat
2615 STT_GNU_IFUNC symbol as STT_FUNC. */
2616 if (elf_section_type (input_section) == SHT_NOTE)
2617 goto skip_ifunc;
2618 /* Dynamic relocs are not propagated for SEC_DEBUGGING
2619 sections because such sections are not SEC_ALLOC and
2620 thus ld.so will not process them. */
2621 if ((input_section->flags & SEC_DEBUGGING) != 0)
2622 continue;
2623 abort ();
2624 }
2625
2626 switch (r_type)
2627 {
2628 default:
2629 break;
2630
2631 case R_X86_64_GOTPCREL:
2632 case R_X86_64_GOTPCRELX:
2633 case R_X86_64_REX_GOTPCRELX:
2634 case R_X86_64_GOTPCREL64:
2635 base_got = htab->elf.sgot;
2636 off = h->got.offset;
2637
2638 if (base_got == NULL)
2639 abort ();
2640
2641 if (off == (bfd_vma) -1)
2642 {
2643 /* We can't use h->got.offset here to save state, or
2644 even just remember the offset, as finish_dynamic_symbol
2645 would use that as offset into .got. */
2646
2647 if (h->plt.offset == (bfd_vma) -1)
2648 abort ();
2649
2650 if (htab->elf.splt != NULL)
2651 {
2652 plt_index = (h->plt.offset / plt_entry_size
2653 - htab->plt.has_plt0);
2654 off = (plt_index + 3) * GOT_ENTRY_SIZE;
2655 base_got = htab->elf.sgotplt;
2656 }
2657 else
2658 {
2659 plt_index = h->plt.offset / plt_entry_size;
2660 off = plt_index * GOT_ENTRY_SIZE;
2661 base_got = htab->elf.igotplt;
2662 }
2663
2664 if (h->dynindx == -1
2665 || h->forced_local
2666 || info->symbolic)
2667 {
2668 /* This references the local definition. We must
2669 initialize this entry in the global offset table.
2670 Since the offset must always be a multiple of 8,
2671 we use the least significant bit to record
2672 whether we have initialized it already.
2673
2674 When doing a dynamic link, we create a .rela.got
2675 relocation entry to initialize the value. This
2676 is done in the finish_dynamic_symbol routine. */
2677 if ((off & 1) != 0)
2678 off &= ~1;
2679 else
2680 {
2681 bfd_put_64 (output_bfd, relocation,
2682 base_got->contents + off);
2683 /* Note that this is harmless for the GOTPLT64
2684 case, as -1 | 1 still is -1. */
2685 h->got.offset |= 1;
2686 }
2687 }
2688 }
2689
2690 relocation = (base_got->output_section->vma
2691 + base_got->output_offset + off);
2692
2693 goto do_relocation;
2694 }
2695
2696 if (h->plt.offset == (bfd_vma) -1)
2697 {
2698 /* Handle static pointers of STT_GNU_IFUNC symbols. */
2699 if (r_type == htab->pointer_r_type
2700 && (input_section->flags & SEC_CODE) == 0)
2701 goto do_ifunc_pointer;
2702 goto bad_ifunc_reloc;
2703 }
2704
2705 /* STT_GNU_IFUNC symbol must go through PLT. */
2706 if (htab->elf.splt != NULL)
2707 {
2708 if (htab->plt_second != NULL)
2709 {
2710 resolved_plt = htab->plt_second;
2711 plt_offset = eh->plt_second.offset;
2712 }
2713 else
2714 {
2715 resolved_plt = htab->elf.splt;
2716 plt_offset = h->plt.offset;
2717 }
2718 }
2719 else
2720 {
2721 resolved_plt = htab->elf.iplt;
2722 plt_offset = h->plt.offset;
2723 }
2724
2725 relocation = (resolved_plt->output_section->vma
2726 + resolved_plt->output_offset + plt_offset);
2727
2728 switch (r_type)
2729 {
2730 default:
2731 bad_ifunc_reloc:
2732 if (h->root.root.string)
2733 name = h->root.root.string;
2734 else
2735 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
2736 NULL);
2737 _bfd_error_handler
2738 /* xgettext:c-format */
2739 (_("%pB: relocation %s against STT_GNU_IFUNC "
2740 "symbol `%s' isn't supported"), input_bfd,
2741 howto->name, name);
2742 bfd_set_error (bfd_error_bad_value);
2743 return FALSE;
2744
2745 case R_X86_64_32S:
2746 if (bfd_link_pic (info))
2747 abort ();
2748 goto do_relocation;
2749
2750 case R_X86_64_32:
2751 if (ABI_64_P (output_bfd))
2752 goto do_relocation;
2753 /* FALLTHROUGH */
2754 case R_X86_64_64:
2755 do_ifunc_pointer:
2756 if (rel->r_addend != 0)
2757 {
2758 if (h->root.root.string)
2759 name = h->root.root.string;
2760 else
2761 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
2762 sym, NULL);
2763 _bfd_error_handler
2764 /* xgettext:c-format */
2765 (_("%pB: relocation %s against STT_GNU_IFUNC "
2766 "symbol `%s' has non-zero addend: %" PRId64),
2767 input_bfd, howto->name, name, (int64_t) rel->r_addend);
2768 bfd_set_error (bfd_error_bad_value);
2769 return FALSE;
2770 }
2771
2772 /* Generate a dynamic relocation only when there is a
2773 non-GOT reference in a shared object or there is no
2774 PLT. */
2775 if ((bfd_link_pic (info) && h->non_got_ref)
2776 || h->plt.offset == (bfd_vma) -1)
2777 {
2778 Elf_Internal_Rela outrel;
2779 asection *sreloc;
2780
2781 /* Need a dynamic relocation to get the real function
2782 address. */
2783 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
2784 info,
2785 input_section,
2786 rel->r_offset);
2787 if (outrel.r_offset == (bfd_vma) -1
2788 || outrel.r_offset == (bfd_vma) -2)
2789 abort ();
2790
2791 outrel.r_offset += (input_section->output_section->vma
2792 + input_section->output_offset);
2793
2794 if (POINTER_LOCAL_IFUNC_P (info, h))
2795 {
2796 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
2797 h->root.root.string,
2798 h->root.u.def.section->owner);
2799
2800 /* This symbol is resolved locally. */
2801 outrel.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
2802 outrel.r_addend = (h->root.u.def.value
2803 + h->root.u.def.section->output_section->vma
2804 + h->root.u.def.section->output_offset);
2805 }
2806 else
2807 {
2808 outrel.r_info = htab->r_info (h->dynindx, r_type);
2809 outrel.r_addend = 0;
2810 }
2811
2812 /* Dynamic relocations are stored in
2813 1. .rela.ifunc section in PIC object.
2814 2. .rela.got section in dynamic executable.
2815 3. .rela.iplt section in static executable. */
2816 if (bfd_link_pic (info))
2817 sreloc = htab->elf.irelifunc;
2818 else if (htab->elf.splt != NULL)
2819 sreloc = htab->elf.srelgot;
2820 else
2821 sreloc = htab->elf.irelplt;
2822 elf_append_rela (output_bfd, sreloc, &outrel);
2823
2824 /* If this reloc is against an external symbol, we
2825 do not want to fiddle with the addend. Otherwise,
2826 we need to include the symbol value so that it
2827 becomes an addend for the dynamic reloc. For an
2828 internal symbol, the addend has already been updated. */
2829 continue;
2830 }
2831 /* FALLTHROUGH */
2832 case R_X86_64_PC32:
2833 case R_X86_64_PC32_BND:
2834 case R_X86_64_PC64:
2835 case R_X86_64_PLT32:
2836 case R_X86_64_PLT32_BND:
2837 goto do_relocation;
2838 }
2839 }
2840
2841 skip_ifunc:
2842 resolved_to_zero = (eh != NULL
2843 && UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh));
2844
2845 /* When generating a shared object, the relocations handled here are
2846 copied into the output file to be resolved at run time. */
2847 switch (r_type)
2848 {
2849 case R_X86_64_GOT32:
2850 case R_X86_64_GOT64:
2851 /* Relocation is to the entry for this symbol in the global
2852 offset table. */
2853 case R_X86_64_GOTPCREL:
2854 case R_X86_64_GOTPCRELX:
2855 case R_X86_64_REX_GOTPCRELX:
2856 case R_X86_64_GOTPCREL64:
2857 /* Use global offset table entry as symbol value. */
2858 case R_X86_64_GOTPLT64:
2859 /* This is obsolete and treated the same as GOT64. */
2860 base_got = htab->elf.sgot;
2861
2862 if (htab->elf.sgot == NULL)
2863 abort ();
2864
2865 relative_reloc = FALSE;
2866 if (h != NULL)
2867 {
2868 off = h->got.offset;
2869 if (h->needs_plt
2870 && h->plt.offset != (bfd_vma)-1
2871 && off == (bfd_vma)-1)
2872 {
2873 /* We can't use h->got.offset here to save
2874 state, or even just remember the offset, as
2875 finish_dynamic_symbol would use that as offset into
2876 .got. */
2877 bfd_vma plt_index = (h->plt.offset / plt_entry_size
2878 - htab->plt.has_plt0);
2879 off = (plt_index + 3) * GOT_ENTRY_SIZE;
2880 base_got = htab->elf.sgotplt;
2881 }
2882
2883 if (RESOLVED_LOCALLY_P (info, h, htab))
2884 {
2885 /* We must initialize this entry in the global offset
2886 table. Since the offset must always be a multiple
2887 of 8, we use the least significant bit to record
2888 whether we have initialized it already.
2889
2890 When doing a dynamic link, we create a .rela.got
2891 relocation entry to initialize the value. This is
2892 done in the finish_dynamic_symbol routine. */
2893 if ((off & 1) != 0)
2894 off &= ~1;
2895 else
2896 {
2897 bfd_put_64 (output_bfd, relocation,
2898 base_got->contents + off);
2899 /* Note that this is harmless for the GOTPLT64 case,
2900 as -1 | 1 still is -1. */
2901 h->got.offset |= 1;
2902
2903 if (GENERATE_RELATIVE_RELOC_P (info, h))
2904 {
2905 /* If this symbol isn't dynamic in PIC,
2906 generate R_X86_64_RELATIVE here. */
2907 eh->no_finish_dynamic_symbol = 1;
2908 relative_reloc = TRUE;
2909 }
2910 }
2911 }
2912 else
2913 unresolved_reloc = FALSE;
2914 }
2915 else
2916 {
2917 if (local_got_offsets == NULL)
2918 abort ();
2919
2920 off = local_got_offsets[r_symndx];
2921
2922 /* The offset must always be a multiple of 8. We use
2923 the least significant bit to record whether we have
2924 already generated the necessary reloc. */
2925 if ((off & 1) != 0)
2926 off &= ~1;
2927 else
2928 {
2929 bfd_put_64 (output_bfd, relocation,
2930 base_got->contents + off);
2931 local_got_offsets[r_symndx] |= 1;
2932
2933 if (bfd_link_pic (info))
2934 relative_reloc = TRUE;
2935 }
2936 }
2937
2938 if (relative_reloc)
2939 {
2940 asection *s;
2941 Elf_Internal_Rela outrel;
2942
2943 /* We need to generate a R_X86_64_RELATIVE reloc
2944 for the dynamic linker. */
2945 s = htab->elf.srelgot;
2946 if (s == NULL)
2947 abort ();
2948
2949 outrel.r_offset = (base_got->output_section->vma
2950 + base_got->output_offset
2951 + off);
2952 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
2953 outrel.r_addend = relocation;
2954 elf_append_rela (output_bfd, s, &outrel);
2955 }
2956
2957 if (off >= (bfd_vma) -2)
2958 abort ();
2959
2960 relocation = base_got->output_section->vma
2961 + base_got->output_offset + off;
2962 if (r_type != R_X86_64_GOTPCREL
2963 && r_type != R_X86_64_GOTPCRELX
2964 && r_type != R_X86_64_REX_GOTPCRELX
2965 && r_type != R_X86_64_GOTPCREL64)
2966 relocation -= htab->elf.sgotplt->output_section->vma
2967 - htab->elf.sgotplt->output_offset;
2968
2969 break;
2970
2971 case R_X86_64_GOTOFF64:
2972 /* Relocation is relative to the start of the global offset
2973 table. */
2974
2975 /* Check to make sure it isn't a protected function or data
2976 symbol for a shared library, since it may not be local when
2977 used as a function address or with a copy relocation. We also
2978 need to make sure that the symbol is referenced locally. */
2979 if (bfd_link_pic (info) && h)
2980 {
2981 if (!h->def_regular)
2982 {
2983 const char *v;
2984
2985 switch (ELF_ST_VISIBILITY (h->other))
2986 {
2987 case STV_HIDDEN:
2988 v = _("hidden symbol");
2989 break;
2990 case STV_INTERNAL:
2991 v = _("internal symbol");
2992 break;
2993 case STV_PROTECTED:
2994 v = _("protected symbol");
2995 break;
2996 default:
2997 v = _("symbol");
2998 break;
2999 }
3000
3001 _bfd_error_handler
3002 /* xgettext:c-format */
3003 (_("%pB: relocation R_X86_64_GOTOFF64 against undefined %s"
3004 " `%s' can not be used when making a shared object"),
3005 input_bfd, v, h->root.root.string);
3006 bfd_set_error (bfd_error_bad_value);
3007 return FALSE;
3008 }
3009 else if (!bfd_link_executable (info)
3010 && !SYMBOL_REFERENCES_LOCAL_P (info, h)
3011 && (h->type == STT_FUNC
3012 || h->type == STT_OBJECT)
3013 && ELF_ST_VISIBILITY (h->other) == STV_PROTECTED)
3014 {
3015 _bfd_error_handler
3016 /* xgettext:c-format */
3017 (_("%pB: relocation R_X86_64_GOTOFF64 against protected %s"
3018 " `%s' can not be used when making a shared object"),
3019 input_bfd,
3020 h->type == STT_FUNC ? "function" : "data",
3021 h->root.root.string);
3022 bfd_set_error (bfd_error_bad_value);
3023 return FALSE;
3024 }
3025 }
3026
3027 /* Note that sgot is not involved in this
3028 calculation. We always want the start of .got.plt. If we
3029 defined _GLOBAL_OFFSET_TABLE_ in a different way, as is
3030 permitted by the ABI, we might have to change this
3031 calculation. */
3032 relocation -= htab->elf.sgotplt->output_section->vma
3033 + htab->elf.sgotplt->output_offset;
3034 break;
3035
3036 case R_X86_64_GOTPC32:
3037 case R_X86_64_GOTPC64:
3038 /* Use global offset table as symbol value. */
3039 relocation = htab->elf.sgotplt->output_section->vma
3040 + htab->elf.sgotplt->output_offset;
3041 unresolved_reloc = FALSE;
3042 break;
3043
3044 case R_X86_64_PLTOFF64:
3045 /* The relocation is the PLT entry relative to the GOT. For
3046 local symbols it's the symbol itself relative to the GOT. */
3047 if (h != NULL
3048 /* See PLT32 handling. */
3049 && (h->plt.offset != (bfd_vma) -1
3050 || eh->plt_got.offset != (bfd_vma) -1)
3051 && htab->elf.splt != NULL)
3052 {
3053 if (eh->plt_got.offset != (bfd_vma) -1)
3054 {
3055 /* Use the GOT PLT. */
3056 resolved_plt = htab->plt_got;
3057 plt_offset = eh->plt_got.offset;
3058 }
3059 else if (htab->plt_second != NULL)
3060 {
3061 resolved_plt = htab->plt_second;
3062 plt_offset = eh->plt_second.offset;
3063 }
3064 else
3065 {
3066 resolved_plt = htab->elf.splt;
3067 plt_offset = h->plt.offset;
3068 }
3069
3070 relocation = (resolved_plt->output_section->vma
3071 + resolved_plt->output_offset
3072 + plt_offset);
3073 unresolved_reloc = FALSE;
3074 }
3075
3076 relocation -= htab->elf.sgotplt->output_section->vma
3077 + htab->elf.sgotplt->output_offset;
3078 break;
3079
3080 case R_X86_64_PLT32:
3081 case R_X86_64_PLT32_BND:
3082 /* Relocation is to the entry for this symbol in the
3083 procedure linkage table. */
3084
3085 /* Resolve a PLT32 reloc against a local symbol directly,
3086 without using the procedure linkage table. */
3087 if (h == NULL)
3088 break;
3089
3090 if ((h->plt.offset == (bfd_vma) -1
3091 && eh->plt_got.offset == (bfd_vma) -1)
3092 || htab->elf.splt == NULL)
3093 {
3094 /* We didn't make a PLT entry for this symbol. This
3095 happens when statically linking PIC code, or when
3096 using -Bsymbolic. */
3097 break;
3098 }
3099
3100 use_plt:
3101 if (h->plt.offset != (bfd_vma) -1)
3102 {
3103 if (htab->plt_second != NULL)
3104 {
3105 resolved_plt = htab->plt_second;
3106 plt_offset = eh->plt_second.offset;
3107 }
3108 else
3109 {
3110 resolved_plt = htab->elf.splt;
3111 plt_offset = h->plt.offset;
3112 }
3113 }
3114 else
3115 {
3116 /* Use the GOT PLT. */
3117 resolved_plt = htab->plt_got;
3118 plt_offset = eh->plt_got.offset;
3119 }
3120
3121 relocation = (resolved_plt->output_section->vma
3122 + resolved_plt->output_offset
3123 + plt_offset);
3124 unresolved_reloc = FALSE;
3125 break;
3126
3127 case R_X86_64_SIZE32:
3128 case R_X86_64_SIZE64:
3129 /* Set to symbol size. */
3130 relocation = st_size;
3131 goto direct;
3132
3133 case R_X86_64_PC8:
3134 case R_X86_64_PC16:
3135 case R_X86_64_PC32:
3136 case R_X86_64_PC32_BND:
3137 /* Since x86-64 has a PC-relative PLT, we can use the PLT in
3138 a PIE as the function address. */
3139 if (h != NULL
3140 && (input_section->flags & SEC_CODE) == 0
3141 && bfd_link_pie (info)
3142 && h->type == STT_FUNC
3143 && !h->def_regular
3144 && h->def_dynamic)
3145 goto use_plt;
3146 /* Fall through. */
3147
3148 case R_X86_64_8:
3149 case R_X86_64_16:
3150 case R_X86_64_32:
3151 case R_X86_64_PC64:
3152 case R_X86_64_64:
3153 /* FIXME: The ABI says the linker should make sure the value is
3154 the same when it's zero-extended to 64 bits. */
3155
3156 direct:
3157 if ((input_section->flags & SEC_ALLOC) == 0)
3158 break;
3159
3160 need_copy_reloc_in_pie = (bfd_link_pie (info)
3161 && h != NULL
3162 && (h->needs_copy
3163 || eh->needs_copy
3164 || (h->root.type
3165 == bfd_link_hash_undefined))
3166 && (X86_PCREL_TYPE_P (r_type)
3167 || X86_SIZE_TYPE_P (r_type)));
3168
3169 if (GENERATE_DYNAMIC_RELOCATION_P (info, eh, r_type,
3170 need_copy_reloc_in_pie,
3171 resolved_to_zero, FALSE))
3172 {
3173 Elf_Internal_Rela outrel;
3174 bfd_boolean skip, relocate;
3175 asection *sreloc;
3176
3177 /* When generating a shared object, these relocations
3178 are copied into the output file to be resolved at run
3179 time. */
3180 skip = FALSE;
3181 relocate = FALSE;
3182
3183 outrel.r_offset =
3184 _bfd_elf_section_offset (output_bfd, info, input_section,
3185 rel->r_offset);
3186 if (outrel.r_offset == (bfd_vma) -1)
3187 skip = TRUE;
3188 else if (outrel.r_offset == (bfd_vma) -2)
3189 skip = TRUE, relocate = TRUE;
3190
3191 outrel.r_offset += (input_section->output_section->vma
3192 + input_section->output_offset);
3193
3194 if (skip)
3195 memset (&outrel, 0, sizeof outrel);
3196
3197 else if (COPY_INPUT_RELOC_P (info, h, r_type))
3198 {
3199 outrel.r_info = htab->r_info (h->dynindx, r_type);
3200 outrel.r_addend = rel->r_addend;
3201 }
3202 else
3203 {
3204 /* This symbol is local, or marked to become local.
3205 When relocation overflow check is disabled, we
3206 convert R_X86_64_32 to dynamic R_X86_64_RELATIVE. */
3207 if (r_type == htab->pointer_r_type
3208 || (r_type == R_X86_64_32
3209 && info->no_reloc_overflow_check))
3210 {
3211 relocate = TRUE;
3212 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
3213 outrel.r_addend = relocation + rel->r_addend;
3214 }
3215 else if (r_type == R_X86_64_64
3216 && !ABI_64_P (output_bfd))
3217 {
3218 relocate = TRUE;
3219 outrel.r_info = htab->r_info (0,
3220 R_X86_64_RELATIVE64);
3221 outrel.r_addend = relocation + rel->r_addend;
3222 /* Check addend overflow. */
3223 if ((outrel.r_addend & 0x80000000)
3224 != (rel->r_addend & 0x80000000))
3225 {
3226 const char *name;
3227 int addend = rel->r_addend;
3228 if (h && h->root.root.string)
3229 name = h->root.root.string;
3230 else
3231 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
3232 sym, NULL);
3233 _bfd_error_handler
3234 /* xgettext:c-format */
3235 (_("%pB: addend %s%#x in relocation %s against "
3236 "symbol `%s' at %#" PRIx64
3237 " in section `%pA' is out of range"),
3238 input_bfd, addend < 0 ? "-" : "", addend,
3239 howto->name, name, (uint64_t) rel->r_offset,
3240 input_section);
3241 bfd_set_error (bfd_error_bad_value);
3242 return FALSE;
3243 }
3244 }
3245 else
3246 {
3247 long sindx;
3248
3249 if (bfd_is_abs_section (sec))
3250 sindx = 0;
3251 else if (sec == NULL || sec->owner == NULL)
3252 {
3253 bfd_set_error (bfd_error_bad_value);
3254 return FALSE;
3255 }
3256 else
3257 {
3258 asection *osec;
3259
3260 /* We are turning this relocation into one
3261 against a section symbol. It would be
3262 proper to subtract the symbol's value,
3263 osec->vma, from the emitted reloc addend,
3264 but ld.so expects buggy relocs. */
3265 osec = sec->output_section;
3266 sindx = elf_section_data (osec)->dynindx;
3267 if (sindx == 0)
3268 {
3269 asection *oi = htab->elf.text_index_section;
3270 sindx = elf_section_data (oi)->dynindx;
3271 }
3272 BFD_ASSERT (sindx != 0);
3273 }
3274
3275 outrel.r_info = htab->r_info (sindx, r_type);
3276 outrel.r_addend = relocation + rel->r_addend;
3277 }
3278 }
3279
3280 sreloc = elf_section_data (input_section)->sreloc;
3281
3282 if (sreloc == NULL || sreloc->contents == NULL)
3283 {
3284 r = bfd_reloc_notsupported;
3285 goto check_relocation_error;
3286 }
3287
3288 elf_append_rela (output_bfd, sreloc, &outrel);
3289
3290 /* If this reloc is against an external symbol, we do
3291 not want to fiddle with the addend. Otherwise, we
3292 need to include the symbol value so that it becomes
3293 an addend for the dynamic reloc. */
3294 if (! relocate)
3295 continue;
3296 }
3297
3298 break;
3299
3300 case R_X86_64_TLSGD:
3301 case R_X86_64_GOTPC32_TLSDESC:
3302 case R_X86_64_TLSDESC_CALL:
3303 case R_X86_64_GOTTPOFF:
3304 tls_type = GOT_UNKNOWN;
3305 if (h == NULL && local_got_offsets)
3306 tls_type = elf_x86_local_got_tls_type (input_bfd) [r_symndx];
3307 else if (h != NULL)
3308 tls_type = elf_x86_hash_entry (h)->tls_type;
3309
3310 r_type_tls = r_type;
3311 if (! elf_x86_64_tls_transition (info, input_bfd,
3312 input_section, contents,
3313 symtab_hdr, sym_hashes,
3314 &r_type_tls, tls_type, rel,
3315 relend, h, r_symndx, TRUE))
3316 return FALSE;
3317
3318 if (r_type_tls == R_X86_64_TPOFF32)
3319 {
3320 bfd_vma roff = rel->r_offset;
3321
3322 BFD_ASSERT (! unresolved_reloc);
3323
3324 if (r_type == R_X86_64_TLSGD)
3325 {
3326 /* GD->LE transition. For 64bit, change
3327 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3328 .word 0x6666; rex64; call __tls_get_addr@PLT
3329 or
3330 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3331 .byte 0x66; rex64
3332 call *__tls_get_addr@GOTPCREL(%rip)
3333 which may be converted to
3334 addr32 call __tls_get_addr
3335 into:
3336 movq %fs:0, %rax
3337 leaq foo@tpoff(%rax), %rax
3338 For 32bit, change
3339 leaq foo@tlsgd(%rip), %rdi
3340 .word 0x6666; rex64; call __tls_get_addr@PLT
3341 or
3342 leaq foo@tlsgd(%rip), %rdi
3343 .byte 0x66; rex64
3344 call *__tls_get_addr@GOTPCREL(%rip)
3345 which may be converted to
3346 addr32 call __tls_get_addr
3347 into:
3348 movl %fs:0, %eax
3349 leaq foo@tpoff(%rax), %rax
3350 For largepic, change:
3351 leaq foo@tlsgd(%rip), %rdi
3352 movabsq $__tls_get_addr@pltoff, %rax
3353 addq %r15, %rax
3354 call *%rax
3355 into:
3356 movq %fs:0, %rax
3357 leaq foo@tpoff(%rax), %rax
3358 nopw 0x0(%rax,%rax,1) */
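/* The replacement sequence starts 4 bytes (3 for x32 and for the largepic
   form) before the relocation offset, and the 32-bit @tpoff value computed
   below lands 8 bytes (9 for largepic) past the relocation offset, in the
   immediate of the final leaq.  */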
3359 int largepic = 0;
3360 if (ABI_64_P (output_bfd))
3361 {
3362 if (contents[roff + 5] == 0xb8)
3363 {
3364 memcpy (contents + roff - 3,
3365 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80"
3366 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
3367 largepic = 1;
3368 }
3369 else
3370 memcpy (contents + roff - 4,
3371 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3372 16);
3373 }
3374 else
3375 memcpy (contents + roff - 3,
3376 "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3377 15);
3378 bfd_put_32 (output_bfd,
3379 elf_x86_64_tpoff (info, relocation),
3380 contents + roff + 8 + largepic);
3381 /* Skip R_X86_64_PC32, R_X86_64_PLT32,
3382 R_X86_64_GOTPCRELX and R_X86_64_PLTOFF64. */
3383 rel++;
3384 wrel++;
3385 continue;
3386 }
3387 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3388 {
3389 /* GDesc -> LE transition.
3390 It's originally something like:
3391 leaq x@tlsdesc(%rip), %rax
3392
3393 Change it to:
3394 movl $x@tpoff, %rax. */
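/* In bytes: 48 8d 05 xx xx xx xx (leaq x@tlsdesc(%rip), %rax) becomes
   48 c7 c0 xx xx xx xx (mov $x@tpoff, %rax); a REX.R bit on the original
   instruction is folded into REX.B of the rewrite below.  */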
3395
3396 unsigned int val, type;
3397
3398 type = bfd_get_8 (input_bfd, contents + roff - 3);
3399 val = bfd_get_8 (input_bfd, contents + roff - 1);
3400 bfd_put_8 (output_bfd, 0x48 | ((type >> 2) & 1),
3401 contents + roff - 3);
3402 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
3403 bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7),
3404 contents + roff - 1);
3405 bfd_put_32 (output_bfd,
3406 elf_x86_64_tpoff (info, relocation),
3407 contents + roff);
3408 continue;
3409 }
3410 else if (r_type == R_X86_64_TLSDESC_CALL)
3411 {
3412 /* GDesc -> LE transition.
3413 It's originally:
3414 call *(%rax)
3415 Turn it into:
3416 xchg %ax,%ax. */
3417 bfd_put_8 (output_bfd, 0x66, contents + roff);
3418 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
3419 continue;
3420 }
3421 else if (r_type == R_X86_64_GOTTPOFF)
3422 {
3423 /* IE->LE transition:
3424 For 64bit, originally it can be one of:
3425 movq foo@gottpoff(%rip), %reg
3426 addq foo@gottpoff(%rip), %reg
3427 We change it into:
3428 movq $foo, %reg
3429 leaq foo(%reg), %reg
3430 addq $foo, %reg.
3431 For 32bit, originally it can be one of:
3432 movq foo@gottpoff(%rip), %reg
3433 addl foo@gottpoff(%rip), %reg
3434 We change it into:
3435 movq $foo, %reg
3436 leal foo(%reg), %reg
3437 addl $foo, %reg. */
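/* For example, with %rax as the destination register:
     48 8b 05 xx xx xx xx	movq foo@gottpoff(%rip), %rax
   becomes
     48 c7 c0 xx xx xx xx	movq $foo, %rax
   and
     48 03 05 xx xx xx xx	addq foo@gottpoff(%rip), %rax
   becomes
     48 8d 80 xx xx xx xx	leaq foo(%rax), %rax  */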
3438
3439 unsigned int val, type, reg;
3440
3441 if (roff >= 3)
3442 val = bfd_get_8 (input_bfd, contents + roff - 3);
3443 else
3444 val = 0;
3445 type = bfd_get_8 (input_bfd, contents + roff - 2);
3446 reg = bfd_get_8 (input_bfd, contents + roff - 1);
3447 reg >>= 3;
3448 if (type == 0x8b)
3449 {
3450 /* movq */
3451 if (val == 0x4c)
3452 bfd_put_8 (output_bfd, 0x49,
3453 contents + roff - 3);
3454 else if (!ABI_64_P (output_bfd) && val == 0x44)
3455 bfd_put_8 (output_bfd, 0x41,
3456 contents + roff - 3);
3457 bfd_put_8 (output_bfd, 0xc7,
3458 contents + roff - 2);
3459 bfd_put_8 (output_bfd, 0xc0 | reg,
3460 contents + roff - 1);
3461 }
3462 else if (reg == 4)
3463 {
3464 /* addq/addl -> addq/addl - addressing with %rsp/%r12
3465 is special */
3466 if (val == 0x4c)
3467 bfd_put_8 (output_bfd, 0x49,
3468 contents + roff - 3);
3469 else if (!ABI_64_P (output_bfd) && val == 0x44)
3470 bfd_put_8 (output_bfd, 0x41,
3471 contents + roff - 3);
3472 bfd_put_8 (output_bfd, 0x81,
3473 contents + roff - 2);
3474 bfd_put_8 (output_bfd, 0xc0 | reg,
3475 contents + roff - 1);
3476 }
3477 else
3478 {
3479 /* addq/addl -> leaq/leal */
3480 if (val == 0x4c)
3481 bfd_put_8 (output_bfd, 0x4d,
3482 contents + roff - 3);
3483 else if (!ABI_64_P (output_bfd) && val == 0x44)
3484 bfd_put_8 (output_bfd, 0x45,
3485 contents + roff - 3);
3486 bfd_put_8 (output_bfd, 0x8d,
3487 contents + roff - 2);
3488 bfd_put_8 (output_bfd, 0x80 | reg | (reg << 3),
3489 contents + roff - 1);
3490 }
3491 bfd_put_32 (output_bfd,
3492 elf_x86_64_tpoff (info, relocation),
3493 contents + roff);
3494 continue;
3495 }
3496 else
3497 BFD_ASSERT (FALSE);
3498 }
3499
3500 if (htab->elf.sgot == NULL)
3501 abort ();
3502
3503 if (h != NULL)
3504 {
3505 off = h->got.offset;
3506 offplt = elf_x86_hash_entry (h)->tlsdesc_got;
3507 }
3508 else
3509 {
3510 if (local_got_offsets == NULL)
3511 abort ();
3512
3513 off = local_got_offsets[r_symndx];
3514 offplt = local_tlsdesc_gotents[r_symndx];
3515 }
3516
3517 if ((off & 1) != 0)
3518 off &= ~1;
3519 else
3520 {
3521 Elf_Internal_Rela outrel;
3522 int dr_type, indx;
3523 asection *sreloc;
3524
3525 if (htab->elf.srelgot == NULL)
3526 abort ();
3527
3528 indx = h && h->dynindx != -1 ? h->dynindx : 0;
3529
3530 if (GOT_TLS_GDESC_P (tls_type))
3531 {
3532 outrel.r_info = htab->r_info (indx, R_X86_64_TLSDESC);
3533 BFD_ASSERT (htab->sgotplt_jump_table_size + offplt
3534 + 2 * GOT_ENTRY_SIZE <= htab->elf.sgotplt->size);
3535 outrel.r_offset = (htab->elf.sgotplt->output_section->vma
3536 + htab->elf.sgotplt->output_offset
3537 + offplt
3538 + htab->sgotplt_jump_table_size);
3539 sreloc = htab->elf.srelplt;
3540 if (indx == 0)
3541 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3542 else
3543 outrel.r_addend = 0;
3544 elf_append_rela (output_bfd, sreloc, &outrel);
3545 }
3546
3547 sreloc = htab->elf.srelgot;
3548
3549 outrel.r_offset = (htab->elf.sgot->output_section->vma
3550 + htab->elf.sgot->output_offset + off);
3551
3552 if (GOT_TLS_GD_P (tls_type))
3553 dr_type = R_X86_64_DTPMOD64;
3554 else if (GOT_TLS_GDESC_P (tls_type))
3555 goto dr_done;
3556 else
3557 dr_type = R_X86_64_TPOFF64;
3558
3559 bfd_put_64 (output_bfd, 0, htab->elf.sgot->contents + off);
3560 outrel.r_addend = 0;
3561 if ((dr_type == R_X86_64_TPOFF64
3562 || dr_type == R_X86_64_TLSDESC) && indx == 0)
3563 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3564 outrel.r_info = htab->r_info (indx, dr_type);
3565
3566 elf_append_rela (output_bfd, sreloc, &outrel);
3567
3568 if (GOT_TLS_GD_P (tls_type))
3569 {
3570 if (indx == 0)
3571 {
3572 BFD_ASSERT (! unresolved_reloc);
3573 bfd_put_64 (output_bfd,
3574 relocation - _bfd_x86_elf_dtpoff_base (info),
3575 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3576 }
3577 else
3578 {
3579 bfd_put_64 (output_bfd, 0,
3580 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3581 outrel.r_info = htab->r_info (indx,
3582 R_X86_64_DTPOFF64);
3583 outrel.r_offset += GOT_ENTRY_SIZE;
3584 elf_append_rela (output_bfd, sreloc,
3585 &outrel);
3586 }
3587 }
3588
3589 dr_done:
3590 if (h != NULL)
3591 h->got.offset |= 1;
3592 else
3593 local_got_offsets[r_symndx] |= 1;
3594 }
3595
3596 if (off >= (bfd_vma) -2
3597 && ! GOT_TLS_GDESC_P (tls_type))
3598 abort ();
3599 if (r_type_tls == r_type)
3600 {
3601 if (r_type == R_X86_64_GOTPC32_TLSDESC
3602 || r_type == R_X86_64_TLSDESC_CALL)
3603 relocation = htab->elf.sgotplt->output_section->vma
3604 + htab->elf.sgotplt->output_offset
3605 + offplt + htab->sgotplt_jump_table_size;
3606 else
3607 relocation = htab->elf.sgot->output_section->vma
3608 + htab->elf.sgot->output_offset + off;
3609 unresolved_reloc = FALSE;
3610 }
3611 else
3612 {
3613 bfd_vma roff = rel->r_offset;
3614
3615 if (r_type == R_X86_64_TLSGD)
3616 {
3617 /* GD->IE transition. For 64bit, change
3618 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3619 .word 0x6666; rex64; call __tls_get_addr@PLT
3620 or
3621 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3622 .byte 0x66; rex64
3623 call *__tls_get_addr@GOTPCREL(%rip)
3624 which may be converted to
3625 addr32 call __tls_get_addr
3626 into:
3627 movq %fs:0, %rax
3628 addq foo@gottpoff(%rip), %rax
3629 For 32bit, change
3630 leaq foo@tlsgd(%rip), %rdi
3631 .word 0x6666; rex64; call __tls_get_addr@PLT
3632 or
3633 leaq foo@tlsgd(%rip), %rdi
3634 .byte 0x66; rex64;
3635 call *__tls_get_addr@GOTPCREL(%rip)
3636 which may be converted to
3637 addr32 call __tls_get_addr
3638 into:
3639 movl %fs:0, %eax
3640 addq foo@gottpoff(%rip), %rax
3641 For largepic, change:
3642 leaq foo@tlsgd(%rip), %rdi
3643 movabsq $__tls_get_addr@pltoff, %rax
3644 addq %r15, %rax
3645 call *%rax
3646 into:
3647 movq %fs:0, %rax
3648 addq foo@gottpoff(%rip), %rax
3649 nopw 0x0(%rax,%rax,1) */
3650 int largepic = 0;
3651 if (ABI_64_P (output_bfd))
3652 {
3653 if (contents[roff + 5] == 0xb8)
3654 {
3655 memcpy (contents + roff - 3,
3656 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05"
3657 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
3658 largepic = 1;
3659 }
3660 else
3661 memcpy (contents + roff - 4,
3662 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
3663 16);
3664 }
3665 else
3666 memcpy (contents + roff - 3,
3667 "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
3668 15);
3669
3670 relocation = (htab->elf.sgot->output_section->vma
3671 + htab->elf.sgot->output_offset + off
3672 - roff
3673 - largepic
3674 - input_section->output_section->vma
3675 - input_section->output_offset
3676 - 12);
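/* The new @gottpoff displacement is stored 8 + largepic bytes past the
   original relocation offset, and a RIP-relative disp32 is measured from
   the end of that 4-byte field, hence the extra "- largepic - 12"
   (8 + 4) above.  */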
3677 bfd_put_32 (output_bfd, relocation,
3678 contents + roff + 8 + largepic);
3679 /* Skip R_X86_64_PLT32/R_X86_64_PLTOFF64. */
3680 rel++;
3681 wrel++;
3682 continue;
3683 }
3684 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3685 {
3686 /* GDesc -> IE transition.
3687 It's originally something like:
3688 leaq x@tlsdesc(%rip), %rax
3689
3690 Change it to:
3691 movq x@gottpoff(%rip), %rax # before xchg %ax,%ax. */
3692
3693 /* Now modify the instruction as appropriate. To
3694 turn a leaq into a movq in the form we use it, it
3695 suffices to change the second byte from 0x8d to
3696 0x8b. */
3697 bfd_put_8 (output_bfd, 0x8b, contents + roff - 2);
3698
3699 bfd_put_32 (output_bfd,
3700 htab->elf.sgot->output_section->vma
3701 + htab->elf.sgot->output_offset + off
3702 - rel->r_offset
3703 - input_section->output_section->vma
3704 - input_section->output_offset
3705 - 4,
3706 contents + roff);
3707 continue;
3708 }
3709 else if (r_type == R_X86_64_TLSDESC_CALL)
3710 {
3711 /* GDesc -> IE transition.
3712 It's originally:
3713 call *(%rax)
3714
3715 Change it to:
3716 xchg %ax, %ax. */
3717
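/* call *(%rax) is 0xff 0x10 and xchg %ax,%ax is 0x66 0x90; both are
   two bytes long, so the instruction can simply be overwritten in
   place without disturbing the surrounding code.  */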
3718 bfd_put_8 (output_bfd, 0x66, contents + roff);
3719 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
3720 continue;
3721 }
3722 else
3723 BFD_ASSERT (FALSE);
3724 }
3725 break;
3726
3727 case R_X86_64_TLSLD:
3728 if (! elf_x86_64_tls_transition (info, input_bfd,
3729 input_section, contents,
3730 symtab_hdr, sym_hashes,
3731 &r_type, GOT_UNKNOWN, rel,
3732 relend, h, r_symndx, TRUE))
3733 return FALSE;
3734
3735 if (r_type != R_X86_64_TLSLD)
3736 {
3737 /* LD->LE transition:
3738 leaq foo@tlsld(%rip), %rdi
3739 call __tls_get_addr@PLT
3740 For 64bit, we change it into:
3741 .word 0x6666; .byte 0x66; movq %fs:0, %rax
3742 For 32bit, we change it into:
3743 nopl 0x0(%rax); movl %fs:0, %eax
3744 Or
3745 leaq foo@tlsld(%rip), %rdi;
3746 call *__tls_get_addr@GOTPCREL(%rip)
3747 which may be converted to
3748 addr32 call __tls_get_addr
3749 For 64bit, we change it into:
3750 .word 0x6666; .word 0x6666; movq %fs:0, %rax
3751 For 32bit, we change it into:
3752 nopw 0x0(%rax); movl %fs:0, %eax
3753 For largepic, change:
3754 leaq foo@tlsld(%rip), %rdi
3755 movabsq $__tls_get_addr@pltoff, %rax
3756 addq %rbx, %rax
3757 call *%rax
3758 into
3759 data16 data16 data16 nopw %cs:0x0(%rax,%rax,1)
3760 movq %fs:0, %rax */
3761
3762 BFD_ASSERT (r_type == R_X86_64_TPOFF32);
3763 if (ABI_64_P (output_bfd))
3764 {
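/* The leaq's 32-bit @tlsld displacement sits at rel->r_offset and
   the instruction ends at rel->r_offset + 4.  A 0xb8 at offset + 5
   is the movabsq of the largepic call sequence, 0xff at offset + 4
   is call *__tls_get_addr@GOTPCREL(%rip), 0x67 is the addr32 prefix
   of the converted direct call, and anything else is the plain
   call __tls_get_addr@PLT.  The replacement lengths (22, 13 and 12
   bytes) match the length of each original sequence.  */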
3765 if (contents[rel->r_offset + 5] == 0xb8)
3766 memcpy (contents + rel->r_offset - 3,
3767 "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0"
3768 "\x64\x48\x8b\x04\x25\0\0\0", 22);
3769 else if (contents[rel->r_offset + 4] == 0xff
3770 || contents[rel->r_offset + 4] == 0x67)
3771 memcpy (contents + rel->r_offset - 3,
3772 "\x66\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0",
3773 13);
3774 else
3775 memcpy (contents + rel->r_offset - 3,
3776 "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12);
3777 }
3778 else
3779 {
3780 if (contents[rel->r_offset + 4] == 0xff)
3781 memcpy (contents + rel->r_offset - 3,
3782 "\x66\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0",
3783 13);
3784 else
3785 memcpy (contents + rel->r_offset - 3,
3786 "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12);
3787 }
3788 /* Skip R_X86_64_PC32, R_X86_64_PLT32, R_X86_64_GOTPCRELX
3789 and R_X86_64_PLTOFF64. */
3790 rel++;
3791 wrel++;
3792 continue;
3793 }
3794
3795 if (htab->elf.sgot == NULL)
3796 abort ();
3797
3798 off = htab->tls_ld_or_ldm_got.offset;
3799 if (off & 1)
3800 off &= ~1;
3801 else
3802 {
3803 Elf_Internal_Rela outrel;
3804
3805 if (htab->elf.srelgot == NULL)
3806 abort ();
3807
3808 outrel.r_offset = (htab->elf.sgot->output_section->vma
3809 + htab->elf.sgot->output_offset + off);
3810
3811 bfd_put_64 (output_bfd, 0,
3812 htab->elf.sgot->contents + off);
3813 bfd_put_64 (output_bfd, 0,
3814 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3815 outrel.r_info = htab->r_info (0, R_X86_64_DTPMOD64);
3816 outrel.r_addend = 0;
3817 elf_append_rela (output_bfd, htab->elf.srelgot,
3818 &outrel);
3819 htab->tls_ld_or_ldm_got.offset |= 1;
3820 }
3821 relocation = htab->elf.sgot->output_section->vma
3822 + htab->elf.sgot->output_offset + off;
3823 unresolved_reloc = FALSE;
3824 break;
3825
3826 case R_X86_64_DTPOFF32:
3827 if (!bfd_link_executable (info)
3828 || (input_section->flags & SEC_CODE) == 0)
3829 relocation -= _bfd_x86_elf_dtpoff_base (info);
3830 else
3831 relocation = elf_x86_64_tpoff (info, relocation);
3832 break;
3833
3834 case R_X86_64_TPOFF32:
3835 case R_X86_64_TPOFF64:
3836 BFD_ASSERT (bfd_link_executable (info));
3837 relocation = elf_x86_64_tpoff (info, relocation);
3838 break;
3839
3840 case R_X86_64_DTPOFF64:
3841 BFD_ASSERT ((input_section->flags & SEC_CODE) == 0);
3842 relocation -= _bfd_x86_elf_dtpoff_base (info);
3843 break;
3844
3845 default:
3846 break;
3847 }
3848
3849 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
3850 because such sections are not SEC_ALLOC and thus ld.so will
3851 not process them. */
3852 if (unresolved_reloc
3853 && !((input_section->flags & SEC_DEBUGGING) != 0
3854 && h->def_dynamic)
3855 && _bfd_elf_section_offset (output_bfd, info, input_section,
3856 rel->r_offset) != (bfd_vma) -1)
3857 {
3858 switch (r_type)
3859 {
3860 case R_X86_64_32S:
3861 sec = h->root.u.def.section;
3862 if ((info->nocopyreloc
3863 || (eh->def_protected
3864 && elf_has_no_copy_on_protected (h->root.u.def.section->owner)))
3865 && !(h->root.u.def.section->flags & SEC_CODE))
3866 return elf_x86_64_need_pic (info, input_bfd, input_section,
3867 h, NULL, NULL, howto);
3868 /* Fall through. */
3869
3870 default:
3871 _bfd_error_handler
3872 /* xgettext:c-format */
3873 (_("%pB(%pA+%#" PRIx64 "): "
3874 "unresolvable %s relocation against symbol `%s'"),
3875 input_bfd,
3876 input_section,
3877 (uint64_t) rel->r_offset,
3878 howto->name,
3879 h->root.root.string);
3880 return FALSE;
3881 }
3882 }
3883
3884 do_relocation:
3885 r = _bfd_final_link_relocate (howto, input_bfd, input_section,
3886 contents, rel->r_offset,
3887 relocation, rel->r_addend);
3888
3889 check_relocation_error:
3890 if (r != bfd_reloc_ok)
3891 {
3892 const char *name;
3893
3894 if (h != NULL)
3895 name = h->root.root.string;
3896 else
3897 {
3898 name = bfd_elf_string_from_elf_section (input_bfd,
3899 symtab_hdr->sh_link,
3900 sym->st_name);
3901 if (name == NULL)
3902 return FALSE;
3903 if (*name == '\0')
3904 name = bfd_section_name (input_bfd, sec);
3905 }
3906
3907 if (r == bfd_reloc_overflow)
3908 {
3909 if (converted_reloc)
3910 {
3911 info->callbacks->einfo
3912 (_("%F%P: failed to convert GOTPCREL relocation; relink with --no-relax\n"));
3913 return FALSE;
3914 }
3915 (*info->callbacks->reloc_overflow)
3916 (info, (h ? &h->root : NULL), name, howto->name,
3917 (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
3918 }
3919 else
3920 {
3921 _bfd_error_handler
3922 /* xgettext:c-format */
3923 (_("%pB(%pA+%#" PRIx64 "): reloc against `%s': error %d"),
3924 input_bfd, input_section,
3925 (uint64_t) rel->r_offset, name, (int) r);
3926 return FALSE;
3927 }
3928 }
3929
3930 if (wrel != rel)
3931 *wrel = *rel;
3932 }
3933
3934 if (wrel != rel)
3935 {
3936 Elf_Internal_Shdr *rel_hdr;
3937 size_t deleted = rel - wrel;
3938
3939 rel_hdr = _bfd_elf_single_rel_hdr (input_section->output_section);
3940 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
3941 if (rel_hdr->sh_size == 0)
3942 {
3943 /* It is too late to remove an empty reloc section. Leave
3944 one NONE reloc.
3945 ??? What is wrong with an empty section??? */
3946 rel_hdr->sh_size = rel_hdr->sh_entsize;
3947 deleted -= 1;
3948 }
3949 rel_hdr = _bfd_elf_single_rel_hdr (input_section);
3950 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
3951 input_section->reloc_count -= deleted;
3952 }
3953
3954 return TRUE;
3955 }
3956
3957 /* Finish up dynamic symbol handling. We set the contents of various
3958 dynamic sections here. */
3959
3960 static bfd_boolean
3961 elf_x86_64_finish_dynamic_symbol (bfd *output_bfd,
3962 struct bfd_link_info *info,
3963 struct elf_link_hash_entry *h,
3964 Elf_Internal_Sym *sym)
3965 {
3966 struct elf_x86_link_hash_table *htab;
3967 bfd_boolean use_plt_second;
3968 struct elf_x86_link_hash_entry *eh;
3969 bfd_boolean local_undefweak;
3970
3971 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
3972 if (htab == NULL)
3973 return FALSE;
3974
3975 /* Use the second PLT section only if there is .plt section. */
3976 use_plt_second = htab->elf.splt != NULL && htab->plt_second != NULL;
3977
3978 eh = (struct elf_x86_link_hash_entry *) h;
3979 if (eh->no_finish_dynamic_symbol)
3980 abort ();
3981
3982 /* We keep PLT/GOT entries without dynamic PLT/GOT relocations for
3983 resolved undefined weak symbols in executable so that their
3984 references have value 0 at run-time. */
3985 local_undefweak = UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh);
3986
3987 if (h->plt.offset != (bfd_vma) -1)
3988 {
3989 bfd_vma plt_index;
3990 bfd_vma got_offset, plt_offset;
3991 Elf_Internal_Rela rela;
3992 bfd_byte *loc;
3993 asection *plt, *gotplt, *relplt, *resolved_plt;
3994 const struct elf_backend_data *bed;
3995 bfd_vma plt_got_pcrel_offset;
3996
3997 /* When building a static executable, use .iplt, .igot.plt and
3998 .rela.iplt sections for STT_GNU_IFUNC symbols. */
3999 if (htab->elf.splt != NULL)
4000 {
4001 plt = htab->elf.splt;
4002 gotplt = htab->elf.sgotplt;
4003 relplt = htab->elf.srelplt;
4004 }
4005 else
4006 {
4007 plt = htab->elf.iplt;
4008 gotplt = htab->elf.igotplt;
4009 relplt = htab->elf.irelplt;
4010 }
4011
4012 VERIFY_PLT_ENTRY (info, h, plt, gotplt, relplt, local_undefweak)
4013
4014 /* Get the index in the procedure linkage table which
4015 corresponds to this symbol. This is the index of this symbol
4016 in all the symbols for which we are making plt entries. The
4017 first entry in the procedure linkage table is reserved.
4018
4019 Get the offset into the .got table of the entry that
4020 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
4021 bytes. The first three are reserved for the dynamic linker.
4022
4023 For static executables, we don't reserve anything. */
4024
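/* For example, when there is a PLT0 (htab->plt.has_plt0), the first
   real entry has h->plt.offset == plt_entry_size, so got_offset
   becomes (1 - 1 + 3) * GOT_ENTRY_SIZE: the .got.plt slot just past
   the three reserved ones.  */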
4025 if (plt == htab->elf.splt)
4026 {
4027 got_offset = (h->plt.offset / htab->plt.plt_entry_size
4028 - htab->plt.has_plt0);
4029 got_offset = (got_offset + 3) * GOT_ENTRY_SIZE;
4030 }
4031 else
4032 {
4033 got_offset = h->plt.offset / htab->plt.plt_entry_size;
4034 got_offset = got_offset * GOT_ENTRY_SIZE;
4035 }
4036
4037 /* Fill in the entry in the procedure linkage table. */
4038 memcpy (plt->contents + h->plt.offset, htab->plt.plt_entry,
4039 htab->plt.plt_entry_size);
4040 if (use_plt_second)
4041 {
4042 memcpy (htab->plt_second->contents + eh->plt_second.offset,
4043 htab->non_lazy_plt->plt_entry,
4044 htab->non_lazy_plt->plt_entry_size);
4045
4046 resolved_plt = htab->plt_second;
4047 plt_offset = eh->plt_second.offset;
4048 }
4049 else
4050 {
4051 resolved_plt = plt;
4052 plt_offset = h->plt.offset;
4053 }
4054
4055 /* Insert the relocation positions of the plt section. */
4056
4057 /* Store the PC-relative offset to the GOT entry, measured from
4058 the end of the instruction that refers to it. */
4059 plt_got_pcrel_offset = (gotplt->output_section->vma
4060 + gotplt->output_offset
4061 + got_offset
4062 - resolved_plt->output_section->vma
4063 - resolved_plt->output_offset
4064 - plt_offset
4065 - htab->plt.plt_got_insn_size);
4066
4067 /* Check PC-relative offset overflow in PLT entry. */
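/* Adding 0x80000000 maps the valid signed 32-bit range
   [-0x80000000, 0x7fffffff] onto [0, 0xffffffff], so with a 64-bit
   bfd_vma any displacement that cannot fit in the instruction's
   32-bit field fails this test.  */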
4068 if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff)
4069 /* xgettext:c-format */
4070 info->callbacks->einfo (_("%F%pB: PC-relative offset overflow in PLT entry for `%s'\n"),
4071 output_bfd, h->root.root.string);
4072
4073 bfd_put_32 (output_bfd, plt_got_pcrel_offset,
4074 (resolved_plt->contents + plt_offset
4075 + htab->plt.plt_got_offset));
4076
4077 /* Fill in the entry in the global offset table; initially it
4078 points to the second part of the PLT entry. Leave the entry
4079 as zero for an undefined weak symbol in PIE, since no PLT
4080 relocation is emitted against an undefined weak symbol in PIE. */
4081 if (!local_undefweak)
4082 {
4083 if (htab->plt.has_plt0)
4084 bfd_put_64 (output_bfd, (plt->output_section->vma
4085 + plt->output_offset
4086 + h->plt.offset
4087 + htab->lazy_plt->plt_lazy_offset),
4088 gotplt->contents + got_offset);
4089
4090 /* Fill in the entry in the .rela.plt section. */
4091 rela.r_offset = (gotplt->output_section->vma
4092 + gotplt->output_offset
4093 + got_offset);
4094 if (PLT_LOCAL_IFUNC_P (info, h))
4095 {
4096 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
4097 h->root.root.string,
4098 h->root.u.def.section->owner);
4099
4100 /* If an STT_GNU_IFUNC symbol is locally defined, generate
4101 R_X86_64_IRELATIVE instead of R_X86_64_JUMP_SLOT. */
4102 rela.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
4103 rela.r_addend = (h->root.u.def.value
4104 + h->root.u.def.section->output_section->vma
4105 + h->root.u.def.section->output_offset);
4106 /* R_X86_64_IRELATIVE comes last. */
4107 plt_index = htab->next_irelative_index--;
4108 }
4109 else
4110 {
4111 rela.r_info = htab->r_info (h->dynindx, R_X86_64_JUMP_SLOT);
4112 rela.r_addend = 0;
4113 plt_index = htab->next_jump_slot_index++;
4114 }
4115
4116 /* Don't fill the second and third slots of the PLT entry for
4117 static executables or when there is no PLT0. */
4118 if (plt == htab->elf.splt && htab->plt.has_plt0)
4119 {
4120 bfd_vma plt0_offset
4121 = h->plt.offset + htab->lazy_plt->plt_plt_insn_end;
4122
4123 /* Put relocation index. */
4124 bfd_put_32 (output_bfd, plt_index,
4125 (plt->contents + h->plt.offset
4126 + htab->lazy_plt->plt_reloc_offset));
4127
4128 /* Put offset for jmp .PLT0 and check for overflow. We don't
4129 check relocation index for overflow since branch displacement
4130 will overflow first. */
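/* PLT0 sits at the very start of .plt, so the rel32 of the jmp is
   just the negated distance from the end of that jmp
   (h->plt.offset + plt_plt_insn_end) back to offset 0, which is why
   - plt0_offset is stored below.  */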
4131 if (plt0_offset > 0x80000000)
4132 /* xgettext:c-format */
4133 info->callbacks->einfo (_("%F%pB: branch displacement overflow in PLT entry for `%s'\n"),
4134 output_bfd, h->root.root.string);
4135 bfd_put_32 (output_bfd, - plt0_offset,
4136 (plt->contents + h->plt.offset
4137 + htab->lazy_plt->plt_plt_offset));
4138 }
4139
4140 bed = get_elf_backend_data (output_bfd);
4141 loc = relplt->contents + plt_index * bed->s->sizeof_rela;
4142 bed->s->swap_reloca_out (output_bfd, &rela, loc);
4143 }
4144 }
4145 else if (eh->plt_got.offset != (bfd_vma) -1)
4146 {
4147 bfd_vma got_offset, plt_offset;
4148 asection *plt, *got;
4149 bfd_boolean got_after_plt;
4150 int32_t got_pcrel_offset;
4151
4152 /* Set the entry in the GOT procedure linkage table. */
4153 plt = htab->plt_got;
4154 got = htab->elf.sgot;
4155 got_offset = h->got.offset;
4156
4157 if (got_offset == (bfd_vma) -1
4158 || (h->type == STT_GNU_IFUNC && h->def_regular)
4159 || plt == NULL
4160 || got == NULL)
4161 abort ();
4162
4163 /* Use the non-lazy PLT entry template for the GOT PLT since they
4164 are identical. */
4165 /* Fill in the entry in the GOT procedure linkage table. */
4166 plt_offset = eh->plt_got.offset;
4167 memcpy (plt->contents + plt_offset,
4168 htab->non_lazy_plt->plt_entry,
4169 htab->non_lazy_plt->plt_entry_size);
4170
4171 /* Store the PC-relative offset to the GOT entry, measured from
4172 the end of the instruction that refers to it. */
4173 got_pcrel_offset = (got->output_section->vma
4174 + got->output_offset
4175 + got_offset
4176 - plt->output_section->vma
4177 - plt->output_offset
4178 - plt_offset
4179 - htab->non_lazy_plt->plt_got_insn_size);
4180
4181 /* Check PC-relative offset overflow in GOT PLT entry. */
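/* got_pcrel_offset is only 32 bits wide; if the real displacement
   does not fit, the truncated value's sign will disagree with the
   actual ordering of the GOT and PLT output sections, which is what
   the test below looks for.  */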
4182 got_after_plt = got->output_section->vma > plt->output_section->vma;
4183 if ((got_after_plt && got_pcrel_offset < 0)
4184 || (!got_after_plt && got_pcrel_offset > 0))
4185 /* xgettext:c-format */
4186 info->callbacks->einfo (_("%F%pB: PC-relative offset overflow in GOT PLT entry for `%s'\n"),
4187 output_bfd, h->root.root.string);
4188
4189 bfd_put_32 (output_bfd, got_pcrel_offset,
4190 (plt->contents + plt_offset
4191 + htab->non_lazy_plt->plt_got_offset));
4192 }
4193
4194 if (!local_undefweak
4195 && !h->def_regular
4196 && (h->plt.offset != (bfd_vma) -1
4197 || eh->plt_got.offset != (bfd_vma) -1))
4198 {
4199 /* Mark the symbol as undefined, rather than as defined in
4200 the .plt section. Leave the value if there were any
4201 relocations where pointer equality matters (this is a clue
4202 for the dynamic linker, to make function pointer
4203 comparisons work between an application and shared
4204 library), otherwise set it to zero. If a function is only
4205 called from a binary, there is no need to slow down
4206 shared libraries because of that. */
4207 sym->st_shndx = SHN_UNDEF;
4208 if (!h->pointer_equality_needed)
4209 sym->st_value = 0;
4210 }
4211
4212 _bfd_x86_elf_link_fixup_ifunc_symbol (info, htab, h, sym);
4213
4214 /* Don't generate dynamic GOT relocation against undefined weak
4215 symbol in executable. */
4216 if (h->got.offset != (bfd_vma) -1
4217 && ! GOT_TLS_GD_ANY_P (elf_x86_hash_entry (h)->tls_type)
4218 && elf_x86_hash_entry (h)->tls_type != GOT_TLS_IE
4219 && !local_undefweak)
4220 {
4221 Elf_Internal_Rela rela;
4222 asection *relgot = htab->elf.srelgot;
4223
4224 /* This symbol has an entry in the global offset table. Set it
4225 up. */
4226 if (htab->elf.sgot == NULL || htab->elf.srelgot == NULL)
4227 abort ();
4228
4229 rela.r_offset = (htab->elf.sgot->output_section->vma
4230 + htab->elf.sgot->output_offset
4231 + (h->got.offset &~ (bfd_vma) 1));
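/* The low bit of h->got.offset records whether relocate_section has
   already initialized this GOT entry; mask it off to recover the
   real offset.  */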
4232
4233 /* If this is a static link, or it is a -Bsymbolic link and the
4234 symbol is defined locally or was forced to be local because
4235 of a version file, we just want to emit a RELATIVE reloc.
4236 The entry in the global offset table will already have been
4237 initialized in the relocate_section function. */
4238 if (h->def_regular
4239 && h->type == STT_GNU_IFUNC)
4240 {
4241 if (h->plt.offset == (bfd_vma) -1)
4242 {
4243 /* STT_GNU_IFUNC is referenced without PLT. */
4244 if (htab->elf.splt == NULL)
4245 {
4246 /* Use the .rel[a].iplt section to store .got relocations
4247 in a static executable. */
4248 relgot = htab->elf.irelplt;
4249 }
4250 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
4251 {
4252 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
4253 h->root.root.string,
4254 h->root.u.def.section->owner);
4255
4256 rela.r_info = htab->r_info (0,
4257 R_X86_64_IRELATIVE);
4258 rela.r_addend = (h->root.u.def.value
4259 + h->root.u.def.section->output_section->vma
4260 + h->root.u.def.section->output_offset);
4261 }
4262 else
4263 goto do_glob_dat;
4264 }
4265 else if (bfd_link_pic (info))
4266 {
4267 /* Generate R_X86_64_GLOB_DAT. */
4268 goto do_glob_dat;
4269 }
4270 else
4271 {
4272 asection *plt;
4273 bfd_vma plt_offset;
4274
4275 if (!h->pointer_equality_needed)
4276 abort ();
4277
4278 /* For a non-shared object we can't use .got.plt, which
4279 contains the real function address, if we need pointer
4280 equality. We load the GOT entry with the PLT entry address instead. */
4281 if (htab->plt_second != NULL)
4282 {
4283 plt = htab->plt_second;
4284 plt_offset = eh->plt_second.offset;
4285 }
4286 else
4287 {
4288 plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt;
4289 plt_offset = h->plt.offset;
4290 }
4291 bfd_put_64 (output_bfd, (plt->output_section->vma
4292 + plt->output_offset
4293 + plt_offset),
4294 htab->elf.sgot->contents + h->got.offset);
4295 return TRUE;
4296 }
4297 }
4298 else if (bfd_link_pic (info)
4299 && SYMBOL_REFERENCES_LOCAL_P (info, h))
4300 {
4301 if (!SYMBOL_DEFINED_NON_SHARED_P (h))
4302 return FALSE;
4303 BFD_ASSERT((h->got.offset & 1) != 0);
4304 rela.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4305 rela.r_addend = (h->root.u.def.value
4306 + h->root.u.def.section->output_section->vma
4307 + h->root.u.def.section->output_offset);
4308 }
4309 else
4310 {
4311 BFD_ASSERT((h->got.offset & 1) == 0);
4312 do_glob_dat:
4313 bfd_put_64 (output_bfd, (bfd_vma) 0,
4314 htab->elf.sgot->contents + h->got.offset);
4315 rela.r_info = htab->r_info (h->dynindx, R_X86_64_GLOB_DAT);
4316 rela.r_addend = 0;
4317 }
4318
4319 elf_append_rela (output_bfd, relgot, &rela);
4320 }
4321
4322 if (h->needs_copy)
4323 {
4324 Elf_Internal_Rela rela;
4325 asection *s;
4326
4327 /* This symbol needs a copy reloc. Set it up. */
4328 VERIFY_COPY_RELOC (h, htab)
4329
4330 rela.r_offset = (h->root.u.def.value
4331 + h->root.u.def.section->output_section->vma
4332 + h->root.u.def.section->output_offset);
4333 rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY);
4334 rela.r_addend = 0;
4335 if (h->root.u.def.section == htab->elf.sdynrelro)
4336 s = htab->elf.sreldynrelro;
4337 else
4338 s = htab->elf.srelbss;
4339 elf_append_rela (output_bfd, s, &rela);
4340 }
4341
4342 return TRUE;
4343 }
4344
4345 /* Finish up local dynamic symbol handling. We set the contents of
4346 various dynamic sections here. */
4347
4348 static bfd_boolean
4349 elf_x86_64_finish_local_dynamic_symbol (void **slot, void *inf)
4350 {
4351 struct elf_link_hash_entry *h
4352 = (struct elf_link_hash_entry *) *slot;
4353 struct bfd_link_info *info
4354 = (struct bfd_link_info *) inf;
4355
4356 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4357 info, h, NULL);
4358 }
4359
4360 /* Finish up undefined weak symbol handling in PIE. Fill its PLT entry
4361 here since an undefined weak symbol may not be dynamic, in which case
4362 elf_x86_64_finish_dynamic_symbol is not called for it. */
4363
4364 static bfd_boolean
4365 elf_x86_64_pie_finish_undefweak_symbol (struct bfd_hash_entry *bh,
4366 void *inf)
4367 {
4368 struct elf_link_hash_entry *h = (struct elf_link_hash_entry *) bh;
4369 struct bfd_link_info *info = (struct bfd_link_info *) inf;
4370
4371 if (h->root.type != bfd_link_hash_undefweak
4372 || h->dynindx != -1)
4373 return TRUE;
4374
4375 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4376 info, h, NULL);
4377 }
4378
4379 /* Used to decide how to sort relocs in an optimal manner for the
4380 dynamic linker, before writing them out. */
4381
4382 static enum elf_reloc_type_class
4383 elf_x86_64_reloc_type_class (const struct bfd_link_info *info,
4384 const asection *rel_sec ATTRIBUTE_UNUSED,
4385 const Elf_Internal_Rela *rela)
4386 {
4387 bfd *abfd = info->output_bfd;
4388 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
4389 struct elf_x86_link_hash_table *htab
4390 = elf_x86_hash_table (info, X86_64_ELF_DATA);
4391
4392 if (htab->elf.dynsym != NULL
4393 && htab->elf.dynsym->contents != NULL)
4394 {
4395 /* Check relocation against STT_GNU_IFUNC symbol if there are
4396 dynamic symbols. */
4397 unsigned long r_symndx = htab->r_sym (rela->r_info);
4398 if (r_symndx != STN_UNDEF)
4399 {
4400 Elf_Internal_Sym sym;
4401 if (!bed->s->swap_symbol_in (abfd,
4402 (htab->elf.dynsym->contents
4403 + r_symndx * bed->s->sizeof_sym),
4404 0, &sym))
4405 abort ();
4406
4407 if (ELF_ST_TYPE (sym.st_info) == STT_GNU_IFUNC)
4408 return reloc_class_ifunc;
4409 }
4410 }
4411
4412 switch ((int) ELF32_R_TYPE (rela->r_info))
4413 {
4414 case R_X86_64_IRELATIVE:
4415 return reloc_class_ifunc;
4416 case R_X86_64_RELATIVE:
4417 case R_X86_64_RELATIVE64:
4418 return reloc_class_relative;
4419 case R_X86_64_JUMP_SLOT:
4420 return reloc_class_plt;
4421 case R_X86_64_COPY:
4422 return reloc_class_copy;
4423 default:
4424 return reloc_class_normal;
4425 }
4426 }
4427
4428 /* Finish up the dynamic sections. */
4429
4430 static bfd_boolean
4431 elf_x86_64_finish_dynamic_sections (bfd *output_bfd,
4432 struct bfd_link_info *info)
4433 {
4434 struct elf_x86_link_hash_table *htab;
4435
4436 htab = _bfd_x86_elf_finish_dynamic_sections (output_bfd, info);
4437 if (htab == NULL)
4438 return FALSE;
4439
4440 if (! htab->elf.dynamic_sections_created)
4441 return TRUE;
4442
4443 if (htab->elf.splt && htab->elf.splt->size > 0)
4444 {
4445 elf_section_data (htab->elf.splt->output_section)
4446 ->this_hdr.sh_entsize = htab->plt.plt_entry_size;
4447
4448 if (htab->plt.has_plt0)
4449 {
4450 /* Fill in the special first entry in the procedure linkage
4451 table. */
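/* In the standard lazy PLT layout (e.g. elf_x86_64_lazy_plt) PLT0 is
   "pushq GOT+8(%rip); jmpq *GOT+16(%rip)" followed by padding; the
   two bfd_put_32 calls below patch the 32-bit displacements of those
   instructions at plt0_got1_offset and plt0_got2_offset.  */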
4452 memcpy (htab->elf.splt->contents,
4453 htab->lazy_plt->plt0_entry,
4454 htab->lazy_plt->plt0_entry_size);
4455 /* Add the offset for pushq GOT+8(%rip); the displacement is
4456 relative to the end of the 6-byte instruction, hence the - 6. */
4457 bfd_put_32 (output_bfd,
4458 (htab->elf.sgotplt->output_section->vma
4459 + htab->elf.sgotplt->output_offset
4460 + 8
4461 - htab->elf.splt->output_section->vma
4462 - htab->elf.splt->output_offset
4463 - 6),
4464 (htab->elf.splt->contents
4465 + htab->lazy_plt->plt0_got1_offset));
4466 /* Add offset for the PC-relative instruction accessing
4467 GOT+16, subtracting the offset to the end of that
4468 instruction. */
4469 bfd_put_32 (output_bfd,
4470 (htab->elf.sgotplt->output_section->vma
4471 + htab->elf.sgotplt->output_offset
4472 + 16
4473 - htab->elf.splt->output_section->vma
4474 - htab->elf.splt->output_offset
4475 - htab->lazy_plt->plt0_got2_insn_end),
4476 (htab->elf.splt->contents
4477 + htab->lazy_plt->plt0_got2_offset));
4478 }
4479
4480 if (htab->tlsdesc_plt)
4481 {
4482 bfd_put_64 (output_bfd, (bfd_vma) 0,
4483 htab->elf.sgot->contents + htab->tlsdesc_got);
4484
4485 memcpy (htab->elf.splt->contents + htab->tlsdesc_plt,
4486 htab->lazy_plt->plt_tlsdesc_entry,
4487 htab->lazy_plt->plt_tlsdesc_entry_size);
4488
4489 /* Add offset for pushq GOT+8(%rip), since ENDBR64 uses 4
4490 bytes and the instruction uses 6 bytes, subtract these
4491 values. */
4492 bfd_put_32 (output_bfd,
4493 (htab->elf.sgotplt->output_section->vma
4494 + htab->elf.sgotplt->output_offset
4495 + 8
4496 - htab->elf.splt->output_section->vma
4497 - htab->elf.splt->output_offset
4498 - htab->tlsdesc_plt
4499 - htab->lazy_plt->plt_tlsdesc_got1_insn_end),
4500 (htab->elf.splt->contents
4501 + htab->tlsdesc_plt
4502 + htab->lazy_plt->plt_tlsdesc_got1_offset));
4503 /* Add offset for indirect branch via GOT+TDG, where TDG
4504 stands for htab->tlsdesc_got, subtracting the offset
4505 to the end of that instruction. */
4506 bfd_put_32 (output_bfd,
4507 (htab->elf.sgot->output_section->vma
4508 + htab->elf.sgot->output_offset
4509 + htab->tlsdesc_got
4510 - htab->elf.splt->output_section->vma
4511 - htab->elf.splt->output_offset
4512 - htab->tlsdesc_plt
4513 - htab->lazy_plt->plt_tlsdesc_got2_insn_end),
4514 (htab->elf.splt->contents
4515 + htab->tlsdesc_plt
4516 + htab->lazy_plt->plt_tlsdesc_got2_offset));
4517 }
4518 }
4519
4520 /* Fill PLT entries for undefined weak symbols in PIE. */
4521 if (bfd_link_pie (info))
4522 bfd_hash_traverse (&info->hash->table,
4523 elf_x86_64_pie_finish_undefweak_symbol,
4524 info);
4525
4526 return TRUE;
4527 }
4528
4529 /* Fill PLT/GOT entries and allocate dynamic relocations for local
4530 STT_GNU_IFUNC symbols, which aren't in the ELF linker hash table.
4531 It has to be done before elf_link_sort_relocs is called so that
4532 dynamic relocations are properly sorted. */
4533
4534 static bfd_boolean
4535 elf_x86_64_output_arch_local_syms
4536 (bfd *output_bfd ATTRIBUTE_UNUSED,
4537 struct bfd_link_info *info,
4538 void *flaginfo ATTRIBUTE_UNUSED,
4539 int (*func) (void *, const char *,
4540 Elf_Internal_Sym *,
4541 asection *,
4542 struct elf_link_hash_entry *) ATTRIBUTE_UNUSED)
4543 {
4544 struct elf_x86_link_hash_table *htab
4545 = elf_x86_hash_table (info, X86_64_ELF_DATA);
4546 if (htab == NULL)
4547 return FALSE;
4548
4549 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
4550 htab_traverse (htab->loc_hash_table,
4551 elf_x86_64_finish_local_dynamic_symbol,
4552 info);
4553
4554 return TRUE;
4555 }
4556
4557 /* Forward declaration. */
4558 static const struct elf_x86_lazy_plt_layout elf_x86_64_nacl_plt;
4559
4560 /* Similar to _bfd_elf_get_synthetic_symtab. Support PLTs with all
4561 dynamic relocations. */
4562
4563 static long
4564 elf_x86_64_get_synthetic_symtab (bfd *abfd,
4565 long symcount ATTRIBUTE_UNUSED,
4566 asymbol **syms ATTRIBUTE_UNUSED,
4567 long dynsymcount,
4568 asymbol **dynsyms,
4569 asymbol **ret)
4570 {
4571 long count, i, n;
4572 int j;
4573 bfd_byte *plt_contents;
4574 long relsize;
4575 const struct elf_x86_lazy_plt_layout *lazy_plt;
4576 const struct elf_x86_non_lazy_plt_layout *non_lazy_plt;
4577 const struct elf_x86_lazy_plt_layout *lazy_bnd_plt;
4578 const struct elf_x86_non_lazy_plt_layout *non_lazy_bnd_plt;
4579 const struct elf_x86_lazy_plt_layout *lazy_ibt_plt;
4580 const struct elf_x86_non_lazy_plt_layout *non_lazy_ibt_plt;
4581 asection *plt;
4582 enum elf_x86_plt_type plt_type;
4583 struct elf_x86_plt plts[] =
4584 {
4585 { ".plt", NULL, NULL, plt_unknown, 0, 0, 0, 0 },
4586 { ".plt.got", NULL, NULL, plt_non_lazy, 0, 0, 0, 0 },
4587 { ".plt.sec", NULL, NULL, plt_second, 0, 0, 0, 0 },
4588 { ".plt.bnd", NULL, NULL, plt_second, 0, 0, 0, 0 },
4589 { NULL, NULL, NULL, plt_non_lazy, 0, 0, 0, 0 }
4590 };
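/* One slot per PLT flavour that may appear in the output: the lazy
   .plt, the non-lazy .plt.got and the second .plt.sec/.plt.bnd.  The
   loop below fills in the section, type, contents and entry counts,
   and _bfd_x86_elf_get_synthetic_symtab turns the result into
   synthetic symbols.  */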
4591
4592 *ret = NULL;
4593
4594 if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
4595 return 0;
4596
4597 if (dynsymcount <= 0)
4598 return 0;
4599
4600 relsize = bfd_get_dynamic_reloc_upper_bound (abfd);
4601 if (relsize <= 0)
4602 return -1;
4603
4604 if (get_elf_x86_backend_data (abfd)->target_os != is_nacl)
4605 {
4606 lazy_plt = &elf_x86_64_lazy_plt;
4607 non_lazy_plt = &elf_x86_64_non_lazy_plt;
4608 lazy_bnd_plt = &elf_x86_64_lazy_bnd_plt;
4609 non_lazy_bnd_plt = &elf_x86_64_non_lazy_bnd_plt;
4610 if (ABI_64_P (abfd))
4611 {
4612 lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
4613 non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
4614 }
4615 else
4616 {
4617 lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
4618 non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
4619 }
4620 }
4621 else
4622 {
4623 lazy_plt = &elf_x86_64_nacl_plt;
4624 non_lazy_plt = NULL;
4625 lazy_bnd_plt = NULL;
4626 non_lazy_bnd_plt = NULL;
4627 lazy_ibt_plt = NULL;
4628 non_lazy_ibt_plt = NULL;
4629 }
4630
4631 count = 0;
4632 for (j = 0; plts[j].name != NULL; j++)
4633 {
4634 plt = bfd_get_section_by_name (abfd, plts[j].name);
4635 if (plt == NULL || plt->size == 0)
4636 continue;
4637
4638 /* Get the PLT section contents. */
4639 plt_contents = (bfd_byte *) bfd_malloc (plt->size);
4640 if (plt_contents == NULL)
4641 break;
4642 if (!bfd_get_section_contents (abfd, (asection *) plt,
4643 plt_contents, 0, plt->size))
4644 {
4645 free (plt_contents);
4646 break;
4647 }
4648
4649 /* Check what kind of PLT it is. */
4650 plt_type = plt_unknown;
4651 if (plts[j].type == plt_unknown
4652 && (plt->size >= (lazy_plt->plt_entry_size
4653 + lazy_plt->plt_entry_size)))
4654 {
4655 /* Match lazy PLT first. Need to check the first two
4656 instructions. */
4657 if ((memcmp (plt_contents, lazy_plt->plt0_entry,
4658 lazy_plt->plt0_got1_offset) == 0)
4659 && (memcmp (plt_contents + 6, lazy_plt->plt0_entry + 6,
4660 2) == 0))
4661 plt_type = plt_lazy;
4662 else if (lazy_bnd_plt != NULL
4663 && (memcmp (plt_contents, lazy_bnd_plt->plt0_entry,
4664 lazy_bnd_plt->plt0_got1_offset) == 0)
4665 && (memcmp (plt_contents + 6,
4666 lazy_bnd_plt->plt0_entry + 6, 3) == 0))
4667 {
4668 plt_type = plt_lazy | plt_second;
4669 /* The first entry in the lazy IBT PLT is the same as the
4670 lazy BND PLT. */
4671 if ((memcmp (plt_contents + lazy_ibt_plt->plt_entry_size,
4672 lazy_ibt_plt->plt_entry,
4673 lazy_ibt_plt->plt_got_offset) == 0))
4674 lazy_plt = lazy_ibt_plt;
4675 else
4676 lazy_plt = lazy_bnd_plt;
4677 }
4678 }
4679
4680 if (non_lazy_plt != NULL
4681 && (plt_type == plt_unknown || plt_type == plt_non_lazy)
4682 && plt->size >= non_lazy_plt->plt_entry_size)
4683 {
4684 /* Match non-lazy PLT. */
4685 if (memcmp (plt_contents, non_lazy_plt->plt_entry,
4686 non_lazy_plt->plt_got_offset) == 0)
4687 plt_type = plt_non_lazy;
4688 }
4689
4690 if (plt_type == plt_unknown || plt_type == plt_second)
4691 {
4692 if (non_lazy_bnd_plt != NULL
4693 && plt->size >= non_lazy_bnd_plt->plt_entry_size
4694 && (memcmp (plt_contents, non_lazy_bnd_plt->plt_entry,
4695 non_lazy_bnd_plt->plt_got_offset) == 0))
4696 {
4697 /* Match BND PLT. */
4698 plt_type = plt_second;
4699 non_lazy_plt = non_lazy_bnd_plt;
4700 }
4701 else if (non_lazy_ibt_plt != NULL
4702 && plt->size >= non_lazy_ibt_plt->plt_entry_size
4703 && (memcmp (plt_contents,
4704 non_lazy_ibt_plt->plt_entry,
4705 non_lazy_ibt_plt->plt_got_offset) == 0))
4706 {
4707 /* Match IBT PLT. */
4708 plt_type = plt_second;
4709 non_lazy_plt = non_lazy_ibt_plt;
4710 }
4711 }
4712
4713 if (plt_type == plt_unknown)
4714 {
4715 free (plt_contents);
4716 continue;
4717 }
4718
4719 plts[j].sec = plt;
4720 plts[j].type = plt_type;
4721
4722 if ((plt_type & plt_lazy))
4723 {
4724 plts[j].plt_got_offset = lazy_plt->plt_got_offset;
4725 plts[j].plt_got_insn_size = lazy_plt->plt_got_insn_size;
4726 plts[j].plt_entry_size = lazy_plt->plt_entry_size;
4727 /* Skip PLT0 in lazy PLT. */
4728 i = 1;
4729 }
4730 else
4731 {
4732 plts[j].plt_got_offset = non_lazy_plt->plt_got_offset;
4733 plts[j].plt_got_insn_size = non_lazy_plt->plt_got_insn_size;
4734 plts[j].plt_entry_size = non_lazy_plt->plt_entry_size;
4735 i = 0;
4736 }
4737
4738 /* Skip lazy PLT when the second PLT is used. */
4739 if (plt_type == (plt_lazy | plt_second))
4740 plts[j].count = 0;
4741 else
4742 {
4743 n = plt->size / plts[j].plt_entry_size;
4744 plts[j].count = n;
4745 count += n - i;
4746 }
4747
4748 plts[j].contents = plt_contents;
4749 }
4750
4751 return _bfd_x86_elf_get_synthetic_symtab (abfd, count, relsize,
4752 (bfd_vma) 0, plts, dynsyms,
4753 ret);
4754 }
4755
4756 /* Handle an x86-64 specific section when reading an object file. This
4757 is called when elfcode.h finds a section with an unknown type. */
4758
4759 static bfd_boolean
4760 elf_x86_64_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr,
4761 const char *name, int shindex)
4762 {
4763 if (hdr->sh_type != SHT_X86_64_UNWIND)
4764 return FALSE;
4765
4766 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
4767 return FALSE;
4768
4769 return TRUE;
4770 }
4771
4772 /* Hook called by the linker routine which adds symbols from an object
4773 file. We use it to put SHN_X86_64_LCOMMON items in .lbss, instead
4774 of .bss. */
4775
4776 static bfd_boolean
4777 elf_x86_64_add_symbol_hook (bfd *abfd,
4778 struct bfd_link_info *info ATTRIBUTE_UNUSED,
4779 Elf_Internal_Sym *sym,
4780 const char **namep ATTRIBUTE_UNUSED,
4781 flagword *flagsp ATTRIBUTE_UNUSED,
4782 asection **secp,
4783 bfd_vma *valp)
4784 {
4785 asection *lcomm;
4786
4787 switch (sym->st_shndx)
4788 {
4789 case SHN_X86_64_LCOMMON:
4790 lcomm = bfd_get_section_by_name (abfd, "LARGE_COMMON");
4791 if (lcomm == NULL)
4792 {
4793 lcomm = bfd_make_section_with_flags (abfd,
4794 "LARGE_COMMON",
4795 (SEC_ALLOC
4796 | SEC_IS_COMMON
4797 | SEC_LINKER_CREATED));
4798 if (lcomm == NULL)
4799 return FALSE;
4800 elf_section_flags (lcomm) |= SHF_X86_64_LARGE;
4801 }
4802 *secp = lcomm;
4803 *valp = sym->st_size;
4804 return TRUE;
4805 }
4806
4807 return TRUE;
4808 }
4809
4810
4811 /* Given a BFD section, try to locate the corresponding ELF section
4812 index. */
4813
4814 static bfd_boolean
4815 elf_x86_64_elf_section_from_bfd_section (bfd *abfd ATTRIBUTE_UNUSED,
4816 asection *sec, int *index_return)
4817 {
4818 if (sec == &_bfd_elf_large_com_section)
4819 {
4820 *index_return = SHN_X86_64_LCOMMON;
4821 return TRUE;
4822 }
4823 return FALSE;
4824 }
4825
4826 /* Process a symbol. */
4827
4828 static void
4829 elf_x86_64_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
4830 asymbol *asym)
4831 {
4832 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
4833
4834 switch (elfsym->internal_elf_sym.st_shndx)
4835 {
4836 case SHN_X86_64_LCOMMON:
4837 asym->section = &_bfd_elf_large_com_section;
4838 asym->value = elfsym->internal_elf_sym.st_size;
4839 /* Common symbol doesn't set BSF_GLOBAL. */
4840 asym->flags &= ~BSF_GLOBAL;
4841 break;
4842 }
4843 }
4844
4845 static bfd_boolean
4846 elf_x86_64_common_definition (Elf_Internal_Sym *sym)
4847 {
4848 return (sym->st_shndx == SHN_COMMON
4849 || sym->st_shndx == SHN_X86_64_LCOMMON);
4850 }
4851
4852 static unsigned int
4853 elf_x86_64_common_section_index (asection *sec)
4854 {
4855 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
4856 return SHN_COMMON;
4857 else
4858 return SHN_X86_64_LCOMMON;
4859 }
4860
4861 static asection *
4862 elf_x86_64_common_section (asection *sec)
4863 {
4864 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
4865 return bfd_com_section_ptr;
4866 else
4867 return &_bfd_elf_large_com_section;
4868 }
4869
4870 static bfd_boolean
4871 elf_x86_64_merge_symbol (struct elf_link_hash_entry *h,
4872 const Elf_Internal_Sym *sym,
4873 asection **psec,
4874 bfd_boolean newdef,
4875 bfd_boolean olddef,
4876 bfd *oldbfd,
4877 const asection *oldsec)
4878 {
4879 /* A normal common symbol and a large common symbol result in a
4880 normal common symbol. We turn the large common symbol into a
4881 normal one. */
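/* Two cases, in both of which the old and new symbols are commons of
   different kinds: if the new symbol is a normal common and the old
   one lives in a large common section, move the existing entry to an
   ordinary "COMMON" section; if the new symbol is a large common and
   the old one is normal, redirect the new symbol's section to the
   ordinary common section.  */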
4882 if (!olddef
4883 && h->root.type == bfd_link_hash_common
4884 && !newdef
4885 && bfd_is_com_section (*psec)
4886 && oldsec != *psec)
4887 {
4888 if (sym->st_shndx == SHN_COMMON
4889 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) != 0)
4890 {
4891 h->root.u.c.p->section
4892 = bfd_make_section_old_way (oldbfd, "COMMON");
4893 h->root.u.c.p->section->flags = SEC_ALLOC;
4894 }
4895 else if (sym->st_shndx == SHN_X86_64_LCOMMON
4896 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) == 0)
4897 *psec = bfd_com_section_ptr;
4898 }
4899
4900 return TRUE;
4901 }
4902
4903 static int
4904 elf_x86_64_additional_program_headers (bfd *abfd,
4905 struct bfd_link_info *info ATTRIBUTE_UNUSED)
4906 {
4907 asection *s;
4908 int count = 0;
4909
4910 /* Check to see if we need a large readonly segment. */
4911 s = bfd_get_section_by_name (abfd, ".lrodata");
4912 if (s && (s->flags & SEC_LOAD))
4913 count++;
4914
4915 /* Check to see if we need a large data segment. Since the .lbss
4916 section is placed right after the .bss section, there should be
4917 no need for a large data segment just because of .lbss. */
4918 s = bfd_get_section_by_name (abfd, ".ldata");
4919 if (s && (s->flags & SEC_LOAD))
4920 count++;
4921
4922 return count;
4923 }
4924
4925 /* Return TRUE iff relocations for INPUT are compatible with OUTPUT. */
4926
4927 static bfd_boolean
4928 elf_x86_64_relocs_compatible (const bfd_target *input,
4929 const bfd_target *output)
4930 {
4931 return ((xvec_get_elf_backend_data (input)->s->elfclass
4932 == xvec_get_elf_backend_data (output)->s->elfclass)
4933 && _bfd_elf_relocs_compatible (input, output));
4934 }
4935
4936 /* Set up x86-64 GNU properties. Return the first relocatable ELF input
4937 with GNU properties if found. Otherwise, return NULL. */
4938
4939 static bfd *
4940 elf_x86_64_link_setup_gnu_properties (struct bfd_link_info *info)
4941 {
4942 struct elf_x86_init_table init_table;
4943
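/* Sanity checks: R_X86_64_converted_reloc_bit is OR-ed into a
   relocation type to mark converted GOTPCREL relocations, so it must
   lie above all standard relocations, below R_X86_64_max, and must
   already be set in the vtable relocation numbers so that OR-ing it
   leaves them unchanged.  */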
4944 if ((int) R_X86_64_standard >= (int) R_X86_64_converted_reloc_bit
4945 || (int) R_X86_64_max <= (int) R_X86_64_converted_reloc_bit
4946 || ((int) (R_X86_64_GNU_VTINHERIT | R_X86_64_converted_reloc_bit)
4947 != (int) R_X86_64_GNU_VTINHERIT)
4948 || ((int) (R_X86_64_GNU_VTENTRY | R_X86_64_converted_reloc_bit)
4949 != (int) R_X86_64_GNU_VTENTRY))
4950 abort ();
4951
4952 /* This is unused for x86-64. */
4953 init_table.plt0_pad_byte = 0x90;
4954
4955 if (get_elf_x86_backend_data (info->output_bfd)->target_os != is_nacl)
4956 {
4957 if (info->bndplt)
4958 {
4959 init_table.lazy_plt = &elf_x86_64_lazy_bnd_plt;
4960 init_table.non_lazy_plt = &elf_x86_64_non_lazy_bnd_plt;
4961 }
4962 else
4963 {
4964 init_table.lazy_plt = &elf_x86_64_lazy_plt;
4965 init_table.non_lazy_plt = &elf_x86_64_non_lazy_plt;
4966 }
4967
4968 if (ABI_64_P (info->output_bfd))
4969 {
4970 init_table.lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
4971 init_table.non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
4972 }
4973 else
4974 {
4975 init_table.lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
4976 init_table.non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
4977 }
4978 }
4979 else
4980 {
4981 init_table.lazy_plt = &elf_x86_64_nacl_plt;
4982 init_table.non_lazy_plt = NULL;
4983 init_table.lazy_ibt_plt = NULL;
4984 init_table.non_lazy_ibt_plt = NULL;
4985 }
4986
4987 if (ABI_64_P (info->output_bfd))
4988 {
4989 init_table.r_info = elf64_r_info;
4990 init_table.r_sym = elf64_r_sym;
4991 }
4992 else
4993 {
4994 init_table.r_info = elf32_r_info;
4995 init_table.r_sym = elf32_r_sym;
4996 }
4997
4998 return _bfd_x86_elf_link_setup_gnu_properties (info, &init_table);
4999 }
5000
5001 static const struct bfd_elf_special_section
5002 elf_x86_64_special_sections[]=
5003 {
5004 { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5005 { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
5006 { STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE},
5007 { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5008 { STRING_COMMA_LEN (".ldata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5009 { STRING_COMMA_LEN (".lrodata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
5010 { NULL, 0, 0, 0, 0 }
5011 };
5012
5013 #define TARGET_LITTLE_SYM x86_64_elf64_vec
5014 #define TARGET_LITTLE_NAME "elf64-x86-64"
5015 #define ELF_ARCH bfd_arch_i386
5016 #define ELF_TARGET_ID X86_64_ELF_DATA
5017 #define ELF_MACHINE_CODE EM_X86_64
5018 #if DEFAULT_LD_Z_SEPARATE_CODE
5019 # define ELF_MAXPAGESIZE 0x1000
5020 #else
5021 # define ELF_MAXPAGESIZE 0x200000
5022 #endif
5023 #define ELF_MINPAGESIZE 0x1000
5024 #define ELF_COMMONPAGESIZE 0x1000
5025
5026 #define elf_backend_can_gc_sections 1
5027 #define elf_backend_can_refcount 1
5028 #define elf_backend_want_got_plt 1
5029 #define elf_backend_plt_readonly 1
5030 #define elf_backend_want_plt_sym 0
5031 #define elf_backend_got_header_size (GOT_ENTRY_SIZE*3)
5032 #define elf_backend_rela_normal 1
5033 #define elf_backend_plt_alignment 4
5034 #define elf_backend_extern_protected_data 1
5035 #define elf_backend_caches_rawsize 1
5036 #define elf_backend_dtrel_excludes_plt 1
5037 #define elf_backend_want_dynrelro 1
5038
5039 #define elf_info_to_howto elf_x86_64_info_to_howto
5040
5041 #define bfd_elf64_bfd_reloc_type_lookup elf_x86_64_reloc_type_lookup
5042 #define bfd_elf64_bfd_reloc_name_lookup \
5043 elf_x86_64_reloc_name_lookup
5044
5045 #define elf_backend_relocs_compatible elf_x86_64_relocs_compatible
5046 #define elf_backend_check_relocs elf_x86_64_check_relocs
5047 #define elf_backend_create_dynamic_sections _bfd_elf_create_dynamic_sections
5048 #define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections
5049 #define elf_backend_finish_dynamic_symbol elf_x86_64_finish_dynamic_symbol
5050 #define elf_backend_output_arch_local_syms elf_x86_64_output_arch_local_syms
5051 #define elf_backend_grok_prstatus elf_x86_64_grok_prstatus
5052 #define elf_backend_grok_psinfo elf_x86_64_grok_psinfo
5053 #ifdef CORE_HEADER
5054 #define elf_backend_write_core_note elf_x86_64_write_core_note
5055 #endif
5056 #define elf_backend_reloc_type_class elf_x86_64_reloc_type_class
5057 #define elf_backend_relocate_section elf_x86_64_relocate_section
5058 #define elf_backend_init_index_section _bfd_elf_init_1_index_section
5059 #define elf_backend_object_p elf64_x86_64_elf_object_p
5060 #define bfd_elf64_get_synthetic_symtab elf_x86_64_get_synthetic_symtab
5061
5062 #define elf_backend_section_from_shdr \
5063 elf_x86_64_section_from_shdr
5064
5065 #define elf_backend_section_from_bfd_section \
5066 elf_x86_64_elf_section_from_bfd_section
5067 #define elf_backend_add_symbol_hook \
5068 elf_x86_64_add_symbol_hook
5069 #define elf_backend_symbol_processing \
5070 elf_x86_64_symbol_processing
5071 #define elf_backend_common_section_index \
5072 elf_x86_64_common_section_index
5073 #define elf_backend_common_section \
5074 elf_x86_64_common_section
5075 #define elf_backend_common_definition \
5076 elf_x86_64_common_definition
5077 #define elf_backend_merge_symbol \
5078 elf_x86_64_merge_symbol
5079 #define elf_backend_special_sections \
5080 elf_x86_64_special_sections
5081 #define elf_backend_additional_program_headers \
5082 elf_x86_64_additional_program_headers
5083 #define elf_backend_setup_gnu_properties \
5084 elf_x86_64_link_setup_gnu_properties
5085 #define elf_backend_hide_symbol \
5086 _bfd_x86_elf_hide_symbol
5087
5088 #undef elf64_bed
5089 #define elf64_bed elf64_x86_64_bed
5090
5091 #include "elf64-target.h"
5092
5093 /* CloudABI support. */
5094
5095 #undef TARGET_LITTLE_SYM
5096 #define TARGET_LITTLE_SYM x86_64_elf64_cloudabi_vec
5097 #undef TARGET_LITTLE_NAME
5098 #define TARGET_LITTLE_NAME "elf64-x86-64-cloudabi"
5099
5100 #undef ELF_OSABI
5101 #define ELF_OSABI ELFOSABI_CLOUDABI
5102
5103 #undef elf64_bed
5104 #define elf64_bed elf64_x86_64_cloudabi_bed
5105
5106 #include "elf64-target.h"
5107
5108 /* FreeBSD support. */
5109
5110 #undef TARGET_LITTLE_SYM
5111 #define TARGET_LITTLE_SYM x86_64_elf64_fbsd_vec
5112 #undef TARGET_LITTLE_NAME
5113 #define TARGET_LITTLE_NAME "elf64-x86-64-freebsd"
5114
5115 #undef ELF_OSABI
5116 #define ELF_OSABI ELFOSABI_FREEBSD
5117
5118 #undef elf64_bed
5119 #define elf64_bed elf64_x86_64_fbsd_bed
5120
5121 #include "elf64-target.h"
5122
5123 /* Solaris 2 support. */
5124
5125 #undef TARGET_LITTLE_SYM
5126 #define TARGET_LITTLE_SYM x86_64_elf64_sol2_vec
5127 #undef TARGET_LITTLE_NAME
5128 #define TARGET_LITTLE_NAME "elf64-x86-64-sol2"
5129
5130 static const struct elf_x86_backend_data elf_x86_64_solaris_arch_bed =
5131 {
5132 is_solaris /* os */
5133 };
5134
5135 #undef elf_backend_arch_data
5136 #define elf_backend_arch_data &elf_x86_64_solaris_arch_bed
5137
5138 /* Restore default: we cannot use ELFOSABI_SOLARIS, otherwise ELFOSABI_NONE
5139 objects won't be recognized. */
5140 #undef ELF_OSABI
5141
5142 #undef elf64_bed
5143 #define elf64_bed elf64_x86_64_sol2_bed
5144
5145 /* The 64-bit static TLS arena size is rounded to the nearest 16-byte
5146 boundary. */
5147 #undef elf_backend_static_tls_alignment
5148 #define elf_backend_static_tls_alignment 16
5149
5150 /* The Solaris 2 ABI requires a plt symbol on all platforms.
5151
5152 Cf. Linker and Libraries Guide, Ch. 2, Link-Editor, Generating the Output
5153 File, p.63. */
5154 #undef elf_backend_want_plt_sym
5155 #define elf_backend_want_plt_sym 1
5156
5157 #undef elf_backend_strtab_flags
5158 #define elf_backend_strtab_flags SHF_STRINGS
5159
5160 static bfd_boolean
5161 elf64_x86_64_copy_solaris_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
5162 bfd *obfd ATTRIBUTE_UNUSED,
5163 const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
5164 Elf_Internal_Shdr *osection ATTRIBUTE_UNUSED)
5165 {
5166 /* PR 19938: FIXME: Need to add code for setting the sh_info
5167 and sh_link fields of Solaris specific section types. */
5168 return FALSE;
5169 }
5170
5171 #undef elf_backend_copy_special_section_fields
5172 #define elf_backend_copy_special_section_fields elf64_x86_64_copy_solaris_special_section_fields
5173
5174 #include "elf64-target.h"
5175
5176 /* Native Client support. */
5177
5178 static bfd_boolean
5179 elf64_x86_64_nacl_elf_object_p (bfd *abfd)
5180 {
5181 /* Set the right machine number for a NaCl x86-64 ELF64 file. */
5182 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64_nacl);
5183 return TRUE;
5184 }
5185
5186 #undef TARGET_LITTLE_SYM
5187 #define TARGET_LITTLE_SYM x86_64_elf64_nacl_vec
5188 #undef TARGET_LITTLE_NAME
5189 #define TARGET_LITTLE_NAME "elf64-x86-64-nacl"
5190 #undef elf64_bed
5191 #define elf64_bed elf64_x86_64_nacl_bed
5192
5193 #undef ELF_MAXPAGESIZE
5194 #undef ELF_MINPAGESIZE
5195 #undef ELF_COMMONPAGESIZE
5196 #define ELF_MAXPAGESIZE 0x10000
5197 #define ELF_MINPAGESIZE 0x10000
5198 #define ELF_COMMONPAGESIZE 0x10000
5199
5200 /* Restore defaults. */
5201 #undef ELF_OSABI
5202 #undef elf_backend_static_tls_alignment
5203 #undef elf_backend_want_plt_sym
5204 #define elf_backend_want_plt_sym 0
5205 #undef elf_backend_strtab_flags
5206 #undef elf_backend_copy_special_section_fields
5207
5208 /* NaCl uses substantially different PLT entries for the same effects. */
5209
5210 #undef elf_backend_plt_alignment
5211 #define elf_backend_plt_alignment 5
5212 #define NACL_PLT_ENTRY_SIZE 64
5213 #define NACLMASK 0xe0 /* 32-byte alignment mask. */
5214
5215 static const bfd_byte elf_x86_64_nacl_plt0_entry[NACL_PLT_ENTRY_SIZE] =
5216 {
5217 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
5218 0x4c, 0x8b, 0x1d, 16, 0, 0, 0, /* mov GOT+16(%rip), %r11 */
5219 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
5220 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
5221 0x41, 0xff, 0xe3, /* jmpq *%r11 */
5222
5223 /* 9-byte nop sequence to pad out to the next 32-byte boundary. */
5224 0x66, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw 0x0(%rax,%rax,1) */
5225
5226 /* 32 bytes of nop to pad out to the standard size. */
5227 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5228 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5229 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5230 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5231 0x66, /* excess data16 prefix */
5232 0x90 /* nop */
5233 };
5234
5235 static const bfd_byte elf_x86_64_nacl_plt_entry[NACL_PLT_ENTRY_SIZE] =
5236 {
5237 0x4c, 0x8b, 0x1d, 0, 0, 0, 0, /* mov name@GOTPCREL(%rip),%r11 */
5238 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
5239 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
5240 0x41, 0xff, 0xe3, /* jmpq *%r11 */
5241
5242 /* 15-byte nop sequence to pad out to the next 32-byte boundary. */
5243 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5244 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5245
5246 /* Lazy GOT entries point here (32-byte aligned). */
5247 0x68, /* pushq immediate */
5248 0, 0, 0, 0, /* replaced with index into relocation table. */
5249 0xe9, /* jmp relative */
5250 0, 0, 0, 0, /* replaced with offset to start of .plt0. */
5251
5252 /* 22 bytes of nop to pad out to the standard size. */
5253 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5254 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5255 0x0f, 0x1f, 0x80, 0, 0, 0, 0, /* nopl 0x0(%rax) */
5256 };
5257
5258 /* .eh_frame covering the .plt section. */
5259
5260 static const bfd_byte elf_x86_64_nacl_eh_frame_plt[] =
5261 {
5262 #if (PLT_CIE_LENGTH != 20 \
5263 || PLT_FDE_LENGTH != 36 \
5264 || PLT_FDE_START_OFFSET != 4 + PLT_CIE_LENGTH + 8 \
5265 || PLT_FDE_LEN_OFFSET != 4 + PLT_CIE_LENGTH + 12)
5266 # error "Need elf_x86_backend_data parameters for eh_frame_plt offsets!"
5267 #endif
5268 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
5269 0, 0, 0, 0, /* CIE ID */
5270 1, /* CIE version */
5271 'z', 'R', 0, /* Augmentation string */
5272 1, /* Code alignment factor */
5273 0x78, /* Data alignment factor */
5274 16, /* Return address column */
5275 1, /* Augmentation size */
5276 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
5277 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
5278 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
5279 DW_CFA_nop, DW_CFA_nop,
5280
5281 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
5282 PLT_CIE_LENGTH + 8, 0, 0, 0,/* CIE pointer */
5283 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
5284 0, 0, 0, 0, /* .plt size goes here */
5285 0, /* Augmentation size */
5286 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
5287 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
5288 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
5289 DW_CFA_advance_loc + 58, /* DW_CFA_advance_loc: 58 to __PLT__+64 */
5290 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
5291 13, /* Block length */
5292 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
5293 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
5294 DW_OP_const1u, 63, DW_OP_and, DW_OP_const1u, 37, DW_OP_ge,
5295 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
5296 DW_CFA_nop, DW_CFA_nop
5297 };
5298
5299 static const struct elf_x86_lazy_plt_layout elf_x86_64_nacl_plt =
5300 {
5301 elf_x86_64_nacl_plt0_entry, /* plt0_entry */
5302 NACL_PLT_ENTRY_SIZE, /* plt0_entry_size */
5303 elf_x86_64_nacl_plt_entry, /* plt_entry */
5304 NACL_PLT_ENTRY_SIZE, /* plt_entry_size */
5305 elf_x86_64_nacl_plt0_entry, /* plt_tlsdesc_entry */
5306 NACL_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
5307 2, /* plt_tlsdesc_got1_offset */
5308 9, /* plt_tlsdesc_got2_offset */
5309 6, /* plt_tlsdesc_got1_insn_end */
5310 13, /* plt_tlsdesc_got2_insn_end */
5311 2, /* plt0_got1_offset */
5312 9, /* plt0_got2_offset */
5313 13, /* plt0_got2_insn_end */
5314 3, /* plt_got_offset */
5315 33, /* plt_reloc_offset */
5316 38, /* plt_plt_offset */
5317 7, /* plt_got_insn_size */
5318 42, /* plt_plt_insn_end */
5319 32, /* plt_lazy_offset */
5320 elf_x86_64_nacl_plt0_entry, /* pic_plt0_entry */
5321 elf_x86_64_nacl_plt_entry, /* pic_plt_entry */
5322 elf_x86_64_nacl_eh_frame_plt, /* eh_frame_plt */
5323 sizeof (elf_x86_64_nacl_eh_frame_plt) /* eh_frame_plt_size */
5324 };
5325
5326 static const struct elf_x86_backend_data elf_x86_64_nacl_arch_bed =
5327 {
5328 is_nacl /* os */
5329 };
5330
5331 #undef elf_backend_arch_data
5332 #define elf_backend_arch_data &elf_x86_64_nacl_arch_bed
5333
5334 #undef elf_backend_object_p
5335 #define elf_backend_object_p elf64_x86_64_nacl_elf_object_p
5336 #undef elf_backend_modify_segment_map
5337 #define elf_backend_modify_segment_map nacl_modify_segment_map
5338 #undef elf_backend_modify_program_headers
5339 #define elf_backend_modify_program_headers nacl_modify_program_headers
5340 #undef elf_backend_final_write_processing
5341 #define elf_backend_final_write_processing nacl_final_write_processing
5342
5343 #include "elf64-target.h"
5344
5345 /* Native Client x32 support. */
5346
5347 static bfd_boolean
5348 elf32_x86_64_nacl_elf_object_p (bfd *abfd)
5349 {
5350 /* Set the right machine number for a NaCl x86-64 ELF32 file. */
5351 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32_nacl);
5352 return TRUE;
5353 }
5354
5355 #undef TARGET_LITTLE_SYM
5356 #define TARGET_LITTLE_SYM x86_64_elf32_nacl_vec
5357 #undef TARGET_LITTLE_NAME
5358 #define TARGET_LITTLE_NAME "elf32-x86-64-nacl"
5359 #undef elf32_bed
5360 #define elf32_bed elf32_x86_64_nacl_bed
5361
5362 #define bfd_elf32_bfd_reloc_type_lookup \
5363 elf_x86_64_reloc_type_lookup
5364 #define bfd_elf32_bfd_reloc_name_lookup \
5365 elf_x86_64_reloc_name_lookup
5366 #define bfd_elf32_get_synthetic_symtab \
5367 elf_x86_64_get_synthetic_symtab
5368
5369 #undef elf_backend_object_p
5370 #define elf_backend_object_p \
5371 elf32_x86_64_nacl_elf_object_p
5372
5373 #undef elf_backend_bfd_from_remote_memory
5374 #define elf_backend_bfd_from_remote_memory \
5375 _bfd_elf32_bfd_from_remote_memory
5376
5377 #undef elf_backend_size_info
5378 #define elf_backend_size_info \
5379 _bfd_elf32_size_info
5380
5381 #undef elf32_bed
5382 #define elf32_bed elf32_x86_64_bed
5383
5384 #include "elf32-target.h"
5385
5386 /* Restore defaults. */
5387 #undef elf_backend_object_p
5388 #define elf_backend_object_p elf64_x86_64_elf_object_p
5389 #undef elf_backend_bfd_from_remote_memory
5390 #undef elf_backend_size_info
5391 #undef elf_backend_modify_segment_map
5392 #undef elf_backend_modify_program_headers
5393 #undef elf_backend_final_write_processing
5394
5395 /* Intel L1OM support. */
5396
5397 static bfd_boolean
5398 elf64_l1om_elf_object_p (bfd *abfd)
5399 {
5400 /* Set the right machine number for an L1OM elf64 file. */
5401 bfd_default_set_arch_mach (abfd, bfd_arch_l1om, bfd_mach_l1om);
5402 return TRUE;
5403 }
5404
5405 #undef TARGET_LITTLE_SYM
5406 #define TARGET_LITTLE_SYM l1om_elf64_vec
5407 #undef TARGET_LITTLE_NAME
5408 #define TARGET_LITTLE_NAME "elf64-l1om"
5409 #undef ELF_ARCH
5410 #define ELF_ARCH bfd_arch_l1om
5411
5412 #undef ELF_MACHINE_CODE
5413 #define ELF_MACHINE_CODE EM_L1OM
5414
5415 #undef ELF_OSABI
5416
5417 #undef elf64_bed
5418 #define elf64_bed elf64_l1om_bed
5419
5420 #undef elf_backend_object_p
5421 #define elf_backend_object_p elf64_l1om_elf_object_p
5422
5423 /* Restore defaults. */
5424 #undef ELF_MAXPAGESIZE
5425 #undef ELF_MINPAGESIZE
5426 #undef ELF_COMMONPAGESIZE
5427 #if DEFAULT_LD_Z_SEPARATE_CODE
5428 # define ELF_MAXPAGESIZE 0x1000
5429 #else
5430 # define ELF_MAXPAGESIZE 0x200000
5431 #endif
5432 #define ELF_MINPAGESIZE 0x1000
5433 #define ELF_COMMONPAGESIZE 0x1000
5434 #undef elf_backend_plt_alignment
5435 #define elf_backend_plt_alignment 4
5436 #undef elf_backend_arch_data
5437 #define elf_backend_arch_data &elf_x86_64_arch_bed
5438
5439 #include "elf64-target.h"
5440
5441 /* FreeBSD L1OM support. */
5442
5443 #undef TARGET_LITTLE_SYM
5444 #define TARGET_LITTLE_SYM l1om_elf64_fbsd_vec
5445 #undef TARGET_LITTLE_NAME
5446 #define TARGET_LITTLE_NAME "elf64-l1om-freebsd"
5447
5448 #undef ELF_OSABI
5449 #define ELF_OSABI ELFOSABI_FREEBSD
5450
5451 #undef elf64_bed
5452 #define elf64_bed elf64_l1om_fbsd_bed
5453
5454 #include "elf64-target.h"
5455
5456 /* Intel K1OM support. */
5457
5458 static bfd_boolean
5459 elf64_k1om_elf_object_p (bfd *abfd)
5460 {
5461 /* Set the right machine number for a K1OM elf64 file. */
5462 bfd_default_set_arch_mach (abfd, bfd_arch_k1om, bfd_mach_k1om);
5463 return TRUE;
5464 }
5465
5466 #undef TARGET_LITTLE_SYM
5467 #define TARGET_LITTLE_SYM k1om_elf64_vec
5468 #undef TARGET_LITTLE_NAME
5469 #define TARGET_LITTLE_NAME "elf64-k1om"
5470 #undef ELF_ARCH
5471 #define ELF_ARCH bfd_arch_k1om
5472
5473 #undef ELF_MACHINE_CODE
5474 #define ELF_MACHINE_CODE EM_K1OM
5475
5476 #undef ELF_OSABI
5477
5478 #undef elf64_bed
5479 #define elf64_bed elf64_k1om_bed
5480
5481 #undef elf_backend_object_p
5482 #define elf_backend_object_p elf64_k1om_elf_object_p
5483
5484 #undef elf_backend_static_tls_alignment
5485
5486 #undef elf_backend_want_plt_sym
5487 #define elf_backend_want_plt_sym 0
5488
5489 #include "elf64-target.h"
5490
5491 /* FreeBSD K1OM support. */
5492
5493 #undef TARGET_LITTLE_SYM
5494 #define TARGET_LITTLE_SYM k1om_elf64_fbsd_vec
5495 #undef TARGET_LITTLE_NAME
5496 #define TARGET_LITTLE_NAME "elf64-k1om-freebsd"
5497
5498 #undef ELF_OSABI
5499 #define ELF_OSABI ELFOSABI_FREEBSD
5500
5501 #undef elf64_bed
5502 #define elf64_bed elf64_k1om_fbsd_bed
5503
5504 #include "elf64-target.h"
5505
5506 /* 32bit x86-64 support. */
5507
5508 #undef TARGET_LITTLE_SYM
5509 #define TARGET_LITTLE_SYM x86_64_elf32_vec
5510 #undef TARGET_LITTLE_NAME
5511 #define TARGET_LITTLE_NAME "elf32-x86-64"
5512 #undef elf32_bed
5513
5514 #undef ELF_ARCH
5515 #define ELF_ARCH bfd_arch_i386
5516
5517 #undef ELF_MACHINE_CODE
5518 #define ELF_MACHINE_CODE EM_X86_64
5519
5520 #undef ELF_OSABI
5521
5522 #undef elf_backend_object_p
5523 #define elf_backend_object_p \
5524 elf32_x86_64_elf_object_p
5525
5526 #undef elf_backend_bfd_from_remote_memory
5527 #define elf_backend_bfd_from_remote_memory \
5528 _bfd_elf32_bfd_from_remote_memory
5529
5530 #undef elf_backend_size_info
5531 #define elf_backend_size_info \
5532 _bfd_elf32_size_info
5533
5534 #include "elf32-target.h"